Dataset columns (name: type, value range):

repo_name: string, length 7 to 71
file_path: string, length 5 to 118
context: list
import_statement: string, length 45 to 12.5k
token_num: int64, range 641 to 99.4k
cropped_code: string, length 44 to 17k
all_code: string, length 43 to 754k
next_line: string, length 2 to 330
gold_snippet_index: int64, range 0 to 68
created_at: string, length 25
level: string, 9 classes
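The columns describe a repository-level next-line completion corpus: each record carries the repository and file it came from, a list of cross-file context snippets, the file's import statements, the in-file code preceding the target, and the target next_line, together with a gold_snippet_index and a context-length level. A minimal sketch of loading and inspecting one record with the Hugging Face datasets library follows; the dataset id is a placeholder, and reading gold_snippet_index as an index into the context list is an inference from the field names rather than documented behaviour. Several example records follow the sketch.

from datasets import load_dataset

# Hypothetical dataset id; substitute the actual Hub repository.
ds = load_dataset("someone/repo-level-next-line-completion", split="train")

row = ds[0]
print(row["repo_name"], row["file_path"], row["level"], row["token_num"])
print(row["next_line"])  # the line the model is asked to produce

# Each context entry appears to be a dict with "identifier", "path" and "snippet";
# gold_snippet_index presumably points at the most relevant one (assumption).
gold = row["context"][row["gold_snippet_index"]]
print(gold["identifier"], gold["path"])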
Angryrou/udao
udao/optimization/tests/moo/conftest.py
[ { "identifier": "TabularContainer", "path": "udao/data/containers/tabular_container.py", "snippet": "class TabularContainer(BaseContainer):\n \"\"\"Container for tabular data, stored in DataFrame format.\"\"\"\n\n data: pd.DataFrame\n\n def get(self, key: str) -> np.ndarray:\n return sel...
from typing import Dict, Sequence
from torch import nn
from ....data.containers.tabular_container import TabularContainer
from ....data.extractors.tabular_extractor import TabularFeatureExtractor
from ....data.handler.data_processor import DataProcessor
from ....data.preprocessors.base_preprocessor import StaticPreprocessor
from ....data.tests.iterators.dummy_udao_iterator import DummyUdaoIterator
from ....utils.interfaces import UdaoEmbedInput
from ...concepts import (
    BoolVariable,
    Constraint,
    FloatVariable,
    IntegerVariable,
    Objective,
    Variable,
)
from ...concepts.problem import MOProblem
from ...concepts.utils import InputParameters, InputVariables
from ...soo.mogd import MOGD
import pytest
import torch as th
11,361
class ObjModel1(nn.Module):
    def forward(self, x: UdaoEmbedInput) -> th.Tensor:
        return th.reshape(x.features[:, 0] ** 2, (-1, 1))


class ObjModel2(nn.Module):
    def forward(self, x: UdaoEmbedInput) -> th.Tensor:
        return th.reshape(x.features[:, 1] ** 2, (-1, 1))


class ComplexObj1(nn.Module):
    def forward(self, x: UdaoEmbedInput) -> th.Tensor:
        return th.reshape(x.features[:, 0] ** 2 - x.features[:, 1] ** 2, (-1, 1))


class ComplexObj2(nn.Module):
    def forward(self, x: UdaoEmbedInput) -> th.Tensor:
        return th.reshape(x.features[:, 0] ** 2 + x.features[:, 1] ** 2, (-1, 1))


class TabularFeaturePreprocessor(StaticPreprocessor):
    def preprocess(self, tabular_feature: TabularContainer) -> TabularContainer:
        tabular_feature.data.loc[:, "v1"] = tabular_feature.data["v1"] / 1
        tabular_feature.data.loc[:, "v2"] = (tabular_feature.data["v2"] - 1) / 6
        return tabular_feature

    def inverse_transform(self, tabular_feature: TabularContainer) -> TabularContainer:
        tabular_feature.data.loc[:, "v1"] = tabular_feature.data["v1"] * 1
        tabular_feature.data.loc[:, "v2"] = tabular_feature.data["v2"] * 6 + 1
        return tabular_feature


@pytest.fixture()
def data_processor() -> DataProcessor:
    return DataProcessor(
        iterator_cls=DummyUdaoIterator,
        feature_extractors={
            "tabular_features": TabularFeatureExtractor(
                columns=["v1", "v2"],
            ),
            "objectives": TabularFeatureExtractor(columns=["objective_input"]),
        },
        feature_preprocessors={"tabular_features": [TabularFeaturePreprocessor()]},
    )


@pytest.fixture
def mogd() -> MOGD:
    return MOGD(
        MOGD.Params(
            learning_rate=0.1,
            max_iters=100,
            patience=10,
            multistart=2,
            objective_stress=10,
            constraint_stress=1e5,
            device=th.device("cpu"),
        )
    )


@pytest.fixture
def two_obj_problem(data_processor: DataProcessor) -> MOProblem:
    objectives = [
        Objective("obj1", minimize=True, function=ObjModel1()),
        Objective("obj2", minimize=True, function=ObjModel2()),
    ]
variables: Dict[str, Variable] = {
11
2023-12-20 09:10:42+00:00
16k
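The record above (Angryrou/udao) shows how the fields fit together in practice: cropped_code appears to end immediately before next_line, and the context list supplies cross-file definitions such as TabularContainer. A minimal sketch of assembling a completion prompt from one record and scoring an exact-match prediction follows, under the assumption that the intended task is next-line prediction; model_generate is a hypothetical stand-in for whatever model is being evaluated.

def build_prompt(row: dict, max_snippets: int = 3) -> str:
    # Prepend a few retrieved cross-file snippets, then the file's own prefix.
    snippets = "\n\n".join(c["snippet"] for c in row["context"][:max_snippets])
    return f"{snippets}\n\n{row['import_statement']}\n\n{row['cropped_code']}"

def exact_match(row: dict, model_generate) -> bool:
    # model_generate(prompt) is assumed to return the first generated line.
    prediction = model_generate(build_prompt(row))
    return prediction.strip() == row["next_line"].strip()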
XLearning-SCU/2023-TPAMI-SMILE
Net.py
[ { "identifier": "get_dist_release", "path": "DistComput.py", "snippet": "def get_dist_release(loader, dist_path):\r\n if not os.path.exists(dist_path):\r\n # loader = test_loader\r\n num_data = [10]\r\n with torch.no_grad():\r\n dist_list = [[] for i in range(len(num_d...
import math
import os
import time
import warnings
import numpy as np
import torch
import torchvision
import torch.nn.functional as F
import evaluate
import faiss
import scipy.io as sio
from torch import nn
from torch.autograd import Variable
from DistComput import get_dist_release
from _Utils.Calculator import get_nearest_k
from _Utils.Logs import update_log
from _Utils.Scatter import visualize2
from _Utils.Visualize import visualize, visual_matrix_console, visualize_image, plot_heat_map
from _Utils import TimeOperator, DirectoryOperator
from DataSetMaster.dataset import get_clusters
from classification import svm_classify
from evaluate import UMAP, evaluate2
from sklearn import metrics
from munkres import Munkres
from figures.ScatterMaster import visual_image_scatter
10,952
# xs_hat=fea1_rec[mask[:, 1] == 1], xs=fea1[mask[:, 1] == 1]).cpu().numpy() # rnmse_vec[1].extend(n1_v0) # rnmse_vec[1].extend(n1_v1) g = torch.concat((torch.zeros(len(fea0), device=fea0.device, dtype=torch.int), torch.ones(len(fea1), device=fea0.device, dtype=torch.int))) h = torch.cat([h0, h1]).detach().cpu().numpy() feature_vec.extend(h) data_vec.extend(torch.cat([fea0, fea1]).detach().cpu().numpy()) group_vec.extend(g.cpu().numpy()) type_vec.extend(torch.concat((class_labels0, class_labels1)).numpy()) inf_data_t = time.time() feature_vec = np.array(feature_vec) data_vec = np.array(data_vec) feature_vec_cluster = np.array(feature_vec_cluster) is_pair_all = np.array(is_pair_all) feature_vec_classification = np.array(feature_vec_classification) group_vec = np.array(group_vec) group_vec_cluster = np.array(group_vec_cluster) type_vec = np.array(type_vec) type_vec_cluster = np.array(type_vec_cluster) rnmse_vec[0] = np.array(rnmse_vec[0]) rnmse_vec[1] = np.array(rnmse_vec[1]) kmeans_time = TimeOperator.Timer() if args.ShowReconstruct: if args.dataset == 'MNISTUSPS': dims = [np.product(d.data.shape[1:]) for d in test_dataloader.dataset.datasets] data_list = [np.asarray(it.data, dtype=np.float32) for it in test_dataloader.dataset.datasets] Y = test_dataloader.dataset.datasets[0].targets else: dims = [d.shape[1] for d in test_dataloader.dataset.data] data_list = [np.asarray(it, dtype=np.float32) for it in test_dataloader.dataset.data] Y = test_dataloader.dataset.class_labels0 mask = test_dataloader.dataset.mask n_per_cat = 10 rec0, rec1 = self.decode([ torch.from_numpy(feature_vec[group_vec == 0]).cuda(), torch.from_numpy(feature_vec[group_vec == 1]).cuda()]) rec0 = rec0.detach().cpu().numpy() rec1 = rec1.detach().cpu().numpy() show_img = np.asarray([]) inds_map = np.asarray([]) for v in range(2): col = np.asarray([]) inds_map_col = np.asarray([]) for y in range(10): inds = np.arange(len(Y))[ np.logical_and(np.logical_and(mask[:, v] == 1, mask[:, 1 - v] == 0), Y == y) ] np.random.shuffle(inds) assert len(inds) >= n_per_cat inds = inds[:n_per_cat] raw_imgs = data_list[v][inds] missing_imgs = data_list[1 - v][inds] rec_imgs = [rec0, rec1][v][inds] rec_imgs_miss = [rec0, rec1][1 - v][inds] pack = np.asarray( [raw_imgs, rec_imgs, missing_imgs, rec_imgs_miss]).reshape([-1, n_per_cat, 28, 28]) if len(col): col = np.concatenate([col, pack], axis=0) else: col = pack if len(inds_map_col): inds_map_col = np.concatenate([inds_map_col, inds.reshape([1, -1])], axis=0) else: inds_map_col = inds.reshape([1, -1]) if len(show_img): show_img = np.concatenate([show_img, col], axis=1) else: show_img = col if len(inds_map): inds_map = np.concatenate([inds_map, inds_map_col], axis=1) else: inds_map = inds_map_col plot_heat_map(inds_map, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecIM.svg') visualize_image(show_img, show=True, fig_path='/xlearning/pengxin/Temp/MissingRec.svg') selected_ind = [ [8, 2, 8, 9, 7, 2, 5, 9, 9, 9], [0, 2, 2, 3, 5, 7, 7, 9, 7, 0], ] # ToMouxin inds_to_mouxin = [ [im[si] for im, si in zip(inds_map[:, :n_per_cat], selected_ind[0])], [im[si] for im, si in zip(inds_map[:, n_per_cat:], selected_ind[1])], ] re_dt = np.load( '/xlearning/pengxin/Checkpoints/MultiClustering/RunSets/230105/IMvC_RunSet0114_Ablation_FakeSampleWise/ --QuickConfig X50C50 --dataset MNISTUSPS --loss_sim_contras 0.02 --seed 1998/SampleCache/Np.npz') np.savez('/xlearning/pengxin/Temp/MNISTUSPS_show.npz', feature_vec=np.asarray([ re_dt['d0_data'][inds_to_mouxin[0]], re_dt['d1_data'][inds_to_mouxin[1]] ])) 
selected_ind_global = np.concatenate( (np.asarray(selected_ind[0]).reshape([-1, 1]), np.asarray(selected_ind[1]).reshape([-1, 1]) + n_per_cat), axis=1 ) show_img_final = np.concatenate( [show_img[4 * i:4 * i + 4, selected_ind_global[i]] for i in range(len(selected_ind_global))], axis=1 )[:, [i * 2 for i in range(10)] + [i * 2 + 1 for i in range(10)]] visualize_image(show_img_final, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecFinal.svg') return def cluster_and_measure(features, types, groups, row_pred=False): kst = time.time() centroids = torch.from_numpy(kmeans(features, self.class_num)) if args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']: centroids = F.normalize(centroids, dim=1) pred_vec = np.argmax(self.soft_ass(torch.from_numpy(features), centroids).numpy(), axis=1)
def show_distribution_ct(type_vec, group_vec, pred_vec, class_num, group_num): v = np.zeros((class_num, class_num, group_num), dtype=int) for t, c, g in zip(type_vec, pred_vec, group_vec): v[t, c, g] += 1 visual_matrix_console(x=v) def kmeans(feature_vec, class_num): d = feature_vec.shape[1] kmeans = faiss.Clustering(d, class_num) kmeans.verbose = False kmeans.niter = 300 kmeans.nredo = 10 # kmeans.spherical = True # if LimitKmeans: # kmeans.max_points_per_centroid = 1000 # kmeans.min_points_per_centroid = 10 res = faiss.StandardGpuResources() cfg = faiss.GpuIndexFlatConfig() cfg.useFloat16 = True cfg.device = 0 index = faiss.GpuIndexFlatL2(res, d, cfg) # print(feature_vec.shape) kmeans.train(feature_vec, index) centroids = faiss.vector_to_array(kmeans.centroids).reshape(class_num, d) return centroids def show_distribution(cluster_vec, group_vec, class_num, group_num): for it in np.arange(group_num): print('{:4d}, '.format(it), end='') print('') cluster_group = torch.zeros((class_num, group_num), dtype=torch.int) for i, j in zip(cluster_vec, group_vec): cluster_group[i, j] += 1 # cluster_group = cluster_group[torch.argsort(torch.sum(cluster_group, dim=1))] for line in cluster_group: print('{:4d}: '.format(torch.sum(line)), end='') for it in line: print('{:4d}, '.format(it), end='') print('') def save_checkpoint(state, epoch): """ it has been trained for *epoch* epochs """ filename = 'Epoch{:03d}.checkpoint'.format(epoch) checkpoint_dir = os.path.join( os.path.dirname(os.getcwd()), 'Checkpoints', filename ) DirectoryOperator.FoldOperator(directory=checkpoint_dir).make_fold() if os.path.exists(checkpoint_dir): warnings.warn('Checkpoint exist and been replaced.({})'.format(checkpoint_dir)) print('Save check point into {}'.format(checkpoint_dir)) torch.save(state, checkpoint_dir) def get_ffn(dims, last_layers=None, with_bn=False, drop_out=0): layers = [] for ind in range(len(dims) - 1): in_dim = dims[ind] out_dim = dims[ind + 1] layers.append(nn.Linear(in_dim, out_dim)) if with_bn: layers.append(nn.BatchNorm1d(out_dim)) layers.append(nn.ReLU()) if drop_out: layers.append(nn.Dropout(drop_out)) if last_layers is not None: layers.extend(last_layers) return nn.Sequential(*layers) def get_cov(dims, strides, last_layers=None, with_bn=False, drop_out=0): layers = [] for ind in range(len(dims) - 1): in_dim = dims[ind] out_dim = dims[ind + 1] stride = strides[ind] # layers.append(nn.Linear(in_dim, out_dim)) if stride >= 0: layers.append(nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=stride, padding=1)) else: layers.append(nn.ConvTranspose2d( in_dim, out_dim, kernel_size=3, stride=-stride, padding=1, output_padding=0 if stride == -1 else 1)) if with_bn: # layers.append(nn.BatchNorm1d(out_dim)) layers.append(nn.BatchNorm2d(out_dim)) layers.append(nn.ReLU()) if drop_out: layers.append(nn.Dropout(drop_out)) if last_layers is not None: layers.extend(last_layers) return nn.Sequential(*layers) class Net(nn.Module): def __init__(self, args, in_dims, class_num, group_num): super(Net, self).__init__() self.encoder_adaption = nn.ModuleList([ get_ffn([in_dims[i], 1024], with_bn=args.BatchNormType[0] == '1', drop_out=args.Dropout) for i in range(group_num if args.GroupWiseLayer[0] == '1' else 1)]) self.encoder = nn.ModuleList([ get_ffn([1024, 1024, 512], with_bn=args.BatchNormType[1] == '1', drop_out=args.Dropout) for _ in range(group_num if args.GroupWiseLayer[1] == '1' else 1)]) if args.representation_dim == 0: args.representation_dim = class_num self.class_num = class_num self.group_num = group_num 
self.pred_cac = None self.pred_center_cac = None if args.ElActivationType == 'None': el_activation_ = [] elif args.ElActivationType == 'Normalize': el_activation_ = [] elif args.ElActivationType == 'BnNormalize': el_activation_ = [nn.BatchNorm1d(args.representation_dim)] elif args.ElActivationType == 'BnReNormalize': el_activation_ = [nn.BatchNorm1d(args.representation_dim), nn.ReLU()] elif args.ElActivationType == 'BnRe': el_activation_ = [nn.BatchNorm1d(args.representation_dim), nn.ReLU()] else: raise NotImplementedError('') self.el_activation_ = el_activation_ self.encoder_linear = nn.ModuleList([ get_ffn([512, 256], with_bn=args.BatchNormType[2] == '1', drop_out=args.Dropout, last_layers=[nn.Linear(256, args.representation_dim)] + self.el_activation_) for _ in range(group_num if args.GroupWiseLayer[2] == '1' else 1)]) dec_in = args.representation_dim if args.McDecoder: dec_in *= group_num self.dec_in = dec_in self.decoder_linear = nn.ModuleList([ get_ffn([self.dec_in, 256, 512], with_bn=args.BatchNormType[3] == '1', drop_out=args.Dropout) for _ in range(group_num if args.GroupWiseLayer[3] == '1' else 1)]) if args.ActivationType == 'None': final_activation_ = [] elif args.ActivationType == 'Sigmoid': final_activation_ = [nn.Sigmoid()] elif args.ActivationType == 'Tanh': final_activation_ = [nn.Tanh()] else: raise NotImplementedError('') self.final_activation_ = final_activation_ self.decoder = nn.ModuleList([ get_ffn([512, 1024, 1024], with_bn=args.BatchNormType[4] == '1', drop_out=args.Dropout) for _ in range(group_num if args.GroupWiseLayer[4] == '1' else 1)]) self.decoder_adaption = nn.ModuleList([ get_ffn([], last_layers=[nn.Linear(1024, in_dims[i])] + self.final_activation_) for i in range(group_num if args.GroupWiseLayer[5] == '1' else 1)]) self.args = args self.in_dims = in_dims # def update_cluster_center(self, center): # self.cluster_centers = F.normalize(torch.from_numpy(center), dim=1).cuda() def forward(self, x, **kwargs): return self.decode(self.encode([x])) def encode(self, xs: list): hs = [] for g, x in enumerate(xs): if self.args.noise_type == 'None': pass elif self.args.noise_type == 'Drop': x = x * (Variable(x.data.new(x.size()).normal_(0, 0.1)) < self.args.noise_weight).type_as(x) elif self.args.noise_type == 'Add': x = x + Variable(x.data.new(x.size()).normal_(0, self.args.noise_weight)).type_as(x) else: raise NotImplementedError('') if len(x) != 0: if len(x) == 1: x = torch.concat([x, x]) # print(x.shape) # x = x.view((len(x), -1)) # print(x.shape) x = self.encoder_adaption[g if self.args.GroupWiseLayer[0] == '1' else 0](x) x = self.encoder[g if self.args.GroupWiseLayer[1] == '1' else 0](x) x = self.encoder_linear[g if self.args.GroupWiseLayer[2] == '1' else 0](x) if len(x) == 1: x = x[[0]] if self.args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']: x = F.normalize(x, dim=1) else: x = torch.zeros([0, self.args.representation_dim], device=torch.device('cuda:0')) hs.append(x) return hs def soft_ass(self, h, centroids): if self.args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']: return h @ centroids.T else: dst = torch.cdist(h, centroids) # return (torch.mean(dst) - dst) / (torch.amax(dst) - torch.amin(dst)) * 2 return -dst / 2 # def encode_class(self, hs): # cs = [] # for h in hs: # c = h @ self.cluster_centers.T # cs.append(c) # return cs def decode(self, hs): xs = [] for g, h in enumerate(hs): if self.args.McDecoder: h = torch.cat(hs, dim=1) if len(h) != 0: if len(h) == 1: h = torch.concat([h, h]) h = self.decoder_linear[g if 
self.args.GroupWiseLayer[3] == '1' else 0](h) h = self.decoder[g if self.args.GroupWiseLayer[4] == '1' else 0](h) h = self.decoder_adaption[g if self.args.GroupWiseLayer[5] == '1' else 0](h) if len(h) == 1: h = h[[0]] else: h = torch.zeros([0, self.in_dims[g]], device=torch.device('cuda:0')) xs.append(h) return xs def run(self, epochs, train_dataloader, test_dataloader, args): # if args.loss_self_cons: # clusters = get_clusters(args=args) optimizer_g = torch.optim.Adam( self.parameters(), lr=args.LearnRate, betas=(args.betas_a, args.betas_v), weight_decay=args.WeightDecay ) mse_loss = nn.MSELoss().cuda() timer_all = TimeOperator.Timer() timer_train = TimeOperator.Timer() timer_save = TimeOperator.Timer() ce_loss = nn.CrossEntropyLoss().cuda() type_detail_shown = False start_epoch = 0 if args.resume: if os.path.isfile(args.resume): print("=> loading checkpoint '{}'".format(args.resume)) checkpoint = torch.load(args.resume) # if args.gpu is None: # checkpoint = torch.load(args.resume) # else: # # Map model to be loaded to specified single gpu. # loc = 'cuda:{}'.format(args.gpu) # checkpoint = torch.load(args.resume, map_location=loc) start_epoch = checkpoint['epoch'] self.load_state_dict(checkpoint['state_dict']) optimizer_g.load_state_dict(checkpoint['optimizer']['optimizer_g']) # self.__dict__ = checkpoint['self_dic'] print("=> loaded checkpoint '{}' (epoch {})" .format(args.resume, checkpoint['epoch'])) # self.args = args # warnings.warn('This is not equal to start from the beginning due to different rands states.') # else: raise NotImplementedError("=> no checkpoint found at '{}'".format(args.resume)) if args.CodeTest: args.train_epoch = start_epoch + 1 epochs = start_epoch + 1 best_acc = 0 for epoch in range(start_epoch, epochs): if (epoch + 1) <= args.LearnRateWarm: lr = args.LearnRate * (epoch + 1) / args.LearnRateWarm else: if args.LearnRateDecayType == 'None': lr = args.LearnRate elif args.LearnRateDecayType == 'Exp': lr = args.LearnRate * ((1 + 10 * (epoch + 1 - args.LearnRateWarm) / ( args.train_epoch - args.LearnRateWarm)) ** -0.75) elif args.LearnRateDecayType == 'Cosine': lr = args.LearnRate * 0.5 * (1. 
+ math.cos( math.pi * (epoch + 1 - args.LearnRateWarm) / (args.train_epoch - args.LearnRateWarm))) else: raise NotImplementedError('args.LearnRateDecayType') if lr != args.LearnRate: def adjust_learning_rate(optimizer): print('adjust_learning_rate: {}'.format(lr)) for param_group in optimizer.param_groups: param_group['lr'] = lr adjust_learning_rate(optimizer_g) timer_all_time = time.time() # inf_t = time.time() # print('start epoch {}'.format(epoch)) self.eval() feature_vec, type_vec, group_vec = [], [], [] feature_vec_cluster = [] group_vec_cluster = [] feature_vec_classification = [] type_vec_cluster = [] data_vec = [] is_pair_all = [] timer_infer_data = TimeOperator.Timer() rnmse_vec = [[], []] # mask = 0 1 with torch.no_grad(): inf_data_t = time.time() for (fea0, fea1, class_labels0, class_labels1, mask, is_pair, index) in test_dataloader: timer_infer_data.update(time.time() - inf_data_t) # timer_infer_data.show(prefix='InferDataTime', total_count=len(test_dataloader), # print_end_time=False) fea0 = fea0.cuda() fea1 = fea1.cuda() if args.Rev: h1, h0 = self.encode([fea0, fea1]) if args.SingleView != -1: for v in range(len(mask[0])): if v != 1 - args.SingleView: mask[:, v] = 0 else: h0, h1 = self.encode([fea0, fea1]) if args.SingleView != -1: for v in range(len(mask[0])): if v != args.SingleView: mask[:, v] = 0 cluster_h0 = h0[mask[:, 0] == 1] cluster_h1 = h1[mask[:, 1] == 1] # if args.SingleView != -1: # mask[:, args.SingleView] = 0 # # if args.SingleView == 0: # # cluster_h1 = cluster_h1[[]] # # class_labels1 = class_labels1[[]] # # elif args.SingleView == 1: # # class_labels0 = class_labels0[[]] # # cluster_h0 = cluster_h0[[]] # # else: # # raise NotImplementedError('') is_pair_all.extend(is_pair) feature_vec_cluster.extend(torch.cat([cluster_h0, cluster_h1]).detach().cpu().numpy()) group_vec_cluster.extend(torch.concat((torch.zeros(len(cluster_h0), dtype=torch.int), torch.ones(len(cluster_h1), dtype=torch.int))).numpy()) type_vec_cluster.extend(torch.concat((class_labels0[mask[:, 0] == 1], class_labels1[mask[:, 1] == 1])).numpy()) feature_vec_classification.extend(torch.cat([h0, h1]).detach().cpu().numpy()) if (epoch + 1) == epochs or (epoch + 1) % args.VisualFreq == 0: if torch.sum(torch.logical_not(torch.logical_or(mask[:, 1], mask[:, 0]))): raise NotImplementedError('存在一个pair两个模态都缺失') if args.reFill == 'Copy': if torch.sum(mask[:, 0] == 0): h0[mask[:, 0] == 0] = h1[mask[:, 0] == 0] if torch.sum(mask[:, 1] == 0): h1[mask[:, 1] == 0] = h0[mask[:, 1] == 0] elif args.reFill == 'Center': # raise NotImplementedError('') if self.pred_center_cac is None: pass warnings.warn('self.pred_center_cac == None') else: centors = torch.zeros((len(mask), 2, len(self.pred_center_cac[0]))).cuda() centors[mask[:, 0] == 1, 0] = self.pred_center_cac[ self.pred_cac[:torch.sum(mask[:, 0] == 1)]] centors[mask[:, 1] == 1, 1] = self.pred_center_cac[ self.pred_cac[torch.sum(mask[:, 0] == 1):]] if torch.sum(mask[:, 0] == 0): h0[mask[:, 0] == 0] = centors[mask[:, 0] == 0, 1] if torch.sum(mask[:, 1] == 0): h1[mask[:, 1] == 0] = centors[mask[:, 1] == 0, 0] elif args.reFill == 'KnnMapMean': if torch.sum(mask[:, 0] == 0): nearest = get_nearest_k(h1[mask[:, 0] == 0], h1[is_pair], args.reAlignK) h0p = h0[is_pair] h1[mask[:, 0] == 0] = torch.cat([torch.mean(h0p[ns], dim=0) for ns in nearest]) if torch.sum(mask[:, 1] == 0): nearest = get_nearest_k(h0[mask[:, 1] == 0], h0[is_pair], args.reAlignK) h1p = h1[is_pair] h1[mask[:, 1] == 0] = torch.cat([torch.mean(h1p[ns], dim=0) for ns in nearest]) # raise 
NotImplementedError('') elif args.reFill == 'KnnMean': # 关联对齐, xi1 不变, xi2替换成离xi1最近的k个view2的点的mean if torch.sum(mask[:, 1] == 0): hs0 = h0[mask[:, 1] == 0] he1 = h1[mask[:, 1] == 1] nearest = get_nearest_k(hs0, he1, args.reAlignK) # nearest = torch.argsort(torch.cdist(hs0.cpu(), he1.cpu()), dim=1)[:, :args.reAlignK] h1[mask[:, 1] == 0] = torch.cat([torch.mean(he1[ns], dim=0) for ns in nearest]) # class_labels1[mask[:, 1] == 0] = class_labels1[mask[:, 1] == 1][nearest[:, 0]] if torch.sum(mask[:, 0] == 0): hs1 = h1[mask[:, 0] == 0] he0 = h0[mask[:, 0] == 1] nearest = get_nearest_k(hs1, he0, args.reAlignK) # nearest = torch.argsort(torch.cdist(hs1.cpu(), he0.cpu()), dim=1)[:, :args.reAlignK] h0[mask[:, 0] == 0] = torch.cat([torch.mean(he0[ns], dim=0) for ns in nearest]) # class_labels0[mask[:, 0] == 0] = class_labels0[mask[:, 0] == 1][nearest[:, 0]] ############################################################### # 缺失补全, xi2 = mean(离xi1最近的k个view2的点) # fill_num = k # C = euclidean_dist(h0, h1) # row_idx = C.argsort() # col_idx = (C.t()).argsort() # # Mij denotes the flag of i-th sample in view 0 and j-th sample in view 1 # M = torch.logical_and((mask[:, 0].repeat(test_num, 1)).t(), mask[:, 1].repeat(test_num, 1)) # for i in range(test_num): # idx0 = col_idx[i, :][ # M[col_idx[i, :], i]] # idx for view 0 to sort and find the non-missing neighbors # idx1 = row_idx[i, :][ # M[i, row_idx[i, :]]] # idx for view 1 to sort and find the non-missing neighbors # if len(idx1) != 0 and len(idx0) == 0: # i-th sample in view 1 is missing # avg_fill = h1[idx1[0:fill_num], :].sum(dim=0) / fill_num # cnt += (class_labels1[idx1[0:fill_num]] == class_labels1[i]).sum() # missing_cnt += 1 # recover_out0[i, :] = h0[i, :] # recover_out1[i, :] = avg_fill # missing # elif len(idx0) != 0 and len(idx1) == 0: # avg_fill = h0[idx0[0:fill_num], :].sum(dim=0) / fill_num # cnt += (class_labels0[idx0[0:fill_num]] == class_labels0[i]).sum() # missing_cnt += 1 # recover_out0[i, :] = avg_fill # missing # recover_out1[i, :] = h1[i, :] # elif len(idx0) != 0 and len(idx1) != 0: # recover_out0[i, :] = h0[i, :] # recover_out1[i, :] = h1[i, :] # else: # raise Exception('error') # if setting == 1: # align_out0.extend((recover_out0.cpu()).numpy()) # align_out1.extend((recover_out1.cpu()).numpy()) # continue # else: raise NotImplementedError('') to_realign = torch.logical_and(is_pair == 0, torch.logical_and(mask[:, 1], mask[:, 0])) if args.reAlign == 'KnnMean': # 关联对齐, xi1 不变, xi2替换成离xi1最近的k个view2的点的mean if torch.sum(to_realign): ha1 = h1[to_realign] nearest = get_nearest_k(h0[to_realign], ha1, args.reAlignK) # dist = torch.cdist(h0[to_realign].cpu(), ha1.cpu()) # nearest = torch.argsort(dist, dim=1)[:, :args.reAlignK] h1[to_realign] = torch.cat([torch.mean(ha1[ns], dim=0) for ns in nearest]) # class_labels1[is_pair == 0] = class_labels1[is_pair == 0][nearest[:, 0]] elif args.reAlign == 'Copy': if torch.sum(to_realign): h1[to_realign] = h0[to_realign] # class_labels1[is_pair == 0] = class_labels0[is_pair == 0] elif args.reAlign == 'KnnMapMean': if torch.sum(to_realign): targ_v1 = h1[is_pair] nearest = get_nearest_k(h0[to_realign], h0[is_pair], args.reAlignK) h1[to_realign] = torch.cat([torch.mean(targ_v1[ns], dim=0) for ns in nearest]) # class_labels1[is_pair == 0] = ... 
elif args.reAlign == 'Ignore': pass else: raise NotImplementedError('') if args.Rev: fea0_rec, fea1_rec = self.decode([h1, h0]) else: fea0_rec, fea1_rec = self.decode([h0, h1]) # if len(fea0_rec[0]) == len(fea1_rec[0]): # fea_rec = torch.concat([fea0_rec, fea1_rec]) # fea = torch.concat([fea0, fea1]) # mask_c = torch.concat([mask[:, 0], mask[:, 1]]) # if torch.sum(mask_c == 0): # rnmse_vec[0].extend( # evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 0], xs=fea[mask_c == 0]).cpu().numpy()) # if torch.sum(mask_c == 1): # rnmse_vec[1].extend( # evaluate.get_rnmse(xs_hat=fea_rec[mask_c == 1], xs=fea[mask_c == 1]).cpu().numpy()) # else: # if torch.sum(mask == 0): # n0_v0 = evaluate.get_rnmse( # xs_hat=fea0_rec[mask[:, 0] == 0], xs=fea0[mask[:, 0] == 0]).cpu().numpy() # n0_v1 = evaluate.get_rnmse( # xs_hat=fea1_rec[mask[:, 1] == 0], xs=fea1[mask[:, 1] == 0]).cpu().numpy() # rnmse_vec[0].extend(n0_v0) # rnmse_vec[0].extend(n0_v1) # if torch.sum(mask == 1): # n1_v0 = evaluate.get_rnmse( # xs_hat=fea0_rec[mask[:, 0] == 1], xs=fea0[mask[:, 0] == 1]).cpu().numpy() # n1_v1 = evaluate.get_rnmse( # xs_hat=fea1_rec[mask[:, 1] == 1], xs=fea1[mask[:, 1] == 1]).cpu().numpy() # rnmse_vec[1].extend(n1_v0) # rnmse_vec[1].extend(n1_v1) g = torch.concat((torch.zeros(len(fea0), device=fea0.device, dtype=torch.int), torch.ones(len(fea1), device=fea0.device, dtype=torch.int))) h = torch.cat([h0, h1]).detach().cpu().numpy() feature_vec.extend(h) data_vec.extend(torch.cat([fea0, fea1]).detach().cpu().numpy()) group_vec.extend(g.cpu().numpy()) type_vec.extend(torch.concat((class_labels0, class_labels1)).numpy()) inf_data_t = time.time() feature_vec = np.array(feature_vec) data_vec = np.array(data_vec) feature_vec_cluster = np.array(feature_vec_cluster) is_pair_all = np.array(is_pair_all) feature_vec_classification = np.array(feature_vec_classification) group_vec = np.array(group_vec) group_vec_cluster = np.array(group_vec_cluster) type_vec = np.array(type_vec) type_vec_cluster = np.array(type_vec_cluster) rnmse_vec[0] = np.array(rnmse_vec[0]) rnmse_vec[1] = np.array(rnmse_vec[1]) kmeans_time = TimeOperator.Timer() if args.ShowReconstruct: if args.dataset == 'MNISTUSPS': dims = [np.product(d.data.shape[1:]) for d in test_dataloader.dataset.datasets] data_list = [np.asarray(it.data, dtype=np.float32) for it in test_dataloader.dataset.datasets] Y = test_dataloader.dataset.datasets[0].targets else: dims = [d.shape[1] for d in test_dataloader.dataset.data] data_list = [np.asarray(it, dtype=np.float32) for it in test_dataloader.dataset.data] Y = test_dataloader.dataset.class_labels0 mask = test_dataloader.dataset.mask n_per_cat = 10 rec0, rec1 = self.decode([ torch.from_numpy(feature_vec[group_vec == 0]).cuda(), torch.from_numpy(feature_vec[group_vec == 1]).cuda()]) rec0 = rec0.detach().cpu().numpy() rec1 = rec1.detach().cpu().numpy() show_img = np.asarray([]) inds_map = np.asarray([]) for v in range(2): col = np.asarray([]) inds_map_col = np.asarray([]) for y in range(10): inds = np.arange(len(Y))[ np.logical_and(np.logical_and(mask[:, v] == 1, mask[:, 1 - v] == 0), Y == y) ] np.random.shuffle(inds) assert len(inds) >= n_per_cat inds = inds[:n_per_cat] raw_imgs = data_list[v][inds] missing_imgs = data_list[1 - v][inds] rec_imgs = [rec0, rec1][v][inds] rec_imgs_miss = [rec0, rec1][1 - v][inds] pack = np.asarray( [raw_imgs, rec_imgs, missing_imgs, rec_imgs_miss]).reshape([-1, n_per_cat, 28, 28]) if len(col): col = np.concatenate([col, pack], axis=0) else: col = pack if len(inds_map_col): inds_map_col = 
np.concatenate([inds_map_col, inds.reshape([1, -1])], axis=0) else: inds_map_col = inds.reshape([1, -1]) if len(show_img): show_img = np.concatenate([show_img, col], axis=1) else: show_img = col if len(inds_map): inds_map = np.concatenate([inds_map, inds_map_col], axis=1) else: inds_map = inds_map_col plot_heat_map(inds_map, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecIM.svg') visualize_image(show_img, show=True, fig_path='/xlearning/pengxin/Temp/MissingRec.svg') selected_ind = [ [8, 2, 8, 9, 7, 2, 5, 9, 9, 9], [0, 2, 2, 3, 5, 7, 7, 9, 7, 0], ] # ToMouxin inds_to_mouxin = [ [im[si] for im, si in zip(inds_map[:, :n_per_cat], selected_ind[0])], [im[si] for im, si in zip(inds_map[:, n_per_cat:], selected_ind[1])], ] re_dt = np.load( '/xlearning/pengxin/Checkpoints/MultiClustering/RunSets/230105/IMvC_RunSet0114_Ablation_FakeSampleWise/ --QuickConfig X50C50 --dataset MNISTUSPS --loss_sim_contras 0.02 --seed 1998/SampleCache/Np.npz') np.savez('/xlearning/pengxin/Temp/MNISTUSPS_show.npz', feature_vec=np.asarray([ re_dt['d0_data'][inds_to_mouxin[0]], re_dt['d1_data'][inds_to_mouxin[1]] ])) selected_ind_global = np.concatenate( (np.asarray(selected_ind[0]).reshape([-1, 1]), np.asarray(selected_ind[1]).reshape([-1, 1]) + n_per_cat), axis=1 ) show_img_final = np.concatenate( [show_img[4 * i:4 * i + 4, selected_ind_global[i]] for i in range(len(selected_ind_global))], axis=1 )[:, [i * 2 for i in range(10)] + [i * 2 + 1 for i in range(10)]] visualize_image(show_img_final, show=True, fig_path='/xlearning/pengxin/Temp/MissingRecFinal.svg') return def cluster_and_measure(features, types, groups, row_pred=False): kst = time.time() centroids = torch.from_numpy(kmeans(features, self.class_num)) if args.ElActivationType in ['Normalize', 'BnNormalize', 'BnReNormalize']: centroids = F.normalize(centroids, dim=1) pred_vec = np.argmax(self.soft_ass(torch.from_numpy(features), centroids).numpy(), axis=1)
pred_adjusted, met = evaluate2(features, pred_vec, types, groups)
13
2023-12-21 08:50:36+00:00
16k
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/connection.py
[ { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-in...
import datetime
import logging
import os
import re
import socket
import sys
import typing
import warnings
import ssl
from http.client import HTTPConnection as _HTTPConnection
from http.client import HTTPException as HTTPException  # noqa: F401
from http.client import ResponseNotReady
from socket import timeout as SocketTimeout
from typing import Literal
from .response import HTTPResponse
from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT
from .util.ssltransport import SSLTransport
from ._collections import HTTPHeaderDict
from .util.response import assert_header_parsing
from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout
from .util.util import to_str
from .util.wait import wait_for_read
from ._base_connection import _TYPE_BODY
from ._base_connection import ProxyConfig as ProxyConfig
from ._base_connection import _ResponseOptions as _ResponseOptions
from ._version import __version__
from .exceptions import (
    ConnectTimeoutError,
    HeaderParsingError,
    NameResolutionError,
    NewConnectionError,
    ProxyError,
    SystemTimeWarning,
)
from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_
from .util.request import body_to_chunks
from .util.ssl_ import assert_fingerprint as _assert_fingerprint
from .util.ssl_ import (
    create_urllib3_context,
    is_ipaddress,
    resolve_cert_reqs,
    resolve_ssl_version,
    ssl_wrap_socket,
)
from .util.ssl_match_hostname import CertificateError, match_hostname
from .util.url import Url
from .response import HTTPResponse
13,429
from __future__ import annotations

if typing.TYPE_CHECKING:

try:  # Compiled with SSL?
    BaseSSLError = ssl.SSLError
except (ImportError, AttributeError):
    ssl = None  # type: ignore[assignment]

    class BaseSSLError(BaseException):  # type: ignore[no-redef]
        pass
from ._base_connection import ProxyConfig as ProxyConfig
8
2023-12-16 04:12:01+00:00
16k
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\...
import csv
import threading
import queue
import logging
import math
import sys
import cv2
import numpy as np
from enum import Enum
from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO
from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage)
from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template)
from backend.scenedetect.frame_timecode import FrameTimecode
from backend.scenedetect.video_stream import VideoStream
from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector
from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
14,390
'%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]],
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. base_timecode: The base_timecode of which all FrameTimecodes in the cut_list are based on. num_frames: The number of frames, or FrameTimecode representing duration, of the video that was processed (used to generate last scene's end time). start_frame: The start frame or FrameTimecode of the cut list. Used to generate the first scene's start time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame where each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed,as it will be # the start timecode for the first scene in the list. 
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]],
video: VideoStream,
7
2023-10-25 02:50:01+00:00
16k
EulerSearch/embedding_studio
embedding_studio/models/plugin.py
[ { "identifier": "ClickstreamParser", "path": "embedding_studio/embeddings/data/clickstream/parsers/parser.py", "snippet": "class ClickstreamParser(object):\n # TODO: annotate types precisely\n def __init__(\n self,\n query_item_type: type,\n search_result_type: type,\n ...
from dataclasses import dataclass
from typing import Any, Dict, List, Optional
from pydantic import BaseModel
from embedding_studio.embeddings.data.clickstream.parsers.parser import (
    ClickstreamParser,
)
from embedding_studio.embeddings.data.clickstream.query_retriever import (
    QueryRetriever,
)
from embedding_studio.embeddings.data.clickstream.splitter import (
    ClickstreamSessionsSplitter,
)
from embedding_studio.embeddings.data.loaders.data_loader import DataLoader
from embedding_studio.embeddings.data.ranking_data import RankingData
from embedding_studio.embeddings.data.storages.producer import (
    ItemStorageProducer,
)
from embedding_studio.embeddings.data.utils.fields_normalizer import (
    DatasetFieldsNormalizer,
)
from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import (
    ExperimentsManager,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import (
    FineTuningSettings,
)
from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import (
    MetricsAccumulator,
)
10,896
class PluginMeta(BaseModel):
    name: str
    version: str = "1.0.0"
    description: Optional[str] = None


@dataclass
class FineTuningBuilder:
    data_loader: DataLoader
    query_retriever: QueryRetriever
    clickstream_parser: ClickstreamParser
    clickstream_sessions_splitter: ClickstreamSessionsSplitter
    dataset_fields_normalizer: DatasetFieldsNormalizer
    item_storage_producer: ItemStorageProducer
accumulators: List[MetricsAccumulator]
9
2023-10-31 00:33:13+00:00
16k
nv-tlabs/vid2player3d
uhc/smpllib/smpl_local_robot.py
[ { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_local.py", "snippet": "class Skeleton:\n def __init__(\n self, template_dir=\"/hdd/zen/dev/copycat/Copycat/assets/bigfoot_template_v1.pkl\"\n ):\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1...
import os
import sys
import time
import argparse
import torch
import os.path as osp
import mujoco_py
import numpy as np
import math
import uuid
import atexit
import shutil
from copy import deepcopy
from lxml.etree import XMLParser, parse, Element, SubElement
from lxml import etree
from io import BytesIO
from scipy.spatial import ConvexHull
from stl import mesh
from mujoco_py import load_model_from_path, MjSim, MjViewer
from uhc.khrylib.mocap.skeleton_local import Skeleton
from uhc.khrylib.mocap.skeleton_mesh_local import Skeleton as SkeletonMesh
from uhc.smpllib.smpl_parser import (
    SMPL_Parser,
    SMPLH_Parser,
    SMPLX_Parser,
)
from uhc.utils.geom import quadric_mesh_decimation
from uhc.utils.flags import flags
12,711
sys.path.append(os.getcwd()) def parse_vec(string): return np.fromstring(string, sep=" ") def parse_fromto(string): fromto = np.fromstring(string, sep=" ") return fromto[:3], fromto[3:] def normalize_range(value, lb, ub): return (value - lb) / (ub - lb) * 2 - 1 def denormalize_range(value, lb, ub): return (value + 1) * 0.5 * (ub - lb) + lb def vec_to_polar(v): phi = math.atan2(v[1], v[0]) theta = math.acos(v[2]) return np.array([theta, phi]) def polar_to_vec(p): v = np.zeros(3) v[0] = math.sin(p[0]) * math.cos(p[1]) v[1] = math.sin(p[0]) * math.sin(p[1]) v[2] = math.cos(p[0]) return v def in_hull(hull, queries): tolerance = 1e-3 if len(queries.shape) == 1: queries = queries[ None, ] return np.all( np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1]) <= tolerance, axis=1, ) def get_joint_geometries( smpl_verts, smpl_jts, skin_weights, joint_names, geom_dir, scale_dict={}, suffix = None, verbose=False, min_num_vert = 50, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries os.makedirs(geom_dir, exist_ok=True) for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue vert = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(vert) norm_verts = vert - smpl_jts[jind] norm_hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": norm_hull, "norm_verts": norm_verts, "verts": vert, "hull": hull, } # print(jname, hull.simplices.shape[0]) center = vert[hull.vertices].mean(axis=0) jgeom = mesh.Mesh(np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype)) for i, f in enumerate(hull.simplices): for j in range(3): jgeom.vectors[i][j] = vert[f[j], :] # check if the face's normal is facing outward normal = np.cross( jgeom.vectors[i][1] - jgeom.vectors[i][0], jgeom.vectors[i][2] - jgeom.vectors[i][0], ) out_vec = jgeom.vectors[i].mean(axis=0) - center if np.dot(normal, out_vec) < 0: jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]] # flip the face if suffix is None: fname = f"{geom_dir}/{jname}.stl" else: fname = f"{geom_dir}/{jname}_{suffix}.stl" jgeom.save(fname) # mesh simplification with vtk # min_num_vert = 50 min_num_vert = 50 cur_num_vert = len(hull.vertices) reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert)
quadric_mesh_decimation(fname, reduction_rate, verbose=verbose)
5
2023-10-30 20:43:43+00:00
16k
masked-spacetime-hashing/msth
MSTH/SpaceTimeHashing/trainer.py
[ { "identifier": "ExperimentConfig", "path": "nerfstudio/configs/experiment_config.py", "snippet": "class ExperimentConfig(InstantiateConfig):\n \"\"\"Full config contents for running an experiment. Any experiment types (like training) will be\n subclassed from this, and must have their _target fie...
import dataclasses import functools import os import time import numpy as np import torch import yappi import wandb from dataclasses import dataclass, field from pathlib import Path from typing import Dict, List, Optional, Tuple, Type, Union from rich.console import Console from torch.cuda.amp.grad_scaler import GradScaler from typing_extensions import Literal from nerfstudio.configs.experiment_config import ExperimentConfig from nerfstudio.engine.callbacks import ( TrainingCallback, TrainingCallbackAttributes, TrainingCallbackLocation, ) from nerfstudio.engine.optimizers import Optimizers from nerfstudio.pipelines.base_pipeline import VanillaPipeline from nerfstudio.utils import profiler, writer from nerfstudio.utils.decorators import ( check_eval_enabled, check_main_thread, check_viewer_enabled, ) from nerfstudio.utils.misc import step_check from nerfstudio.utils.writer import EventName, TimeWriter from nerfstudio.viewer.server import viewer_utils from MSTH.utils import Timer from MSTH.video_pipeline import ( VideoPipeline, VideoPipelineConfig, SpaceTimeDataManagerConfig, SpaceTimePipelineConfig, SpaceTimePipeline, ) from nerfstudio.engine.trainer import Trainer, TrainerConfig
13,359
from __future__ import annotations CONSOLE = Console(width=120) TRAIN_INTERATION_OUTPUT = Tuple[ # pylint: disable=invalid-name torch.Tensor, Dict[str, torch.Tensor], Dict[str, torch.Tensor] ] TORCH_DEVICE = Union[torch.device, str] # pylint: disable=invalid-name @dataclass class SpaceTimeHashingTrainerConfig(TrainerConfig): """Configuration for training regimen""" _target: Type = field(default_factory=lambda: SpaceTimeHashingTrainer) pipeline: SpaceTimePipelineConfig """target class to instantiate""" steps_per_save: int = 1000 """Number of steps between saves.""" steps_per_eval_batch: int = 500 """Number of steps between randomly sampled batches of rays.""" steps_per_eval_image: int = 2000 """Number of steps between single eval images.""" steps_per_eval_all_images: int = 25000 """Number of steps between eval all images.""" max_num_iterations: int = 1000000 """Maximum number of iterations to run.""" mixed_precision: bool = False """Whether or not to use mixed precision for training.""" save_only_latest_checkpoint: bool = True """Whether to only save the latest checkpoint or all checkpoints.""" # optional parameters if we want to resume training load_dir: Optional[Path] = None """Optionally specify a pre-trained model directory to load from.""" load_step: Optional[int] = None """Optionally specify model step to load from; if none, will find most recent model in load_dir.""" load_config: Optional[Path] = None """Path to config YAML file.""" log_gradients: bool = False """Optionally log gradients during training""" wandb_name: str = "none" steps_full_video: int = 10000000000 eval_total_frames: Optional[int] = None save_eval_video: bool = False render_camera_offset: Optional[List[float]] = None class SpaceTimeHashingTrainer(Trainer): config: SpaceTimeHashingTrainerConfig pipeline: SpaceTimePipeline optimizers: Optimizers callbacks: List[TrainingCallback] @profiler.time_function def train_iteration(self, step: int) -> TRAIN_INTERATION_OUTPUT: """Run one iteration with a batch of inputs. Returns dictionary of model losses. Args: step: Current training step. """ self.optimizers.zero_grad_all() self.pipeline.train() cpu_or_cuda_str: str = self.device.split(":")[0] with torch.autocast(device_type=cpu_or_cuda_str, enabled=self.mixed_precision): _, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step) loss = functools.reduce(torch.add, loss_dict.values()) self.grad_scaler.scale(loss).backward() # type: ignore # TODO remove this # self.pipeline.model.field.mlp_base.spatial_net.grad_total_variation(weight=1e-2, B=10000) # print(self.pipeline.get_param_groups()["proposal_networks"][0].grad) self.optimizers.optimizer_scaler_step_all(self.grad_scaler) if self.config.log_gradients: total_grad = 0 for tag, value in self.pipeline.model.named_parameters(): assert tag != "Total" if value.grad is not None: grad = value.grad.norm() metrics_dict[f"Gradients/{tag}"] = grad total_grad += grad metrics_dict["Gradients/Total"] = total_grad self.grad_scaler.update() self.optimizers.scheduler_step_all(step) # Merging loss and metrics dict into a single output. return loss, loss_dict, metrics_dict @check_eval_enabled @profiler.time_function def eval_iteration(self, step: int) -> None: """Run one iteration with different batch/image/all image evaluations depending on step size. Args: step: Current training step. 
""" # a batch of eval rays # if step_check(step, self.config.steps_per_eval_batch): # _, eval_loss_dict, eval_metrics_dict = self.pipeline.get_eval_loss_dict(step=step) # eval_loss = functools.reduce(torch.add, eval_loss_dict.values()) # writer.put_scalar(name="Eval Loss", scalar=eval_loss, step=step) # writer.put_dict(name="Eval Loss Dict", scalar_dict=eval_loss_dict, step=step) # writer.put_dict(name="Eval Metrics Dict", scalar_dict=eval_metrics_dict, step=step) # one eval image
from __future__ import annotations CONSOLE = Console(width=120) TRAIN_INTERATION_OUTPUT = Tuple[ # pylint: disable=invalid-name torch.Tensor, Dict[str, torch.Tensor], Dict[str, torch.Tensor] ] TORCH_DEVICE = Union[torch.device, str] # pylint: disable=invalid-name @dataclass class SpaceTimeHashingTrainerConfig(TrainerConfig): """Configuration for training regimen""" _target: Type = field(default_factory=lambda: SpaceTimeHashingTrainer) pipeline: SpaceTimePipelineConfig """target class to instantiate""" steps_per_save: int = 1000 """Number of steps between saves.""" steps_per_eval_batch: int = 500 """Number of steps between randomly sampled batches of rays.""" steps_per_eval_image: int = 2000 """Number of steps between single eval images.""" steps_per_eval_all_images: int = 25000 """Number of steps between eval all images.""" max_num_iterations: int = 1000000 """Maximum number of iterations to run.""" mixed_precision: bool = False """Whether or not to use mixed precision for training.""" save_only_latest_checkpoint: bool = True """Whether to only save the latest checkpoint or all checkpoints.""" # optional parameters if we want to resume training load_dir: Optional[Path] = None """Optionally specify a pre-trained model directory to load from.""" load_step: Optional[int] = None """Optionally specify model step to load from; if none, will find most recent model in load_dir.""" load_config: Optional[Path] = None """Path to config YAML file.""" log_gradients: bool = False """Optionally log gradients during training""" wandb_name: str = "none" steps_full_video: int = 10000000000 eval_total_frames: Optional[int] = None save_eval_video: bool = False render_camera_offset: Optional[List[float]] = None class SpaceTimeHashingTrainer(Trainer): config: SpaceTimeHashingTrainerConfig pipeline: SpaceTimePipeline optimizers: Optimizers callbacks: List[TrainingCallback] @profiler.time_function def train_iteration(self, step: int) -> TRAIN_INTERATION_OUTPUT: """Run one iteration with a batch of inputs. Returns dictionary of model losses. Args: step: Current training step. """ self.optimizers.zero_grad_all() self.pipeline.train() cpu_or_cuda_str: str = self.device.split(":")[0] with torch.autocast(device_type=cpu_or_cuda_str, enabled=self.mixed_precision): _, loss_dict, metrics_dict = self.pipeline.get_train_loss_dict(step=step) loss = functools.reduce(torch.add, loss_dict.values()) self.grad_scaler.scale(loss).backward() # type: ignore # TODO remove this # self.pipeline.model.field.mlp_base.spatial_net.grad_total_variation(weight=1e-2, B=10000) # print(self.pipeline.get_param_groups()["proposal_networks"][0].grad) self.optimizers.optimizer_scaler_step_all(self.grad_scaler) if self.config.log_gradients: total_grad = 0 for tag, value in self.pipeline.model.named_parameters(): assert tag != "Total" if value.grad is not None: grad = value.grad.norm() metrics_dict[f"Gradients/{tag}"] = grad total_grad += grad metrics_dict["Gradients/Total"] = total_grad self.grad_scaler.update() self.optimizers.scheduler_step_all(step) # Merging loss and metrics dict into a single output. return loss, loss_dict, metrics_dict @check_eval_enabled @profiler.time_function def eval_iteration(self, step: int) -> None: """Run one iteration with different batch/image/all image evaluations depending on step size. Args: step: Current training step. 
""" # a batch of eval rays # if step_check(step, self.config.steps_per_eval_batch): # _, eval_loss_dict, eval_metrics_dict = self.pipeline.get_eval_loss_dict(step=step) # eval_loss = functools.reduce(torch.add, eval_loss_dict.values()) # writer.put_scalar(name="Eval Loss", scalar=eval_loss, step=step) # writer.put_dict(name="Eval Loss Dict", scalar_dict=eval_loss_dict, step=step) # writer.put_dict(name="Eval Metrics Dict", scalar_dict=eval_metrics_dict, step=step) # one eval image
if step_check(step, self.config.steps_per_eval_image):
11
2023-10-26 04:39:15+00:00
16k
Trustworthy-AI-Group/TransferAttack
transferattack/model_related/ghost.py
[ { "identifier": "Attack", "path": "transferattack/attack.py", "snippet": "class Attack(object):\n \"\"\"\n Base class for all attacks.\n \"\"\"\n def __init__(self, attack, model_name, epsilon, targeted, random_start, norm, loss, device=None):\n \"\"\"\n Initialize the hyperpar...
from ..utils import * from ..attack import Attack from .ghost_networks.resnet import ghost_resnet101, ghost_resnet152 from ..gradient.mifgsm import MIFGSM from ..gradient.nifgsm import NIFGSM from ..gradient.vmifgsm import VMIFGSM from ..input_transformation.dim import DIM from ..input_transformation.tim import TIM from ..input_transformation.sim import SIM from ..input_transformation.admix import Admix from torch import Tensor from ..utils import * from ..gradient.mifgsm import MIFGSM from ..gradient.nifgsm import NIFGSM from ..input_transformation.dim import DIM from ..input_transformation.tim import TIM from ..input_transformation.sim import SIM from ..input_transformation.admix import Admix
11,663
# example bash: python main.py --attack=ghost_network support_models = { "resnet101": ghost_resnet101, "resnet152": ghost_resnet152, } class GhostNetwork_MIFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_IFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) self.decay = 0. def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_NIFGSM(NIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_VMIFGSM(VMIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. 
""" def __init__(self, model='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model
# example bash: python main.py --attack=ghost_network support_models = { "resnet101": ghost_resnet101, "resnet152": ghost_resnet152, } class GhostNetwork_MIFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_IFGSM(MIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) self.decay = 0. def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_NIFGSM(NIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. """ def __init__(self, model_name='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model_name, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model class GhostNetwork_VMIFGSM(VMIFGSM): """ Ghost Network Attack: Arguments: model (str): the surrogate model for attack. ghost_keep_prob (float): the dropout rate when generating ghost networks. ghost_random_range (float): the dropout rate when generating ghost networks of residual structure. 
""" def __init__(self, model='inc_v3', ghost_keep_prob=0.994, ghost_random_range=0.16, *args, **kwargs): self.ghost_keep_prob = ghost_keep_prob # do not use self.ghost_random_range = ghost_random_range # do not use super().__init__(model, *args, **kwargs) def load_model(self, model_name): if model_name in support_models.keys(): # The ghost_keep_prob and ghost_random_range are correctly set as param default value, # in the __init__ function of each GhostNetwork. model = wrap_model(support_models[model_name](weights='DEFAULT').eval().cuda()) else: raise ValueError('Model {} not supported for GhostNetwork'.format(model_name)) return model
class GhostNetwork_DIM(DIM):
12
2023-10-31 03:43:26+00:00
16k
chenruduan/OAReactDiff
demo.py
[ { "identifier": "LEFTNet", "path": "oa_reactdiff/model/leftnet.py", "snippet": "class LEFTNet(torch.nn.Module):\n r\"\"\"\n LEFTNet\n\n Args:\n pos_require_grad (bool, optional): If set to :obj:`True`, will require to take derivative of model output with respect to the atomic positions. ...
import torch import py3Dmol import numpy as np import plotly.express as px import json from typing import Optional from torch import tensor from e3nn import o3 from torch_scatter import scatter_mean from oa_reactdiff.model import LEFTNet from oa_reactdiff.tests.model.utils import ( generate_full_eij, get_cut_graph_mask, ) from torch.utils.data import DataLoader from oa_reactdiff.trainer.pl_trainer import DDPMModule from oa_reactdiff.dataset import ProcessedTS1x from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import FEATURE_MAPPING from oa_reactdiff.analyze.rmsd import batch_rmsd from oa_reactdiff.utils.sampling_tools import ( assemble_sample_inputs, write_tmp_xyz, ) from glob import glob from oa_reactdiff.analyze.rmsd import xyz2pmg, pymatgen_rmsd from pymatgen.core import Molecule from collections import OrderedDict from sklearn.cluster import KMeans from glob import glob from pymatgen.io.xyz import XYZ from openbabel import pybel from oa_reactdiff.analyze.rmsd import pymatgen_rmsd
13,278
use_by_ind=True, ) loader = DataLoader( dataset, batch_size=1, shuffle=False, num_workers=0, collate_fn=dataset.collate_fn ) itl = iter(loader) idx = -1 for _ in range(4): representations, res = next(itl) idx += 1 n_samples = representations[0]["size"].size(0) fragments_nodes = [ repre["size"] for repre in representations ] conditions = torch.tensor([[0] for _ in range(n_samples)], device=device) new_order_react = torch.randperm(representations[0]["size"].item()) for k in ["pos", "one_hot", "charge"]: representations[0][k] = representations[0][k][new_order_react] xh_fixed = [ torch.cat( [repre[feature_type] for feature_type in FEATURE_MAPPING], dim=1, ) for repre in representations ] out_samples, out_masks = ddpm_trainer.ddpm.inpaint( n_samples=n_samples, fragments_nodes=fragments_nodes, conditions=conditions, return_frames=1, resamplings=5, jump_length=5, timesteps=None, xh_fixed=xh_fixed, frag_fixed=[0, 2], ) rmsds = batch_rmsd( fragments_nodes, out_samples[0], xh_fixed, idx=1, ) write_tmp_xyz( fragments_nodes, out_samples[0], idx=[0, 1, 2], localpath="demo/inpainting" ) rmsds = [min(1, _x) for _x in rmsds] [(ii, round(rmsd, 2)) for ii, rmsd in enumerate(rmsds)], np.mean(rmsds), np.median(rmsds) print("Cell 33, Done") def draw_reaction(react_path: str, idx: int = 0, prefix: str = "gen") -> py3Dmol.view: """画出反应的的{反应物,过渡态,生成物} Args: react_path (str): path to the reaction. idx (int, optional): index for the generated reaction. Defaults to 0. prefix (str, optional): prefix for distinguishing true sample and generated structure. Defaults to "gen". Returns: py3Dmol.view: _description_ """ with open(f"{react_path}/{prefix}_{idx}_react.xyz", "r") as fo: natoms = int(fo.readline()) * 3 mol = f"{natoms}\n\n" for ii, t in enumerate(["react", "ts", "prod"]): pmatg_mol = xyz2pmg(f"{react_path}/{prefix}_{idx}_{t}.xyz") pmatg_mol_prime = Molecule( species=pmatg_mol.atomic_numbers, coords=pmatg_mol.cart_coords + 8 * ii, ) mol += "\n".join(pmatg_mol_prime.to(fmt="xyz").split("\n")[2:]) + "\n" viewer = py3Dmol.view(1024, 576) viewer.addModel(mol, "xyz") viewer.setStyle({'stick': {}, "sphere": {"radius": 0.3}}) viewer.zoomTo() return viewer opt_ts_path = "./demo/example-3/opt_ts/" opt_ts_xyzs = glob(f"{opt_ts_path}/*ts.opt.xyz") order_dict = {} for xyz in opt_ts_xyzs: order_dict.update( {int(xyz.split("/")[-1].split(".")[0]): xyz} ) order_dict = OrderedDict(sorted(order_dict.items())) opt_ts_xyzs = [] ind_dict = {} for ii, v in enumerate(order_dict.values()): opt_ts_xyzs.append(v) ind_dict.update( {ii: v} ) n_ts = len(opt_ts_xyzs) rmsd_mat = np.ones((n_ts, n_ts)) * -2.5 for ii in range(n_ts): for jj in range(ii+1, n_ts): try: rmsd_mat[ii, jj] = np.log10(
# --- 导入和定义一些函数 ---- default_float = torch.float64 torch.set_default_dtype(default_float) # 使用双精度,测试更准确 def remove_mean_batch( x: tensor, indices: Optional[tensor] = None ) -> tensor: """将x中的每个batch的均值去掉 Args: x (tensor): input tensor. indices (Optional[tensor], optional): batch indices. Defaults to None. Returns: tensor: output tensor with batch mean as 0. """ if indices == None: return x - torch.mean(x, dim=0) mean = scatter_mean(x, indices, dim=0) x = x - mean[indices] return x def draw_in_3dmol(mol: str, fmt: str = "xyz") -> py3Dmol.view: """画分子 Args: mol (str): str content of molecule. fmt (str, optional): format. Defaults to "xyz". Returns: py3Dmol.view: output viewer """ viewer = py3Dmol.view(1024, 576) viewer.addModel(mol, fmt) viewer.setStyle({'stick': {}, "sphere": {"radius": 0.36}}) viewer.zoomTo() return viewer def assemble_xyz(z: list, pos: tensor) -> str: """将原子序数和位置组装成xyz格式 Args: z (list): chemical elements pos (tensor): 3D coordinates Returns: str: xyz string """ natoms =len(z) xyz = f"{natoms}\n\n" for _z, _pos in zip(z, pos.numpy()): xyz += f"{_z}\t" + "\t".join([str(x) for x in _pos]) + "\n" return xyz num_layers = 2 hidden_channels = 8 in_hidden_channels = 4 num_radial = 4 model = LEFTNet( num_layers=num_layers, hidden_channels=hidden_channels, in_hidden_channels=in_hidden_channels, num_radial=num_radial, object_aware=False, ) sum(p.numel() for p in model.parameters() if p.requires_grad) h = torch.rand(3, in_hidden_channels) z = ["O", "H", "H"] pos = tensor([ [0, 0, 0], [1, 0, 0], [0, 1, 0], ]).double() # 方便起见,我们这里把H-O-H的角度设为90度 edge_index = tensor([ [0, 0, 1, 1, 2, 2], [1, 2, 0, 2, 0, 1] ]).long() # 使用全连接的方式,这里的边是无向的 _h, _pos, __ = model.forward( h=h, pos=remove_mean_batch(pos), edge_index=edge_index, ) rot = o3.rand_matrix() pos_rot = torch.matmul(pos, rot).double() _h_rot, _pos_rot, __ = model.forward( h=h, pos=remove_mean_batch(pos_rot), edge_index=edge_index, ) torch.max( torch.abs( _h - _h_rot ) ) # 旋转后的h应该不变 torch.max( torch.abs( torch.matmul(_pos, rot).double() - _pos_rot ) ) # 旋转后的pos应该旋转 print("At Cell 9, Done.") # --- Cell 9 --- ns = [3, ] + [2, 1] # 反应物 3个原子 (H2O),生成物 2个原子 (H2),1个原子 (O自由基) ntot = np.sum(ns) mask = tensor([0, 0, 0, 1, 1, 1]) # 用于区分反应物和生成物 z = ["O", "H", "H"] + ["H", "H", "O"] pos_react = tensor([ [0, 0, 0], [1, 0, 0], [0, 1, 0], ]).double() # 方便起见,我们这里把H-O-H的角度设为90度 pos_prod = tensor([ [0, 3, -0.4], [0, 3, 0.4], [0, -3, 0], ]) # 将H2和O自由基分开 pos = torch.cat( [pos_react, pos_prod], dim=0, ) # 拼接 h = torch.rand(ntot, in_hidden_channels) edge_index = generate_full_eij(ntot) edge_index _h, _pos, __ = model.forward( h=h, pos=remove_mean_batch(pos, mask), edge_index=edge_index, ) rot = o3.rand_matrix() pos_react_rot = torch.matmul(pos_react, rot).double() pos_rot = torch.cat( [pos_react_rot, pos_prod], dim=0, ) # 拼接旋转过后的H2O和未旋转的H2和O自由基 _h_rot, _pos_rot, __ = model.forward( h=h, pos=remove_mean_batch(pos_rot, mask), edge_index=edge_index, ) torch.max( torch.abs( _h - _h_rot ) ) # 旋转后的h应该不变 _pos_rot_prime = torch.cat( [ torch.matmul(_pos[:3], rot), _pos[3:] ] ) torch.max( torch.abs( _pos_rot_prime - _pos_rot ) ) # 旋转后的pos应该旋转 print("At Cell 16, Done.") model_oa = LEFTNet( num_layers=num_layers, hidden_channels=hidden_channels, in_hidden_channels=in_hidden_channels, num_radial=num_radial, object_aware=True, # 使用object-aware模型 ) subgraph_mask = get_cut_graph_mask(edge_index, 3) # 0-2是反应物的原子数 edge_index.T[torch.where(subgraph_mask.squeeze()>0)[0]] _h, _pos, __ = model_oa.forward( h=h, pos=remove_mean_batch(pos, mask), edge_index=edge_index, 
subgraph_mask=subgraph_mask, ) rot = o3.rand_matrix() pos_react_rot = torch.matmul(pos_react, rot).double() pos_rot = torch.cat( [pos_react_rot, pos_prod], dim=0, ) _h_rot, _pos_rot, __ = model_oa.forward( h=h, pos=remove_mean_batch(pos_rot, mask), edge_index=edge_index, subgraph_mask=subgraph_mask, ) torch.max( torch.abs( _h - _h_rot ) ) # 旋转后的h应该不变 _pos_rot_prime = torch.cat( [ torch.matmul(_pos[:3], rot), _pos[3:] ] ) torch.max( torch.abs( _pos_rot_prime - _pos_rot ) ) # 旋转后的pos应该旋转 print("Cell 22, done") device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda") ddpm_trainer = DDPMModule.load_from_checkpoint( checkpoint_path="./pretrained-ts1x-diff.ckpt", map_location=device, ) ddpm_trainer = ddpm_trainer.to(device) noise_schedule: str = "polynomial_2" timesteps: int = 150 precision: float = 1e-5 gamma_module = PredefinedNoiseSchedule( noise_schedule=noise_schedule, timesteps=timesteps, precision=precision, ) schedule = DiffSchedule( gamma_module=gamma_module, norm_values=ddpm_trainer.ddpm.norm_values ) ddpm_trainer.ddpm.schedule = schedule ddpm_trainer.ddpm.T = timesteps ddpm_trainer = ddpm_trainer.to(device) dataset = ProcessedTS1x( npz_path="./oa_reactdiff/data/transition1x/train.pkl", center=True, pad_fragments=0, device=device, zero_charge=False, remove_h=False, single_frag_only=False, swapping_react_prod=False, use_by_ind=True, ) loader = DataLoader( dataset, batch_size=1, shuffle=False, num_workers=0, collate_fn=dataset.collate_fn ) itl = iter(loader) idx = -1 for _ in range(4): representations, res = next(itl) idx += 1 n_samples = representations[0]["size"].size(0) fragments_nodes = [ repre["size"] for repre in representations ] conditions = torch.tensor([[0] for _ in range(n_samples)], device=device) new_order_react = torch.randperm(representations[0]["size"].item()) for k in ["pos", "one_hot", "charge"]: representations[0][k] = representations[0][k][new_order_react] xh_fixed = [ torch.cat( [repre[feature_type] for feature_type in FEATURE_MAPPING], dim=1, ) for repre in representations ] out_samples, out_masks = ddpm_trainer.ddpm.inpaint( n_samples=n_samples, fragments_nodes=fragments_nodes, conditions=conditions, return_frames=1, resamplings=5, jump_length=5, timesteps=None, xh_fixed=xh_fixed, frag_fixed=[0, 2], ) rmsds = batch_rmsd( fragments_nodes, out_samples[0], xh_fixed, idx=1, ) write_tmp_xyz( fragments_nodes, out_samples[0], idx=[0, 1, 2], localpath="demo/inpainting" ) rmsds = [min(1, _x) for _x in rmsds] [(ii, round(rmsd, 2)) for ii, rmsd in enumerate(rmsds)], np.mean(rmsds), np.median(rmsds) print("Cell 33, Done") def draw_reaction(react_path: str, idx: int = 0, prefix: str = "gen") -> py3Dmol.view: """画出反应的的{反应物,过渡态,生成物} Args: react_path (str): path to the reaction. idx (int, optional): index for the generated reaction. Defaults to 0. prefix (str, optional): prefix for distinguishing true sample and generated structure. Defaults to "gen". 
Returns: py3Dmol.view: _description_ """ with open(f"{react_path}/{prefix}_{idx}_react.xyz", "r") as fo: natoms = int(fo.readline()) * 3 mol = f"{natoms}\n\n" for ii, t in enumerate(["react", "ts", "prod"]): pmatg_mol = xyz2pmg(f"{react_path}/{prefix}_{idx}_{t}.xyz") pmatg_mol_prime = Molecule( species=pmatg_mol.atomic_numbers, coords=pmatg_mol.cart_coords + 8 * ii, ) mol += "\n".join(pmatg_mol_prime.to(fmt="xyz").split("\n")[2:]) + "\n" viewer = py3Dmol.view(1024, 576) viewer.addModel(mol, "xyz") viewer.setStyle({'stick': {}, "sphere": {"radius": 0.3}}) viewer.zoomTo() return viewer opt_ts_path = "./demo/example-3/opt_ts/" opt_ts_xyzs = glob(f"{opt_ts_path}/*ts.opt.xyz") order_dict = {} for xyz in opt_ts_xyzs: order_dict.update( {int(xyz.split("/")[-1].split(".")[0]): xyz} ) order_dict = OrderedDict(sorted(order_dict.items())) opt_ts_xyzs = [] ind_dict = {} for ii, v in enumerate(order_dict.values()): opt_ts_xyzs.append(v) ind_dict.update( {ii: v} ) n_ts = len(opt_ts_xyzs) rmsd_mat = np.ones((n_ts, n_ts)) * -2.5 for ii in range(n_ts): for jj in range(ii+1, n_ts): try: rmsd_mat[ii, jj] = np.log10(
pymatgen_rmsd(
13
2023-10-30 02:53:38+00:00
16k
Weitheskmt/WeiDMD
build/lib/weidmd/bopdmd.py
[ { "identifier": "DMDBase", "path": "build/lib/weidmd/dmdbase.py", "snippet": "class DMDBase:\n \"\"\"\n Dynamic Mode Decomposition base class.\n\n :param svd_rank: the rank for the truncation; If 0, the method computes the\n optimal rank and uses it for truncation; if positive interger, ...
import warnings import numpy as np from collections import OrderedDict from scipy.sparse import csr_matrix from scipy.linalg import qr from .dmdbase import DMDBase from .dmdoperator import DMDOperator from .utils import compute_svd from .rdmd import compute_rank from .snapshots import Snapshots
12,638
@property def trial_size(self): """ :return: size of the data subsets used during each BOP-DMD trial. :rtype: int or float """ return self._trial_size @property def time(self): """ Get the vector that contains the time points of the fitted snapshots. :return: the vector that contains the original time points. :rtype: numpy.ndarray """ if self._time is None: raise RuntimeError("fit() hasn't been called.") return self._time @property def atilde(self): """ Get the reduced Koopman operator A, called Atilde. :return: the reduced Koopman operator A. :rtype: numpy.ndarray """ return self.operator.as_numpy_array @property def A(self): """ Get the full Koopman operator A. :return: the full Koopman operator A. :rtype: numpy.ndarray """ return self.operator.A @property def dynamics(self): """ Get the time evolution of each mode. :return: matrix that contains all the time evolution, stored by row. :rtype: numpy.ndarray """ t_omega = np.exp(np.outer(self.eigs, self._time)) return np.diag(self.amplitudes).dot(t_omega) def print_varpro_opts(self): """ Prints a formatted information string that displays all chosen variable projection parameter values. """ if self._Atilde is None: raise ValueError("You need to call fit before") opt_names = [ "init_lambda", "maxlam", "lamup", "use_levmarq", "maxiter", "tol", "eps_stall", "use_fulljac", "verbose", ] print("VARIABLE PROJECTION OPTIONS:") print("============================") for name, value in zip(opt_names, self.operator.varpro_opts): if len(name) < 7: print(name + ":\t\t" + str(value)) else: print(name + ":\t" + str(value)) def _initialize_alpha(self): """ Uses projected trapezoidal rule to approximate the eigenvalues of A in z' = Az. The computed eigenvalues will serve as our initial guess for alpha. :return: Approximated eigenvalues of the matrix A. :rtype: numpy.ndarray """ # Project the snapshot data onto the projection basis. ux = self._proj_basis.conj().T.dot(self.snapshots) ux1 = ux[:, :-1] ux2 = ux[:, 1:] # Define the diagonal matrix T as the following. t1 = self._time[:-1] t2 = self._time[1:] T = np.diag(t2 - t1) # Define the matrices Y and Z as the following and compute the # rank-truncated SVD of Y. Y = (ux1 + ux2) / 2 Z = (ux2 - ux1).dot(np.linalg.inv(T)) U, s, V = compute_svd(Y, self._svd_rank) S = np.diag(s) # Compute the matrix Atilde and return its eigenvalues. Atilde = np.linalg.multi_dot([U.conj().T, Z, V, np.linalg.inv(S)]) return np.linalg.eig(Atilde)[0] def fit(self, X, t): """ Compute the Optimized Dynamic Mode Decomposition. :param X: the input snapshots. :type X: numpy.ndarray or iterable :param t: the input time vector. :type t: numpy.ndarray or iterable """ # Process the input data and convert to numpy.ndarrays. self._reset()
class BOPDMDOperator(DMDOperator): """ BOP-DMD operator. :param compute_A: Flag that determines whether or not to compute the full Koopman operator A. :type compute_A: bool :param use_proj: Flag that determines the type of computation to perform. If True, fit input data projected onto the first svd_rank POD modes or columns of proj_basis if provided. If False, fit the full input data. :type use_proj: bool :param init_alpha: Initial guess for the continuous-time DMD eigenvalues. :type init_alpha: numpy.ndarray :param proj_basis: Orthogonal basis for projection, where each column of proj_basis contains a basis mode. :type proj_basis: numpy.ndarray :param num_trials: Number of BOP-DMD trials to perform. If num_trials is a positive integer, num_trials BOP-DMD trials are performed. Otherwise, standard optimized dmd is performed. :type num_trials: int :param trial_size: Size of the randomly selected subset of observations to use for each trial of bagged optimized dmd (BOP-DMD). If trial_size is a positive integer, trial_size many observations will be used per trial. If trial_size is a float between 0 and 1, int(trial_size * m) many observations will be used per trial, where m denotes the total number of data points observed. Note that any other type of input for trial_size will yield an error. :type trial_size: int or float :param eig_sort: Method used to sort eigenvalues (and modes accordingly) when performing BOP-DMD. Eigenvalues will be sorted by real part and then by imaginary part to break ties if `eig_sort="real"`, by imaginary part and then by real part to break ties if `eig_sort="imag"`, or by magnitude if `eig_sort="abs"`. If `eig_sort="auto"`, one of the previously-mentioned sorting methods is chosen depending on eigenvalue variance. :type eig_sort: {"real", "imag", "abs", "auto"} :param init_lambda: Initial value used for the regularization parameter in the Levenberg method. Default is 1.0. Note: Larger lambda values make the method more like gradient descent. :type init_lambda: float :param maxlam: Maximum number of of steps used in the inner Levenberg loop, i.e. the number of times you increase lambda before quitting. Default is 52. :type maxlam: int :param lamup: The factor by which you increase lambda when searching for an appropriate step. Default is 2.0. :type lamup: float :param use_levmarq: Flag that determines whether you use the Levenberg algorithm or the Levenberg-Marquardt algorithm. Default is True, use Levenberg-Marquardt. :type use_levmarq: bool :param maxiter: The maximum number of outer loop iterations to use before quitting. Default is 30. :type maxiter: int :param tol: The tolerance for the relative error in the residual. i.e. the program will terminate if norm(y-Phi(alpha)*b,'fro')/norm(y,'fro') < tol is achieved. Default is 1e-6. :type tol: float :param eps_stall: The tolerance for detecting a stall. i.e. if error(iter-1)-error(iter) < eps_stall*err(iter-1) the program halts. Default is 1e-12. :type eps_stall: float :param use_fulljac: Flag that determines whether or not to use the full expression for the Jacobian or Kaufman's approximation. Default is True, use full expression. :type use_fulljac: bool :param verbose: Flag that determines whether or not to print warning messages that arise during the variable projection routine, and whether or not to print information regarding the method's iterative progress. Default is False, don't print information. 
:type verbose: bool """ def __init__( self, compute_A, use_proj, init_alpha, proj_basis, num_trials, trial_size, eig_sort, init_lambda=1.0, maxlam=52, lamup=2.0, use_levmarq=True, maxiter=30, tol=1e-6, eps_stall=1e-12, use_fulljac=True, verbose=False, ): self._compute_A = compute_A self._use_proj = use_proj self._init_alpha = init_alpha self._proj_basis = proj_basis self._num_trials = num_trials self._trial_size = trial_size self._eig_sort = eig_sort self._varpro_opts = ( init_lambda, maxlam, lamup, use_levmarq, maxiter, tol, eps_stall, use_fulljac, verbose, ) self._varpro_opts_warn() self._modes = None self._eigenvalues = None self._eigenvalues_std = None self._amplitudes_std = None self._Atilde = None self._A = None @property def varpro_opts(self): """ Get the variable projection options. :return: the variable projection options. :rtype: tuple """ return self._varpro_opts @property def A(self): """ Get the full Koopman operator A. :return: the full Koopman operator A. :rtype: numpy.ndarray """ if not self._compute_A: msg = ( "A not computed during fit. " "Set parameter compute_A = True to compute A." ) raise ValueError(msg) if self._A is None: raise ValueError("You need to call fit before") return self._A @property def amplitudes_std(self): """ Get the amplitudes standard deviation. :return: amplitudes standard deviation. :rtype: numpy.ndarray """ return self._amplitudes_std @property def eigenvalues_std(self): """ Get the eigenvalues standard deviation. :return: eigenvalues standard deviation. :rtype: numpy.ndarray """ return self._eigenvalues_std def _varpro_opts_warn(self): """ Checks the validity of the parameter values in _varpro_opts. Throws an error if any parameter value has an invalid type and generates a warning if any value lies outside of the recommended range. """ # Generate dictionary of recommended value range for each parameter. rec_ranges = OrderedDict() rec_ranges["init_lambda"] = [0.0, 1e16] rec_ranges["maxlam"] = [0, 200] rec_ranges["lamup"] = [1.0, 1e16] rec_ranges["use_levmarq"] = [-np.inf, np.inf] rec_ranges["maxiter"] = [0, 1e12] rec_ranges["tol"] = [0.0, 1e16] rec_ranges["eps_stall"] = [-np.inf, 1.0] rec_ranges["use_fulljac"] = [-np.inf, np.inf] rec_ranges["verbose"] = [-np.inf, np.inf] for opt_value, (opt_name, (opt_min, opt_max)) in zip( self._varpro_opts, rec_ranges.items() ): if not isinstance(opt_value, (int, float, bool)): raise ValueError("Invalid variable projection option given.") if opt_value < opt_min: msg = ( "Option {} with value {} is less than {}, " "which is not recommended." ) warnings.warn(msg.format(opt_name, opt_value, opt_min)) elif opt_value > opt_max: msg = ( "Option {} with value {} is greater than {}, " "which is not recommended." ) warnings.warn(msg.format(opt_name, opt_value, opt_max)) def _exp_function(self, alpha, t): """ Matrix of exponentials. :param alpha: Vector of time scalings in the exponent. :type alpha: numpy.ndarray :param t: Vector of time values. :type t: numpy.ndarray :return: Matrix A such that A[i, j] = exp(t_i * alpha_j). :rtype: numpy.ndarray """ return np.exp(np.outer(t, alpha)) def _exp_function_deriv(self, alpha, t, i): """ Derivatives of the matrix of exponentials. :param alpha: Vector of time scalings in the exponent. :type alpha: numpy.ndarray :param t: Vector of time values. :type t: numpy.ndarray :param i: Index in alpha of the derivative variable. :type i: int :return: Derivatives of Phi(alpha, t) with respect to alpha[i]. 
:rtype: scipy.sparse.csr_matrix """ m = len(t) n = len(alpha) if i < 0 or i > n - 1: raise ValueError("Invalid index i given to exp_function_deriv.") A = np.multiply(t, np.exp(alpha[i] * t)) return csr_matrix( (A, (np.arange(m), np.full(m, fill_value=i))), shape=(m, n) ) def _compute_irank_svd(self, X, tolrank): """ Helper function that computes and returns the SVD of X with a rank truncation of irank, which denotes the number of singular values of X greater than tolrank * s1, where s1 is the largest singular value of the matrix X. :param X: Matrix to decompose. :type X: numpy.ndarray :param tolrank: Determines the rank of the returned SVD. :type tolrank: float :return: irank truncated SVD of X. :rtype: numpy.ndarray, numpy.ndarray, numpy.ndarray """ U, s, Vh = np.linalg.svd(X, full_matrices=False) irank = np.sum(s > tolrank * s[0]) U = U[:, :irank] S = np.diag(s[:irank]) Vh = Vh[:irank] return U, S, Vh def _bag(self, H, trial_size): """ Given a 2D array of data X, where each row contains a data snapshot, randomly sub-selects and returns data snapshots while preserving the original snapshot order. Note that if trial_size is a positive integer, trial_size many observations will be used per trial. If trial_size is a float between 0 and 1, int(trial_size * m) many observations will be used per trial, where m denotes the total number of snapshots in X. The indices of the sub-selected snapshots are also returned. :param H: Full data matrix to be sub-selected from. :type H: numpy.ndarray :param trial_size: Size of the sub-selection from H. :type trial_size: int or float :return: Matrix of sub-selected data snapshots, stored in each row, and a vector of each snapshots's row index location in H. :rtype: numpy.ndarray, numpy.ndarray """ # Ensure that H is a 2D numpy.ndarray. if not isinstance(H, np.ndarray) or H.ndim != 2: msg = "H must be a 2D np.ndarray." raise ValueError(msg) if 0 < trial_size < 1: batch_size = int(trial_size * H.shape[0]) elif trial_size >= 1 and isinstance(trial_size, int): batch_size = trial_size else: msg = ( "Invalid trial_size parameter. trial_size must be either " "a positive integer or a float between 0 and 1." ) raise ValueError(msg) # Throw an error if the batch size is too large or too small. if batch_size > H.shape[0]: msg = ( "Error bagging the input data. Please ensure that the " "trial_size parameter is small enough for bagging." ) raise ValueError(msg) if batch_size == 0: msg = ( "Error bagging the input data. Please ensure that the " "trial_size parameter is large enough for bagging." ) raise ValueError(msg) # Obtain and return subset of the data. all_inds = np.arange(H.shape[0]) subset_inds = np.sort( np.random.choice(all_inds, size=batch_size, replace=False) ) return H[subset_inds], subset_inds def _variable_projection(self, H, t, init_alpha, Phi, dPhi): """ Variable projection routine for multivariate data. Attempts to fit the columns of H as linear combinations of the columns of Phi(alpha,t) such that H = Phi(alpha,t)B. Note that M denotes the number of data samples, N denotes the number of columns of Phi, IS denotes the number of functions to fit, and IA denotes the length of the alpha vector. :param H: (M, IS) matrix of data. :type H: numpy.ndarray :param t: (M,) vector of sample times. :type t: numpy.ndarray :param init_alpha: initial guess for alpha. :type init_alpha: numpy.ndarray :param Phi: (M, N) matrix-valued function Phi(alpha,t). 
:type Phi: function :param dPhi: (M, N) matrix-valued function dPhi(alpha,t,i) that contains the derivatives of Phi wrt the ith component of alpha. :type dPhi: function :return: Tuple of two numpy arrays representing... 1. (N, IS) best-fit matrix B. 2. (N,) best-fit vector alpha. :rtype: Tuple[numpy.ndarray, numpy.ndarray] References: - Extensions and Uses of the Variable Projection Algorith for Solving Nonlinear Least Squares Problems by G. H. Golub and R. J. LeVeque ARO Report 79-3, Proceedings of the 1979 Army Numerical Analsysis and Computers Conference. - Variable projection for nonlinear least squares problems. Computational Optimization and Applications 54.3 (2013): 579-593 by Dianne P. O'Leary and Bert W. Rust. """ def compute_residual(alpha): """ Helper function that, given alpha, and using H, t, Phi as they are passed to the _variable_projection function, computes and returns the matrix Phi(alpha,t), B from the expression H = Phi(alpha,t)B, the residual H - Phi(alpha,t)B, and 0.5*norm(residual,'fro')^2, which will be used to denote the error. """ Phi_matrix = Phi(alpha, t) B = np.linalg.lstsq(Phi_matrix, H, rcond=None)[0] residual = H - Phi_matrix.dot(B) error = 0.5 * np.linalg.norm(residual, "fro") ** 2 return B, residual, error # Define M, IS, and IA. M, IS = H.shape IA = len(init_alpha) # Unpack all variable projection parameters stored in varpro_opts. ( init_lambda, maxlam, lamup, use_levmarq, maxiter, tol, eps_stall, use_fulljac, verbose, ) = self._varpro_opts # Initialize values. tolrank = M * np.finfo(float).eps _lambda = init_lambda alpha = np.copy(init_alpha) B, residual, error = compute_residual(alpha) U, S, Vh = self._compute_irank_svd(Phi(alpha, t), tolrank) # Initialize storage. all_error = np.zeros(maxiter) djac_matrix = np.zeros((M * IS, IA), dtype="complex") rjac = np.zeros((2 * IA, IA), dtype="complex") scales = np.zeros(IA) for itr in range(maxiter): # Build Jacobian matrix, looping over alpha indices. for i in range(IA): # Build the approximate expression for the Jacobian. dphi_temp = dPhi(alpha, t, i) ut_dphi = csr_matrix(U.conj().T @ dphi_temp) uut_dphi = csr_matrix(U @ ut_dphi) djac_a = (dphi_temp - uut_dphi) @ B djac_matrix[:, i] = djac_a.ravel(order="F") # Compute the full expression for the Jacobian. if use_fulljac: transform = np.linalg.multi_dot([U, np.linalg.inv(S), Vh]) dphit_res = csr_matrix(dphi_temp.conj().T @ residual) djac_b = transform @ dphit_res djac_matrix[:, i] += djac_b.ravel(order="F") # Scale for the Levenberg algorithm. scales[i] = 1 # Scale for the Levenberg-Marquardt algorithm. if use_levmarq: scales[i] = min(np.linalg.norm(djac_matrix[:, i]), 1) scales[i] = max(scales[i], 1e-6) # Loop to determine lambda (the step-size parameter). rhs_temp = np.copy(residual.ravel(order="F"))[:, None] q_out, djac_out, j_pvt = qr( djac_matrix, mode="economic", pivoting=True ) ij_pvt = np.arange(IA) ij_pvt = ij_pvt[j_pvt] rjac[:IA] = np.triu(djac_out[:IA]) rhs_top = q_out.conj().T.dot(rhs_temp) scales_pvt = scales[j_pvt[:IA]] rhs = np.concatenate( (rhs_top[:IA], np.zeros(IA, dtype="complex")), axis=None ) def step(_lambda, rhs, scales_pvt, ij_pvt): """ Helper function that, given a step size _lambda and the current right-hand side and pivots, computes and returns delta, the amount in which we update alpha, and the updated alpha vector. Note that this function uses rjac and alpha as they are defined outside of this function. """ # Compute the step delta. 
rjac[IA:] = _lambda * np.diag(scales_pvt) delta = np.linalg.lstsq(rjac, rhs, rcond=None)[0] delta = delta[ij_pvt] # Compute the updated alpha vector. alpha_updated = alpha.ravel() + delta.ravel() return delta, alpha_updated # Take a step using our initial step size init_lambda. delta_0, alpha_0 = step(_lambda, rhs, scales_pvt, ij_pvt) B_0, residual_0, error_0 = compute_residual(alpha_0) # Check actual improvement vs predicted improvement. actual_improvement = error - error_0 pred_improvement = ( 0.5 * np.linalg.multi_dot( [delta_0.conj().T, djac_matrix.conj().T, rhs_temp] ).real ) improvement_ratio = actual_improvement / pred_improvement if error_0 < error: # Rescale lambda based on the improvement ratio. _lambda *= max(1 / 3, 1 - (2 * improvement_ratio - 1) ** 3) alpha, B, residual, error = alpha_0, B_0, residual_0, error_0 else: # Increase lambda until something works. for _ in range(maxlam): _lambda *= lamup delta_0, alpha_0 = step(_lambda, rhs, scales_pvt, ij_pvt) B_0, residual_0, error_0 = compute_residual(alpha_0) if error_0 < error: alpha, B = alpha_0, B_0 residual, error = residual_0, error_0 break # Terminate if no appropriate step length was found. if error_0 >= error: if verbose: msg = ( "Failed to find appropriate step length at " "iteration {}. Current error {}." ) warnings.warn(msg.format(itr, error)) return B, alpha # Record the current error. all_error[itr] = error # Print iterative progress if the verbose flag is turned on. if verbose: update_msg = "Step {} Error {} Lambda {}" print(update_msg.format(itr, error, _lambda)) # Terminate if the tolerance is met. if error < tol: return B, alpha # Terminate if a stall is detected. if ( itr > 0 and all_error[itr - 1] - all_error[itr] < eps_stall * all_error[itr - 1] ): if verbose: msg = ( "Stall detected: error reduced by less than {} " "times the error at the previous step. " "Iteration {}. Current error {}." ) warnings.warn(msg.format(eps_stall, itr, error)) return B, alpha U, S, Vh = self._compute_irank_svd(Phi(alpha, t), tolrank) # Failed to meet tolerance in maxiter steps. if verbose: msg = ( "Failed to reach tolerance after maxiter = {} iterations. " "Current error {}." ) warnings.warn(msg.format(maxiter, error)) return B, alpha def _single_trial_compute_operator(self, H, t, init_alpha): """ Helper function that computes the standard optimized dmd operator. Returns the resulting DMD modes, eigenvalues, amplitudes, reduced system matrix, and full system matrix respectively. """ B, alpha = self._variable_projection( H, t, init_alpha, self._exp_function, self._exp_function_deriv ) # Save the modes, eigenvalues, and amplitudes respectively. w = B.T e = alpha b = np.sqrt(np.sum(np.abs(w) ** 2, axis=0)) # Normalize the modes and the amplitudes. inds_small = np.abs(b) < (10 * np.finfo(float).eps * np.max(b)) b[inds_small] = 1.0 w = w.dot(np.diag(1 / b)) w[:, inds_small] = 0.0 b[inds_small] = 0.0 # Compute the projected propagator Atilde. if self._use_proj: Atilde = np.linalg.multi_dot([w, np.diag(e), np.linalg.pinv(w)]) # Unproject the dmd modes. w = self._proj_basis.dot(w) else: w_proj = self._proj_basis.conj().T.dot(w) Atilde = np.linalg.multi_dot( [w_proj, np.diag(e), np.linalg.pinv(w_proj)] ) # Compute the full system matrix A. if self._compute_A: A = np.linalg.multi_dot([w, np.diag(e), np.linalg.pinv(w)]) else: A = None return w, e, b, Atilde, A def compute_operator(self, H, t): """ Compute the low-rank and the full BOP-DMD operators. :param H: Matrix of data to fit. :type H: numpy.ndarray :param t: Vector of sample times. 
:type t: numpy.ndarray :return: The BOP-DMD amplitudes. :rtype: numpy.ndarray """ # Perform an initial optimized dmd solve using init_alpha. w_0, e_0, b_0, Atilde_0, A_0 = self._single_trial_compute_operator( H, t, self._init_alpha ) # If num_trials isn't a positive int, perform standard optimized dmd. if self._num_trials <= 0 or not isinstance(self._num_trials, int): self._modes = w_0 self._eigenvalues = e_0 self._Atilde = Atilde_0 self._A = A_0 return b_0 # Perform BOP-DMD. # Initialize bagging result storage. all_w = np.empty((self._num_trials, *w_0.shape), dtype="complex") all_e = np.empty((self._num_trials, *e_0.shape), dtype="complex") all_b = np.empty((self._num_trials, *b_0.shape), dtype="complex") # Perform num_trials many trials of optimized dmd. for i in range(self._num_trials): H_i, subset_inds = self._bag(H, self._trial_size) w_i, e_i, b_i, _, _ = self._single_trial_compute_operator( H_i, t[subset_inds], e_0 ) # Set the sorting style if _eig_sort is "auto". if self._eig_sort == "auto": real_var = np.var(e_i.real) imag_var = np.var(e_i.imag) abs_var = np.var(np.abs(e_i)) all_var = [real_var, imag_var, abs_var] if np.argmax(all_var) == 0: self._eig_sort = "real" elif np.argmax(all_var) == 1: self._eig_sort = "imag" else: self._eig_sort = "abs" # Sort the results according to eigenvalue. if self._eig_sort == "real": sorted_inds = np.argsort(e_i) elif self._eig_sort == "imag": e_i_real_imag_swapped = e_i.imag + (1j * e_i.real) sorted_inds = np.argsort(e_i_real_imag_swapped) elif self._eig_sort == "abs": sorted_inds = np.argsort(np.abs(e_i)) else: raise ValueError("Provided eig_sort method is not supported.") all_w[i] = w_i[:, sorted_inds] all_e[i] = e_i[sorted_inds] all_b[i] = b_i[sorted_inds] # Compute and use the average optimized dmd results. self._modes = np.mean(all_w, axis=0) self._eigenvalues = np.mean(all_e, axis=0) # Compute Atilde using the average optimized dmd results. w_proj = self._proj_basis.conj().T.dot(self._modes) self._Atilde = np.linalg.multi_dot( [w_proj, np.diag(self._eigenvalues), np.linalg.pinv(w_proj)] ) # Compute A if requested. if self._compute_A: self._A = np.linalg.multi_dot( [ self._modes, np.diag(self._eigenvalues), np.linalg.pinv(self._modes), ] ) # Compute and save the standard deviation of the optimized dmd results. self._eigenvalues_std = np.std(all_e, axis=0) self._amplitudes_std = np.std(all_b, axis=0) return np.mean(all_b, axis=0) class BOPDMD(DMDBase): """ Bagging, Optimized Dynamic Mode Decomposition. :param svd_rank: The rank for the truncation; If 0, the method computes the optimal rank and uses it for truncation; if positive integer, the method uses the argument for the truncation; if float between 0 and 1, the rank is the number of the biggest singular values that are needed to reach the 'energy' specified by `svd_rank`; if -1, the method does not compute truncation. :type svd_rank: int or float :param compute_A: Flag that determines whether or not to compute the full Koopman operator A. Default is False, do not compute the full operator. Note that the full operator is potentially prohibitively expensive to compute. :type compute_A: bool :param use_proj: Flag that determines the type of computation to perform. If True, fit input data projected onto the first svd_rank POD modes or columns of proj_basis if provided. If False, fit the full input data. Default is True, fit projected data. :type use_proj: bool :param init_alpha: Initial guess for the continuous-time DMD eigenvalues. 
If not provided, one is computed via a trapezoidal rule approximation. Default is None (alpha not provided). :type init_alpha: numpy.ndarray :param proj_basis: Orthogonal basis for projection, where each column of proj_basis contains a basis mode. If not provided, POD modes are used. Default is None (basis not provided). :type proj_basis: numpy.ndarray :param num_trials: Number of BOP-DMD trials to perform. If num_trials is a positive integer, num_trials BOP-DMD trials are performed. Otherwise, standard optimized dmd is performed. Default is 0. :type num_trials: int :param trial_size: Size of the randomly selected subset of observations to use for each trial of bagged optimized dmd (BOP-DMD). If trial_size is a positive integer, trial_size many observations will be used per trial. If trial_size is a float between 0 and 1, int(trial_size * m) many observations will be used per trial, where m denotes the total number of data points observed. Note that any other type of input for trial_size will yield an error. Default is 0.2. :type trial_size: int or float :param eig_sort: Method used to sort eigenvalues (and modes accordingly) when performing BOP-DMD. Eigenvalues will be sorted by real part and then by imaginary part to break ties if `eig_sort="real"`, by imaginary part and then by real part to break ties if `eig_sort="imag"`, or by magnitude if `eig_sort="abs"`. If `eig_sort="auto"`, one of the previously-mentioned sorting methods is chosen depending on eigenvalue variance. Default is "auto". :type eig_sort: {"real", "imag", "abs", "auto"} :param varpro_opts_dict: Dictionary containing the desired parameter values for variable projection. The following parameters may be specified: `init_lambda`, `maxlam`, `lamup`, `use_levmarq`, `maxiter`, `tol`, `eps_stall`, `use_fulljac`, `verbose`. Default values will be used for any parameters not specified in `varpro_opts_dict`. See `BOPDMDOperator` documentation for default values and descriptions for each parameter. :type varpro_opts_dict: dict """ def __init__( self, svd_rank=0, compute_A=False, use_proj=True, init_alpha=None, proj_basis=None, num_trials=0, trial_size=0.2, eig_sort="auto", varpro_opts_dict=None, ): self._svd_rank = svd_rank self._compute_A = compute_A self._use_proj = use_proj self._init_alpha = init_alpha self._proj_basis = proj_basis self._num_trials = num_trials self._trial_size = trial_size self._eig_sort = eig_sort if varpro_opts_dict is None: self._varpro_opts_dict = {} elif not isinstance(varpro_opts_dict, dict): raise ValueError("varpro_opts_dict must be a dict.") else: self._varpro_opts_dict = varpro_opts_dict self._snapshots_holder = None self._time = None self._Atilde = None self._modes_activation_bitmask_proxy = None @property def svd_rank(self): """ :return: the rank used for the svd truncation. :rtype: int or float """ return self._svd_rank @property def compute_A(self): """ :return: flag that determines whether to compute the full operator A. :rtype: bool """ return self._compute_A @property def use_proj(self): """ :return: flag that determines whether to fit projected or full data. :rtype: bool """ return self._use_proj @property def init_alpha(self): """ :return: initial guess used for the continuous-time DMD eigenvalues. :rtype: numpy.ndarray """ if self._init_alpha is None: msg = ( "fit() hasn't been called " "and no initial value for alpha has been given." ) raise RuntimeError(msg) return self._init_alpha @property def proj_basis(self): """ :return: the projection basis used, with modes stored by column. 
:rtype: numpy.ndarray """ if self._proj_basis is None: msg = ( "fit() hasn't been called " "and no projection basis has been given." ) raise RuntimeError(msg) return self._proj_basis @property def num_trials(self): """ :return: the number of BOP-DMD trials to perform. :rtype: int """ return self._num_trials @property def trial_size(self): """ :return: size of the data subsets used during each BOP-DMD trial. :rtype: int or float """ return self._trial_size @property def time(self): """ Get the vector that contains the time points of the fitted snapshots. :return: the vector that contains the original time points. :rtype: numpy.ndarray """ if self._time is None: raise RuntimeError("fit() hasn't been called.") return self._time @property def atilde(self): """ Get the reduced Koopman operator A, called Atilde. :return: the reduced Koopman operator A. :rtype: numpy.ndarray """ return self.operator.as_numpy_array @property def A(self): """ Get the full Koopman operator A. :return: the full Koopman operator A. :rtype: numpy.ndarray """ return self.operator.A @property def dynamics(self): """ Get the time evolution of each mode. :return: matrix that contains all the time evolution, stored by row. :rtype: numpy.ndarray """ t_omega = np.exp(np.outer(self.eigs, self._time)) return np.diag(self.amplitudes).dot(t_omega) def print_varpro_opts(self): """ Prints a formatted information string that displays all chosen variable projection parameter values. """ if self._Atilde is None: raise ValueError("You need to call fit before") opt_names = [ "init_lambda", "maxlam", "lamup", "use_levmarq", "maxiter", "tol", "eps_stall", "use_fulljac", "verbose", ] print("VARIABLE PROJECTION OPTIONS:") print("============================") for name, value in zip(opt_names, self.operator.varpro_opts): if len(name) < 7: print(name + ":\t\t" + str(value)) else: print(name + ":\t" + str(value)) def _initialize_alpha(self): """ Uses projected trapezoidal rule to approximate the eigenvalues of A in z' = Az. The computed eigenvalues will serve as our initial guess for alpha. :return: Approximated eigenvalues of the matrix A. :rtype: numpy.ndarray """ # Project the snapshot data onto the projection basis. ux = self._proj_basis.conj().T.dot(self.snapshots) ux1 = ux[:, :-1] ux2 = ux[:, 1:] # Define the diagonal matrix T as the following. t1 = self._time[:-1] t2 = self._time[1:] T = np.diag(t2 - t1) # Define the matrices Y and Z as the following and compute the # rank-truncated SVD of Y. Y = (ux1 + ux2) / 2 Z = (ux2 - ux1).dot(np.linalg.inv(T)) U, s, V = compute_svd(Y, self._svd_rank) S = np.diag(s) # Compute the matrix Atilde and return its eigenvalues. Atilde = np.linalg.multi_dot([U.conj().T, Z, V, np.linalg.inv(S)]) return np.linalg.eig(Atilde)[0] def fit(self, X, t): """ Compute the Optimized Dynamic Mode Decomposition. :param X: the input snapshots. :type X: numpy.ndarray or iterable :param t: the input time vector. :type t: numpy.ndarray or iterable """ # Process the input data and convert to numpy.ndarrays. self._reset()
self._snapshots_holder = Snapshots(X)
4
2023-10-30 12:37:40+00:00
16k
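A minimal NumPy sketch (not taken from the PyDMD sources in the record above) of the bagging aggregation step that the BOP-DMD trial loop performs: each trial's eigenvalues are sorted so modes line up across trials, then averaged, and their spread is kept as an uncertainty estimate. The function and array names are illustrative only.

```python
import numpy as np

def bag_and_average(eigs_per_trial):
    """Sort each trial's eigenvalues by magnitude, then average across
    trials; the per-mode standard deviation serves as an uncertainty
    estimate, mirroring the aggregation in the BOP-DMD loop above."""
    order = np.argsort(np.abs(eigs_per_trial), axis=1)
    sorted_eigs = np.take_along_axis(eigs_per_trial, order, axis=1)
    return sorted_eigs.mean(axis=0), sorted_eigs.std(axis=0)

# Two hypothetical trials of a rank-2 fit with slightly different eigenvalues.
trials = np.array([[0.10 + 1.00j, -0.20 + 0.00j],
                   [-0.21 + 0.00j, 0.09 + 1.02j]])
mean_eigs, std_eigs = bag_and_average(trials)
print(mean_eigs, std_eigs)
```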
lewandofskee/DiAD
ldm/models/diffusion/.ipynb_checkpoints/ddpm-checkpoint.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import os import logging import timm import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from scipy.ndimage import gaussian_filter from utils.util import cal_anomaly_map, log_local, create_logger from utils.eval_helper import dump, log_metrics, merge_together, performances
14,100
if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim:
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] # x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): input_img = batch['jpg'] input_features = self.pretrained_model(input_img) output = self.log_images_test(batch) log_local(output, batch["filename"][0]) output_img = output['samples'] output_features = self.pretrained_model(output_img) input_features = input_features[1:4] output_features = output_features[1:4] anomaly_map, _ = cal_anomaly_map(input_features, output_features, input_img.shape[-1], amap_mode='a') anomaly_map = gaussian_filter(anomaly_map, sigma=5) anomaly_map = torch.from_numpy(anomaly_map) anomaly_map_prediction = anomaly_map.unsqueeze(dim=0).unsqueeze(dim=1) batch['pred'] = anomaly_map_prediction batch["output"] = output_img.cpu() batch["input"] = input_img.cpu() dump(self.evl_dir, batch) @torch.no_grad() def on_validation_epoch_start(self): self.evl_dir = "npz_result" self.logger_val = create_logger("global_logger", "log/") pretrained_model = timm.create_model("resnet50", pretrained=True, features_only=True) self.pretrained_model = pretrained_model.to("cuda") self.pretrained_model.eval() os.makedirs(self.evl_dir, exist_ok=True) @torch.no_grad() def on_validation_epoch_end(self, *args, **kwargs): # evl_metrics = {'auc': [{'name': 'max'}, {'name': 'pixel'}, {'name': 'appx'}, {'name': 'apsp'}, # {'name': 'f1px'}, {'name': 'f1sp'}]} evl_metrics = {'auc': [{'name': 'max'}, {'name': 'pixel'}, {'name': 'pro'}, {'name': 'appx'}, {'name': 'apsp'}, {'name': 'f1px'}, {'name': 'f1sp'}]} # evl_metrics = {'auc': [{'name': 'max'}, {'name': 'pixel'}]} self.print("Gathering final results ...") fileinfos, preds, masks = merge_together(self.evl_dir) ret_metrics = performances(fileinfos, preds, masks, evl_metrics) log_metrics(ret_metrics, evl_metrics) auroc_px = ret_metrics['mean_pixel_auc'] auroc_sp = ret_metrics['mean_max_auc'] val_acc = auroc_px + auroc_sp self.log('val_acc', val_acc, on_epoch=True, prog_bar=True, logger=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % 
self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet 
implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = 
torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() # z = x if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. 
/ self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == 
"eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, 
noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim:
ddim_sampler = DDIMSampler(self)
16
2023-10-30 14:21:09+00:00
16k
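A minimal sketch of the closed-form forward-diffusion step that `q_sample` in the DDPM code above implements, x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps. The helper name, shapes, and the plain `torch.linspace` beta schedule below are illustrative assumptions (the repository builds its schedule via `make_beta_schedule`), not the record's API.

```python
import torch

def q_sample_sketch(x_start, t, alphas_cumprod, noise=None):
    # Closed-form forward diffusion:
    #   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
    if noise is None:
        noise = torch.randn_like(x_start)
    # Broadcast the per-sample alpha_bar over the remaining (C, H, W) dims.
    a_bar = alphas_cumprod[t].view(-1, *([1] * (x_start.dim() - 1)))
    return a_bar.sqrt() * x_start + (1.0 - a_bar).sqrt() * noise

# Illustrative linear schedule using the defaults above (1e-4 .. 2e-2, 1000 steps).
betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
x0 = torch.rand(2, 3, 8, 8)
x_t = q_sample_sketch(x0, torch.tensor([10, 500]), alphas_cumprod)
print(x_t.shape)  # torch.Size([2, 3, 8, 8])
```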
nv-tlabs/trace
tbsim/algos/algos.py
[ { "identifier": "batch_utils", "path": "tbsim/utils/batch_utils.py", "snippet": "def batch_utils():\n return trajdataBatchUtils()" }, { "identifier": "Action", "path": "tbsim/policies/common.py", "snippet": "class Action(Trajectory):\n pass" }, { "identifier": "DiffuserMode...
import numpy as np import copy import torch import torch.nn as nn import torch.optim as optim import pytorch_lightning as pl import torch.nn.functional as F import tbsim.utils.tensor_utils as TensorUtils import tbsim.utils.metrics as Metrics from tbsim.utils.batch_utils import batch_utils from tbsim.policies.common import Action from tbsim.models.trace import DiffuserModel from tbsim.models.trace_helpers import EMA from tbsim.utils.guidance_loss import choose_action_from_guidance, choose_action_from_gt
11,211
use_map_feat_global=algo_config.use_map_feat_global, use_map_feat_grid=algo_config.use_map_feat_grid, map_encoder_model_arch=algo_config.map_encoder_model_arch, input_image_shape=modality_shapes["image"], # [C, H, W] map_feature_dim=algo_config.map_feature_dim, map_grid_feature_dim=algo_config.map_grid_feature_dim, hist_num_frames=algo_config.history_num_frames+1, # the current step is concat to the history hist_feature_dim=algo_config.history_feature_dim, cond_feature_dim=algo_config.cond_feat_dim, diffuser_model_arch=algo_config.diffuser_model_arch, horizon=algo_config.horizon, observation_dim=observation_dim, action_dim=action_dim, output_dim=output_dim, n_timesteps=algo_config.n_diffusion_steps, loss_type=algo_config.loss_type, action_weight=algo_config.action_weight, loss_discount=algo_config.loss_discount, dim_mults=algo_config.dim_mults, dynamics_type=algo_config.dynamics.type, dynamics_kwargs=algo_config.dynamics, base_dim=algo_config.base_dim, diffuser_input_mode=algo_config.diffuser_input_mode, use_conditioning=self.use_cond, cond_fill_value=self.cond_fill_val, diffuser_norm_info=algo_config.diffuser_norm_info, agent_hist_norm_info=algo_config.agent_hist_norm_info, neighbor_hist_norm_info=algo_config.neighbor_hist_norm_info, dt=algo_config.step_time, ) # set up initial guidance if guidance_config is not None: self.set_guidance(guidance_config) # set up EMA self.use_ema = algo_config.use_ema if self.use_ema: print('DIFFUSER: using EMA... val and get_action will use ema model') self.ema = EMA(algo_config.ema_decay) self.ema_policy = copy.deepcopy(self.nets["policy"]) self.ema_policy.requires_grad_(False) self.ema_update_every = algo_config.ema_step self.ema_start_step = algo_config.ema_start_step self.reset_parameters() self.cur_train_step = 0 @property def checkpoint_monitor_keys(self): if self.use_ema: return {"valLoss": "val/ema_losses_diffusion_loss"} else: return {"valLoss": "val/losses_diffusion_loss"} def forward(self, obs_dict, num_samp=1, class_free_guide_w=0.0, guide_as_filter_only=False, guide_clean=False): cur_policy = self.nets["policy"] # this function is only called at validation time, so use ema if self.use_ema: cur_policy = self.ema_policy return cur_policy(obs_dict, num_samp, return_diffusion=True, return_guidance_losses=True, class_free_guide_w=class_free_guide_w, apply_guidance=(not guide_as_filter_only), guide_clean=guide_clean)["predictions"] def _compute_metrics(self, pred_batch, data_batch): metrics = {} predictions = pred_batch["predictions"] preds = TensorUtils.to_numpy(predictions["positions"]) gt = TensorUtils.to_numpy(data_batch["target_positions"]) avail = TensorUtils.to_numpy(data_batch["target_availabilities"]) # compute ADE & FDE based on trajectory samples sample_preds = preds conf = np.ones(sample_preds.shape[0:2]) / float(sample_preds.shape[1]) metrics["ego_avg_ADE"] = Metrics.batch_average_displacement_error(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_min_ADE"] = Metrics.batch_average_displacement_error(gt, sample_preds, conf, avail, "oracle").mean() metrics["ego_avg_FDE"] = Metrics.batch_final_displacement_error(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_min_FDE"] = Metrics.batch_final_displacement_error(gt, sample_preds, conf, avail, "oracle").mean() # compute diversity scores based on trajectory samples metrics["ego_avg_APD"] = Metrics.batch_average_diversity(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_max_APD"] = Metrics.batch_average_diversity(gt, sample_preds, conf, avail, "max").mean() 
metrics["ego_avg_FPD"] = Metrics.batch_final_diversity(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_max_FPD"] = Metrics.batch_final_diversity(gt, sample_preds, conf, avail, "max").mean() return metrics def reset_parameters(self): self.ema_policy.load_state_dict(self.nets["policy"].state_dict()) def step_ema(self, step): if step < self.ema_start_step: self.reset_parameters() return self.ema.update_model_average(self.ema_policy, self.nets["policy"]) def training_step_end(self, batch_parts): self.cur_train_step += 1 def training_step(self, batch, batch_idx): """ Training on a single batch of data. Args: batch (dict): dictionary with torch.Tensors sampled from a data loader and filtered by @process_batch_for_training batch_idx (int): training step number (relative to the CURRENT epoch) - required by some Algos that need to perform staged training and early stopping Returns: info (dict): dictionary of relevant inputs, outputs, and losses that might be relevant for logging """ if self.use_ema and self.cur_train_step % self.ema_update_every == 0: self.step_ema(self.cur_train_step)
class DiffuserTrafficModel(pl.LightningModule): def __init__(self, algo_config, modality_shapes, guidance_config=None): """ Creates networks and places them into @self.nets. """ super(DiffuserTrafficModel, self).__init__() self.algo_config = algo_config self.nets = nn.ModuleDict() if algo_config.diffuser_input_mode == 'state_and_action': # "Observations" are inputs to diffuser that are not outputs observation_dim = 4 # x, y, vel, yaw # "Actions" are inputs and outputs action_dim = 2 # acc, yawvel # "output" is final output of the entired denoising process output_dim = 2 # acc, yawvel else: raise self.cond_drop_map_p = algo_config.conditioning_drop_map_p self.cond_drop_neighbor_p = algo_config.conditioning_drop_neighbor_p min_cond_drop_p = min([self.cond_drop_map_p, self.cond_drop_neighbor_p]) max_cond_drop_p = max([self.cond_drop_map_p, self.cond_drop_neighbor_p]) assert min_cond_drop_p >= 0.0 and max_cond_drop_p <= 1.0 self.use_cond = self.cond_drop_map_p < 1.0 and self.cond_drop_neighbor_p < 1.0 # no need for conditioning arch if always dropping self.cond_fill_val = algo_config.conditioning_drop_fill self.use_rasterized_map = algo_config.rasterized_map if self.use_cond: if self.cond_drop_map_p > 0: print('DIFFUSER: Dropping map input conditioning with p = %f during training...' % (self.cond_drop_map_p)) if self.cond_drop_neighbor_p > 0: print('DIFFUSER: Dropping neighbor traj input conditioning with p = %f during training...' % (self.cond_drop_neighbor_p)) self.nets["policy"] = DiffuserModel( rasterized_map=algo_config.rasterized_map, use_map_feat_global=algo_config.use_map_feat_global, use_map_feat_grid=algo_config.use_map_feat_grid, map_encoder_model_arch=algo_config.map_encoder_model_arch, input_image_shape=modality_shapes["image"], # [C, H, W] map_feature_dim=algo_config.map_feature_dim, map_grid_feature_dim=algo_config.map_grid_feature_dim, hist_num_frames=algo_config.history_num_frames+1, # the current step is concat to the history hist_feature_dim=algo_config.history_feature_dim, cond_feature_dim=algo_config.cond_feat_dim, diffuser_model_arch=algo_config.diffuser_model_arch, horizon=algo_config.horizon, observation_dim=observation_dim, action_dim=action_dim, output_dim=output_dim, n_timesteps=algo_config.n_diffusion_steps, loss_type=algo_config.loss_type, action_weight=algo_config.action_weight, loss_discount=algo_config.loss_discount, dim_mults=algo_config.dim_mults, dynamics_type=algo_config.dynamics.type, dynamics_kwargs=algo_config.dynamics, base_dim=algo_config.base_dim, diffuser_input_mode=algo_config.diffuser_input_mode, use_conditioning=self.use_cond, cond_fill_value=self.cond_fill_val, diffuser_norm_info=algo_config.diffuser_norm_info, agent_hist_norm_info=algo_config.agent_hist_norm_info, neighbor_hist_norm_info=algo_config.neighbor_hist_norm_info, dt=algo_config.step_time, ) # set up initial guidance if guidance_config is not None: self.set_guidance(guidance_config) # set up EMA self.use_ema = algo_config.use_ema if self.use_ema: print('DIFFUSER: using EMA... 
val and get_action will use ema model') self.ema = EMA(algo_config.ema_decay) self.ema_policy = copy.deepcopy(self.nets["policy"]) self.ema_policy.requires_grad_(False) self.ema_update_every = algo_config.ema_step self.ema_start_step = algo_config.ema_start_step self.reset_parameters() self.cur_train_step = 0 @property def checkpoint_monitor_keys(self): if self.use_ema: return {"valLoss": "val/ema_losses_diffusion_loss"} else: return {"valLoss": "val/losses_diffusion_loss"} def forward(self, obs_dict, num_samp=1, class_free_guide_w=0.0, guide_as_filter_only=False, guide_clean=False): cur_policy = self.nets["policy"] # this function is only called at validation time, so use ema if self.use_ema: cur_policy = self.ema_policy return cur_policy(obs_dict, num_samp, return_diffusion=True, return_guidance_losses=True, class_free_guide_w=class_free_guide_w, apply_guidance=(not guide_as_filter_only), guide_clean=guide_clean)["predictions"] def _compute_metrics(self, pred_batch, data_batch): metrics = {} predictions = pred_batch["predictions"] preds = TensorUtils.to_numpy(predictions["positions"]) gt = TensorUtils.to_numpy(data_batch["target_positions"]) avail = TensorUtils.to_numpy(data_batch["target_availabilities"]) # compute ADE & FDE based on trajectory samples sample_preds = preds conf = np.ones(sample_preds.shape[0:2]) / float(sample_preds.shape[1]) metrics["ego_avg_ADE"] = Metrics.batch_average_displacement_error(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_min_ADE"] = Metrics.batch_average_displacement_error(gt, sample_preds, conf, avail, "oracle").mean() metrics["ego_avg_FDE"] = Metrics.batch_final_displacement_error(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_min_FDE"] = Metrics.batch_final_displacement_error(gt, sample_preds, conf, avail, "oracle").mean() # compute diversity scores based on trajectory samples metrics["ego_avg_APD"] = Metrics.batch_average_diversity(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_max_APD"] = Metrics.batch_average_diversity(gt, sample_preds, conf, avail, "max").mean() metrics["ego_avg_FPD"] = Metrics.batch_final_diversity(gt, sample_preds, conf, avail, "mean").mean() metrics["ego_max_FPD"] = Metrics.batch_final_diversity(gt, sample_preds, conf, avail, "max").mean() return metrics def reset_parameters(self): self.ema_policy.load_state_dict(self.nets["policy"].state_dict()) def step_ema(self, step): if step < self.ema_start_step: self.reset_parameters() return self.ema.update_model_average(self.ema_policy, self.nets["policy"]) def training_step_end(self, batch_parts): self.cur_train_step += 1 def training_step(self, batch, batch_idx): """ Training on a single batch of data. Args: batch (dict): dictionary with torch.Tensors sampled from a data loader and filtered by @process_batch_for_training batch_idx (int): training step number (relative to the CURRENT epoch) - required by some Algos that need to perform staged training and early stopping Returns: info (dict): dictionary of relevant inputs, outputs, and losses that might be relevant for logging """ if self.use_ema and self.cur_train_step % self.ema_update_every == 0: self.step_ema(self.cur_train_step)
batch = batch_utils().parse_batch(batch)
0
2023-10-31 18:43:07+00:00
16k
nv-tlabs/pacer
uhc/smpllib/np_smpl_humanoid_batch.py
[ { "identifier": "dict_to_torch", "path": "uhc/utils/torch_ext.py", "snippet": "def dict_to_torch(input_dict, dtype = None, device = None, add_dim = False):\n if not isinstance(input_dict, dict):\n return None\n out_dict = {}\n for key, value in input_dict.items():\n if isinstance(...
import torch
import glob
import os
import sys
import pdb
import os.path as osp
import joblib
import pytorch3d.transforms as tR
import autograd.numpy as np
import time
import ipdb
from uhc.utils.torch_ext import dict_to_torch
from uhc.utils.torch_utils import *
from uhc.utils.transform_utils import *
from scipy.spatial.transform import Rotation as sRot
from uhc.smpllib.smpl_mujoco import SMPLConverter, smpl_to_qpose, smpl_to_qpose_torch, SMPL_BONE_ORDER_NAMES
from uhc.smpllib.smpl_parser import SMPL_EE_NAMES
from uhc.utils.tools import get_expert, get_expert_master
from uhc.smpllib.smpl_parser import (
    SMPL_Parser,
    SMPLH_Parser,
    SMPLX_Parser,
)
from autograd import elementwise_grad as egrad
from uhc.smpllib.smpl_robot import Robot
from uhc.smpllib.torch_smpl_humanoid import Humanoid
from uhc.utils.config_utils.copycat_config import Config
from uhc.data_loaders.dataset_amass_single import DatasetAMASSSingle
from uhc.utils.torch_ext import dict_to_torch
from uhc.smpllib.smpl_mujoco import smpl_to_qpose_torch, smplh_to_smpl
11,852
pred_joints2d = pred_joints3d @ (self.K.T) z = pred_joints2d[:, :, 2:] pred_joints2d = pred_joints2d[:, :, :2] / z pred_joints2d = smpl_op_to_op(pred_joints2d) if return_cam_3d: return pred_joints2d, pred_joints3d else: return pred_joints2d def proj_2d_line_loss(self, input_vec): wbpos = self.fk_batch_grad(input_vec) _, pred_joints3d = self.proj2d(wbpos, return_cam_3d=True) dist = np.cross(pred_joints3d[0], pred_joints3d[0] - self.camera_rays)**2 return dist.mean() def proj_2d_loss(self, input_vec, ord=2, normalize = True): wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) curr_weighting = np.array(self.weighting) if normalize: pred_joints2d = normalize_screen_coordinates(pred_joints2d, self.img_w, self.img_h) gt_2d_joints = self.gt_2d_joints_norm else: gt_2d_joints = self.gt_2d_joints if ord == 1: loss = np.abs( gt_2d_joints[self.inliers] - pred_joints2d.squeeze()[self.inliers]).squeeze().mean() else: diff = (gt_2d_joints - pred_joints2d.squeeze())**2 curr_weighting[~self.inliers] = 0 loss = (diff * curr_weighting).sum(axis=0).mean() return loss def proj_2d_body_loss(self, input_vec, ord=2, normalize = False): # Has to use the current translation (to roughly put at the same position, and then zero out the translation) wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) gt2d_center = self.gt_2d_joints[..., 7:8, :].copy() pred_joints2d += (gt2d_center - pred_joints2d[..., 7:8, :]) curr_weighting = np.array(self.weighting) if normalize: pred_joints2d = normalize_screen_coordinates(pred_joints2d, self.img_w, self.img_h) gt_2d_joints = self.gt_2d_joints_norm else: gt_2d_joints = self.gt_2d_joints if ord == 1: loss = np.abs(gt_2d_joints[self.inliers] - pred_joints2d.squeeze()[self.inliers]).squeeze().mean() else: diff = (gt_2d_joints - pred_joints2d.squeeze())**2 curr_weighting[~self.inliers] = 0 loss = (diff * curr_weighting).sum(axis=0).mean() return loss def proj_2d_root_loss(self, root_pos_rot): input_vec = np.concatenate( [root_pos_rot.reshape([1, 1, 6]), np.zeros([1, 1, 69])], axis=2) wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) return np.abs(self.gt_2d_joints[7:8] - pred_joints2d.squeeze()[7:8]).squeeze().mean() def fk_batch(self, pose, trans, convert_to_mat=True, count_offset=True): pose, trans = pose.cpu().numpy(), trans.cpu().numpy() B, seq_len = pose.shape[:2] if convert_to_mat: pose_mat = rodrigues(pose.reshape(B * seq_len * 24, 1, 3)).reshape( B, seq_len, -1, 3, 3) else: pose_mat = pose if pose_mat.shape != 5: pose_mat = pose_mat.reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, :, 0:1], trans) return_dic = {} return_dic["wbpos"] = wbody_pos return_dic["wbmat"] = wbody_mat return return_dic def fk_batch_grad(self, input_vec, count_offset=True): trans, pose = input_vec[:, :, :3], input_vec[:, :, 3:] B, seq_len = pose.shape[:2] pose_mat = rodrigues(pose.reshape(-1, 1, 3)).reshape(B, seq_len, -1, 3, 3) # pose_mat = [ # rodrigues_vec_to_rotation_mat(a) for a in pose.reshape(-1, 3) # ] # pose_mat = np.stack(pose_mat).reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, 
:, 0:1], trans) return wbody_pos def get_ee_pos(self, body_xpos, root_q, transform):
# import numpy as np sys.path.append(os.getcwd()) def smpl_op_to_op(pred_joints2d): new_2d = np.concatenate([pred_joints2d[..., [1, 4], :].mean(axis = -2, keepdims = True), \ pred_joints2d[..., 1:7, :], \ pred_joints2d[..., [7, 8, 11], :].mean(axis = -2, keepdims = True), \ pred_joints2d[..., 9:11, :], \ pred_joints2d[..., 12:, :]], \ axis = -2) return new_2d def normalize_screen_coordinates(X, w=1920, h=1080): assert X.shape[-1] == 2 # Normalize so that [0, w] is mapped to # [-1, 1], while preserving the aspect ratio return X / w * 2 - np.array([1, h / w]) def rodrigues(r): """ Rodrigues' rotation formula that turns axis-angle vector into rotation matrix in a batch-ed manner. Parameter: ---------- r: Axis-angle rotation vector of shape [batch_size, 1, 3]. Return: ------- Rotation matrix of shape [batch_size, 3, 3]. """ theta = np.linalg.norm(r, axis=(1, 2))[:, None, None] # avoid zero divide theta = np.maximum(theta, np.finfo(r.dtype).eps) r_hat = r / theta cos = np.cos(theta) z_stick = np.zeros(theta.shape[0]) m = np.stack([ z_stick, -r_hat[:, 0, 2], r_hat[:, 0, 1], r_hat[:, 0, 2], z_stick, -r_hat[:, 0, 0], -r_hat[:, 0, 1], r_hat[:, 0, 0], z_stick ], axis=1).reshape([-1, 3, 3]) i_cube = np.broadcast_to(np.expand_dims(np.eye(3), axis=0), [theta.shape[0], 3, 3]) A = np.transpose(r_hat, axes=[0, 2, 1]) B = r_hat dot = np.matmul(A, B) R = cos * i_cube + (1 - cos) * dot + np.sin(theta) * m return R def rodrigues_vec_to_rotation_mat(rot): theta = np.linalg.norm(rot, axis=0) if theta < sys.float_info.epsilon: rotation_mat = np.eye(3, dtype=float) else: rot = rot / theta I = np.eye(3, dtype=float) r_rT = np.array([[rot[0] * rot[0], rot[0] * rot[1], rot[0] * rot[2]], [rot[1] * rot[0], rot[1] * rot[1], rot[1] * rot[2]], [rot[2] * rot[0], rot[2] * rot[1], rot[2] * rot[2]]]) r_cross = np.array([[0, -rot[2], rot[1]], [rot[2], 0, -rot[0]], [-rot[1], rot[0], 0]]) rotation_mat = np.cos(theta) * I + ( 1 - np.cos(theta)) * r_rT + np.sin(theta) * r_cross return rotation_mat class Humanoid_Batch: def __init__(self, smpl_model="smpl", data_dir="data/smpl"): self.smpl_model = smpl_model if self.smpl_model == "smpl": self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") elif self.smpl_model == "smplh": self.smpl_parser_n = SMPLH_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLH_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLH_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) elif self.smpl_model == "smplx": self.smpl_parser_n = SMPLX_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLX_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLX_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) self.model_names = [ 'Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand' ] self._parents = [ -1, 0, 1, 2, 3, 0, 5, 6, 7, 0, 9, 10, 11, 12, 11, 14, 15, 16, 17, 11, 19, 20, 21, 22 ] self.smpl_index = [ SMPL_BONE_ORDER_NAMES.index(i) for i in self.model_names ] def update_model(self, betas, 
gender): betas, gender = betas.cpu().float(), gender.cpu().long() B, _ = betas.shape betas_f = betas[gender == 2] if len(betas_f) > 0: _, _, _, _, joint_offsets_f, _, _, _, _, _, _, = self.smpl_parser_f.get_mesh_offsets_batch( betas=betas_f[:, :10]) betas_n = betas[gender == 0] if len(betas_n) > 0: _, _, _, _, joint_offsets_n, _, _, _, _, _, _, = self.smpl_parser_n.get_mesh_offsets_batch( betas=betas_n[:, :10]) betas_m = betas[gender == 1] if len(betas_m) > 0: _, _, _, _, joint_offsets_m, _, _, _, _, _, _, = self.smpl_parser_m.get_mesh_offsets_batch( betas=betas_m[:, :10]) joint_offsets_all = dict() for n in SMPL_BONE_ORDER_NAMES: joint_offsets_all[n] = torch.zeros([B, 3]).float() if len(betas_f) > 0: joint_offsets_all[n][gender == 2] = joint_offsets_f[n] if len(betas_n) > 0: joint_offsets_all[n][gender == 0] = joint_offsets_n[n] if len(betas_m) > 0: joint_offsets_all[n][gender == 1] = joint_offsets_m[n] off_sets = [] for n in self.model_names: off_sets.append(joint_offsets_all[n]) # self._offsets = torch.from_numpy(np.stack(off_sets, axis=1)) self._offsets = np.round(np.stack(off_sets, axis=1), decimals=5) self.trans2joint = -self._offsets[:, 0:1] self.trans2joint[:, :, 2] = 0 # self._offsets = joblib.load("curr_offset.pkl")[None, ] def update_projection(self, cam_params, smpl2op_map, MUJOCO_2_SMPL): self.full_R = cam_params['full_R'] self.full_t = cam_params['full_t'] self.K = cam_params['K'] self.img_w = cam_params['img_w'] self.img_h = cam_params['img_h'] self.openpose_subindex = smpl2op_map < 22 self.smpl2op_map = smpl2op_map self.smpl2op_partial = self.smpl2op_map[self.openpose_subindex] self.MUJOCO_2_SMPL = MUJOCO_2_SMPL def update_tgt_joints(self, tgt_joints, inliers): self.gt_2d_joints = tgt_joints self.inliers = inliers.astype(bool) num_joints = self.gt_2d_joints.shape[-2] self.gt_2d_joints_norm = normalize_screen_coordinates(self.gt_2d_joints, self.img_w, self.img_h) self.num_frames = self.gt_2d_joints.shape[0] self.camera_rays = np.concatenate([self.gt_2d_joints, np.ones([self.num_frames, num_joints, 1])], axis=2).dot(np.linalg.inv(self.K).T) self.camera_rays /= np.linalg.norm(self.camera_rays, axis=2)[..., None] lam = 0.3 self.weighting = np.exp(lam * -np.arange(self.num_frames)) / np.sum( np.exp(lam * -np.arange(self.num_frames))) self.weighting = np.tile(self.weighting[:, None, None], [1, num_joints, 2]) # self.weighting = np.ones(self.num_frames) / self.num_frames def proj2d(self, wbpos, return_cam_3d=False): # wbpos in mujoco pred_joints3d = wbpos.squeeze()[self.MUJOCO_2_SMPL][ self.smpl2op_partial][None, ] pred_joints3d = pred_joints3d @ self.full_R.T + self.full_t pred_joints2d = pred_joints3d @ (self.K.T) z = pred_joints2d[:, :, 2:] pred_joints2d = pred_joints2d[:, :, :2] / z pred_joints2d = smpl_op_to_op(pred_joints2d) if return_cam_3d: return pred_joints2d, pred_joints3d else: return pred_joints2d def proj_2d_line_loss(self, input_vec): wbpos = self.fk_batch_grad(input_vec) _, pred_joints3d = self.proj2d(wbpos, return_cam_3d=True) dist = np.cross(pred_joints3d[0], pred_joints3d[0] - self.camera_rays)**2 return dist.mean() def proj_2d_loss(self, input_vec, ord=2, normalize = True): wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) curr_weighting = np.array(self.weighting) if normalize: pred_joints2d = normalize_screen_coordinates(pred_joints2d, self.img_w, self.img_h) gt_2d_joints = self.gt_2d_joints_norm else: gt_2d_joints = self.gt_2d_joints if ord == 1: loss = np.abs( gt_2d_joints[self.inliers] - 
pred_joints2d.squeeze()[self.inliers]).squeeze().mean() else: diff = (gt_2d_joints - pred_joints2d.squeeze())**2 curr_weighting[~self.inliers] = 0 loss = (diff * curr_weighting).sum(axis=0).mean() return loss def proj_2d_body_loss(self, input_vec, ord=2, normalize = False): # Has to use the current translation (to roughly put at the same position, and then zero out the translation) wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) gt2d_center = self.gt_2d_joints[..., 7:8, :].copy() pred_joints2d += (gt2d_center - pred_joints2d[..., 7:8, :]) curr_weighting = np.array(self.weighting) if normalize: pred_joints2d = normalize_screen_coordinates(pred_joints2d, self.img_w, self.img_h) gt_2d_joints = self.gt_2d_joints_norm else: gt_2d_joints = self.gt_2d_joints if ord == 1: loss = np.abs(gt_2d_joints[self.inliers] - pred_joints2d.squeeze()[self.inliers]).squeeze().mean() else: diff = (gt_2d_joints - pred_joints2d.squeeze())**2 curr_weighting[~self.inliers] = 0 loss = (diff * curr_weighting).sum(axis=0).mean() return loss def proj_2d_root_loss(self, root_pos_rot): input_vec = np.concatenate( [root_pos_rot.reshape([1, 1, 6]), np.zeros([1, 1, 69])], axis=2) wbpos = self.fk_batch_grad(input_vec) pred_joints2d = self.proj2d(wbpos) return np.abs(self.gt_2d_joints[7:8] - pred_joints2d.squeeze()[7:8]).squeeze().mean() def fk_batch(self, pose, trans, convert_to_mat=True, count_offset=True): pose, trans = pose.cpu().numpy(), trans.cpu().numpy() B, seq_len = pose.shape[:2] if convert_to_mat: pose_mat = rodrigues(pose.reshape(B * seq_len * 24, 1, 3)).reshape( B, seq_len, -1, 3, 3) else: pose_mat = pose if pose_mat.shape != 5: pose_mat = pose_mat.reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, :, 0:1], trans) return_dic = {} return_dic["wbpos"] = wbody_pos return_dic["wbmat"] = wbody_mat return return_dic def fk_batch_grad(self, input_vec, count_offset=True): trans, pose = input_vec[:, :, :3], input_vec[:, :, 3:] B, seq_len = pose.shape[:2] pose_mat = rodrigues(pose.reshape(-1, 1, 3)).reshape(B, seq_len, -1, 3, 3) # pose_mat = [ # rodrigues_vec_to_rotation_mat(a) for a in pose.reshape(-1, 3) # ] # pose_mat = np.stack(pose_mat).reshape(B, seq_len, -1, 3, 3) J = pose_mat.shape[2] - 1 # Exclude root if count_offset: trans = trans + self._offsets[:, 0:1] pose_mat_ordered = pose_mat[:, :, self.smpl_index] wbody_pos, wbody_mat = self.forward_kinematics_batch( pose_mat_ordered[:, :, 1:], pose_mat_ordered[:, :, 0:1], trans) return wbody_pos def get_ee_pos(self, body_xpos, root_q, transform):
ee_name = SMPL_EE_NAMES
2
2023-10-31 20:47:12+00:00
16k
Improbable-AI/dexenv
dexenv/envs/dclaw_base.py
[ { "identifier": "VecTask", "path": "dexenv/envs/base/vec_task.py", "snippet": "class VecTask(Env):\n\n def __init__(self, config, sim_device, rl_device, graphics_device_id, headless):\n \"\"\"Initialise the `VecTask`.\n Args:\n config: config dictionary for the environment.\n...
import time
import torch
import dexenv
from isaacgym import gymapi
from isaacgym import gymtorch
from isaacgym.gymutil import get_property_getter_map
from isaacgym.gymutil import get_property_setter_map
from isaacgymenvs.utils.torch_jit_utils import *
from loguru import logger
from dexenv.envs.base.vec_task import VecTask
from dexenv.envs.rewards import compute_dclaw_reward
from dexenv.utils.common import get_module_path
from dexenv.utils.common import pathlib_file
from dexenv.utils.hand_color import dclaw_body_color_mapping
from dexenv.utils.isaac_utils import get_camera_params
from dexenv.utils.torch_utils import random_quaternions
from dexenv.utils.torch_utils import torch_long
11,165
if self.cfg["env"]["effort_limit"] is not None: effort_limit = self.cfg["env"]["effort_limit"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_effort_limit"] print(f'Setting DOF effort limit to:{effort_limit}') set_dof_prop(dclaw_dof_props, 'effort', effort_limit) if self.cfg["env"]["stiffness"] is not None: stiffness = self.cfg["env"]["stiffness"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_stiffness"] print(f'Setting stiffness to:{stiffness}') set_dof_prop(dclaw_dof_props, 'stiffness', stiffness) if self.cfg["env"]["damping"] is not None: damping = self.cfg["env"]["damping"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_damping"] print(f'Setting damping to:{damping}') set_dof_prop(dclaw_dof_props, 'damping', damping) self.dclaw_dof_lower_limits = [] self.dclaw_dof_upper_limits = [] self.dclaw_default_dof_states = np.zeros(self.num_dclaw_dofs, dtype=gymapi.DofState.dtype) self.dclaw_default_dof_pos = self.dclaw_default_dof_states['pos'] self.dclaw_default_dof_vel = self.dclaw_default_dof_states['vel'] for i in range(self.num_dclaw_dofs): self.dclaw_dof_lower_limits.append(dclaw_dof_props['lower'][i]) self.dclaw_dof_upper_limits.append(dclaw_dof_props['upper'][i]) if i % 3 == 1: self.dclaw_default_dof_pos[i] = 0.8 elif i % 3 == 2: self.dclaw_default_dof_pos[i] = -1.1 else: self.dclaw_default_dof_pos[i] = 0. self.dclaw_default_dof_vel[i] = 0.0 self.dof_joint_indices = to_torch(self.dof_joint_indices, dtype=torch.long, device=self.device) self.dclaw_dof_lower_limits = to_torch(self.dclaw_dof_lower_limits, device=self.device) self.dclaw_dof_upper_limits = to_torch(self.dclaw_dof_upper_limits, device=self.device) self.dclaw_default_dof_pos = to_torch(self.dclaw_default_dof_pos, device=self.device) self.dclaw_default_dof_vel = to_torch(self.dclaw_default_dof_vel, device=self.device) self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_asset_props = self.gym.get_asset_rigid_shape_properties(dclaw_asset) for p in dclaw_asset_props: p.friction = self.cfg.env.hand.friction p.torsion_friction = self.cfg.env.hand.torsion_friction p.rolling_friction = self.cfg.env.hand.rolling_friction p.restitution = self.cfg.env.hand.restitution self.gym.set_asset_rigid_shape_properties(dclaw_asset, dclaw_asset_props) return dclaw_asset, dclaw_dof_props def get_object_start_pose(self, dclaw_start_pose): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() if self.cfg.env.obj_init_delta_pos is not None: delta_pos = self.cfg.env.obj_init_delta_pos object_start_pose.p.x = dclaw_start_pose.p.x + delta_pos[0] object_start_pose.p.y = dclaw_start_pose.p.y + delta_pos[1] object_start_pose.p.z = dclaw_start_pose.p.z + delta_pos[2] else: object_start_pose.p.x = dclaw_start_pose.p.x pose_dy, pose_dz = 0., -0.13 object_start_pose.p.y = dclaw_start_pose.p.y + pose_dy object_start_pose.p.z = dclaw_start_pose.p.z + pose_dz return object_start_pose def get_goal_object_start_pose(self, object_start_pose): self.goal_displacement = gymapi.Vec3(0., 0, 0.25) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device) goal_start_pose = gymapi.Transform() goal_start_pose.p = object_start_pose.p + self.goal_displacement return goal_start_pose def set_dof_props(self, props_dict): param_setters_map = get_property_setter_map(self.gym) param_getters_map = get_property_getter_map(self.gym) prop_name = 'dof_properties' setter = 
param_setters_map[prop_name] for env_id in range(len(self.envs)): env = self.envs[env_id] handle = self.gym.find_actor_handle(env, 'hand') prop = param_getters_map[prop_name](env, handle) for dof_prop_name, dof_prop_values in props_dict.items(): if env_id == 0: assert len(dof_prop_values) == len(self.envs) prop_val = dof_prop_values[env_id] prop[dof_prop_name].fill(prop_val) success = setter(env, handle, prop) if not success: logger.warning(f'Setting dof properties is not successful!') def update_obj_mass(self, env_ids=None): object_rb_masses = [] env_pool = env_ids if env_ids is not None else list(range(self.num_envs)) if len(env_pool) < 1: return for env_id, object_handle in zip(env_pool, self.object_handles): env_ptr = self.envs[env_id] object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) object_rb_masses.append([prop.mass for prop in object_rb_props]) if self.object_rb_masses is None: self.object_rb_masses = to_torch(object_rb_masses, dtype=torch.float, device=self.device) else: self.object_rb_masses[env_pool] = to_torch(object_rb_masses, dtype=torch.float, device=self.device) def reset(self) -> torch.Tensor: """Reset the environment. Returns: Observation dictionary """ zero_actions = self.zero_actions() self.reset_buf.fill_(1) self.reset_goal_buf.fill_(1) if self.cfg.env.action_ema is not None: self.action_ema_val = zero_actions.clone() # step the simulator self.step(zero_actions) return self.update_obs() def compute_reward(self, actions):
class DClawBase(VecTask): def __init__(self, cfg, sim_device, rl_device, graphics_device_id): self.cfg = cfg headless = self.cfg.headless self.randomize = self.cfg["task"]["randomize"] if self.randomize: logger.warning(f'Domain randomization is enabled!') self.randomization_params = self.cfg["task"]["randomization_params"] self.aggregate_mode = self.cfg["env"]["aggregateMode"] self.dist_reward_scale = self.cfg["env"]["rew"]["distRewardScale"] self.rot_reward_scale = self.cfg["env"]["rew"]["rotRewardScale"] self.success_tolerance = self.cfg["env"]["rew"]["successTolerance"] self.reach_goal_bonus = self.cfg["env"]["rew"]["reachGoalBonus"] self.fall_dist = self.cfg["env"]["rew"]["fallDistance"] self.fall_penalty = self.cfg["env"]["rew"]["fallPenalty"] self.rot_eps = self.cfg["env"]["rew"]["rotEps"] self.vel_obs_scale = 0.2 # scale factor of velocity based observations self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations self.reset_position_noise = self.cfg["env"]["resetPositionNoise"] self.reset_rotation_noise = self.cfg["env"]["resetRotationNoise"] self.reset_dof_pos_noise = self.cfg["env"]["resetDofPosRandomInterval"] self.reset_dof_vel_noise = self.cfg["env"]["resetDofVelRandomInterval"] self.force_scale = self.cfg["env"].get("forceScale", 0.0) self.force_prob_range = self.cfg["env"].get("forceProbRange", [0.001, 0.1]) self.force_decay = self.cfg["env"].get("forceDecay", 0.99) self.force_decay_interval = self.cfg["env"].get("forceDecayInterval", 0.08) self.dclaw_dof_speed_scale = self.cfg["env"]["dofSpeedScale"] # self.act_moving_average = self.cfg["env"]["actionsMovingAverage"] self.debug_viz = self.cfg["env"]["enableDebugVis"] self.max_episode_length = self.cfg["env"]["episodeLength"] self.reset_time = self.cfg["env"].get("resetTime", -1.0) self.print_success_stat = self.cfg["env"]["printNumSuccesses"] self.max_consecutive_successes = self.cfg["env"]["maxConsecutiveSuccesses"] self.av_factor = self.cfg["env"].get("averFactor", 0.1) self.object_type = self.cfg["env"]["objectType"] self.asset_files_dict = { "block": "urdf/objects/cube_multicolor.urdf", "egg": "mjcf/open_ai_assets/hand/egg.xml", "airplane": "single_objects/airplane/model.urdf", 'power_drill': 'single_objects/power_drill/model.urdf', 'mug': 'single_objects/mug/model.urdf', 'elephant': 'asymm/train/elephant/var_000/model.urdf', 'train': 'asymm/train/train/var_000/model.urdf', 'stanford_bunny': 'asymm/train/stanford_bunny/var_004/model.urdf' } self.objs_in_isaacgym = ['block', 'egg'] if "asset" in self.cfg["env"]: self.asset_files_dict["block"] = self.cfg["env"]["asset"].get("assetFileNameBlock", self.asset_files_dict["block"]) self.asset_files_dict["egg"] = self.cfg["env"]["asset"].get("assetFileNameEgg", self.asset_files_dict["egg"]) self.obs_type = self.cfg["env"]["observationType"] if not (self.obs_type in ["full_no_vel", "full", "full_state"]): raise Exception( "Unknown type of observations!\nobservationType should be one of: [openai, full_no_vel, full, full_state]") print("Obs type:", self.obs_type) ## TODO: change value here self.num_obs_dict = { "full_no_vel": 42, "full": 87, "full_state": 114 } self.up_axis = 'z' num_states = 0 self.cfg["env"]["numObservations"] = self.num_obs_dict[self.obs_type] self.cfg["env"]["numStates"] = num_states self.cfg["env"]["numActions"] = 12 self.hist_buf_reset_env_ids = None super().__init__(config=self.cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id, headless=headless) self.dt = self.sim_params.dt control_freq_inv = 
self.cfg["env"].get("controlFrequencyInv", 1) if self.reset_time > 0.0: self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt))) print("Reset time: ", self.reset_time) print("New episode length: ", self.max_episode_length) if self.viewer != None: cam_pos = gymapi.Vec3(0.16, -0.5, 0.5) cam_target = gymapi.Vec3(0.0, 0.0, 0.15) self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target) actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim) dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim) rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) if self.obs_type == "full_state": sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim) self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6) dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim) self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, self.num_dclaw_dofs) self.gym.refresh_actor_root_state_tensor(self.sim) self.gym.refresh_dof_state_tensor(self.sim) if self.cfg.env.dof_torque_on: self.gym.refresh_dof_force_tensor(self.sim) self.gym.refresh_rigid_body_state_tensor(self.sim) self.dof_state = gymtorch.wrap_tensor(dof_state_tensor) self.dclaw_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_dclaw_dofs] self.dclaw_dof_pos = self.dclaw_dof_state[..., 0] self.dclaw_dof_vel = self.dclaw_dof_state[..., 1] if self.cfg.env.dof_torque_on: self.dclaw_dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, -1) else: self.dclaw_dof_torque = None self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13) self.num_bodies = self.rigid_body_states.shape[1] self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13) if self.cfg.env.rew.pen_tb_contact: _net_cf = self.gym.acquire_net_contact_force_tensor(self.sim) self.net_contact_force = gymtorch.wrap_tensor(_net_cf).view(self.num_envs, -1, 3) table_handle = self.gym.find_actor_handle(self.envs[0], 'table') self.table_body_index = self.gym.find_actor_rigid_body_index(self.envs[0], table_handle, 'table', gymapi.DOMAIN_ENV) logger.warning(f'Table body index:{self.table_body_index}') self.table_contact_force = self.net_contact_force[:, self.table_body_index] self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device) self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1) self.reset_goal_buf = self.reset_buf.clone() self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device) self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device) self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device) self.total_successes = 0 self.total_resets = 0 self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device) self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device) self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1])) * torch.rand(self.num_envs, device=self.device) + torch.log( self.force_prob_range[1])) self.rb_forces = 
torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device) self.num_actions = self.num_dclaw_dofs self.actions = self.zero_actions() DClawBase.compute_observations(self) self.num_observations = self.obs_buf.shape[-1] self.cfg.env.numObservations = self.num_observations self.create_ob_act_space() def create_sim(self): self.dt = self.cfg["sim"]["dt"] self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, self.up_axis) self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params) self._create_ground_plane() self._create_envs(self.num_envs, self.cfg["env"]['envSpacing'], int(np.sqrt(self.num_envs))) if self.randomize: self.apply_randomizations(self.randomization_params) def _create_ground_plane(self): plane_params = gymapi.PlaneParams() plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0) plane_params.distance = 0.1 self.gym.add_ground(self.sim, plane_params) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix() object_asset_file = self.asset_files_dict[self.object_type] dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root) table_asset = self.get_table_asset() table_pose = self.get_table_pose() if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) if self.object_type in self.objs_in_isaacgym: asset_root = get_module_path('isaacgymenvs').parent.joinpath('assets').as_posix() else: asset_root = dexenv.LIB_PATH.joinpath('assets').as_posix() object_asset_options = gymapi.AssetOptions() if self.cfg.env.vhacd: object_asset_options.convex_decomposition_from_submeshes = True object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) object_asset_options.disable_gravity = True goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] print(f'Fingertip handles:{self.fingertip_handles}') dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_asset) object_rs_count = self.gym.get_asset_rigid_shape_count(object_asset) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] max_agg_bodies = self.num_dclaw_bodies + 2 * object_rb_count + 1 max_agg_shapes = self.num_dclaw_shapes + 2 * object_rs_count + 1 for i in range(self.num_envs): env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, 
dclaw_dof_props=dclaw_dof_props, env_id=i) object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.env.blockscale is not None and self.cfg.env.objectType == 'block': blockscale = float(self.cfg.env.blockscale) self.gym.set_actor_scale(env_ptr, object_handle, blockscale) self.gym.set_actor_scale(env_ptr, goal_handle, blockscale) if self.object_type != "block": self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) self.setup_torch_states() def create_camera(self, camera_poses, env_ptr, camera_params): cam_handles = [] for ic in range(min(len(camera_poses), self.cfg.cam.cam_num)): camera_handle = self.gym.create_camera_sensor(env_ptr, camera_params) if isinstance(camera_poses[ic], tuple): self.gym.set_camera_location(camera_handle, env_ptr, camera_poses[ic][0], camera_poses[ic][1]) else: self.gym.set_camera_transform(camera_handle, env_ptr, camera_poses[ic]) cam_handles.append(camera_handle) return cam_handles def get_visual_render_camera_setup(self): cam_pos = np.array([-0.7, 0, 0.5]) cam_focus_pt = np.array([0.08, 0, 0.15]) cam_focus_pt = gymapi.Vec3(*cam_focus_pt) cam_pos = gymapi.Vec3(*cam_pos) camera_poses = [(cam_pos, cam_focus_pt)] camera_params = get_camera_params(width=self.cfg.cam.visual_render_width, height=self.cfg.cam.visual_render_height, hov=45, cuda=False) return camera_poses, camera_params def create_hand_actor(self, env_ptr, dclaw_asset, dclaw_start_pose, dclaw_dof_props, env_id): dclaw_actor = self.gym.create_actor(env_ptr, dclaw_asset, dclaw_start_pose, "hand", env_id, 0, 0) if self.cfg.env.dof_torque_on: self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor) self.hand_start_states.append( [dclaw_start_pose.p.x, dclaw_start_pose.p.y, dclaw_start_pose.p.z, dclaw_start_pose.r.x, dclaw_start_pose.r.y, dclaw_start_pose.r.z, dclaw_start_pose.r.w, 0, 0, 0, 0, 0, 0]) self.gym.set_actor_dof_properties(env_ptr, dclaw_actor, dclaw_dof_props) hand_idx = self.gym.get_actor_index(env_ptr, dclaw_actor, gymapi.DOMAIN_SIM) self.hand_indices.append(hand_idx) self.gym.set_actor_dof_states(env_ptr, dclaw_actor, self.dclaw_default_dof_states, gymapi.STATE_ALL) if self.obs_type == "full_state": self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor) self.dclaws.append(dclaw_actor) self.set_hand_color(env_ptr, dclaw_actor) def set_hand_color(self, env_ptr, dclaw_actor): rgd_dict = self.gym.get_actor_rigid_body_dict(env_ptr, 
dclaw_actor) for bd, bd_id in rgd_dict.items(): if bd not in dclaw_body_color_mapping: continue color = gymapi.Vec3(*dclaw_body_color_mapping[bd]) self.gym.set_rigid_body_color(env_ptr, dclaw_actor, bd_id, gymapi.MESH_VISUAL, color) def get_table_asset(self): asset_options = gymapi.AssetOptions() asset_options.armature = 0.001 asset_options.fix_base_link = True asset_options.thickness = 0.001 asset_options.disable_gravity = True table_dims = gymapi.Vec3(0.6, 0.6, 0.1) table_asset = self.gym.create_box(self.sim, table_dims.x, table_dims.y, table_dims.z, asset_options) table_props = self.gym.get_asset_rigid_shape_properties(table_asset) for p in table_props: p.friction = self.cfg.env.table.friction p.torsion_friction = self.cfg.env.table.torsion_friction p.restitution = self.cfg.env.table.restitution p.rolling_friction = self.cfg.env.table.rolling_friction self.gym.set_asset_rigid_shape_properties(table_asset, table_props) return table_asset def get_table_pose(self): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() object_start_pose.p.x = 0 object_start_pose.p.y = 0 object_start_pose.p.z = -0.05 return object_start_pose def get_dclaw_start_pose(self): dclaw_start_pose = gymapi.Transform() dclaw_start_pose.p = gymapi.Vec3(*get_axis_params(0.25, self.up_axis_idx)) dclaw_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi) return dclaw_start_pose def setup_torch_states(self): self.render_rgb_obs_buf = None if self.cfg.rgb_render: self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0, 0, 0)) else: self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0.7, 0.7, 0.7), gymapi.Vec3(0, 0, 0)) self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view( self.num_envs, 13) self.goal_states = self.object_init_state.clone() self.goal_states[:, self.up_axis_idx] -= 0.04 self.goal_init_state = self.goal_states.clone() self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13) self.fingertip_handles = to_torch(self.fingertip_handles, dtype=torch.long, device=self.device) self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device) self.object_rb_masses = None self.update_obj_mass() self.hand_indices = to_torch(self.hand_indices, dtype=torch.long, device=self.device) self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device) self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device) def get_dclaw_asset(self, asset_root=None, asset_options=None): # load dclaw asset if asset_options is None: asset_options = gymapi.AssetOptions() asset_options.flip_visual_attachments = False asset_options.fix_base_link = True asset_options.collapse_fixed_joints = False asset_options.disable_gravity = False asset_options.thickness = 0.001 asset_options.angular_damping = 0.01 asset_options.override_inertia = True asset_options.override_com = True logger.info(f'VHACD:{self.cfg.env.vhacd}') if self.cfg.env.vhacd: asset_options.convex_decomposition_from_submeshes = True if self.cfg.physics_engine == "physx": # if self.physics_engine == gymapi.SIM_PHYSX: asset_options.use_physx_armature = True asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS if asset_root is None: asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw_4f').as_posix() robot_name = self.cfg.env.robot asset_root = 
pathlib_file(asset_root).parent.joinpath(f'{robot_name}').as_posix() dclaw_asset = self.gym.load_asset(self.sim, asset_root, f"{robot_name}.urdf", asset_options) print(f'Dclaw asset root:{asset_root} robot name:{robot_name}') self.num_dclaw_bodies = self.gym.get_asset_rigid_body_count(dclaw_asset) self.num_dclaw_shapes = self.gym.get_asset_rigid_shape_count(dclaw_asset) self.num_dclaw_dofs = self.gym.get_asset_dof_count(dclaw_asset) print(f'D-Claw:') print(f'\t Number of bodies: {self.num_dclaw_bodies}') print(f'\t Number of shapes: {self.num_dclaw_shapes}') print(f'\t Number of dofs: {self.num_dclaw_dofs}') self.dclaw_asset_dof_dict = self.gym.get_asset_dof_dict(dclaw_asset) joint_names = self.dclaw_asset_dof_dict.keys() logger.info(f'Joint names:{joint_names}') self.dof_joint_indices = list(self.dclaw_asset_dof_dict.values()) dinds = np.array(self.dof_joint_indices) assert np.all(np.diff(dinds) > 0) # check if it's in a sorted order (ascending) rb_links = self.gym.get_asset_rigid_body_names(dclaw_asset) self.fingertips = [x for x in rb_links if 'tip_link' in x] # ["one_tip_link", "two_tip_link", "three_tip_link"] self.num_fingertips = len(self.fingertips) print(f'Number of fingertips:{self.num_fingertips} Fingertips:{self.fingertips}') print(f'Actuator --- DoF Index') for act_name, act_index in zip(joint_names, self.dof_joint_indices): print(f'\t {act_name} {act_index}') dclaw_dof_props = self.gym.get_asset_dof_properties(dclaw_asset) def set_dof_prop(props, prop_name, val): if np.isscalar(val): props[prop_name].fill(val) elif len(val) == 3: props[prop_name] = np.array(list(val) * int(len(props[prop_name]) / 3)) else: props[prop_name] = np.array(val) if self.cfg["env"]["dof_vel_hard_limit"] is not None: vel_hard_limit = self.cfg["env"]["dof_vel_hard_limit"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_dof_vel_hard_limit"] print(f'Setting DOF velocity limit to:{vel_hard_limit}') set_dof_prop(dclaw_dof_props, 'velocity', vel_hard_limit) if self.cfg["env"]["effort_limit"] is not None: effort_limit = self.cfg["env"]["effort_limit"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_effort_limit"] print(f'Setting DOF effort limit to:{effort_limit}') set_dof_prop(dclaw_dof_props, 'effort', effort_limit) if self.cfg["env"]["stiffness"] is not None: stiffness = self.cfg["env"]["stiffness"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_stiffness"] print(f'Setting stiffness to:{stiffness}') set_dof_prop(dclaw_dof_props, 'stiffness', stiffness) if self.cfg["env"]["damping"] is not None: damping = self.cfg["env"]["damping"] if not self.cfg.env.soft_control else self.cfg["env"]["soft_damping"] print(f'Setting damping to:{damping}') set_dof_prop(dclaw_dof_props, 'damping', damping) self.dclaw_dof_lower_limits = [] self.dclaw_dof_upper_limits = [] self.dclaw_default_dof_states = np.zeros(self.num_dclaw_dofs, dtype=gymapi.DofState.dtype) self.dclaw_default_dof_pos = self.dclaw_default_dof_states['pos'] self.dclaw_default_dof_vel = self.dclaw_default_dof_states['vel'] for i in range(self.num_dclaw_dofs): self.dclaw_dof_lower_limits.append(dclaw_dof_props['lower'][i]) self.dclaw_dof_upper_limits.append(dclaw_dof_props['upper'][i]) if i % 3 == 1: self.dclaw_default_dof_pos[i] = 0.8 elif i % 3 == 2: self.dclaw_default_dof_pos[i] = -1.1 else: self.dclaw_default_dof_pos[i] = 0. 
self.dclaw_default_dof_vel[i] = 0.0 self.dof_joint_indices = to_torch(self.dof_joint_indices, dtype=torch.long, device=self.device) self.dclaw_dof_lower_limits = to_torch(self.dclaw_dof_lower_limits, device=self.device) self.dclaw_dof_upper_limits = to_torch(self.dclaw_dof_upper_limits, device=self.device) self.dclaw_default_dof_pos = to_torch(self.dclaw_default_dof_pos, device=self.device) self.dclaw_default_dof_vel = to_torch(self.dclaw_default_dof_vel, device=self.device) self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_asset_props = self.gym.get_asset_rigid_shape_properties(dclaw_asset) for p in dclaw_asset_props: p.friction = self.cfg.env.hand.friction p.torsion_friction = self.cfg.env.hand.torsion_friction p.rolling_friction = self.cfg.env.hand.rolling_friction p.restitution = self.cfg.env.hand.restitution self.gym.set_asset_rigid_shape_properties(dclaw_asset, dclaw_asset_props) return dclaw_asset, dclaw_dof_props def get_object_start_pose(self, dclaw_start_pose): object_start_pose = gymapi.Transform() object_start_pose.p = gymapi.Vec3() if self.cfg.env.obj_init_delta_pos is not None: delta_pos = self.cfg.env.obj_init_delta_pos object_start_pose.p.x = dclaw_start_pose.p.x + delta_pos[0] object_start_pose.p.y = dclaw_start_pose.p.y + delta_pos[1] object_start_pose.p.z = dclaw_start_pose.p.z + delta_pos[2] else: object_start_pose.p.x = dclaw_start_pose.p.x pose_dy, pose_dz = 0., -0.13 object_start_pose.p.y = dclaw_start_pose.p.y + pose_dy object_start_pose.p.z = dclaw_start_pose.p.z + pose_dz return object_start_pose def get_goal_object_start_pose(self, object_start_pose): self.goal_displacement = gymapi.Vec3(0., 0, 0.25) self.goal_displacement_tensor = to_torch( [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device) goal_start_pose = gymapi.Transform() goal_start_pose.p = object_start_pose.p + self.goal_displacement return goal_start_pose def set_dof_props(self, props_dict): param_setters_map = get_property_setter_map(self.gym) param_getters_map = get_property_getter_map(self.gym) prop_name = 'dof_properties' setter = param_setters_map[prop_name] for env_id in range(len(self.envs)): env = self.envs[env_id] handle = self.gym.find_actor_handle(env, 'hand') prop = param_getters_map[prop_name](env, handle) for dof_prop_name, dof_prop_values in props_dict.items(): if env_id == 0: assert len(dof_prop_values) == len(self.envs) prop_val = dof_prop_values[env_id] prop[dof_prop_name].fill(prop_val) success = setter(env, handle, prop) if not success: logger.warning(f'Setting dof properties is not successful!') def update_obj_mass(self, env_ids=None): object_rb_masses = [] env_pool = env_ids if env_ids is not None else list(range(self.num_envs)) if len(env_pool) < 1: return for env_id, object_handle in zip(env_pool, self.object_handles): env_ptr = self.envs[env_id] object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) object_rb_masses.append([prop.mass for prop in object_rb_props]) if self.object_rb_masses is None: self.object_rb_masses = to_torch(object_rb_masses, dtype=torch.float, device=self.device) else: self.object_rb_masses[env_pool] = to_torch(object_rb_masses, dtype=torch.float, device=self.device) def reset(self) -> torch.Tensor: """Reset the environment. 
Returns: Observation dictionary """ zero_actions = self.zero_actions() self.reset_buf.fill_(1) self.reset_goal_buf.fill_(1) if self.cfg.env.action_ema is not None: self.action_ema_val = zero_actions.clone() # step the simulator self.step(zero_actions) return self.update_obs() def compute_reward(self, actions):
res = compute_dclaw_reward(
1
2023-10-25 17:22:41+00:00
16k
CVHub520/yolov5_obb
val.py
[ { "identifier": "poly2hbb", "path": "utils/rboxs_utils.py", "snippet": "def poly2hbb(polys):\n \"\"\"\n Trans poly format to hbb format\n Args:\n rboxes (array/tensor): (num_gts, poly) \n\n Returns:\n hbboxes (array/tensor): (num_gts, [xc yc w h]) \n \"\"\"\n assert polys...
import argparse
import json
import os
import sys
import numpy as np
import torch
from pathlib import Path
from threading import Thread
from tqdm import tqdm
from utils.rboxs_utils import poly2hbb, rbox2poly
from models.common import DetectMultiBackend
from utils.callbacks import Callbacks
from utils.datasets import create_dataloader
from utils.general import (LOGGER, box_iou, check_dataset, check_img_size, check_requirements, check_yaml,
                           coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args,
                           scale_coords, scale_polys, xywh2xyxy, xyxy2xywh, non_max_suppression_obb)
from utils.metrics import ConfusionMatrix, ap_per_class
from utils.plots import output_to_target, plot_images, plot_val_study
from utils.torch_utils import select_device, time_sync
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
12,121
FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # def save_one_json(predn, jdict, path, class_map): def save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map): """ Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236, "poly": [...]} Args: pred_hbbn (tensor): (n, [poly, conf, cls]) pred_polyn (tensor): (n, [xyxy, conf, cls]) """ image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(pred_hbbn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred_polyn.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': class_map[int(p[-1]) + 1], # COCO's category_id start from 1, not 0 'bbox': [round(x, 1) for x in b], 'score': round(p[-2], 5), 'poly': [round(x, 1) for x in p[:8]], 'file_name': path.stem}) def process_batch(detections, labels, iouv): """ Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 Returns: correct (Array[N, 10]), for 10 IoU levels """ correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device) iou = box_iou(labels[:, 1:], detections[:, :4]) x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] matches = torch.Tensor(matches).to(iouv.device) correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv return correct @torch.no_grad() def run(data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.01, # confidence threshold iou_thres=0.4, # NMS IoU threshold task='val', # train, val, test, speed or study device='', # cuda device, i.e. 
0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, callbacks=Callbacks(), compute_loss=None, ): # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() elif engine: batch_size = model.batch_size else: half = False batch_size = 1 # export.py models default to batch-size 1 device = torch.device('cpu')
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 model accuracy on a custom dataset Usage: $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640 """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # def save_one_json(predn, jdict, path, class_map): def save_one_json(pred_hbbn, pred_polyn, jdict, path, class_map): """ Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236, "poly": [...]} Args: pred_hbbn (tensor): (n, [poly, conf, cls]) pred_polyn (tensor): (n, [xyxy, conf, cls]) """ image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(pred_hbbn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred_polyn.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': class_map[int(p[-1]) + 1], # COCO's category_id start from 1, not 0 'bbox': [round(x, 1) for x in b], 'score': round(p[-2], 5), 'poly': [round(x, 1) for x in p[:8]], 'file_name': path.stem}) def process_batch(detections, labels, iouv): """ Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format. Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 Returns: correct (Array[N, 10]), for 10 IoU levels """ correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device) iou = box_iou(labels[:, 1:], detections[:, :4]) x = torch.where((iou >= iouv[0]) & (labels[:, 0:1] == detections[:, 5])) # IoU above threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detection, iou] if x[0].shape[0] > 1: matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] matches = torch.Tensor(matches).to(iouv.device) correct[matches[:, 1].long()] = matches[:, 2:3] >= iouv return correct @torch.no_grad() def run(data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.01, # confidence threshold iou_thres=0.4, # NMS IoU threshold task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, callbacks=Callbacks(), compute_loss=None, ): # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() elif engine: batch_size = model.batch_size else: half = False batch_size = 1 # export.py models default to batch-size 1 device = torch.device('cpu')
LOGGER.info(f'Forcing --batch-size 1 square inference shape(1,3,{imgsz},{imgsz}) for non-PyTorch backends')
5
2023-10-31 06:06:41+00:00
16k
DataCanvasIO/LMS
lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py
[ { "identifier": "LoraModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/lora.py", "snippet": "class LoraModel(torch.nn.Module):\n \"\"\"\n Creates Low Rank Adapter (Lora) model from a pretrained transformers model.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): T...
import inspect import os import warnings import torch from contextlib import contextmanager from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory from huggingface_hub import hf_hub_download from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.modeling_outputs import SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from .tuners import AdaLoraModel, LoraModel, PrefixEncoder, PromptEmbedding, PromptEncoder from .utils import ( TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftConfig, PeftType, PromptLearningConfig, TaskType, _set_adapter, _set_trainable, get_peft_model_state_dict, set_peft_model_state_dict, shift_tokens_right, ) from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
11,175
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder,
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder,
PeftType.ADALORA: AdaLoraModel,
1
2023-10-30 10:50:32+00:00
16k
chenran-li/RQL-release
sb3_contrib/tqc/tqc.py
[ { "identifier": "ReplayBuffer", "path": "stable_baselines3/common/buffers.py", "snippet": "class ReplayBuffer(BaseBuffer):\n \"\"\"\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation ...
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union from gym import spaces from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.noise import ActionNoise from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback from stable_baselines3.common.utils import get_parameters_by_name, polyak_update from sb3_contrib.common.utils import quantile_huber_loss from sb3_contrib.tqc.policies import CnnPolicy, MlpPolicy, MultiInputPolicy, TQCPolicy import numpy as np import torch as th
13,267
SelfTQC = TypeVar("SelfTQC", bound="TQC") class TQC(OffPolicyAlgorithm): """ Controlling Overestimation Bias with Truncated Mixture of Continuous Distributional Quantile Critics. Paper: https://arxiv.org/abs/2005.04269 This implementation uses SB3 SAC implementation as base. :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: learning rate for adam optimizer, the same learning rate will be used for all networks (Q-Values, Actor and Value function) it can be a function of the current progress remaining (from 1 to 0) :param buffer_size: size of the replay buffer :param learning_starts: how many steps of the model to collect transitions for before learning starts :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) :param gamma: the discount factor :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit like ``(5, "step")`` or ``(2, "episode")``. :param gradient_steps: How many gradient update after each step :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). If ``None``, it will be automatically selected. :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 :param ent_coef: Entropy regularization coefficient. (Equivalent to inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off. Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value) :param target_update_interval: update the target network every ``target_network_update_freq`` gradient steps. :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``) :param top_quantiles_to_drop_per_net: Number of quantiles to drop per network :param use_sde: Whether to use generalized State Dependent Exploration (gSDE) instead of action noise exploration (default: False) :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE Default: -1 (only sample at the beginning of the rollout) :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling during the warm up phase (before learning starts) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: the verbosity level: 0 no output, 1 info, 2 debug :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. Setting it to auto, the code will be run on the GPU if possible. :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy,
SelfTQC = TypeVar("SelfTQC", bound="TQC") class TQC(OffPolicyAlgorithm): """ Controlling Overestimation Bias with Truncated Mixture of Continuous Distributional Quantile Critics. Paper: https://arxiv.org/abs/2005.04269 This implementation uses SB3 SAC implementation as base. :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...) :param env: The environment to learn from (if registered in Gym, can be str) :param learning_rate: learning rate for adam optimizer, the same learning rate will be used for all networks (Q-Values, Actor and Value function) it can be a function of the current progress remaining (from 1 to 0) :param buffer_size: size of the replay buffer :param learning_starts: how many steps of the model to collect transitions for before learning starts :param batch_size: Minibatch size for each gradient update :param tau: the soft update coefficient ("Polyak update", between 0 and 1) :param gamma: the discount factor :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit like ``(5, "step")`` or ``(2, "episode")``. :param gradient_steps: How many gradient update after each step :param action_noise: the action noise type (None by default), this can help for hard exploration problem. Cf common.noise for the different action noise type. :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``). If ``None``, it will be automatically selected. :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation. :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer at a cost of more complexity. See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195 :param ent_coef: Entropy regularization coefficient. (Equivalent to inverse of reward scale in the original SAC paper.) Controlling exploration/exploitation trade-off. Set it to 'auto' to learn it automatically (and 'auto_0.1' for using 0.1 as initial value) :param target_update_interval: update the target network every ``target_network_update_freq`` gradient steps. :param target_entropy: target entropy when learning ``ent_coef`` (``ent_coef = 'auto'``) :param top_quantiles_to_drop_per_net: Number of quantiles to drop per network :param use_sde: Whether to use generalized State Dependent Exploration (gSDE) instead of action noise exploration (default: False) :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE Default: -1 (only sample at the beginning of the rollout) :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling during the warm up phase (before learning starts) :param policy_kwargs: additional arguments to be passed to the policy on creation :param verbose: the verbosity level: 0 no output, 1 info, 2 debug :param seed: Seed for the pseudo random generators :param device: Device (cpu, cuda, ...) on which the code should be run. Setting it to auto, the code will be run on the GPU if possible. :param _init_setup_model: Whether or not to build the network at the creation of the instance """ policy_aliases: Dict[str, Type[BasePolicy]] = { "MlpPolicy": MlpPolicy,
"CnnPolicy": CnnPolicy,
8
2023-10-28 01:09:21+00:00
16k
zyang1580/CoLLM
minigpt4/runners/runner_base_rec.py
[ { "identifier": "download_cached_file", "path": "minigpt4/common/dist_utils.py", "snippet": "def download_cached_file(url, check_hash=True, progress=False):\n \"\"\"\n Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again.\n If distributed, only...
import datetime import json import logging import os import time import torch import torch.distributed as dist import webdataset as wds from pathlib import Path from minigpt4.common.dist_utils import ( download_cached_file, get_rank, get_world_size, is_main_process, main_process, ) from minigpt4.common.registry import registry from minigpt4.common.utils import is_url from minigpt4.datasets.data_utils import concat_datasets, reorg_datasets_by_split, ChainDataset from minigpt4.datasets.datasets.dataloader_utils import ( IterLoader, MultiIterLoader, PrefetchLoader, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader, DistributedSampler from minigpt4.runners.runner_base import RunnerBase
11,142
# shuffle=sampler is None and is_train, # collate_fn=collate_fn, # drop_last=True if is_train else False, # ) # loader = PrefetchLoader(loader) # if is_train: # loader = IterLoader(loader, use_distributed=self.use_distributed) # return loader # loaders = [] # for dataset, bsz, is_train, collate_fn in zip( # datasets, batch_sizes, is_trains, collate_fns # ): # if isinstance(dataset, list) or isinstance(dataset, tuple): # if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None: # dataset_ratios = [d.sample_ratio for d in dataset] # loader = MultiIterLoader( # loaders=[ # _create_loader(d, num_workers, bsz, is_train, collate_fn[i]) # for i, d in enumerate(dataset) # ], # ratios=dataset_ratios, # ) # else: # loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn) # loaders.append(loader) # return loaders # @main_process # def _save_checkpoint(self, cur_epoch, is_best=False): # """ # Save the checkpoint at the current epoch. # """ # model_no_ddp = self.unwrap_dist_model(self.model) # param_grad_dic = { # k: v.requires_grad for (k, v) in model_no_ddp.named_parameters() # } # state_dict = model_no_ddp.state_dict() # for k in list(state_dict.keys()): # if k in param_grad_dic.keys() and not param_grad_dic[k]: # # delete parameters that do not require gradient # del state_dict[k] # save_obj = { # "model": state_dict, # "optimizer": self.optimizer.state_dict(), # "config": self.config.to_dict(), # "scaler": self.scaler.state_dict() if self.scaler else None, # "epoch": cur_epoch, # } # save_to = os.path.join( # self.output_dir, # "checkpoint_{}.pth".format("best" if is_best else cur_epoch), # ) # logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to)) # torch.save(save_obj, save_to) # def _reload_best_model(self, model): # """ # Load the best checkpoint for evaluation. # """ # checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth") # logging.info("Loading checkpoint from {}.".format(checkpoint_path)) # checkpoint = torch.load(checkpoint_path, map_location="cpu") # try: # model.load_state_dict(checkpoint["model"]) # except RuntimeError as e: # logging.warning( # """ # Key mismatch when loading checkpoint. This is expected if only part of the model is saved. # Trying to load the model with strict=False. # """ # ) # model.load_state_dict(checkpoint["model"], strict=False) # return model # def _load_checkpoint(self, url_or_filename): # """ # Resume from a checkpoint. 
# """ # if is_url(url_or_filename): # cached_file = download_cached_file( # url_or_filename, check_hash=False, progress=True # ) # checkpoint = torch.load(cached_file, map_location=self.device) # elif os.path.isfile(url_or_filename): # checkpoint = torch.load(url_or_filename, map_location=self.device) # else: # raise RuntimeError("checkpoint url or path is invalid") # state_dict = checkpoint["model"] # self.unwrap_dist_model(self.model).load_state_dict(state_dict,strict=False) # self.optimizer.load_state_dict(checkpoint["optimizer"]) # if self.scaler and "scaler" in checkpoint: # self.scaler.load_state_dict(checkpoint["scaler"]) # self.start_epoch = checkpoint["epoch"] + 1 # logging.info("Resume checkpoint from {}".format(url_or_filename)) # @main_process # def log_stats(self, stats, split_name): # if isinstance(stats, dict): # log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}} # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(log_stats) + "\n") # elif isinstance(stats, list): # pass # @main_process # def log_config(self): # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(self.config.to_dict(), indent=4) + "\n") @registry.register_runner("rec_runner_base")
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ # @registry.register_runner("rec_runner_base") # class RecRunnerBase: # """ # A runner class to train and evaluate a model given a task and datasets. # The runner uses pytorch distributed data parallel by default. Future release # will support other distributed frameworks. # """ # def __init__(self, cfg, task, model, datasets, job_id): # self.config = cfg # self.job_id = job_id # self.task = task # self.datasets = datasets # self._model = model # self._wrapped_model = None # self._device = None # self._optimizer = None # self._scaler = None # self._dataloaders = None # self._lr_sched = None # self.start_epoch = 0 # # self.setup_seeds() # self.setup_output_dir() # @property # def device(self): # if self._device is None: # self._device = torch.device(self.config.run_cfg.device) # return self._device # @property # def use_distributed(self): # return self.config.run_cfg.distributed # @property # def model(self): # """ # A property to get the DDP-wrapped model on the device. # """ # # move model to device # if self._model.device != self.device: # self._model = self._model.to(self.device) # # distributed training wrapper # if self.use_distributed: # if self._wrapped_model is None: # self._wrapped_model = DDP( # self._model, device_ids=[self.config.run_cfg.gpu] # ) # else: # self._wrapped_model = self._model # return self._wrapped_model # @property # def optimizer(self): # # TODO make optimizer class and configurations # if self._optimizer is None: # num_parameters = 0 # p_wd, p_non_wd = [], [] # for n, p in self.model.named_parameters(): # if not p.requires_grad: # continue # frozen weights # print(n) # if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n: # p_non_wd.append(p) # else: # p_wd.append(p) # num_parameters += p.data.nelement() # logging.info("number of trainable parameters: %d" % num_parameters) # optim_params = [ # { # "params": p_wd, # "weight_decay": float(self.config.run_cfg.weight_decay), # }, # {"params": p_non_wd, "weight_decay": 0}, # ] # beta2 = self.config.run_cfg.get("beta2", 0.999) # self._optimizer = torch.optim.AdamW( # optim_params, # lr=float(self.config.run_cfg.init_lr), # weight_decay=float(self.config.run_cfg.weight_decay), # betas=(0.9, beta2), # ) # return self._optimizer # @property # def scaler(self): # amp = self.config.run_cfg.get("amp", False) # if amp: # if self._scaler is None: # self._scaler = torch.cuda.amp.GradScaler() # return self._scaler # @property # def lr_scheduler(self): # """ # A property to get and create learning rate scheduler by split just in need. 
# """ # if self._lr_sched is None: # lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched) # # max_epoch = self.config.run_cfg.max_epoch # max_epoch = self.max_epoch # # min_lr = self.config.run_cfg.min_lr # min_lr = self.min_lr # # init_lr = self.config.run_cfg.init_lr # init_lr = self.init_lr # # optional parameters # decay_rate = self.config.run_cfg.get("lr_decay_rate", None) # warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1) # warmup_steps = self.config.run_cfg.get("warmup_steps", 0) # iters_per_epoch = self.config.run_cfg.get("iters_per_epoch", None) # if iters_per_epoch is None: # try: # iters_per_epoch = len(self.dataloaders['train']) # except (AttributeError, TypeError): # iters_per_epoch = 10000 # self._lr_sched = lr_sched_cls( # optimizer=self.optimizer, # max_epoch=max_epoch, # iters_per_epoch=iters_per_epoch, # min_lr=min_lr, # init_lr=init_lr, # decay_rate=decay_rate, # warmup_start_lr=warmup_start_lr, # warmup_steps=warmup_steps, # ) # return self._lr_sched # @property # def dataloaders(self) -> dict: # """ # A property to get and create dataloaders by split just in need. # If no train_dataset_ratio is provided, concatenate map-style datasets and # chain wds.DataPipe datasets separately. Training set becomes a tuple # (ConcatDataset, ChainDataset), both are optional but at least one of them is # required. The resultant ConcatDataset and ChainDataset will be sampled evenly. # If train_dataset_ratio is provided, create a MultiIterLoader to sample # each dataset by ratios during training. # Currently do not support multiple datasets for validation and test. # Returns: # dict: {split_name: (tuples of) dataloader} # """ # if self._dataloaders is None: # # concatenate map-style datasets and chain wds.DataPipe datasets separately # # training set becomes a tuple (ConcatDataset, ChainDataset), both are # # optional but at least one of them is required. The resultant ConcatDataset # # and ChainDataset will be sampled evenly. # logging.info( # "dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)." # ) # datasets = reorg_datasets_by_split(self.datasets) # self.datasets = datasets # # self.datasets = concat_datasets(datasets) # # print dataset statistics after concatenation/chaining # for split_name in self.datasets: # if isinstance(self.datasets[split_name], tuple) or isinstance( # self.datasets[split_name], list # ): # # mixed wds.DataPipeline and torch.utils.data.Dataset # num_records = sum( # [ # len(d) # if not type(d) in [wds.DataPipeline, ChainDataset] # else 0 # for d in self.datasets[split_name] # ] # ) # else: # if hasattr(self.datasets[split_name], "__len__"): # # a single map-style dataset # num_records = len(self.datasets[split_name]) # else: # # a single wds.DataPipeline # num_records = -1 # logging.info( # "Only a single wds.DataPipeline dataset, no __len__ attribute." 
# ) # if num_records >= 0: # logging.info( # "Loaded {} records for {} split from the dataset.".format( # num_records, split_name # ) # ) # # create dataloaders # split_names = sorted(self.datasets.keys()) # datasets = [self.datasets[split] for split in split_names] # is_trains = [split in self.train_splits for split in split_names] # batch_sizes = [ # self.config.run_cfg.batch_size_train # if split == "train" # else self.config.run_cfg.batch_size_eval # for split in split_names # ] # collate_fns = [] # for dataset in datasets: # if isinstance(dataset, tuple) or isinstance(dataset, list): # collate_fns.append([getattr(d, "collater", None) for d in dataset]) # else: # collate_fns.append(getattr(dataset, "collater", None)) # dataloaders = self.create_loaders( # datasets=datasets, # num_workers=self.config.run_cfg.num_workers, # batch_sizes=batch_sizes, # is_trains=is_trains, # collate_fns=collate_fns, # ) # self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)} # return self._dataloaders # @property # def cuda_enabled(self): # return self.device.type == "cuda" # @property # def max_epoch(self): # return int(self.config.run_cfg.max_epoch) # @property # def log_freq(self): # log_freq = self.config.run_cfg.get("log_freq", 50) # return int(log_freq) # @property # def init_lr(self): # return float(self.config.run_cfg.init_lr) # @property # def min_lr(self): # return float(self.config.run_cfg.min_lr) # @property # def accum_grad_iters(self): # return int(self.config.run_cfg.get("accum_grad_iters", 1)) # @property # def valid_splits(self): # valid_splits = self.config.run_cfg.get("valid_splits", []) # if len(valid_splits) == 0: # logging.info("No validation splits found.") # return valid_splits # @property # def test_splits(self): # test_splits = self.config.run_cfg.get("test_splits", []) # return test_splits # @property # def train_splits(self): # train_splits = self.config.run_cfg.get("train_splits", []) # if len(train_splits) == 0: # logging.info("Empty train splits.") # return train_splits # @property # def evaluate_only(self): # """ # Set to True to skip training. 
# """ # return self.config.run_cfg.evaluate # @property # def use_dist_eval_sampler(self): # return self.config.run_cfg.get("use_dist_eval_sampler", True) # @property # def resume_ckpt_path(self): # return self.config.run_cfg.get("resume_ckpt_path", None) # @property # def train_loader(self): # train_dataloader = self.dataloaders["train"] # return train_dataloader # def setup_output_dir(self): # lib_root = Path(registry.get_path("library_root")) # output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id # result_dir = output_dir / "result" # output_dir.mkdir(parents=True, exist_ok=True) # result_dir.mkdir(parents=True, exist_ok=True) # registry.register_path("result_dir", str(result_dir)) # registry.register_path("output_dir", str(output_dir)) # self.result_dir = result_dir # self.output_dir = output_dir # def train(self): # start_time = time.time() # best_agg_metric = 0 # best_epoch = 0 # self.log_config() # # resume from checkpoint if specified # if not self.evaluate_only and self.resume_ckpt_path is not None: # self._load_checkpoint(self.resume_ckpt_path) # for cur_epoch in range(self.start_epoch, self.max_epoch): # # training phase # if not self.evaluate_only: # logging.info("Start training") # train_stats = self.train_epoch(cur_epoch) # self.log_stats(split_name="train", stats=train_stats) # # evaluation phase # if len(self.valid_splits) > 0: # for split_name in self.valid_splits: # logging.info("Evaluating on {}.".format(split_name)) # val_log = self.eval_epoch( # split_name=split_name, cur_epoch=cur_epoch # ) # if val_log is not None: # if is_main_process(): # assert ( # "agg_metrics" in val_log # ), "No agg_metrics found in validation log." # agg_metrics = val_log["agg_metrics"] # if agg_metrics > best_agg_metric and split_name == "val": # best_epoch, best_agg_metric = cur_epoch, agg_metrics # self._save_checkpoint(cur_epoch, is_best=True) # val_log.update({"best_epoch": best_epoch}) # self.log_stats(val_log, split_name) # else: # # if no validation split is provided, we just save the checkpoint at the end of each epoch. # if not self.evaluate_only: # self._save_checkpoint(cur_epoch, is_best=False) # if self.evaluate_only: # break # if self.config.run_cfg.distributed: # dist.barrier() # # testing phase # test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch # self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only) # total_time = time.time() - start_time # total_time_str = str(datetime.timedelta(seconds=int(total_time))) # logging.info("Training time {}".format(total_time_str)) # def evaluate(self, cur_epoch="best", skip_reload=False): # test_logs = dict() # if len(self.test_splits) > 0: # for split_name in self.test_splits: # test_logs[split_name] = self.eval_epoch( # split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload # ) # return test_logs # def train_epoch(self, epoch): # # train # self.model.train() # return self.task.train_epoch( # epoch=epoch, # model=self.model, # data_loader=self.train_loader, # optimizer=self.optimizer, # scaler=self.scaler, # lr_scheduler=self.lr_scheduler, # cuda_enabled=self.cuda_enabled, # log_freq=self.log_freq, # accum_grad_iters=self.accum_grad_iters, # ) # @torch.no_grad() # def eval_epoch(self, split_name, cur_epoch, skip_reload=False): # """ # Evaluate the model on a given split. # Args: # split_name (str): name of the split to evaluate on. # cur_epoch (int): current epoch. # skip_reload_best (bool): whether to skip reloading the best checkpoint. 
# During training, we will reload the best checkpoint for validation. # During testing, we will use provided weights and skip reloading the best checkpoint . # """ # self.model.eval() # data_loader = self.dataloaders.get(split_name, None) # assert data_loader, "data_loader for split {} is None.".format(split_name) # # TODO In validation, you need to compute loss as well as metrics # # TODO consider moving to model.before_evaluation() # model = self.unwrap_dist_model(self.model) # if not skip_reload and cur_epoch == "best": # model = self._reload_best_model(model) # model.eval() # self.task.before_evaluation( # model=model, # dataset=self.datasets[split_name], # ) # results = self.task.evaluation(model, data_loader) # if results is not None: # return self.task.after_evaluation( # val_result=results, # split_name=split_name, # epoch=cur_epoch, # ) # @torch.no_grad() # def eval_epoch_new(self, split_name, cur_epoch): # """ # Evaluate the model on a given split. # Args: # split_name (str): name of the split to evaluate on. # cur_epoch (int): current epoch. # skip_reload_best (bool): whether to skip reloading the best checkpoint. # During training, we will reload the best checkpoint for validation. # During testing, we will use provided weights and skip reloading the best checkpoint . # """ # data_loader = self.dataloaders.get(split_name, None) # assert data_loader, "data_loader for split {} is None.".format(split_name) # # TODO In validation, you need to compute loss as well as metrics # # TODO consider moving to model.before_evaluation() # model = self.unwrap_dist_model(self.model) # # if not skip_reload and cur_epoch == "best": # # model = self._reload_best_model(model) # model.eval() # self.task.before_evaluation( # model=model, # dataset=self.datasets[split_name], # ) # results = self.task.evaluation(model, data_loader) # return results # # if results is not None: # # return self.task.after_evaluation( # # val_result=results, # # split_name=split_name, # # epoch=cur_epoch, # # ) # def unwrap_dist_model(self, model): # if self.use_distributed: # return model.module # else: # return model # def create_loaders( # self, # datasets, # num_workers, # batch_sizes, # is_trains, # collate_fns, # dataset_ratios=None, # ): # """ # Create dataloaders for training and validation. # """ # def _create_loader(dataset, num_workers, bsz, is_train, collate_fn): # # create a single dataloader for each split # if isinstance(dataset, ChainDataset) or isinstance( # dataset, wds.DataPipeline # ): # # wds.WebdDataset instance are chained together # # webdataset.DataPipeline has its own sampler and collate_fn # loader = iter( # DataLoader( # dataset, # batch_size=bsz, # num_workers=num_workers, # pin_memory=True, # ) # ) # else: # # map-style dataset are concatenated together # # setup distributed sampler # if self.use_distributed: # sampler = DistributedSampler( # dataset, # shuffle=is_train, # num_replicas=get_world_size(), # rank=get_rank(), # ) # if not self.use_dist_eval_sampler: # # e.g. 
retrieval evaluation # sampler = sampler if is_train else None # else: # sampler = None # loader = DataLoader( # dataset, # batch_size=bsz, # num_workers=num_workers, # pin_memory=True, # sampler=sampler, # shuffle=sampler is None and is_train, # collate_fn=collate_fn, # drop_last=True if is_train else False, # ) # loader = PrefetchLoader(loader) # if is_train: # loader = IterLoader(loader, use_distributed=self.use_distributed) # return loader # loaders = [] # for dataset, bsz, is_train, collate_fn in zip( # datasets, batch_sizes, is_trains, collate_fns # ): # if isinstance(dataset, list) or isinstance(dataset, tuple): # if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None: # dataset_ratios = [d.sample_ratio for d in dataset] # loader = MultiIterLoader( # loaders=[ # _create_loader(d, num_workers, bsz, is_train, collate_fn[i]) # for i, d in enumerate(dataset) # ], # ratios=dataset_ratios, # ) # else: # loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn) # loaders.append(loader) # return loaders # @main_process # def _save_checkpoint(self, cur_epoch, is_best=False): # """ # Save the checkpoint at the current epoch. # """ # model_no_ddp = self.unwrap_dist_model(self.model) # param_grad_dic = { # k: v.requires_grad for (k, v) in model_no_ddp.named_parameters() # } # state_dict = model_no_ddp.state_dict() # for k in list(state_dict.keys()): # if k in param_grad_dic.keys() and not param_grad_dic[k]: # # delete parameters that do not require gradient # del state_dict[k] # save_obj = { # "model": state_dict, # "optimizer": self.optimizer.state_dict(), # "config": self.config.to_dict(), # "scaler": self.scaler.state_dict() if self.scaler else None, # "epoch": cur_epoch, # } # save_to = os.path.join( # self.output_dir, # "checkpoint_{}.pth".format("best" if is_best else cur_epoch), # ) # logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to)) # torch.save(save_obj, save_to) # def _reload_best_model(self, model): # """ # Load the best checkpoint for evaluation. # """ # checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth") # logging.info("Loading checkpoint from {}.".format(checkpoint_path)) # checkpoint = torch.load(checkpoint_path, map_location="cpu") # try: # model.load_state_dict(checkpoint["model"]) # except RuntimeError as e: # logging.warning( # """ # Key mismatch when loading checkpoint. This is expected if only part of the model is saved. # Trying to load the model with strict=False. # """ # ) # model.load_state_dict(checkpoint["model"], strict=False) # return model # def _load_checkpoint(self, url_or_filename): # """ # Resume from a checkpoint. 
# """ # if is_url(url_or_filename): # cached_file = download_cached_file( # url_or_filename, check_hash=False, progress=True # ) # checkpoint = torch.load(cached_file, map_location=self.device) # elif os.path.isfile(url_or_filename): # checkpoint = torch.load(url_or_filename, map_location=self.device) # else: # raise RuntimeError("checkpoint url or path is invalid") # state_dict = checkpoint["model"] # self.unwrap_dist_model(self.model).load_state_dict(state_dict,strict=False) # self.optimizer.load_state_dict(checkpoint["optimizer"]) # if self.scaler and "scaler" in checkpoint: # self.scaler.load_state_dict(checkpoint["scaler"]) # self.start_epoch = checkpoint["epoch"] + 1 # logging.info("Resume checkpoint from {}".format(url_or_filename)) # @main_process # def log_stats(self, stats, split_name): # if isinstance(stats, dict): # log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}} # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(log_stats) + "\n") # elif isinstance(stats, list): # pass # @main_process # def log_config(self): # with open(os.path.join(self.output_dir, "log.txt"), "a") as f: # f.write(json.dumps(self.config.to_dict(), indent=4) + "\n") @registry.register_runner("rec_runner_base")
class RecRunnerBase(RunnerBase):
13
2023-10-29 12:47:25+00:00
16k
tobagin/whakarere
whakarere/windows/whakarere.py
[ { "identifier": "ConfigManager", "path": "whakarere/managers/config.py", "snippet": "class ConfigManager:\n def __init__(self, window):\n self.window = window\n self.config = {}\n self.config_file_path = os.path.expanduser(\"~/.config/whakarere/config.json\")\n atexit.regi...
import gi from whakarere.managers.config import ConfigManager from whakarere.managers.session import SessionManager from whakarere.managers.whatsapp import WhatsAppSessionManager from whakarere.widgets.titlebar import WindowTitlebarWidget from whakarere.widgets.main_menu import MainMenuButtonWidget from whakarere.pages.session import SessionManagerPage from whakarere.pages.session2 import SessionManagerPage2 from whakarere.windows.account_wizard import AccountWizardWindow from gi.repository import Adw, Gtk, Gdk
11,746
gi.require_version('Gtk', '4.0') gi.require_version('Adw', '1') gi.require_version("Gdk", "4.0") class WhakarereMainWindow(Adw.ApplicationWindow): def __init__(self, app, debug=False, dev=False): super().__init__(application=app) self.app = app self.debug = debug self.dev = dev self.settings = Gtk.Settings.get_default() self.settings.connect("notify::gtk-theme-name", self.on_theme_changed) # Initial CSS application self.update_css_for_theme() # Set the window size and default close behavior self.set_default_size(800, 600) self.set_hide_on_close(True) # Create the config manager and load the config file self.config_manager = ConfigManager(self) self.config_manager.load_config() # Create the session manager and load the sessions self.session_manager = SessionManager(self) self.session_manager.load_sessions() # Create the whatsapp manager and initialize the active sessions
gi.require_version('Gtk', '4.0') gi.require_version('Adw', '1') gi.require_version("Gdk", "4.0") class WhakarereMainWindow(Adw.ApplicationWindow): def __init__(self, app, debug=False, dev=False): super().__init__(application=app) self.app = app self.debug = debug self.dev = dev self.settings = Gtk.Settings.get_default() self.settings.connect("notify::gtk-theme-name", self.on_theme_changed) # Initial CSS application self.update_css_for_theme() # Set the window size and default close behavior self.set_default_size(800, 600) self.set_hide_on_close(True) # Create the config manager and load the config file self.config_manager = ConfigManager(self) self.config_manager.load_config() # Create the session manager and load the sessions self.session_manager = SessionManager(self) self.session_manager.load_sessions() # Create the whatsapp manager and initialize the active sessions
self.whatsapp_manager = WhatsAppSessionManager(self)
2
2023-10-29 15:46:50+00:00
16k
KHU-VLL/CAST
dataset/datasets.py
[ { "identifier": "TubeMaskingGenerator", "path": "util_tools/masking_generator.py", "snippet": "class TubeMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n self.frames, self.height, self.width = input_size\n self.num_patches_per_frame = self.height * self.width\n ...
import os from torchvision import transforms from util_tools.transforms import * from util_tools.masking_generator import TubeMaskingGenerator from .kinetics import VideoClsDataset, VideoMAE from .ssv2 import SSVideoClsDataset from .epic import EpicVideoClsDataset
10,955
class DataAugmentationForVideoMAE(object): def __init__(self, args): self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) self.transform = transforms.Compose([ self.train_augmentation, Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) if args.mask_type == 'tube': self.masked_position_generator = TubeMaskingGenerator( args.window_size, args.mask_ratio ) def __call__(self, images): process_data, _ = self.transform(images) return process_data, self.masked_position_generator() def __repr__(self): repr = "(DataAugmentationForVideoMAE,\n" repr += " transform = %s,\n" % str(self.transform) repr += " Masked position generator = %s,\n" % str(self.masked_position_generator) repr += ")" return repr def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAE(args) dataset = VideoMAE( root=None, setting=args.data_path, video_ext='mp4', is_color=True, modality='rgb', new_length=args.num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, video_loader=True, use_decord=True, lazy_init=False) print("Data Aug = %s" % str(transform)) return dataset def build_dataset(is_train, test_mode, args): if args.data_set == 'Kinetics-400': mode = None anno_path = args.anno_path if is_train is True: mode = 'train' anno_path = os.path.join(args.anno_path, 'train.csv') elif test_mode is True: mode = 'test' anno_path = os.path.join(args.anno_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.anno_path, 'val.csv') dataset = VideoClsDataset( anno_path=anno_path, data_path=args.data_path, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 400 elif args.data_set == 'SSV2': mode = None anno_path = None if is_train is True: mode = 'train' anno_path = os.path.join(args.anno_path, 'train.csv') elif test_mode is True: mode = 'test' anno_path = os.path.join(args.anno_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.anno_path, 'val.csv')
class DataAugmentationForVideoMAE(object): def __init__(self, args): self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) self.transform = transforms.Compose([ self.train_augmentation, Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) if args.mask_type == 'tube': self.masked_position_generator = TubeMaskingGenerator( args.window_size, args.mask_ratio ) def __call__(self, images): process_data, _ = self.transform(images) return process_data, self.masked_position_generator() def __repr__(self): repr = "(DataAugmentationForVideoMAE,\n" repr += " transform = %s,\n" % str(self.transform) repr += " Masked position generator = %s,\n" % str(self.masked_position_generator) repr += ")" return repr def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAE(args) dataset = VideoMAE( root=None, setting=args.data_path, video_ext='mp4', is_color=True, modality='rgb', new_length=args.num_frames, new_step=args.sampling_rate, transform=transform, temporal_jitter=False, video_loader=True, use_decord=True, lazy_init=False) print("Data Aug = %s" % str(transform)) return dataset def build_dataset(is_train, test_mode, args): if args.data_set == 'Kinetics-400': mode = None anno_path = args.anno_path if is_train is True: mode = 'train' anno_path = os.path.join(args.anno_path, 'train.csv') elif test_mode is True: mode = 'test' anno_path = os.path.join(args.anno_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.anno_path, 'val.csv') dataset = VideoClsDataset( anno_path=anno_path, data_path=args.data_path, mode=mode, clip_len=args.num_frames, frame_sample_rate=args.sampling_rate, num_segment=1, test_num_segment=args.test_num_segment, test_num_crop=args.test_num_crop, num_crop=1 if not test_mode else 3, keep_aspect_ratio=True, crop_size=args.input_size, short_side_size=args.short_side_size, new_height=256, new_width=320, args=args) nb_classes = 400 elif args.data_set == 'SSV2': mode = None anno_path = None if is_train is True: mode = 'train' anno_path = os.path.join(args.anno_path, 'train.csv') elif test_mode is True: mode = 'test' anno_path = os.path.join(args.anno_path, 'val.csv') else: mode = 'validation' anno_path = os.path.join(args.anno_path, 'val.csv')
dataset = SSVideoClsDataset(
3
2023-10-25 07:07:05+00:00
16k
OpenProteinAI/PoET
scripts/score.py
[ { "identifier": "Uniprot21", "path": "poet/alphabets.py", "snippet": "class Uniprot21(Alphabet):\n def __init__(\n self,\n mask=False,\n include_gap=False,\n include_startstop=False,\n distinct_startstop=False,\n ):\n chars = b\"ARNDCQEGHILKMFPSTWYV\"\n ...
import argparse import itertools import string import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from pathlib import Path from typing import Callable, Optional, Sequence, TypeVar from torch.nn.utils.rnn import pad_sequence from tqdm import tqdm, trange from poet.alphabets import Uniprot21 from poet.fasta import parse_stream from poet.models.modules.packed_sequence import PackedTensorSequences from poet.models.poet import PoET from poet.msa.sampling import MSASampler, NeighborsSampler
11,936
batch_first=True, padding_value=alphabet.mask_token, ) if this_variants.size(1) < max_variant_length: this_variants = F.pad( this_variants, (0, max_variant_length - this_variants.size(1)), value=alphabet.mask_token, ) assert (this_variants == alphabet.gap_token).sum() == 0 this_variants = this_variants.cuda() logits = model.logits(this_variants[:, :-1], memory, preallocated_memory=True) targets = this_variants[:, 1:] score = -criteria.forward(logits.transpose(1, 2), targets).float().sum(dim=1) logps.append(score.cpu().numpy()) return np.hstack(logps) def get_logps_tiered_fast( msa_sequences: Sequence[np.ndarray], variants: Sequence[np.ndarray], model: PoET, batch_size: int, alphabet: Uniprot21, pbar_position: Optional[int] = None, ) -> np.ndarray: if len(msa_sequences) > 0: segment_sizes = torch.tensor([len(s) for s in msa_sequences]).cuda() msa_sequences: torch.Tensor = torch.cat( [torch.from_numpy(s).long() for s in msa_sequences] ).cuda() memory = model.embed( msa_sequences.unsqueeze(0), segment_sizes.unsqueeze(0), pbar_position=pbar_position, ) else: memory = None return _get_logps_tiered_fast( memory=memory, variants=variants, model=model, batch_size=batch_size, alphabet=alphabet, pbar_position=pbar_position, ) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt_path", type=str, default="data/poet.ckpt") parser.add_argument( "--msa_a3m_path", type=str, default="data/BLAT_ECOLX_ColabFold_2202.a3m" ) parser.add_argument( "--variants_fasta_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.fasta", ) parser.add_argument( "--output_npy_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.npy", ) parser.add_argument("--batch_size", type=int, default=8) parser.add_argument("--seed", type=int, default=188257) parser.add_argument( "--debug", action="store_true", help="run only 1/15 params from the msa sampling and filtering ensemble", ) args = parser.parse_args() args.msa_a3m_path = Path(args.msa_a3m_path) args.variants_fasta_path = Path(args.variants_fasta_path) args.output_npy_path = Path(args.output_npy_path) return args @torch.inference_mode() def main(): args = parse_args() # load model ckpt = torch.load(args.ckpt_path) model = PoET(**ckpt["hyper_parameters"]["model_spec"]["init_args"]) model.load_state_dict( {k.split(".", 1)[1]: v for k, v in ckpt["state_dict"].items()} ) del ckpt model = model.cuda().half().eval() alphabet = Uniprot21( include_gap=True, include_startstop=True, distinct_startstop=True ) jit_warmup(model, alphabet) # get variants to score variants = [ append_startstop(alphabet.encode(v), alphabet=alphabet) for v in get_seqs_from_fastalike(args.variants_fasta_path) ] # process msa msa_sequences = get_seqs_from_fastalike(args.msa_a3m_path) msa = get_encoded_msa_from_a3m_seqs(msa_sequences=msa_sequences, alphabet=alphabet) # score the variants logps = [] if not args.debug: params = list( itertools.product( [6144, 12288, 24576], [1.0, 0.95, 0.90, 0.70, 0.50], ) ) else: params = [(12288, 0.95)] for max_tokens, max_similarity in tqdm(params, desc="ensemble"): sampler = MSASampler(
ASCII_LOWERCASE_BYTES = string.ascii_lowercase.encode() PBAR_POSITION = 1 T = TypeVar("T", np.ndarray, torch.Tensor) def append_startstop(x: T, alphabet: Uniprot21) -> T: x_ndim = x.ndim assert x_ndim in {1, 2} if x_ndim == 1: x = x[None, :] if isinstance(x, torch.Tensor): empty_func = torch.empty else: empty_func = np.empty x_ = empty_func((x.shape[0], x.shape[1] + 2), dtype=x.dtype) x_[:, 0] = alphabet.start_token x_[:, -1] = alphabet.stop_token x_[:, 1:-1] = x if x_ndim == 1: x_ = x_.flatten() return x_ def get_seqs_from_fastalike(filepath: Path) -> list[bytes]: return [s for _, s in parse_stream(open(filepath, "rb"), upper=False)] def get_encoded_msa_from_a3m_seqs( msa_sequences: list[bytes], alphabet: Uniprot21 ) -> np.ndarray: return np.vstack( [ alphabet.encode(s.translate(None, delete=ASCII_LOWERCASE_BYTES)) for s in msa_sequences ] ) def sample_msa_sequences( get_sequence_fn: Callable[[int], bytes], sample_idxs: Sequence[int], max_tokens: int, alphabet: Uniprot21, shuffle: bool = True, shuffle_seed: Optional[int] = None, truncate: bool = True, ) -> list[np.ndarray]: assert alphabet.start_token != -1 assert alphabet.stop_token != -1 if not shuffle: assert shuffle_seed is None seqs, total_tokens = [], 0 for idx in sample_idxs: next_sequence = get_sequence_fn(idx) seqs.append(append_startstop(alphabet.encode(next_sequence), alphabet=alphabet)) total_tokens += len(seqs[-1]) if total_tokens > max_tokens: break # shuffle order and truncate to max tokens if shuffle: rng = ( np.random.default_rng(shuffle_seed) if shuffle_seed is not None else np.random ) final_permutation = rng.permutation(len(seqs)) else: final_permutation = np.arange(len(seqs)) final_seqs, total_tokens = [], 0 for seq in [seqs[i] for i in final_permutation]: if truncate and (total_tokens + len(seq) > max_tokens): seq = seq[: max_tokens - total_tokens] total_tokens += len(seq) final_seqs.append(seq) if total_tokens >= max_tokens: break return final_seqs def jit_warmup(embedding_model: PoET, alphabet: Uniprot21): x = b"$WAAAGH*$WAAGW*" segment_sizes = [8, 7] x = alphabet.encode(x) # encode x into the uniprot21 alphabet x = torch.from_numpy(x).long().cuda() segment_sizes = torch.tensor(segment_sizes).long().cuda() _ = embedding_model.embed(x.unsqueeze(0), segment_sizes.unsqueeze(0)) def _get_logps_tiered_fast( memory: Optional[list[PackedTensorSequences]], variants: Sequence[np.ndarray], model: PoET, batch_size: int, alphabet: Uniprot21, pbar_position: Optional[int] = None, ) -> np.ndarray: max_variant_length = max(len(v) for v in variants) memory = model.logits_allocate_memory( memory=memory, batch_size=batch_size, length=max_variant_length - 1, # discount stop token ) criteria = nn.CrossEntropyLoss(ignore_index=alphabet.mask_token, reduction="none") logps = [] if pbar_position is not None: pbar = trange( 0, len(variants), batch_size, desc=f"[{pbar_position}] decoding", leave=False, position=pbar_position, ) else: pbar = range(0, len(variants), batch_size) for start_idx in pbar: this_variants = variants[start_idx : start_idx + batch_size] this_variants = pad_sequence( [torch.from_numpy(v).long() for v in this_variants], batch_first=True, padding_value=alphabet.mask_token, ) if this_variants.size(1) < max_variant_length: this_variants = F.pad( this_variants, (0, max_variant_length - this_variants.size(1)), value=alphabet.mask_token, ) assert (this_variants == alphabet.gap_token).sum() == 0 this_variants = this_variants.cuda() logits = model.logits(this_variants[:, :-1], memory, preallocated_memory=True) targets = this_variants[:, 1:] score = -criteria.forward(logits.transpose(1, 2), targets).float().sum(dim=1) logps.append(score.cpu().numpy()) return np.hstack(logps) def get_logps_tiered_fast( msa_sequences: Sequence[np.ndarray], variants: Sequence[np.ndarray], model: PoET, batch_size: int, alphabet: Uniprot21, pbar_position: Optional[int] = None, ) -> np.ndarray: if len(msa_sequences) > 0: segment_sizes = torch.tensor([len(s) for s in msa_sequences]).cuda() msa_sequences: torch.Tensor = torch.cat( [torch.from_numpy(s).long() for s in msa_sequences] ).cuda() memory = model.embed( msa_sequences.unsqueeze(0), segment_sizes.unsqueeze(0), pbar_position=pbar_position, ) else: memory = None return _get_logps_tiered_fast( memory=memory, variants=variants, model=model, batch_size=batch_size, alphabet=alphabet, pbar_position=pbar_position, ) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--ckpt_path", type=str, default="data/poet.ckpt") parser.add_argument( "--msa_a3m_path", type=str, default="data/BLAT_ECOLX_ColabFold_2202.a3m" ) parser.add_argument( "--variants_fasta_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.fasta", ) parser.add_argument( "--output_npy_path", type=str, default="data/BLAT_ECOLX_Jacquier_2013_variants.npy", ) parser.add_argument("--batch_size", type=int, default=8) parser.add_argument("--seed", type=int, default=188257) parser.add_argument( "--debug", action="store_true", help="run only 1/15 params from the msa sampling and filtering ensemble", ) args = parser.parse_args() args.msa_a3m_path = Path(args.msa_a3m_path) args.variants_fasta_path = Path(args.variants_fasta_path) args.output_npy_path = Path(args.output_npy_path) return args @torch.inference_mode() def main(): args = parse_args() # load model ckpt = torch.load(args.ckpt_path) model = PoET(**ckpt["hyper_parameters"]["model_spec"]["init_args"]) model.load_state_dict( {k.split(".", 1)[1]: v for k, v in ckpt["state_dict"].items()} ) del ckpt model = model.cuda().half().eval() alphabet = Uniprot21( include_gap=True, include_startstop=True, distinct_startstop=True ) jit_warmup(model, alphabet) # get variants to score variants = [ append_startstop(alphabet.encode(v), alphabet=alphabet) for v in get_seqs_from_fastalike(args.variants_fasta_path) ] # process msa msa_sequences = get_seqs_from_fastalike(args.msa_a3m_path) msa = get_encoded_msa_from_a3m_seqs(msa_sequences=msa_sequences, alphabet=alphabet) # score the variants logps = [] if not args.debug: params = list( itertools.product( [6144, 12288, 24576], [1.0, 0.95, 0.90, 0.70, 0.50], ) ) else: params = [(12288, 0.95)] for max_tokens, max_similarity in tqdm(params, desc="ensemble"): sampler = MSASampler(
method=NeighborsSampler(
5
2023-10-28 01:30:26+00:00
16k
Transconnectome/SwiFT
interpretation/integrated_gradient.py
[ { "identifier": "SwinTransformer4D", "path": "project/module/models/swin4d_transformer_ver7.py", "snippet": "class SwinTransformer4D(nn.Module):\n \"\"\"\n Swin Transformer based on: \"Liu et al.,\n Swin Transformer: Hierarchical Vision Transformer using Shifted Windows\n <https://arxiv.org/...
import torch import torch.nn as nn import torch.nn.functional as F import os import json import numpy as np import torchvision import matplotlib.pyplot as plt from PIL import Image from tqdm import tqdm from matplotlib.colors import LinearSegmentedColormap from torchvision import models from torchvision import transforms from captum.attr import IntegratedGradients from captum.attr import GradientShap from captum.attr import Occlusion from captum.attr import NoiseTunnel from captum.attr import visualization as viz from matplotlib.colors import LogNorm from project.module.models.swin4d_transformer_ver7 import SwinTransformer4D from project.module.pl_classifier import LitClassifier from project.module.utils.data_module import fMRIDataModule from pathlib import Path
12,978
save_dir = # write path to save_dir jobid = # write project number neptune_project_id = # write project id. ex)user_id/project_name for i in Path(f'SwiFT/output/{neptune_project_id}/RSTOT-{jobid}/').glob('checkpt*'): ckpt_path = i ckpt = torch.load(ckpt_path, map_location='cuda:0' if torch.cuda.is_available() else 'cpu') ckpt['hyper_parameters']['image_path'] = # write path to MNI_to_TRs folder ckpt['hyper_parameters']['default_root_dir'] = # write path to use default_root_dir ckpt['hyper_parameters']['shuffle_time_sequence'] = False ckpt['hyper_parameters']['time_as_channel'] = False ckpt['hyper_parameters']['eval_batch_size'] = 1 args = ckpt['hyper_parameters'] model = LitClassifier(**args) model.cuda(0) if torch.cuda.is_available() else model model.load_state_dict(ckpt['state_dict']) integrated_gradients = IntegratedGradients(model) noise_tunnel = NoiseTunnel(integrated_gradients) kwargs = { "nt_samples": 5, "nt_samples_batch_size": 5, "nt_type": "smoothgrad_sq", # 1 #"stdevs": 0.05, "internal_batch_size": 5, }
save_dir = # write path to save_dir jobid = # write project number neptune_project_id = # write project id. ex)user_id/project_name for i in Path(f'SwiFT/output/{neptune_project_id}/RSTOT-{jobid}/').glob('checkpt*'): ckpt_path = i ckpt = torch.load(ckpt_path, map_location='cuda:0' if torch.cuda.is_available() else 'cpu') ckpt['hyper_parameters']['image_path'] = # write path to MNI_to_TRs folder ckpt['hyper_parameters']['default_root_dir'] = # write path to use default_root_dir ckpt['hyper_parameters']['shuffle_time_sequence'] = False ckpt['hyper_parameters']['time_as_channel'] = False ckpt['hyper_parameters']['eval_batch_size'] = 1 args = ckpt['hyper_parameters'] model = LitClassifier(**args) model.cuda(0) if torch.cuda.is_available() else model model.load_state_dict(ckpt['state_dict']) integrated_gradients = IntegratedGradients(model) noise_tunnel = NoiseTunnel(integrated_gradients) kwargs = { "nt_samples": 5, "nt_samples_batch_size": 5, "nt_type": "smoothgrad_sq", # 1 #"stdevs": 0.05, "internal_batch_size": 5, }
data_module = fMRIDataModule(**args)
2
2023-10-28 09:26:03+00:00
16k
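The SwiFT record above wires a Captum attribution pipeline: IntegratedGradients wrapped in NoiseTunnel with the "smoothgrad_sq" variant. A minimal, self-contained sketch of that API on a toy classifier; the model, input shape, and target index here are illustrative assumptions, not the SwiFT/fMRI setup:

import torch
import torch.nn as nn
from captum.attr import IntegratedGradients, NoiseTunnel

# Toy classifier standing in for the trained model.
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 2))
model.eval()

inputs = torch.randn(1, 3, 8, 8, requires_grad=True)

ig = IntegratedGradients(model)
nt = NoiseTunnel(ig)

# "smoothgrad_sq" averages squared attributions over noisy copies of the input.
attributions = nt.attribute(
    inputs,
    nt_samples=5,
    nt_samples_batch_size=5,
    nt_type="smoothgrad_sq",
    target=0,                 # class index to attribute
    internal_batch_size=5,    # forwarded to IntegratedGradients.attribute
)
print(attributions.shape)  # same shape as inputs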
TheCompAce/ShellSpeak
main.py
[ { "identifier": "VectorDatabase", "path": "modules/vectorDatabase.py", "snippet": "class VectorDatabase:\n def __init__(self, path, name):\n self.path = path\n self.name = name\n self.db_path = os.path.join(path, f'{name}.db')\n self.model_path = os.path.join(path, f'{name...
import json
import os
import sys
import asyncio
import json
from modules.vectorDatabase import VectorDatabase
from datetime import datetime
from modules.menus.setup_menu import save_settings, setup_menu
from modules.shellSpeak import ShellSpeak
from modules.utils import load_settings
11,060
# from modules.vectors import load_faiss_index, build_and_save_faiss_index, load_index_data, needs_index_update

def run_async_function(func, *args):
    asyncio.run(func(*args))

async def start_shell_speak(settings, base_path, vector_db):
    await main_start(settings, base_path, vector_db)

async def main_start(settings, base_path, vector_db):
    # Initialize VectorDatabase here if needed globally
    shellSpeak = ShellSpeak(settings, base_path, vector_db)
    await shellSpeak.run()

def main():
    base_path = os.path.abspath(".")
# from modules.vectors import load_faiss_index, build_and_save_faiss_index, load_index_data, needs_index_update

def run_async_function(func, *args):
    asyncio.run(func(*args))

async def start_shell_speak(settings, base_path, vector_db):
    await main_start(settings, base_path, vector_db)

async def main_start(settings, base_path, vector_db):
    # Initialize VectorDatabase here if needed globally
    shellSpeak = ShellSpeak(settings, base_path, vector_db)
    await shellSpeak.run()

def main():
    base_path = os.path.abspath(".")
settings = load_settings(base_path)
3
2023-10-31 23:35:19+00:00
16k
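The ShellSpeak entry point above only bridges a synchronous main() into async code via asyncio.run. A tiny sketch of that bridging pattern, with a placeholder coroutine standing in for ShellSpeak:

import asyncio

async def app_main(name: str) -> None:
    # Stand-in for the real async application loop.
    await asyncio.sleep(0)
    print(f"running {name}")

def run_async_function(func, *args):
    # Same idea as in the record: one event loop per invocation.
    asyncio.run(func(*args))

if __name__ == "__main__":
    run_async_function(app_main, "demo")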
qym7/SparseDiff
sparse_diffusion/diffusion_model_sparse.py
[ { "identifier": "utils", "path": "sparse_diffusion/utils.py", "snippet": "def setup_wandb(cfg):\ndef create_folders(args):\ndef to_dense(x, edge_index, edge_attr, batch, charge):\ndef to_dense_node(x, batch):\ndef to_dense_edge(edge_index, edge_attr, batch, max_num_nodes):\ndef encode_no_edge(E):\ndef t...
import time
import os
import math
import pickle
import json
import torch
import wandb
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from tqdm import tqdm
from models.conv_transformer_model import GraphTransformerConv
from diffusion.noise_schedule import (
    PredefinedNoiseScheduleDiscrete,
    MarginalUniformTransition,
)
from metrics.train_metrics import TrainLossDiscrete
from metrics.abstract_metrics import SumExceptBatchMetric, SumExceptBatchKL, NLL
from analysis.visualization import Visualizer
from sparse_diffusion import utils
from sparse_diffusion.diffusion import diffusion_utils
from sparse_diffusion.diffusion.sample_edges_utils import (
    get_computational_graph,
    mask_query_graph_from_comp_graph,
    sample_non_existing_edge_attr,
    condensed_to_matrix_index_batch,
)
from sparse_diffusion.diffusion.sample_edges import (
    sample_query_edges,
    sample_non_existing_edges_batched,
    sampled_condensed_indices_uniformly,
)
from sparse_diffusion.models.sign_pos_encoder import SignNetNodeEncoder
11,076
pred_node, pred_edge, p_s_and_t_given_0_X, p_s_and_t_given_0_E, pred_charge, p_s_and_t_given_0_charge, ): sampled_node = self.sample_sparse_node(pred_node, p_s_and_t_given_0_X).long() sampled_edge = self.sample_sparse_edge(pred_edge, p_s_and_t_given_0_E).long() if pred_charge.size(-1) > 0: sampled_charge = self.sample_sparse_node( pred_charge, p_s_and_t_given_0_charge ).long() else: sampled_charge = pred_charge return sampled_node, sampled_edge, sampled_charge def sample_p_zs_given_zt(self, s_float, t_float, data): """ Samples from zs ~ p(zs | zt). Only used during sampling. if last_step, return the graph prediction as well """ node = data.node edge_index = data.edge_index edge_attr = data.edge_attr y = data.y charge = data.charge ptr = data.ptr batch = data.batch beta_t = self.noise_schedule(t_normalized=t_float) # (bs, 1) alpha_s_bar = self.noise_schedule.get_alpha_bar(t_normalized=s_float) alpha_t_bar = self.noise_schedule.get_alpha_bar(t_normalized=t_float) # Retrieve transitions matrix Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) Qsb = self.transition_model.get_Qt_bar(alpha_s_bar, self.device) Qt = self.transition_model.get_Qt(beta_t, self.device) # Prior distribution # (N, dx, dx) p_s_and_t_given_0_X = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=node, batch=batch, Qt=Qt.X, Qsb=Qsb.X, Qtb=Qtb.X ) ) p_s_and_t_given_0_charge = None if self.use_charge: p_s_and_t_given_0_charge = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=charge, batch=batch, Qt=Qt.charge, Qsb=Qsb.charge, Qtb=Qtb.charge, ) ) # prepare sparse information num_nodes = ptr.diff().long() num_edges = (num_nodes * (num_nodes - 1) / 2).long() # If we had one graph, we will iterate on all edges for each step # we also make sure that the non existing edge number remains the same with the training process ( all_condensed_index, all_edge_batch, all_edge_mask, ) = sampled_condensed_indices_uniformly( max_condensed_value=num_edges, num_edges_to_sample=num_edges, return_mask=True, ) # double checked # number of edges used per loop for each graph num_edges_per_loop = torch.ceil(self.edge_fraction * num_edges) # (bs, ) len_loop = math.ceil(1. / self.edge_fraction) new_edge_index, new_edge_attr, new_charge = ( torch.zeros((2, 0), device=self.device, dtype=torch.long), torch.zeros(0, device=self.device), torch.zeros(0, device=self.device, dtype=torch.long), ) # create the new data for calculation sparse_noisy_data = { "node_t": node, "edge_index_t": edge_index, "edge_attr_t": edge_attr, "batch": batch, "y_t": y, "ptr": ptr, "charge_t": charge, "t_int": (t_float * self.T).int(), "t_float": t_float, } for i in range(len_loop): if self.autoregressive and i != 0: sparse_noisy_data["edge_index_t"] = new_edge_index sparse_noisy_data["edge_attr_t"] = new_edge_attr # the last loop might have less edges, we need to make sure that each loop has the same number of edges if i == len_loop - 1: edges_to_consider_mask = all_edge_mask >= ( num_edges[all_edge_batch] - num_edges_per_loop[all_edge_batch] ) else: # [0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1] # all_condensed_index is not sorted inside the graph, but it sorted for graph batch edges_to_consider_mask = torch.logical_and( all_edge_mask >= num_edges_per_loop[all_edge_batch] * i, all_edge_mask < num_edges_per_loop[all_edge_batch] * (i + 1), ) # get query edges and pass to matrix index triu_query_edge_index = all_condensed_index[edges_to_consider_mask] query_edge_batch = all_edge_batch[edges_to_consider_mask]
class DiscreteDenoisingDiffusion(pl.LightningModule): model_dtype = torch.float32 best_val_nll = 1e8 val_counter = 0 start_epoch_time = None val_iterations = None def __init__( self, cfg, dataset_infos, train_metrics, extra_features, domain_features, val_sampling_metrics, test_sampling_metrics, ): super().__init__() self.in_dims = dataset_infos.input_dims self.out_dims = dataset_infos.output_dims self.use_charge = cfg.model.use_charge and self.out_dims.charge > 1 self.node_dist = dataset_infos.nodes_dist self.extra_features = extra_features self.domain_features = domain_features self.sign_net = cfg.model.sign_net if not self.sign_net: cfg.model.sn_hidden_dim = 0 # sparse settings self.edge_fraction = cfg.model.edge_fraction self.autoregressive = cfg.model.autoregressive self.cfg = cfg self.test_variance = cfg.general.test_variance self.dataset_info = dataset_infos self.visualization_tools = Visualizer(dataset_infos) self.name = cfg.general.name self.T = cfg.model.diffusion_steps self.train_loss = TrainLossDiscrete(cfg.model.lambda_train, self.edge_fraction) self.train_metrics = train_metrics self.val_sampling_metrics = val_sampling_metrics self.test_sampling_metrics = test_sampling_metrics # TODO: transform to torchmetrics.MetricCollection self.val_nll = NLL() # self.val_metrics = torchmetrics.MetricCollection([]) self.val_X_kl = SumExceptBatchKL() self.val_E_kl = SumExceptBatchKL() self.val_X_logp = SumExceptBatchMetric() self.val_E_logp = SumExceptBatchMetric() self.best_nll = 1e8 self.best_epoch = 0 # TODO: transform to torchmetrics.MetricCollection self.test_nll = NLL() self.test_X_kl = SumExceptBatchKL() self.test_E_kl = SumExceptBatchKL() self.test_X_logp = SumExceptBatchMetric() self.test_E_logp = SumExceptBatchMetric() if self.use_charge: self.val_charge_kl = SumExceptBatchKL() self.val_charge_logp = SumExceptBatchMetric() self.test_charge_kl = SumExceptBatchKL() self.test_charge_logp = SumExceptBatchMetric() self.model = GraphTransformerConv( n_layers=cfg.model.n_layers, input_dims=self.in_dims, hidden_dims=cfg.model.hidden_dims, output_dims=self.out_dims, sn_hidden_dim=cfg.model.sn_hidden_dim, output_y=cfg.model.output_y, dropout=cfg.model.dropout ) # whether to use sign net if self.sign_net and cfg.model.extra_features == "all": self.sign_net = SignNetNodeEncoder( dataset_infos, cfg.model.sn_hidden_dim, cfg.model.num_eigenvectors ) # whether to use scale layers self.scaling_layer = cfg.model.scaling_layer ( self.node_scaling_layer, self.edge_scaling_layer, self.graph_scaling_layer, ) = self.get_scaling_layers() self.noise_schedule = PredefinedNoiseScheduleDiscrete( cfg.model.diffusion_noise_schedule, timesteps=cfg.model.diffusion_steps ) # Marginal transition node_types = self.dataset_info.node_types.float() x_marginals = node_types / torch.sum(node_types) edge_types = self.dataset_info.edge_types.float() e_marginals = edge_types / torch.sum(edge_types) if not self.use_charge: charge_marginals = node_types.new_zeros(0) else: charge_marginals = ( self.dataset_info.charge_types * node_types[:, None] ).sum(dim=0) print( f"Marginal distribution of the classes: {x_marginals} for nodes, {e_marginals} for edges" ) self.transition_model = MarginalUniformTransition( x_marginals=x_marginals, e_marginals=e_marginals, y_classes=self.out_dims.y, charge_marginals=charge_marginals, ) self.limit_dist = utils.PlaceHolder( X=x_marginals, E=e_marginals, y=torch.ones(self.out_dims.y) / self.out_dims.y, charge=charge_marginals, ) self.save_hyperparameters(ignore=["train_metrics", 
"sampling_metrics"]) self.log_every_steps = cfg.general.log_every_steps self.number_chain_steps = cfg.general.number_chain_steps def training_step(self, data, i): # The above code is using the Python debugger module `pdb` to set a breakpoint at a specific # line of code. When the code is executed, it will pause at that line and allow you to # interactively debug the program. if data.edge_index.numel() == 0: print("Found a batch with no edges. Skipping.") return # Map discrete classes to one hot encoding data = self.dataset_info.to_one_hot(data) start_time = time.time() sparse_noisy_data = self.apply_sparse_noise(data) if hasattr(self, "apply_noise_time"): self.apply_noise_time.append(round(time.time() - start_time, 2)) # Sample the query edges and build the computational graph = union(noisy graph, query edges) start_time = time.time() # print(data.ptr.diff()) triu_query_edge_index, _ = sample_query_edges( num_nodes_per_graph=data.ptr.diff(), edge_proportion=self.edge_fraction ) query_mask, comp_edge_index, comp_edge_attr = get_computational_graph( triu_query_edge_index=triu_query_edge_index, clean_edge_index=sparse_noisy_data["edge_index_t"], clean_edge_attr=sparse_noisy_data["edge_attr_t"], ) # pass sparse comp_graph to dense comp_graph for ease calculation sparse_noisy_data["comp_edge_index_t"] = comp_edge_index sparse_noisy_data["comp_edge_attr_t"] = comp_edge_attr self.sample_query_time.append(round(time.time() - start_time, 2)) sparse_pred = self.forward(sparse_noisy_data) # Compute the loss on the query edges only sparse_pred.edge_attr = sparse_pred.edge_attr[query_mask] sparse_pred.edge_index = comp_edge_index[:, query_mask] # mask true label for query edges # We have the true edge index at time 0, and the query edge index at time t. This function # merge the query edges and edge index at time 0, delete repeated one, and retune the mask # for the true attr of query edges start_time = time.time() ( query_mask2, true_comp_edge_attr, true_comp_edge_index, ) = mask_query_graph_from_comp_graph( triu_query_edge_index=triu_query_edge_index, edge_index=data.edge_index, edge_attr=data.edge_attr, num_classes=self.out_dims.E, ) query_true_edge_attr = true_comp_edge_attr[query_mask2] assert ( true_comp_edge_index[:, query_mask2] - sparse_pred.edge_index == 0 ).all() self.query_count.append(len(query_true_edge_attr)) true_data = utils.SparsePlaceHolder( node=data.x, charge=data.charge, edge_attr=query_true_edge_attr, edge_index=sparse_pred.edge_index, y=data.y, batch=data.batch, ) true_data.collapse() # Map one-hot to discrete class self.coalesce_time.append(round(time.time() - start_time, 2)) # Loss calculation start_time = time.time() loss = self.train_loss.forward( pred=sparse_pred, true_data=true_data, log=i % self.log_every_steps == 0 ) self.train_metrics( pred=sparse_pred, true_data=true_data, log=i % self.log_every_steps == 0 ) self.loss_time.append(round(time.time() - start_time, 2)) return {"loss": loss} def on_fit_start(self) -> None: print( f"Size of the input features:" f" X {self.in_dims.X}, E {self.in_dims.E}, charge {self.in_dims.charge}, y {self.in_dims.y}" ) if self.local_rank == 0: utils.setup_wandb( self.cfg ) # Initialize wandb only on one process to log metrics only once def on_train_epoch_start(self) -> None: self.print("Starting train epoch...") self.start_epoch_time = time.time() self.train_loss.reset() self.train_metrics.reset() self.query_count = [] self.apply_noise_time = [] self.extra_data_time = [] self.forward_time = [] self.sample_query_time = [] self.coalesce_time 
= [] self.loss_time = [] self.cycle_time = [] self.eigen_time = [] def on_train_epoch_end(self) -> None: epoch_loss = self.train_loss.log_epoch_metrics() self.print( f"Epoch {self.current_epoch} finished: X: {epoch_loss['train_epoch/x_CE'] :.2f} -- " f"E: {epoch_loss['train_epoch/E_CE'] :.2f} --" f"charge: {epoch_loss['train_epoch/charge_CE'] :.2f} --" f"y: {epoch_loss['train_epoch/y_CE'] :.2f}" ) self.train_metrics.log_epoch_metrics() if wandb.run: wandb.log({"epoch": self.current_epoch}, commit=False) def on_validation_epoch_start(self) -> None: val_metrics = [self.val_nll, self.val_X_kl, self.val_E_kl, self.val_X_logp, self.val_E_logp, self.val_sampling_metrics] if self.use_charge: val_metrics.extend([self.val_charge_kl, self.val_charge_logp]) for metric in val_metrics: metric.reset() def validation_step(self, data, i): data = self.dataset_info.to_one_hot(data) sparse_noisy_data = self.apply_sparse_noise(data) # Sample the query edges and build the computational graph = union(noisy graph, query edges) triu_query_edge_index, _ = sample_query_edges( num_nodes_per_graph=data.ptr.diff(), edge_proportion=self.edge_fraction ) _, comp_edge_index, comp_edge_attr = get_computational_graph( triu_query_edge_index=triu_query_edge_index, clean_edge_index=sparse_noisy_data["edge_index_t"], clean_edge_attr=sparse_noisy_data["edge_attr_t"] ) # pass sparse comp_graph to dense comp_graph for ease calculation sparse_noisy_data["comp_edge_index_t"] = comp_edge_index sparse_noisy_data["comp_edge_attr_t"] = comp_edge_attr sparse_pred = self.forward(sparse_noisy_data) # to dense dense_pred, node_mask = utils.to_dense( x=sparse_pred.node, edge_index=sparse_pred.edge_index, edge_attr=sparse_pred.edge_attr, batch=sparse_pred.batch, charge=sparse_pred.charge, ) dense_original, _ = utils.to_dense( x=data.x, edge_index=data.edge_index, edge_attr=data.edge_attr, batch=data.batch, charge=data.charge, ) noisy_data = utils.densify_noisy_data(sparse_noisy_data) nll = self.compute_val_loss( dense_pred, noisy_data, dense_original.X, dense_original.E, dense_original.y, node_mask, charge=dense_original.charge, test=False, ) return {"loss": nll} def on_validation_epoch_end(self) -> None: metrics = [ self.val_nll.compute(), self.val_X_kl.compute() * self.T, self.val_E_kl.compute() * self.T, self.val_X_logp.compute(), self.val_E_logp.compute(), ] if self.use_charge: metrics += [ self.val_charge_kl.compute() * self.T, self.val_charge_logp.compute(), ] else: metrics += [-1, -1] if self.val_nll.compute() < self.best_nll: self.best_epoch = self.current_epoch self.best_nll = self.val_nll.compute() metrics += [self.best_epoch, self.best_nll] if wandb.run: wandb.log( { "val/epoch_NLL": metrics[0], "val/X_kl": metrics[1], "val/E_kl": metrics[2], "val/X_logp": metrics[3], "val/E_logp": metrics[4], "val/charge_kl": metrics[5], "val/charge_logp": metrics[6], "val/best_nll_epoch": metrics[7], "val/best_nll": metrics[8], }, commit=False, ) self.print( f"Epoch {self.current_epoch}: Val NLL {metrics[0] :.2f} -- Val Atom type KL {metrics[1] :.2f} -- ", f"Val Edge type KL: {metrics[2] :.2f}", ) # Log val nll with default Lightning logger, so it can be monitored by checkpoint callback val_nll = metrics[0] self.log("val/epoch_NLL", val_nll, sync_dist=True) if val_nll < self.best_val_nll: self.best_val_nll = val_nll self.print( "Val loss: %.4f \t Best val loss: %.4f\n" % (val_nll, self.best_val_nll) ) self.val_counter += 1 print("Starting to sample") if self.val_counter % self.cfg.general.sample_every_val == 0: start = time.time() 
samples_left_to_generate = self.cfg.general.samples_to_generate samples_left_to_save = self.cfg.general.samples_to_save chains_left_to_save = self.cfg.general.chains_to_save # multi gpu operation samples_left_to_generate = math.ceil(samples_left_to_generate / max(self._trainer.num_devices, 1)) self.print( f"Samples to generate: {samples_left_to_generate} for each of the {max(self._trainer.num_devices, 1)} devices" ) print(f"Sampling start on GR{self.global_rank}") print('multi-gpu metrics for uniqueness is not accurate in the validation step.') generated_graphs = [] ident = 0 while samples_left_to_generate > 0: bs = self.cfg.train.batch_size * 2 to_generate = min(samples_left_to_generate, bs) to_save = min(samples_left_to_save, bs) chains_save = min(chains_left_to_save, bs) sampled_batch = self.sample_batch( batch_id=ident, batch_size=to_generate, save_final=to_save, keep_chain=chains_save, number_chain_steps=self.number_chain_steps, ) generated_graphs.append(sampled_batch) ident += to_generate samples_left_to_save -= to_save samples_left_to_generate -= to_generate chains_left_to_save -= chains_save generated_graphs = utils.concat_sparse_graphs(generated_graphs) print( f"Sampled {generated_graphs.batch.max().item()+1} batches on local rank {self.local_rank}. ", "Sampling took {time.time() - start:.2f} seconds\n" ) print("Computing sampling metrics...") self.val_sampling_metrics.compute_all_metrics( generated_graphs, self.current_epoch, local_rank=self.local_rank ) def on_test_epoch_start(self) -> None: print("Starting test...") if self.local_rank == 0: utils.setup_wandb( self.cfg ) # Initialize wandb only on one process to log metrics only once test_metrics = [self.test_nll, self.test_X_kl, self.test_E_kl, self.test_X_logp, self.test_E_logp, self.test_sampling_metrics] if self.use_charge: test_metrics.extend([self.test_charge_kl, self.test_charge_logp]) for metric in test_metrics: metric.reset() def test_step(self, data, i): pass def on_test_epoch_end(self) -> None: """Measure likelihood on a test set and compute stability metrics.""" if self.cfg.general.generated_path: self.print("Loading generated samples...") samples = np.load(self.cfg.general.generated_path) with open(self.cfg.general.generated_path, "rb") as f: samples = pickle.load(f) else: samples_left_to_generate = self.cfg.general.final_model_samples_to_generate samples_left_to_save = self.cfg.general.final_model_samples_to_save chains_left_to_save = self.cfg.general.final_model_chains_to_save # multi gpu operation samples_left_to_generate = math.ceil(samples_left_to_generate / max(self._trainer.num_devices, 1)) self.print( f"Samples to generate: {samples_left_to_generate} for each of the {max(self._trainer.num_devices, 1)} devices" ) print(f"Sampling start on GR{self.global_rank}") samples = [] id = 0 while samples_left_to_generate > 0: print( f"Samples left to generate: {samples_left_to_generate}/" f"{self.cfg.general.final_model_samples_to_generate}", end="", flush=True, ) bs = self.cfg.train.batch_size * 2 to_generate = min(samples_left_to_generate, bs) to_save = min(samples_left_to_save, bs) chains_save = min(chains_left_to_save, bs) sampled_batch = self.sample_batch( batch_id=id, batch_size=to_generate, num_nodes=None, save_final=to_save, keep_chain=chains_save, number_chain_steps=self.number_chain_steps, ) samples.append(sampled_batch) id += to_generate samples_left_to_save -= to_save samples_left_to_generate -= to_generate chains_left_to_save -= chains_save print("Saving the generated graphs") samples = 
utils.concat_sparse_graphs(samples) filename = f"generated_samples1.txt" # Save the samples list as pickle to a file that depends on the local rank # This is needed to avoid overwriting the same file on different GPUs with open(f"generated_samples_rank{self.local_rank}.pkl", "wb") as f: pickle.dump(samples, f) # This line is used to sync between gpus self._trainer.strategy.barrier() for i in range(2, 10): if os.path.exists(filename): filename = f"generated_samples{i}.txt" else: break with open(filename, "w") as f: for i in range(samples.batch.max().item() + 1): atoms = samples.node[samples.batch == i] f.write(f"N={atoms.shape[0]}\n") atoms = atoms.tolist() f.write("X: \n") for at in atoms: f.write(f"{at} ") f.write("\n") f.write("E: \n") bonds = samples.edge_attr[samples.batch[samples.edge_index[0]] == i] for bond in bonds: f.write(f"{bond} ") f.write("\n") print("Saved.") print("Computing sampling metrics...") # Load the pickles of the other GPUs samples = [] for i in range(self._trainer.num_devices): with open(f"generated_samples_rank{i}.pkl", "rb") as f: samples.append(pickle.load(f)) samples = utils.concat_sparse_graphs(samples) print('saving all samples') with open(f"generated_samples.pkl", "wb") as f: pickle.dump(samples, f) if self.test_variance == 1: to_log, _ = self.test_sampling_metrics.compute_all_metrics( samples, self.current_epoch, self.local_rank ) # save results for testing print('saving results for testing') current_path = os.getcwd() res_path = os.path.join( current_path, f"test_epoch{self.current_epoch}.json", ) with open(res_path, 'w') as file: # Convert the dictionary to a JSON string and write it to the file json.dump(to_log, file) else: to_log = {} for i in range(self.test_variance): start_idx = int(self.cfg.general.final_model_samples_to_generate / self.test_variance * i) end_idx = int(self.cfg.general.final_model_samples_to_generate / self.test_variance * (i + 1)) cur_samples = utils.split_samples(samples, start_idx, end_idx) cur_to_log, _ = self.test_sampling_metrics.compute_all_metrics(cur_samples, self.current_epoch, self.local_rank) if i == 0: to_log = {i: [cur_to_log[i]] for i in cur_to_log} else: to_log = {i: to_log[i].append(cur_to_log[i]) for i in cur_to_log} # get the variance and mean value of the metrics final_to_log = {i: [np.mean(i), np.var(i)] for i in to_log} to_log.update(final_to_log) # save results for testing print('saving results for testing') current_path = os.getcwd() res_path = os.path.join( current_path, f"test_epoch{self.current_epoch}_fold{self.test_variance}.json", ) with open(res_path, 'w') as file: # Convert the dictionary to a JSON string and write it to the file json.dump(to_log, file) print("Test sampling metrics computed.") def apply_sparse_noise(self, data): """Sample noise and apply it to the data.""" bs = int(data.batch.max() + 1) t_int = torch.randint( 1, self.T + 1, size=(bs, 1), device=self.device ).float() # (bs, 1) s_int = t_int - 1 t_float = t_int / self.T s_float = s_int / self.T # beta_t and alpha_s_bar are used for denoising/loss computation beta_t = self.noise_schedule(t_normalized=t_float) # (bs, 1) alpha_s_bar = self.noise_schedule.get_alpha_bar(t_normalized=s_float) # (bs, 1) alpha_t_bar = self.noise_schedule.get_alpha_bar(t_normalized=t_float) # (bs, 1) Qtb = self.transition_model.get_Qt_bar( alpha_t_bar, device=self.device ) # (bs, dx_in, dx_out), (bs, de_in, de_out) assert (abs(Qtb.X.sum(dim=2) - 1.0) < 1e-4).all(), Qtb.X.sum(dim=2) - 1 assert (abs(Qtb.E.sum(dim=2) - 1.0) < 1e-4).all() # Compute transition 
probabilities # get charge distribution if self.use_charge: prob_charge = data.charge.unsqueeze(1) @ Qtb.charge[data.batch] charge_t = prob_charge.squeeze(1).multinomial(1).flatten() # (N, ) charge_t = F.one_hot(charge_t, num_classes=self.out_dims.charge) else: charge_t = data.charge # Diffuse sparse nodes and sample sparse node labels probN = data.x.unsqueeze(1) @ Qtb.X[data.batch] # (N, dx) node_t = probN.squeeze(1).multinomial(1).flatten() # (N, ) # count node numbers and edge numbers for existing edges for each graph num_nodes = data.ptr.diff().long() batch_edge = data.batch[data.edge_index[0]] num_edges = torch.zeros(num_nodes.shape).to(self.device) unique, counts = torch.unique(batch_edge, sorted=True, return_counts=True) num_edges[unique] = counts.float() # count number of non-existing edges for each graph num_neg_edge = ((num_nodes - 1) * num_nodes - num_edges) / 2 # (bs, ) # Step1: diffuse on existing edges # get edges defined in the top triangle of the adjacency matrix dir_edge_index, dir_edge_attr = utils.undirected_to_directed( data.edge_index, data.edge_attr ) batch_edge = data.batch[dir_edge_index[0]] batch_Qtb = Qtb.E[batch_edge] probE = dir_edge_attr.unsqueeze(1) @ batch_Qtb dir_edge_attr = probE.squeeze(1).multinomial(1).flatten() # Step2: diffuse on non-existing edges # get number of new edges according to Qtb emerge_prob = Qtb.E[:, 0, 1:].sum(-1) # (bs, ) num_emerge_edges = ( torch.distributions.binomial.Binomial(num_neg_edge, emerge_prob) .sample() .int() ) # combine existing and non-existing edges (both are directed, i.e. triu) if num_emerge_edges.max() > 0: # sample non-existing edges neg_edge_index = sample_non_existing_edges_batched( num_edges_to_sample=num_emerge_edges, existing_edge_index=dir_edge_index, num_nodes=num_nodes, batch=data.batch, ) neg_edge_attr = sample_non_existing_edge_attr( query_edges_dist_batch=Qtb.E[:, 0, 1:], num_edges_to_sample=num_emerge_edges, ) E_t_attr = torch.hstack([dir_edge_attr, neg_edge_attr]) E_t_index = torch.hstack([dir_edge_index, neg_edge_index]) else: E_t_attr = dir_edge_attr E_t_index = dir_edge_index # mask non-existing edges mask = E_t_attr != 0 E_t_attr = E_t_attr[mask] E_t_index = E_t_index[:, mask] E_t_index, E_t_attr = utils.to_undirected(E_t_index, E_t_attr) E_t_attr = F.one_hot(E_t_attr, num_classes=self.out_dims.E) node_t = F.one_hot(node_t, num_classes=self.out_dims.X) sparse_noisy_data = { "t_int": t_int, "t_float": t_float, "beta_t": beta_t, "alpha_s_bar": alpha_s_bar, "alpha_t_bar": alpha_t_bar, "node_t": node_t, "edge_index_t": E_t_index, "edge_attr_t": E_t_attr, "comp_edge_index_t": None, "comp_edge_attr_t": None, # computational graph "y_t": data.y, "batch": data.batch, "ptr": data.ptr, "charge_t": charge_t, } return sparse_noisy_data def compute_val_loss(self, pred, noisy_data, X, E, y, node_mask, charge, test): """Computes an estimator for the variational lower bound. pred: (batch_size, n, total_features) noisy_data: dict X, E, y : (bs, n, dx), (bs, n, n, de), (bs, dy) node_mask : (bs, n) Output: nll (size 1) """ t = noisy_data["t_float"] # 1. N = node_mask.sum(1).long() log_pN = self.node_dist.log_prob(N) # 2. The KL between q(z_T | x) and p(z_T) = Uniform(1/num_classes). Should be close to zero. kl_prior = self.kl_prior(X, E, node_mask, charge=charge) # 3. 
Diffusion loss loss_all_t = self.compute_Lt( X, E, y, charge, pred, noisy_data, node_mask, test=test ) # Combine terms nlls = - log_pN + kl_prior + loss_all_t assert (~nlls.isnan()).all(), f"NLLs contain NaNs: {nlls}" assert len(nlls.shape) == 1, f"{nlls.shape} has more than only batch dim." # Update NLL metric object and return batch nll nll = (self.test_nll if test else self.val_nll)(nlls) # Average over the batch if wandb.run: wandb.log( { "kl prior": kl_prior.mean(), "Estimator loss terms": loss_all_t.mean(), "log_pn": log_pN.mean(), "val_nll": nll, "epoch": self.current_epoch }, commit=False, ) return nll def kl_prior(self, X, E, node_mask, charge): """Computes the KL between q(z1 | x) and the prior p(z1) = Normal(0, 1). This is essentially a lot of work for something that is in practice negligible in the loss. However, you compute it so that you see it when you've made a mistake in your noise schedule. """ # Compute the last alpha value, alpha_T. ones = torch.ones((X.size(0), 1), device=X.device) Ts = self.T * ones alpha_t_bar = self.noise_schedule.get_alpha_bar(t_int=Ts) # (bs, 1) Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) # Compute transition probabilities probX = X @ Qtb.X # (bs, n, dx_out) probE = E @ Qtb.E.unsqueeze(1) # (bs, n, n, de_out) assert probX.shape == X.shape bs, n, _ = probX.shape limit_X = self.limit_dist.X[None, None, :].expand(bs, n, -1).type_as(probX) limit_E = ( self.limit_dist.E[None, None, None, :].expand(bs, n, n, -1).type_as(probE) ) if self.use_charge: prob_charge = charge @ Qtb.charge # (bs, n, de_out) limit_charge = ( self.limit_dist.charge[None, None, :] .expand(bs, n, -1) .type_as(prob_charge) ) limit_charge = limit_charge.clone() else: prob_charge = limit_charge = None # Make sure that masked rows do not contribute to the loss ( limit_dist_X, limit_dist_E, probX, probE, limit_dist_charge, prob_charge, ) = diffusion_utils.mask_distributions( true_X=limit_X.clone(), true_E=limit_E.clone(), pred_X=probX, pred_E=probE, node_mask=node_mask, true_charge=limit_charge, pred_charge=prob_charge, ) kl_distance_X = F.kl_div( input=probX.log(), target=limit_dist_X, reduction="none" ) kl_distance_E = F.kl_div( input=probE.log(), target=limit_dist_E, reduction="none" ) # not all edges are used for loss calculation E_mask = torch.logical_or( kl_distance_E.sum(-1).isnan(), kl_distance_E.sum(-1).isinf() ) kl_distance_E[E_mask] = 0 X_mask = torch.logical_or( kl_distance_X.sum(-1).isnan(), kl_distance_X.sum(-1).isinf() ) kl_distance_X[X_mask] = 0 loss = diffusion_utils.sum_except_batch( kl_distance_X ) + diffusion_utils.sum_except_batch(kl_distance_E) # The above code is using the Python debugger module `pdb` to set a breakpoint in the code. # When the code is executed, it will pause at this line and allow you to interactively debug # the program. 
if self.use_charge: kl_distance_charge = F.kl_div( input=prob_charge.log(), target=limit_dist_charge, reduction="none" ) kl_distance_charge[X_mask] = 0 loss = loss + diffusion_utils.sum_except_batch(kl_distance_charge) assert (~loss.isnan()).any() return loss def compute_Lt(self, X, E, y, charge, pred, noisy_data, node_mask, test): pred_probs_X = F.softmax(pred.X, dim=-1) pred_probs_E = F.softmax(pred.E, dim=-1) if self.use_charge: pred_probs_charge = F.softmax(pred.charge, dim=-1) else: pred_probs_charge = None charge = None Qtb = self.transition_model.get_Qt_bar(noisy_data["alpha_t_bar"], self.device) Qsb = self.transition_model.get_Qt_bar(noisy_data["alpha_s_bar"], self.device) Qt = self.transition_model.get_Qt(noisy_data["beta_t"], self.device) # Compute distributions to compare with KL bs, n, d = X.shape prob_true = diffusion_utils.posterior_distributions( X=X, E=E, X_t=noisy_data["X_t"], E_t=noisy_data["E_t"], charge=charge, charge_t=noisy_data["charge_t"], y_t=noisy_data["y_t"], Qt=Qt, Qsb=Qsb, Qtb=Qtb, ) prob_true.E = prob_true.E.reshape((bs, n, n, -1)) prob_pred = diffusion_utils.posterior_distributions( X=pred_probs_X, E=pred_probs_E, X_t=noisy_data["X_t"], E_t=noisy_data["E_t"], charge=pred_probs_charge, charge_t=noisy_data["charge_t"], y_t=noisy_data["y_t"], Qt=Qt, Qsb=Qsb, Qtb=Qtb, ) prob_pred.E = prob_pred.E.reshape((bs, n, n, -1)) # Reshape and filter masked rows ( prob_true_X, prob_true_E, prob_pred.X, prob_pred.E, prob_true.charge, prob_pred.charge, ) = diffusion_utils.mask_distributions( true_X=prob_true.X, true_E=prob_true.E, pred_X=prob_pred.X, pred_E=prob_pred.E, node_mask=node_mask, true_charge=prob_true.charge, pred_charge=prob_pred.charge, ) kl_x = (self.test_X_kl if test else self.val_X_kl)(prob_true_X, torch.log(prob_pred.X)) kl_e = (self.test_E_kl if test else self.val_E_kl)(prob_true_E, torch.log(prob_pred.E)) assert (~(kl_x + kl_e).isnan()).any() loss = kl_x + kl_e if self.use_charge: kl_charge = (self.test_charge_kl if test else self.val_charge_kl)( prob_true.charge, torch.log(prob_pred.charge) ) assert (~(kl_charge).isnan()).any() loss = loss + kl_charge return self.T * loss def reconstruction_logp(self, t, X, E, node_mask, charge): # Compute noise values for t = 0. 
t_zeros = torch.zeros_like(t) beta_0 = self.noise_schedule(t_zeros) Q0 = self.transition_model.get_Qt(beta_t=beta_0, device=self.device) probX0 = X @ Q0.X # (bs, n, dx_out) probE0 = E @ Q0.E.unsqueeze(1) # (bs, n, n, de_out) prob_charge0 = None if self.use_charge: prob_charge0 = charge @ Q0.charge sampled0 = diffusion_utils.sample_discrete_features( probX=probX0, probE=probE0, node_mask=node_mask, prob_charge=prob_charge0 ) X0 = F.one_hot(sampled0.X, num_classes=self.out_dims.X).float() E0 = F.one_hot(sampled0.E, num_classes=self.out_dims.E).float() y0 = sampled0.y assert (X.shape == X0.shape) and (E.shape == E0.shape) charge0 = X0.new_zeros((*X0.shape[:-1], 0)) if self.use_charge: charge0 = F.one_hot( sampled0.charge, num_classes=self.out_dims.charge ).float() sampled_0 = utils.PlaceHolder(X=X0, E=E0, y=y0, charge=charge0).mask(node_mask) # Predictions noisy_data = { "X_t": sampled_0.X, "E_t": sampled_0.E, "y_t": sampled_0.y, "node_mask": node_mask, "t_int": torch.zeros((X0.shape[0], 1), dtype=torch.long).to(self.device), "t_float": torch.zeros((X0.shape[0], 1), dtype=torch.float).to(self.device), "charge_t": sampled_0.charge, } sparse_noisy_data = utils.to_sparse( noisy_data["X_t"], noisy_data["E_t"], noisy_data["y_t"], node_mask, charge=noisy_data["charge_t"], ) noisy_data.update(sparse_noisy_data) noisy_data["comp_edge_index_t"] = sparse_noisy_data["edge_index_t"] noisy_data["comp_edge_attr_t"] = sparse_noisy_data["edge_attr_t"] pred0 = self.forward(noisy_data) pred0, _ = utils.to_dense( pred0.node, pred0.edge_index, pred0.edge_attr, pred0.batch, pred0.charge ) # Normalize predictions probX0 = F.softmax(pred0.X, dim=-1) probE0 = F.softmax(pred0.E, dim=-1) # Set masked rows to arbitrary values that don't contribute to loss probX0[~node_mask] = torch.ones(self.out_dims.X).type_as(probX0) probE0[~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2))] = torch.ones( self.out_dims.E ).type_as(probE0) diag_mask = torch.eye(probE0.size(1)).type_as(probE0).bool() diag_mask = diag_mask.unsqueeze(0).expand(probE0.size(0), -1, -1) probE0[diag_mask] = torch.ones(self.out_dims.E).type_as(probE0) assert (~probX0.isnan()).any() assert (~probE0.isnan()).any() prob_charge0 = charge if self.use_charge: prob_charge0 = F.softmax(pred0.charge, dim=-1) prob_charge0[~node_mask] = torch.ones(self.out_dims.charge).type_as( prob_charge0 ) assert (~prob_charge0.isnan()).any() return utils.PlaceHolder(X=probX0, E=probE0, y=None, charge=prob_charge0) def forward_sparse(self, sparse_noisy_data): start_time = time.time() node = sparse_noisy_data["node_t"] edge_attr = sparse_noisy_data["edge_attr_t"].float() edge_index = sparse_noisy_data["edge_index_t"].to(torch.int64) y = sparse_noisy_data["y_t"] batch = sparse_noisy_data["batch"].long() if hasattr(self, "forward_time"): self.forward_time.append(round(time.time() - start_time, 2)) return self.model(node, edge_attr, edge_index, y, batch) def forward(self, noisy_data): """ noisy data contains: node_t, comp_edge_index_t, comp_edge_attr_t, batch """ # build the sparse_noisy_data for the forward function of the sparse model start_time = time.time() sparse_noisy_data = self.compute_extra_data(sparse_noisy_data=noisy_data) if self.sign_net and self.cfg.model.extra_features == "all": x = self.sign_net( sparse_noisy_data["node_t"], sparse_noisy_data["edge_index_t"], sparse_noisy_data["batch"], ) sparse_noisy_data["node_t"] = torch.hstack( [sparse_noisy_data["node_t"], x] ) if hasattr(self, "extra_data_time"): self.extra_data_time.append(round(time.time() - start_time, 2)) return 
self.forward_sparse(sparse_noisy_data) @torch.no_grad() def sample_batch( self, batch_id: int, batch_size: int, keep_chain: int, number_chain_steps: int, save_final: int, num_nodes=None, ): """ :param batch_id: int :param batch_size: int :param num_nodes: int, <int>tensor (batch_size) (optional) for specifying number of nodes :param save_final: int: number of predictions to save to file :param keep_chain: int: number of chains to save to file :param keep_chain_steps: number of timesteps to save for each chain :return: molecule_list. Each element of this list is a tuple (node_types, charge, positions) """ if num_nodes is None: num_nodes = self.node_dist.sample_n(batch_size, self.device) elif type(num_nodes) == int: num_nodes = num_nodes * torch.ones( batch_size, device=self.device, dtype=torch.int ) else: assert isinstance(num_nodes, torch.Tensor) num_nodes = num_nodes num_max = torch.max(num_nodes) # Build the masks arange = ( torch.arange(num_max, device=self.device) .unsqueeze(0) .expand(batch_size, -1) ) node_mask = arange < num_nodes.unsqueeze(1) # Sample noise -- z has size ( num_samples, num_nodes, num_features) sparse_sampled_data = diffusion_utils.sample_sparse_discrete_feature_noise( limit_dist=self.limit_dist, node_mask=node_mask ) assert number_chain_steps < self.T chain = utils.SparseChainPlaceHolder(keep_chain=keep_chain) # Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1. for s_int in tqdm(reversed(range(self.T)), total=self.T): s_array = (s_int * torch.ones((batch_size, 1))).to(self.device) t_array = s_array + 1 s_norm = s_array / self.T t_norm = t_array / self.T # Sample z_s sparse_sampled_data = self.sample_p_zs_given_zt( s_norm, t_norm, sparse_sampled_data ) # keep_chain can be very small, e.g., 1 if ((s_int * number_chain_steps) % self.T == 0) and (keep_chain != 0): chain.append(sparse_sampled_data) # get generated graphs generated_graphs = sparse_sampled_data.to_device("cpu") generated_graphs.edge_attr = sparse_sampled_data.edge_attr.argmax(-1) generated_graphs.node = sparse_sampled_data.node.argmax(-1) if self.use_charge: generated_graphs.charge = sparse_sampled_data.charge.argmax(-1) - 1 if self.visualization_tools is not None: current_path = os.getcwd() # Visualize chains if keep_chain > 0: print("Visualizing chains...") chain_path = os.path.join( current_path, f"chains/{self.cfg.general.name}/" f"epoch{self.current_epoch}/", ) try: _ = self.visualization_tools.visualize_chain( chain_path, batch_id, chain, local_rank=self.local_rank ) except OSError: print("Warn: image chains failed to be visualized ") # Visualize the final molecules print("\nVisualizing molecules...") result_path = os.path.join( current_path, f"graphs/{self.name}/epoch{self.current_epoch}_b{batch_id}/", ) try: self.visualization_tools.visualize( result_path, generated_graphs, save_final, local_rank=self.local_rank, ) except OSError: print("Warn: image failed to be visualized ") print("Done.") return generated_graphs def sample_node(self, pred_X, p_s_and_t_given_0_X, node_mask): # Normalize predictions pred_X = F.softmax(pred_X, dim=-1) # bs, n, d0 # Dim of these two tensors: bs, N, d0, d_t-1 weighted_X = pred_X.unsqueeze(-1) * p_s_and_t_given_0_X # bs, n, d0, d_t-1 unnormalized_prob_X = weighted_X.sum(dim=2) # bs, n, d_t-1 unnormalized_prob_X[torch.sum(unnormalized_prob_X, dim=-1) == 0] = 1e-5 prob_X = unnormalized_prob_X / torch.sum( unnormalized_prob_X, dim=-1, keepdim=True ) # bs, n, d_t assert ((prob_X.sum(dim=-1) - 1).abs() < 1e-4).all() X_t = 
diffusion_utils.sample_discrete_node_features(prob_X, node_mask) return X_t, prob_X def sample_edge(self, pred_E, p_s_and_t_given_0_E, node_mask): # Normalize predictions bs, n, n, de = pred_E.shape pred_E = F.softmax(pred_E, dim=-1) # bs, n, n, d0 pred_E = pred_E.reshape((bs, -1, pred_E.shape[-1])) weighted_E = pred_E.unsqueeze(-1) * p_s_and_t_given_0_E # bs, N, d0, d_t-1 unnormalized_prob_E = weighted_E.sum(dim=-2) unnormalized_prob_E[torch.sum(unnormalized_prob_E, dim=-1) == 0] = 1e-5 prob_E = unnormalized_prob_E / torch.sum( unnormalized_prob_E, dim=-1, keepdim=True ) prob_E = prob_E.reshape(bs, n, n, de) assert ((prob_E.sum(dim=-1) - 1).abs() < 1e-4).all() E_t = diffusion_utils.sample_discrete_edge_features(prob_E, node_mask) return E_t, prob_E def sample_node_edge( self, pred, p_s_and_t_given_0_X, p_s_and_t_given_0_E, node_mask ): _, prob_X = self.sample_node(pred.X, p_s_and_t_given_0_X, node_mask) _, prob_E = self.sample_edge(pred.E, p_s_and_t_given_0_E, node_mask) sampled_s = diffusion_utils.sample_discrete_features( prob_X, prob_E, node_mask=node_mask ) return sampled_s def sample_sparse_node(self, pred_node, p_s_and_t_given_0_X): # Normalize predictions pred_X = F.softmax(pred_node, dim=-1) # N, dx # Dim of the second tensor: N, dx, dx weighted_X = pred_X.unsqueeze(-1) * p_s_and_t_given_0_X # N, dx, dx unnormalized_prob_X = weighted_X.sum(dim=1) # N, dx unnormalized_prob_X[ torch.sum(unnormalized_prob_X, dim=-1) == 0 ] = 1e-5 # TODO: delete/masking? prob_X = unnormalized_prob_X / torch.sum( unnormalized_prob_X, dim=-1, keepdim=True ) # N, dx assert ((prob_X.sum(dim=-1) - 1).abs() < 1e-4).all() X_t = prob_X.multinomial(1)[:, 0] return X_t def sample_sparse_edge(self, pred_edge, p_s_and_t_given_0_E): # Normalize predictions pred_E = F.softmax(pred_edge, dim=-1) # N, d0 # Dim of the second tensor: N, d0, dt-1 weighted_E = pred_E.unsqueeze(-1) * p_s_and_t_given_0_E # N, d0, dt-1 unnormalized_prob_E = weighted_E.sum(dim=1) # N, dt-1 unnormalized_prob_E[torch.sum(unnormalized_prob_E, dim=-1) == 0] = 1e-5 prob_E = unnormalized_prob_E / torch.sum( unnormalized_prob_E, dim=-1, keepdim=True ) assert ((prob_E.sum(dim=-1) - 1).abs() < 1e-4).all() E_t = prob_E.multinomial(1)[:, 0] return E_t def sample_sparse_node_edge( self, pred_node, pred_edge, p_s_and_t_given_0_X, p_s_and_t_given_0_E, pred_charge, p_s_and_t_given_0_charge, ): sampled_node = self.sample_sparse_node(pred_node, p_s_and_t_given_0_X).long() sampled_edge = self.sample_sparse_edge(pred_edge, p_s_and_t_given_0_E).long() if pred_charge.size(-1) > 0: sampled_charge = self.sample_sparse_node( pred_charge, p_s_and_t_given_0_charge ).long() else: sampled_charge = pred_charge return sampled_node, sampled_edge, sampled_charge def sample_p_zs_given_zt(self, s_float, t_float, data): """ Samples from zs ~ p(zs | zt). Only used during sampling. 
if last_step, return the graph prediction as well """ node = data.node edge_index = data.edge_index edge_attr = data.edge_attr y = data.y charge = data.charge ptr = data.ptr batch = data.batch beta_t = self.noise_schedule(t_normalized=t_float) # (bs, 1) alpha_s_bar = self.noise_schedule.get_alpha_bar(t_normalized=s_float) alpha_t_bar = self.noise_schedule.get_alpha_bar(t_normalized=t_float) # Retrieve transitions matrix Qtb = self.transition_model.get_Qt_bar(alpha_t_bar, self.device) Qsb = self.transition_model.get_Qt_bar(alpha_s_bar, self.device) Qt = self.transition_model.get_Qt(beta_t, self.device) # Prior distribution # (N, dx, dx) p_s_and_t_given_0_X = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=node, batch=batch, Qt=Qt.X, Qsb=Qsb.X, Qtb=Qtb.X ) ) p_s_and_t_given_0_charge = None if self.use_charge: p_s_and_t_given_0_charge = ( diffusion_utils.compute_sparse_batched_over0_posterior_distribution( input_data=charge, batch=batch, Qt=Qt.charge, Qsb=Qsb.charge, Qtb=Qtb.charge, ) ) # prepare sparse information num_nodes = ptr.diff().long() num_edges = (num_nodes * (num_nodes - 1) / 2).long() # If we had one graph, we will iterate on all edges for each step # we also make sure that the non existing edge number remains the same with the training process ( all_condensed_index, all_edge_batch, all_edge_mask, ) = sampled_condensed_indices_uniformly( max_condensed_value=num_edges, num_edges_to_sample=num_edges, return_mask=True, ) # double checked # number of edges used per loop for each graph num_edges_per_loop = torch.ceil(self.edge_fraction * num_edges) # (bs, ) len_loop = math.ceil(1. / self.edge_fraction) new_edge_index, new_edge_attr, new_charge = ( torch.zeros((2, 0), device=self.device, dtype=torch.long), torch.zeros(0, device=self.device), torch.zeros(0, device=self.device, dtype=torch.long), ) # create the new data for calculation sparse_noisy_data = { "node_t": node, "edge_index_t": edge_index, "edge_attr_t": edge_attr, "batch": batch, "y_t": y, "ptr": ptr, "charge_t": charge, "t_int": (t_float * self.T).int(), "t_float": t_float, } for i in range(len_loop): if self.autoregressive and i != 0: sparse_noisy_data["edge_index_t"] = new_edge_index sparse_noisy_data["edge_attr_t"] = new_edge_attr # the last loop might have less edges, we need to make sure that each loop has the same number of edges if i == len_loop - 1: edges_to_consider_mask = all_edge_mask >= ( num_edges[all_edge_batch] - num_edges_per_loop[all_edge_batch] ) else: # [0, 3, 2, 1, 0, 3, 2, 1, 0, 3, 2, 1] # all_condensed_index is not sorted inside the graph, but it sorted for graph batch edges_to_consider_mask = torch.logical_and( all_edge_mask >= num_edges_per_loop[all_edge_batch] * i, all_edge_mask < num_edges_per_loop[all_edge_batch] * (i + 1), ) # get query edges and pass to matrix index triu_query_edge_index = all_condensed_index[edges_to_consider_mask] query_edge_batch = all_edge_batch[edges_to_consider_mask]
triu_query_edge_index = condensed_to_matrix_index_batch(
5
2023-10-30 12:12:16+00:00
16k
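Several sampling helpers in the SparseDiff record above follow the same step: normalize unnormalized per-node class weights row-wise, then draw one category per row with multinomial. A small sketch of just that step, using toy probabilities rather than the diffusion posterior terms:

import torch

# Unnormalized per-node class weights (5 nodes, 4 classes).
unnormalized = torch.rand(5, 4)

# Guard against all-zero rows before normalizing, as the record's helpers do.
unnormalized[unnormalized.sum(dim=-1) == 0] = 1e-5
prob = unnormalized / unnormalized.sum(dim=-1, keepdim=True)

assert ((prob.sum(dim=-1) - 1).abs() < 1e-4).all()

# One sampled class index per row.
sampled = prob.multinomial(1)[:, 0]
print(sampled)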
akekic/causal-component-analysis
experiments/nonparam_ident/main.py
[ { "identifier": "DGP", "path": "config.py", "snippet": "DGP = {\n \"graph-4-0\": {\n \"num_causal_variables\": 4, # N\n \"adj_matrix\": np.array(\n [[0, 1, 1, 1], [0, 0, 1, 1], [0, 0, 0, 1], [0, 0, 0, 0]]\n ),\n \"int_targets\": torch.tensor(\n [[0, ...
import argparse
import os
import pytorch_lightning as pl
import torch
from pathlib import Path
from pytorch_lightning.loggers import WandbLogger
from config import DGP
from data_generator import MultiEnvDataModule, make_multi_env_dgp
from model.cauca_model import LinearCauCAModel, NaiveNonlinearModel, NonlinearCauCAModel
12,382
) parser.add_argument( "--nonparametric-base-distr", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Use nonparametric base distribution for flows.", ) parser.add_argument( "--wandb", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to log to weights and biases.", ) parser.add_argument( "--wandb-project", type=str, default="nonparam-ident", help="Weights & Biases project name.", ) args = parser.parse_args() if args.wandb: wandb_logger = WandbLogger(project=args.wandb_project) wandb_logger.experiment.config.update(args, allow_val_change=True) checkpoint_dir = ( Path(args.checkpoint_root_dir) / f"{wandb_logger.experiment.id}" ) logger = [wandb_logger] else: checkpoint_dir = Path(args.checkpoint_root_dir) / "default" logger = None checkpoint_callback = pl.callbacks.ModelCheckpoint( dirpath=checkpoint_dir, save_last=True, every_n_epochs=args.check_val_every_n_epoch, ) multi_env_dgp = make_multi_env_dgp( latent_dim=DGP[args.dgp]["num_causal_variables"], observation_dim=DGP[args.dgp]["observation_dim"], adjacency_matrix=DGP[args.dgp]["adj_matrix"], intervention_targets_per_env=DGP[args.dgp]["int_targets"], noise_shift_type=args.noise_shift_type, mixing=args.mixing, scm=args.scm, n_nonlinearities=args.n_nonlinearities, scm_coeffs_low=args.scm_coeffs_low, scm_coeffs_high=args.scm_coeffs_high, coeffs_min_abs_value=args.scm_coeffs_min_abs_value, edge_prob=DGP[args.dgp].get("edge_prob", None), snr=args.snr, ) data_module = MultiEnvDataModule( multi_env_dgp=multi_env_dgp, num_samples_per_env=DGP[args.dgp]["num_samples_per_env"], batch_size=args.batch_size, num_workers=os.cpu_count(), intervention_targets_per_env=DGP[args.dgp]["int_targets"], log_dir=checkpoint_dir / "data_stats", intervention_target_misspec=args.intervention_target_misspec, intervention_target_perm=args.intervention_target_perm, ) data_module.setup() pl.seed_everything(args.training_seed, workers=True) if args.intervention_target_misspec: # remember old intervention targets old_intervention_targets_per_env = DGP[args.dgp]["int_targets"] intervention_targets_per_env = torch.zeros_like( old_intervention_targets_per_env ) # get target permutation from data module perm = data_module.intervention_target_perm # permute intervention targets for env_idx in range(intervention_targets_per_env.shape[0]): for i in range(intervention_targets_per_env.shape[1]): if old_intervention_targets_per_env[env_idx, i] == 1: intervention_targets_per_env[env_idx, perm[i]] = 1 else: intervention_targets_per_env = DGP[args.dgp]["int_targets"] # Model Initialization if args.model == "nonlinear": model = NonlinearCauCAModel( latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, k_flows=args.k_flows, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, net_hidden_dim=args.net_hidden_dim, net_hidden_layers=args.net_hidden_layers, fix_mechanisms=args.fix_mechanisms, fix_all_intervention_targets=args.fix_all_intervention_targets, nonparametric_base_distr=args.nonparametric_base_distr, K_cbn=args.k_flows_cbn, net_hidden_dim_cbn=args.net_hidden_dim_cbn, net_hidden_layers_cbn=args.net_hidden_layers_cbn, ) elif args.model == "linear": model = LinearCauCAModel( latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, 
lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, fix_mechanisms=args.fix_mechanisms, nonparametric_base_distr=args.nonparametric_base_distr, ) elif args.model == "naive":
def int_list(arg): try: int_list = int(arg) return int_list except ValueError: raise argparse.ArgumentTypeError("Invalid integer list format") if __name__ == "__main__": parser = argparse.ArgumentParser( description="Run experiment for Nonparametric Identifiability of Causal Representations from Unknown " "Interventions." ) parser.add_argument( "--max-epochs", type=int, default=10, help="Number of epochs to train for.", ) parser.add_argument( "--accelerator", type=str, default="gpu", help="Accelerator to use for training.", ) parser.add_argument( "--batch-size", type=int, default=1024, help="Number of samples per batch.", ) parser.add_argument( "--lr", type=float, default=1e-4, help="Learning rate for Adam optimizer.", ) parser.add_argument( "--checkpoint-root-dir", type=str, default="checkpoints", help="Checkpoint root directory.", ) parser.add_argument( "--noise-shift-type", type=str, default="mean", choices=["mean", "std"], help="Property of noise distribution that is shifted between environments.", ) parser.add_argument( "--check-val-every-n-epoch", type=int, default=1, help="Check validation loss every n epochs.", ) parser.add_argument( "--dgp", type=str, default="graph-4-0", help="Data generation process to use.", ) parser.add_argument( "--k-flows", type=int, default=1, help="Number of flows to use in nonlinear ICA model.", ) parser.add_argument( "--k-flows-cbn", type=int, default=3, help="Number of flows to use in nonlinear latent CBN model.", ) parser.add_argument( "--model", type=str, default="nonlinear", help="Type of encoder to use.", choices=["linear", "nonlinear", "naive"], ) parser.add_argument( "--seed", type=int, default=42, ) parser.add_argument( "--training-seed", type=int, default=42, ) parser.add_argument( "--mixing", type=str, default="nonlinear", help="Type of mixing function to use.", choices=["linear", "nonlinear"], ) parser.add_argument( "--scm", type=str, default="linear", help="Type of SCM to use.", choices=["linear", "location-scale"], ) parser.add_argument( "--n-nonlinearities", type=int, default=1, help="Number of nonlinearities to use in nonlinear mixing function.", ) parser.add_argument( "--learn-scm-params", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to learn SCM parameters.", ) parser.add_argument( "--lr-scheduler", type=str, default=None, help="Learning rate scheduler.", choices=[None, "cosine"], ) parser.add_argument( "--lr-min", type=float, default=0.0, help="Minimum learning rate for cosine learning rate scheduler.", ) parser.add_argument( "--scm-coeffs-low", type=float, default=-1, help="Lower bound for SCM coefficients.", ) parser.add_argument( "--scm-coeffs-high", type=float, default=1, help="Upper bound for SCM coefficients.", ) parser.add_argument( "--scm-coeffs-min-abs-value", type=float, default=None, help="Minimum absolute value for SCM coefficients.", ) parser.add_argument( "--snr", type=float, default=1.0, help="Signal-to-noise ratio in latent SCM.", ) parser.add_argument( "--adjacency-misspec", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Misspecify adjacency matrix - assume ICA.", ) parser.add_argument( "--intervention-target-misspec", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Misspecify intervention target - mix up labels and true intervention targets.", ) parser.add_argument( "--intervention-target-perm", nargs="+", # Allows multiple arguments to be passed as a list default=None, type=int_list, help="Permutation of intervention targets. 
Only used if intervention-target-misspec is True.", ) parser.add_argument( "--net-hidden-layers", type=int, default=3, help="Number of hidden layers in nonlinear encoder.", ) parser.add_argument( "--net-hidden-layers-cbn", type=int, default=3, help="Number of hidden layers in latent CBN model.", ) parser.add_argument( "--net-hidden-dim", type=int, default=128, help="Number of hidden dimensions in nonlinear encoder.", ) parser.add_argument( "--net-hidden-dim-cbn", type=int, default=128, help="Number of hidden dimensions in latent CBN model.", ) parser.add_argument( "--fix-mechanisms", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Fix fixable mechanisms in latents.", ) parser.add_argument( "--fix-all-intervention-targets", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Fix all intervention targets.", ) parser.add_argument( "--nonparametric-base-distr", type=bool, default=False, action=argparse.BooleanOptionalAction, help="Use nonparametric base distribution for flows.", ) parser.add_argument( "--wandb", type=bool, default=True, action=argparse.BooleanOptionalAction, help="Whether to log to weights and biases.", ) parser.add_argument( "--wandb-project", type=str, default="nonparam-ident", help="Weights & Biases project name.", ) args = parser.parse_args() if args.wandb: wandb_logger = WandbLogger(project=args.wandb_project) wandb_logger.experiment.config.update(args, allow_val_change=True) checkpoint_dir = ( Path(args.checkpoint_root_dir) / f"{wandb_logger.experiment.id}" ) logger = [wandb_logger] else: checkpoint_dir = Path(args.checkpoint_root_dir) / "default" logger = None checkpoint_callback = pl.callbacks.ModelCheckpoint( dirpath=checkpoint_dir, save_last=True, every_n_epochs=args.check_val_every_n_epoch, ) multi_env_dgp = make_multi_env_dgp( latent_dim=DGP[args.dgp]["num_causal_variables"], observation_dim=DGP[args.dgp]["observation_dim"], adjacency_matrix=DGP[args.dgp]["adj_matrix"], intervention_targets_per_env=DGP[args.dgp]["int_targets"], noise_shift_type=args.noise_shift_type, mixing=args.mixing, scm=args.scm, n_nonlinearities=args.n_nonlinearities, scm_coeffs_low=args.scm_coeffs_low, scm_coeffs_high=args.scm_coeffs_high, coeffs_min_abs_value=args.scm_coeffs_min_abs_value, edge_prob=DGP[args.dgp].get("edge_prob", None), snr=args.snr, ) data_module = MultiEnvDataModule( multi_env_dgp=multi_env_dgp, num_samples_per_env=DGP[args.dgp]["num_samples_per_env"], batch_size=args.batch_size, num_workers=os.cpu_count(), intervention_targets_per_env=DGP[args.dgp]["int_targets"], log_dir=checkpoint_dir / "data_stats", intervention_target_misspec=args.intervention_target_misspec, intervention_target_perm=args.intervention_target_perm, ) data_module.setup() pl.seed_everything(args.training_seed, workers=True) if args.intervention_target_misspec: # remember old intervention targets old_intervention_targets_per_env = DGP[args.dgp]["int_targets"] intervention_targets_per_env = torch.zeros_like( old_intervention_targets_per_env ) # get target permutation from data module perm = data_module.intervention_target_perm # permute intervention targets for env_idx in range(intervention_targets_per_env.shape[0]): for i in range(intervention_targets_per_env.shape[1]): if old_intervention_targets_per_env[env_idx, i] == 1: intervention_targets_per_env[env_idx, perm[i]] = 1 else: intervention_targets_per_env = DGP[args.dgp]["int_targets"] # Model Initialization if args.model == "nonlinear": model = NonlinearCauCAModel( 
latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, k_flows=args.k_flows, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, net_hidden_dim=args.net_hidden_dim, net_hidden_layers=args.net_hidden_layers, fix_mechanisms=args.fix_mechanisms, fix_all_intervention_targets=args.fix_all_intervention_targets, nonparametric_base_distr=args.nonparametric_base_distr, K_cbn=args.k_flows_cbn, net_hidden_dim_cbn=args.net_hidden_dim_cbn, net_hidden_layers_cbn=args.net_hidden_layers_cbn, ) elif args.model == "linear": model = LinearCauCAModel( latent_dim=DGP[args.dgp]["num_causal_variables"], adjacency_matrix=data_module.medgp.adjacency_matrix, lr=args.lr, intervention_targets_per_env=intervention_targets_per_env, lr_scheduler=args.lr_scheduler, lr_min=args.lr_min, adjacency_misspecified=args.adjacency_misspec, fix_mechanisms=args.fix_mechanisms, nonparametric_base_distr=args.nonparametric_base_distr, ) elif args.model == "naive":
model = NaiveNonlinearModel(
4
2023-10-25 09:25:26+00:00
16k
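Side note on the argparse pattern used in the record above: despite its name, the `int_list` helper converts a single command-line token, and it is `nargs="+"` that collects the converted tokens into a Python list. A minimal standalone sketch of that pattern follows (function and variable names here are illustrative, not taken from the repo beyond the `--intervention-target-perm` flag):

import argparse

def int_token(arg: str) -> int:
    # argparse calls the `type` callable once per command-line token;
    # with nargs="+" the converted tokens are gathered into a list.
    try:
        return int(arg)
    except ValueError:
        raise argparse.ArgumentTypeError(f"expected an integer, got {arg!r}")

parser = argparse.ArgumentParser()
parser.add_argument(
    "--intervention-target-perm",
    nargs="+",        # one or more tokens -> list
    type=int_token,   # applied to each token individually
    default=None,
)

# e.g. `--intervention-target-perm 2 0 1` parses to [2, 0, 1]
args = parser.parse_args(["--intervention-target-perm", "2", "0", "1"])
assert args.intervention_target_perm == [2, 0, 1]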
endo-yuki-t/MAG
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler
11,310
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit
if reset_ema: assert exists(ckpt_path)
1
2023-10-27 06:56:37+00:00
16k
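For orientation, the DDPM constructor in the record above mostly registers a fixed variance schedule and the bookkeeping around it. Below is a minimal, self-contained PyTorch sketch of the forward-noising step that such a schedule supports; it is an illustration only, not the ldm implementation, and the simple linear schedule and helper names are simplified assumptions:

import torch

def linear_beta_schedule(timesteps, beta_start=1e-4, beta_end=2e-2):
    # Fixed variance schedule: betas rise linearly from beta_start to beta_end.
    return torch.linspace(beta_start, beta_end, timesteps)

def q_sample(x0, t, alphas_cumprod, noise):
    # Closed-form forward diffusion:
    #   x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * eps
    a_bar = alphas_cumprod[t].view(-1, 1, 1, 1)  # one a_bar per sample, broadcast over (C, H, W)
    return a_bar.sqrt() * x0 + (1.0 - a_bar).sqrt() * noise

betas = linear_beta_schedule(1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

x0 = torch.randn(4, 3, 32, 32)               # toy batch standing in for images
t = torch.randint(0, 1000, (4,))              # one random timestep per sample
noise = torch.randn_like(x0)
x_t = q_sample(x0, t, alphas_cumprod, noise)
# Under parameterization="eps", a denoising model f(x_t, t) is trained with an
# MSE loss against `noise`; under "x0" the regression target is `x0` instead.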
Gene-Weaver/VoucherVision
vouchervision/VoucherVision_GUI.py
[ { "identifier": "write_config_file", "path": "vouchervision/LeafMachine2_Config_Builder.py", "snippet": "def write_config_file(config_data, dir_home, filename=\"LeafMachine2.yaml\"):\n file_path = os.path.join(dir_home, filename)\n\n # Write the data to a YAML file\n with open(file_path, \"w\")...
import streamlit as st import yaml, os, json, random, time, re import matplotlib.pyplot as plt import plotly.graph_objs as go import numpy as np import pandas as pd from itertools import chain from PIL import Image from typing import Union from streamlit_extras.let_it_rain import rain from vouchervision.LeafMachine2_Config_Builder import write_config_file from vouchervision.VoucherVision_Config_Builder import build_VV_config, run_demo_tests_GPT, run_demo_tests_Palm , TestOptionsGPT, TestOptionsPalm, check_if_usable, run_api_tests from vouchervision.vouchervision_main import voucher_vision, voucher_vision_OCR_test from vouchervision.general_utils import test_GPU, get_cfg_from_full_path, summarize_expense_report, create_google_ocr_yaml_config, validate_dir
10,828
st.write("") if st.session_state.config['leafmachine']['project']['use_domain_knowledge']: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', '')) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False)) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', '')) else: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', ''), disabled=True) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False), disabled=True) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', ''), disabled=True) def render_expense_report_summary(): expense_summary = st.session_state.expense_summary expense_report = st.session_state.expense_report st.header('Expense Report Summary') if expense_summary: st.metric(label="Total Cost", value=f"${round(expense_summary['total_cost_sum'], 4):,}") col1, col2 = st.columns(2) # Run count and total costs with col1: st.metric(label="Run Count", value=expense_summary['run_count']) st.metric(label="Tokens In", value=f"{expense_summary['tokens_in_sum']:,}") # Token information with col2: st.metric(label="Total Images", value=expense_summary['n_images_sum']) st.metric(label="Tokens Out", value=f"{expense_summary['tokens_out_sum']:,}") # Calculate cost proportion per image for each API version st.subheader('Average Cost per Image by API Version') cost_labels = [] cost_values = [] total_images = 0 cost_per_image_dict = {} # Iterate through the expense report to accumulate costs and image counts for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] n_images = row['n_images'] total_images += n_images # Keep track of total images processed if api_version not in cost_per_image_dict: cost_per_image_dict[api_version] = {'total_cost': 0, 'n_images': 0} cost_per_image_dict[api_version]['total_cost'] += total_cost cost_per_image_dict[api_version]['n_images'] += n_images api_versions = list(cost_per_image_dict.keys()) colors = [COLORS_EXPENSE_REPORT[version] if version in COLORS_EXPENSE_REPORT else '#DDDDDD' for version in api_versions] # Calculate the cost per image for each API version for version, cost_data in cost_per_image_dict.items(): total_cost = cost_data['total_cost'] n_images = cost_data['n_images'] # Calculate the cost per image for this version cost_per_image = total_cost / n_images if n_images > 0 else 0 cost_labels.append(version) cost_values.append(cost_per_image) # Generate the pie chart cost_pie_chart = 
go.Figure(data=[go.Pie(labels=cost_labels, values=cost_values, hole=.3)]) # Update traces for custom text in hoverinfo, displaying cost with a dollar sign and two decimal places cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${value:.2f}" for value in cost_values], # Formats the cost as a string with a dollar sign and two decimals textinfo='percent+label', hoverinfo='label+percent+text' # Adds custom text (formatted cost) to the hover information ) st.plotly_chart(cost_pie_chart, use_container_width=True) st.subheader('Proportion of Total Cost by API Version') cost_labels = [] cost_proportions = [] total_cost_by_version = {} # Sum the total cost for each API version for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] if api_version not in total_cost_by_version: total_cost_by_version[api_version] = 0 total_cost_by_version[api_version] += total_cost # Calculate the combined total cost for all versions combined_total_cost = sum(total_cost_by_version.values()) # Calculate the proportion of total cost for each API version for version, total_cost in total_cost_by_version.items(): proportion = (total_cost / combined_total_cost) * 100 if combined_total_cost > 0 else 0 cost_labels.append(version) cost_proportions.append(proportion) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_proportions, hole=.3)]) # Update traces for custom text in hoverinfo cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${cost:.2f}" for cost in total_cost_by_version.values()], # This will format the cost to 2 decimal places textinfo='percent+label', hoverinfo='label+percent+text' # This tells Plotly to show the label, percent, and custom text (cost) on hover ) st.plotly_chart(cost_pie_chart, use_container_width=True) # API version usage percentages pie chart st.subheader('Runs by API Version') api_versions = list(expense_summary['api_version_percentages'].keys()) percentages = [expense_summary['api_version_percentages'][version] for version in api_versions] pie_chart = go.Figure(data=[go.Pie(labels=api_versions, values=percentages, hole=.3)]) pie_chart.update_layout(margin=dict(t=0, b=0, l=0, r=0)) pie_chart.update_traces(marker=dict(colors=colors),) st.plotly_chart(pie_chart, use_container_width=True) else: st.error('No expense report data available.') def sidebar_content(): if not os.path.exists(os.path.join(st.session_state.dir_home,'expense_report')): validate_dir(os.path.join(st.session_state.dir_home,'expense_report')) expense_report_path = os.path.join(st.session_state.dir_home, 'expense_report', 'expense_report.csv') if os.path.exists(expense_report_path): # File exists, proceed with summarization
PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE = ["Version 1","Version 1 PaLM 2"] COLORS_EXPENSE_REPORT = { 'GPT_4': '#8fff66', # Bright Green 'GPT_3_5': '#006400', # Dark Green 'PALM2': '#66a8ff' # blue } class ProgressReport: def __init__(self, overall_bar, batch_bar, text_overall, text_batch): self.overall_bar = overall_bar self.batch_bar = batch_bar self.text_overall = text_overall self.text_batch = text_batch self.current_overall_step = 0 self.total_overall_steps = 20 # number of major steps in machine function self.current_batch = 0 self.total_batches = 20 def update_overall(self, step_name=""): self.current_overall_step += 1 self.overall_bar.progress(self.current_overall_step / self.total_overall_steps) self.text_overall.text(step_name) def update_batch(self, step_name=""): self.current_batch += 1 self.batch_bar.progress(self.current_batch / self.total_batches) self.text_batch.text(step_name) def set_n_batches(self, n_batches): self.total_batches = n_batches def set_n_overall(self, total_overall_steps): self.current_overall_step = 0 self.overall_bar.progress(0) self.total_overall_steps = total_overall_steps def reset_batch(self, step_name): self.current_batch = 0 self.batch_bar.progress(0) self.text_batch.text(step_name) def reset_overall(self, step_name): self.current_overall_step = 0 self.overall_bar.progress(0) self.text_overall.text(step_name) def get_n_images(self): return self.n_images def get_n_overall(self): return self.total_overall_steps def does_private_file_exist(): dir_home = os.path.dirname(os.path.dirname(__file__)) path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') return os.path.exists(path_cfg_private) def setup_streamlit_config(dir_home): # Define the directory path and filename dir_path = os.path.join(dir_home, ".streamlit") file_path = os.path.join(dir_path, "config.toml") # Check if directory exists, if not create it if not os.path.exists(dir_path): os.makedirs(dir_path) # Create or modify the file with the provided content config_content = f""" [theme] base = "dark" primaryColor = "#00ff00" [server] enableStaticServing = false runOnSave = true port = 8524 """ with open(file_path, "w") as f: f.write(config_content.strip()) def display_scrollable_results(JSON_results, test_results, OPT2, OPT3): """ Display the results from JSON_results in a scrollable container. 
""" # Initialize the container con_results = st.empty() with con_results.container(): # Start the custom container for all the results results_html = """<div class='scrollable-results-container'>""" for idx, (test_name, _) in enumerate(sorted(test_results.items())): _, ind_opt1, ind_opt2, ind_opt3 = test_name.split('__') opt2_readable = "Use LeafMachine2" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2" opt3_readable = f"{OPT3[int(ind_opt3.split('-')[1])]}" if JSON_results[idx] is None: results_html += f"<p>None</p>" else: formatted_json = json.dumps(JSON_results[idx], indent=4, sort_keys=False) results_html += f"<pre>[{opt2_readable}] + [{opt3_readable}]<br/>{formatted_json}</pre>" # End the custom container results_html += """</div>""" # The CSS to make this container scrollable css = """ <style> .scrollable-results-container { overflow-y: auto; height: 600px; width: 100%; white-space: pre-wrap; # To wrap the content font-family: monospace; # To give the JSON a code-like appearance } </style> """ # Apply the CSS and then the results st.markdown(css, unsafe_allow_html=True) st.markdown(results_html, unsafe_allow_html=True) def refresh(): st.write('') def display_test_results(test_results, JSON_results, llm_version): if llm_version == 'gpt': OPT1, OPT2, OPT3 = TestOptionsGPT.get_options() elif llm_version == 'palm': OPT1, OPT2, OPT3 = TestOptionsPalm.get_options() else: raise widths = [1] * (len(OPT1) + 2) + [2] columns = st.columns(widths) with columns[0]: st.write("LeafMachine2") with columns[1]: st.write("Prompt") with columns[len(OPT1) + 2]: st.write("Scroll to See Last Transcription in Each Test") already_written = set() for test_name, result in sorted(test_results.items()): _, ind_opt1, _, _ = test_name.split('__') option_value = OPT1[int(ind_opt1.split('-')[1])] if option_value not in already_written: with columns[int(ind_opt1.split('-')[1]) + 2]: st.write(option_value) already_written.add(option_value) printed_options = set() with columns[-1]: display_scrollable_results(JSON_results, test_results, OPT2, OPT3) # Close the custom container st.write('</div>', unsafe_allow_html=True) for idx, (test_name, result) in enumerate(sorted(test_results.items())): _, ind_opt1, ind_opt2, ind_opt3 = test_name.split('__') opt2_readable = "Use LeafMachine2" if OPT2[int(ind_opt2.split('-')[1])] else "Don't use LeafMachine2" opt3_readable = f"{OPT3[int(ind_opt3.split('-')[1])]}" if (opt2_readable, opt3_readable) not in printed_options: with columns[0]: st.info(f"{opt2_readable}") st.write('---') with columns[1]: st.info(f"{opt3_readable}") st.write('---') printed_options.add((opt2_readable, opt3_readable)) with columns[int(ind_opt1.split('-')[1]) + 2]: if result: st.success(f"Test Passed") else: st.error(f"Test Failed") st.write('---') # success_count = sum(1 for result in test_results.values() if result) # failure_count = len(test_results) - success_count # proportional_rain("🥇", success_count, "💔", failure_count, font_size=72, falling_speed=5, animation_length="infinite") rain_emojis(test_results) def add_emoji_delay(): time.sleep(0.3) def rain_emojis(test_results): # test_results = { # 'test1': True, # Test passed # 'test2': True, # Test passed # 'test3': True, # Test passed # 'test4': False, # Test failed # 'test5': False, # Test failed # 'test6': False, # Test failed # 'test7': False, # Test failed # 'test8': False, # Test failed # 'test9': False, # Test failed # 'test10': False, # Test failed # } success_emojis = ["🥇", "🏆", "🍾", "🙌"] failure_emojis = ["💔", "😭"] success_count = 
sum(1 for result in test_results.values() if result) failure_count = len(test_results) - success_count chosen_emoji = random.choice(success_emojis) for _ in range(success_count): rain( emoji=chosen_emoji, font_size=72, falling_speed=4, animation_length=2, ) add_emoji_delay() chosen_emoji = random.choice(failure_emojis) for _ in range(failure_count): rain( emoji=chosen_emoji, font_size=72, falling_speed=5, animation_length=1, ) add_emoji_delay() def get_prompt_versions(LLM_version): yaml_files = [f for f in os.listdir(os.path.join(st.session_state.dir_home, 'custom_prompts')) if f.endswith('.yaml')] if LLM_version in ["gpt-4-1106-preview", "GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5"]: versions = ["Version 1", "Version 1 No Domain Knowledge", "Version 2"] return (versions + yaml_files, "Version 2") elif LLM_version in ["PaLM 2",]: versions = ["Version 1 PaLM 2", "Version 1 PaLM 2 No Domain Knowledge", "Version 2 PaLM 2"] return (versions + yaml_files, "Version 2 PaLM 2") else: # Handle other cases or raise an error return (yaml_files, None) def get_private_file(): dir_home = os.path.dirname(os.path.dirname(__file__)) path_cfg_private = os.path.join(dir_home, 'PRIVATE_DATA.yaml') return get_cfg_from_full_path(path_cfg_private) def create_space_saver(): st.subheader("Space Saving Options") col_ss_1, col_ss_2 = st.columns([2,2]) with col_ss_1: st.write("Several folders are created and populated with data during the VoucherVision transcription process.") st.write("Below are several options that will allow you to automatically delete temporary files that you may not need for everyday operations.") st.write("VoucherVision creates the following folders. Folders marked with a :star: are required if you want to use VoucherVisionEditor for quality control.") st.write("`../[Run Name]/Archival_Components`") st.write("`../[Run Name]/Config_File`") st.write("`../[Run Name]/Cropped_Images` :star:") st.write("`../[Run Name]/Logs`") st.write("`../[Run Name]/Original_Images` :star:") st.write("`../[Run Name]/Transcription` :star:") with col_ss_2: st.session_state.config['leafmachine']['project']['delete_temps_keep_VVE'] = st.checkbox("Delete Temporary Files (KEEP files required for VoucherVisionEditor)", st.session_state.config['leafmachine']['project'].get('delete_temps_keep_VVE', False)) st.session_state.config['leafmachine']['project']['delete_all_temps'] = st.checkbox("Keep only the final transcription file", st.session_state.config['leafmachine']['project'].get('delete_all_temps', False),help="*WARNING:* This limits your ability to do quality assurance. 
This will delete all folders created by VoucherVision, leaving only the `transcription.xlsx` file.") # def create_private_file(): # st.session_state.proceed_to_main = False # if st.session_state.private_file: # cfg_private = get_private_file() # create_private_file_0(cfg_private) # else: # st.title("VoucherVision") # create_private_file_0() def create_private_file(): st.session_state.proceed_to_main = False st.title("VoucherVision") col_private,_= st.columns([12,2]) if st.session_state.private_file: cfg_private = get_private_file() else: cfg_private = {} cfg_private['openai'] = {} cfg_private['openai']['OPENAI_API_KEY'] ='' cfg_private['openai_azure'] = {} cfg_private['openai_azure']['openai_api_key'] = '' cfg_private['openai_azure']['api_version'] = '' cfg_private['openai_azure']['openai_api_base'] ='' cfg_private['openai_azure']['openai_organization'] ='' cfg_private['openai_azure']['openai_api_type'] ='' cfg_private['google_cloud'] = {} cfg_private['google_cloud']['path_json_file'] ='' cfg_private['google_palm'] = {} cfg_private['google_palm']['google_palm_api'] ='' with col_private: st.header("Set API keys") st.info("***Note:*** There is a known bug with tabs in Streamlit. If you update an input field it may take you back to the 'Project Settings' tab. Changes that you made are saved, it's just an annoying glitch. We are aware of this issue and will fix it as soon as we can.") st.warning("To commit changes to API keys you must press the 'Set API Keys' button at the bottom of the page.") st.write("Before using VoucherVision you must set your API keys. All keys are stored locally on your computer and are never made public.") st.write("API keys are stored in `../VoucherVision/PRIVATE_DATA.yaml`.") st.write("Deleting this file will allow you to reset API keys. Alternatively, you can edit the keys in the user interface.") st.write("Leave keys blank if you do not intend to use that service.") st.write("---") st.subheader("Google Vision (*Required*)") st.markdown("VoucherVision currently uses [Google Vision API](https://cloud.google.com/vision/docs/ocr) for OCR. Generating an API key for this is more involved than the others. [Please carefully follow the instructions outlined here to create and setup your account.](https://cloud.google.com/vision/docs/setup) ") st.markdown(""" Once your account is created, [visit this page](https://console.cloud.google.com) and create a project. Then follow these instructions: - **Select your Project**: If you have multiple projects, ensure you select the one where you've enabled the Vision API. - **Open the Navigation Menu**: Click on the hamburger menu (three horizontal lines) in the top left corner. - **Go to IAM & Admin**: In the navigation pane, hover over "IAM & Admin" and then click on "Service accounts." - **Locate Your Service Account**: Find the service account for which you wish to download the JSON key. If you haven't created a service account yet, you'll need to do so by clicking the "CREATE SERVICE ACCOUNT" button at the top. - **Download the JSON Key**: - Click on the three dots (actions menu) on the right side of your service account name. - Select "Manage keys." - In the pop-up window, click on the "ADD KEY" button and select "JSON." - The JSON key file will automatically be downloaded to your computer. - **Store Safely**: This file contains sensitive data that can be used to authenticate and bill your Google Cloud account. Never commit it to public repositories or expose it in any way. Always keep it safe and secure. 
""") with st.container(): c_in_ocr, c_button_ocr = st.columns([10,2]) with c_in_ocr: google_vision = st.text_input(label = 'Full path to Google Cloud JSON API key file', value = cfg_private['google_cloud'].get('path_json_file', ''), placeholder = 'e.g. C:/Documents/Secret_Files/google_API/application_default_credentials.json', help ="This API Key is in the form of a JSON file. Please save the JSON file in a safe directory. DO NOT store the JSON key inside of the VoucherVision directory.", type='password',key='924857298734590283750932809238') with c_button_ocr: st.empty() st.write("---") st.subheader("OpenAI") st.markdown("API key for first-party OpenAI API. Create an account with OpenAI [here](https://platform.openai.com/signup), then create an API key [here](https://platform.openai.com/account/api-keys).") with st.container(): c_in_openai, c_button_openai = st.columns([10,2]) with c_in_openai: openai_api_key = st.text_input("openai_api_key", cfg_private['openai'].get('OPENAI_API_KEY', ''), help='The actual API key. Likely to be a string of 2 character, a dash, and then a 48-character string: sk-XXXXXXXX...', placeholder = 'e.g. sk-XXXXXXXX...', type='password') with c_button_openai: st.empty() st.write("---") st.subheader("OpenAI - Azure") st.markdown("This version OpenAI relies on Azure servers directly as is intended for private enterprise instances of OpenAI's services, such as [UM-GPT](https://its.umich.edu/computing/ai). Administrators will provide you with the following information.") azure_openai_api_version = st.text_input("azure_openai_api_version", cfg_private['openai_azure'].get('api_version', ''), help='API Version e.g. "2023-05-15"', placeholder = 'e.g. 2023-05-15', type='password') azure_openai_api_key = st.text_input("azure_openai_api_key", cfg_private['openai_azure'].get('openai_api_key', ''), help='The actual API key. Likely to be a 32-character string', placeholder = 'e.g. 12333333333333333333333333333332', type='password') azure_openai_api_base = st.text_input("azure_openai_api_base", cfg_private['openai_azure'].get('openai_api_base', ''), help='The base url for the API e.g. "https://api.umgpt.umich.edu/azure-openai-api"', placeholder = 'e.g. https://api.umgpt.umich.edu/azure-openai-api', type='password') azure_openai_organization = st.text_input("azure_openai_organization", cfg_private['openai_azure'].get('openai_organization', ''), help='Your organization code. Likely a short string', placeholder = 'e.g. 123456', type='password') azure_openai_api_type = st.text_input("azure_openai_api_type", cfg_private['openai_azure'].get('openai_api_type', ''), help='The API type. Typically "azure"', placeholder = 'e.g. azure', type='password') with st.container(): c_in_azure, c_button_azure = st.columns([10,2]) with c_button_azure: st.empty() st.write("---") st.subheader("Google PaLM 2") st.markdown('Follow these [instructions](https://developers.generativeai.google/tutorials/setup) to generate an API key for PaLM 2. You may need to also activate an account with [MakerSuite](https://makersuite.google.com/app/apikey) and enable "early access."') with st.container(): c_in_palm, c_button_palm = st.columns([10,2]) with c_in_palm: google_palm = st.text_input("Google PaLM 2 API Key", cfg_private['google_palm'].get('google_palm_api', ''), help='The MakerSuite API key e.g. a 32-character string', placeholder='e.g. 
SATgthsykuE64FgrrrrEervr3S4455t_geyDeGq', type='password') with st.container(): with c_button_ocr: st.write("##") st.button("Test OCR", on_click=test_API, args=['google_vision',c_in_ocr, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_openai: st.write("##") st.button("Test OpenAI", on_click=test_API, args=['openai',c_in_openai, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_azure: st.write("##") st.button("Test Azure OpenAI", on_click=test_API, args=['azure_openai',c_in_azure, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) with st.container(): with c_button_palm: st.write("##") st.button("Test PaLM 2", on_click=test_API, args=['palm',c_in_palm, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) st.button("Set API Keys",type='primary', on_click=save_changes_to_API_keys, args=[cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm]) if st.button('Proceed to VoucherVision'): st.session_state.proceed_to_private = False st.session_state.proceed_to_main = True def test_API(api, message_loc, cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm): # Save the API keys save_changes_to_API_keys(cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key,azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm) with st.spinner('Performing validation checks...'): if api == 'google_vision': print("*** Google Vision OCR API Key ***") try: demo_config_path = os.path.join(st.session_state.dir_home,'demo','validation_configs','google_vision_ocr_test.yaml') demo_images_path = os.path.join(st.session_state.dir_home, 'demo', 'demo_images') demo_out_path = os.path.join(st.session_state.dir_home, 'demo', 'demo_output','run_name') create_google_ocr_yaml_config(demo_config_path, demo_images_path, demo_out_path) voucher_vision_OCR_test(demo_config_path, st.session_state.dir_home, None, demo_images_path) with message_loc: st.success("Google Vision OCR API Key Valid :white_check_mark:") return True except Exception as e: with message_loc: st.error(f"Google Vision OCR API Key Failed! 
{e}") return False elif api == 'openai': print("*** OpenAI API Key ***") try: if run_api_tests('openai'): with message_loc: st.success("OpenAI API Key Valid :white_check_mark:") else: with message_loc: st.error("OpenAI API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"OpenAI API Key Failed:exclamation: {e}") elif api == 'azure_openai': print("*** Azure OpenAI API Key ***") try: if run_api_tests('azure_openai'): with message_loc: st.success("Azure OpenAI API Key Valid :white_check_mark:") else: with message_loc: st.error(f"Azure OpenAI API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"Azure OpenAI API Key Failed:exclamation: {e}") elif api == 'palm': print("*** Google PaLM 2 API Key ***") try: if run_api_tests('palm'): with message_loc: st.success("Google PaLM 2 API Key Valid :white_check_mark:") else: with message_loc: st.error("Google PaLM 2 API Key Failed:exclamation:") return False except Exception as e: with message_loc: st.error(f"Google PaLM 2 API Key Failed:exclamation: {e}") def save_changes_to_API_keys(cfg_private,openai_api_key,azure_openai_api_version,azure_openai_api_key, azure_openai_api_base,azure_openai_organization,azure_openai_api_type,google_vision,google_palm): # Update the configuration dictionary with the new values cfg_private['openai']['OPENAI_API_KEY'] = openai_api_key cfg_private['openai_azure']['api_version'] = azure_openai_api_version cfg_private['openai_azure']['openai_api_key'] = azure_openai_api_key cfg_private['openai_azure']['openai_api_base'] = azure_openai_api_base cfg_private['openai_azure']['openai_organization'] = azure_openai_organization cfg_private['openai_azure']['openai_api_type'] = azure_openai_api_type cfg_private['google_cloud']['path_json_file'] = google_vision cfg_private['google_palm']['google_palm_api'] = google_palm # Call the function to write the updated configuration to the YAML file write_config_file(cfg_private, st.session_state.dir_home, filename="PRIVATE_DATA.yaml") st.session_state.private_file = does_private_file_exist() # Function to load a YAML file and update session_state def load_prompt_yaml(filename): with open(filename, 'r') as file: st.session_state['prompt_info'] = yaml.safe_load(file) st.session_state['prompt_author'] = st.session_state['prompt_info'].get('prompt_author', st.session_state['default_prompt_author']) st.session_state['prompt_author_institution'] = st.session_state['prompt_info'].get('prompt_author_institution', st.session_state['default_prompt_author_institution']) st.session_state['prompt_description'] = st.session_state['prompt_info'].get('prompt_description', st.session_state['default_prompt_description']) st.session_state['instructions'] = st.session_state['prompt_info'].get('instructions', st.session_state['default_instructions']) st.session_state['json_formatting_instructions'] = st.session_state['prompt_info'].get('json_formatting_instructions', st.session_state['default_json_formatting_instructions'] ) st.session_state['rules'] = st.session_state['prompt_info'].get('rules', {}) st.session_state['mapping'] = st.session_state['prompt_info'].get('mapping', {}) st.session_state['LLM'] = st.session_state['prompt_info'].get('LLM', 'gpt') # Placeholder: st.session_state['assigned_columns'] = list(chain.from_iterable(st.session_state['mapping'].values())) def save_prompt_yaml(filename): yaml_content = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': 
st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'LLM': st.session_state['LLM'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], } dir_prompt = os.path.join(st.session_state.dir_home, 'custom_prompts') filepath = os.path.join(dir_prompt, f"{filename}.yaml") with open(filepath, 'w') as file: yaml.safe_dump(dict(yaml_content), file, sort_keys=False) st.success(f"Prompt saved as '{filename}.yaml'.") def check_unique_mapping_assignments(): if len(st.session_state['assigned_columns']) != len(set(st.session_state['assigned_columns'])): st.error("Each column name must be assigned to only one category.") return False else: st.success("Mapping confirmed.") return True def check_prompt_yaml_filename(fname): # Check if the filename only contains letters, numbers, underscores, and dashes pattern = r'^[\w-]+$' # The \w matches any alphanumeric character and is equivalent to the character class [a-zA-Z0-9_]. # The hyphen - is literally matched. if re.match(pattern, fname): return True else: return False def btn_load_prompt(selected_yaml_file, dir_prompt): if selected_yaml_file: yaml_file_path = os.path.join(dir_prompt, selected_yaml_file) load_prompt_yaml(yaml_file_path) elif not selected_yaml_file: # Directly assigning default values since no file is selected st.session_state['prompt_info'] = {} st.session_state['prompt_author'] = st.session_state['default_prompt_author'] st.session_state['prompt_author_institution'] = st.session_state['default_prompt_author_institution'] st.session_state['prompt_description'] = st.session_state['default_prompt_description'] st.session_state['instructions'] = st.session_state['default_instructions'] st.session_state['json_formatting_instructions'] = st.session_state['default_json_formatting_instructions'] st.session_state['rules'] = {} st.session_state['LLM'] = 'gpt' st.session_state['assigned_columns'] = [] st.session_state['prompt_info'] = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], 'LLM': st.session_state['LLM'] } def build_LLM_prompt_config(): st.session_state['assigned_columns'] = [] st.session_state['default_prompt_author'] = 'unknown' st.session_state['default_prompt_author_institution'] = 'unknown' st.session_state['default_prompt_description'] = 'unknown' st.session_state['default_instructions'] = """1. Refactor the unstructured OCR text into a dictionary based on the JSON structure outlined below. 2. You should map the unstructured OCR text to the appropriate JSON key and then populate the field based on its rules. 3. Some JSON key fields are permitted to remain empty if the corresponding information is not found in the unstructured OCR text. 4. Ignore any information in the OCR text that doesn't fit into the defined JSON structure. 5. Duplicate dictionary fields are not allowed. 6. Ensure that all JSON keys are in lowercase. 7. Ensure that new JSON field values follow sentence case capitalization. 8. 
Ensure all key-value pairs in the JSON dictionary strictly adhere to the format and data types specified in the template. 9. Ensure the output JSON string is valid JSON format. It should not have trailing commas or unquoted keys. 10. Only return a JSON dictionary represented as a string. You should not explain your answer.""" st.session_state['default_json_formatting_instructions'] = """The next section of instructions outlines how to format the JSON dictionary. The keys are the same as those of the final formatted JSON object. For each key there is a format requirement that specifies how to transcribe the information for that key. The possible formatting options are: 1. "verbatim transcription" - field is populated with verbatim text from the unformatted OCR. 2. "spell check transcription" - field is populated with spelling corrected text from the unformatted OCR. 3. "boolean yes no" - field is populated with only yes or no. 4. "boolean 1 0" - field is populated with only 1 or 0. 5. "integer" - field is populated with only an integer. 6. "[list]" - field is populated from one of the values in the list. 7. "yyyy-mm-dd" - field is populated with a date in the format year-month-day. The desired null value is also given. Populate the field with the null value of the information for that key is not present in the unformatted OCR text.""" # Start building the Streamlit app col_prompt_main_left, ___, col_prompt_main_right = st.columns([6,1,3]) with col_prompt_main_left: st.title("Custom LLM Prompt Builder") st.subheader('About') st.write("This form allows you to craft a prompt for your specific task.") st.subheader('How it works') st.write("1. Edit this page until you are happy with your instructions. We recommend looking at the basic structure, writing down your prompt inforamtion in a Word document so that it does not randomly disappear, and then copying and pasting that info into this form once your whole prompt structure is defined.") st.write("2. After you enter all of your prompt instructions, click 'Save' and give your file a name.") st.write("3. This file will be saved as a yaml configuration file in the `..VoucherVision/custom_prompts` folder.") st.write("4. When you go back the main VoucherVision page you will now see your custom prompt available in the 'Prompt Version' dropdown menu.") st.write("5. Select your custom prompt. Note, your prompt will only be available for the LLM that you set when filling out the form below.") dir_prompt = os.path.join(st.session_state.dir_home, 'custom_prompts') yaml_files = [f for f in os.listdir(dir_prompt) if f.endswith('.yaml')] col_load_text, col_load_btn = st.columns([8,2]) with col_load_text: # Dropdown for selecting a YAML file selected_yaml_file = st.selectbox('Select a prompt YAML file to load:', [''] + yaml_files) with col_load_btn: st.write('##') # Button to load the selected prompt st.button('Load Prompt', on_click=btn_load_prompt, args=[selected_yaml_file, dir_prompt]) # Prompt Author Information st.header("Prompt Author Information") st.write("We value community contributions! Please provide your name(s) (or pseudonym if you prefer) for credit. If you leave this field blank, it will say 'unknown'.") st.session_state['prompt_author'] = st.text_input("Enter names of prompt author(s)", value=st.session_state['default_prompt_author']) st.write("Please provide your institution name. 
If you leave this field blank, it will say 'unknown'.") st.session_state['prompt_author_institution'] = st.text_input("Enter name of institution", value=st.session_state['default_prompt_author_institution']) st.write("Please provide a description of your prompt and its intended task. Is it designed for a specific collection? Taxa? Database structure?") st.session_state['prompt_description'] = st.text_input("Enter description of prompt", value=st.session_state['default_prompt_description']) st.write('---') st.header("Set LLM Model Type") # Define the options for the dropdown llm_options = ['gpt', 'palm'] # Create the dropdown and set the value to session_state['LLM'] st.write("Which LLM is this prompt designed for? This will not restrict its use to a specific LLM, but some prompts will behave in different ways across models.") st.write("For example, VoucherVision will automatically add multiple JSON formatting blocks to all PaLM 2 prompts to coax PaLM 2 to return a valid JSON object.") st.session_state['LLM'] = st.selectbox('Set LLM', llm_options, index=llm_options.index(st.session_state.get('LLM', 'gpt'))) st.write('---') # Instructions Section st.header("Instructions") st.write("These are the general instructions that guide the LLM through the transcription task. We recommend using the default instructions unless you have a specific reason to change them.") st.session_state['instructions'] = st.text_area("Enter instructions", value=st.session_state['default_instructions'].strip(), height=350, disabled=True) st.write('---') # Column Instructions Section st.header("JSON Formatting Instructions") st.write("The following section tells the LLM how we want to structure the JSON dictionary. We do not recommend changing this section because it would likely result in unstable and inconsistent behavior.") st.session_state['json_formatting_instructions'] = st.text_area("Enter column instructions", value=st.session_state['default_json_formatting_instructions'], height=350, disabled=True) st.write('---') col_left, col_right = st.columns([6,4]) with col_left: st.subheader('Add/Edit Columns') # Initialize rules in session state if not already present if 'rules' not in st.session_state or not st.session_state['rules']: st.session_state['rules']['Dictionary'] = { "catalog_number": { "format": "verbatim transcription", "null_value": "", "description": "The barcode identifier, typically a number with at least 6 digits, but fewer than 30 digits." } } st.session_state['rules']['SpeciesName'] = { "taxonomy": ["Genus_species"] } # Layout for adding a new column name # col_text, col_textbtn = st.columns([8, 2]) # with col_text: new_column_name = st.text_input("Enter a new column name:") # with col_textbtn: # st.write('##') if st.button("Add New Column") and new_column_name: if new_column_name not in st.session_state['rules']['Dictionary']: st.session_state['rules']['Dictionary'][new_column_name] = {"format": "", "null_value": "", "description": ""} st.success(f"New column '{new_column_name}' added. Now you can edit its properties.") else: st.error("Column name already exists. 
Please enter a unique column name.") # Get columns excluding the protected "catalog_number" st.write('#') editable_columns = [col for col in st.session_state['rules']['Dictionary'] if col != "catalog_number"] column_name = st.selectbox("Select a column to edit:", [""] + editable_columns) # Handle rules editing current_rule = st.session_state['rules']['Dictionary'].get(column_name, { "format": "", "null_value": "", "description": "" }) if 'selected_column' not in st.session_state: st.session_state['selected_column'] = column_name # Form for input fields with st.form(key='rule_form'): format_options = ["verbatim transcription", "spell check transcription", "boolean yes no", "boolean 1 0", "integer", "[list]", "yyyy-mm-dd"] current_rule["format"] = st.selectbox("Format:", format_options, index=format_options.index(current_rule["format"]) if current_rule["format"] else 0) current_rule["null_value"] = st.text_input("Null value:", value=current_rule["null_value"]) current_rule["description"] = st.text_area("Description:", value=current_rule["description"]) commit_button = st.form_submit_button("Commit Column") default_rule = { "format": format_options[0], # default format "null_value": "", # default null value "description": "", # default description } if st.session_state['selected_column'] != column_name: # Column has changed. Update the session_state selected column. st.session_state['selected_column'] = column_name # Reset the current rule to the default for this new column, or a blank rule if not set. current_rule = st.session_state['rules']['Dictionary'].get(column_name, default_rule.copy()) # Handle commit action if commit_button and column_name: # Commit the rules to the session state. st.session_state['rules']['Dictionary'][column_name] = current_rule.copy() st.success(f"Column '{column_name}' added/updated in rules.") # Force the form to reset by clearing the fields from the session state st.session_state.pop('selected_column', None) # Clear the selected column to force reset # st.session_state['rules'][column_name] = current_rule # st.success(f"Column '{column_name}' added/updated in rules.") # # Reset current_rule to default values for the next input # current_rule["format"] = default_rule["format"] # current_rule["null_value"] = default_rule["null_value"] # current_rule["description"] = default_rule["description"] # # To ensure that the form fields are reset, we can clear them from the session state # for key in current_rule.keys(): # st.session_state[key] = default_rule[key] # Layout for removing an existing column # del_col, del_colbtn = st.columns([8, 2]) # with del_col: delete_column_name = st.selectbox("Select a column to delete:", [""] + editable_columns, key='delete_column') # with del_colbtn: # st.write('##') if st.button("Delete Column") and delete_column_name: del st.session_state['rules'][delete_column_name] st.success(f"Column '{delete_column_name}' removed from rules.") with col_right: # Display the current state of the JSON rules st.subheader('Formatted Columns') st.json(st.session_state['rules']['Dictionary']) # st.subheader('All Prompt Info') # st.json(st.session_state['prompt_info']) st.write('---') col_left_mapping, col_right_mapping = st.columns([6,4]) with col_left_mapping: st.header("Mapping") st.write("Assign each column name to a single category.") st.session_state['refresh_mapping'] = False # Dynamically create a list of all column names that can be assigned # This assumes that the column names are the keys in the dictionary under 'rules' all_column_names = 
list(st.session_state['rules']['Dictionary'].keys()) categories = ['TAXONOMY', 'GEOGRAPHY', 'LOCALITY', 'COLLECTING', 'MISCELLANEOUS'] if ('mapping' not in st.session_state) or (st.session_state['mapping'] == {}): st.session_state['mapping'] = {category: [] for category in categories} for category in categories: # Filter out the already assigned columns available_columns = [col for col in all_column_names if col not in st.session_state['assigned_columns'] or col in st.session_state['mapping'].get(category, [])] # Ensure the current mapping is a subset of the available options current_mapping = [col for col in st.session_state['mapping'].get(category, []) if col in available_columns] # Provide a safe default if the current mapping is empty or contains invalid options safe_default = current_mapping if all(col in available_columns for col in current_mapping) else [] # Create a multi-select widget for the category with a safe default selected_columns = st.multiselect( f"Select columns for {category}:", available_columns, default=safe_default, key=f"mapping_{category}" ) # Update the assigned_columns based on the selections for col in current_mapping: if col not in selected_columns and col in st.session_state['assigned_columns']: st.session_state['assigned_columns'].remove(col) st.session_state['refresh_mapping'] = True for col in selected_columns: if col not in st.session_state['assigned_columns']: st.session_state['assigned_columns'].append(col) st.session_state['refresh_mapping'] = True # Update the mapping in session state when there's a change st.session_state['mapping'][category] = selected_columns if st.session_state['refresh_mapping']: st.session_state['refresh_mapping'] = False # Button to confirm and save the mapping configuration if st.button('Confirm Mapping'): if check_unique_mapping_assignments(): # Proceed with further actions since the mapping is confirmed and unique pass with col_right_mapping: # Display the current state of the JSON rules st.subheader('Formatted Column Maps') st.json(st.session_state['mapping']) col_left_save, col_right_save = st.columns([6,4]) with col_left_save: # Input for new file name new_filename = st.text_input("Enter filename to save your prompt as a configuration YAML:",placeholder='my_prompt_name') # Button to save the new YAML file if st.button('Save YAML', type='primary'): if new_filename: if check_unique_mapping_assignments(): if check_prompt_yaml_filename(new_filename): save_prompt_yaml(new_filename) else: st.error("File name can only contain letters, numbers, underscores, and dashes. Cannot contain spaces.") else: st.error("Mapping contains an error. 
Make sure that each column is assigned to only ***one*** category.") else: st.error("Please enter a filename.") if st.button('Exit'): st.session_state.proceed_to_build_llm_prompt = False st.session_state.proceed_to_main = True st.rerun() with col_prompt_main_right: st.subheader('All Prompt Components') st.session_state['prompt_info'] = { 'prompt_author': st.session_state['prompt_author'], 'prompt_author_institution': st.session_state['prompt_author_institution'], 'prompt_description': st.session_state['prompt_description'], 'LLM': st.session_state['LLM'], 'instructions': st.session_state['instructions'], 'json_formatting_instructions': st.session_state['json_formatting_instructions'], 'rules': st.session_state['rules'], 'mapping': st.session_state['mapping'], } st.json(st.session_state['prompt_info']) def show_header_welcome(): st.session_state.logo_path = os.path.join(st.session_state.dir_home, 'img','logo.png') st.session_state.logo = Image.open(st.session_state.logo_path) st.image(st.session_state.logo, width=250) def determine_n_images(): try: # Check if 'dir_uploaded_images' key exists and it is not empty if 'dir_uploaded_images' in st and st['dir_uploaded_images']: dir_path = st['dir_uploaded_images'] # This would be the path to the directory return len([f for f in os.listdir(dir_path) if os.path.isfile(os.path.join(dir_path, f))]) else: return None except: return None def content_header(): col_run_1, col_run_2, col_run_3 = st.columns([4,4,2]) col_test = st.container() st.write("") st.write("") st.write("") st.write("") st.subheader("Overall Progress") col_run_info_1 = st.columns([1])[0] st.write("") st.write("") st.write("") st.write("") st.header("Configuration Settings") with col_run_info_1: # Progress # Progress # st.subheader('Project') # bar = st.progress(0) # new_text = st.empty() # Placeholder for current step name # progress_report = ProgressReportVV(bar, new_text, n_images=10) # Progress overall_progress_bar = st.progress(0) text_overall = st.empty() # Placeholder for current step name st.subheader('Transcription Progress') batch_progress_bar = st.progress(0) text_batch = st.empty() # Placeholder for current step name progress_report = ProgressReport(overall_progress_bar, batch_progress_bar, text_overall, text_batch) st.info("***Note:*** There is a known bug with tabs in Streamlit. If you update an input field it may take you back to the 'Project Settings' tab. Changes that you made are saved, it's just an annoying glitch. We are aware of this issue and will fix it as soon as we can.") st.write("If you use VoucherVision frequently, you can change the default values that are auto-populated in the form below. In a text editor or IDE, edit the first few rows in the file `../VoucherVision/vouchervision/VoucherVision_Config_Builder.py`") with col_run_1: show_header_welcome() st.subheader('Run VoucherVision') N_STEPS = 6 if determine_n_images(): st.session_state['processing_add_on'] = f" {determine_n_images()} Images" else: st.session_state['processing_add_on'] = '' if check_if_usable(): if st.button(f"Start Processing{st.session_state['processing_add_on']}", type='primary'): # Define number of overall steps progress_report.set_n_overall(N_STEPS) progress_report.update_overall(f"Starting VoucherVision...") # First, write the config file. 
write_config_file(st.session_state.config, st.session_state.dir_home, filename="VoucherVision.yaml") path_custom_prompts = os.path.join(st.session_state.dir_home,'custom_prompts',st.session_state.config['leafmachine']['project']['prompt_version']) # Call the machine function. last_JSON_response, total_cost = voucher_vision(None, st.session_state.dir_home, path_custom_prompts, None, progress_report,path_api_cost=os.path.join(st.session_state.dir_home,'api_cost','api_cost.yaml'), is_real_run=True) if total_cost: st.success(f":money_with_wings: This run cost :heavy_dollar_sign:{total_cost:.4f}") # Format the JSON string for display. if last_JSON_response is None: st.markdown(f"Last JSON object in the batch: NONE") else: try: formatted_json = json.dumps(json.loads(last_JSON_response), indent=4, sort_keys=False) except: formatted_json = json.dumps(last_JSON_response, indent=4, sort_keys=False) st.markdown(f"Last JSON object in the batch:\n```\n{formatted_json}\n```") st.balloons() else: st.button("Start Processing", type='primary', disabled=True) st.error(":heavy_exclamation_mark: Required API keys not set. Please visit the 'API Keys' tab and set the Google Vision OCR API key and at least one LLM key.") st.button("Refresh", on_click=refresh) with col_run_2: if st.button("Test GPT"): progress_report.set_n_overall(TestOptionsGPT.get_length()) test_results, JSON_results = run_demo_tests_GPT(progress_report) with col_test: display_test_results(test_results, JSON_results, 'gpt') st.balloons() if st.button("Test PaLM2"): progress_report.set_n_overall(TestOptionsPalm.get_length()) test_results, JSON_results = run_demo_tests_Palm(progress_report) with col_test: display_test_results(test_results, JSON_results, 'palm') st.balloons() with col_run_3: st.subheader('Check GPU') if st.button("GPU"): success, info = test_GPU() if success: st.balloons() for message in info: st.success(message) else: for message in info: st.error(message) def content_tab_settings(): st.header('Project') col_project_1, col_project_2 = st.columns([4,2]) st.write("---") st.header('Input Images') col_local_1, col_local_2 = st.columns([4,2]) st.write("---") st.header('LeafMachine2 Label Collage') col_cropped_1, col_cropped_2 = st.columns([4,4]) st.write("---") st.header('OCR Overlay Image') col_ocr_1, col_ocr_2 = st.columns([4,4]) os.path.join(st.session_state.dir_home, ) ### Project with col_project_1: st.session_state.config['leafmachine']['project']['run_name'] = st.text_input("Run name", st.session_state.config['leafmachine']['project'].get('run_name', '')) st.session_state.config['leafmachine']['project']['dir_output'] = st.text_input("Output directory", st.session_state.config['leafmachine']['project'].get('dir_output', '')) ### Input Images Local with col_local_1: st.session_state.config['leafmachine']['project']['dir_images_local'] = st.text_input("Input images directory", st.session_state.config['leafmachine']['project'].get('dir_images_local', '')) st.session_state.config['leafmachine']['project']['continue_run_from_partial_xlsx'] = st.text_input("Continue run from partially completed project XLSX", st.session_state.config['leafmachine']['project'].get('continue_run_from_partial_xlsx', ''), disabled=True) st.write("---") st.subheader('LLM Version') st.markdown( """ ***Note:*** GPT-4 is 20x more expensive than GPT-3.5 """ ) st.session_state.config['leafmachine']['LLM_version'] = st.selectbox("LLM version", ["gpt-4-1106-preview", "GPT 4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"], index=["gpt-4-1106-preview", "GPT 
4", "GPT 3.5", "Azure GPT 4", "Azure GPT 3.5", "PaLM 2"].index(st.session_state.config['leafmachine'].get('LLM_version', 'Azure GPT 4'))) st.write("---") st.subheader('Prompt Version') versions, default_version = get_prompt_versions(st.session_state.config['leafmachine']['LLM_version']) if versions: selected_version = st.session_state.config['leafmachine']['project'].get('prompt_version', default_version) if selected_version not in versions: selected_version = default_version st.session_state.config['leafmachine']['project']['prompt_version'] = st.selectbox("Prompt Version", versions, index=versions.index(selected_version)) with col_cropped_1: default_crops = st.session_state.config['leafmachine']['cropped_components'].get('save_cropped_annotations', ['leaf_whole']) st.write("Prior to transcription, use LeafMachine2 to crop all labels from input images to create label collages for each specimen image. (Requires GPU)") st.session_state.config['leafmachine']['use_RGB_label_images'] = st.checkbox("Use LeafMachine2 label collage for transcriptions", st.session_state.config['leafmachine'].get('use_RGB_label_images', False)) st.session_state.config['leafmachine']['cropped_components']['save_cropped_annotations'] = st.multiselect("Components to crop", ['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights', 'leaf_whole', 'leaf_partial', 'leaflet', 'seed_fruit_one', 'seed_fruit_many', 'flower_one', 'flower_many', 'bud','specimen','roots','wood'],default=default_crops) with col_cropped_2: ba = os.path.join(st.session_state.dir_home,'demo', 'ba','ba2.png') image = Image.open(ba) st.image(image, caption='LeafMachine2 Collage', output_format = "PNG") with col_ocr_1: st.write('This will plot bounding boxes around all text that Google Vision was able to detect. If there are no boxes around text, then the OCR failed, so that missing text will not be seen by the LLM when it is creating the JSON object. 
The created image will be viewable in the VoucherVisionEditor.') st.session_state.config['leafmachine']['do_create_OCR_helper_image'] = st.checkbox("Create image showing an overlay of the OCR detections", st.session_state.config['leafmachine'].get('do_create_OCR_helper_image', False)) with col_ocr_2: ocr = os.path.join(st.session_state.dir_home,'demo', 'ba','ocr.png') image_ocr = Image.open(ocr) st.image(image_ocr, caption='OCR Overlay Images', output_format = "PNG") def content_tab_component(): st.header('Archival Components') ACD_version = st.selectbox("Archival Component Detector (ACD) Version", ["Version 2.1", "Version 2.2"]) ACD_confidence_default = int(st.session_state.config['leafmachine']['archival_component_detector']['minimum_confidence_threshold'] * 100) ACD_confidence = st.number_input("ACD Confidence Threshold (%)", min_value=0, max_value=100,value=ACD_confidence_default) st.session_state.config['leafmachine']['archival_component_detector']['minimum_confidence_threshold'] = float(ACD_confidence/100) st.session_state.config['leafmachine']['archival_component_detector']['do_save_prediction_overlay_images'] = st.checkbox("Save Archival Prediction Overlay Images", st.session_state.config['leafmachine']['archival_component_detector'].get('do_save_prediction_overlay_images', True)) st.session_state.config['leafmachine']['archival_component_detector']['ignore_objects_for_overlay'] = st.multiselect("Hide Archival Components in Prediction Overlay Images", ['ruler', 'barcode','label', 'colorcard','map','envelope','photo','attached_item','weights',], default=[]) # Depending on the selected version, set the configuration if ACD_version == "Version 2.1": st.session_state.config['leafmachine']['archival_component_detector']['detector_type'] = 'Archival_Detector' st.session_state.config['leafmachine']['archival_component_detector']['detector_version'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_iteration'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_weights'] = 'best.pt' elif ACD_version == "Version 2.2": #TODO update this to version 2.2 st.session_state.config['leafmachine']['archival_component_detector']['detector_type'] = 'Archival_Detector' st.session_state.config['leafmachine']['archival_component_detector']['detector_version'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_iteration'] = 'PREP_final' st.session_state.config['leafmachine']['archival_component_detector']['detector_weights'] = 'best.pt' def content_tab_processing(): st.header('Processing Options') col_processing_1, col_processing_2 = st.columns([2,2,]) with col_processing_1: st.subheader('Compute Options') st.session_state.config['leafmachine']['project']['num_workers'] = st.number_input("Number of CPU workers", value=st.session_state.config['leafmachine']['project'].get('num_workers', 1), disabled=True) st.session_state.config['leafmachine']['project']['batch_size'] = st.number_input("Batch size", value=st.session_state.config['leafmachine']['project'].get('batch_size', 500), help='Sets the batch size for the LeafMachine2 cropping. 
If computer RAM is filled, lower this value to ~100.') with col_processing_2: st.subheader('Misc') st.session_state.config['leafmachine']['project']['prefix_removal'] = st.text_input("Remove prefix from catalog number", st.session_state.config['leafmachine']['project'].get('prefix_removal', '')) st.session_state.config['leafmachine']['project']['suffix_removal'] = st.text_input("Remove suffix from catalog number", st.session_state.config['leafmachine']['project'].get('suffix_removal', '')) st.session_state.config['leafmachine']['project']['catalog_numerical_only'] = st.checkbox("Require 'Catalog Number' to be numerical only", st.session_state.config['leafmachine']['project'].get('catalog_numerical_only', True)) ### Logging and Image Validation - col_v1 st.header('Logging and Image Validation') col_v1, col_v2 = st.columns(2) with col_v1: st.session_state.config['leafmachine']['do']['check_for_illegal_filenames'] = st.checkbox("Check for illegal filenames", st.session_state.config['leafmachine']['do'].get('check_for_illegal_filenames', True)) st.session_state.config['leafmachine']['do']['check_for_corrupt_images_make_vertical'] = st.checkbox("Check for corrupt images", st.session_state.config['leafmachine']['do'].get('check_for_corrupt_images_make_vertical', True)) st.session_state.config['leafmachine']['print']['verbose'] = st.checkbox("Print verbose", st.session_state.config['leafmachine']['print'].get('verbose', True)) st.session_state.config['leafmachine']['print']['optional_warnings'] = st.checkbox("Show optional warnings", st.session_state.config['leafmachine']['print'].get('optional_warnings', True)) with col_v2: log_level = st.session_state.config['leafmachine']['logging'].get('log_level', None) log_level_display = log_level if log_level is not None else 'default' selected_log_level = st.selectbox("Logging Level", ['default', 'DEBUG', 'INFO', 'WARNING', 'ERROR'], index=['default', 'DEBUG', 'INFO', 'WARNING', 'ERROR'].index(log_level_display)) if selected_log_level == 'default': st.session_state.config['leafmachine']['logging']['log_level'] = None else: st.session_state.config['leafmachine']['logging']['log_level'] = selected_log_level def content_tab_domain(): st.header('Embeddings Database') col_emb_1, col_emb_2 = st.columns([4,2]) with col_emb_1: st.markdown( """ VoucherVision includes the option of using domain knowledge inside of the dynamically generated prompts. The OCR text is queried against a database of existing label transcriptions. The most similar existing transcriptions act as an example of what the LLM should emulate and are shown to the LLM as JSON objects. VoucherVision uses cosine similarity search to return the most similar existing transcription. - Note: Using domain knowledge may increase the chance that foreign text is included in the final transcription - Disabling this feature will show the LLM multiple examples of an empty JSON skeleton structure instead - Enabling this option requires a GPU with at least 8GB of VRAM - The domain knowledge files can be located in the directory "../VoucherVision/domain_knowledge". On first run the embeddings database must be created, which takes time. If the database creation runs each time you use VoucherVision, then something is wrong. 
""" ) st.write(f"Domain Knowledge is only available for the following prompts:") for available_prompts in PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE: st.markdown(f"- {available_prompts}") if st.session_state.config['leafmachine']['project']['prompt_version'] in PROMPTS_THAT_NEED_DOMAIN_KNOWLEDGE: st.session_state.config['leafmachine']['project']['use_domain_knowledge'] = st.checkbox("Use domain knowledge", True, disabled=True) else: st.session_state.config['leafmachine']['project']['use_domain_knowledge'] = st.checkbox("Use domain knowledge", False, disabled=True) st.write("") if st.session_state.config['leafmachine']['project']['use_domain_knowledge']: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', '')) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False)) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', '')) else: st.session_state.config['leafmachine']['project']['embeddings_database_name'] = st.text_input("Embeddings database name (only use underscores)", st.session_state.config['leafmachine']['project'].get('embeddings_database_name', ''), disabled=True) st.session_state.config['leafmachine']['project']['build_new_embeddings_database'] = st.checkbox("Build *new* embeddings database", st.session_state.config['leafmachine']['project'].get('build_new_embeddings_database', False), disabled=True) st.session_state.config['leafmachine']['project']['path_to_domain_knowledge_xlsx'] = st.text_input("Path to domain knowledge CSV file (will be used to create new embeddings database)", st.session_state.config['leafmachine']['project'].get('path_to_domain_knowledge_xlsx', ''), disabled=True) def render_expense_report_summary(): expense_summary = st.session_state.expense_summary expense_report = st.session_state.expense_report st.header('Expense Report Summary') if expense_summary: st.metric(label="Total Cost", value=f"${round(expense_summary['total_cost_sum'], 4):,}") col1, col2 = st.columns(2) # Run count and total costs with col1: st.metric(label="Run Count", value=expense_summary['run_count']) st.metric(label="Tokens In", value=f"{expense_summary['tokens_in_sum']:,}") # Token information with col2: st.metric(label="Total Images", value=expense_summary['n_images_sum']) st.metric(label="Tokens Out", value=f"{expense_summary['tokens_out_sum']:,}") # Calculate cost proportion per image for each API version st.subheader('Average Cost per Image by API Version') cost_labels = [] cost_values = [] total_images = 0 cost_per_image_dict = {} # Iterate through the expense report to accumulate costs and image counts for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] n_images = row['n_images'] total_images += n_images # Keep track of total images processed if api_version not in cost_per_image_dict: cost_per_image_dict[api_version] = {'total_cost': 0, 'n_images': 0} cost_per_image_dict[api_version]['total_cost'] += total_cost cost_per_image_dict[api_version]['n_images'] += n_images api_versions = 
list(cost_per_image_dict.keys()) colors = [COLORS_EXPENSE_REPORT[version] if version in COLORS_EXPENSE_REPORT else '#DDDDDD' for version in api_versions] # Calculate the cost per image for each API version for version, cost_data in cost_per_image_dict.items(): total_cost = cost_data['total_cost'] n_images = cost_data['n_images'] # Calculate the cost per image for this version cost_per_image = total_cost / n_images if n_images > 0 else 0 cost_labels.append(version) cost_values.append(cost_per_image) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_values, hole=.3)]) # Update traces for custom text in hoverinfo, displaying cost with a dollar sign and two decimal places cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${value:.2f}" for value in cost_values], # Formats the cost as a string with a dollar sign and two decimals textinfo='percent+label', hoverinfo='label+percent+text' # Adds custom text (formatted cost) to the hover information ) st.plotly_chart(cost_pie_chart, use_container_width=True) st.subheader('Proportion of Total Cost by API Version') cost_labels = [] cost_proportions = [] total_cost_by_version = {} # Sum the total cost for each API version for index, row in expense_report.iterrows(): api_version = row['api_version'] total_cost = row['total_cost'] if api_version not in total_cost_by_version: total_cost_by_version[api_version] = 0 total_cost_by_version[api_version] += total_cost # Calculate the combined total cost for all versions combined_total_cost = sum(total_cost_by_version.values()) # Calculate the proportion of total cost for each API version for version, total_cost in total_cost_by_version.items(): proportion = (total_cost / combined_total_cost) * 100 if combined_total_cost > 0 else 0 cost_labels.append(version) cost_proportions.append(proportion) # Generate the pie chart cost_pie_chart = go.Figure(data=[go.Pie(labels=cost_labels, values=cost_proportions, hole=.3)]) # Update traces for custom text in hoverinfo cost_pie_chart.update_traces( marker=dict(colors=colors), text=[f"${cost:.2f}" for cost in total_cost_by_version.values()], # This will format the cost to 2 decimal places textinfo='percent+label', hoverinfo='label+percent+text' # This tells Plotly to show the label, percent, and custom text (cost) on hover ) st.plotly_chart(cost_pie_chart, use_container_width=True) # API version usage percentages pie chart st.subheader('Runs by API Version') api_versions = list(expense_summary['api_version_percentages'].keys()) percentages = [expense_summary['api_version_percentages'][version] for version in api_versions] pie_chart = go.Figure(data=[go.Pie(labels=api_versions, values=percentages, hole=.3)]) pie_chart.update_layout(margin=dict(t=0, b=0, l=0, r=0)) pie_chart.update_traces(marker=dict(colors=colors),) st.plotly_chart(pie_chart, use_container_width=True) else: st.error('No expense report data available.') def sidebar_content(): if not os.path.exists(os.path.join(st.session_state.dir_home,'expense_report')): validate_dir(os.path.join(st.session_state.dir_home,'expense_report')) expense_report_path = os.path.join(st.session_state.dir_home, 'expense_report', 'expense_report.csv') if os.path.exists(expense_report_path): # File exists, proceed with summarization
st.session_state.expense_summary, st.session_state.expense_report = summarize_expense_report(expense_report_path)
12
2023-10-30 23:25:20+00:00
16k
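The VoucherVision record above explains that, when domain knowledge is enabled, the OCR text is queried against a database of existing label transcriptions with cosine-similarity search and the closest matches are shown to the LLM as example JSON objects. The sketch below is only a minimal illustration of that retrieval step, assuming generic embedding vectors; it is not VoucherVision's embeddings-database code, and the commented-out names (`embed`, `reference_transcriptions`) are hypothetical.

```python
import numpy as np

def cosine_top_k(query_vec: np.ndarray, reference_vecs: np.ndarray, k: int = 3) -> np.ndarray:
    """Return the indices of the k reference vectors most similar to the query."""
    q = query_vec / np.linalg.norm(query_vec)
    refs = reference_vecs / np.linalg.norm(reference_vecs, axis=1, keepdims=True)
    sims = refs @ q                        # cosine similarity against every stored transcription
    return np.argsort(sims)[::-1][:k]      # highest similarity first

# Hypothetical usage: `embed` stands in for whatever embedding model is used and
# `reference_transcriptions` for the stored domain-knowledge records.
# query_vec = embed(ocr_text)
# top_idx = cosine_top_k(query_vec, reference_vecs, k=3)
# prompt_examples = [reference_transcriptions[i] for i in top_idx]
```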
medsagou/massar-direction-sagoubot
main.py
[ { "identifier": "C_File", "path": "utilities/Class_Files.py", "snippet": "class C_File():\n #____________________________________________________________________________________________________________________________________________________________\n # Le constructeur d'une instance d'un fichier\...
import tkinter as tk import customtkinter import time import os import threading import logging import sys from tkinter import filedialog from PIL import Image from validate_email import validate_email from utilities import C_File, C_Dossier from dotenv import set_key, load_dotenv from absence_app import Read_Db from absence_app import Absence from Interaction_browser import Massar_Direction_Sagou
11,129
self.sideBar_logo = customtkinter.CTkLabel(self.sidebar_frame, text="", image=self.main_logo_image) self.sideBar_logo.grid(row=5, column=0, padx=20, pady=20) self.entry_default_bordercolor = customtkinter.CTkEntry(self).cget("border_color") # self.logo_label = customtkinter.CTkLabel(self.sidebar_frame, text="SagouBot", font=customtkinter.CTkFont(size=40, weight="bold")) # self.logo_label.grid(row=1, column=0, padx=20, pady=(20, 10)) self.generate_list_menu_button_event() # Console (Text area) self.console_text = customtkinter.CTkTextbox(self, height=200, width=400, fg_color="gray1") self.console_text.insert("0.0", "CONSOLE") self.console_text.insert(F"{len('CONSOLE')}.0", "--------" * 28) self.console_text.configure(state="disabled") self.console_text.grid(row=1, column=1, padx=(20, 20), pady=(5, 15), sticky="nsew") self.console_text.tag_config("error", foreground="red") self.console_text.tag_config("note", foreground="orange") self.console_text.tag_config("successes", foreground="blue") # self.generate_progress_bar() # Progress Bar # progress_bar = customtkinter.CTkProgressBar(self, mode='determinate') # progress_bar.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") # # Button to trigger updates # update_button = customtkinter.CTkButton(self, text="Start Processing", command=()) # update_button.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") def high_school_switch(self): state = self.high_school_options.get() options = [self.TCS, self.TCSF, self.TCLSH, self.BACSC, self.BACSH, self.BACSE, self.BACSVT, self.BACSH2] if state: for option in options: option.configure(state="normal") else: for option in options: option.configure(state="disabled") return def college_switch(self): state = self.college_options.get() if state: self.college_generale.configure(state="normal") self.college_aspeb.configure(state="normal") self.college_inter.configure(state="normal") else: self.college_generale.configure(state="disabled") self.college_aspeb.configure(state="disabled") self.college_inter.configure(state="disabled") def college_label_error(self): current_text = self.label_college.cget("text") self.label_college.configure(text=current_text.replace("*", "") + "*", text_color="red") return def high_school_label_eroor(self): current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_label_high_college(self): current_text1 = self.label_college.cget("text") current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", ""), text_color="gray90") self.label_college.configure(text=current_text1.replace("*", ""), text_color="gray90") def label_data_file_error(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", "") + "*", text_color="red") return def label_template_file_error(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_error1(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", ""), text_color="gray90") return def reset_error2(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", ""), text_color="gray90") return def directory_error(self): current_text = self.label_output_folder.cget("text") 
self.label_output_folder.configure(text=current_text + "*", text_color="red") return def reset_error3(self): current_text = self.label_output_folder.cget("text") self.label_output_folder.configure(text=current_text.replace("*", ""), text_color="gray90") return def go_to_review2(self): if self.email_entry.get() == "" or self.password_entry.get() == "" or not self.validate_path(self.entry_path_absence) or not self.check_terms_and_condition.get(): if self.email_entry.get() == "": self.error_label(self.label_email_entry) self.entry_error(self.email_entry) if len(self.password_entry.get()) < 8: self.error_label(self.label_password_entry) self.entry_error(self.password_entry) if not self.validate_path(self.entry_path_absence): self.error_label(self.label_absence_data_file) self.entry_error(self.entry_path_absence) if not self.check_terms_and_condition.get(): self.check_terms_and_condition.configure(border_color="red", text_color="red") self.error_label(self.label_terms) else:
# https://stackoverflow.com/questions/31836104/pyinstaller-and-onefile-how-to-include-an-image-in-the-exe-file def resource_path(relative_path): """ Get absolute path to resource, works for dev and for PyInstaller """ try: # PyInstaller creates a temp folder and stores path in _MEIPASS base_path = sys._MEIPASS2 except Exception: base_path = os.path.abspath(".") return os.path.join(base_path, relative_path) logging.basicConfig(filename='app.log', level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s') customtkinter.set_appearance_mode("Dark") # Modes: "System" (standard), "Dark", "Light" customtkinter.set_default_color_theme("dark-blue") # Themes: "blue" (standard), "green", "dark-blue" dirPath = os.path.dirname(os.path.realpath(__file__)) class App(customtkinter.CTk): def __init__(self): super().__init__() self.tabview_generate_lists = None self.tabview_fill_bot= None self.generate_list_menu = None self.about_us_text = None self.fill_absence_menu = None self.try_again_generate = False self.try_again_fill = False self.progressbar_1 = None image_path = resource_path("images") self.main_logo_image = customtkinter.CTkImage( light_image=Image.open(os.path.join(image_path, "logo_black.png")), dark_image=Image.open(os.path.join(image_path, "logo_white.png")), size=(200,200)) self.about_us_image = customtkinter.CTkImage( light_image=Image.open(os.path.join(image_path, "logo_black.png")), dark_image=Image.open(os.path.join(image_path, "logo_white.png")), size=(150, 150)) # self.main_logo_photo = ImageTk.PhotoImage(self.main_logo_image) # configure window self.title("SagouBot Massar Direction") self.iconbitmap(resource_path("icon.ico")) self.geometry(f"{1100}x{580}") # configure grid layout (4x4) self.grid_columnconfigure(1, weight=1) self.grid_columnconfigure((2, 3), weight=0) self.grid_rowconfigure((0, 1, 2), weight=1) # create sidebar frame with widgets self.sidebar_frame = customtkinter.CTkFrame(self, width=200, corner_radius=0) self.sidebar_frame.grid(row=0, column=0, rowspan=4, sticky="nsew") self.sidebar_frame.grid_rowconfigure(5, weight=1) self.sidebar_frame.grid(row=0, column=0) self.sideBar_logo = customtkinter.CTkLabel(self.sidebar_frame, text="", image=self.main_logo_image) self.sideBar_logo.grid(row=5, column=0, padx=20, pady=20) self.entry_default_bordercolor = customtkinter.CTkEntry(self).cget("border_color") # self.logo_label = customtkinter.CTkLabel(self.sidebar_frame, text="SagouBot", font=customtkinter.CTkFont(size=40, weight="bold")) # self.logo_label.grid(row=1, column=0, padx=20, pady=(20, 10)) self.generate_list_menu_button_event() # Console (Text area) self.console_text = customtkinter.CTkTextbox(self, height=200, width=400, fg_color="gray1") self.console_text.insert("0.0", "CONSOLE") self.console_text.insert(F"{len('CONSOLE')}.0", "--------" * 28) self.console_text.configure(state="disabled") self.console_text.grid(row=1, column=1, padx=(20, 20), pady=(5, 15), sticky="nsew") self.console_text.tag_config("error", foreground="red") self.console_text.tag_config("note", foreground="orange") self.console_text.tag_config("successes", foreground="blue") # self.generate_progress_bar() # Progress Bar # progress_bar = customtkinter.CTkProgressBar(self, mode='determinate') # progress_bar.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") # # Button to trigger updates # update_button = customtkinter.CTkButton(self, text="Start Processing", command=()) # update_button.grid(row=1, column=1, padx=(20, 20), pady=(5, 0), sticky="nsew") def 
high_school_switch(self): state = self.high_school_options.get() options = [self.TCS, self.TCSF, self.TCLSH, self.BACSC, self.BACSH, self.BACSE, self.BACSVT, self.BACSH2] if state: for option in options: option.configure(state="normal") else: for option in options: option.configure(state="disabled") return def college_switch(self): state = self.college_options.get() if state: self.college_generale.configure(state="normal") self.college_aspeb.configure(state="normal") self.college_inter.configure(state="normal") else: self.college_generale.configure(state="disabled") self.college_aspeb.configure(state="disabled") self.college_inter.configure(state="disabled") def college_label_error(self): current_text = self.label_college.cget("text") self.label_college.configure(text=current_text.replace("*", "") + "*", text_color="red") return def high_school_label_eroor(self): current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_label_high_college(self): current_text1 = self.label_college.cget("text") current_text = self.label_high_school.cget("text") self.label_high_school.configure(text=current_text.replace("*", ""), text_color="gray90") self.label_college.configure(text=current_text1.replace("*", ""), text_color="gray90") def label_data_file_error(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", "") + "*", text_color="red") return def label_template_file_error(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", "") + "*", text_color="red") return def reset_error1(self): current_text = self.label_data_file.cget("text") self.label_data_file.configure(text=current_text.replace("*", ""), text_color="gray90") return def reset_error2(self): current_text = self.label_template_entry.cget("text") self.label_template_entry.configure(text=current_text.replace("*", ""), text_color="gray90") return def directory_error(self): current_text = self.label_output_folder.cget("text") self.label_output_folder.configure(text=current_text + "*", text_color="red") return def reset_error3(self): current_text = self.label_output_folder.cget("text") self.label_output_folder.configure(text=current_text.replace("*", ""), text_color="gray90") return def go_to_review2(self): if self.email_entry.get() == "" or self.password_entry.get() == "" or not self.validate_path(self.entry_path_absence) or not self.check_terms_and_condition.get(): if self.email_entry.get() == "": self.error_label(self.label_email_entry) self.entry_error(self.email_entry) if len(self.password_entry.get()) < 8: self.error_label(self.label_password_entry) self.entry_error(self.password_entry) if not self.validate_path(self.entry_path_absence): self.error_label(self.label_absence_data_file) self.entry_error(self.entry_path_absence) if not self.check_terms_and_condition.get(): self.check_terms_and_condition.configure(border_color="red", text_color="red") self.error_label(self.label_terms) else:
paths = C_File(resource_path("db/paths.txt"))
0
2023-10-29 18:10:27+00:00
16k
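The `resource_path` helper in the record above is the how-to referenced by its Stack Overflow link: locating files that PyInstaller bundles into a one-file executable, both in development and at runtime. As a point of comparison only (not a change to the repository code), current PyInstaller exposes the temporary extraction directory as `sys._MEIPASS`, so a minimal equivalent helper can be sketched as:

```python
import os
import sys

def resource_path(relative_path: str) -> str:
    """Resolve a bundled file both in development and inside a PyInstaller one-file build."""
    # PyInstaller extracts the bundle to a temporary folder and records it in sys._MEIPASS;
    # during normal development that attribute does not exist, so fall back to the cwd.
    base_path = getattr(sys, "_MEIPASS", os.path.abspath("."))
    return os.path.join(base_path, relative_path)

# e.g. icon = resource_path("icon.ico")
```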
hsma-programme/Teaching_DES_Concepts_Streamlit
pages/2_🛏️_Using_A_Simple_Resource.py
[ { "identifier": "add_logo", "path": "helper_functions.py", "snippet": "def add_logo():\n '''\n Add a logo at the top of the page navigation sidebar\n\n Approach written by blackary on\n https://discuss.streamlit.io/t/put-logo-and-title-above-on-top-of-page-navigation-in-sidebar-of-multipage-...
import asyncio import gc import pandas as pd import plotly.express as px import plotly.graph_objects as go import streamlit as st from helper_functions import add_logo, mermaid, center_running from model_classes import Scenario, multiple_replications from distribution_classes import Normal from output_animation_functions import reshape_for_animations, animate_activity_log
10,892
10, 300, step=5, value=120) with col2: consult_time = st.slider("⏱️ How long (in minutes) does a consultation take on average?", 5, 150, step=5, value=50) consult_time_sd = st.slider("🕔 🕣 How much (in minutes) does the time for a consultation usually vary by?", 5, 30, step=5, value=10) norm_dist = Normal(consult_time, consult_time_sd, random_seed=seed) norm_fig = px.histogram(norm_dist.sample(size=2500), height=150) norm_fig.update_layout(yaxis_title="", xaxis_title="Consultation Time<br>(Minutes)") norm_fig.update_xaxes(tick0=0, dtick=10, range=[0, # max(norm_dist.sample(size=2500)) 240 ]) norm_fig.layout.update(showlegend=False, margin=dict(l=0, r=0, t=0, b=0)) st.markdown("#### Consultation Time Distribution") st.plotly_chart(norm_fig, use_container_width=True, config = {'displayModeBar': False}) # A user must press a streamlit button to run the model button_run_pressed = st.button("Run simulation") if button_run_pressed: # add a spinner and then display success box with st.spinner('Simulating the minor injuries unit...'): args = Scenario( random_number_set=seed, n_cubicles_1=nurses, override_arrival_rate=True, manual_arrival_rate=60/(mean_arrivals_per_day/24), model="simplest", trauma_treat_mean=consult_time, trauma_treat_var=consult_time_sd ) await asyncio.sleep(0.1) # run multiple replications of experment detailed_outputs = multiple_replications( args, n_reps=n_reps, rc_period=run_time_days*60*24, return_detailed_logs=True ) results = pd.concat([detailed_outputs[i]['results']['summary_df'].assign(rep= i+1) for i in range(n_reps)]).set_index('rep') full_event_log = pd.concat([detailed_outputs[i]['results']['full_event_log'].assign(rep= i+1) for i in range(n_reps)]) del detailed_outputs gc.collect() animation_dfs_log = reshape_for_animations( full_event_log=full_event_log[ (full_event_log['rep']==1) & ((full_event_log['event_type']=='queue') | (full_event_log['event_type']=='resource_use') | (full_event_log['event_type']=='arrival_departure')) & # Limit to first 5 days (full_event_log['time'] <= 60*24*5) ], every_x_minutes=5 )['full_patient_df'] del full_event_log gc.collect() if button_run_pressed: tab1, tab2, tab3 = st.tabs( ["Animated Log", "Simple Graphs", "Advanced Graphs"] ) # st.markdown(""" # You can click on the three tabs below ("Animated Log", "Simple Graphs", and "Advanced Graphs") to view different outputs from the model. # """) with tab1: # st.write(results) st.subheader("Animated Model Output") with st.spinner('Generating the animated patient log...'): event_position_df = pd.DataFrame([ {'event': 'arrival', 'x': 50, 'y': 300, 'label': "Arrival" }, # Triage - minor and trauma {'event': 'treatment_wait_begins', 'x': 190, 'y': 170, 'label': "Waiting for Treatment" }, {'event': 'treatment_begins', 'x': 190, 'y': 110, 'resource':'n_cubicles_1', 'label': "Being Treated" }, {'event': 'exit', 'x': 270, 'y': 70, 'label': "Exit"} ]) st.markdown( """ The plot below shows a snapshot every 5 minutes of the position of everyone in our emergency department model. The buttons to the left of the slider below the plot can be used to start and stop the animation. Clicking on the bar below the plot and dragging your cursor to the left or right allows you to rapidly jump through to a different time in the simulation. Only the first replication of the simulation is shown. """ )
''' A Streamlit application based on Monks and Allows users to interact with an increasingly more complex treatment simulation ''' # Set page parameters st.set_page_config( page_title="Using a Simple Resource", layout="wide", initial_sidebar_state="expanded", ) # Add the logo add_logo() center_running() # Import the stylesheet with open("style.css") as css: st.markdown( f'<style>{css.read()}</style>' , unsafe_allow_html= True) st.title("Discrete Event Simulation Playground") st.subheader("Using a Simple Resource: Sending Patients to be Treated") gc.collect() # tab1, tab2, tab3 = st.tabs(["Introduction", "Exercise", "Playground"]) tab1, tab2, tab3 = st.tabs(["Playground", "Exercise", "Information"]) with tab3: st.markdown( """ Now, it's all well and good having patients arrive, but at the moment there is no-one and nowhere to see them! We need to add our first resource. Resources exist inside our simulation, and can be nurses, rooms, ambulances - whatever we need them to be. When someone reaches the front of the queue, they will be allocated to a resource that is currently free. They will hold onto this resource for as long as they need it, and then they will let go of it and move on to the next part of the process. This means resources can continue to be reused again and again in the system, unlike our arrivals. So for now, let's make it so that when someone arrives, they need to be treated, and to do this they will need a resource. For now, we're keeping it simple - let's assume each nurse has a room that they treat people in. They always stay in this room, and as soon as a patient has finished being treated, the patient will leave and the nurse (and room) will become available again. This means we just have one type of resource to worry about. """ ) mermaid(height=175, code= """ %%{ init: { 'flowchart': { 'curve': 'step' } } }%% %%{ init: { 'theme': 'base', 'themeVariables': {'lineColor': '#b4b4b4'} } }%% flowchart LR A[Arrival]----> B[Treatment] B -.-> C([Nurse/Cubicle\n<b>RESOURCE</b>]) C -.-> B B ----> F[Discharge] classDef default font-size:18pt,font-family:lexend; linkStyle default stroke:white; """ ) st.markdown( """ For now, we'll assume all of our patients are roughly equally injured - but there might still be some variation in how long it takes to treat them. Some might need a few stitches, some might just need a quick bit of advice. This time, we're going to sample from a different distribution - the normal distribution. A few people won't take very long to fix up, while a few might take quite a long time - but most of the people will take an amount of time that's somewhere in the middle. """) norm_dist_example = Normal(mean=50, sigma=10) norm_fig_example = px.histogram(norm_dist_example.sample(size=5000), height=300) norm_fig_example.update_layout(yaxis_title="", xaxis_title="Consultation Time<br>(Minutes)") norm_fig_example.layout.update(showlegend=False, margin=dict(l=0, r=0, t=0, b=0)) st.plotly_chart(norm_fig_example, use_container_width=True) st.markdown(""" We're going to start measuring a few more things now - how much of each resource's time is spent with patients **(known as resource utilisation)** - how long each patient waits before they get allocated a resource - what percentage of patients meet a target of being treated within 2 hours of turning up to our treatment centre """) with tab2: st.markdown( """ ### Things to Try Out - Try changing the sliders for consultation time and variation in consultation time. 
What happens to the shape of the graph below the sliders? --- - Put the consulation times back to the default (50 minutes length on average, 10 minutes of variation). Run the model and take a look at the animated flow of patients through the system. What do you notice about - the number of nurses in use? Do they ever get any breaks? - the size of the queue for treatment at different times - does it get bigger and smaller at different times, or just keep growing? --- - What happens when you play around with the number of nurses we have available? - Look at the queues, but look at the resource utilisation too. The resource utilisation tells us how much of the time each nurse is busy rather than waiting for a patient to turn up. - Can you find a middle ground where the nurse is being used a good amount without the queues building up? --- """) with st.expander("Click here for bonus exercises"): st.markdown( """ - What happens to the average utilisation and waits when you keep the number of nurses the same but change - the average length of time it takes each patient to be seen? - the variability in the length of time it takes each patient to be seen? """ ) with tab1: col1, col2 = st.columns(2) with col1: nurses = st.slider("👨‍⚕️👩‍⚕️ How Many Rooms/Nurses Are Available?", 1, 15, step=1, value=4) seed = st.slider("🎲 Set a random number for the computer to start from", 1, 1000, step=1, value=42) with st.expander("Previous Parameters"): st.markdown("If you like, you can edit these parameters too!") n_reps = st.slider("🔁 How many times should the simulation run?", 1, 30, step=1, value=6) run_time_days = st.slider("🗓️ How many days should we run the simulation for each time?", 1, 40, step=1, value=10) mean_arrivals_per_day = st.slider("🧍 How many patients should arrive per day on average?", 10, 300, step=5, value=120) with col2: consult_time = st.slider("⏱️ How long (in minutes) does a consultation take on average?", 5, 150, step=5, value=50) consult_time_sd = st.slider("🕔 🕣 How much (in minutes) does the time for a consultation usually vary by?", 5, 30, step=5, value=10) norm_dist = Normal(consult_time, consult_time_sd, random_seed=seed) norm_fig = px.histogram(norm_dist.sample(size=2500), height=150) norm_fig.update_layout(yaxis_title="", xaxis_title="Consultation Time<br>(Minutes)") norm_fig.update_xaxes(tick0=0, dtick=10, range=[0, # max(norm_dist.sample(size=2500)) 240 ]) norm_fig.layout.update(showlegend=False, margin=dict(l=0, r=0, t=0, b=0)) st.markdown("#### Consultation Time Distribution") st.plotly_chart(norm_fig, use_container_width=True, config = {'displayModeBar': False}) # A user must press a streamlit button to run the model button_run_pressed = st.button("Run simulation") if button_run_pressed: # add a spinner and then display success box with st.spinner('Simulating the minor injuries unit...'): args = Scenario( random_number_set=seed, n_cubicles_1=nurses, override_arrival_rate=True, manual_arrival_rate=60/(mean_arrivals_per_day/24), model="simplest", trauma_treat_mean=consult_time, trauma_treat_var=consult_time_sd ) await asyncio.sleep(0.1) # run multiple replications of experment detailed_outputs = multiple_replications( args, n_reps=n_reps, rc_period=run_time_days*60*24, return_detailed_logs=True ) results = pd.concat([detailed_outputs[i]['results']['summary_df'].assign(rep= i+1) for i in range(n_reps)]).set_index('rep') full_event_log = pd.concat([detailed_outputs[i]['results']['full_event_log'].assign(rep= i+1) for i in range(n_reps)]) del detailed_outputs gc.collect() 
animation_dfs_log = reshape_for_animations( full_event_log=full_event_log[ (full_event_log['rep']==1) & ((full_event_log['event_type']=='queue') | (full_event_log['event_type']=='resource_use') | (full_event_log['event_type']=='arrival_departure')) & # Limit to first 5 days (full_event_log['time'] <= 60*24*5) ], every_x_minutes=5 )['full_patient_df'] del full_event_log gc.collect() if button_run_pressed: tab1, tab2, tab3 = st.tabs( ["Animated Log", "Simple Graphs", "Advanced Graphs"] ) # st.markdown(""" # You can click on the three tabs below ("Animated Log", "Simple Graphs", and "Advanced Graphs") to view different outputs from the model. # """) with tab1: # st.write(results) st.subheader("Animated Model Output") with st.spinner('Generating the animated patient log...'): event_position_df = pd.DataFrame([ {'event': 'arrival', 'x': 50, 'y': 300, 'label': "Arrival" }, # Triage - minor and trauma {'event': 'treatment_wait_begins', 'x': 190, 'y': 170, 'label': "Waiting for Treatment" }, {'event': 'treatment_begins', 'x': 190, 'y': 110, 'resource':'n_cubicles_1', 'label': "Being Treated" }, {'event': 'exit', 'x': 270, 'y': 70, 'label': "Exit"} ]) st.markdown( """ The plot below shows a snapshot every 5 minutes of the position of everyone in our emergency department model. The buttons to the left of the slider below the plot can be used to start and stop the animation. Clicking on the bar below the plot and dragging your cursor to the left or right allows you to rapidly jump through to a different time in the simulation. Only the first replication of the simulation is shown. """ )
st.plotly_chart(animate_activity_log(
7
2023-10-26 09:57:52+00:00
16k
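The "Information" tab text in the record above explains the core discrete-event pattern: arrivals queue for a reusable nurse/cubicle resource, hold it for a normally distributed consultation time, then release it so it can serve the next patient. The app does this through its own `Scenario` and `multiple_replications` machinery; the sketch below is a separate, minimal `simpy` illustration of the same idea using the app's default parameters (4 nurses, roughly 120 arrivals per day, 50 ± 10 minute consultations), so every name in it is an assumption rather than the project's API.

```python
import random
import simpy

def patient(env, name, nurses, mean_consult=50.0, sd_consult=10.0):
    arrival = env.now
    with nurses.request() as req:                      # join the queue for a nurse/cubicle
        yield req                                      # wait until one is free
        wait = env.now - arrival
        consult = max(0.0, random.normalvariate(mean_consult, sd_consult))
        yield env.timeout(consult)                     # hold the resource while being treated
    print(f"{name}: waited {wait:5.1f} min, treated for {consult:5.1f} min")

def arrivals(env, nurses, mean_inter_arrival=12.0):    # 60 / (120 / 24) = 12 min between arrivals
    i = 0
    while True:
        yield env.timeout(random.expovariate(1.0 / mean_inter_arrival))
        i += 1
        env.process(patient(env, f"patient {i:03d}", nurses))

random.seed(42)
env = simpy.Environment()
nurses = simpy.Resource(env, capacity=4)               # 4 nurses, each with a room
env.process(arrivals(env, nurses))
env.run(until=60 * 24)                                 # one simulated day, in minutes
```

Resource utilisation in this toy model would then be total consultation time divided by (capacity × run length), mirroring the utilisation metric described in the exercise text.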
hyperspy/exspy
exspy/models/edsmodel.py
[ { "identifier": "_get_element_and_line", "path": "exspy/misc/eds/utils.py", "snippet": "def _get_element_and_line(xray_line):\n \"\"\"\n Returns the element name and line character for a particular X-ray line as\n a tuple.\n\n By example, if xray_line = 'Mn_Ka' this function returns ('Mn', '...
import warnings import numpy as np import math import logging import hyperspy.components1d as create_component from hyperspy.misc.utils import stash_active_state from exspy.misc.eds.utils import _get_element_and_line from hyperspy.models.model1d import Model1D from exspy.signals.eds import EDSSpectrum from exspy.misc.elements import elements as elements_db from exspy.misc.eds import utils as utils_eds from hyperspy import utils
13,163
below. Parameters ---------- energy_resolution_MnKa : float Energy resolution of Mn Ka in eV E : float Energy of the peak in keV Returns ------- float : FWHM of the peak in keV Notes ----- This method implements the equation derived by Fiori and Newbury as is documented in the following: Fiori, C. E., and Newbury, D. E. (1978). In SEM/1978/I, SEM, Inc., AFM O'Hare, Illinois, p. 401. Goldstein et al. (2003). "Scanning Electron Microscopy & X-ray Microanalysis", Plenum, third edition, p 315. """ energy2sigma_factor = 2.5 / (eV2keV * (sigma2fwhm**2)) if return_f: return lambda sig_ref: math.sqrt( abs(energy2sigma_factor * (E - E_ref) * units_factor + np.power(sig_ref, 2)) ) else: return "sqrt(abs({} * ({} - {}) * {} + sig_ref ** 2))".format( energy2sigma_factor, E, E_ref, units_factor ) def _get_offset(diff): return "x + {}".format(diff) def _get_scale(E1, E_ref1, fact): return "{} + {} * (x - {})".format(E1, fact, E_ref1) class EDSModel(Model1D): """Build and fit a model of an EDS Signal1D. Parameters ---------- spectrum : EDSSpectrum (or any EDSSpectrum subclass) instance. auto_add_lines : bool If True, automatically add Gaussians for all X-rays generated in the energy range by an element, using the edsmodel.add_family_lines method. auto_background : bool If True, adds automatically a polynomial order 6 to the model, using the edsmodel.add_polynomial_background method. Any extra arguments are passed to the Model creator. Example ------- >>> m = s.create_model() >>> m.fit() >>> m.fit_background() >>> m.calibrate_energy_axis('resolution') >>> m.calibrate_xray_lines('energy', ['Au_Ma']) >>> m.calibrate_xray_lines('sub_weight',['Mn_La'], bound=10) """ def __init__( self, spectrum, auto_background=True, auto_add_lines=True, *args, **kwargs ): Model1D.__init__(self, spectrum, *args, **kwargs) self.xray_lines = list() self.family_lines = list() end_energy = self.axes_manager.signal_axes[0].high_value self.end_energy = min(end_energy, self.signal._get_beam_energy()) self.start_energy = self.axes_manager.signal_axes[0].low_value self.background_components = list() if "dictionary" in kwargs or len(args) > 1: auto_add_lines = False auto_background = False d = args[1] if len(args) > 1 else kwargs["dictionary"] if len(d["xray_lines"]) > 0: self.xray_lines.extend([self[name] for name in d["xray_lines"]]) if len(d["background_components"]) > 0: self.background_components.extend( [self[name] for name in d["background_components"]] ) if auto_background is True: self.add_polynomial_background() if auto_add_lines is True: # Will raise an error if no elements are specified, so check: if "Sample.elements" in self.signal.metadata: self.add_family_lines() def as_dictionary(self, fullcopy=True): dic = super(EDSModel, self).as_dictionary(fullcopy) dic["xray_lines"] = [c.name for c in self.xray_lines] dic["background_components"] = [c.name for c in self.background_components] return dic @property def units_factor(self): units_name = self.axes_manager.signal_axes[0].units if units_name == "eV": return 1000.0 elif units_name == "keV": return 1.0 else: raise ValueError("Energy units, %s, not supported" % str(units_name)) @property def spectrum(self): return self._signal @spectrum.setter def spectrum(self, value):
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. from __future__ import division _logger = logging.getLogger(__name__) eV2keV = 1000.0 sigma2fwhm = 2 * math.sqrt(2 * math.log(2)) def _get_weight(element, line, weight_line=None): if weight_line is None: weight_line = elements_db[element]["Atomic_properties"]["Xray_lines"][line][ "weight" ] return "x * {}".format(weight_line) def _get_sigma(E, E_ref, units_factor, return_f=False): """ Calculates an approximate sigma value, accounting for peak broadening due to the detector, for a peak at energy E given a known width at a reference energy. The factor 2.5 is a constant derived by Fiori & Newbury as references below. Parameters ---------- energy_resolution_MnKa : float Energy resolution of Mn Ka in eV E : float Energy of the peak in keV Returns ------- float : FWHM of the peak in keV Notes ----- This method implements the equation derived by Fiori and Newbury as is documented in the following: Fiori, C. E., and Newbury, D. E. (1978). In SEM/1978/I, SEM, Inc., AFM O'Hare, Illinois, p. 401. Goldstein et al. (2003). "Scanning Electron Microscopy & X-ray Microanalysis", Plenum, third edition, p 315. """ energy2sigma_factor = 2.5 / (eV2keV * (sigma2fwhm**2)) if return_f: return lambda sig_ref: math.sqrt( abs(energy2sigma_factor * (E - E_ref) * units_factor + np.power(sig_ref, 2)) ) else: return "sqrt(abs({} * ({} - {}) * {} + sig_ref ** 2))".format( energy2sigma_factor, E, E_ref, units_factor ) def _get_offset(diff): return "x + {}".format(diff) def _get_scale(E1, E_ref1, fact): return "{} + {} * (x - {})".format(E1, fact, E_ref1) class EDSModel(Model1D): """Build and fit a model of an EDS Signal1D. Parameters ---------- spectrum : EDSSpectrum (or any EDSSpectrum subclass) instance. auto_add_lines : bool If True, automatically add Gaussians for all X-rays generated in the energy range by an element, using the edsmodel.add_family_lines method. auto_background : bool If True, adds automatically a polynomial order 6 to the model, using the edsmodel.add_polynomial_background method. Any extra arguments are passed to the Model creator. 
Example ------- >>> m = s.create_model() >>> m.fit() >>> m.fit_background() >>> m.calibrate_energy_axis('resolution') >>> m.calibrate_xray_lines('energy', ['Au_Ma']) >>> m.calibrate_xray_lines('sub_weight',['Mn_La'], bound=10) """ def __init__( self, spectrum, auto_background=True, auto_add_lines=True, *args, **kwargs ): Model1D.__init__(self, spectrum, *args, **kwargs) self.xray_lines = list() self.family_lines = list() end_energy = self.axes_manager.signal_axes[0].high_value self.end_energy = min(end_energy, self.signal._get_beam_energy()) self.start_energy = self.axes_manager.signal_axes[0].low_value self.background_components = list() if "dictionary" in kwargs or len(args) > 1: auto_add_lines = False auto_background = False d = args[1] if len(args) > 1 else kwargs["dictionary"] if len(d["xray_lines"]) > 0: self.xray_lines.extend([self[name] for name in d["xray_lines"]]) if len(d["background_components"]) > 0: self.background_components.extend( [self[name] for name in d["background_components"]] ) if auto_background is True: self.add_polynomial_background() if auto_add_lines is True: # Will raise an error if no elements are specified, so check: if "Sample.elements" in self.signal.metadata: self.add_family_lines() def as_dictionary(self, fullcopy=True): dic = super(EDSModel, self).as_dictionary(fullcopy) dic["xray_lines"] = [c.name for c in self.xray_lines] dic["background_components"] = [c.name for c in self.background_components] return dic @property def units_factor(self): units_name = self.axes_manager.signal_axes[0].units if units_name == "eV": return 1000.0 elif units_name == "keV": return 1.0 else: raise ValueError("Energy units, %s, not supported" % str(units_name)) @property def spectrum(self): return self._signal @spectrum.setter def spectrum(self, value):
if isinstance(value, EDSSpectrum):
1
2023-10-28 20:04:10+00:00
16k
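The `_get_sigma` helper in the exspy record above encodes the Fiori and Newbury peak-broadening relation cited in its docstring. Reading the constants back out of the snippet (`eV2keV = 1000`, `sigma2fwhm = 2*sqrt(2*ln 2)`, `energy2sigma_factor = 2.5 / (eV2keV * sigma2fwhm**2)`), the relation it appears to implement is the standard one below; this is an interpretation of the code, not documentation taken from exspy.

```latex
% Fiori & Newbury broadening, energies and FWHM in eV:
\mathrm{FWHM}(E)^{2} \;=\; 2.5\,\bigl(E - E_{\mathrm{ref}}\bigr) \;+\; \mathrm{FWHM}(E_{\mathrm{ref}})^{2}
% With FWHM = 2\sqrt{2\ln 2}\,\sigma and the energy axis in keV, this becomes
\sigma(E) \;=\; \sqrt{\frac{2.5}{1000\,\bigl(2\sqrt{2\ln 2}\bigr)^{2}}\,\bigl(E - E_{\mathrm{ref}}\bigr) \;+\; \sigma_{\mathrm{ref}}^{2}}
% i.e. sqrt(energy2sigma_factor * (E - E_ref) * units_factor + sig_ref**2) with units_factor = 1.
```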
swyoon/variationally-weighted-kernel-density-estimation
train.py
[ { "identifier": "find_optimal_bandwidth", "path": "KDE.py", "snippet": "def find_optimal_bandwidth(X, l_h, gpu=True, lik=True):\n l_lik = []\n for h in l_h:\n kde = KDE(h=h, gpu=gpu)\n kde.fit(X)\n p_loo = kde.p_loo()\n f_sq = kde.f_sq()\n if lik:\n li...
import torch import argparse import numpy as np from KDE import find_optimal_bandwidth from ratio import KernelRatioNaive, KernelRatioAlpha, KernelRatioGaussian from model.energy import Score_network, Weight_network, Energy from loss.bias import Laplacian from loss.sliced_score_matching import sliced_VR_score_matching from scipy.spatial.distance import pdist
10,813
data1 = torch.randn(args.num_data, args.dim) @ L.T + mean1.astype(np.float32) L = torch.linalg.cholesky(torch.tensor(Cov2.astype(np.float32))) data2 = torch.randn(args.num_data, args.dim) @ L.T + mean2.astype(np.float32) TKL = (np.trace(np.linalg.inv(Cov2) @ Cov1) + (mean2-mean1).T @ np.linalg.inv(Cov2) @ (mean2-mean1) - args.dim + np.log(np.linalg.det(Cov2)/np.linalg.det(Cov1)))/2 print(f"True KL divergence: {TKL}") data1_set = torch.utils.data.TensorDataset(data1) data2_set = torch.utils.data.TensorDataset(data2) total_set = torch.utils.data.TensorDataset(torch.cat([data1, data2])) data1_loader = torch.utils.data.DataLoader(data1_set, batch_size=args.batch_size, shuffle=True) data2_loader = torch.utils.data.DataLoader(data2_set, batch_size=args.batch_size, shuffle=True) total_loader = torch.utils.data.DataLoader(total_set, batch_size=args.batch_size, shuffle=True) l_h = np.linspace(0.2, 1., 20) if args.model == "KDE": opt_h1 = find_optimal_bandwidth(data1, l_h, lik=False, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2, l_h, lik=False, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 model = KernelRatioNaive(h=opt_h, gpu=gpu) model.eps = 1e-100 model.fit(data1, data2) print(f"KL divergence calculated by KDE: {model.kl()}") elif args.model == "based": opt_h1 = find_optimal_bandwidth(data1[:int(len(data1)/4)], l_h, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2[:int(len(data2)/4)], l_h, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 med_dist1 = np.median(pdist(data1)) med_dist2 = np.median(pdist(data2)) med_dist = (med_dist1 + med_dist2) / 2 model = KernelRatioGaussian(grid_sample=3000, solver='para', para_h=med_dist, para_l=0.1, h=opt_h, gpu=gpu, kmeans=False, einsum_batch=100, reg=0.1, stabilize=True, online=True) model.eps = 1e-100 model.fit(data1, data2) print(f"KL divergence calculated by VWKDE model based: {model.kl()}") elif args.model == "free": score_model_p1 = Energy(net=Score_network(input_dim=args.dim, units=[300,300], dropout=True)).to(args.device) score_model_p2 = Energy(net=Score_network(input_dim=args.dim, units=[300,300], dropout=True)).to(args.device) optimizer_sp1 = torch.optim.Adam(score_model_p1.parameters(), lr=1e-4) optimizer_sp2 = torch.optim.Adam(score_model_p2.parameters(), lr=1e-4) print("Train score models for p1 and p2") for epoch in range(args.score_epoch): loss1 = 0 loss2 = 0 for x in data1_loader: x = x[0].to(args.device) loss = sliced_VR_score_matching(score_model_p1, x) optimizer_sp1.zero_grad() loss.backward() optimizer_sp1.step() loss1 += loss.item() / len(data1_loader) for x in data2_loader: x = x[0].to(args.device) loss = sliced_VR_score_matching(score_model_p2, x) optimizer_sp2.zero_grad() loss.backward() optimizer_sp2.step() loss2 += loss.item() / len(data2_loader) if epoch % 100 == 99: print(f"Epoch: {epoch+1} | Loss1: {loss1}") print(f"Epoch: {epoch+1} | Loss2: {loss2}") score_model_p1.eval() score_model_p2.eval() p1_laplacian = Laplacian(score_model_p1) p2_laplacian = Laplacian(score_model_p2) weight_model = Energy(net=Weight_network(input_dim=args.dim, units=[128,128,128,64], dropout=False)).to(args.device) optimizer_w = torch.optim.Adam(weight_model.parameters(), lr=1e-3) print("Train a weight model") for epoch in range(args.weight_epoch): total_loss = 0 for x in total_loader: x = x[0].to(args.device).requires_grad_() output = weight_model(x) output_gradient = torch.autograd.grad( outputs=output, inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] log_p1 = score_model_p1.minus_forward(x) grad_logp1 = torch.autograd.grad( 
outputs=log_p1.view(-1, 1), inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] log_p2 = score_model_p2.minus_forward(x) grad_logp2 = torch.autograd.grad( outputs=log_p2.view(-1, 1), inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] lp_p1 = p1_laplacian.get_laplacian(x) - (grad_logp1**2).sum(1) lp_p2 = p2_laplacian.get_laplacian(x) - (grad_logp2**2).sum(1) loss = ((((output_gradient)*(-grad_logp1+grad_logp2)).sum(1) + 0.5*(lp_p1-lp_p2))**2).mean() optimizer_w.zero_grad() loss.backward() optimizer_w.step() total_loss += loss.item() / len(total_loader) if epoch % 100 == 99: print(f"Epoch: {epoch+1} | Loss: {total_loss}") weight_model.eval() opt_h1 = find_optimal_bandwidth(data1[:int(len(data1)/4)], l_h, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2[:int(len(data2)/4)], l_h, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 logWeights1 = weight_model(data1.to(args.device)).detach().cpu().numpy() logWeights2 = weight_model(data2.to(args.device)).detach().cpu().numpy()
parser = argparse.ArgumentParser() parser.add_argument('--device', type=str, default='cuda') parser.add_argument('--model', type=str, default='KDE') parser.add_argument('--dim', type=int, default=20) parser.add_argument('--score_epoch', type=int, default=500) parser.add_argument('--weight_epoch', type=int, default=200) parser.add_argument('--batch_size', type=int, default=1024) parser.add_argument('--num_data', type=int, default=1024) args = parser.parse_args() if args.device == 'cuda': gpu=True else: gpu=False mean1 = np.concatenate([np.array([0]), np.zeros((args.dim-1,))]) Cov1 = np.eye(args.dim)*np.concatenate([np.array([1.]), np.ones((args.dim-1,))]) mean2 = np.concatenate([np.sqrt([2]), np.zeros((args.dim-1,))]) Cov2 = np.eye(args.dim)*np.concatenate([np.array([1.]), np.ones((args.dim-1,))]) L = torch.linalg.cholesky(torch.tensor(Cov1.astype(np.float32))) data1 = torch.randn(args.num_data, args.dim) @ L.T + mean1.astype(np.float32) L = torch.linalg.cholesky(torch.tensor(Cov2.astype(np.float32))) data2 = torch.randn(args.num_data, args.dim) @ L.T + mean2.astype(np.float32) TKL = (np.trace(np.linalg.inv(Cov2) @ Cov1) + (mean2-mean1).T @ np.linalg.inv(Cov2) @ (mean2-mean1) - args.dim + np.log(np.linalg.det(Cov2)/np.linalg.det(Cov1)))/2 print(f"True KL divergence: {TKL}") data1_set = torch.utils.data.TensorDataset(data1) data2_set = torch.utils.data.TensorDataset(data2) total_set = torch.utils.data.TensorDataset(torch.cat([data1, data2])) data1_loader = torch.utils.data.DataLoader(data1_set, batch_size=args.batch_size, shuffle=True) data2_loader = torch.utils.data.DataLoader(data2_set, batch_size=args.batch_size, shuffle=True) total_loader = torch.utils.data.DataLoader(total_set, batch_size=args.batch_size, shuffle=True) l_h = np.linspace(0.2, 1., 20) if args.model == "KDE": opt_h1 = find_optimal_bandwidth(data1, l_h, lik=False, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2, l_h, lik=False, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 model = KernelRatioNaive(h=opt_h, gpu=gpu) model.eps = 1e-100 model.fit(data1, data2) print(f"KL divergence calculated by KDE: {model.kl()}") elif args.model == "based": opt_h1 = find_optimal_bandwidth(data1[:int(len(data1)/4)], l_h, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2[:int(len(data2)/4)], l_h, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 med_dist1 = np.median(pdist(data1)) med_dist2 = np.median(pdist(data2)) med_dist = (med_dist1 + med_dist2) / 2 model = KernelRatioGaussian(grid_sample=3000, solver='para', para_h=med_dist, para_l=0.1, h=opt_h, gpu=gpu, kmeans=False, einsum_batch=100, reg=0.1, stabilize=True, online=True) model.eps = 1e-100 model.fit(data1, data2) print(f"KL divergence calculated by VWKDE model based: {model.kl()}") elif args.model == "free": score_model_p1 = Energy(net=Score_network(input_dim=args.dim, units=[300,300], dropout=True)).to(args.device) score_model_p2 = Energy(net=Score_network(input_dim=args.dim, units=[300,300], dropout=True)).to(args.device) optimizer_sp1 = torch.optim.Adam(score_model_p1.parameters(), lr=1e-4) optimizer_sp2 = torch.optim.Adam(score_model_p2.parameters(), lr=1e-4) print("Train score models for p1 and p2") for epoch in range(args.score_epoch): loss1 = 0 loss2 = 0 for x in data1_loader: x = x[0].to(args.device) loss = sliced_VR_score_matching(score_model_p1, x) optimizer_sp1.zero_grad() loss.backward() optimizer_sp1.step() loss1 += loss.item() / len(data1_loader) for x in data2_loader: x = x[0].to(args.device) loss = sliced_VR_score_matching(score_model_p2, x) optimizer_sp2.zero_grad() loss.backward() 
optimizer_sp2.step() loss2 += loss.item() / len(data2_loader) if epoch % 100 == 99: print(f"Epoch: {epoch+1} | Loss1: {loss1}") print(f"Epoch: {epoch+1} | Loss2: {loss2}") score_model_p1.eval() score_model_p2.eval() p1_laplacian = Laplacian(score_model_p1) p2_laplacian = Laplacian(score_model_p2) weight_model = Energy(net=Weight_network(input_dim=args.dim, units=[128,128,128,64], dropout=False)).to(args.device) optimizer_w = torch.optim.Adam(weight_model.parameters(), lr=1e-3) print("Train a weight model") for epoch in range(args.weight_epoch): total_loss = 0 for x in total_loader: x = x[0].to(args.device).requires_grad_() output = weight_model(x) output_gradient = torch.autograd.grad( outputs=output, inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] log_p1 = score_model_p1.minus_forward(x) grad_logp1 = torch.autograd.grad( outputs=log_p1.view(-1, 1), inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] log_p2 = score_model_p2.minus_forward(x) grad_logp2 = torch.autograd.grad( outputs=log_p2.view(-1, 1), inputs=x, grad_outputs=torch.ones_like(output), create_graph=True, only_inputs=True )[0] lp_p1 = p1_laplacian.get_laplacian(x) - (grad_logp1**2).sum(1) lp_p2 = p2_laplacian.get_laplacian(x) - (grad_logp2**2).sum(1) loss = ((((output_gradient)*(-grad_logp1+grad_logp2)).sum(1) + 0.5*(lp_p1-lp_p2))**2).mean() optimizer_w.zero_grad() loss.backward() optimizer_w.step() total_loss += loss.item() / len(total_loader) if epoch % 100 == 99: print(f"Epoch: {epoch+1} | Loss: {total_loss}") weight_model.eval() opt_h1 = find_optimal_bandwidth(data1[:int(len(data1)/4)], l_h, gpu=gpu) opt_h2 = find_optimal_bandwidth(data2[:int(len(data2)/4)], l_h, gpu=gpu) opt_h = (opt_h1 + opt_h2) / 2 logWeights1 = weight_model(data1.to(args.device)).detach().cpu().numpy() logWeights2 = weight_model(data2.to(args.device)).detach().cpu().numpy()
next_line: kl_model = KernelRatioAlpha(opt_h, gpu=gpu)
gold_snippet_index: 2
created_at: 2023-10-27 04:47:03+00:00
level: 16k
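For readability, the `TKL` reference value printed in the record above is the closed-form KL divergence between the two sampled Gaussians (here the symbols correspond to the code's mean1/Cov1 and mean2/Cov2, and d to args.dim):

\[
\mathrm{KL}\!\left(\mathcal{N}(\mu_1,\Sigma_1)\,\middle\|\,\mathcal{N}(\mu_2,\Sigma_2)\right)
= \tfrac{1}{2}\left[\operatorname{tr}\!\left(\Sigma_2^{-1}\Sigma_1\right)
+ (\mu_2-\mu_1)^{\top}\Sigma_2^{-1}(\mu_2-\mu_1)
- d
+ \ln\frac{\det\Sigma_2}{\det\Sigma_1}\right]
\]

The KDE, model-based, and score/weight-based ("free") estimators in the record are then compared against this reference value.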
repo_name: Sllambias/yucca
file_path: yucca/deprecated/YuccaPreprocessor_MultiTask.py
[ { "identifier": "YuccaPreprocessor", "path": "yucca/preprocessing/YuccaPreprocessor.py", "snippet": "class YuccaPreprocessor(object):\n \"\"\"\n The YuccaPreprocessor class is designed to preprocess medical images for the Yucca project.\n It implements various preprocessing steps, such as reori...
import numpy as np import torch import nibabel as nib import os import cc3d from yucca.preprocessing.YuccaPreprocessor import YuccaPreprocessor from yucca.paths import yucca_preprocessed_data, yucca_raw_data from yucca.preprocessing.normalization import normalizer from yucca.utils.nib_utils import get_nib_spacing, get_nib_orientation, reorient_nib_image from yucca.utils.type_conversions import nifti_or_np_to_np from yucca.image_processing.objects.BoundingBox import get_bbox_for_foreground from yucca.image_processing.cropping_and_padding import crop_to_box, pad_to_size from multiprocessing import Pool from skimage.transform import resize from batchgenerators.utilities.file_and_folder_operations import ( join, load_json, subfiles, save_pickle, maybe_mkdir_p, isfile, subdirs, )
token_num: 11,295
np.save(arraypath, images) # save metadata as .pkl save_pickle(image_props, picklepath) def _resample_and_normalize_case( self, images: list, seg: np.ndarray = None, norm_op=None, transpose=None, original_spacing=None, target_spacing=None ): # Normalize and Transpose images to target view. # Transpose segmentations to target view. assert len(images) == len(norm_op) == len(self.intensities), ( "number of images, " "normalization operations and intensities does not match. \n" f"len(images) == {len(images)} \n" f"len(norm_op) == {len(norm_op)} \n" f"len(self.intensities) == {len(self.intensities)} \n" ) for i in range(len(images)): images[i] = normalizer(images[i], scheme=norm_op[i], intensities=self.intensities[i]) assert len(images[i].shape) == len(transpose), ( "image and transpose axes do not match. \n" f"images[i].shape == {images[i].shape} \n" f"transpose == {transpose} \n" f"len(images[i].shape) == {len(images[i]).shape} \n" f"len(transpose) == {len(transpose)} \n" ) images[i] = images[i].transpose(transpose) print(f"Normalized with: {norm_op[0]} \n" f"Transposed with: {transpose}") shape_t = images[0].shape original_spacing_t = original_spacing[transpose] target_spacing_t = target_spacing[transpose] # Find new shape based on the target spacing target_shape = np.round((original_spacing_t / target_spacing_t).astype(float) * shape_t).astype(int) # Resample to target shape and spacing for i in range(len(images)): try: images[i] = resize(images[i], output_shape=target_shape, order=3) except OverflowError: print("Unexpected values in either shape or image for resize") if seg is not None: seg = seg.transpose(transpose) try: seg = resize(seg, output_shape=target_shape, order=0, anti_aliasing=False) except OverflowError: print("Unexpected values in either shape or seg for resize") return images, seg return images def preprocess_case_for_inference(self, images: list, patch_size: tuple, do_tta=False): """ Will reorient ONLY if we have valid qform or sform codes. with coded=True the methods will return {affine or None} and {0 or 1}. If both are 0 we cannot rely on headers for orientations and will instead assume images are in the desired orientation already. Afterwards images will be normalized and transposed as specified by the plans file also used in training. Finally images are resampled to the required spacing/size and returned as torch tensors of the required shape (b, c, x, y, (z)) """ assert isinstance(images, list), "image(s) should be a list, even if only one " "image is passed" self.initialize_properties() image_properties = {} images = [nib.load(image) for image in images] image_properties["original_spacing"] = get_nib_spacing(images[0]) image_properties["original_shape"] = np.array(images[0].shape) image_properties["qform"] = images[0].get_qform() image_properties["sform"] = images[0].get_sform() assert len(image_properties["original_shape"]) in [2, 3], "images must be either 2D or 3D for preprocessing" # Check if header is valid and then attempt to orient to target orientation. 
if ( images[0].get_qform(coded=True)[1] or images[0].get_sform(coded=True)[1] and self.plans.get("target_coordinate_system") ): image_properties["reoriented"] = True original_orientation = get_nib_orientation(images[0]) image_properties["original_orientation"] = original_orientation images = [ reorient_nib_image(image, original_orientation, self.plans["target_coordinate_system"]) for image in images ] image_properties["new_orientation"] = get_nib_orientation(images[0]) else: print("Insufficient header information. Reorientation will not be attempted.") image_properties["reoriented"] = False image_properties["affine"] = images[0].affine images = [nifti_or_np_to_np(image) for image in images] image_properties["uncropped_shape"] = np.array(images[0].shape) if self.plans["crop_to_nonzero"]: nonzero_box = get_bbox_for_foreground(images[0], background_label=0) for i in range(len(images)): images[i] = crop_to_box(images[i], nonzero_box) image_properties["nonzero_box"] = nonzero_box image_properties["cropped_shape"] = np.array(images[0].shape) images = self._resample_and_normalize_case( images, norm_op=self.plans["normalization_scheme"], transpose=self.transpose_forward, original_spacing=image_properties["original_spacing"], target_spacing=self.target_spacing, ) # From this point images are shape (1, c, x, y, z) image_properties["resampled_transposed_shape"] = np.array(images[0].shape) for i in range(len(images)):
""" Takes raw data conforming with Yucca standards and preprocesses according to the generic scheme """ class YuccaMultiTaskPreprocessor(YuccaPreprocessor): """ Multi Task equivalent of the YuccaPreprocessor, which prepares a dataset consisting of a combination of segmentation, classification and registration cases. """ def __init__(self, plans_path, task=None, threads=12, disable_unittests=False): self.name = str(self.__class__.__name__) self.task = task self.plans_path = plans_path self.plans = load_json(plans_path) self.threads = threads self.disable_unittests = disable_unittests # lists for information we would like to attain self.transpose_forward = [] self.transpose_backward = [] self.target_spacing = [] def initialize_paths(self): self.target_dir = join(yucca_preprocessed_data, self.task, self.plans["plans_name"]) self.input_dir = join(yucca_raw_data, self.task) self.imagedirs = join(self.input_dir, "imagesTr") self.labeldirs = join(self.input_dir, "labelsTr") def initialize_properties(self): """ here we basically set up things that are needed for preprocessing during training, but that aren't necessary during inference """ self.dataset_properties = self.plans["dataset_properties"] self.intensities = self.dataset_properties["intensities"] # op values self.transpose_forward = np.array(self.plans["transpose_forward"]) self.transpose_backward = np.array(self.plans["transpose_backward"]) self.target_spacing = np.array(self.plans["target_spacing"]) def run(self): self.initialize_properties() self.initialize_paths() maybe_mkdir_p(self.target_dir) tasks = subdirs(join(self.input_dir, "imagesTr"), join=False) subject_ids = [] for task in tasks: for subject in subfiles(join(self.input_dir, "imagesTr", task), join=False): if subject.endswith("_000.nii.gz"): s = subject[: -len("_000.nii.gz")] subject_ids.append((s, task)) print( f"{'Preprocessing Task:':25.25} {self.task} \n" f"{'Using Planner:':25.25} {self.plans_path} \n" f"{'Crop to nonzero:':25.25} {self.plans['crop_to_nonzero']} \n" f"{'Normalization scheme:':25.25} {self.plans['normalization_scheme']} \n" f"{'Transpose Forward:':25.25} {self.transpose_forward} \n" f"{'Transpose Backward:':25.25} {self.transpose_backward} \n" ) p = Pool(self.threads) p.map(self._preprocess_train_subject, subject_ids) p.close() p.join() def _preprocess_train_subject(self, subject_id_and_task): subject_id, task = subject_id_and_task assert task in ["Classification", "Reconstruction", "Segmentation"] image_props = {} subject_id = subject_id.split(".")[0] print(f"Preprocessing: {subject_id}") arraypath = join(self.target_dir, subject_id + ".npy") picklepath = join(self.target_dir, subject_id + ".pkl") if isfile(arraypath) and isfile(picklepath): print(f"Case: {subject_id} already exists. 
Skipping.") return # First find relevant images by their paths and save them in the image property pickle # Then load them as images # The '_' in the end is to avoid treating Case_4_000 AND Case_42_000 as different versions # of the seg named Case_4 as both would start with "Case_4", however only the correct one is # followed by an underscore imagepaths = [ impath for impath in subfiles(join(self.imagedirs, task)) if os.path.split(impath)[-1].startswith(subject_id + "_") ] image_props["image files"] = imagepaths images = [nib.load(image) for image in imagepaths] # Do the same with segmentation seg = [ segpath for segpath in subfiles(join(self.labeldirs, task)) if os.path.split(segpath)[-1].startswith(subject_id + ".") ] print(subject_id, seg) image_props["segmentation file"] = seg assert len(seg) < 2, f"unexpected number of segmentations found. Expected 1 or 0 and found {len(seg)}" if task == "Classification": seg = np.load(seg[0]) elif task == "Segmentation": seg = nib.load(seg[0]) else: seg = None if not self.disable_unittests: assert len(images) > 0, f"found no images for {subject_id + '_'}, " f"attempted imagepaths: {imagepaths}" assert ( len(images[0].shape) == self.plans["dataset_properties"]["data_dimensions"] ), f"image should be shape (x, y(, z)) but is {images[0].shape}" # Make sure all modalities are correctly registered if len(images) > 1: for image in images: assert images[0].shape == image.shape, ( f"Sizes do not match for {subject_id}" f"One is: {images[0].shape} while another is {image.shape}" ) assert np.allclose(get_nib_spacing(images[0]), get_nib_spacing(image)), ( f"Spacings do not match for {subject_id}" f"One is: {get_nib_spacing(images[0])} while another is {get_nib_spacing(image)}" ) assert get_nib_orientation(images[0]) == get_nib_orientation(image), ( f"Directions do not match for {subject_id}" f"One is: {get_nib_orientation(images[0])} while another is {get_nib_orientation(image)}" ) original_spacing = get_nib_spacing(images[0]) original_size = np.array(images[0].shape) if self.target_spacing.size: target_spacing = self.target_spacing else: target_spacing = original_spacing # If qform and sform are both missing the header is corrupt and we do not trust the # direction from the affine # Make sure you know what you're doing if images[0].get_qform(coded=True)[1] or images[0].get_sform(coded=True)[1]: original_orientation = get_nib_orientation(images[0]) final_direction = self.plans["target_coordinate_system"] images = [nifti_or_np_to_np(reorient_nib_image(image, original_orientation, final_direction)) for image in images] if isinstance(seg, nib.Nifti1Image): seg = nifti_or_np_to_np(reorient_nib_image(seg, original_orientation, final_direction)) else: original_orientation = "INVALID" final_direction = "INVALID" images = [nifti_or_np_to_np(image) for image in images] if isinstance(seg, nib.Nifti1Image): seg = nifti_or_np_to_np(seg) # Cropping is performed to save computational resources. We are only removing background. 
if self.plans["crop_to_nonzero"]: nonzero_box = get_bbox_for_foreground(images[0], background_label=0) image_props["crop_to_nonzero"] = nonzero_box for i in range(len(images)): images[i] = crop_to_box(images[i], nonzero_box) if task == "Segmentation": seg = crop_to_box(seg, nonzero_box) else: image_props["crop_to_nonzero"] = self.plans["crop_to_nonzero"] if task != "Segmentation": images = self._resample_and_normalize_case( images, None, self.plans["normalization_scheme"], self.transpose_forward, original_spacing, target_spacing ) if seg is not None: images = np.array((np.array(images).T, seg), dtype="object") images[0] = images[0].T final_size = list(images[0][0].shape) else: images = np.array(images) final_size = list(images[0].shape) else: images, seg = self._resample_and_normalize_case( images, seg, self.plans["normalization_scheme"], self.transpose_forward, original_spacing, target_spacing ) # Stack and fix dimensions images = np.vstack((np.array(images), np.array(seg)[np.newaxis])) final_size = list(images[0].shape) # now AFTER transposition etc., we get some (no need to get all) # locations of foreground, that we will later use in the # oversampling of foreground classes if task == "Segmentation": foreground_locs = np.array(np.nonzero(images[-1])).T[::10] numbered_ground_truth, ground_truth_numb_lesion = cc3d.connected_components( images[-1], connectivity=26, return_N=True ) if ground_truth_numb_lesion == 0: object_sizes = 0 else: object_sizes = [ i * np.prod(target_spacing) for i in np.unique(numbered_ground_truth, return_counts=True)[-1][1:] ] else: foreground_locs = [] numbered_ground_truth = ground_truth_numb_lesion = object_sizes = 0 # save relevant values image_props["original_spacing"] = original_spacing image_props["original_size"] = original_size image_props["original_orientation"] = original_orientation image_props["new_spacing"] = target_spacing[self.transpose_forward].tolist() image_props["new_size"] = final_size image_props["task"] = task image_props["new_direction"] = final_direction image_props["foreground_locations"] = foreground_locs image_props["n_cc"] = ground_truth_numb_lesion image_props["size_cc"] = object_sizes print( f"size before: {original_size} size after: {image_props['new_size']} \n" f"spacing before: {original_spacing} spacing after: {image_props['new_spacing']} \n" f"Saving {subject_id} in {arraypath} \n" ) # save the image np.save(arraypath, images) # save metadata as .pkl save_pickle(image_props, picklepath) def _resample_and_normalize_case( self, images: list, seg: np.ndarray = None, norm_op=None, transpose=None, original_spacing=None, target_spacing=None ): # Normalize and Transpose images to target view. # Transpose segmentations to target view. assert len(images) == len(norm_op) == len(self.intensities), ( "number of images, " "normalization operations and intensities does not match. \n" f"len(images) == {len(images)} \n" f"len(norm_op) == {len(norm_op)} \n" f"len(self.intensities) == {len(self.intensities)} \n" ) for i in range(len(images)): images[i] = normalizer(images[i], scheme=norm_op[i], intensities=self.intensities[i]) assert len(images[i].shape) == len(transpose), ( "image and transpose axes do not match. 
\n" f"images[i].shape == {images[i].shape} \n" f"transpose == {transpose} \n" f"len(images[i].shape) == {len(images[i]).shape} \n" f"len(transpose) == {len(transpose)} \n" ) images[i] = images[i].transpose(transpose) print(f"Normalized with: {norm_op[0]} \n" f"Transposed with: {transpose}") shape_t = images[0].shape original_spacing_t = original_spacing[transpose] target_spacing_t = target_spacing[transpose] # Find new shape based on the target spacing target_shape = np.round((original_spacing_t / target_spacing_t).astype(float) * shape_t).astype(int) # Resample to target shape and spacing for i in range(len(images)): try: images[i] = resize(images[i], output_shape=target_shape, order=3) except OverflowError: print("Unexpected values in either shape or image for resize") if seg is not None: seg = seg.transpose(transpose) try: seg = resize(seg, output_shape=target_shape, order=0, anti_aliasing=False) except OverflowError: print("Unexpected values in either shape or seg for resize") return images, seg return images def preprocess_case_for_inference(self, images: list, patch_size: tuple, do_tta=False): """ Will reorient ONLY if we have valid qform or sform codes. with coded=True the methods will return {affine or None} and {0 or 1}. If both are 0 we cannot rely on headers for orientations and will instead assume images are in the desired orientation already. Afterwards images will be normalized and transposed as specified by the plans file also used in training. Finally images are resampled to the required spacing/size and returned as torch tensors of the required shape (b, c, x, y, (z)) """ assert isinstance(images, list), "image(s) should be a list, even if only one " "image is passed" self.initialize_properties() image_properties = {} images = [nib.load(image) for image in images] image_properties["original_spacing"] = get_nib_spacing(images[0]) image_properties["original_shape"] = np.array(images[0].shape) image_properties["qform"] = images[0].get_qform() image_properties["sform"] = images[0].get_sform() assert len(image_properties["original_shape"]) in [2, 3], "images must be either 2D or 3D for preprocessing" # Check if header is valid and then attempt to orient to target orientation. if ( images[0].get_qform(coded=True)[1] or images[0].get_sform(coded=True)[1] and self.plans.get("target_coordinate_system") ): image_properties["reoriented"] = True original_orientation = get_nib_orientation(images[0]) image_properties["original_orientation"] = original_orientation images = [ reorient_nib_image(image, original_orientation, self.plans["target_coordinate_system"]) for image in images ] image_properties["new_orientation"] = get_nib_orientation(images[0]) else: print("Insufficient header information. 
Reorientation will not be attempted.") image_properties["reoriented"] = False image_properties["affine"] = images[0].affine images = [nifti_or_np_to_np(image) for image in images] image_properties["uncropped_shape"] = np.array(images[0].shape) if self.plans["crop_to_nonzero"]: nonzero_box = get_bbox_for_foreground(images[0], background_label=0) for i in range(len(images)): images[i] = crop_to_box(images[i], nonzero_box) image_properties["nonzero_box"] = nonzero_box image_properties["cropped_shape"] = np.array(images[0].shape) images = self._resample_and_normalize_case( images, norm_op=self.plans["normalization_scheme"], transpose=self.transpose_forward, original_spacing=image_properties["original_spacing"], target_spacing=self.target_spacing, ) # From this point images are shape (1, c, x, y, z) image_properties["resampled_transposed_shape"] = np.array(images[0].shape) for i in range(len(images)):
next_line: images[i], padding = pad_to_size(images[i], patch_size)
gold_snippet_index: 9
created_at: 2023-10-26 08:13:03+00:00
level: 16k
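A minimal, self-contained sketch of the spacing-based resampling rule used by `_resample_and_normalize_case` in the record above; the volume and spacing values here are synthetic, chosen only to make the arithmetic visible, and `skimage.transform.resize` is the same call the record uses:

import numpy as np
from skimage.transform import resize

image = np.random.rand(64, 64, 32)            # synthetic volume
original_spacing = np.array([1.0, 1.0, 2.0])  # mm per voxel in the source image
target_spacing = np.array([1.0, 1.0, 1.0])    # mm per voxel after preprocessing

# Keep the physical extent (shape * spacing) constant while changing the grid.
target_shape = np.round((original_spacing / target_spacing) * image.shape).astype(int)

resampled = resize(image, output_shape=target_shape, order=3)  # cubic for images
print(resampled.shape)  # (64, 64, 64)

Segmentations in the record go through the same shape computation but with order=0 and anti_aliasing=False, so label values are never interpolated.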
repo_name: Elfenreigen/UniChest
file_path: optim/optim_factory.py
[ { "identifier": "Adafactor", "path": "optim/adafactor.py", "snippet": "class Adafactor(torch.optim.Optimizer):\n \"\"\"Implements Adafactor algorithm.\n This implementation is based on: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`\n (see https://arxiv.org/abs/1804.04235)\n\n ...
import torch from torch import optim as optim from .adafactor import Adafactor from .adahessian import Adahessian from .adamp import AdamP from .lookahead import Lookahead from .nadam import Nadam from .novograd import NovoGrad from .nvnovograd import NvNovoGrad from .radam import RAdam from .rmsprop_tf import RMSpropTF from .sgdp import SGDP from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
token_num: 12,597
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2020 Ross Wightman """ try: has_apex = True except ImportError: has_apex = False def add_weight_decay(model, image_encoder,text_encoder, weight_decay=1e-5, skip_list=()): decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in image_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in text_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True): opt_lower = args.opt.lower() weight_decay = args.weight_decay if weight_decay and filter_bias_and_bn: skip = {} if hasattr(model, 'no_weight_decay'): skip = model.no_weight_decay() parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip) weight_decay = 0. else: parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())] #model.parameters() # print(parameters) if 'fused' in opt_lower: assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' opt_args = dict(lr=args.lr, weight_decay=weight_decay) if hasattr(args, 'opt_eps') and args.opt_eps is not None: opt_args['eps'] = args.opt_eps if hasattr(args, 'opt_betas') and args.opt_betas is not None: opt_args['betas'] = args.opt_betas if hasattr(args, 'opt_args') and args.opt_args is not None: opt_args.update(args.opt_args) opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower == 'sgd' or opt_lower == 'nesterov': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'nadam': optimizer = Nadam(parameters, **opt_args) elif opt_lower == 'radam': optimizer = RAdam(parameters, **opt_args) elif opt_lower == 'adamp': optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) elif opt_lower == 'sgdp': optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'adadelta': optimizer = optim.Adadelta(parameters, **opt_args) elif opt_lower == 'adafactor': if not args.lr: opt_args['lr'] = None optimizer = Adafactor(parameters, **opt_args) elif opt_lower == 'adahessian': optimizer = Adahessian(parameters, **opt_args) elif opt_lower == 'rmsprop': optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args) elif opt_lower == 'rmsproptf': optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args) elif opt_lower == 'novograd':
""" Optimizer Factory w/ Custom Weight Decay Hacked together by / Copyright 2020 Ross Wightman """ try: has_apex = True except ImportError: has_apex = False def add_weight_decay(model, image_encoder,text_encoder, weight_decay=1e-5, skip_list=()): decay = [] no_decay = [] for name, param in model.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in image_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) for name, param in text_encoder.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: no_decay.append(param) else: decay.append(param) return [ {'params': no_decay, 'weight_decay': 0.}, {'params': decay, 'weight_decay': weight_decay}] def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True): opt_lower = args.opt.lower() weight_decay = args.weight_decay if weight_decay and filter_bias_and_bn: skip = {} if hasattr(model, 'no_weight_decay'): skip = model.no_weight_decay() parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip) weight_decay = 0. else: parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())] #model.parameters() # print(parameters) if 'fused' in opt_lower: assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' opt_args = dict(lr=args.lr, weight_decay=weight_decay) if hasattr(args, 'opt_eps') and args.opt_eps is not None: opt_args['eps'] = args.opt_eps if hasattr(args, 'opt_betas') and args.opt_betas is not None: opt_args['betas'] = args.opt_betas if hasattr(args, 'opt_args') and args.opt_args is not None: opt_args.update(args.opt_args) opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower == 'sgd' or opt_lower == 'nesterov': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'nadam': optimizer = Nadam(parameters, **opt_args) elif opt_lower == 'radam': optimizer = RAdam(parameters, **opt_args) elif opt_lower == 'adamp': optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) elif opt_lower == 'sgdp': optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'adadelta': optimizer = optim.Adadelta(parameters, **opt_args) elif opt_lower == 'adafactor': if not args.lr: opt_args['lr'] = None optimizer = Adafactor(parameters, **opt_args) elif opt_lower == 'adahessian': optimizer = Adahessian(parameters, **opt_args) elif opt_lower == 'rmsprop': optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args) elif opt_lower == 'rmsproptf': optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args) elif opt_lower == 'novograd':
next_line: optimizer = NovoGrad(parameters, **opt_args)
gold_snippet_index: 5
created_at: 2023-10-30 00:24:16+00:00
level: 16k
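A minimal usage sketch of the decay/no-decay parameter grouping that `add_weight_decay` in the record above implements; the toy model is hypothetical, and the split mirrors the record's rule (1-D tensors and `.bias` parameters get zero weight decay):

import torch
from torch import nn, optim

model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 2))

decay, no_decay = [], []
for name, param in model.named_parameters():
    if not param.requires_grad:
        continue  # frozen weights are excluded entirely
    if len(param.shape) == 1 or name.endswith(".bias"):
        no_decay.append(param)  # biases and norm scales: no decay
    else:
        decay.append(param)     # weight matrices: decayed

optimizer = optim.AdamW(
    [{"params": no_decay, "weight_decay": 0.0},
     {"params": decay, "weight_decay": 1e-5}],
    lr=1e-3,
)

The record applies the same loop to three modules (model, image_encoder, text_encoder) and merges the groups before handing them to whichever optimizer `create_optimizer` selects.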
repo_name: YichenZW/Coh-MGT-Detection
file_path: run_detector.py
[ { "identifier": "glue_compute_metrics", "path": "util.py", "snippet": "def glue_compute_metrics(task_name, preds, labels):\n assert len(preds) == len(labels)\n if task_name == \"cola\":\n return {\"mcc\": matthews_corrcoef(labels, preds)}\n elif task_name == \"sst-2\":\n return {\...
import os import torch import argparse import logging import random import wandb import numpy as np import ray from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from torch.optim import AdamW from transformers import ( set_seed, AutoTokenizer, AutoConfig, AutoModel, AutoModelForSequenceClassification, get_linear_schedule_with_warmup, ) from functools import partial from util import glue_compute_metrics as compute_metrics from util import ( glue_convert_examples_to_features as convert_examples_to_features, ) from util import glue_output_modes as output_modes from util import glue_processors as processors from modeling_roberta import ( RobertaForGraphBasedSequenceClassification, RobertaForGraphBasedSequenceClassification_CL, RobertaForGraphBasedSequenceClassification_MBCL, EncoderForMBCL, RobertaForGraphBasedSequenceClassification_RFCL, ) from ray import tune from ray.tune import CLIReporter from ray.tune.schedulers import ASHAScheduler from apex import amp
token_num: 11,110
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples( args, eval_task, tokenizer, evaluate=True, mode=mode ) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly. eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Evaluation logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds, out_label_ids = None, None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids outputs, _ = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0 ) probs = preds eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) wandb.log( { "eval/acc": result["acc"], "eval/f1": result["f1"], "eval/acc_and_f1": result["acc_and_f1"], } ) return results def load_and_cache_examples( args, task, tokenizer, evaluate=False, mode="train", dataset_name="", rel="" ): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}_{}_{}".format( mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), str(dataset_name), str(rel), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if mode == "train": examples = processor.get_train_examples(args.with_relation, args.data_dir) elif mode == "dev": examples = 
processor.get_dev_examples(args.with_relation, args.data_dir) elif mode == "test": examples = processor.get_test_examples(args.with_relation, args.data_dir)
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Based on code from the above authors, modifications made by Xi'an Jiaotong University. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def number_h(num): for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]: if abs(num) < 1000.0: return "%3.1f%s" % (num, unit) num /= 1000.0 return "%.1f%s" % (num, "Yi") def generate_shaped_nodes_mask(nodes, max_seq_length, max_nodes_num): nodes_mask = np.zeros(shape=(max_nodes_num, max_seq_length)) nodes_num = min(len(nodes), max_nodes_num) for i in range(nodes_num): span = nodes[i] if span[0] != -1: if span[0] < max_seq_length - 1: end_pos = ( span[1] if span[1] < max_seq_length - 1 else max_seq_length - 1 ) nodes_mask[i, span[0] + 1 : end_pos + 1] = 1 else: continue return nodes_mask, nodes_num def generate_shaped_edge_mask(adj_metric, nodes_num, max_nodes_num, relation_n): if nodes_num != 0: if relation_n != 0: new_adj_metric = np.zeros(shape=(relation_n, max_nodes_num, max_nodes_num)) for i in range(relation_n): new_adj_metric[i][:nodes_num, :nodes_num] = adj_metric[i][ :nodes_num, :nodes_num ] else: new_adj_metric = np.zeros(shape=(max_nodes_num, max_nodes_num)) new_adj_metric[:nodes_num, :nodes_num] = adj_metric[:nodes_num, :nodes_num] return new_adj_metric def train(args, train_dataset, model, tokenizer): """Train the model""" total_params = sum(p.numel() for p in model.parameters()) total_trainable_params = sum( p.numel() for p in model.parameters() if p.requires_grad ) print("Total Params:", number_h(total_params)) print("Total Trainable Params:", number_h(total_trainable_params)) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = ( RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) ) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size ) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = ( args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 ) else: t_total = ( len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs ) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.01, }, ] optimizer = AdamW( optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon ) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # 
Check if saved optimizer or scheduler states exist if os.path.isfile( os.path.join(args.model_name_or_path, "optimizer.pt") ) and os.path.isfile(os.path.join(args.model_name_or_path, "scheduler.pt")): optimizer.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")) ) scheduler.load_state_dict( torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")) ) if args.fp16: try: except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use fp16 training." ) model, optimizer = amp.initialize( model, optimizer, opt_level=args.fp16_opt_level ) # Multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Training logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info( " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size ) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) best_acc, best_f1 = 0.0, 0.0 global_step, epochs_trained, steps_trained_in_current_epoch = 0, 0, 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to gobal_step of last saved checkpoint from model path global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0]) epochs_trained = global_step // ( len(train_dataloader) // args.gradient_accumulation_steps ) steps_trained_in_current_epoch = global_step % ( len(train_dataloader) // args.gradient_accumulation_steps ) logger.info( " Continuing training from checkpoint, will skip to saved global_step" ) logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info( " Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch, ) tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) max_acc, max_acc_f1, max_f1, max_f1_acc = 0.0, 0.0, 0.0, 0.0 for idx, _ in enumerate(train_iterator): tr_loss = 0.0 epoch_iterator = tqdm( train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0] ) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], "batch_id": batch[10], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) outputs, _ = model(**inputs) loss = outputs[0] 
wandb.log({"train/loss": loss}) if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() epoch_iterator.set_description( "loss {}".format( round(tr_loss * args.gradient_accumulation_steps / (step + 1), 4) ) ) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm ) else: torch.nn.utils.clip_grad_norm_( model.parameters(), args.max_grad_norm ) optimizer.step() scheduler.step() model.zero_grad() global_step += 1 if ( args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0 ): logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): results = evaluate(args, model, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss wandb.log({"eval/loss": loss_scalar}) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.local_rank in [-1, 0] and args.save_steps > 0 and args.do_eval: results = evaluate(args, model, tokenizer, checkpoint=str(idx)) logger.info("the results is {}".format(results)) if results["acc"] > max_acc: max_acc = results["acc"] max_acc_f1 = results["f1"] if results["f1"] > max_f1: max_f1 = results["f1"] max_f1_acc = results["acc"] if results["f1"] > best_f1: best_f1 = results["f1"] output_dir = os.path.join( args.output_dir, "seed-{}".format(args.seed), "checkpoint-{}-{}".format(idx, best_f1), ) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save( args, os.path.join(output_dir, "training_{}.bin".format(idx)) ) logger.info("Saving model checkpoint to %s", output_dir) torch.save( optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt") ) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return_res = { "max_acc": max_acc, "max_acc_f1": max_acc_f1, "max_f1": max_f1, "max_f1_acc": max_f1_acc, } if args.do_ray: tune.report( accuracy=max_acc, max_acc_f1=max_acc_f1, f1=max_f1, max_f1_acc=max_f1_acc ) return global_step, tr_loss / global_step, return_res, output_dir def mb_train(args, train_dataset, encoder_q, encoder_k, dataloader, tokenizer): """Train the model""" global memory_queue encoder_q.train() total_params = sum(p.numel() for p in encoder_q.parameters()) total_trainable_params = sum( p.numel() for p in encoder_q.parameters() if p.requires_grad ) print("Encoder Params:", number_h(total_params)) print("Encoder Trainable Params:", number_h(total_trainable_params)) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = ( RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) ) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size ) if args.max_steps > 0: t_total = 
args.max_steps args.num_train_epochs = ( args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 ) else: t_total = ( len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs ) # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in encoder_q.named_parameters() if not any(nd in n for nd in no_decay) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in encoder_q.named_parameters() if any(nd in n for nd in no_decay) ], "weight_decay": 0.01, }, ] optimizer = AdamW( optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon ) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Training logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info( " Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size ) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) best_f1 = 0.0 global_step, epochs_trained, steps_trained_in_current_epoch = 0, 0, 0 tr_loss, logging_loss = 0.0, 0.0 encoder_q.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0], ) set_seed(args) max_acc, max_acc_f1, max_f1, max_f1_acc = 0.0, 0.0, 0.0, 0.0 for idx, _ in enumerate(train_iterator): tr_loss = 0.0 epoch_iterator = tqdm( train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0] ) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue encoder_q.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], "batch_id": batch[10], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids q_outputs, q_rep = encoder_q(**inputs) # Model outputs are always tuple in transformers (see doc). 
if args.n_gpu > 1: loss = loss.mean() if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps loss.backward() tr_loss += loss.item() epoch_iterator.set_description( "loss {}".format( round(tr_loss * args.gradient_accumulation_steps / (step + 1), 4) ) ) if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_( amp.master_params(optimizer), args.max_grad_norm ) else: torch.nn.utils.clip_grad_norm_( encoder_q.parameters(), args.max_grad_norm ) optimizer.step() scheduler.step() encoder_q.zero_grad() global_step += 1 if ( args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0 ): logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, encoder_q, tokenizer) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss wandb.log({"train/loss": loss_scalar}) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.local_rank in [-1, 0] and args.save_steps > 0 and args.do_eval: results = evaluate(args, encoder_q, tokenizer, checkpoint=str(idx)) logger.info("the results is {}".format(results)) if results["f1"] > max_f1: max_f1 = results["f1"] max_f1_acc = results["acc"] if results["acc"] > max_acc: max_acc = results["acc"] max_acc_f1 = results["f1"] if results["f1"] > best_f1: best_f1 = results["f1"] output_dir = os.path.join( args.output_dir, "seed-{}".format(args.seed), "checkpoint-{}-{}".format(idx, best_f1), ) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( encoder_q.module if hasattr(encoder_q, "module") else encoder_q ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save( args, os.path.join(output_dir, "training_{}.bin".format(idx)) ) logger.info("Saving model checkpoint to %s", output_dir) torch.save( optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt") ) torch.save( scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt") ) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break return_res = { "max_acc": max_acc, "max_acc_f1": max_acc_f1, "max_f1": max_f1, "max_f1_acc": max_f1_acc, } if args.do_ray: tune.report( accuracy=max_acc, max_acc_f1=max_acc_f1, f1=max_f1, max_f1_acc=max_f1_acc ) return global_step, tr_loss / global_step, return_res, output_dir def evaluate(args, model, tokenizer, checkpoint=None, prefix="", mode="dev"): eval_task_names = (args.task_name,) eval_outputs_dirs = (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples( args, eval_task, tokenizer, evaluate=True, mode=mode ) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly. 
eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size ) if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Evaluation logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds, out_label_ids = None, None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3], "nodes_index_mask": batch[4], "adj_metric": batch[5], "node_mask": batch[6], "sen2node": batch[7], "sentence_mask": batch[8], "sentence_length": batch[9], } if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet", "albert"] else None ) # XLM, DistilBERT, RoBERTa, and XLM-RoBERTa don't use segment_ids outputs, _ = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append( out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0 ) probs = preds eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) wandb.log( { "eval/acc": result["acc"], "eval/f1": result["f1"], "eval/acc_and_f1": result["acc_and_f1"], } ) return results def load_and_cache_examples( args, task, tokenizer, evaluate=False, mode="train", dataset_name="", rel="" ): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}_{}_{}".format( mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), str(dataset_name), str(rel), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if mode == "train": examples = processor.get_train_examples(args.with_relation, args.data_dir) elif mode == "dev": examples = processor.get_dev_examples(args.with_relation, args.data_dir) elif mode == "test": examples = processor.get_test_examples(args.with_relation, args.data_dir)
next_line: features = convert_examples_to_features(
gold_snippet_index: 0
created_at: 2023-10-24 14:03:11+00:00
level: 16k
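A minimal sketch of the gradient-accumulation pattern used in `train()` above (toy model and random data, purely illustrative; the clipping threshold stands in for args.max_grad_norm):

import torch
from torch import nn

model = nn.Linear(8, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
accumulation_steps = 4  # corresponds to args.gradient_accumulation_steps

for step in range(16):
    x = torch.randn(4, 8)
    y = torch.randint(0, 2, (4,))
    loss = nn.functional.cross_entropy(model(x), y)
    (loss / accumulation_steps).backward()  # scale so gradients average over the window
    if (step + 1) % accumulation_steps == 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
        optimizer.step()
        optimizer.zero_grad()

As in the record, the optimizer (and, there, the LR scheduler) only steps once per accumulation window, and the loss is divided by the accumulation count before backward().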
repo_name: deforum-studio/deforum
file_path: src/deforum/models/depth_models/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py
[ { "identifier": "DepthModel", "path": "src/deforum/models/depth_models/zoedepth/models/depth_model.py", "snippet": "class DepthModel(nn.Module):\n def __init__(self):\n super().__init__()\n self.device = 'cuda'\n \n def to(self, device) -> nn.Module:\n self.device = device\...
import itertools import torch import torch.nn as nn from ..depth_model import DepthModel from ..base_models.midas import MidasCore from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed from ..layers.dist_layers import ConditionalLogBinomial from ..layers.localbins_layers import (Projector, SeedBinRegressor, SeedBinRegressorUnnormed) from ..layers.patch_transformer import PatchTransformerEncoder from ..model_io import load_state_from_resource
token_num: 11,156
max_depth = conf['max_depth'] seed_bin_regressor = self.seed_bin_regressors[bin_conf_name] _, seed_b_centers = seed_bin_regressor(x) if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': b_prev = (seed_b_centers - min_depth) / (max_depth - min_depth) else: b_prev = seed_b_centers prev_b_embedding = self.seed_projector(x) attractors = self.attractors[bin_conf_name] for projector, attractor, x in zip(self.projectors, attractors, x_blocks): b_embedding = projector(x) b, b_centers = attractor( b_embedding, b_prev, prev_b_embedding, interpolate=True) b_prev = b prev_b_embedding = b_embedding last = outconv_activation b_centers = nn.functional.interpolate( b_centers, last.shape[-2:], mode='bilinear', align_corners=True) b_embedding = nn.functional.interpolate( b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) clb = self.conditional_log_binomial[bin_conf_name] x = clb(last, b_embedding) # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor # print(x.shape, b_centers.shape) # b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True) out = torch.sum(x * b_centers, dim=1, keepdim=True) output = dict(domain_logits=domain_logits, metric_depth=out) if return_final_centers or return_probs: output['bin_centers'] = b_centers if return_probs: output['probs'] = x return output def get_lr_params(self, lr): """ Learning rate configuration for different layers of the model Args: lr (float) : Base learning rate Returns: list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. """ param_conf = [] if self.train_midas: def get_rel_pos_params(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" in name: yield p def get_enc_params_except_rel_pos(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" not in name: yield p encoder_params = get_enc_params_except_rel_pos() rel_pos_params = get_rel_pos_params() midas_params = self.core.core.scratch.parameters() midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0 param_conf.extend([ {'params': encoder_params, 'lr': lr / self.encoder_lr_factor}, {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor}, {'params': midas_params, 'lr': lr / midas_lr_factor} ]) remaining_modules = [] for name, child in self.named_children(): if name != 'core': remaining_modules.append(child) remaining_params = itertools.chain( *[child.parameters() for child in remaining_modules]) param_conf.append({'params': remaining_params, 'lr': lr}) return param_conf def get_conf_parameters(self, conf_name): """ Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ params = [] for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): if bin_conf_name == conf_name: params += list(module.parameters()) return params def freeze_conf(self, conf_name): """ Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = False def unfreeze_conf(self, conf_name): """ Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = True def freeze_all_confs(self): """ Freezes all the parameters of all 
the ModuleDicts children """ for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): for p in module.parameters(): p.requires_grad = False @staticmethod def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs):
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Shariq Farooq Bhat class ZoeDepthNK(DepthModel): def __init__(self, core, bin_conf, bin_centers_type="softplus", bin_embedding_dim=128, n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, memory_efficient=False, train_midas=True, is_midas_pretrained=True, midas_lr_factor=1, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs): """ZoeDepthNK model. This is the version of ZoeDepth that has two metric heads and uses a learned router to route to experts. Args: core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features bin_conf (List[dict]): A list of dictionaries that contain the bin configuration for each metric head. Each dictionary should contain the following keys: "name" (str, typically same as the dataset name), "n_bins" (int), "min_depth" (float), "max_depth" (float) The length of this list determines the number of metric heads. bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers. For "softplus", softplus activation is used and thus are unbounded. Defaults to "normed". bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128. n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1]. attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300. attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2. attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'. attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'. min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5. max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50. memory_efficient (bool, optional): Whether to use memory efficient version of attractor layers. Memory efficient version is slower but is recommended incase of multiple metric heads in order save GPU memory. Defaults to False. 
train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True. is_midas_pretrained (bool, optional): Is "core" pretrained? Defaults to True. midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10. encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10. pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10. """ super().__init__() self.core = core self.bin_conf = bin_conf self.min_temp = min_temp self.max_temp = max_temp self.memory_efficient = memory_efficient self.train_midas = train_midas self.is_midas_pretrained = is_midas_pretrained self.midas_lr_factor = midas_lr_factor self.encoder_lr_factor = encoder_lr_factor self.pos_enc_lr_factor = pos_enc_lr_factor self.inverse_midas = inverse_midas N_MIDAS_OUT = 32 btlnck_features = self.core.output_channels[0] num_out_features = self.core.output_channels[1:] # self.scales = [16, 8, 4, 2] # spatial scale factors self.conv2 = nn.Conv2d( btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0) # Transformer classifier on the bottleneck self.patch_transformer = PatchTransformerEncoder( btlnck_features, 1, 128, use_class_token=True) self.mlp_classifier = nn.Sequential( nn.Linear(128, 128), nn.ReLU(), nn.Linear(128, 2) ) if bin_centers_type == "normed": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayer elif bin_centers_type == "softplus": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid1": SeedBinRegressorLayer = SeedBinRegressor Attractor = AttractorLayerUnnormed elif bin_centers_type == "hybrid2": SeedBinRegressorLayer = SeedBinRegressorUnnormed Attractor = AttractorLayer else: raise ValueError( "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'") self.bin_centers_type = bin_centers_type # We have bins for each bin conf. # Create a map (ModuleDict) of 'name' -> seed_bin_regressor self.seed_bin_regressors = nn.ModuleDict( {conf['name']: SeedBinRegressorLayer(btlnck_features, conf["n_bins"], mlp_dim=bin_embedding_dim // 2, min_depth=conf["min_depth"], max_depth=conf["max_depth"]) for conf in bin_conf} ) self.seed_projector = Projector( btlnck_features, bin_embedding_dim, mlp_dim=bin_embedding_dim // 2) self.projectors = nn.ModuleList([ Projector(num_out, bin_embedding_dim, mlp_dim=bin_embedding_dim // 2) for num_out in num_out_features ]) # Create a map (ModuleDict) of 'name' -> attractors (ModuleList) self.attractors = nn.ModuleDict( {conf['name']: nn.ModuleList([ Attractor(bin_embedding_dim, n_attractors[i], mlp_dim=bin_embedding_dim, alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type, memory_efficient=memory_efficient, min_depth=conf["min_depth"], max_depth=conf["max_depth"]) for i in range(len(n_attractors)) ]) for conf in bin_conf} ) last_in = N_MIDAS_OUT # conditional log binomial for each bin conf self.conditional_log_binomial = nn.ModuleDict( {conf['name']: ConditionalLogBinomial(last_in, bin_embedding_dim, conf['n_bins'], bottleneck_factor=4, min_temp=self.min_temp, max_temp=self.max_temp) for conf in bin_conf} ) def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs): """ Args: x (torch.Tensor): Input image tensor of shape (B, C, H, W). Assumes all images are from the same domain. 
return_final_centers (bool, optional): Whether to return the final centers of the attractors. Defaults to False. denorm (bool, optional): Whether to denormalize the input image. Defaults to False. return_probs (bool, optional): Whether to return the probabilities of the bins. Defaults to False. Returns: dict: Dictionary of outputs with keys: - "rel_depth": Relative depth map of shape (B, 1, H, W) - "metric_depth": Metric depth map of shape (B, 1, H, W) - "domain_logits": Domain logits of shape (B, 2) - "bin_centers": Bin centers of shape (B, N, H, W). Present only if return_final_centers is True - "probs": Bin probabilities of shape (B, N, H, W). Present only if return_probs is True """ b, c, h, w = x.shape self.orig_input_width = w self.orig_input_height = h rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True) outconv_activation = out[0] btlnck = out[1] x_blocks = out[2:] x_d0 = self.conv2(btlnck) x = x_d0 # Predict which path to take embedding = self.patch_transformer(x)[0] # N, E domain_logits = self.mlp_classifier(embedding) # N, 2 domain_vote = torch.softmax(domain_logits.sum( dim=0, keepdim=True), dim=-1) # 1, 2 # Get the path bin_conf_name = ["nyu", "kitti"][torch.argmax( domain_vote, dim=-1).squeeze().item()] try: conf = [c for c in self.bin_conf if c["name"] == bin_conf_name][0] except IndexError: raise ValueError( f"bin_conf_name {bin_conf_name} not found in bin_confs") min_depth = conf['min_depth'] max_depth = conf['max_depth'] seed_bin_regressor = self.seed_bin_regressors[bin_conf_name] _, seed_b_centers = seed_bin_regressor(x) if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2': b_prev = (seed_b_centers - min_depth) / (max_depth - min_depth) else: b_prev = seed_b_centers prev_b_embedding = self.seed_projector(x) attractors = self.attractors[bin_conf_name] for projector, attractor, x in zip(self.projectors, attractors, x_blocks): b_embedding = projector(x) b, b_centers = attractor( b_embedding, b_prev, prev_b_embedding, interpolate=True) b_prev = b prev_b_embedding = b_embedding last = outconv_activation b_centers = nn.functional.interpolate( b_centers, last.shape[-2:], mode='bilinear', align_corners=True) b_embedding = nn.functional.interpolate( b_embedding, last.shape[-2:], mode='bilinear', align_corners=True) clb = self.conditional_log_binomial[bin_conf_name] x = clb(last, b_embedding) # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor # print(x.shape, b_centers.shape) # b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True) out = torch.sum(x * b_centers, dim=1, keepdim=True) output = dict(domain_logits=domain_logits, metric_depth=out) if return_final_centers or return_probs: output['bin_centers'] = b_centers if return_probs: output['probs'] = x return output def get_lr_params(self, lr): """ Learning rate configuration for different layers of the model Args: lr (float) : Base learning rate Returns: list : list of parameters to optimize and their learning rates, in the format required by torch optimizers. 
""" param_conf = [] if self.train_midas: def get_rel_pos_params(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" in name: yield p def get_enc_params_except_rel_pos(): for name, p in self.core.core.pretrained.named_parameters(): if "relative_position" not in name: yield p encoder_params = get_enc_params_except_rel_pos() rel_pos_params = get_rel_pos_params() midas_params = self.core.core.scratch.parameters() midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0 param_conf.extend([ {'params': encoder_params, 'lr': lr / self.encoder_lr_factor}, {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor}, {'params': midas_params, 'lr': lr / midas_lr_factor} ]) remaining_modules = [] for name, child in self.named_children(): if name != 'core': remaining_modules.append(child) remaining_params = itertools.chain( *[child.parameters() for child in remaining_modules]) param_conf.append({'params': remaining_params, 'lr': lr}) return param_conf def get_conf_parameters(self, conf_name): """ Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ params = [] for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): if bin_conf_name == conf_name: params += list(module.parameters()) return params def freeze_conf(self, conf_name): """ Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = False def unfreeze_conf(self, conf_name): """ Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration """ for p in self.get_conf_parameters(conf_name): p.requires_grad = True def freeze_all_confs(self): """ Freezes all the parameters of all the ModuleDicts children """ for name, child in self.named_children(): if isinstance(child, nn.ModuleDict): for bin_conf_name, module in child.items(): for p in module.parameters(): p.requires_grad = False @staticmethod def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs):
core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas,
1
2023-10-28 14:23:27+00:00
16k
samholt/ActiveObservingInContinuous-timeControl
mppi_dataset_collector.py
[ { "identifier": "dotdict", "path": "config.py", "snippet": "class dotdict(dict):\n \"\"\"dot.notation access to dictionary attributes\"\"\"\n\n __getattr__ = dict.get\n __setattr__ = dict.__setitem__\n __delattr__ = dict.__delitem__" }, { "identifier": "create_env", "path": "over...
import logging
import os
import time
import imageio
import numpy as np
import torch
import torch.multiprocessing as multiprocessing
from functools import partial
from tqdm import tqdm
from config import dotdict
from overlay import create_env, setup_logger, start_virtual_display, step_env
from planners.mppi import MPPI
from planners.mppi_active_observing import MPPIActiveObserving
from oracle import pendulum_dynamics_dt
from oracle import cartpole_dynamics_dt
from oracle import acrobot_dynamics_dt
from oracle import cancer_dynamics_dt
from pathlib import Path
from config import get_config, seed_all
11,134
nu, device=device, dtype=dtype ) * (gamma - off_diagonal) logger.info(mppi_noise_sigma) mppi_lambda_ = 1.0 random_action_noise = config.collect_expert_random_action_noise if model_name == "random": def dynamics(state, perturbed_action): pass elif model_name == "oracle": oracle_sigma = config.observation_noise if env_name == "oderl-pendulum": dynamics_oracle = pendulum_dynamics_dt elif env_name == "oderl-cartpole": dynamics_oracle = cartpole_dynamics_dt elif env_name == "oderl-acrobot": dynamics_oracle = acrobot_dynamics_dt elif env_name == "oderl-cancer": dynamics_oracle = cancer_dynamics_dt def dynamics(*args, **kwargs): state_mu = dynamics_oracle(*args, **kwargs) return state_mu, torch.ones_like(state_mu) * oracle_sigma dynamics = partial(dynamics, friction=config.friction) def running_cost(state, action): if state_constraint: reward = env.diff_obs_reward_( state, exp_reward=False, state_constraint=state_constraint ) + env.diff_ac_reward_(action) elif change_goal: global change_goal_flipped reward = env.diff_obs_reward_( state, exp_reward=False, change_goal=change_goal, change_goal_flipped=change_goal_flipped ) + env.diff_ac_reward_(action) else: reward = env.diff_obs_reward_(state, exp_reward=False) + env.diff_ac_reward_(action) cost = -reward return cost if config.planner == "mppi": mppi_gym = MPPI( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, ) elif config.planner == "mppi_active_observing": mppi_gym = MPPIActiveObserving( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, observing_cost=config.observing_cost, sampling_policy=config.sampling_policy, observing_var_threshold=config.observing_var_threshold, limit_actions_to_only_positive=limit_actions_to_only_positive, dt=dt, ) if save_video: start_virtual_display() videos_folder = "./logs/new_videos" Path(videos_folder).mkdir(parents=True, exist_ok=True) filename = f"{videos_folder}/{env_name}_{model_name}_{uniq}.mp4" fps = int(1 / dt) def loop(): s0 = [] a0 = [] sn = [] ts = [] ACTION_LOW = env.action_space.low[0] ACTION_HIGH = env.action_space.high[0] it = 0 total_reward = 0 env.reset() start_time = time.perf_counter() mppi_gym.reset() while it < iter_: if change_goal_flipped_iter_ < it: change_goal_flipped = True state = env.get_obs() s0.append(state) command_start = time.perf_counter() if model_name != "random": action, costs_std = mppi_gym.command(state) if random_action_noise is not None: action += ( (torch.rand(nu, device=device) - 0.5) * 2.0 * env.action_space.high[0] ) * random_action_noise action = action.clip(min=ACTION_LOW, max=ACTION_HIGH) action = action.view(nu) else: action = torch.from_numpy(env.action_space.sample()) elapsed = time.perf_counter() - command_start
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") logger = logging.getLogger() def inner_mppi_with_model_collect_data( seed, model_name, env_name, roll_outs=1000, time_steps=30, lambda_=1.0, sigma=1.0, dt=0.05, model_seed=11, save_video=False, state_constraint=False, change_goal=False, encode_obs_time=False, model=None, uniq=None, log_debug=False, episodes_per_sampler_task=10, config={}, iter_=200, change_goal_flipped_iter_=False, ts_grid="exp", intermediate_run=False, ): config = dotdict(config) env = create_env(env_name, dt=dt, ts_grid=ts_grid, friction=config.friction) ACTION_LOW = env.action_space.low[0] ACTION_HIGH = env.action_space.high[0] if env_name == "oderl-cancer": limit_actions_to_only_positive = True else: limit_actions_to_only_positive = False nx = env.get_obs().shape[0] nu = env.action_space.shape[0] dtype = torch.float32 gamma = sigma**2 off_diagonal = 0.5 * gamma mppi_noise_sigma = torch.ones((nu, nu), device=device, dtype=dtype) * off_diagonal + torch.eye( nu, device=device, dtype=dtype ) * (gamma - off_diagonal) logger.info(mppi_noise_sigma) mppi_lambda_ = 1.0 random_action_noise = config.collect_expert_random_action_noise if model_name == "random": def dynamics(state, perturbed_action): pass elif model_name == "oracle": oracle_sigma = config.observation_noise if env_name == "oderl-pendulum": dynamics_oracle = pendulum_dynamics_dt elif env_name == "oderl-cartpole": dynamics_oracle = cartpole_dynamics_dt elif env_name == "oderl-acrobot": dynamics_oracle = acrobot_dynamics_dt elif env_name == "oderl-cancer": dynamics_oracle = cancer_dynamics_dt def dynamics(*args, **kwargs): state_mu = dynamics_oracle(*args, **kwargs) return state_mu, torch.ones_like(state_mu) * oracle_sigma dynamics = partial(dynamics, friction=config.friction) def running_cost(state, action): if state_constraint: reward = env.diff_obs_reward_( state, exp_reward=False, state_constraint=state_constraint ) + env.diff_ac_reward_(action) elif change_goal: global change_goal_flipped reward = env.diff_obs_reward_( state, exp_reward=False, change_goal=change_goal, change_goal_flipped=change_goal_flipped ) + env.diff_ac_reward_(action) else: reward = env.diff_obs_reward_(state, exp_reward=False) + env.diff_ac_reward_(action) cost = -reward return cost if config.planner == "mppi": mppi_gym = MPPI( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, ) elif config.planner == "mppi_active_observing": mppi_gym = MPPIActiveObserving( dynamics, running_cost, nx, mppi_noise_sigma, num_samples=roll_outs, horizon=time_steps, device=device, lambda_=mppi_lambda_, u_min=torch.tensor(ACTION_LOW), u_max=torch.tensor(ACTION_HIGH), u_scale=ACTION_HIGH, observing_cost=config.observing_cost, sampling_policy=config.sampling_policy, observing_var_threshold=config.observing_var_threshold, limit_actions_to_only_positive=limit_actions_to_only_positive, dt=dt, ) if save_video: start_virtual_display() videos_folder = "./logs/new_videos" Path(videos_folder).mkdir(parents=True, exist_ok=True) filename = f"{videos_folder}/{env_name}_{model_name}_{uniq}.mp4" fps = int(1 / dt) def loop(): s0 = [] a0 = [] sn = [] ts = [] ACTION_LOW = env.action_space.low[0] ACTION_HIGH = env.action_space.high[0] it = 0 total_reward = 0 env.reset() start_time = time.perf_counter() mppi_gym.reset() while it < iter_: if change_goal_flipped_iter_ < it: change_goal_flipped = True state = 
env.get_obs() s0.append(state) command_start = time.perf_counter() if model_name != "random": action, costs_std = mppi_gym.command(state) if random_action_noise is not None: action += ( (torch.rand(nu, device=device) - 0.5) * 2.0 * env.action_space.high[0] ) * random_action_noise action = action.clip(min=ACTION_LOW, max=ACTION_HIGH) action = action.view(nu) else: action = torch.from_numpy(env.action_space.sample()) elapsed = time.perf_counter() - command_start
state, reward, done, tsn = step_env(env, action.detach().cpu().numpy(), obs_noise=config.observation_noise)
4
2023-10-24 16:19:14+00:00
16k
s1tools/s1-etad
s1etad/_jupyter_support.py
[ { "identifier": "Sentinel1Etad", "path": "s1etad/product.py", "snippet": "class Sentinel1Etad:\n \"\"\"Sentinel-1 ETAD product.\n\n Class to decode and access the elements of the Sentinel ETAD product\n which specification is governed by ETAD-DLR-PS-0014.\n\n The index operator [] (implement...
from .product import Sentinel1Etad, Sentinel1EtadSwath, Sentinel1EtadBurst
13,474
# -*- coding: utf-8 -*- def _sentinel1_etad_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() plist = obj.s1_product_list() if isinstance(plist, str): plist = [plist] p.text(f"Number of Sentinel-1 slices: {len(plist)}") p.break_() with p.group(2, "Sentinel-1 products list:"): for name in plist: p.break_() p.text(name) p.break_() p.text(f"Number of swaths: {obj.number_of_swath}") p.break_() p.text("Swath list: {}".format(", ".join(obj.swath_list))) p.break_() with p.group(2, "Azimuth time:"): p.break_() p.text(f"min: {obj.min_azimuth_time}") p.break_() p.text(f"max: {obj.max_azimuth_time}") p.break_() with p.group(2, "Range time:"): p.break_() p.text(f"min: {obj.min_range_time}") p.break_() p.text(f"max: {obj.max_range_time}") p.break_() with p.group(2, "Grid sampling:"): for key, value in obj.grid_sampling.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Grid spacing:"): for key, value in obj.grid_spacing.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Processing settings:"): for key, value in obj.processing_setting().items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_swath_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Number of bursts: {obj.number_of_burst}") p.break_() p.text("Burst list: " + str(obj.burst_list)) p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_burst_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Burst index: {obj.burst_index}") p.break_() p.text(f"Shape: ({obj.lines}, {obj.samples})") p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _register_jupyter_formatters(): try: ipy = get_ipython() # noqa except NameError: return False else: formatter = ipy.display_formatter.formatters["text/plain"] formatter.for_type(Sentinel1Etad, _sentinel1_etad_repr_pretty_) formatter.for_type( Sentinel1EtadSwath, _sentinel1_etad_swath_repr_pretty_ ) formatter.for_type(
# -*- coding: utf-8 -*- def _sentinel1_etad_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() plist = obj.s1_product_list() if isinstance(plist, str): plist = [plist] p.text(f"Number of Sentinel-1 slices: {len(plist)}") p.break_() with p.group(2, "Sentinel-1 products list:"): for name in plist: p.break_() p.text(name) p.break_() p.text(f"Number of swaths: {obj.number_of_swath}") p.break_() p.text("Swath list: {}".format(", ".join(obj.swath_list))) p.break_() with p.group(2, "Azimuth time:"): p.break_() p.text(f"min: {obj.min_azimuth_time}") p.break_() p.text(f"max: {obj.max_azimuth_time}") p.break_() with p.group(2, "Range time:"): p.break_() p.text(f"min: {obj.min_range_time}") p.break_() p.text(f"max: {obj.max_range_time}") p.break_() with p.group(2, "Grid sampling:"): for key, value in obj.grid_sampling.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Grid spacing:"): for key, value in obj.grid_spacing.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Processing settings:"): for key, value in obj.processing_setting().items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_swath_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Number of bursts: {obj.number_of_burst}") p.break_() p.text("Burst list: " + str(obj.burst_list)) p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _sentinel1_etad_burst_repr_pretty_(obj, p, cycle): if cycle: p.text(repr(obj)) else: p.text(repr(obj)) p.break_() p.text(f"Swaths ID: {obj.swath_id}") p.break_() p.text(f"Burst index: {obj.burst_index}") p.break_() p.text(f"Shape: ({obj.lines}, {obj.samples})") p.break_() with p.group(2, "Sampling start:"): for key, value in obj.sampling_start.items(): p.break_() p.text(f"{key}: {value}") p.break_() with p.group(2, "Sampling:"): for key, value in obj.sampling.items(): p.break_() p.text(f"{key}: {value}") def _register_jupyter_formatters(): try: ipy = get_ipython() # noqa except NameError: return False else: formatter = ipy.display_formatter.formatters["text/plain"] formatter.for_type(Sentinel1Etad, _sentinel1_etad_repr_pretty_) formatter.for_type( Sentinel1EtadSwath, _sentinel1_etad_swath_repr_pretty_ ) formatter.for_type(
Sentinel1EtadBurst, _sentinel1_etad_burst_repr_pretty_
2
2023-10-27 13:47:30+00:00
16k
ifrit98/storage-subnet
storage/validator/store.py
[ { "identifier": "EventSchema", "path": "storage/validator/event.py", "snippet": "class EventSchema:\n task_name: str # Task type, e.g. 'store', 'challenge', 'retrieve' 'broadcast'\n successful: List[bool] # List of whether or not the task was successful or not\n completion_times: List[float] ...
import os
import sys
import copy
import time
import torch
import base64
import typing
import asyncio
import aioredis
import bittensor as bt
import websocket
from pprint import pformat
from pyinstrument import Profiler
from Crypto.Random import get_random_bytes, random
from dataclasses import asdict
from storage.validator.event import EventSchema
from storage import protocol
from storage.shared.ecc import (
    hash_data,
    setup_CRS,
    ecc_point_to_hex,
)
from storage.shared.utils import b64_encode
from storage.validator.utils import (
    make_random_file,
    compute_chunk_distribution_mut_exclusive_numpy_reuse_uids,
)
from storage.validator.encryption import encrypt_data
from storage.validator.verify import verify_store_with_seed
from storage.validator.reward import apply_reward_scores
from storage.validator.database import (
    add_metadata_to_hotkey,
    store_chunk_metadata,
    store_file_chunk_mapping_ordered,
    get_ordered_metadata,
    hotkey_at_capacity,
)
from storage.validator.bonding import update_statistics
from .reward import create_reward_vector
from .network import ping_and_retry_uids, compute_and_ping_chunks, reroll_distribution
from .utils import compute_chunk_distribution_mut_exclusive_numpy_reuse_uids
10,879
rewards=[], moving_averaged_scores=[], ) g, h = setup_CRS(curve=self.config.neuron.curve) bt.logging.debug(f"type(chunk): {type(chunk)}") bt.logging.debug(f"chunk: {chunk[:100]}") chunk = chunk.encode("utf-8") if isinstance(chunk, str) else chunk b64_encoded_chunk = await asyncio.to_thread(base64.b64encode, chunk) b64_encoded_chunk = b64_encoded_chunk.decode("utf-8") bt.logging.debug(f"b64_encoded_chunk: {b64_encoded_chunk[:100]}") random_seed = get_random_bytes(32).hex() synapse = protocol.Store( encrypted_data=b64_encoded_chunk, curve=self.config.neuron.curve, g=ecc_point_to_hex(g), h=ecc_point_to_hex(h), seed=random_seed, ) uids = [ uid for uid in uids if not await hotkey_at_capacity(self.metagraph.hotkeys[uid], self.database) ] axons = [self.metagraph.axons[uid] for uid in uids] responses = await self.dendrite( axons, synapse, deserialize=False, timeout=self.config.neuron.store_timeout, ) # Compute the rewards for the responses given proc time. rewards: torch.FloatTensor = torch.zeros( len(responses), dtype=torch.float32 ).to(self.device) async def success(hotkey, idx, uid, response): bt.logging.debug(f"Stored data in database with key: {hotkey}") failed_uids = [] def failure(uid): failed_uids.append(uid) await create_reward_vector( self, synapse, rewards, uids, responses, event, success, failure ) event.rewards.extend(rewards.tolist()) apply_reward_scores( self, uids, responses, rewards, timeout=self.config.neuron.store_timeout, mode=self.config.neuron.reward_mode, ) bt.logging.debug(f"Updated reward scores: {rewards.tolist()}") # Determine the best UID based on rewards if event.rewards: best_index = max(range(len(event.rewards)), key=event.rewards.__getitem__) event.best_uid = event.uids[best_index] event.best_hotkey = self.metagraph.hotkeys[event.best_uid] chunk_size = sys.getsizeof(chunk) # chunk size in bytes bt.logging.debug(f"chunk size: {chunk_size}") await store_chunk_metadata( full_hash, chunk_hash, [self.metagraph.hotkeys[uid] for uid in uids], chunk_size, # this should be len(chunk) but we need to fix the chunking self.database, ) return responses, b64_encoded_chunk, random_seed async def handle_uid_operations( uid, response, b64_encoded_chunk, random_seed, chunk_hash, chunk_size ): ss = time.time() start = time.time() # Offload the CPU-intensive verification to a separate thread verified = await asyncio.to_thread( verify_store_with_seed, response, b64_encoded_chunk, random_seed ) end = time.time() bt.logging.debug(f"verify_store_with_seed time for uid {uid} : {end-start}") if verified: # Prepare storage for the data for particular miner response_storage = { "prev_seed": response.seed, "size": chunk_size, "encryption_payload": encryption_payload, } start = time.time() # Store in the database according to the data hash and the miner hotkey await add_metadata_to_hotkey( self.metagraph.hotkeys[uid], chunk_hash, response_storage, # seed + size + encryption keys self.database, ) end = time.time() bt.logging.debug( f"Stored data in database for uid: {uid} | {str(chunk_hash)}" ) else: bt.logging.error(f"Failed to verify store commitment from UID: {uid}") # Update the storage statistics
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. async def store_encrypted_data( self, encrypted_data: typing.Union[bytes, str], encryption_payload: dict, exclude_uids: typing.List[str] = [], ttl: int = 0, k: int = None, max_retries: int = 3, ) -> bool: event = EventSchema( task_name="Store", successful=[], completion_times=[], task_status_messages=[], task_status_codes=[], block=self.subtensor.get_current_block(), uids=[], step_length=0.0, best_uid="", best_hotkey="", rewards=[], moving_averaged_scores=[], ) start_time = time.time() encrypted_data = ( encrypted_data.encode("utf-8") if isinstance(encrypted_data, str) else encrypted_data ) # Setup CRS for this round of validation g, h = setup_CRS(curve=self.config.neuron.curve) # Hash the data data_hash = hash_data(encrypted_data) # Convert to base64 for compactness # TODO: Don't do this if it's already b64 encoded. (Check first) b64_encrypted_data = base64.b64encode(encrypted_data).decode("utf-8") if self.config.neuron.verbose: bt.logging.debug(f"storing user data: {encrypted_data[:12]}...") bt.logging.debug(f"storing user hash: {data_hash}") bt.logging.debug(f"b64 encrypted data: {b64_encrypted_data[:12]}...") synapse = protocol.Store( encrypted_data=b64_encrypted_data, curve=self.config.neuron.curve, g=ecc_point_to_hex(g), h=ecc_point_to_hex(h), seed=get_random_bytes(32).hex(), # 256-bit seed ) # Select subset of miners to query (e.g. redunancy factor of N) uids, _ = await ping_and_retry_uids( self, k=k or self.config.neuron.store_redundancy, max_retries=max_retries, exclude_uids=exclude_uids, ) bt.logging.debug(f"store_encrypted_data() uids: {uids}") axons = [self.metagraph.axons[uid] for uid in uids] failed_uids = [None] retries = 0 while len(failed_uids) and retries < max_retries: if failed_uids == [None]: # initial loop failed_uids = [] # Broadcast the query to selected miners on the network. responses = await self.dendrite( axons, synapse, deserialize=False, timeout=self.config.neuron.store_timeout, ) # Compute the rewards for the responses given proc time. 
rewards: torch.FloatTensor = torch.zeros( len(responses), dtype=torch.float32 ).to(self.device) async def success(hotkey, idx, uid, response): # Prepare storage for the data for particular miner response_storage = { "prev_seed": synapse.seed, "size": sys.getsizeof(encrypted_data), # in bytes, not len(data) "encryption_payload": encryption_payload, } bt.logging.trace(f"Storing UID {uid} data {pformat(response_storage)}") # Store in the database according to the data hash and the miner hotkey await add_metadata_to_hotkey( hotkey, data_hash, response_storage, self.database, ) if ttl > 0: await self.database.expire( f"{hotkey}:{data_hash}", ttl, ) bt.logging.debug( f"Stored data in database with hotkey: {hotkey} | uid {uid} | {data_hash}" ) def failure(uid): failed_uids.append(uid) await create_reward_vector( self, synapse, rewards, uids, responses, event, success, failure ) event.rewards.extend(rewards.tolist()) if self.config.neuron.verbose and self.config.neuron.log_responses: bt.logging.debug(f"Store responses round: {retries}") [ bt.logging.debug(f"Store response: {response.dendrite.dict()}") for response in responses ] bt.logging.trace(f"Applying store rewards for retry: {retries}") apply_reward_scores( self, uids, responses, rewards, timeout=self.config.neuron.store_timeout, mode=self.config.neuron.reward_mode, ) # Get a new set of UIDs to query for those left behind if failed_uids != []: bt.logging.trace(f"Failed to store on uids: {failed_uids}") uids, _ = await ping_and_retry_uids( self, k=len(failed_uids), exclude_uids=exclude_uids ) bt.logging.trace(f"Retrying with new uids: {uids}") axons = [self.metagraph.axons[uid] for uid in uids] failed_uids = [] # reset failed uids for next round retries += 1 # Calculate step length end_time = time.time() event.step_length = end_time - start_time # Determine the best UID based on rewards if event.rewards: best_index = max(range(len(event.rewards)), key=event.rewards.__getitem__) event.best_uid = event.uids[best_index] event.best_hotkey = self.metagraph.hotkeys[event.best_uid] # Update event log with moving averaged scores event.moving_averaged_scores = self.moving_averaged_scores.tolist() return event async def store_random_data(self): """ Stores data on the network and ensures it is correctly committed by the miners. Parameters: - data (bytes, optional): The data to be stored. - wallet (bt.wallet, optional): The wallet to be used for encrypting the data. Returns: - The status of the data storage operation. """ # Setup CRS for this round of validation g, h = setup_CRS(curve=self.config.neuron.curve) # Make a random bytes file to test the miner if none provided data = make_random_file(maxsize=self.config.neuron.maxsize) bt.logging.debug(f"Random store data size: {sys.getsizeof(data)}") # Encrypt the data # TODO: create and use a throwaway wallet (never decrypable) encrypted_data, encryption_payload = encrypt_data(data, self.encryption_wallet) return await store_encrypted_data( self, encrypted_data, encryption_payload, k=self.config.neuron.store_sample_size, ttl=self.config.neuron.data_ttl, ) async def store_broadband( self, encrypted_data, encryption_payload, R=3, k=10, data_hash=None, exclude_uids=None, ): """ Asynchronously stores encrypted data across a distributed network by splitting it into chunks and assigning these chunks to various miners for storage. This method ensures redundancy and efficient data distribution while handling network requests concurrently. 
The process includes chunking the data, selecting miners for storage, and verifying the integrity of stored data through response validation. Parameters: encrypted_data (bytes): The encrypted data to be stored across the network. encryption_payload (dict): Additional payload information required for encryption. R (int, optional): The redundancy factor, denoting how many times each chunk is replicated. Default is 3. k (int, optional): The number of miners to query for each chunk. Default is 10. data_hash (str, optional): The hash of the data to be stored. If not provided, compute it. Default is None. exclude_uids: (list of int, optional): A list of UIDs to exclude from the storage process. Default is None. Returns: str: The hash of the full data, representing its unique identifier in the network. Raises: Exception: If the process of creating initial distributions fails after multiple retries. Note: - Uses a semaphore to limit the number of concurrent network requests. - Employs a retry mechanism for handling network and miner availability issues. - Logs various stages of the process for debugging and monitoring purposes. """ if self.config.neuron.profile: # Create a profiler instance profiler = Profiler() profiler.start() semaphore = asyncio.Semaphore(self.config.neuron.semaphore_size) async def store_chunk_group(chunk_hash, chunk, uids): event = EventSchema( task_name="Store", successful=[], completion_times=[], task_status_messages=[], task_status_codes=[], block=self.subtensor.get_current_block(), uids=[], step_length=0.0, best_uid="", best_hotkey="", rewards=[], moving_averaged_scores=[], ) g, h = setup_CRS(curve=self.config.neuron.curve) bt.logging.debug(f"type(chunk): {type(chunk)}") bt.logging.debug(f"chunk: {chunk[:100]}") chunk = chunk.encode("utf-8") if isinstance(chunk, str) else chunk b64_encoded_chunk = await asyncio.to_thread(base64.b64encode, chunk) b64_encoded_chunk = b64_encoded_chunk.decode("utf-8") bt.logging.debug(f"b64_encoded_chunk: {b64_encoded_chunk[:100]}") random_seed = get_random_bytes(32).hex() synapse = protocol.Store( encrypted_data=b64_encoded_chunk, curve=self.config.neuron.curve, g=ecc_point_to_hex(g), h=ecc_point_to_hex(h), seed=random_seed, ) uids = [ uid for uid in uids if not await hotkey_at_capacity(self.metagraph.hotkeys[uid], self.database) ] axons = [self.metagraph.axons[uid] for uid in uids] responses = await self.dendrite( axons, synapse, deserialize=False, timeout=self.config.neuron.store_timeout, ) # Compute the rewards for the responses given proc time. 
rewards: torch.FloatTensor = torch.zeros( len(responses), dtype=torch.float32 ).to(self.device) async def success(hotkey, idx, uid, response): bt.logging.debug(f"Stored data in database with key: {hotkey}") failed_uids = [] def failure(uid): failed_uids.append(uid) await create_reward_vector( self, synapse, rewards, uids, responses, event, success, failure ) event.rewards.extend(rewards.tolist()) apply_reward_scores( self, uids, responses, rewards, timeout=self.config.neuron.store_timeout, mode=self.config.neuron.reward_mode, ) bt.logging.debug(f"Updated reward scores: {rewards.tolist()}") # Determine the best UID based on rewards if event.rewards: best_index = max(range(len(event.rewards)), key=event.rewards.__getitem__) event.best_uid = event.uids[best_index] event.best_hotkey = self.metagraph.hotkeys[event.best_uid] chunk_size = sys.getsizeof(chunk) # chunk size in bytes bt.logging.debug(f"chunk size: {chunk_size}") await store_chunk_metadata( full_hash, chunk_hash, [self.metagraph.hotkeys[uid] for uid in uids], chunk_size, # this should be len(chunk) but we need to fix the chunking self.database, ) return responses, b64_encoded_chunk, random_seed async def handle_uid_operations( uid, response, b64_encoded_chunk, random_seed, chunk_hash, chunk_size ): ss = time.time() start = time.time() # Offload the CPU-intensive verification to a separate thread verified = await asyncio.to_thread( verify_store_with_seed, response, b64_encoded_chunk, random_seed ) end = time.time() bt.logging.debug(f"verify_store_with_seed time for uid {uid} : {end-start}") if verified: # Prepare storage for the data for particular miner response_storage = { "prev_seed": response.seed, "size": chunk_size, "encryption_payload": encryption_payload, } start = time.time() # Store in the database according to the data hash and the miner hotkey await add_metadata_to_hotkey( self.metagraph.hotkeys[uid], chunk_hash, response_storage, # seed + size + encryption keys self.database, ) end = time.time() bt.logging.debug( f"Stored data in database for uid: {uid} | {str(chunk_hash)}" ) else: bt.logging.error(f"Failed to verify store commitment from UID: {uid}") # Update the storage statistics
await update_statistics(
16
2023-10-26 18:54:47+00:00
16k
Eclectic-Sheep/sheeprlhf
sheeprlhf/task/train/ppo.py
[ { "identifier": "PPOAgent", "path": "sheeprlhf/agent/ppo.py", "snippet": "class PPOAgent:\n \"\"\"Agent model for PPO training.\"\"\"\n\n _reference: ActorModel\n _reward: RewardModel\n _finetune_mode: FINETUNE_MODE\n _actor: Optional[ActorModel] = None\n _critic: Optional[CriticModel]...
import copy
import time
import torch
from pathlib import Path
from typing import Dict
from lightning import Fabric
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import GenerationConfig, PreTrainedTokenizer
from sheeprlhf.agent.ppo import PPOAgent
from sheeprlhf.data.base import TextDataset
from sheeprlhf.data.collate import LeftPadCollate
from sheeprlhf.loss.ppo import policy_loss, value_loss
from sheeprlhf.model.actor import ActorModel
from sheeprlhf.structure.data import DataConfig
from sheeprlhf.structure.generation import GenConfig
from sheeprlhf.structure.model import ModelConfig
from sheeprlhf.structure.task import PPOConfig
from sheeprlhf.utils.data import prepare_generation_config, validate_dataset
from sheeprlhf.utils.helper import create_tensorboard_logger, get_log_dir, log_text
from sheeprlhf.utils.hydra import instantiate_from_config
from sheeprlhf.utils.metric import PPOMetricManager
from sheeprlhf.utils.model import compute_grad_norm, prepare_optimizer_parameters
from sheeprlhf.utils.ppo import AdaptiveKLController, FixedKLController, collect_rollout, masked_normalize
from sheeprlhf.utils.registry import register_task
11,059
agent.setup_finetuning() agent.actor = fabric.setup_module(agent.actor) agent.critic = fabric.setup_module(agent.critic) if not agent.share_critic_reward: agent.reward = fabric.setup_module(agent.reward) if not agent.share_actor_critic and not agent.lora_enabled: agent.reference = fabric.setup_module(agent.reference) # Setup Generation Configs generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=gen_cfg, fabric=fabric, ) eval_gen_cfg = copy.deepcopy(gen_cfg) eval_gen_cfg.do_sample = False eval_generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=eval_gen_cfg, fabric=fabric, ) # Setup Optimizer Scheduler fabric models actor_trainable_params, _, _ = prepare_optimizer_parameters(agent.actor, weight_decay=optim_cfg.weight_decay) actor_optimizer = instantiate_from_config( optim_cfg, params=actor_trainable_params, _convert_="partial", ) actor_optimizer = fabric.setup_optimizers(actor_optimizer) critic_trainable_params, _, _ = prepare_optimizer_parameters(agent.critic, weight_decay=optim_cfg.weight_decay) critic_optimizer = instantiate_from_config( optim_cfg, params=critic_trainable_params, _convert_="partial", ) critic_optimizer = fabric.setup_optimizers(critic_optimizer) if fabric.is_global_zero: gen_text, score = generate( agent=agent, tokenizer=tokenizer, generation_config=eval_generation_config, example_prompt=example_prompt, device=fabric.device, ) log_text(fabric, gen_text, "info/example_sample", step=0) fabric.log("info/example_last_reward", score, step=0) num_training_steps = 2 if cfg.dry_run else task_cfg.epochs * len(train_dataloader) # KL Controller if task_cfg.adaptive_kl_coeff: kl_controller = AdaptiveKLController( init_kl_coef=task_cfg.init_kl_coeff, target=task_cfg.target_kl_coeff, kl_horizon=num_training_steps ) else: kl_controller = FixedKLController(kl_coeff=task_cfg.init_kl_coeff) fabric.print("Model Checkpoint interval: ", task_cfg.save_interval, "steps") fabric.print("Model Evaluation interval: ", task_cfg.eval_interval, "steps") iterator = tqdm(range(num_training_steps), disable=not fabric.is_global_zero) data_iterator = iter(train_dataloader) agent.reward.eval() for k in iterator: # Setup counters and data if k % len(train_dataloader) == 0 or data_iterator is None: data_iterator = iter(train_dataloader) is_accumulating = (k) % task_cfg.gradient_accumulation_steps != 0 last_step = k == num_training_steps - 1 # Setup batch data batch = next(data_iterator) max_prompt_length = batch["prompt_input_ids"].shape[1] agent.actor.eval() agent.critic.eval() t0 = time.time() rollout, sample_output = collect_rollout( batch=batch, agent=agent, generation_config=generation_config, kl_controller=kl_controller, task_cfg=task_cfg, tokenizer=tokenizer, fabric=fabric, metrics=metrics, ) time_rollout = time.time() - t0 rollout_dataloader = DataLoader( rollout, batch_size=task_cfg.micro_batch_size, shuffle=True, collate_fn=lambda x: x ) rollout_dataloader = fabric.setup_dataloaders(rollout_dataloader, use_distributed_sampler=False) agent.actor.train() agent.critic.train() for _ in range(task_cfg.ppo_epochs): accumulator_counter = 0 for micro_batch in rollout_dataloader: is_accumulating = (accumulator_counter) % task_cfg.gradient_accumulation_steps != 0 generated_data = { "input_ids": micro_batch["input_ids"], "attention_mask": micro_batch["attention_mask"], } old_log_probs = micro_batch["actor_log_probs"] old_values = micro_batch["values"] advantages = micro_batch["advantages"] returns = 
micro_batch["returns"] start_token_idx = max_prompt_length - 1 action_mask = micro_batch["attention_mask"][:, start_token_idx:-1].int() if task_cfg.normalize_advantages: advantages = masked_normalize(advantages, action_mask) with fabric.no_backward_sync(agent.actor, enabled=is_accumulating): log_probs = agent.actor(**generated_data)[:, start_token_idx:] # (B, num_new_tokens)
@torch.no_grad() def generate( # noqa: D103 agent: PPOAgent, tokenizer: PreTrainedTokenizer, generation_config: GenerationConfig, example_prompt: Dict[str, torch.Tensor], device: torch.device, ): generated_input_ids = agent.actor.module.generate( input_ids=example_prompt["input_ids"].to(device), attention_mask=example_prompt["attention_mask"].to(device), generation_config=generation_config, use_cache=True, ) prompt_length = example_prompt["input_ids"].shape[1] generated_attention_mask = (generated_input_ids != generation_config.pad_token_id).int() generated_data = {"input_ids": generated_input_ids, "attention_mask": generated_attention_mask} reward = agent.reward(**generated_data)[:, prompt_length:] action_mask = (generated_input_ids != generation_config.pad_token_id).int()[:, prompt_length:] last_token_idx = torch.argmax(torch.cumsum(action_mask, dim=1) * action_mask, dim=1, keepdim=True) reward_score = torch.gather(reward, dim=-1, index=last_token_idx).squeeze(-1) return tokenizer.decode(generated_input_ids[0], skip_special_tokens=True), reward_score.item() @register_task() def main(fabric: Fabric, cfg: Dict): # noqa: D103 task_cfg = PPOConfig(**cfg.task) model_cfg = ModelConfig(**cfg.model) data_cfg = DataConfig(**cfg.data) gen_cfg = GenConfig(**cfg.generation) optim_cfg = cfg.optim fabric.seed_everything(cfg.seed + fabric.global_rank) # Create TensorBoardLogger. This will create the logger only on the # rank-0 process logger = create_tensorboard_logger(fabric, cfg, override_log_level=True) if logger and fabric.is_global_zero: fabric._loggers = [logger] fabric.logger.log_hyperparams(cfg) log_dir = get_log_dir(fabric, cfg.root_dir, cfg.run_name) experiment_dir = Path(log_dir).parent # Setup Metrics metrics = PPOMetricManager(log_interval=task_cfg.log_interval).to(fabric.device) # Setup Dataloaders data_processor = validate_dataset(fabric, data_cfg) dataset_path = Path(data_processor.full_path) tokenizer = data_processor.tokenizer collator = LeftPadCollate(pad_value=tokenizer.pad_token_id, ignore_index=data_cfg.ignore_index) train_dataset = TextDataset(dataframe_path=dataset_path / "finetune_train.pkl") train_dataloader = DataLoader( train_dataset, shuffle=True, batch_size=task_cfg.micro_batch_size, collate_fn=collator, num_workers=task_cfg.num_workers, ) train_dataloader = fabric.setup_dataloaders(train_dataloader) example_prompt = torch.load(dataset_path / "example_prompt.pt") # Setup Model with fabric.init_module(empty_init=model_cfg.fabric_empty_init): agent = PPOAgent(model_cfg=model_cfg, task_cfg=task_cfg) agent.load_checkpoint(device=fabric.device) agent.setup_finetuning() agent.actor = fabric.setup_module(agent.actor) agent.critic = fabric.setup_module(agent.critic) if not agent.share_critic_reward: agent.reward = fabric.setup_module(agent.reward) if not agent.share_actor_critic and not agent.lora_enabled: agent.reference = fabric.setup_module(agent.reference) # Setup Generation Configs generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=gen_cfg, fabric=fabric, ) eval_gen_cfg = copy.deepcopy(gen_cfg) eval_gen_cfg.do_sample = False eval_generation_config = prepare_generation_config( tokenizer=tokenizer, model_cfg=model_cfg, gen_cfg=eval_gen_cfg, fabric=fabric, ) # Setup Optimizer Scheduler fabric models actor_trainable_params, _, _ = prepare_optimizer_parameters(agent.actor, weight_decay=optim_cfg.weight_decay) actor_optimizer = instantiate_from_config( optim_cfg, params=actor_trainable_params, _convert_="partial", ) actor_optimizer = 
fabric.setup_optimizers(actor_optimizer) critic_trainable_params, _, _ = prepare_optimizer_parameters(agent.critic, weight_decay=optim_cfg.weight_decay) critic_optimizer = instantiate_from_config( optim_cfg, params=critic_trainable_params, _convert_="partial", ) critic_optimizer = fabric.setup_optimizers(critic_optimizer) if fabric.is_global_zero: gen_text, score = generate( agent=agent, tokenizer=tokenizer, generation_config=eval_generation_config, example_prompt=example_prompt, device=fabric.device, ) log_text(fabric, gen_text, "info/example_sample", step=0) fabric.log("info/example_last_reward", score, step=0) num_training_steps = 2 if cfg.dry_run else task_cfg.epochs * len(train_dataloader) # KL Controller if task_cfg.adaptive_kl_coeff: kl_controller = AdaptiveKLController( init_kl_coef=task_cfg.init_kl_coeff, target=task_cfg.target_kl_coeff, kl_horizon=num_training_steps ) else: kl_controller = FixedKLController(kl_coeff=task_cfg.init_kl_coeff) fabric.print("Model Checkpoint interval: ", task_cfg.save_interval, "steps") fabric.print("Model Evaluation interval: ", task_cfg.eval_interval, "steps") iterator = tqdm(range(num_training_steps), disable=not fabric.is_global_zero) data_iterator = iter(train_dataloader) agent.reward.eval() for k in iterator: # Setup counters and data if k % len(train_dataloader) == 0 or data_iterator is None: data_iterator = iter(train_dataloader) is_accumulating = (k) % task_cfg.gradient_accumulation_steps != 0 last_step = k == num_training_steps - 1 # Setup batch data batch = next(data_iterator) max_prompt_length = batch["prompt_input_ids"].shape[1] agent.actor.eval() agent.critic.eval() t0 = time.time() rollout, sample_output = collect_rollout( batch=batch, agent=agent, generation_config=generation_config, kl_controller=kl_controller, task_cfg=task_cfg, tokenizer=tokenizer, fabric=fabric, metrics=metrics, ) time_rollout = time.time() - t0 rollout_dataloader = DataLoader( rollout, batch_size=task_cfg.micro_batch_size, shuffle=True, collate_fn=lambda x: x ) rollout_dataloader = fabric.setup_dataloaders(rollout_dataloader, use_distributed_sampler=False) agent.actor.train() agent.critic.train() for _ in range(task_cfg.ppo_epochs): accumulator_counter = 0 for micro_batch in rollout_dataloader: is_accumulating = (accumulator_counter) % task_cfg.gradient_accumulation_steps != 0 generated_data = { "input_ids": micro_batch["input_ids"], "attention_mask": micro_batch["attention_mask"], } old_log_probs = micro_batch["actor_log_probs"] old_values = micro_batch["values"] advantages = micro_batch["advantages"] returns = micro_batch["returns"] start_token_idx = max_prompt_length - 1 action_mask = micro_batch["attention_mask"][:, start_token_idx:-1].int() if task_cfg.normalize_advantages: advantages = masked_normalize(advantages, action_mask) with fabric.no_backward_sync(agent.actor, enabled=is_accumulating): log_probs = agent.actor(**generated_data)[:, start_token_idx:] # (B, num_new_tokens)
p_loss = policy_loss(
3
2023-10-31 12:02:02+00:00
16k
cpacker/MemGPT
memgpt/cli/cli_config.py
[ { "identifier": "logger", "path": "memgpt/log.py", "snippet": "" }, { "identifier": "utils", "path": "memgpt/utils.py", "snippet": "DEBUG = False\r\nADJECTIVE_BANK = [\r\n \"beautiful\",\r\n \"gentle\",\r\n \"angry\",\r\n \"vivacious\",\r\n \"grumpy\",\r\n \"luxurious\"...
import builtins
import uuid
import questionary
import typer
import os
import shutil
from tqdm import tqdm
from prettytable import PrettyTable
from typing import Annotated
from enum import Enum
from memgpt.log import logger
from memgpt import utils
from memgpt.config import MemGPTConfig
from memgpt.credentials import MemGPTCredentials, SUPPORTED_AUTH_TYPES
from memgpt.constants import MEMGPT_DIR
from memgpt.constants import LLM_MAX_TOKENS
from memgpt.local_llm.constants import DEFAULT_ENDPOINTS, DEFAULT_OLLAMA_MODEL, DEFAULT_WRAPPER_NAME
from memgpt.local_llm.utils import get_available_wrappers
from memgpt.llm_api_tools import openai_get_model_list, azure_openai_get_model_list, smart_urljoin
from memgpt.server.utils import shorten_key_middle
from memgpt.data_types import User, LLMConfig, EmbeddingConfig
from memgpt.metadata import MetadataStore
from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.presets.presets import preset_options
13,996
def get_azure_credentials(): creds = dict( azure_key=os.getenv("AZURE_OPENAI_KEY"), azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), azure_version=os.getenv("AZURE_OPENAI_VERSION"), azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT"), azure_embedding_deployment=os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT"), ) # embedding endpoint and version default to non-embedding creds["azure_embedding_endpoint"] = os.getenv("AZURE_OPENAI_EMBEDDING_ENDPOINT", creds["azure_endpoint"]) creds["azure_embedding_version"] = os.getenv("AZURE_OPENAI_EMBEDDING_VERSION", creds["azure_version"]) return creds def get_openai_credentials(): openai_key = os.getenv("OPENAI_API_KEY") return openai_key def configure_llm_endpoint(config: MemGPTConfig, credentials: MemGPTCredentials): # configure model endpoint model_endpoint_type, model_endpoint = None, None # get default default_model_endpoint_type = config.default_llm_config.model_endpoint_type if config.default_llm_config.model_endpoint_type is not None and config.default_llm_config.model_endpoint_type not in [ "openai", "azure", ]: # local model default_model_endpoint_type = "local" provider = questionary.select( "Select LLM inference provider:", choices=["openai", "azure", "local"], default=default_model_endpoint_type ).ask() if provider is None: raise KeyboardInterrupt # set: model_endpoint_type, model_endpoint if provider == "openai": # check for key if credentials.openai_key is None: # allow key to get pulled from env vars openai_api_key = os.getenv("OPENAI_API_KEY", None) # if we still can't find it, ask for it as input if openai_api_key is None: while openai_api_key is None or len(openai_api_key) == 0: # Ask for API key as input openai_api_key = questionary.password( "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):" ).ask() if openai_api_key is None: raise KeyboardInterrupt credentials.openai_key = openai_api_key credentials.save() else: # Give the user an opportunity to overwrite the key openai_api_key = None default_input = ( shorten_key_middle(credentials.openai_key) if credentials.openai_key.startswith("sk-") else credentials.openai_key ) openai_api_key = questionary.password( "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):", default=default_input, ).ask() if openai_api_key is None: raise KeyboardInterrupt # If the user modified it, use the new one if openai_api_key != default_input: credentials.openai_key = openai_api_key credentials.save() model_endpoint_type = "openai" model_endpoint = "https://api.openai.com/v1" model_endpoint = questionary.text("Override default endpoint:", default=model_endpoint).ask() if model_endpoint is None: raise KeyboardInterrupt provider = "openai" elif provider == "azure": # check for necessary vars azure_creds = get_azure_credentials() if not all([azure_creds["azure_key"], azure_creds["azure_endpoint"], azure_creds["azure_version"]]): raise ValueError( "Missing environment variables for Azure (see https://memgpt.readme.io/docs/endpoints#azure-openai). Please set then run `memgpt configure` again." 
) else: credentials.azure_key = azure_creds["azure_key"] credentials.azure_endpoint = azure_creds["azure_endpoint"] credentials.azure_version = azure_creds["azure_version"] config.save() model_endpoint_type = "azure" model_endpoint = azure_creds["azure_endpoint"] else: # local models backend_options = ["webui", "webui-legacy", "llamacpp", "koboldcpp", "ollama", "lmstudio", "lmstudio-legacy", "vllm", "openai"] default_model_endpoint_type = None if config.default_llm_config.model_endpoint_type in backend_options: # set from previous config default_model_endpoint_type = config.default_llm_config.model_endpoint_type model_endpoint_type = questionary.select( "Select LLM backend (select 'openai' if you have an OpenAI compatible proxy):", backend_options, default=default_model_endpoint_type, ).ask() if model_endpoint_type is None: raise KeyboardInterrupt # set default endpoint # if OPENAI_API_BASE is set, assume that this is the IP+port the user wanted to use default_model_endpoint = os.getenv("OPENAI_API_BASE") # if OPENAI_API_BASE is not set, try to pull a default IP+port format from a hardcoded set if default_model_endpoint is None: if model_endpoint_type in DEFAULT_ENDPOINTS: default_model_endpoint = DEFAULT_ENDPOINTS[model_endpoint_type] model_endpoint = questionary.text("Enter default endpoint:", default=default_model_endpoint).ask() if model_endpoint is None: raise KeyboardInterrupt
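When the stored OpenAI key is echoed back as the prompt default in the configure flow above, it first passes through shorten_key_middle so only the ends of the key stay visible. The memgpt implementation of that helper is not shown in this record, so the following is only a hypothetical sketch of a masking function with that shape:

```python
def shorten_key_middle_sketch(key: str, visible: int = 4) -> str:
    # Hypothetical stand-in for shorten_key_middle: keep the first and last
    # `visible` characters and replace the middle with asterisks.
    if not key:
        return ""
    if len(key) <= 2 * visible:
        return key
    return key[:visible] + "*" * (len(key) - 2 * visible) + key[-visible:]


print(shorten_key_middle_sketch("sk-abcdefghijklmnop"))  # sk-a***********mnop
```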
# from global logging configuration # from memgpt.cli import app # from memgpt.agent_store.storage import StorageConnector, TableType app = typer.Typer() def get_azure_credentials(): creds = dict( azure_key=os.getenv("AZURE_OPENAI_KEY"), azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), azure_version=os.getenv("AZURE_OPENAI_VERSION"), azure_deployment=os.getenv("AZURE_OPENAI_DEPLOYMENT"), azure_embedding_deployment=os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT"), ) # embedding endpoint and version default to non-embedding creds["azure_embedding_endpoint"] = os.getenv("AZURE_OPENAI_EMBEDDING_ENDPOINT", creds["azure_endpoint"]) creds["azure_embedding_version"] = os.getenv("AZURE_OPENAI_EMBEDDING_VERSION", creds["azure_version"]) return creds def get_openai_credentials(): openai_key = os.getenv("OPENAI_API_KEY") return openai_key def configure_llm_endpoint(config: MemGPTConfig, credentials: MemGPTCredentials): # configure model endpoint model_endpoint_type, model_endpoint = None, None # get default default_model_endpoint_type = config.default_llm_config.model_endpoint_type if config.default_llm_config.model_endpoint_type is not None and config.default_llm_config.model_endpoint_type not in [ "openai", "azure", ]: # local model default_model_endpoint_type = "local" provider = questionary.select( "Select LLM inference provider:", choices=["openai", "azure", "local"], default=default_model_endpoint_type ).ask() if provider is None: raise KeyboardInterrupt # set: model_endpoint_type, model_endpoint if provider == "openai": # check for key if credentials.openai_key is None: # allow key to get pulled from env vars openai_api_key = os.getenv("OPENAI_API_KEY", None) # if we still can't find it, ask for it as input if openai_api_key is None: while openai_api_key is None or len(openai_api_key) == 0: # Ask for API key as input openai_api_key = questionary.password( "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):" ).ask() if openai_api_key is None: raise KeyboardInterrupt credentials.openai_key = openai_api_key credentials.save() else: # Give the user an opportunity to overwrite the key openai_api_key = None default_input = ( shorten_key_middle(credentials.openai_key) if credentials.openai_key.startswith("sk-") else credentials.openai_key ) openai_api_key = questionary.password( "Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):", default=default_input, ).ask() if openai_api_key is None: raise KeyboardInterrupt # If the user modified it, use the new one if openai_api_key != default_input: credentials.openai_key = openai_api_key credentials.save() model_endpoint_type = "openai" model_endpoint = "https://api.openai.com/v1" model_endpoint = questionary.text("Override default endpoint:", default=model_endpoint).ask() if model_endpoint is None: raise KeyboardInterrupt provider = "openai" elif provider == "azure": # check for necessary vars azure_creds = get_azure_credentials() if not all([azure_creds["azure_key"], azure_creds["azure_endpoint"], azure_creds["azure_version"]]): raise ValueError( "Missing environment variables for Azure (see https://memgpt.readme.io/docs/endpoints#azure-openai). Please set then run `memgpt configure` again." 
) else: credentials.azure_key = azure_creds["azure_key"] credentials.azure_endpoint = azure_creds["azure_endpoint"] credentials.azure_version = azure_creds["azure_version"] config.save() model_endpoint_type = "azure" model_endpoint = azure_creds["azure_endpoint"] else: # local models backend_options = ["webui", "webui-legacy", "llamacpp", "koboldcpp", "ollama", "lmstudio", "lmstudio-legacy", "vllm", "openai"] default_model_endpoint_type = None if config.default_llm_config.model_endpoint_type in backend_options: # set from previous config default_model_endpoint_type = config.default_llm_config.model_endpoint_type model_endpoint_type = questionary.select( "Select LLM backend (select 'openai' if you have an OpenAI compatible proxy):", backend_options, default=default_model_endpoint_type, ).ask() if model_endpoint_type is None: raise KeyboardInterrupt # set default endpoint # if OPENAI_API_BASE is set, assume that this is the IP+port the user wanted to use default_model_endpoint = os.getenv("OPENAI_API_BASE") # if OPENAI_API_BASE is not set, try to pull a default IP+port format from a hardcoded set if default_model_endpoint is None: if model_endpoint_type in DEFAULT_ENDPOINTS: default_model_endpoint = DEFAULT_ENDPOINTS[model_endpoint_type] model_endpoint = questionary.text("Enter default endpoint:", default=default_model_endpoint).ask() if model_endpoint is None: raise KeyboardInterrupt
while not utils.is_valid_url(model_endpoint):
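The target completion for this record keeps re-prompting until utils.is_valid_url accepts the endpoint. That helper's implementation is not part of this record, so this is a minimal sketch of an equivalent check-and-retry loop, assuming a urllib-based validation; the canned answers stand in for the questionary prompts used in the CLI:

```python
from urllib.parse import urlparse


def is_valid_url_sketch(url: str) -> bool:
    # Assumed behaviour: accept http(s) URLs that carry a host component.
    try:
        parsed = urlparse(url)
    except ValueError:
        return False
    return parsed.scheme in ("http", "https") and bool(parsed.netloc)


answers = iter(["localhost:5000", "http://localhost:5000"])  # stand-in for user input
model_endpoint = next(answers)
while not is_valid_url_sketch(model_endpoint):
    model_endpoint = next(answers)
print(model_endpoint)  # http://localhost:5000
```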
1
2023-10-11 07:38:37+00:00
16k
xxlong0/Wonder3D
mvdiffusion/models/unet_mv2d_condition.py
[ { "identifier": "CrossAttnDownBlockMV2D", "path": "mvdiffusion/models/unet_mv2d_blocks.py", "snippet": "class CrossAttnDownBlockMV2D(nn.Module):\n def __init__(\n self,\n in_channels: int,\n out_channels: int,\n temb_channels: int,\n dropout: float = 0.0,\n n...
from dataclasses import dataclass from typing import Any, Dict, List, Optional, Tuple, Union from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.loaders import UNet2DConditionLoadersMixin from diffusers.utils import BaseOutput, logging from diffusers.models.activations import get_activation from diffusers.models.attention_processor import AttentionProcessor, AttnProcessor from diffusers.models.embeddings import ( GaussianFourierProjection, ImageHintTimeEmbedding, ImageProjection, ImageTimeEmbedding, TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps, ) from diffusers.models.modeling_utils import ModelMixin, load_state_dict, _load_state_dict_into_model from diffusers.models.unet_2d_blocks import ( CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UNetMidBlock2DCrossAttn, UNetMidBlock2DSimpleCrossAttn, UpBlock2D, ) from diffusers.utils import ( CONFIG_NAME, DIFFUSERS_CACHE, FLAX_WEIGHTS_NAME, HF_HUB_OFFLINE, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, _add_variant, _get_model_file, deprecate, is_accelerate_available, is_safetensors_available, is_torch_version, logging, ) from diffusers import __version__ from mvdiffusion.models.unet_mv2d_blocks import ( CrossAttnDownBlockMV2D, CrossAttnUpBlockMV2D, UNetMidBlockMV2DCrossAttn, get_down_block, get_up_block, ) import os import torch import torch.nn as nn import torch.utils.checkpoint import copy
11,063
def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "set_processor"): processors[f"{name}.processor"] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class UNetMV2DConditionOutput(BaseOutput): """ The output of [`UNet2DConditionModel`]. Args: sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model. """ sample: torch.FloatTensor = None class UNetMV2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin): r""" A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample shaped output. This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented for all models (such as downloading or saving). Parameters: sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`): Height and width of input/output sample. in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample. out_channels (`int`, *optional*, defaults to 4): Number of channels in the output. center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample. flip_sin_to_cos (`bool`, *optional*, defaults to `False`): Whether to flip the sin to cos in the time embedding. freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): The tuple of downsample blocks to use. mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`): Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped. up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`): The tuple of upsample blocks to use. only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`): Whether to include self-attention in the basic transformer blocks, see [`~models.attention.BasicTransformerBlock`]. block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`): The tuple of output channels for each block. layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block. downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution. mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block. act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use. norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization. If `None`, normalization and activation layers is skipped in post-processing. norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization. 
cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280): The dimension of the cross attention features. transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. encoder_hid_dim (`int`, *optional*, defaults to None): If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` dimension to `cross_attention_dim`. encoder_hid_dim_type (`str`, *optional*, defaults to `None`): If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads. num_attention_heads (`int`, *optional*): The number of attention heads. If not defined, defaults to `attention_head_dim` resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`. class_embed_type (`str`, *optional*, defaults to `None`): The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`, `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. addition_embed_type (`str`, *optional*, defaults to `None`): Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or "text". "text" will use the `TextTimeEmbedding` layer. addition_time_embed_dim: (`int`, *optional*, defaults to `None`): Dimension for the timestep embeddings. num_class_embeds (`int`, *optional*, defaults to `None`): Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing class conditioning with `class_embed_type` equal to `None`. time_embedding_type (`str`, *optional*, defaults to `positional`): The type of position embedding to use for timesteps. Choose from `positional` or `fourier`. time_embedding_dim (`int`, *optional*, defaults to `None`): An optional override for the dimension of the projected time embedding. time_embedding_act_fn (`str`, *optional*, defaults to `None`): Optional activation function to use only once on the time embeddings before they are passed to the rest of the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`. timestep_post_act (`str`, *optional*, defaults to `None`): The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`. time_cond_proj_dim (`int`, *optional*, defaults to `None`): The dimension of `cond_proj` layer in the timestep embedding. conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer. conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer. projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when `class_embed_type="projection"`. class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time embeddings with the class embeddings. mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`): Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. 
If `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False` otherwise. """ _supports_gradient_checkpointing = True @register_to_config def __init__( self, sample_size: Optional[int] = None, in_channels: int = 4, out_channels: int = 4, center_input_sample: bool = False, flip_sin_to_cos: bool = True, freq_shift: int = 0, down_block_types: Tuple[str] = ( "CrossAttnDownBlockMV2D", "CrossAttnDownBlockMV2D", "CrossAttnDownBlockMV2D", "DownBlock2D", ), mid_block_type: Optional[str] = "UNetMidBlockMV2DCrossAttn", up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D", "CrossAttnUpBlockMV2D"), only_cross_attention: Union[bool, Tuple[bool]] = False, block_out_channels: Tuple[int] = (320, 640, 1280, 1280), layers_per_block: Union[int, Tuple[int]] = 2, downsample_padding: int = 1, mid_block_scale_factor: float = 1, act_fn: str = "silu", norm_num_groups: Optional[int] = 32, norm_eps: float = 1e-5, cross_attention_dim: Union[int, Tuple[int]] = 1280, transformer_layers_per_block: Union[int, Tuple[int]] = 1, encoder_hid_dim: Optional[int] = None, encoder_hid_dim_type: Optional[str] = None, attention_head_dim: Union[int, Tuple[int]] = 8, num_attention_heads: Optional[Union[int, Tuple[int]]] = None, dual_cross_attention: bool = False, use_linear_projection: bool = False, class_embed_type: Optional[str] = None, addition_embed_type: Optional[str] = None, addition_time_embed_dim: Optional[int] = None, num_class_embeds: Optional[int] = None, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", resnet_skip_time_act: bool = False, resnet_out_scale_factor: int = 1.0, time_embedding_type: str = "positional", time_embedding_dim: Optional[int] = None, time_embedding_act_fn: Optional[str] = None, timestep_post_act: Optional[str] = None, time_cond_proj_dim: Optional[int] = None, conv_in_kernel: int = 3, conv_out_kernel: int = 3, projection_class_embeddings_input_dim: Optional[int] = None, class_embeddings_concat: bool = False, mid_block_only_cross_attention: Optional[bool] = None, cross_attention_norm: Optional[str] = None, addition_embed_type_num_heads=64, num_views: int = 1, cd_attention_last: bool = False, cd_attention_mid: bool = False, multiview_attention: bool = True, sparse_mv_attention: bool = False, mvcd_attention: bool = False ): super().__init__() self.sample_size = sample_size if num_attention_heads is not None: raise ValueError( "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
num_attention_heads = num_attention_heads or attention_head_dim # Check inputs if len(down_block_types) != len(up_block_types): raise ValueError( f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}." ) if len(block_out_channels) != len(down_block_types): raise ValueError( f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." ) if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): raise ValueError( f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." ) if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): raise ValueError( f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." ) if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}." ) if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types): raise ValueError( f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}." ) if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types): raise ValueError( f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}." ) # input conv_in_padding = (conv_in_kernel - 1) // 2 self.conv_in = nn.Conv2d( in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding ) # time if time_embedding_type == "fourier": time_embed_dim = time_embedding_dim or block_out_channels[0] * 2 if time_embed_dim % 2 != 0: raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.") self.time_proj = GaussianFourierProjection( time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos ) timestep_input_dim = time_embed_dim elif time_embedding_type == "positional": time_embed_dim = time_embedding_dim or block_out_channels[0] * 4 self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) timestep_input_dim = block_out_channels[0] else: raise ValueError( f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`." ) self.time_embedding = TimestepEmbedding( timestep_input_dim, time_embed_dim, act_fn=act_fn, post_act_fn=timestep_post_act, cond_proj_dim=time_cond_proj_dim, ) if encoder_hid_dim_type is None and encoder_hid_dim is not None: encoder_hid_dim_type = "text_proj" self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") if encoder_hid_dim is None and encoder_hid_dim_type is not None: raise ValueError( f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." 
) if encoder_hid_dim_type == "text_proj": self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) elif encoder_hid_dim_type == "text_image_proj": # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` self.encoder_hid_proj = TextImageProjection( text_embed_dim=encoder_hid_dim, image_embed_dim=cross_attention_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type == "image_proj": # Kandinsky 2.2 self.encoder_hid_proj = ImageProjection( image_embed_dim=encoder_hid_dim, cross_attention_dim=cross_attention_dim, ) elif encoder_hid_dim_type is not None: raise ValueError( f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." ) else: self.encoder_hid_proj = None # class embedding if class_embed_type is None and num_class_embeds is not None: self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) elif class_embed_type == "timestep": self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn) elif class_embed_type == "identity": self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) elif class_embed_type == "projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" ) # The projection `class_embed_type` is the same as the timestep `class_embed_type` except # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings # 2. it projects from an arbitrary input dimension. # # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. # As a result, `TimestepEmbedding` can be passed arbitrary vectors. self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif class_embed_type == "simple_projection": if projection_class_embeddings_input_dim is None: raise ValueError( "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set" ) self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim) else: self.class_embedding = None if addition_embed_type == "text": if encoder_hid_dim is not None: text_time_embedding_from_dim = encoder_hid_dim else: text_time_embedding_from_dim = cross_attention_dim self.add_embedding = TextTimeEmbedding( text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads ) elif addition_embed_type == "text_image": # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. 
To not clutter the __init__ too much # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` self.add_embedding = TextImageTimeEmbedding( text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim ) elif addition_embed_type == "text_time": self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) elif addition_embed_type == "image": # Kandinsky 2.2 self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type == "image_hint": # Kandinsky 2.2 ControlNet self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim) elif addition_embed_type is not None: raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") if time_embedding_act_fn is None: self.time_embed_act = None else: self.time_embed_act = get_activation(time_embedding_act_fn) self.down_blocks = nn.ModuleList([]) self.up_blocks = nn.ModuleList([]) if isinstance(only_cross_attention, bool): if mid_block_only_cross_attention is None: mid_block_only_cross_attention = only_cross_attention only_cross_attention = [only_cross_attention] * len(down_block_types) if mid_block_only_cross_attention is None: mid_block_only_cross_attention = False if isinstance(num_attention_heads, int): num_attention_heads = (num_attention_heads,) * len(down_block_types) if isinstance(attention_head_dim, int): attention_head_dim = (attention_head_dim,) * len(down_block_types) if isinstance(cross_attention_dim, int): cross_attention_dim = (cross_attention_dim,) * len(down_block_types) if isinstance(layers_per_block, int): layers_per_block = [layers_per_block] * len(down_block_types) if isinstance(transformer_layers_per_block, int): transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) if class_embeddings_concat: # The time embeddings are concatenated with the class embeddings. 
The dimension of the # time embeddings passed to the down, middle, and up blocks is twice the dimension of the # regular time embeddings blocks_time_embed_dim = time_embed_dim * 2 else: blocks_time_embed_dim = time_embed_dim # down output_channel = block_out_channels[0] for i, down_block_type in enumerate(down_block_types): input_channel = output_channel output_channel = block_out_channels[i] is_final_block = i == len(block_out_channels) - 1 down_block = get_down_block( down_block_type, num_layers=layers_per_block[i], transformer_layers_per_block=transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, temb_channels=blocks_time_embed_dim, add_downsample=not is_final_block, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=cross_attention_dim[i], num_attention_heads=num_attention_heads[i], downsample_padding=downsample_padding, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, num_views=num_views, cd_attention_last=cd_attention_last, cd_attention_mid=cd_attention_mid, multiview_attention=multiview_attention, sparse_mv_attention=sparse_mv_attention, mvcd_attention=mvcd_attention ) self.down_blocks.append(down_block) # mid if mid_block_type == "UNetMidBlock2DCrossAttn": self.mid_block = UNetMidBlock2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) # custom MV2D attention block elif mid_block_type == "UNetMidBlockMV2DCrossAttn": self.mid_block = UNetMidBlockMV2DCrossAttn( transformer_layers_per_block=transformer_layers_per_block[-1], in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, resnet_time_scale_shift=resnet_time_scale_shift, cross_attention_dim=cross_attention_dim[-1], num_attention_heads=num_attention_heads[-1], resnet_groups=norm_num_groups, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, num_views=num_views, cd_attention_last=cd_attention_last, cd_attention_mid=cd_attention_mid, multiview_attention=multiview_attention, sparse_mv_attention=sparse_mv_attention, mvcd_attention=mvcd_attention ) elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn": self.mid_block = UNetMidBlock2DSimpleCrossAttn( in_channels=block_out_channels[-1], temb_channels=blocks_time_embed_dim, resnet_eps=norm_eps, resnet_act_fn=act_fn, output_scale_factor=mid_block_scale_factor, cross_attention_dim=cross_attention_dim[-1], attention_head_dim=attention_head_dim[-1], resnet_groups=norm_num_groups, resnet_time_scale_shift=resnet_time_scale_shift, skip_time_act=resnet_skip_time_act, 
only_cross_attention=mid_block_only_cross_attention, cross_attention_norm=cross_attention_norm, ) elif mid_block_type is None: self.mid_block = None else: raise ValueError(f"unknown mid_block_type : {mid_block_type}") # count how many layers upsample the images self.num_upsamplers = 0 # up reversed_block_out_channels = list(reversed(block_out_channels)) reversed_num_attention_heads = list(reversed(num_attention_heads)) reversed_layers_per_block = list(reversed(layers_per_block)) reversed_cross_attention_dim = list(reversed(cross_attention_dim)) reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block)) only_cross_attention = list(reversed(only_cross_attention)) output_channel = reversed_block_out_channels[0] for i, up_block_type in enumerate(up_block_types): is_final_block = i == len(block_out_channels) - 1 prev_output_channel = output_channel output_channel = reversed_block_out_channels[i] input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)] # add upsample block for all BUT final layer if not is_final_block: add_upsample = True self.num_upsamplers += 1 else: add_upsample = False up_block = get_up_block( up_block_type, num_layers=reversed_layers_per_block[i] + 1, transformer_layers_per_block=reversed_transformer_layers_per_block[i], in_channels=input_channel, out_channels=output_channel, prev_output_channel=prev_output_channel, temb_channels=blocks_time_embed_dim, add_upsample=add_upsample, resnet_eps=norm_eps, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, cross_attention_dim=reversed_cross_attention_dim[i], num_attention_heads=reversed_num_attention_heads[i], dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention[i], upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resnet_skip_time_act=resnet_skip_time_act, resnet_out_scale_factor=resnet_out_scale_factor, cross_attention_norm=cross_attention_norm, attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, num_views=num_views, cd_attention_last=cd_attention_last, cd_attention_mid=cd_attention_mid, multiview_attention=multiview_attention, sparse_mv_attention=sparse_mv_attention, mvcd_attention=mvcd_attention ) self.up_blocks.append(up_block) prev_output_channel = output_channel # out if norm_num_groups is not None: self.conv_norm_out = nn.GroupNorm( num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps ) self.conv_act = get_activation(act_fn) else: self.conv_norm_out = None self.conv_act = None conv_out_padding = (conv_out_kernel - 1) // 2 self.conv_out = nn.Conv2d( block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding ) @property def attn_processors(self) -> Dict[str, AttentionProcessor]: r""" Returns: `dict` of attention processors: A dictionary containing all attention processors used in the model with indexed by its weight name. 
""" # set recursively processors = {} def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): if hasattr(module, "set_processor"): processors[f"{name}.processor"] = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) return processors for name, module in self.named_children(): fn_recursive_add_processors(name, module, processors) return processors def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): r""" Sets the attention processor to use to compute attention. Parameters: processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): The instantiated processor class or a dictionary of processor classes that will be set as the processor for **all** `Attention` layers. If `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors. """ count = len(self.attn_processors.keys()) if isinstance(processor, dict) and len(processor) != count: raise ValueError( f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" f" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): if hasattr(module, "set_processor"): if not isinstance(processor, dict): module.set_processor(processor) else: module.set_processor(processor.pop(f"{name}.processor")) for sub_name, child in module.named_children(): fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) for name, module in self.named_children(): fn_recursive_attn_processor(name, module, processor) def set_default_attn_processor(self): """ Disables custom attention processors and sets the default attention implementation. """ self.set_attn_processor(AttnProcessor()) def set_attention_slice(self, slice_size): r""" Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor in slices to compute attention in several steps. This is useful for saving some memory in exchange for a small decrease in speed. Args: slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` must be a multiple of `slice_size`. 
""" sliceable_head_dims = [] def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): sliceable_head_dims.append(module.sliceable_head_dim) for child in module.children(): fn_recursive_retrieve_sliceable_dims(child) # retrieve number of attention layers for module in self.children(): fn_recursive_retrieve_sliceable_dims(module) num_sliceable_layers = len(sliceable_head_dims) if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory slice_size = [dim // 2 for dim in sliceable_head_dims] elif slice_size == "max": # make smallest slice possible slice_size = num_sliceable_layers * [1] slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size if len(slice_size) != len(sliceable_head_dims): raise ValueError( f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." ) for i in range(len(slice_size)): size = slice_size[i] dim = sliceable_head_dims[i] if size is not None and size > dim: raise ValueError(f"size {size} has to be smaller or equal to {dim}.") # Recursively walk through all the children. # Any children which exposes the set_attention_slice method # gets the message def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): if hasattr(module, "set_attention_slice"): module.set_attention_slice(slice_size.pop()) for child in module.children(): fn_recursive_set_attention_slice(child, slice_size) reversed_slice_size = list(reversed(slice_size)) for module in self.children(): fn_recursive_set_attention_slice(module, reversed_slice_size) def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, (CrossAttnDownBlock2D, CrossAttnDownBlockMV2D, DownBlock2D, CrossAttnUpBlock2D, CrossAttnUpBlockMV2D, UpBlock2D)):
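The attn_processors property earlier in this record gathers every module that exposes set_processor by walking named_children recursively and keying the result as "<name>.processor". The same traversal pattern on a small toy module (not the Wonder3D UNet itself) looks like this:

```python
import torch.nn as nn


class ToyAttention(nn.Module):
    def __init__(self):
        super().__init__()
        self.processor = "default-processor"

    def set_processor(self, processor):
        self.processor = processor


class ToyBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.attn1 = ToyAttention()
        self.attn2 = ToyAttention()


def collect_processors(root: nn.Module) -> dict:
    # Mirror of the recursive collection shown above, on plain nn.Modules.
    processors = {}

    def recurse(name, module):
        if hasattr(module, "set_processor"):
            processors[f"{name}.processor"] = module.processor
        for sub_name, child in module.named_children():
            recurse(f"{name}.{sub_name}", child)

    for name, module in root.named_children():
        recurse(name, module)
    return processors


model = nn.ModuleDict({"down": ToyBlock(), "up": ToyBlock()})
print(sorted(collect_processors(model)))
# ['down.attn1.processor', 'down.attn2.processor', 'up.attn1.processor', 'up.attn2.processor']
```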
1
2023-10-14 12:18:38+00:00
16k
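set_attention_slice in the record above resolves a single slicing policy into one slice size per sliceable layer: "auto" halves every head dimension, "max" forces one slice at a time, and a plain integer is broadcast to all layers. That arithmetic in isolation, with hypothetical head dimensions (the real dims come from the model's attention layers):

```python
def resolve_slice_sizes(sliceable_head_dims, slice_size):
    # Plain-Python mirror of the slicing policy implemented above.
    num_layers = len(sliceable_head_dims)
    if slice_size == "auto":
        # halve each head dimension so attention runs in two steps
        return [dim // 2 for dim in sliceable_head_dims]
    if slice_size == "max":
        # smallest possible slices: one at a time
        return num_layers * [1]
    return slice_size if isinstance(slice_size, list) else num_layers * [slice_size]


dims = [40, 40, 80, 80]  # hypothetical sliceable head dims
print(resolve_slice_sizes(dims, "auto"))  # [20, 20, 40, 40]
print(resolve_slice_sizes(dims, "max"))   # [1, 1, 1, 1]
print(resolve_slice_sizes(dims, 8))       # [8, 8, 8, 8]
```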
PixArt-alpha/PixArt-alpha
train_scripts/train_controlnet.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_s...
import argparse import datetime import os import sys import time import types import warnings import torch from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from mmcv.runner import LogBuffer from torch.utils.data import RandomSampler from diffusion import IDDPM from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.model.builder import build_model from diffusion.model.nets import PixArtMS, ControlPixArtHalf, ControlPixArtMSHalf from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_ from diffusion.utils.logger import get_root_logger from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
14,165
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) if not load_vae_feat: raise ValueError("Only support load vae features for now.") # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start = time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start z = batch[0] # 4 x 4 x 128 x 128 z:vae output, 3x1024x1024->vae->4x128x128 clean_images = z * config.scale_factor # vae needed scale factor y = batch[1] # 4 x 1 x 120 x 4096 # T5 extracted feature of caption, 120 token, 4096 y_mask = batch[2] # 4 x 1 x 1 x 120 # caption indicate whether valid data_info = batch[3] # Sample a random timestep for each image bs = clean_images.shape[0] timesteps = torch.randint(0, config.train_sampling_steps, (bs,), device=clean_images.device).long() grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() loss_term = train_diffusion.training_losses(model, clean_images, timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info, c=data_info['condition'] * config.scale_factor)) loss = loss_term['loss'].mean() accelerator.backward(loss) if accelerator.sync_gradients:
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) if not load_vae_feat: raise ValueError("Only support load vae features for now.") # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start = time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start z = batch[0] # 4 x 4 x 128 x 128 z:vae output, 3x1024x1024->vae->4x128x128 clean_images = z * config.scale_factor # vae needed scale factor y = batch[1] # 4 x 1 x 120 x 4096 # T5 extracted feature of caption, 120 token, 4096 y_mask = batch[2] # 4 x 1 x 1 x 120 # caption indicate whether valid data_info = batch[3] # Sample a random timestep for each image bs = clean_images.shape[0] timesteps = torch.randint(0, config.train_sampling_steps, (bs,), device=clean_images.device).long() grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() loss_term = train_diffusion.training_losses(model, clean_images, timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info, c=data_info['condition'] * config.scale_factor)) loss = loss_term['loss'].mean() accelerator.backward(loss) if accelerator.sync_gradients:
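The training step above rescales the cached VAE latents by config.scale_factor and draws one random diffusion timestep per sample with torch.randint. Those two lines in isolation, on dummy tensors shaped like the comments in the loop (4 x 4 x 128 x 128 latents); the scale factor and step count here are placeholders, not the repo's actual config values:

```python
import torch

scale_factor = 0.18215        # placeholder, normally read from config
train_sampling_steps = 1000   # placeholder, normally read from config

z = torch.randn(4, 4, 128, 128)      # stand-in for a batch of cached VAE latents
clean_images = z * scale_factor      # latent scaling, as in the loop above

bs = clean_images.shape[0]
timesteps = torch.randint(
    0, train_sampling_steps, (bs,), device=clean_images.device
).long()
print(timesteps.shape, timesteps.dtype)  # torch.Size([4]) torch.int64
```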
grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip)
14
2023-10-12 14:16:33+00:00
16k
showlab/MotionDirector
MotionDirector_train.py
[ { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample sh...
import argparse import datetime import logging import inspect import math import os import random import gc import copy import torch import torch.nn.functional as F import torch.utils.checkpoint import diffusers import transformers import imageio import numpy as np import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from torchvision import transforms from tqdm.auto import tqdm from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from models.unet_3d_condition import UNet3DConditionModel from diffusers.models import AutoencoderKL from diffusers import DDIMScheduler, TextToVideoSDPipeline from diffusers.optimization import get_scheduler from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset from einops import rearrange, repeat from utils.lora_handler import LoraHandler from utils.lora import extract_lora_child_module from utils.ddim_utils import ddim_inversion from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
13,405
if is_lora and condition and isinstance(model, list): params = create_optim_params( params=itertools.chain(*model), extra_params=extra_params ) optimizer_params.append(params) continue if is_lora and condition and not isinstance(model, list): for n, p in model.named_parameters(): if 'lora' in n: params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) continue # If this is true, we can train it. if condition: for n, p in model.named_parameters(): should_negate = 'lora' in n and not is_lora if should_negate: continue params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) return optimizer_params def get_optimizer(use_8bit_adam): if use_8bit_adam: try: except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) return bnb.optim.AdamW8bit else: return torch.optim.AdamW def is_mixed_precision(accelerator): weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 return weight_dtype def cast_to_gpu_and_type(model_list, accelerator, weight_dtype): for model in model_list: if model is not None: model.to(accelerator.device, dtype=weight_dtype) def inverse_video(pipe, latents, num_steps): ddim_inv_scheduler = DDIMScheduler.from_config(pipe.scheduler.config) ddim_inv_scheduler.set_timesteps(num_steps) ddim_inv_latent = ddim_inversion( pipe, ddim_inv_scheduler, video_latent=latents.to(pipe.device), num_inv_steps=num_steps, prompt="")[-1] return ddim_inv_latent def handle_cache_latents( should_cache, output_dir, train_dataloader, train_batch_size, vae, unet, pretrained_model_path, noise_prior, cached_latent_dir=None, ): # Cache latents by storing them in VRAM. # Speeds up training and saves memory by not encoding during the train loop. if not should_cache: return None vae.to('cuda', dtype=torch.float16) vae.enable_slicing() pipe = TextToVideoSDPipeline.from_pretrained( pretrained_model_path, vae=vae, unet=copy.deepcopy(unet).to('cuda', dtype=torch.float16) ) pipe.text_encoder.to('cuda', dtype=torch.float16) cached_latent_dir = ( os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None ) if cached_latent_dir is None: cache_save_dir = f"{output_dir}/cached_latents" os.makedirs(cache_save_dir, exist_ok=True) for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")): save_name = f"cached_{i}" full_out_path = f"{cache_save_dir}/{save_name}.pt" pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16) batch['latents'] = tensor_to_vae_latent(pixel_values, vae) if noise_prior > 0.: batch['inversion_noise'] = inverse_video(pipe, batch['latents'], 50) for k, v in batch.items(): batch[k] = v[0] torch.save(batch, full_out_path) del pixel_values del batch # We do this to avoid fragmentation from casting latents between devices. torch.cuda.empty_cache() else: cache_save_dir = cached_latent_dir return torch.utils.data.DataLoader(
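In get_optimizer above the try block appears empty because this dataset hoists imports into the separate import_statement field; that field for this record includes import bitsandbytes as bnb, which is what the ImportError guard would normally wrap. A hedged reconstruction of the selection logic, with the assumed import restored:

```python
import torch


def get_optimizer_sketch(use_8bit_adam: bool):
    # Assumed intent of get_optimizer: AdamW8bit when bitsandbytes is requested
    # and importable, otherwise plain torch AdamW.
    if use_8bit_adam:
        try:
            import bitsandbytes as bnb  # the import the stripped try block most likely guarded
        except ImportError:
            raise ImportError(
                "Please install bitsandbytes to use 8-bit Adam. "
                "You can do so by running `pip install bitsandbytes`"
            )
        return bnb.optim.AdamW8bit
    return torch.optim.AdamW


optimizer_cls = get_optimizer_sketch(use_8bit_adam=False)
print(optimizer_cls)
```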
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = [] # Loop through all available datasets, get the name, then add to list of data to process. for DataSet in [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset]: for dataset in dataset_types: if dataset == DataSet.__getname__(): train_datasets.append(DataSet(**train_data, tokenizer=tokenizer)) if len(train_datasets) > 0: return train_datasets else: raise ValueError("Dataset type not found: 'json', 'single_video', 'folder', 'image'") def extend_datasets(datasets, dataset_items, extend=False): biggest_data_len = max(x.__len__() for x in datasets) extended = [] for dataset in datasets: if dataset.__len__() == 0: del dataset continue if dataset.__len__() < biggest_data_len: for item in dataset_items: if extend and item not in extended and hasattr(dataset, item): print(f"Extending {item}") value = getattr(dataset, item) value *= biggest_data_len value = value[:biggest_data_len] setattr(dataset, item, value) print(f"New {item} dataset length: {dataset.__len__()}") extended.append(item) def export_to_video(video_frames, output_video_path, fps): video_writer = imageio.get_writer(output_video_path, fps=fps) for img in video_frames: video_writer.append_data(np.array(img)) video_writer.close() def create_output_folders(output_dir, config): now = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S") out_dir = os.path.join(output_dir, f"train_{now}") os.makedirs(out_dir, exist_ok=True) os.makedirs(f"{out_dir}/samples", exist_ok=True) # OmegaConf.save(config, os.path.join(out_dir, 'config.yaml')) return out_dir def load_primary_models(pretrained_model_path): noise_scheduler = DDIMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler") tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder") vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae") unet = UNet3DConditionModel.from_pretrained(pretrained_model_path, subfolder="unet") return noise_scheduler, tokenizer, text_encoder, vae, unet def unet_and_text_g_c(unet, text_encoder, unet_enable, text_enable): unet._set_gradient_checkpointing(value=unet_enable) text_encoder._set_gradient_checkpointing(CLIPEncoder, value=text_enable) def freeze_models(models_to_freeze): for model in models_to_freeze: if model is not None: model.requires_grad_(False) def is_attn(name): return ('attn1' or 'attn2' == name.split('.')[-1]) def set_processors(attentions): for attn in attentions: attn.set_processor(AttnProcessor2_0()) def set_torch_2_attn(unet): optim_count = 0 for name, module in unet.named_modules(): if is_attn(name): if isinstance(module, torch.nn.ModuleList): for m in module: if isinstance(m, BasicTransformerBlock): set_processors([m.attn1, m.attn2]) optim_count += 1 if optim_count > 0: print(f"{optim_count} Attention 
layers using Scaled Dot Product Attention.") def handle_memory_attention(enable_xformers_memory_efficient_attention, enable_torch_2_attn, unet): try: is_torch_2 = hasattr(F, 'scaled_dot_product_attention') enable_torch_2 = is_torch_2 and enable_torch_2_attn if enable_xformers_memory_efficient_attention and not enable_torch_2: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp) else: raise ValueError("xformers is not available. Make sure it is installed correctly") if enable_torch_2: set_torch_2_attn(unet) except: print("Could not enable memory efficient attention for xformers or Torch 2.0.") def param_optim(model, condition, extra_params=None, is_lora=False, negation=None): extra_params = extra_params if len(extra_params.keys()) > 0 else None return { "model": model, "condition": condition, 'extra_params': extra_params, 'is_lora': is_lora, "negation": negation } def create_optim_params(name='param', params=None, lr=5e-6, extra_params=None): params = { "name": name, "params": params, "lr": lr } if extra_params is not None: for k, v in extra_params.items(): params[k] = v return params def negate_params(name, negation): # We have to do this if we are co-training with LoRA. # This ensures that parameter groups aren't duplicated. if negation is None: return False for n in negation: if n in name and 'temp' not in name: return True return False def create_optimizer_params(model_list, lr): optimizer_params = [] for optim in model_list: model, condition, extra_params, is_lora, negation = optim.values() # Check if we are doing LoRA training. if is_lora and condition and isinstance(model, list): params = create_optim_params( params=itertools.chain(*model), extra_params=extra_params ) optimizer_params.append(params) continue if is_lora and condition and not isinstance(model, list): for n, p in model.named_parameters(): if 'lora' in n: params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) continue # If this is true, we can train it. if condition: for n, p in model.named_parameters(): should_negate = 'lora' in n and not is_lora if should_negate: continue params = create_optim_params(n, p, lr, extra_params) optimizer_params.append(params) return optimizer_params def get_optimizer(use_8bit_adam): if use_8bit_adam: try: except ImportError: raise ImportError( "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`" ) return bnb.optim.AdamW8bit else: return torch.optim.AdamW def is_mixed_precision(accelerator): weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 return weight_dtype def cast_to_gpu_and_type(model_list, accelerator, weight_dtype): for model in model_list: if model is not None: model.to(accelerator.device, dtype=weight_dtype) def inverse_video(pipe, latents, num_steps): ddim_inv_scheduler = DDIMScheduler.from_config(pipe.scheduler.config) ddim_inv_scheduler.set_timesteps(num_steps) ddim_inv_latent = ddim_inversion( pipe, ddim_inv_scheduler, video_latent=latents.to(pipe.device), num_inv_steps=num_steps, prompt="")[-1] return ddim_inv_latent def handle_cache_latents( should_cache, output_dir, train_dataloader, train_batch_size, vae, unet, pretrained_model_path, noise_prior, cached_latent_dir=None, ): # Cache latents by storing them in VRAM. # Speeds up training and saves memory by not encoding during the train loop. 
if not should_cache: return None vae.to('cuda', dtype=torch.float16) vae.enable_slicing() pipe = TextToVideoSDPipeline.from_pretrained( pretrained_model_path, vae=vae, unet=copy.deepcopy(unet).to('cuda', dtype=torch.float16) ) pipe.text_encoder.to('cuda', dtype=torch.float16) cached_latent_dir = ( os.path.abspath(cached_latent_dir) if cached_latent_dir is not None else None ) if cached_latent_dir is None: cache_save_dir = f"{output_dir}/cached_latents" os.makedirs(cache_save_dir, exist_ok=True) for i, batch in enumerate(tqdm(train_dataloader, desc="Caching Latents.")): save_name = f"cached_{i}" full_out_path = f"{cache_save_dir}/{save_name}.pt" pixel_values = batch['pixel_values'].to('cuda', dtype=torch.float16) batch['latents'] = tensor_to_vae_latent(pixel_values, vae) if noise_prior > 0.: batch['inversion_noise'] = inverse_video(pipe, batch['latents'], 50) for k, v in batch.items(): batch[k] = v[0] torch.save(batch, full_out_path) del pixel_values del batch # We do this to avoid fragmentation from casting latents between devices. torch.cuda.empty_cache() else: cache_save_dir = cached_latent_dir return torch.utils.data.DataLoader(
CachedDataset(cache_dir=cache_save_dir),
5
2023-10-12 12:06:55+00:00
16k
NVlabs/EmerNeRF
builders.py
[ { "identifier": "SceneDataset", "path": "datasets/base/scene_dataset.py", "snippet": "class SceneDataset(abc.ABC):\n \"\"\"\n Base class for scene dataset.\n \"\"\"\n\n data_cfg: OmegaConf = None\n pixel_source: ScenePixelSource = None\n lidar_source: SceneLidarSource = None\n # tra...
import itertools import logging import torch from typing import List, Tuple from omegaconf import OmegaConf from datasets.base import SceneDataset from radiance_fields import ( DensityField, RadianceField, build_density_field, build_radiance_field_from_cfg, ) from third_party.nerfacc_prop_net import PropNetEstimator
11,630
logger = logging.getLogger()

def build_model_from_cfg(
    cfg: OmegaConf,
logger = logging.getLogger()

def build_model_from_cfg(
    cfg: OmegaConf,
dataset: SceneDataset,
0
2023-10-11 20:56:27+00:00
16k
alibaba-damo-academy/FunCodec
funcodec/train/gan_trainer.py
[ { "identifier": "AbsBatchStepScheduler", "path": "funcodec/schedulers/abs_scheduler.py", "snippet": "class AbsBatchStepScheduler(AbsScheduler):\n @abstractmethod\n def step(self, epoch: int = None):\n pass\n\n @abstractmethod\n def state_dict(self):\n pass\n\n @abstractmetho...
import argparse import dataclasses import logging import time import numpy as np import torch import os import soundfile import gc import fairscale from contextlib import contextmanager from distutils.version import LooseVersion from typing import Dict from typing import Iterable from typing import List from typing import Optional from typing import Sequence from typing import Tuple from io import BytesIO from typeguard import check_argument_types from funcodec.schedulers.abs_scheduler import AbsBatchStepScheduler from funcodec.schedulers.abs_scheduler import AbsScheduler from funcodec.torch_utils.device_funcs import to_device from funcodec.torch_utils.recursive_op import recursive_average from funcodec.train.distributed_utils import DistributedOption from funcodec.train.reporter import SubReporter from funcodec.train.trainer import Trainer from funcodec.train.trainer import TrainerOptions from funcodec.utils.build_dataclass import build_dataclass from funcodec.utils.types import str2bool from torch.distributed import ReduceOp from torch.cuda.amp import autocast from torch.cuda.amp import GradScaler
11,288
# Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# Adapted by Zhihao Du for GAN-based Codec models.
"""Trainer module for GAN-based training."""

if torch.distributed.is_available():

if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):  # NOQA
        yield

    GradScaler = None

try:
except ImportError:
    fairscale = None


@dataclasses.dataclass
class GANTrainerOptions(TrainerOptions):
    """Trainer option dataclass for GANTrainer."""

    generator_first: bool
    disc_grad_clip: float
    disc_grad_clip_type: float
    gen_train_interval: int
    disc_train_interval: int
    sampling_rate: int


class GANTrainer(Trainer):
    """Trainer for GAN-based training.

    If you'd like to use this trainer, the model must inherit
    espnet.train.abs_gan_espnet_model.AbsGANESPnetModel.
    """

    @classmethod
    def build_options(cls, args: argparse.Namespace) -> TrainerOptions:
        """Build options consumed by train(), eval(), and plot_attention()."""
        assert check_argument_types()
        return build_dataclass(GANTrainerOptions, args)

    @classmethod
    def add_arguments(cls, parser: argparse.ArgumentParser):
        """Add additional arguments for GAN-trainer."""
        parser.add_argument(
            "--generator_first",
# Copyright 2021 Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# Adapted by Zhihao Du for GAN-based Codec models.
"""Trainer module for GAN-based training."""

if torch.distributed.is_available():

if LooseVersion(torch.__version__) >= LooseVersion("1.6.0"):
else:
    # Nothing to do if torch<1.6.0
    @contextmanager
    def autocast(enabled=True):  # NOQA
        yield

    GradScaler = None

try:
except ImportError:
    fairscale = None


@dataclasses.dataclass
class GANTrainerOptions(TrainerOptions):
    """Trainer option dataclass for GANTrainer."""

    generator_first: bool
    disc_grad_clip: float
    disc_grad_clip_type: float
    gen_train_interval: int
    disc_train_interval: int
    sampling_rate: int


class GANTrainer(Trainer):
    """Trainer for GAN-based training.

    If you'd like to use this trainer, the model must inherit
    espnet.train.abs_gan_espnet_model.AbsGANESPnetModel.
    """

    @classmethod
    def build_options(cls, args: argparse.Namespace) -> TrainerOptions:
        """Build options consumed by train(), eval(), and plot_attention().""" 
        assert check_argument_types()
        return build_dataclass(GANTrainerOptions, args)

    @classmethod
    def add_arguments(cls, parser: argparse.ArgumentParser):
        """Add additional arguments for GAN-trainer."""
        parser.add_argument(
            "--generator_first",
type=str2bool,
9
2023-10-07 02:00:40+00:00
16k
longzw1997/Open-GroundingDino
models/GroundingDINO/groundingdino.py
[ { "identifier": "box_ops", "path": "groundingdino/util/box_ops.py", "snippet": "def box_cxcywh_to_xyxy(x):\ndef box_xyxy_to_cxcywh(x):\ndef box_iou(boxes1, boxes2):\ndef generalized_box_iou(boxes1, boxes2):\ndef box_iou_pairwise(boxes1, boxes2):\ndef generalized_box_iou_pairwise(boxes1, boxes2):\ndef ma...
import copy import torch import torch.nn.functional as F from typing import List from torch import nn from torchvision.ops.boxes import nms from transformers import AutoTokenizer, BertModel, BertTokenizer, RobertaModel, RobertaTokenizerFast from groundingdino.util import box_ops, get_tokenlizer from groundingdino.util.misc import ( NestedTensor, accuracy, get_world_size, interpolate, inverse_sigmoid, is_dist_avail_and_initialized, nested_tensor_from_tensor_list, ) from groundingdino.util.utils import get_phrases_from_posmap from groundingdino.util.visualizer import COCOVisualizer from groundingdino.util.vl_utils import create_positive_map_from_span from ..registry import MODULE_BUILD_FUNCS from .backbone import build_backbone from .bertwarper import ( BertModelWarper, generate_masks_with_special_tokens, generate_masks_with_special_tokens_and_transfer_map, ) from .transformer import build_transformer from .utils import MLP, ContrastiveEmbed, sigmoid_focal_loss from .matcher import build_matcher from pycocotools.coco import COCO
13,534
class PostProcess(nn.Module): """ This module converts the model's output into the format expected by the coco api""" def __init__(self, num_select=100,text_encoder_type='text_encoder_type', nms_iou_threshold=-1,use_coco_eval=False,args=None) -> None: super().__init__() self.num_select = num_select self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) if args.use_coco_eval: coco = COCO(args.coco_val_path) category_dict = coco.loadCats(coco.getCatIds()) cat_list = [item['name'] for item in category_dict] else: cat_list=args.label_list caption = " . ".join(cat_list) + ' .' tokenized = self.tokenizer(caption, padding="longest", return_tensors="pt") label_list = torch.arange(len(cat_list)) pos_map=create_positive_map(tokenized,label_list,cat_list,caption) # build a mapping from label_id to pos_map if args.use_coco_eval: id_map = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 11, 11: 13, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19, 18: 20, 19: 21, 20: 22, 21: 23, 22: 24, 23: 25, 24: 27, 25: 28, 26: 31, 27: 32, 28: 33, 29: 34, 30: 35, 31: 36, 32: 37, 33: 38, 34: 39, 35: 40, 36: 41, 37: 42, 38: 43, 39: 44, 40: 46, 41: 47, 42: 48, 43: 49, 44: 50, 45: 51, 46: 52, 47: 53, 48: 54, 49: 55, 50: 56, 51: 57, 52: 58, 53: 59, 54: 60, 55: 61, 56: 62, 57: 63, 58: 64, 59: 65, 60: 67, 61: 70, 62: 72, 63: 73, 64: 74, 65: 75, 66: 76, 67: 77, 68: 78, 69: 79, 70: 80, 71: 81, 72: 82, 73: 84, 74: 85, 75: 86, 76: 87, 77: 88, 78: 89, 79: 90} new_pos_map = torch.zeros((91, 256)) for k, v in id_map.items(): new_pos_map[v] = pos_map[k] pos_map=new_pos_map self.nms_iou_threshold=nms_iou_threshold self.positive_map = pos_map @torch.no_grad() def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False): """ Perform the computation Parameters: outputs: raw outputs of the model target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch For evaluation, this must be the original image size (before any data augmentation) For visualization, this should be the image size after data augment, but before padding """ num_select = self.num_select out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] prob_to_token = out_logits.sigmoid() pos_maps = self.positive_map.to(prob_to_token.device) for label_ind in range(len(pos_maps)): if pos_maps[label_ind].sum() != 0: pos_maps[label_ind]=pos_maps[label_ind]/pos_maps[label_ind].sum() prob_to_label = prob_to_token @ pos_maps.T assert len(out_logits) == len(target_sizes) assert target_sizes.shape[1] == 2 prob = prob_to_label topk_values, topk_indexes = torch.topk(prob.view(prob.shape[0], -1), num_select, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, prob.shape[2], rounding_mode='trunc') labels = topk_indexes % prob.shape[2] if not_to_xyxy: boxes = out_bbox else: boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) # if test: # assert not not_to_xyxy # boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2] boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] if self.nms_iou_threshold > 0: item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)] results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)] else: results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, 
labels, boxes)] results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results @MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino") def build_groundingdino(args): device = torch.device(args.device) backbone = build_backbone(args) transformer = build_transformer(args) dn_labelbook_size = args.dn_labelbook_size dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share sub_sentence_present = args.sub_sentence_present model = GroundingDINO( backbone, transformer, num_queries=args.num_queries, aux_loss=args.aux_loss, iter_update=True, query_dim=4, num_feature_levels=args.num_feature_levels, nheads=args.nheads, dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, two_stage_type=args.two_stage_type, two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, two_stage_class_embed_share=args.two_stage_class_embed_share, num_patterns=args.num_patterns, dn_number=0, dn_box_noise_scale=args.dn_box_noise_scale, dn_label_noise_ratio=args.dn_label_noise_ratio, dn_labelbook_size=dn_labelbook_size, text_encoder_type=args.text_encoder_type, sub_sentence_present=sub_sentence_present, max_text_len=args.max_text_len, )
# ------------------------------------------------------------------------ # Grounding DINO # url: https://github.com/IDEA-Research/GroundingDINO # Copyright (c) 2023 IDEA. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Conditional DETR model and criterion classes. # Copyright (c) 2021 Microsoft. All Rights Reserved. # Licensed under the Apache License, Version 2.0 [see LICENSE for details] # ------------------------------------------------------------------------ # Modified from DETR (https://github.com/facebookresearch/detr) # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # ------------------------------------------------------------------------ # Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR) # Copyright (c) 2020 SenseTime. All Rights Reserved. # ------------------------------------------------------------------------ class GroundingDINO(nn.Module): """This is the Cross-Attention Detector module that performs object detection""" def __init__( self, backbone, transformer, num_queries, aux_loss=False, iter_update=False, query_dim=2, num_feature_levels=1, nheads=8, # two stage two_stage_type="no", # ['no', 'standard'] dec_pred_bbox_embed_share=True, two_stage_class_embed_share=True, two_stage_bbox_embed_share=True, num_patterns=0, dn_number=100, dn_box_noise_scale=0.4, dn_label_noise_ratio=0.5, dn_labelbook_size=100, text_encoder_type="bert-base-uncased", sub_sentence_present=True, max_text_len=256, ): """Initializes the model. Parameters: backbone: torch module of the backbone to be used. See backbone.py transformer: torch module of the transformer architecture. See transformer.py num_queries: number of object queries, ie detection slot. This is the maximal number of objects Conditional DETR can detect in a single image. For COCO, we recommend 100 queries. aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. 
""" super().__init__() self.num_queries = num_queries self.transformer = transformer self.hidden_dim = hidden_dim = transformer.d_model self.num_feature_levels = num_feature_levels self.nheads = nheads self.max_text_len = 256 self.sub_sentence_present = sub_sentence_present # setting query dim self.query_dim = query_dim assert query_dim == 4 # for dn training self.num_patterns = num_patterns self.dn_number = dn_number self.dn_box_noise_scale = dn_box_noise_scale self.dn_label_noise_ratio = dn_label_noise_ratio self.dn_labelbook_size = dn_labelbook_size # bert self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type) self.bert.pooler.dense.weight.requires_grad_(False) self.bert.pooler.dense.bias.requires_grad_(False) self.bert = BertModelWarper(bert_model=self.bert) self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True) nn.init.constant_(self.feat_map.bias.data, 0) nn.init.xavier_uniform_(self.feat_map.weight.data) # freeze # special tokens self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"]) # prepare input projection layers if num_feature_levels > 1: num_backbone_outs = len(backbone.num_channels) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.num_channels[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ) for _ in range(num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, hidden_dim), ) ) in_channels = hidden_dim self.input_proj = nn.ModuleList(input_proj_list) else: assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!" self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1), nn.GroupNorm(32, hidden_dim), ) ] ) self.backbone = backbone self.aux_loss = aux_loss self.box_pred_damping = box_pred_damping = None self.iter_update = iter_update assert iter_update, "Why not iter_update?" 
# prepare pred layers self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share # prepare class & box embed _class_embed = ContrastiveEmbed() _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0) if dec_pred_bbox_embed_share: box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)] else: box_embed_layerlist = [ copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers) ] class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)] self.bbox_embed = nn.ModuleList(box_embed_layerlist) self.class_embed = nn.ModuleList(class_embed_layerlist) self.transformer.decoder.bbox_embed = self.bbox_embed self.transformer.decoder.class_embed = self.class_embed # two stage self.two_stage_type = two_stage_type assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format( two_stage_type ) if two_stage_type != "no": if two_stage_bbox_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_bbox_embed = _bbox_embed else: self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed) if two_stage_class_embed_share: assert dec_pred_bbox_embed_share self.transformer.enc_out_class_embed = _class_embed else: self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed) self.refpoint_embed = None self._reset_parameters() def _reset_parameters(self): # init input_proj for proj in self.input_proj: nn.init.xavier_uniform_(proj[0].weight, gain=1) nn.init.constant_(proj[0].bias, 0) def init_ref_points(self, use_num_queries): self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim) def forward(self, samples: NestedTensor, targets: List = None, **kw): """The forward expects a NestedTensor, which consists of: - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels It returns a dict with the following elements: - "pred_logits": the classification logits (including no-object) for all queries. Shape= [batch_size x num_queries x num_classes] - "pred_boxes": The normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image (disregarding possible padding). See PostProcess for information on how to retrieve the unnormalized bounding box. - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of dictionnaries containing the two above keys for each decoder layer. 
""" if targets is None: captions = kw["captions"] else: captions = [t["caption"] for t in targets] # encoder texts tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to( samples.device ) one_hot_token = tokenized ( text_self_attention_masks, position_ids, cate_to_token_mask_list, ) = generate_masks_with_special_tokens_and_transfer_map( tokenized, self.specical_tokens, self.tokenizer ) if text_self_attention_masks.shape[1] > self.max_text_len: text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] position_ids = position_ids[:, : self.max_text_len] tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len] tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len] tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len] # extract text embeddings if self.sub_sentence_present: tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"} tokenized_for_encoder["attention_mask"] = text_self_attention_masks tokenized_for_encoder["position_ids"] = position_ids else: tokenized_for_encoder = tokenized bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768 encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model text_token_mask = tokenized.attention_mask.bool() # bs, 195 # text_token_mask: True for nomask, False for mask # text_self_attention_masks: True for nomask, False for mask if encoded_text.shape[1] > self.max_text_len: encoded_text = encoded_text[:, : self.max_text_len, :] text_token_mask = text_token_mask[:, : self.max_text_len] position_ids = position_ids[:, : self.max_text_len] text_self_attention_masks = text_self_attention_masks[ :, : self.max_text_len, : self.max_text_len ] text_dict = { "encoded_text": encoded_text, # bs, 195, d_model "text_token_mask": text_token_mask, # bs, 195 "position_ids": position_ids, # bs, 195 "text_self_attention_masks": text_self_attention_masks, # bs, 195,195 } if isinstance(samples, (list, torch.Tensor)): samples = nested_tensor_from_tensor_list(samples) features, poss = self.backbone(samples) srcs = [] masks = [] for l, feat in enumerate(features): src, mask = feat.decompose() srcs.append(self.input_proj[l](src)) masks.append(mask) assert mask is not None if self.num_feature_levels > len(srcs): _len_srcs = len(srcs) for l in range(_len_srcs, self.num_feature_levels): if l == _len_srcs: src = self.input_proj[l](features[-1].tensors) else: src = self.input_proj[l](srcs[-1]) m = samples.mask mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype) srcs.append(src) masks.append(mask) poss.append(pos_l) input_query_bbox = input_query_label = attn_mask = dn_meta = None hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer( srcs, masks, input_query_bbox, poss, input_query_label, attn_mask, text_dict ) # deformable-detr-like anchor update outputs_coord_list = [] for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate( zip(reference[:-1], self.bbox_embed, hs) ): layer_delta_unsig = layer_bbox_embed(layer_hs) layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig) layer_outputs_unsig = layer_outputs_unsig.sigmoid() outputs_coord_list.append(layer_outputs_unsig) outputs_coord_list = torch.stack(outputs_coord_list) outputs_class = torch.stack( [ layer_cls_embed(layer_hs, text_dict) for layer_cls_embed, layer_hs in zip(self.class_embed, hs) ] ) out = 
{"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord_list[-1]} # Used to calculate losses bs, len_td = text_dict['text_token_mask'].shape out['text_mask']=torch.zeros(bs, self.max_text_len, dtype=torch.bool).to( samples.device ) for b in range(bs): for j in range(len_td): if text_dict['text_token_mask'][b][j] == True: out['text_mask'][b][j] = True # for intermediate outputs if self.aux_loss: out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list) out['token']=one_hot_token # # for encoder output if hs_enc is not None: # prepare intermediate outputs interm_coord = ref_enc[-1] interm_class = self.transformer.enc_out_class_embed(hs_enc[-1], text_dict) out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord} out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal} # outputs['pred_logits'].shape # torch.Size([4, 900, 256]) # outputs['pred_boxes'].shape # torch.Size([4, 900, 4]) # outputs['text_mask'].shape # torch.Size([256]) # outputs['text_mask'] # outputs['aux_outputs'][0].keys() # dict_keys(['pred_logits', 'pred_boxes', 'one_hot', 'text_mask']) # outputs['aux_outputs'][img_idx] # outputs['token'] # <class 'transformers.tokenization_utils_base.BatchEncoding'> # outputs['interm_outputs'].keys() # dict_keys(['pred_logits', 'pred_boxes', 'one_hot', 'text_mask']) # outputs['interm_outputs_for_matching_pre'].keys() # dict_keys(['pred_logits', 'pred_boxes']) # outputs['one_hot'].shape # torch.Size([4, 900, 256]) return out @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [ {"pred_logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1]) ] class SetCriterion(nn.Module): def __init__(self, matcher, weight_dict, focal_alpha,focal_gamma, losses): """ Create the criterion. Parameters: matcher: module able to compute a matching between targets and proposals weight_dict: dict containing as key the names of the losses and as values their relative weight. losses: list of all the losses to be applied. See get_loss for list of available losses. focal_alpha: alpha in Focal Loss """ super().__init__() self.matcher = matcher self.weight_dict = weight_dict self.losses = losses self.focal_alpha = focal_alpha self.focal_gamma= focal_gamma @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients """ pred_logits = outputs['pred_logits'] device = pred_logits.device tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) losses = {'cardinality_error': card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
""" assert 'pred_boxes' in outputs idx = self._get_src_permutation_idx(indices) src_boxes = outputs['pred_boxes'][idx] target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') losses = {} losses['loss_bbox'] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))) losses['loss_giou'] = loss_giou.sum() / num_boxes # calculate the x,y and h,w loss with torch.no_grad(): losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes return losses def token_sigmoid_binary_focal_loss(self, outputs, targets, indices, num_boxes): pred_logits=outputs['pred_logits'] new_targets=outputs['one_hot'].to(pred_logits.device) text_mask=outputs['text_mask'] assert (new_targets.dim() == 3) assert (pred_logits.dim() == 3) # batch x from x to bs, n, _ = pred_logits.shape alpha=self.focal_alpha gamma=self.focal_gamma if text_mask is not None: # ODVG: each sample has different mask text_mask = text_mask.repeat(1, pred_logits.size(1)).view(outputs['text_mask'].shape[0],-1,outputs['text_mask'].shape[1]) pred_logits = torch.masked_select(pred_logits, text_mask) new_targets = torch.masked_select(new_targets, text_mask) new_targets=new_targets.float() p = torch.sigmoid(pred_logits) ce_loss = F.binary_cross_entropy_with_logits(pred_logits, new_targets, reduction="none") p_t = p * new_targets + (1 - p) * (1 - new_targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * new_targets + (1 - alpha) * (1 - new_targets) loss = alpha_t * loss total_num_pos=0 for batch_indices in indices: total_num_pos += len(batch_indices[0]) num_pos_avg_per_gpu = max(total_num_pos , 1.0) loss=loss.sum()/num_pos_avg_per_gpu losses = {'loss_ce': loss} return losses def _get_src_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) src_idx = torch.cat([src for (src, _) in indices]) return batch_idx, src_idx def _get_tgt_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) tgt_idx = torch.cat([tgt for (_, tgt) in indices]) return batch_idx, tgt_idx def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): loss_map = { 'labels': self.token_sigmoid_binary_focal_loss, 'cardinality': self.loss_cardinality, 'boxes': self.loss_boxes, } assert loss in loss_map, f'do you really want to compute {loss} loss?' return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) def forward(self, outputs, targets, cat_list, caption, return_indices=False): """ This performs the loss computation. Parameters: outputs: dict of tensors, see the output specification of the model for the format targets: list of dicts, such that len(targets) == batch_size. The expected keys in each dict depends on the losses applied, see each loss' doc return_indices: used for vis. if True, the layer0-5 indices will be returned as well. 
""" device=next(iter(outputs.values())).device one_hot = torch.zeros(outputs['pred_logits'].size(),dtype=torch.int64) # torch.Size([bs, 900, 256]) token = outputs['token'] label_map_list = [] indices = [] for j in range(len(cat_list)): # bs label_map=[] for i in range(len(cat_list[j])): label_id=torch.tensor([i]) per_label=create_positive_map(token[j], label_id, cat_list[j], caption[j]) label_map.append(per_label) label_map=torch.stack(label_map,dim=0).squeeze(1) label_map_list.append(label_map) for j in range(len(cat_list)): # bs for_match = { "pred_logits" : outputs['pred_logits'][j].unsqueeze(0), "pred_boxes" : outputs['pred_boxes'][j].unsqueeze(0) } inds = self.matcher(for_match, [targets[j]], label_map_list[j]) indices.extend(inds) # indices : A list of size batch_size, containing tuples of (index_i, index_j) where: # - index_i is the indices of the selected predictions (in order) # - index_j is the indices of the corresponding selected targets (in order) # import pdb; pdb.set_trace() tgt_ids = [v["labels"].cpu() for v in targets] # len(tgt_ids) == bs for i in range(len(indices)): tgt_ids[i]=tgt_ids[i][indices[i][1]] one_hot[i,indices[i][0]] = label_map_list[i][tgt_ids[i]].to(torch.long) outputs['one_hot'] = one_hot if return_indices: indices0_copy = indices indices_list = [] # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes_list = [len(t["labels"]) for t in targets] num_boxes = sum(num_boxes_list) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device) if is_dist_avail_and_initialized(): torch.distributed.all_reduce(num_boxes) num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
if 'aux_outputs' in outputs: for idx, aux_outputs in enumerate(outputs['aux_outputs']): indices = [] for j in range(len(cat_list)): # bs aux_output_single = { 'pred_logits' : aux_outputs['pred_logits'][j].unsqueeze(0), 'pred_boxes': aux_outputs['pred_boxes'][j].unsqueeze(0) } inds = self.matcher(aux_output_single, [targets[j]], label_map_list[j]) indices.extend(inds) one_hot_aux = torch.zeros(outputs['pred_logits'].size(),dtype=torch.int64) tgt_ids = [v["labels"].cpu() for v in targets] for i in range(len(indices)): tgt_ids[i]=tgt_ids[i][indices[i][1]] one_hot_aux[i,indices[i][0]] = label_map_list[i][tgt_ids[i]].to(torch.long) aux_outputs['one_hot'] = one_hot_aux aux_outputs['text_mask'] = outputs['text_mask'] if return_indices: indices_list.append(indices) for loss in self.losses: kwargs = {} l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_{idx}': v for k, v in l_dict.items()} losses.update(l_dict) # interm_outputs loss if 'interm_outputs' in outputs: interm_outputs = outputs['interm_outputs'] indices = [] for j in range(len(cat_list)): # bs interm_output_single = { 'pred_logits' : interm_outputs['pred_logits'][j].unsqueeze(0), 'pred_boxes': interm_outputs['pred_boxes'][j].unsqueeze(0) } inds = self.matcher(interm_output_single, [targets[j]], label_map_list[j]) indices.extend(inds) one_hot_aux = torch.zeros(outputs['pred_logits'].size(),dtype=torch.int64) tgt_ids = [v["labels"].cpu() for v in targets] for i in range(len(indices)): tgt_ids[i]=tgt_ids[i][indices[i][1]] one_hot_aux[i,indices[i][0]] = label_map_list[i][tgt_ids[i]].to(torch.long) interm_outputs['one_hot'] = one_hot_aux interm_outputs['text_mask'] = outputs['text_mask'] if return_indices: indices_list.append(indices) for loss in self.losses: kwargs = {} l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs) l_dict = {k + f'_interm': v for k, v in l_dict.items()} losses.update(l_dict) if return_indices: indices_list.append(indices0_copy) return losses, indices_list return losses class PostProcess(nn.Module): """ This module converts the model's output into the format expected by the coco api""" def __init__(self, num_select=100,text_encoder_type='text_encoder_type', nms_iou_threshold=-1,use_coco_eval=False,args=None) -> None: super().__init__() self.num_select = num_select self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type) if args.use_coco_eval: coco = COCO(args.coco_val_path) category_dict = coco.loadCats(coco.getCatIds()) cat_list = [item['name'] for item in category_dict] else: cat_list=args.label_list caption = " . ".join(cat_list) + ' .' 
tokenized = self.tokenizer(caption, padding="longest", return_tensors="pt") label_list = torch.arange(len(cat_list)) pos_map=create_positive_map(tokenized,label_list,cat_list,caption) # build a mapping from label_id to pos_map if args.use_coco_eval: id_map = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 11, 11: 13, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19, 18: 20, 19: 21, 20: 22, 21: 23, 22: 24, 23: 25, 24: 27, 25: 28, 26: 31, 27: 32, 28: 33, 29: 34, 30: 35, 31: 36, 32: 37, 33: 38, 34: 39, 35: 40, 36: 41, 37: 42, 38: 43, 39: 44, 40: 46, 41: 47, 42: 48, 43: 49, 44: 50, 45: 51, 46: 52, 47: 53, 48: 54, 49: 55, 50: 56, 51: 57, 52: 58, 53: 59, 54: 60, 55: 61, 56: 62, 57: 63, 58: 64, 59: 65, 60: 67, 61: 70, 62: 72, 63: 73, 64: 74, 65: 75, 66: 76, 67: 77, 68: 78, 69: 79, 70: 80, 71: 81, 72: 82, 73: 84, 74: 85, 75: 86, 76: 87, 77: 88, 78: 89, 79: 90} new_pos_map = torch.zeros((91, 256)) for k, v in id_map.items(): new_pos_map[v] = pos_map[k] pos_map=new_pos_map self.nms_iou_threshold=nms_iou_threshold self.positive_map = pos_map @torch.no_grad() def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False): """ Perform the computation Parameters: outputs: raw outputs of the model target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch For evaluation, this must be the original image size (before any data augmentation) For visualization, this should be the image size after data augment, but before padding """ num_select = self.num_select out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] prob_to_token = out_logits.sigmoid() pos_maps = self.positive_map.to(prob_to_token.device) for label_ind in range(len(pos_maps)): if pos_maps[label_ind].sum() != 0: pos_maps[label_ind]=pos_maps[label_ind]/pos_maps[label_ind].sum() prob_to_label = prob_to_token @ pos_maps.T assert len(out_logits) == len(target_sizes) assert target_sizes.shape[1] == 2 prob = prob_to_label topk_values, topk_indexes = torch.topk(prob.view(prob.shape[0], -1), num_select, dim=1) scores = topk_values topk_boxes = torch.div(topk_indexes, prob.shape[2], rounding_mode='trunc') labels = topk_indexes % prob.shape[2] if not_to_xyxy: boxes = out_bbox else: boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) # if test: # assert not not_to_xyxy # boxes[:,:,2:] = boxes[:,:,2:] - boxes[:,:,:2] boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1,1,4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] if self.nms_iou_threshold > 0: item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b,s in zip(boxes, scores)] results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in zip(scores, labels, boxes, item_indices)] else: results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] return results @MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino") def build_groundingdino(args): device = torch.device(args.device) backbone = build_backbone(args) transformer = build_transformer(args) dn_labelbook_size = args.dn_labelbook_size dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share sub_sentence_present = args.sub_sentence_present model = GroundingDINO( backbone, transformer, num_queries=args.num_queries, aux_loss=args.aux_loss, 
iter_update=True, query_dim=4, num_feature_levels=args.num_feature_levels, nheads=args.nheads, dec_pred_bbox_embed_share=dec_pred_bbox_embed_share, two_stage_type=args.two_stage_type, two_stage_bbox_embed_share=args.two_stage_bbox_embed_share, two_stage_class_embed_share=args.two_stage_class_embed_share, num_patterns=args.num_patterns, dn_number=0, dn_box_noise_scale=args.dn_box_noise_scale, dn_label_noise_ratio=args.dn_label_noise_ratio, dn_labelbook_size=dn_labelbook_size, text_encoder_type=args.text_encoder_type, sub_sentence_present=sub_sentence_present, max_text_len=args.max_text_len, )
matcher = build_matcher(args)
21
2023-10-14 02:20:31+00:00
16k
LehengTHU/Agent4Rec
main.py
[ { "identifier": "parse_args", "path": "parse.py", "snippet": "def parse_args():\n parser = argparse.ArgumentParser()\n\n # Overall settings\n parser.add_argument('--simulation_name', type=str, default= 'Test',\n help='The name of one trial of simulation.')\n parser.add...
import numpy as np import os import wandb from tqdm import tqdm from parse import parse_args from simulation.utils import fix_seeds from simulation.avatar import Avatar from simulation.arena import Arena
12,740
# load model
if __name__ == '__main__':
    args = parse_args()
    # print(args)
    fix_seeds(args.seed) # set random seed

    if(args.use_wandb):
        wandb.init(
            # set the wandb project where this run will be logged
            project = "sandbox",
            name = args.simulation_name,
            group = args.dataset
        )
# load model
if __name__ == '__main__':
    args = parse_args()
    # print(args)
    fix_seeds(args.seed) # set random seed

    if(args.use_wandb):
        wandb.init(
            # set the wandb project where this run will be logged
            project = "sandbox",
            name = args.simulation_name,
            group = args.dataset
        )
arena_ = Arena(args)
3
2023-10-12 02:33:22+00:00
16k
Beckschen/3D-TransUNet
nn_transunet/trainer/network_trainer.py
[ { "identifier": "SegmentationNetwork", "path": "nn_transunet/networks/neural_network.py", "snippet": "class SegmentationNetwork(NeuralNetwork):\n def __init__(self):\n super(NeuralNetwork, self).__init__()\n\n # if we have 5 pooling then our patch size must be divisible by 2**5\n ...
from _warnings import warn from typing import Tuple from batchgenerators.utilities.file_and_folder_operations import * from nn_transunet.networks.neural_network import SegmentationNetwork from sklearn.model_selection import KFold from torch import nn from torch.cuda.amp import GradScaler, autocast from torch.optim.lr_scheduler import _LRScheduler from ..trainer.loss_functions import ModelLossSemsegGatedCRF from time import time, sleep from collections import OrderedDict from abc import abstractmethod from datetime import datetime from tqdm import trange from ..utils.dist_utils import check_call_hdfs_command, mkdir_hdfs import matplotlib import numpy as np import matplotlib.pyplot as plt import sys import torch.backends.cudnn as cudnn import torch import math import matplotlib.pyplot as plt
13,685
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. matplotlib.use("agg") def maybe_to_torch(d): if isinstance(d, list): d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d] elif not isinstance(d, torch.Tensor): d = torch.from_numpy(d).float() return d def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): return initial_lr * (1 - epoch / max_epochs) ** exponent def warmup_poly_lr(epoch, max_epochs, warmup_epochs, initial_lr, exponent=0.9): if epoch < warmup_epochs: return initial_lr * (float(epoch) / float(max(1.0, warmup_epochs))) epoch_rel = epoch - warmup_epochs max_epochs_rel = max_epochs - warmup_epochs return initial_lr * (1 - epoch_rel / max_epochs_rel) ** exponent def to_cuda(data, non_blocking=True, gpu_id=0): if isinstance(data, list): data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data] else: data = data.cuda(gpu_id, non_blocking=non_blocking) return data class NetworkTrainer(object): def __init__(self, deterministic=True, fp16=False): """ A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such as the training loop, tracking of training and validation losses (and the target metric if you implement it) Training can be terminated early if the validation loss (or the target metric if implemented) do not improve anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get more smooth results. What you need to override: - __init__ - initialize - run_online_evaluation (optional) - finish_online_evaluation (optional) - validate - predict_test_case """ self.fp16 = fp16 self.amp_grad_scaler = None if deterministic: np.random.seed(12345) torch.manual_seed(12345) if torch.cuda.is_available(): torch.cuda.manual_seed_all(12345) cudnn.deterministic = True torch.backends.cudnn.benchmark = False else: cudnn.deterministic = False torch.backends.cudnn.benchmark = True ################# SET THESE IN self.initialize() ###################################
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. matplotlib.use("agg") def maybe_to_torch(d): if isinstance(d, list): d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d] elif not isinstance(d, torch.Tensor): d = torch.from_numpy(d).float() return d def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): return initial_lr * (1 - epoch / max_epochs) ** exponent def warmup_poly_lr(epoch, max_epochs, warmup_epochs, initial_lr, exponent=0.9): if epoch < warmup_epochs: return initial_lr * (float(epoch) / float(max(1.0, warmup_epochs))) epoch_rel = epoch - warmup_epochs max_epochs_rel = max_epochs - warmup_epochs return initial_lr * (1 - epoch_rel / max_epochs_rel) ** exponent def to_cuda(data, non_blocking=True, gpu_id=0): if isinstance(data, list): data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data] else: data = data.cuda(gpu_id, non_blocking=non_blocking) return data class NetworkTrainer(object): def __init__(self, deterministic=True, fp16=False): """ A generic class that can train almost any neural network (RNNs excluded). It provides basic functionality such as the training loop, tracking of training and validation losses (and the target metric if you implement it) Training can be terminated early if the validation loss (or the target metric if implemented) do not improve anymore. This is based on a moving average (MA) of the loss/metric instead of the raw values to get more smooth results. What you need to override: - __init__ - initialize - run_online_evaluation (optional) - finish_online_evaluation (optional) - validate - predict_test_case """ self.fp16 = fp16 self.amp_grad_scaler = None if deterministic: np.random.seed(12345) torch.manual_seed(12345) if torch.cuda.is_available(): torch.cuda.manual_seed_all(12345) cudnn.deterministic = True torch.backends.cudnn.benchmark = False else: cudnn.deterministic = False torch.backends.cudnn.benchmark = True ################# SET THESE IN self.initialize() ###################################
self.network: Tuple[SegmentationNetwork, nn.DataParallel] = None
0
2023-10-11 05:19:25+00:00
16k
AMAAI-Lab/Video2Music
train.py
[ { "identifier": "compute_vevo_accuracy", "path": "dataset/vevo_dataset.py", "snippet": "def compute_vevo_accuracy(out, tgt):\n softmax = nn.Softmax(dim=-1)\n out = torch.argmax(softmax(out), dim=-1)\n\n out = out.flatten()\n tgt = tgt.flatten()\n\n mask = (tgt != CHORD_PAD)\n\n out = o...
import os import csv import shutil import torch import torch.nn as nn from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from torch.optim import Adam from dataset.vevo_dataset import compute_vevo_accuracy, create_vevo_datasets from model.music_transformer import MusicTransformer from model.video_music_transformer import VideoMusicTransformer from model.loss import SmoothCrossEntropyLoss from utilities.constants import * from utilities.device import get_device, use_cuda from utilities.lr_scheduling import LrStepTracker, get_lr from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params from utilities.run_model_vevo import train_epoch, eval_model from torch.utils.tensorboard import SummaryWriter
13,853
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss (total)", "Avg Train loss (chord)", "Avg Train loss (emotion)", "Avg Eval loss (total)", "Avg Eval loss (chord)", "Avg Eval loss (emotion)"] BASELINE_EPOCH = -1 version = VERSION split_ver = SPLIT_VER split_path = "split_" + split_ver VIS_MODELS_ARR = [ "2d/clip_l14p" ] # main def main( vm = "" , isPrintArgs = True ): args = parse_train_args() if isPrintArgs: print_train_args(args) if vm != "": args.vis_models = vm if args.is_video: vis_arr = args.vis_models.split(" ") vis_arr.sort() vis_abbr_path = "" for v in vis_arr: vis_abbr_path = vis_abbr_path + "_" + VIS_ABBR_DIC[v] vis_abbr_path = vis_abbr_path[1:] else: vis_abbr_path = "no_video" if(args.force_cpu): use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs( args.output_dir, exist_ok=True) os.makedirs( os.path.join( args.output_dir, version), exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, version, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, version, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, version) os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if(args.no_tensorboard): tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, version, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) train_dataset, val_dataset, _ = create_vevo_datasets( dataset_root = "./dataset/", max_seq_chord = args.max_sequence_chord, max_seq_video = args.max_sequence_video, vis_models = args.vis_models, emo_model = args.emo_model, split_ver = SPLIT_VER, random_seq = True, is_video = args.is_video) total_vf_dim = 0 if args.is_video: for vf in train_dataset[0]["semanticList"]: total_vf_dim += vf.shape[1] total_vf_dim += 1 # Scene_offset total_vf_dim += 1 # Motion # Emotion if args.emo_model.startswith("6c"): total_vf_dim += 6 else: total_vf_dim += 5 train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) if args.is_video: model = VideoMusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_video=args.max_sequence_video, max_sequence_chord=args.max_sequence_chord, total_vf_dim=total_vf_dim, rpr=args.rpr).to(get_device()) else: model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_chord=args.max_sequence_chord, rpr=args.rpr).to(get_device()) start_epoch = BASELINE_EPOCH if(args.continue_weights is not None): if(args.continue_epoch is None): print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") assert(False) else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif(args.continue_epoch is not None): print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") assert(False) ##### Lr Scheduler vs static lr 
##### if(args.lr is None): if(args.continue_epoch is None): init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss (total)", "Avg Train loss (chord)", "Avg Train loss (emotion)", "Avg Eval loss (total)", "Avg Eval loss (chord)", "Avg Eval loss (emotion)"] BASELINE_EPOCH = -1 version = VERSION split_ver = SPLIT_VER split_path = "split_" + split_ver VIS_MODELS_ARR = [ "2d/clip_l14p" ] # main def main( vm = "" , isPrintArgs = True ): args = parse_train_args() if isPrintArgs: print_train_args(args) if vm != "": args.vis_models = vm if args.is_video: vis_arr = args.vis_models.split(" ") vis_arr.sort() vis_abbr_path = "" for v in vis_arr: vis_abbr_path = vis_abbr_path + "_" + VIS_ABBR_DIC[v] vis_abbr_path = vis_abbr_path[1:] else: vis_abbr_path = "no_video" if(args.force_cpu): use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs( args.output_dir, exist_ok=True) os.makedirs( os.path.join( args.output_dir, version), exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, version, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, version, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, version) os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if(args.no_tensorboard): tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, version, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) train_dataset, val_dataset, _ = create_vevo_datasets( dataset_root = "./dataset/", max_seq_chord = args.max_sequence_chord, max_seq_video = args.max_sequence_video, vis_models = args.vis_models, emo_model = args.emo_model, split_ver = SPLIT_VER, random_seq = True, is_video = args.is_video) total_vf_dim = 0 if args.is_video: for vf in train_dataset[0]["semanticList"]: total_vf_dim += vf.shape[1] total_vf_dim += 1 # Scene_offset total_vf_dim += 1 # Motion # Emotion if args.emo_model.startswith("6c"): total_vf_dim += 6 else: total_vf_dim += 5 train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) if args.is_video: model = VideoMusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_video=args.max_sequence_video, max_sequence_chord=args.max_sequence_chord, total_vf_dim=total_vf_dim, rpr=args.rpr).to(get_device()) else: model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_chord=args.max_sequence_chord, rpr=args.rpr).to(get_device()) start_epoch = BASELINE_EPOCH if(args.continue_weights is not None): if(args.continue_epoch is None): print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") assert(False) else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif(args.continue_epoch is not None): print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") assert(False) ##### Lr Scheduler vs static lr 
##### if(args.lr is None): if(args.continue_epoch is None): init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START
lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step)
7
2023-10-13 09:06:24+00:00
16k
RobotLocomotion/gcs-science-robotics
reproduction/bimanual/helpers.py
[ { "identifier": "BezierGCS", "path": "gcs/bezier.py", "snippet": "class BezierGCS(BaseGCS):\n def __init__(self, regions, order, continuity, edges=None, hdot_min=1e-6, full_dim_overlap=False):\n BaseGCS.__init__(self, regions)\n\n self.order = order\n self.continuity = continuity...
import numpy as np import os import time from copy import copy from pydrake.common import FindResourceOrThrow from pydrake.geometry import ( CollisionFilterDeclaration, GeometrySet, MeshcatVisualizer, Rgba, Role, SceneGraph ) from pydrake.math import RigidTransform, RollPitchYaw, RotationMatrix from pydrake.multibody.inverse_kinematics import InverseKinematics from pydrake.multibody.parsing import LoadModelDirectives, Parser, ProcessModelDirectives from pydrake.multibody.plant import AddMultibodyPlantSceneGraph, MultibodyPlant from pydrake.perception import PointCloud from pydrake.solvers import MosekSolver, Solve from pydrake.systems.analysis import Simulator from pydrake.systems.framework import DiagramBuilder, LeafSystem from pydrake.systems.primitives import TrajectorySource from pydrake.systems.rendering import MultibodyPositionToGeometryPose from gcs.bezier import BezierGCS from gcs.linear import LinearGCS from gcs.rounding import * from reproduction.prm_comparison.helpers import set_transparency_of_models from reproduction.util import *
12,267
elif "shelves::" in gid_name: shelf.append(gid) elif "binR" in gid_name: bins[0].append(gid) elif "binL" in gid_name: bins[1].append(gid) elif "table" in gid_name: table.append(gid) else: print("Geometry", gid_name, "not assigned to an object.") filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[1] + iiwa1[2]+ iiwa1[3]), GeometrySet(iiwa1[4] + iiwa1[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[3] + iiwa1[4]), GeometrySet(iiwa1[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2] + iiwa1[3] + iiwa1[4] + iiwa1[5] + iiwa1[6]), GeometrySet(iiwa1[7] + wsg1))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + iiwa1[4]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[1] + iiwa2[2]+ iiwa2[3]), GeometrySet(iiwa2[4] + iiwa2[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[3] + iiwa2[4]), GeometrySet(iiwa2[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[2] + iiwa2[3] + iiwa2[4] + iiwa2[5] + iiwa2[6]), GeometrySet(iiwa2[7] + wsg2))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + iiwa2[4]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[2]))) pairs = scene_graph.get_query_output_port().Eval(context).inspector().GetCollisionCandidates() print("Filtered collision pairs from", len(inspector.GetCollisionCandidates()), "to", len(pairs)) # initial_guess = np.concatenate((q0, q0)) # min_dist = (0.01, 0.01)??? 
def runBimanualIK(plant, context, wsg1_id, wsg2_id, wsg1_pose, wsg2_pose, initial_guess, min_dist=None): hand_frame1 = plant.GetBodyByName("body", wsg1_id).body_frame() hand_frame2 = plant.GetBodyByName("body", wsg2_id).body_frame() ik = InverseKinematics(plant, context) if min_dist is not None: ik.AddMinimumDistanceConstraint(*min_dist) ik.prog().AddBoundingBoxConstraint(plant.GetPositionLowerLimits(), plant.GetPositionUpperLimits(), ik.q()) ik.prog().SetInitialGuess(ik.q(), initial_guess) ik.prog().AddQuadraticCost((ik.q() - initial_guess).dot(ik.q() - initial_guess)) ik.AddPositionConstraint(hand_frame1, [0, 0, 0], plant.world_frame(), wsg1_pose.translation(), wsg1_pose.translation()) ik.AddOrientationConstraint(hand_frame1, RotationMatrix(), plant.world_frame(), wsg1_pose.rotation(), 0.001) ik.AddPositionConstraint(hand_frame2, [0, 0, 0], plant.world_frame(), wsg2_pose.translation(), wsg2_pose.translation()) ik.AddOrientationConstraint(hand_frame2, RotationMatrix(), plant.world_frame(), wsg2_pose.rotation(), 0.001) result = Solve(ik.prog()) return result.GetSolution(ik.q()) def visualizeConfig(diagram, plant, context, q): plant_context = plant.GetMyMutableContextFromRoot(context) plant.SetPositions(plant_context, q) diagram.ForcedPublish(context) def getLinearGcsPath(regions, sequence): path = [sequence[0]] run_time = 0.0 gcs = LinearGCS(regions) gcs.setPaperSolverOptions() gcs.setSolver(MosekSolver()) for start_pt, goal_pt in zip(sequence[:-1], sequence[1:]): gcs.addSourceTarget(start_pt, goal_pt) start_time = time.time() waypoints, results_dict = gcs.SolvePath(True, False, preprocessing=True) if waypoints is None: print(f"Failed between {start_pt} and {goal_pt}") return None print(f"Planned segment in {np.round(time.time() - start_time, 4)}", flush=True) # run_time += results_dict["preprocessing_stats"]['linear_programs'] run_time += results_dict["relaxation_solver_time"] run_time += results_dict["total_rounded_solver_time"] path += waypoints.T[1:].tolist() gcs.ResetGraph() return np.stack(path).T, run_time def getBezierGcsPath(plant, regions, sequence, order, continuity, hdot_min = 1e-3): run_time = [] trajectories = []
def getIkSeeds(): return { "top_shelf/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "top_shelf/shelf_1": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "top_shelf/shelf_2": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "top_shelf/bin_L": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "shelf_1/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "shelf_1/shelf_1": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "shelf_1/shelf_2": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "shelf_1/bin_L": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "shelf_2/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "shelf_2/shelf_1": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "shelf_2/shelf_2": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "shelf_2/bin_L": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "bin_R/top_shelf": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "bin_R/shelf_1": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.65])), "bin_R/shelf_2": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.4])), "bin_R/bin_L": (RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.0, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0., 1.1, 0.3])), "top_shelf/shelf_1_extract": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.35, 0.65])), "top_shelf/shelf_2_extract": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.35, 0.4])), "shelf_2_extract/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.15, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "shelf_1_extract/top_shelf": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.5, 0.15, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.35, 0.9])), "top_shelf/shelf_1_cross": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2-0.3), [0.7, 0.15, 0.65])), 
"cross_table/top_shelf_cross": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi), [0.4, 0.4, 0.2]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.15, 0.9])), "shelf_2_cross/top_shelf_cross": (RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2+0.4), [0.7, 0.35, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2-0.4), [0.7, 0.15, 0.9])), } def getConfigurationSeeds(): return { "top_shelf/top_shelf": [0.37080011, 0.41394084, -0.16861973, -0.70789778, -0.37031516, 0.60412162, 0.39982981, -0.37080019, 0.41394089, 0.16861988, -0.70789766, 0.37031506, 0.60412179, -0.39982996], "top_shelf/shelf_1": [0.37080079, 0.41394132, -0.16862043, -0.70789679, -0.37031656, 0.60412327, 0.39982969, -0.93496924, 0.46342534, 0.92801666, -1.45777635, -0.31061724, -0.0657716, -0.06019899], "top_shelf/shelf_2": [0.37086448, 0.41394538, -0.16875166, -0.70789745, -0.37020563, 0.60411217, 0.399785, -0.4416204 , 0.62965228, 0.20598405, -1.73324339, -0.41354372, -0.68738414, 0.17443976], "top_shelf/bin_L": [0.37081989, 0.41394235, -0.16866012, -0.70789737, -0.37028201, 0.60411923, 0.39981634, -0.89837331, -1.1576151 , 1.75505216, -1.37515153, 1.0676443 , 1.56371166, -0.64126346], "shelf_1/top_shelf": [0.93496924, 0.46342534, -0.92801666, -1.45777635, 0.31061724, -0.0657716 , 0.06019899, -0.37080079, 0.41394132, 0.16862043, -0.70789679, 0.37031656, 0.60412327, -0.39982969], "shelf_1/shelf_1": [0.87224109, 0.43096634, -0.82223436, -1.45840049, 0.73813452, -0.08999384, -0.41624203, -0.87556489, 0.43246906, 0.82766047, -1.45838515, -0.72259842, -0.0884963, 0.39840129], "shelf_1/shelf_2": [0.93496866, 0.463425 , -0.92801564, -1.45777634, 0.3106235, -0.06577172, 0.06019173, -0.44158858, 0.62964838, 0.20594112, -1.73324341, -0.41354987, -0.6873923 , 0.17446778], "shelf_1/bin_L": [0.93496918, 0.46342531, -0.92801656, -1.45777637, 0.31061728, -0.06577167, 0.06019927, -0.89837321, -1.15761746, 1.75504915, -1.37515113, 1.06764716, 1.56371454, -0.64126383], "shelf_2/top_shelf": [0.4416204, 0.62965228, -0.20598405, -1.73324339, 0.41354372, -0.68738414, -0.17443976, -0.37086448, 0.41394538, 0.16875166, -0.70789745, 0.37020563, 0.60411217, -0.399785], "shelf_2/shelf_1": [0.44158858, 0.62964838, -0.20594112, -1.73324341, 0.41354987, -0.6873923, -0.17446778, -0.93496866, 0.463425 , 0.92801564, -1.45777634, -0.3106235 , -0.06577172, -0.06019173], "shelf_2/shelf_2": [0.44161313, 0.62965141, -0.20597435, -1.73324346, 0.41354447, -0.68738613, -0.17444557, -0.4416132 , 0.62965142, 0.20597452, -1.73324348, -0.41354416, -0.68738609, 0.17444625], "shelf_2/bin_L": [0.44161528, 0.62965169, -0.20597726, -1.73324347, 0.41354399, -0.68738565, -0.17444283, -1.37292761, -0.68372976, 2.96705973, -1.41521783, 2.96705973, -1.11343251, -3.0140737 ], "bin_R/top_shelf": [0.81207926, -1.25359738, -1.58098625, -1.5155474 , -1.32223687, 1.50549708, -2.38221725, -0.37085114, 0.4139444 , 0.16872443, -0.70789757, 0.37022786, 0.60411401, -0.39979449], "bin_R/shelf_1": [0.81207923, -1.25358454, -1.58100042, -1.51554769, -1.32222337, 1.50548369, -2.3822204 , -0.9349716 , 0.46342674, 0.92802082, -1.45777624, -0.31059455, -0.0657707 , -0.06022391], "bin_R/shelf_2": [0.81207937, -1.25360462, -1.58097816, -1.51554761, -1.32224557, 1.50550485, -2.38221483, -0.44166552, 0.62965782, 0.20604497, -1.7332434 , -0.41353464, -0.6873727 , 0.17439863], "bin_R/bin_L": [-1.73637519, 0.6209681 , 0.24232887, -1.51538355, -0.17977474, 0.92618894, -3.01360257, 1.31861497, 0.72394333, 0.4044295 , -1.37509496, -0.27461997, 1.20038493, 0.18611701], "neutral/neutral": 
[0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], "neutral/shelf_1": [0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, -0.93496866, 0.463425 , 0.92801564, -1.45777634, -0.3106235 , -0.06577172, -0.06019173], "neutral/shelf_2": [0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, -0.44166552, 0.62965782, 0.20604497, -1.7332434 , -0.41353464, -0.6873727 , 0.17439863], "shelf_1/neutral": [0.93496924, 0.46342534, -0.92801666, -1.45777635, 0.31061724, -0.0657716 , 0.06019899, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], "shelf_2/neutral": [0.44161528, 0.62965169, -0.20597726, -1.73324347, 0.41354399, -0.68738565, -0.17444283, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], "shelf_2_cross/top_shelf_cross": [0.47500706, 0.72909874, 0.01397772, -1.52841372, 0.15392366, -0.591641, -0.12870521, -0.48821156, 0.67762534, 0.02049926, -0.27420758, 0.10620709, 0.72215209, -0.09973172], } # Additional seed points not needed to connect the graph # "neutral/shelf_1_extract": [ 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, -0.35486829, -0.10621117, -0.09276445, -1.94995786, 1.88826556, 0.46922151, -1.98267349], # "neutral/shelf_2_extract": [ 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.3078069 , 0.56765359, -0.86829439, -2.0943951 , 2.53950045, 1.09607546, -2.4169564], # "shelf_1_extract/neutral": [-1.05527083, -0.43710629, 1.15648812, -1.95011062, 0.24422131, -0.07820216, 0.15872416, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], # "shelf_2_extract/neutral": [-0.30739053, 0.5673891 , 0.86772198, -2.0943951 , -2.53946773, 1.09586777, 2.41729532, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], # "cross_table/top_shelf_cross": [ 0.04655887, 0.97997658, 0.52004246, -1.91926412, -1.37518707, -0.88823968, 0.07674699, -0.5921624 , 0.83651867, 0.20513136, -0.00257881, 0.51748756, 0.92012332, -0.51686487], def getDemoConfigurations(): return [ [0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0], [0.69312848, 0.36303784, -0.66625368, -1.49515991, 0.3230085, -0.10942887, -0.09496304, -0.69312891, 0.36303794, 0.66625426, -1.49515975, -0.32300928, -0.10942832, 0.0949629], [0.2014604, 0.66463495, 0.16799372, -1.66212763, -0.09131682, -0.64368844, -0.03645568, -0.38777291, 0.56141139, -0.05760515, -0.47447495, 0.06515541, 0.63627899, -0.02552148], [-1.8487163 , 0.71749397, 0.66464618, -1.4912954 , -0.52882233, 1.0096015 , -2.62844995, 1.43620829, 0.70451542, -0.01532988, -1.34999693, -0.00550105, 1.18684923, -0.14400234], ] def generateDemoConfigurations(plant, context, wsg1_id, wsg2_id): demo_q = [[0.0, -0.2, 0, -1.2, 0, 1.6, 0.0, 0.0, -0.2, 0, -1.2, 0, 1.6, 0.0]] initial_guess = copy(demo_q[0]) demo_q.append(runBimanualIK( plant, context, wsg1_id, wsg2_id, RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.10, 0.65]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2), [0.7, 0.40, 0.65]), initial_guess, (0.01, 0.01))) demo_q.append(runBimanualIK( plant, context, wsg1_id, wsg2_id, RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2+0.4), [0.7, 0.25, 0.4]), RigidTransform(RollPitchYaw(-np.pi+0.1, 0, np.pi/2-0.4), [0.7, 0.20, 0.9]), initial_guess, None)) initial_guess[0] = -np.pi/2 initial_guess[7] = np.pi/2 demo_q.append(runBimanualIK( plant, context, wsg1_id, wsg2_id, RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, -np.pi), [0.09, -0.6, 0.3]), RigidTransform(RollPitchYaw(-np.pi/2+0.1, 0, np.pi), [0.09, 1.1, 0.3]), initial_guess, None)) return demo_q def filterCollsionGeometry(scene_graph, context): filter_manager = scene_graph.collision_filter_manager(context) inspector = scene_graph.model_inspector() iiwa1 = [[], [], [], [], [], [], [], []] iiwa2 = [[], [], [], [], [], [], [], []] wsg1 = 
[] wsg2 = [] shelf = [] bins = [[], []] table = [] for gid in inspector.GetGeometryIds( GeometrySet(inspector.GetAllGeometryIds()), Role.kProximity): gid_name = inspector.GetName(inspector.GetFrameId(gid)) if "iiwa_1::iiwa_link_" in gid_name: link_num = gid_name[18] iiwa1[int(link_num)].append(gid) elif "iiwa_2::iiwa_link_" in gid_name: link_num = gid_name[18] iiwa2[int(link_num)].append(gid) elif "wsg_1" in gid_name: wsg1.append(gid) elif "wsg_2" in gid_name: wsg2.append(gid) elif "shelves::" in gid_name: shelf.append(gid) elif "binR" in gid_name: bins[0].append(gid) elif "binL" in gid_name: bins[1].append(gid) elif "table" in gid_name: table.append(gid) else: print("Geometry", gid_name, "not assigned to an object.") filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[1] + iiwa1[2]+ iiwa1[3]), GeometrySet(iiwa1[4] + iiwa1[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[3] + iiwa1[4]), GeometrySet(iiwa1[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2] + iiwa1[3] + iiwa1[4] + iiwa1[5] + iiwa1[6]), GeometrySet(iiwa1[7] + wsg1))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1] + iiwa1[2] + iiwa1[3] + iiwa1[4]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[0] + iiwa1[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeWithin( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + shelf))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[1] + iiwa2[2]+ iiwa2[3]), GeometrySet(iiwa2[4] + iiwa2[5]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[3] + iiwa2[4]), GeometrySet(iiwa2[6]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[2] + iiwa2[3] + iiwa2[4] + iiwa2[5] + iiwa2[6]), GeometrySet(iiwa2[7] + wsg2))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(bins[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[1] + iiwa2[2] + iiwa2[3] + iiwa2[4]), GeometrySet(bins[0]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa2[0] + iiwa2[0] + iiwa2[2]), GeometrySet(table))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[2]), GeometrySet(iiwa2[0] + iiwa2[1]))) filter_manager.Apply(CollisionFilterDeclaration().ExcludeBetween( GeometrySet(iiwa1[0] + iiwa1[1]), GeometrySet(iiwa2[2]))) pairs = scene_graph.get_query_output_port().Eval(context).inspector().GetCollisionCandidates() print("Filtered collision pairs from", len(inspector.GetCollisionCandidates()), "to", len(pairs)) # initial_guess = np.concatenate((q0, q0)) # min_dist = (0.01, 0.01)??? 
def runBimanualIK(plant, context, wsg1_id, wsg2_id, wsg1_pose, wsg2_pose, initial_guess, min_dist=None): hand_frame1 = plant.GetBodyByName("body", wsg1_id).body_frame() hand_frame2 = plant.GetBodyByName("body", wsg2_id).body_frame() ik = InverseKinematics(plant, context) if min_dist is not None: ik.AddMinimumDistanceConstraint(*min_dist) ik.prog().AddBoundingBoxConstraint(plant.GetPositionLowerLimits(), plant.GetPositionUpperLimits(), ik.q()) ik.prog().SetInitialGuess(ik.q(), initial_guess) ik.prog().AddQuadraticCost((ik.q() - initial_guess).dot(ik.q() - initial_guess)) ik.AddPositionConstraint(hand_frame1, [0, 0, 0], plant.world_frame(), wsg1_pose.translation(), wsg1_pose.translation()) ik.AddOrientationConstraint(hand_frame1, RotationMatrix(), plant.world_frame(), wsg1_pose.rotation(), 0.001) ik.AddPositionConstraint(hand_frame2, [0, 0, 0], plant.world_frame(), wsg2_pose.translation(), wsg2_pose.translation()) ik.AddOrientationConstraint(hand_frame2, RotationMatrix(), plant.world_frame(), wsg2_pose.rotation(), 0.001) result = Solve(ik.prog()) return result.GetSolution(ik.q()) def visualizeConfig(diagram, plant, context, q): plant_context = plant.GetMyMutableContextFromRoot(context) plant.SetPositions(plant_context, q) diagram.ForcedPublish(context) def getLinearGcsPath(regions, sequence): path = [sequence[0]] run_time = 0.0 gcs = LinearGCS(regions) gcs.setPaperSolverOptions() gcs.setSolver(MosekSolver()) for start_pt, goal_pt in zip(sequence[:-1], sequence[1:]): gcs.addSourceTarget(start_pt, goal_pt) start_time = time.time() waypoints, results_dict = gcs.SolvePath(True, False, preprocessing=True) if waypoints is None: print(f"Failed between {start_pt} and {goal_pt}") return None print(f"Planned segment in {np.round(time.time() - start_time, 4)}", flush=True) # run_time += results_dict["preprocessing_stats"]['linear_programs'] run_time += results_dict["relaxation_solver_time"] run_time += results_dict["total_rounded_solver_time"] path += waypoints.T[1:].tolist() gcs.ResetGraph() return np.stack(path).T, run_time def getBezierGcsPath(plant, regions, sequence, order, continuity, hdot_min = 1e-3): run_time = [] trajectories = []
gcs = BezierGCS(regions, order, continuity)
0
2023-10-13 00:27:32+00:00
16k
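The getLinearGcsPath helper defined in this record solves one graph-of-convex-sets shortest-path problem per consecutive pair of seed configurations and accumulates both the waypoints and the solver times. Below is a minimal sketch of how it could be driven from the named seeds returned by getConfigurationSeeds; it assumes the functions above are in scope and that a `regions` dictionary of IRIS convex regions has already been built, which is not part of this snippet.

# Hypothetical driver for the helpers shown above.
# `regions` (IRIS regions for LinearGCS) is assumed to exist already.
seeds = getConfigurationSeeds()
sequence = [
    np.array(seeds["neutral/neutral"]),
    np.array(seeds["shelf_1/shelf_2"]),
    np.array(seeds["bin_R/bin_L"]),
]

result = getLinearGcsPath(regions, sequence)
if result is not None:
    path, solver_time = result
    print("Waypoint matrix shape:", path.shape)   # 14 x num_waypoints (two 7-DoF arms)
    print("Total solver time [s]:", solver_time)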
LeapLabTHU/Rank-DETR
projects/dab_deformable_detr/configs/models/dab_deformable_detr_r50.py
[ { "identifier": "HungarianMatcher", "path": "detrex/modeling/matcher/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"HungarianMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of th...
import copy
import torch.nn as nn

from detectron2.modeling.backbone import ResNet, BasicStem
from detectron2.layers import ShapeSpec
from detectron2.config import LazyCall as L

from detrex.modeling.matcher import HungarianMatcher
from detrex.modeling.neck import ChannelMapper
from detrex.layers import PositionEmbeddingSine

from projects.dab_deformable_detr.modeling import (
    DabDeformableDETR,
    DabDeformableDetrTransformerEncoder,
    DabDeformableDetrTransformerDecoder,
    DabDeformableDetrTransformer,
    TwoStageCriterion,
)
12,988
model = L(DabDeformableDETR)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(DabDeformableDetrTransformer)(
model = L(DabDeformableDETR)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(DabDeformableDetrTransformer)(
encoder=L(DabDeformableDetrTransformerEncoder)(
3
2023-10-12 03:02:25+00:00
16k
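Because every component in this config is wrapped in detectron2's LazyCall, nothing is constructed until the node is instantiated. The sketch below shows the usual way such a lazily-defined model is materialized with detectron2's LazyConfig/instantiate utilities; a detrex training script would typically perform this step internally, and only the file path is taken from the record.

from detectron2.config import LazyConfig, instantiate

# Load the lazy config file shown in this record.
cfg = LazyConfig.load(
    "projects/dab_deformable_detr/configs/models/dab_deformable_detr_r50.py"
)

# `cfg.model` is the L(DabDeformableDETR)(...) node above; instantiate()
# recursively builds the backbone, neck, transformer, matcher and criterion.
model = instantiate(cfg.model)
model.eval()
print(type(model).__name__)  # DabDeformableDETR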
ByungKwanLee/Full-Segment-Anything
mask_generator.py
[ { "identifier": "Sam", "path": "modeling/sam.py", "snippet": "class Sam(nn.Module):\n mask_threshold: float = 0.0\n image_format: str = \"RGB\"\n\n def __init__(\n self,\n image_encoder: ImageEncoderViT,\n prompt_encoder: PromptEncoder,\n mask_decoder: MaskDecoder,\n...
import numpy as np
import torch
import cv2  # type: ignore # noqa: F401

from torchvision.ops.boxes import batched_nms, box_area  # type: ignore

from typing import Any, Dict, List, Optional, Tuple

from modeling import Sam
from predictor import SamPredictor
from utils.amg import (
    MaskData,
    area_from_rle,
    batch_iterator,
    batched_mask_to_box,
    box_xyxy_to_xywh,
    build_all_layer_point_grids,
    calculate_stability_score,
    coco_encode_rle,
    generate_crop_boxes,
    is_box_near_crop_edge,
    mask_to_rle_pytorch,
    remove_small_regions,
    rle_to_mask,
    uncrop_boxes_xyxy,
    uncrop_masks,
    uncrop_points,
)
from pycocotools import mask as mask_utils  # type: ignore # noqa: F401
12,149
orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=True, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold data["boxes"] = batched_mask_to_box(data["masks"]) # Filter boxes that touch crop boundaries keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h]) if not torch.all(keep_mask): data.filter(keep_mask) # Compress to RLE data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
data["rles"] = mask_to_rle_pytorch(data["masks"])
12
2023-10-13 20:07:42+00:00
16k
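SamMaskGenerator above is configured entirely through its constructor and then driven by a single generate() call on an HWC uint8 image, returning one record per mask. A minimal usage sketch follows; the `sam` variable stands for an already-built, checkpoint-loaded Sam model (its construction lives in the repository's modeling code, not in this snippet), and the image path is only illustrative.

import cv2

# `sam` is assumed to be a constructed, checkpoint-loaded `Sam` instance.
generator = SamMaskGenerator(
    model=sam,
    points_per_side=32,           # 32*32 point prompts over the image
    pred_iou_thresh=0.88,         # drop masks the model itself rates poorly
    stability_score_thresh=0.95,  # drop masks that flip under threshold jitter
    output_mode="binary_mask",
)

image = cv2.cvtColor(cv2.imread("example.jpg"), cv2.COLOR_BGR2RGB)  # HWC uint8
masks = generator.generate(image)

# Each entry carries the mask plus bbox (XYWH), area, and quality scores.
largest = max(masks, key=lambda m: m["area"])
print(len(masks), largest["bbox"], largest["predicted_iou"])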
sakemin/cog-musicgen-remixer
audiocraft/modules/conditioners.py
[ { "identifier": "ChromaExtractor", "path": "audiocraft/modules/chroma.py", "snippet": "class ChromaExtractor(nn.Module):\n \"\"\"Chroma extraction and quantization.\n\n Args:\n sample_rate (int): Sample rate for the chroma extraction.\n n_chroma (int): Number of chroma bins for the c...
from collections import defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from itertools import chain
from pathlib import Path
from num2words import num2words
from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer  # type: ignore
from torch import nn
from torch.nn.utils.rnn import pad_sequence
from .chroma import ChromaExtractor
from .chord_chroma import ChordExtractor
from .streaming import StreamingModule
from .transformer import create_sin_embedding
from ..data.audio import audio_read
from ..data.audio_dataset import SegmentInfo
from ..data.audio_utils import convert_audio
from ..environment import AudioCraftEnvironment
from ..quantization import ResidualVectorQuantizer
from ..utils.autocast import TorchAutocast
from ..utils.cache import EmbeddingCache
from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once
from .btc.utils import chords
from demucs import pretrained
from audiocraft.data.audio_dataset import AudioDataset
from demucs.apply import apply_model
from demucs.audio import convert_audio
from demucs import pretrained
from audiocraft.data.audio_dataset import AudioDataset
from demucs.apply import apply_model
from demucs.audio import convert_audio
import logging
import math
import random
import re
import typing as tp
import warnings
import einops
import spacy
import torch
import torch.nn.functional as F
import numpy as np
import laion_clap  # type: ignore
13,735
embeds = self.output_proj(embeds) if self.match_len_on_eval: if lengths is not None: for i in range(len(lengths)): if lengths[i] > 0 and lengths[i] < self.duration * self.sample_rate: lengths[i] = torch.Tensor([(self.duration+1) * self.sample_rate]) lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds) else: if lengths is not None: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds) embeds = (embeds.to(self.device) * mask.unsqueeze(2).to(self.device)) return embeds.to(self.device), mask.to(self.device) class JointEmbeddingConditioner(BaseConditioner): """Joint embedding conditioning supporting both audio or text conditioning. Args: dim (int): Dimension. output_dim (int): Output dimension. device (str): Device. attribute (str): Attribute used by the conditioner. autocast_dtype (str): Autocast for the conditioner. quantize (bool): Whether to quantize the CLAP embedding. n_q (int): Number of residual quantizers (used if quantize is true). bins (int): Quantizers' codebooks size (used if quantize is true). kwargs: Additional parameters for residual vector quantizer. """ def __init__(self, dim: int, output_dim: int, device: str, attribute: str, autocast_dtype: tp.Optional[str] = 'float32', quantize: bool = True, n_q: int = 12, bins: int = 1024, **kwargs): super().__init__(dim=dim, output_dim=output_dim) self.device = device self.attribute = attribute if autocast_dtype is None or device == 'cpu': self.autocast = TorchAutocast(enabled=False) logger.warning("JointEmbeddingConditioner has no autocast, this might lead to NaN.") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"JointEmbeddingConditioner will be evaluated with autocast as {autocast_dtype}.") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # residual vector quantizer to discretize the conditioned embedding self.quantizer: tp.Optional[ResidualVectorQuantizer] = None if quantize: self.quantizer = ResidualVectorQuantizer(dim, n_q=n_q, bins=bins, **kwargs) def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Get joint embedding in latent space from the inputs. Returns: tuple[torch.Tensor, torch.Tensor]: Tensor for the latent embedding and corresponding empty indexes. """ raise NotImplementedError() def forward(self, x: JointEmbedCondition) -> ConditionType: with self.autocast: embed, empty_idx = self._get_embed(x) if self.quantizer is not None: embed = embed.view(-1, self.dim, 1) q_res = self.quantizer(embed, frame_rate=1) out_embed = q_res.x.view(-1, self.dim) else: out_embed = embed out_embed = self.output_proj(out_embed).view(-1, 1, self.output_dim) mask = torch.ones(*out_embed.shape[:2], device=out_embed.device) mask[empty_idx, :] = 0 # zero-out index where the input is non-existant out_embed = (out_embed * mask.unsqueeze(-1)) return out_embed, mask def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: return x class CLAPEmbeddingConditioner(JointEmbeddingConditioner): """Joint Embedding conditioner based on pre-trained CLAP model. This CLAP-based conditioner supports a caching mechanism over the computed embeddings for faster training. Args: dim (int): Dimension. output_dim (int): Output dimension. device (str): Device. attribute (str): Attribute used by the conditioner. 
quantize (bool): Whether to quantize the CLAP embedding. n_q (int): Number of residual quantizers (used if quantize is true). bins (int): Quantizers' codebooks size (used if quantize is true). checkpoint (str): Path to CLAP checkpoint. model_arch (str): CLAP model architecture. enable_fusion (bool): Enable fusion for CLAP model. sample_rate (int): Sample rate used by CLAP model. max_audio_length (float): Maximum audio length for CLAP model. audio_stride (float): Stride to use for getting a CLAP embedding on the full sequence. normalize (bool): Whether to normalize the CLAP embedding. text_p (float): Probability of using text representation instead of audio at train time. batch_size (Optional[int]): Batch size for CLAP embedding computation. autocast_dtype (str): Autocast for the conditioner. cache_path (Optional[str]): Path for pre-computed embeddings caching. kwargs: Additional parameters for residual vector quantizer. """ def __init__(self, dim: int, output_dim: int, device: str, attribute: str, quantize: bool, n_q: int, bins: int, checkpoint: tp.Union[str, Path], model_arch: str, enable_fusion: bool, sample_rate: int, max_audio_length: int, audio_stride: int, normalize: bool, text_p: bool, batch_size: tp.Optional[int] = None, autocast_dtype: tp.Optional[str] = 'float32', cache_path: tp.Optional[str] = None, **kwargs): try: except ImportError: raise ImportError("Please install CLAP to use the CLAPEmbeddingConditioner: 'pip install laion_clap'") warnings.warn("Sample rate for CLAP conditioner was fixed in version v1.1.0, (from 44.1 to 48 kHz). " "Please retrain all models.")
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask class WavCondition(tp.NamedTuple): wav: torch.Tensor length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] class WavChordTextCondition(tp.NamedTuple): wav: tp.Union[torch.Tensor,str,tp.List[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] bpm : tp.List[tp.Optional[tp.Union[int, float]]] = [] meter : tp.List[tp.Optional[int]] = [] class JointEmbedCondition(tp.NamedTuple): wav: torch.Tensor text: tp.List[tp.Optional[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] @dataclass class ConditioningAttributes: text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) wav: tp.Dict[str, tp.Union[WavCondition,WavChordTextCondition]] = field(default_factory=dict) joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) def __getitem__(self, item): return getattr(self, item) @property def text_attributes(self): return self.text.keys() @property def wav_attributes(self): return self.wav.keys() @property def joint_embed_attributes(self): return self.joint_embed.keys() @property def attributes(self): return { "text": self.text_attributes, "wav": self.wav_attributes, "joint_embed": self.joint_embed_attributes, } def to_flat_dict(self): return { **{f"text.{k}": v for k, v in self.text.items()}, **{f"wav.{k}": v for k, v in self.wav.items()}, **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} } @classmethod def from_flat_dict(cls, x): out = cls() for k, v in x.items(): kind, att = k.split(".") out[kind][att] = v return out class SegmentWithAttributes(SegmentInfo): """Base class for all dataclasses that are used for conditioning. All child classes should implement `to_condition_attributes` that converts the existing attributes to a dataclass of type ConditioningAttributes. """ def to_condition_attributes(self) -> ConditioningAttributes: raise NotImplementedError() def nullify_condition(condition: ConditionType, dim: int = 1): """Transform an input condition to a null condition. The way it is done by converting it to a single zero vector similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask """ assert dim != 0, "dim cannot be the batch dimension!" assert isinstance(condition, tuple) and \ isinstance(condition[0], torch.Tensor) and \ isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" cond, mask = condition B = cond.shape[0] last_dim = cond.dim() - 1 out = cond.transpose(dim, last_dim) out = 0. 
* out[..., :1] out = out.transpose(dim, last_dim) mask = torch.zeros((B, 1), device=out.device).int() assert cond.dim() == out.dim() return out, mask def nullify_wav(cond: tp.Union[WavCondition,WavChordTextCondition]) -> tp.Union[WavCondition,WavChordTextCondition]: """Transform a WavCondition to a nullified WavCondition. It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. Args: cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. Returns: WavCondition: Nullified wav condition. """ if not isinstance(cond, WavChordTextCondition): null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) return WavCondition( wav=null_wav, length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), sample_rate=cond.sample_rate, path=[None] * cond.wav.shape[0], seek_time=[None] * cond.wav.shape[0], ) else: return WavChordTextCondition( wav=['N']* len(cond.wav), length=torch.tensor([0] * len(cond.wav), device=cond.length.device), sample_rate=cond.sample_rate, path=[None], seek_time=[None], bpm = cond.bpm, meter = cond.meter ) def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, and replacing metadata by dummy attributes. Args: cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. """ null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) return JointEmbedCondition( wav=null_wav, text=[None] * len(embed.text), length=torch.LongTensor([0]).to(embed.wav.device), sample_rate=embed.sample_rate, path=[None] * embed.wav.shape[0], seek_time=[0] * embed.wav.shape[0], ) class Tokenizer: """Base tokenizer implementation (in case we want to introduce more advances tokenizers in the future). """ def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError() class WhiteSpaceTokenizer(Tokenizer): """This tokenizer should be used for natural language descriptions. For example: ["he didn't, know he's going home.", 'shorter sentence'] => [[78, 62, 31, 4, 78, 25, 19, 34], [59, 77, 0, 0, 0, 0, 0, 0]] """ PUNCTUATION = "?:!.,;" def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", lemma: bool = True, stopwords: bool = True) -> None: self.n_bins = n_bins self.pad_idx = pad_idx self.lemma = lemma self.stopwords = stopwords try: self.nlp = spacy.load(language) except IOError: spacy.cli.download(language) # type: ignore self.nlp = spacy.load(language) @tp.no_type_check def __call__(self, texts: tp.List[tp.Optional[str]], return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Take a list of strings and convert them to a tensor of indices. Args: texts (list[str]): List of strings. return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. Returns: tuple[torch.Tensor, torch.Tensor]: - Indices of words in the LUT. 
- And a mask indicating where the padding tokens are """ output, lengths = [], [] texts = deepcopy(texts) for i, text in enumerate(texts): # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(torch.Tensor([self.pad_idx])) lengths.append(0) continue # convert numbers to words text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore # normalize text text = self.nlp(text) # type: ignore # remove stopwords if self.stopwords: text = [w for w in text if not w.is_stop] # type: ignore # remove punctuation text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore # lemmatize if needed text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore texts[i] = " ".join(text) lengths.append(len(text)) # convert to tensor tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) output.append(tokens) mask = length_to_mask(torch.IntTensor(lengths)).int() padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() if return_text: return padded_output, mask, texts # type: ignore return padded_output, mask class NoopTokenizer(Tokenizer): """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will split it to ["Jeff", "Buckley"] and return an index per word. For example: ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] ["Metal", "Rock", "Classical"] => [0, 223, 51] """ def __init__(self, n_bins: int, pad_idx: int = 0): self.n_bins = n_bins self.pad_idx = pad_idx def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: output, lengths = [], [] for text in texts: # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(self.pad_idx) lengths.append(0) else: output.append(hash_trick(text, self.n_bins)) lengths.append(1) tokens = torch.LongTensor(output).unsqueeze(1) mask = length_to_mask(torch.IntTensor(lengths)).int() return tokens, mask class BaseConditioner(nn.Module): """Base model for all conditioner modules. We allow the output dim to be different than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; 2) make all condition dims consistent. Args: dim (int): Hidden dim of the model. output_dim (int): Output dim of the conditioner. """ def __init__(self, dim: int, output_dim: int): super().__init__() self.dim = dim self.output_dim = output_dim self.output_proj = nn.Linear(dim, output_dim) def tokenize(self, *args, **kwargs) -> tp.Any: """Should be any part of the processing that will lead to a synchronization point, e.g. BPE tokenization with transfer to the GPU. The returned value will be saved and return later when calling forward(). """ raise NotImplementedError() def forward(self, inputs: tp.Any) -> ConditionType: """Gets input that should be used as conditioning (e.g, genre, description or a waveform). Outputs a ConditionType, after the input data was embedded as a dense vector. Returns: ConditionType: - A tensor of size [B, T, D] where B is the batch size, T is the length of the output embedding and D is the dimension of the embedding. - And a mask indicating where the padding tokens. """ raise NotImplementedError() class TextConditioner(BaseConditioner): ... class LUTConditioner(TextConditioner): """Lookup table TextConditioner. 
Args: n_bins (int): Number of bins. dim (int): Hidden dim of the model (text-encoder/LUT). output_dim (int): Output dim of the conditioner. tokenizer (str): Name of the tokenizer. pad_idx (int, optional): Index for padding token. Defaults to 0. """ def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): super().__init__(dim, output_dim) self.embed = nn.Embedding(n_bins, dim) self.tokenizer: Tokenizer if tokenizer == 'whitespace': self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) elif tokenizer == 'noop': self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) else: raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: device = self.embed.weight.device tokens, mask = self.tokenizer(x) tokens, mask = tokens.to(device), mask.to(device) return tokens, mask def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: tokens, mask = inputs embeds = self.embed(tokens) embeds = self.output_proj(embeds) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class T5Conditioner(TextConditioner): """T5-based TextConditioner. Args: name (str): Name of the T5 model. output_dim (int): Output dim of the conditioner. finetune (bool): Whether to fine-tune T5 at train time. device (str): Device for T5 Conditioner. autocast_dtype (tp.Optional[str], optional): Autocast dtype. word_dropout (float, optional): Word dropout probability. normalize_text (bool, optional): Whether to apply text normalization. """ MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", "google/flan-t5-xl", "google/flan-t5-xxl"] MODELS_DIMS = { "t5-small": 512, "t5-base": 768, "t5-large": 1024, "t5-3b": 1024, "t5-11b": 1024, "google/flan-t5-small": 512, "google/flan-t5-base": 768, "google/flan-t5-large": 1024, "google/flan-t5-3b": 1024, "google/flan-t5-11b": 1024, } def __init__(self, name: str, output_dim: int, finetune: bool, device: str, autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., normalize_text: bool = False): assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" super().__init__(self.MODELS_DIMS[name], output_dim) self.device = device self.name = name self.finetune = finetune self.word_dropout = word_dropout if autocast_dtype is None or self.device == 'cpu': self.autocast = TorchAutocast(enabled=False) if self.device != 'cpu': logger.warning("T5 has no autocast, this might lead to NaN") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
# thanks https://gist.github.com/simon-weber/7853144 previous_level = logging.root.manager.disable logging.disable(logging.ERROR) with warnings.catch_warnings(): warnings.simplefilter("ignore") try: self.t5_tokenizer = T5Tokenizer.from_pretrained(name) t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) finally: logging.disable(previous_level) if finetune: self.t5 = t5 else: # this makes sure that the t5 models is not part # of the saved checkpoint self.__dict__['t5'] = t5.to(device) self.normalize_text = normalize_text if normalize_text: self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: # if current sample doesn't have a certain attribute, replace with empty string entries: tp.List[str] = [xi if xi is not None else "" for xi in x] if self.normalize_text: _, _, entries = self.text_normalizer(entries, return_text=True) if self.word_dropout > 0. and self.training: new_entries = [] for entry in entries: words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] new_entries.append(" ".join(words)) entries = new_entries empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) mask = inputs['attention_mask'] mask[empty_idx, :] = 0 # zero-out index where the input is non-existant return inputs def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: mask = inputs['attention_mask'] with torch.set_grad_enabled(self.finetune), self.autocast: embeds = self.t5(**inputs).last_hidden_state embeds = self.output_proj(embeds.to(self.output_proj.weight)) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class WaveformConditioner(BaseConditioner): """Base class for all conditioners that take a waveform as input. Classes that inherit must implement `_get_wav_embedding` that outputs a continuous tensor, and `_downsampling_factor` that returns the down-sampling factor of the embedding model. Args: dim (int): The internal representation dimension. output_dim (int): Output dimension. device (tp.Union[torch.device, str]): Device. """ def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): super().__init__(dim, output_dim) self.device = device # if False no masking is done, used in ChromaStemConditioner when completing by periodicity a sample. self._use_masking = True def tokenize(self, x: WavCondition) -> WavCondition: wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Gets as input a WavCondition and returns a dense embedding.""" raise NotImplementedError() def _downsampling_factor(self): """Returns the downsampling factor of the embedding model.""" raise NotImplementedError() def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) embeds = embeds.to(self.output_proj.weight) embeds = self.output_proj(embeds) if lengths is not None and self._use_masking: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds[..., 0]) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class ChromaStemConditioner(WaveformConditioner): """Chroma conditioner based on stems. The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(dim=n_chroma, output_dim=output_dim, device=device) self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) self.sample_rate = sample_rate self.match_len_on_eval = match_len_on_eval if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). 
""" if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], 
(0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None: warn_once(logger, "Using precomputed evaluation wavs!") sampled_wav = self._sample_eval_wavs(len(x.wav)) no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 if sampled_wav is not None: chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) elif self.cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] chroma = self.cache.get_embed_from_cache(paths, x) else: assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) if self.match_len_on_eval: B, T, C = chroma.shape if T > self.chroma_len: chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") elif T < self.chroma_len: n_repeat = int(math.ceil(self.chroma_len / T)) chroma = chroma.repeat(1, n_repeat, 1) chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") return chroma def tokenize(self, x: WavCondition) -> WavCondition: """Apply WavConditioner tokenization and populate cache if needed.""" x = super().tokenize(x) no_undefined_paths = all(p is not None for p in x.path) if self.cache is not None and no_undefined_paths: paths = [Path(p) for p in x.path if p is not None] self.cache.populate_embed_cache(paths, x) return x class ChromaChordConditioner(ChromaStemConditioner): """Chord Chroma conditioner based on stems. The ChromaChordConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. 
""" def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(output_dim = output_dim, sample_rate = sample_rate, n_chroma = n_chroma, radix2_exp = radix2_exp, duration = duration, match_len_on_eval = match_len_on_eval, eval_wavs = eval_wavs, n_eval_wavs = n_eval_wavs, cache_path = cache_path, device = device) self.winhop = self.chroma.winhop self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('bass'), stem_sources.index('other')]).to(device) self.chroma_len = self._get_chroma_len() self.bar2chromabin = self.sample_rate / self.winhop self.chroma = ChordExtractor(device = device, sample_rate=sample_rate, n_chroma=n_chroma, max_duration = duration, chroma_len = self.chroma_len, winhop = self.winhop).to(device) self.chords = chords.Chords() self.chroma_coefficient = 1 self.continuation_count = 0 # for infinite generation with text chroma #3 Layered MLP projection override ''' self.output_proj = nn.Sequential( nn.Linear(n_chroma, 128), nn.ReLU(), nn.Linear(128, 256), nn.ReLU(), nn.Linear(256, output_dim) ) ''' def _downsampling_factor(self) -> int: return self.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). """ if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." 
total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: # print("1515151") return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) # print("2727272") return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) def set_continuation_count(self, sub_duration_ratio, current_iter): self.continuation_count = int(self.chroma_len * sub_duration_ratio * current_iter) @torch.no_grad() def _get_wav_embedding(self, x: tp.Union[WavCondition, WavChordTextCondition]) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. 
""" if isinstance(x, WavCondition): sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None: warn_once(logger, "Using precomputed evaluation wavs!") sampled_wav = self._sample_eval_wavs(len(x.wav)) no_undefined_paths = all(p is not None for p in x.path) no_nullified_cond = x.wav.shape[-1] > 1 if sampled_wav is not None: chroma = self._compute_wav_embedding(sampled_wav, self.sample_rate) # print("111111") elif self.cache is not None and no_undefined_paths and no_nullified_cond: paths = [Path(p) for p in x.path if p is not None] chroma = self.cache.get_embed_from_cache(paths, x) # print("222222") #Works here else: assert all(sr == x.sample_rate[0] for sr in x.sample_rate), "All sample rates in batch should be equal." chroma = self._compute_wav_embedding(x.wav, x.sample_rate[0]) # print("333333") #and here in training else: chromas = [] for wav, bpm, meter in zip(x.wav, x.bpm, x.meter): chroma = torch.zeros([self.chroma_len, self.dim]) count = 0 offset = 0 stext = wav.split(" ") barsec = 60/(bpm/meter) timebin = barsec * self.bar2chromabin while count < self.chroma_len: for tokens in stext: if count >= self.chroma_len: break stoken = tokens.split(',') for token in stoken: off_timebin = timebin + offset rounded_timebin = round(off_timebin) offset = off_timebin - rounded_timebin offset = offset/len(stoken) add_step = rounded_timebin//len(stoken) mhot = self.chords.chord(token) rolled = np.roll(mhot[2], mhot[0]) for i in range(count, count + add_step): if self.continuation_count > 0: self.continuation_count -= 1 continue if count >= self.chroma_len: break chroma[i] = torch.Tensor(rolled) count += 1 chromas.append(chroma) chroma = torch.stack(chromas)*self.chroma_coefficient if self.match_len_on_eval: B, T, C = chroma.shape if T > self.chroma_len: chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was truncated to match length! ({T} -> {chroma.shape[1]})") elif T < self.chroma_len: n_repeat = int(math.ceil(self.chroma_len / T)) chroma = chroma.repeat(1, n_repeat, 1) chroma = chroma[:, :self.chroma_len] logger.debug(f"Chroma was repeated to match length! ({T} -> {chroma.shape[1]})") return chroma def tokenize(self, x: tp.Union[WavCondition, WavChordTextCondition]) -> tp.Union[WavCondition, WavChordTextCondition]: if isinstance(x, WavCondition): wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) else: wav, length, sample_rate, path, seek_time, bpm, meter = x return WavChordTextCondition(wav, length.to(self.device), sample_rate, path, seek_time, bpm, meter) def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) #chroma embeds = embeds.to(self.output_proj.weight) # embeds = embeds * (torch.rand(embeds.shape).to(self.device) * 0.3) embeds = self.output_proj(embeds) if self.match_len_on_eval: if lengths is not None: for i in range(len(lengths)): if lengths[i] > 0 and lengths[i] < self.duration * self.sample_rate: lengths[i] = torch.Tensor([(self.duration+1) * self.sample_rate]) lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds) else: if lengths is not None: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds) embeds = (embeds.to(self.device) * mask.unsqueeze(2).to(self.device)) return embeds.to(self.device), mask.to(self.device) class JointEmbeddingConditioner(BaseConditioner): """Joint embedding conditioning supporting both audio or text conditioning. Args: dim (int): Dimension. output_dim (int): Output dimension. device (str): Device. attribute (str): Attribute used by the conditioner. autocast_dtype (str): Autocast for the conditioner. quantize (bool): Whether to quantize the CLAP embedding. n_q (int): Number of residual quantizers (used if quantize is true). bins (int): Quantizers' codebooks size (used if quantize is true). kwargs: Additional parameters for residual vector quantizer. """ def __init__(self, dim: int, output_dim: int, device: str, attribute: str, autocast_dtype: tp.Optional[str] = 'float32', quantize: bool = True, n_q: int = 12, bins: int = 1024, **kwargs): super().__init__(dim=dim, output_dim=output_dim) self.device = device self.attribute = attribute if autocast_dtype is None or device == 'cpu': self.autocast = TorchAutocast(enabled=False) logger.warning("JointEmbeddingConditioner has no autocast, this might lead to NaN.") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"JointEmbeddingConditioner will be evaluated with autocast as {autocast_dtype}.") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # residual vector quantizer to discretize the conditioned embedding self.quantizer: tp.Optional[ResidualVectorQuantizer] = None if quantize: self.quantizer = ResidualVectorQuantizer(dim, n_q=n_q, bins=bins, **kwargs) def _get_embed(self, x: JointEmbedCondition) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Get joint embedding in latent space from the inputs. Returns: tuple[torch.Tensor, torch.Tensor]: Tensor for the latent embedding and corresponding empty indexes. 
""" raise NotImplementedError() def forward(self, x: JointEmbedCondition) -> ConditionType: with self.autocast: embed, empty_idx = self._get_embed(x) if self.quantizer is not None: embed = embed.view(-1, self.dim, 1) q_res = self.quantizer(embed, frame_rate=1) out_embed = q_res.x.view(-1, self.dim) else: out_embed = embed out_embed = self.output_proj(out_embed).view(-1, 1, self.output_dim) mask = torch.ones(*out_embed.shape[:2], device=out_embed.device) mask[empty_idx, :] = 0 # zero-out index where the input is non-existant out_embed = (out_embed * mask.unsqueeze(-1)) return out_embed, mask def tokenize(self, x: JointEmbedCondition) -> JointEmbedCondition: return x class CLAPEmbeddingConditioner(JointEmbeddingConditioner): """Joint Embedding conditioner based on pre-trained CLAP model. This CLAP-based conditioner supports a caching mechanism over the computed embeddings for faster training. Args: dim (int): Dimension. output_dim (int): Output dimension. device (str): Device. attribute (str): Attribute used by the conditioner. quantize (bool): Whether to quantize the CLAP embedding. n_q (int): Number of residual quantizers (used if quantize is true). bins (int): Quantizers' codebooks size (used if quantize is true). checkpoint (str): Path to CLAP checkpoint. model_arch (str): CLAP model architecture. enable_fusion (bool): Enable fusion for CLAP model. sample_rate (int): Sample rate used by CLAP model. max_audio_length (float): Maximum audio length for CLAP model. audio_stride (float): Stride to use for getting a CLAP embedding on the full sequence. normalize (bool): Whether to normalize the CLAP embedding. text_p (float): Probability of using text representation instead of audio at train time. batch_size (Optional[int]): Batch size for CLAP embedding computation. autocast_dtype (str): Autocast for the conditioner. cache_path (Optional[str]): Path for pre-computed embeddings caching. kwargs: Additional parameters for residual vector quantizer. """ def __init__(self, dim: int, output_dim: int, device: str, attribute: str, quantize: bool, n_q: int, bins: int, checkpoint: tp.Union[str, Path], model_arch: str, enable_fusion: bool, sample_rate: int, max_audio_length: int, audio_stride: int, normalize: bool, text_p: bool, batch_size: tp.Optional[int] = None, autocast_dtype: tp.Optional[str] = 'float32', cache_path: tp.Optional[str] = None, **kwargs): try: except ImportError: raise ImportError("Please install CLAP to use the CLAPEmbeddingConditioner: 'pip install laion_clap'") warnings.warn("Sample rate for CLAP conditioner was fixed in version v1.1.0, (from 44.1 to 48 kHz). " "Please retrain all models.")
checkpoint = AudioCraftEnvironment.resolve_reference_path(checkpoint)
7
2023-10-09 09:55:24+00:00
16k
Texaser/MTN
nerf/network_grid_tcnn.py
[ { "identifier": "trunc_exp", "path": "activation.py", "snippet": "class _trunc_exp(Function):\n def forward(ctx, x):\n def backward(ctx, g):\ndef biased_softplus(x, bias=0):" }, { "identifier": "NeRFRenderer", "path": "nerf/renderer.py", "snippet": "class NeRFRenderer(nn.Module):\n...
import torch import torch.nn as nn import torch.nn.functional as F import numpy as np import tinycudann as tcnn from activation import trunc_exp, biased_softplus from .renderer import NeRFRenderer from encoding import get_encoder from .utils import safe_normalize
13,761
class MLP(nn.Module): def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=32, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim self.encoder = tcnn.Encoding( n_input_dims=3, encoding_config={ "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "interpolation": "Smoothstep", "per_level_scale": np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)), }, dtype=torch.float32, # ENHANCE: default float16 seems unstable... ) self.in_dim = self.encoder.n_output_dims # use torch MLP, as tcnn MLP doesn't impl second-order derivative self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus # background network if self.opt.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg # use a very simple network to avoid it learning the prompt... self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6) self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) else: self.bg_net = None def common_forward(self, x): # sigma enc = self.encoder((x + self.bound) / (2 * self.bound)).float() h = self.sigma_net(enc) sigma = self.density_activation(h[..., 0] + self.density_blob(x)) albedo = torch.sigmoid(h[..., 1:]) return sigma, albedo def normal(self, x): with torch.enable_grad(): with torch.cuda.amp.autocast(enabled=False): x.requires_grad_(True) sigma, albedo = self.common_forward(x) # query gradient normal = - torch.autograd.grad(torch.sum(sigma), x, create_graph=True)[0] # [N, 3] # normal = self.finite_difference_normal(x)
class MLP(nn.Module): def __init__(self, dim_in, dim_out, dim_hidden, num_layers, bias=True): super().__init__() self.dim_in = dim_in self.dim_out = dim_out self.dim_hidden = dim_hidden self.num_layers = num_layers net = [] for l in range(num_layers): net.append(nn.Linear(self.dim_in if l == 0 else self.dim_hidden, self.dim_out if l == num_layers - 1 else self.dim_hidden, bias=bias)) self.net = nn.ModuleList(net) def forward(self, x): for l in range(self.num_layers): x = self.net[l](x) if l != self.num_layers - 1: x = F.relu(x, inplace=True) return x class NeRFNetwork(NeRFRenderer): def __init__(self, opt, num_layers=3, hidden_dim=64, num_layers_bg=2, hidden_dim_bg=32, ): super().__init__(opt) self.num_layers = num_layers self.hidden_dim = hidden_dim self.encoder = tcnn.Encoding( n_input_dims=3, encoding_config={ "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "interpolation": "Smoothstep", "per_level_scale": np.exp2(np.log2(2048 * self.bound / 16) / (16 - 1)), }, dtype=torch.float32, # ENHANCE: default float16 seems unstable... ) self.in_dim = self.encoder.n_output_dims # use torch MLP, as tcnn MLP doesn't impl second-order derivative self.sigma_net = MLP(self.in_dim, 4, hidden_dim, num_layers, bias=True) self.density_activation = trunc_exp if self.opt.density_activation == 'exp' else biased_softplus # background network if self.opt.bg_radius > 0: self.num_layers_bg = num_layers_bg self.hidden_dim_bg = hidden_dim_bg # use a very simple network to avoid it learning the prompt... self.encoder_bg, self.in_dim_bg = get_encoder('frequency', input_dim=3, multires=6) self.bg_net = MLP(self.in_dim_bg, 3, hidden_dim_bg, num_layers_bg, bias=True) else: self.bg_net = None def common_forward(self, x): # sigma enc = self.encoder((x + self.bound) / (2 * self.bound)).float() h = self.sigma_net(enc) sigma = self.density_activation(h[..., 0] + self.density_blob(x)) albedo = torch.sigmoid(h[..., 1:]) return sigma, albedo def normal(self, x): with torch.enable_grad(): with torch.cuda.amp.autocast(enabled=False): x.requires_grad_(True) sigma, albedo = self.common_forward(x) # query gradient normal = - torch.autograd.grad(torch.sum(sigma), x, create_graph=True)[0] # [N, 3] # normal = self.finite_difference_normal(x)
normal = safe_normalize(normal)
3
2023-10-11 04:06:20+00:00
16k
oracle/guardian-ai
guardian_ai/fairness/metrics/core.py
[ { "identifier": "EqualizedOddsScorer", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "class EqualizedOddsScorer(_ModelFairnessScorer):\n \"\"\"\n Measures the disparity of a model's true positive and false positive rates\n between subgroups and the rest of the subgroups.\n\n Th...
from guardian_ai.fairness.metrics.model import ( EqualizedOddsScorer, ErrorRateScorer, FalseDiscoveryRateScorer, FalseNegativeRateScorer, FalseOmissionRateScorer, FalsePositiveRateScorer, ModelStatisticalParityScorer, TheilIndexScorer, TruePositiveRateScorer, equalized_odds, error_rate, false_discovery_rate, false_negative_rate, false_omission_rate, false_positive_rate, model_statistical_parity, theil_index, true_positive_rate, ) from guardian_ai.utils.exception import GuardianAIValueError
12,843
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ """Core for fairness metrics""" fairness_scorers_dict = { # noqa N816 "statistical_parity": ModelStatisticalParityScorer, "TPR": TruePositiveRateScorer, "FPR": FalsePositiveRateScorer, "FNR": FalseNegativeRateScorer, "FOR": FalseOmissionRateScorer, "FDR": FalseDiscoveryRateScorer,
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ """Core for fairness metrics""" fairness_scorers_dict = { # noqa N816 "statistical_parity": ModelStatisticalParityScorer, "TPR": TruePositiveRateScorer, "FPR": FalsePositiveRateScorer, "FNR": FalseNegativeRateScorer, "FOR": FalseOmissionRateScorer, "FDR": FalseDiscoveryRateScorer,
"error_rate": ErrorRateScorer,
1
2023-10-09 09:48:50+00:00
16k
IST-DASLab/SparseFinetuning
llmfoundry/models/mpt/modeling_mpt.py
[ { "identifier": "attn_bias_shape", "path": "llmfoundry/models/layers/attention.py", "snippet": "def attn_bias_shape(attn_impl: str, n_heads: int, seq_len: int, alibi: bool,\n prefix_lm: bool, causal: bool, use_sequence_id: bool):\n if attn_impl == 'flash':\n return None\n ...
import math import warnings import torch import torch.nn as nn import torch.nn.functional as F from typing import Any, List, Mapping, MutableMapping, Optional, Tuple, Union from composer.metrics import (InContextLearningLMAccuracy, InContextLearningLMExpectedCalibrationError, InContextLearningMCExpectedCalibrationError, InContextLearningMultipleChoiceAccuracy, InContextLearningQAAccuracy) from composer.metrics.nlp import LanguageCrossEntropy, LanguagePerplexity from composer.models import HuggingFaceModel from composer.utils import dist from omegaconf import DictConfig from omegaconf import OmegaConf as om from transformers import PreTrainedModel, PreTrainedTokenizerBase from transformers.modeling_outputs import (BaseModelOutputWithPast, CausalLMOutputWithPast) from llmfoundry.models.layers.attention import attn_bias_shape, build_attn_bias from llmfoundry.models.layers.blocks import MPTBlock from llmfoundry.models.layers.custom_embedding import SharedEmbedding from llmfoundry.models.layers.fc import FC_CLASS_REGISTRY as FC_CLASS_REGISTRY from llmfoundry.models.layers.ffn import \ FFN_CLASS_REGISTRY as FFN_CLASS_REGISTRY from llmfoundry.models.layers.ffn import MPTMLP as MPTMLP from llmfoundry.models.layers.ffn import build_ffn as build_ffn from llmfoundry.models.layers.norm import NORM_CLASS_REGISTRY from llmfoundry.models.mpt.configuration_mpt import MPTConfig from llmfoundry.models.utils.adapt_tokenizer import ( AutoTokenizerForMOD, # type: ignore (see note), adapt_tokenizer_for_denoising, # type: ignore (see note) ) from llmfoundry.models.utils.hf_prefixlm_converter import ( add_bidirectional_mask_if_missing, # type: ignore (see note) convert_hf_causal_lm_to_prefix_lm, # type: ignore (see note) ) from llmfoundry.models.utils.meta_init_context import \ init_empty_weights # type: ignore (see note) from llmfoundry.models.utils.param_init_fns import ( generic_param_init_fn_, # type: ignore (see note) MODEL_INIT_REGISTRY, ) from llmfoundry.models.layers.flash_attn_triton import flash_attn_func as flash_attn_func from flash_attn.losses.cross_entropy import CrossEntropyLoss as FusedCrossEntropyLoss # type: ignore # isort: skip
11,429
**kwargs: Any, ): if inputs_embeds is not None: raise NotImplementedError( 'inputs_embeds is not implemented for MPT yet') attention_mask = kwargs['attention_mask'].bool() if attention_mask[:, -1].sum() != attention_mask.shape[0]: raise NotImplementedError( 'MPT does not support generation with right padding.') if self.transformer.attn_uses_sequence_id and self.training: sequence_id = torch.zeros_like(input_ids[:1]) else: sequence_id = None if past_key_values is not None: input_ids = input_ids[:, -1].unsqueeze(-1) if self.transformer.prefix_lm: # Leverage a convenience of sequential generation! prefix_mask = torch.ones_like(attention_mask) # This requires that we're using the cache if kwargs.get('use_cache') == False: raise NotImplementedError( 'MPT with prefix_lm=True does not support use_cache=False.') else: prefix_mask = None return { 'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True), } @staticmethod def _reorder_cache(past_key_values: List[Tuple[torch.Tensor, torch.Tensor]], beam_idx: torch.LongTensor): """Used by HuggingFace generate when using beam search with kv-caching. See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133 for an example in transformers. """ reordered_past = [] for layer_past in past_key_values: reordered_past += [ tuple( past_state.index_select(0, beam_idx) for past_state in layer_past) ] return reordered_past class ComposerMPTCausalLM(HuggingFaceModel): def __init__( self, om_model_config: DictConfig, tokenizer: Optional[PreTrainedTokenizerBase] = None, ): resolved_om_model_config = om.to_container(om_model_config, resolve=True) hf_config = MPTConfig.from_dict(resolved_om_model_config) model = MPTForCausalLM(hf_config) train_metrics = [LanguageCrossEntropy(), LanguagePerplexity()] eval_metrics = [ LanguageCrossEntropy(), LanguagePerplexity(), InContextLearningLMAccuracy(), InContextLearningMultipleChoiceAccuracy(), InContextLearningQAAccuracy(), InContextLearningLMExpectedCalibrationError(), InContextLearningMCExpectedCalibrationError(), ] super().__init__( model=model, tokenizer=tokenizer, use_logits=True, metrics=train_metrics, eval_metrics=eval_metrics, shift_labels=True, allow_embedding_resizing=True, ) self.n_active_params = sum(p.numel() for p in self.parameters()) loss_fn_config = om_model_config.get('loss_fn', 'fused_crossentropy') if loss_fn_config == 'fused_crossentropy': try: if hf_config.verbose > 1: warnings.warn('Using Fused Cross Entropy Loss.') self.loss_fn = FusedCrossEntropyLoss(ignore_index=-100) except: raise ValueError( 'Fused Cross Entropy is not installed. Either (1) have a CUDA-compatible GPU ' + 'and `pip install .[gpu]` if installing from source or `pip install xentropy-cuda-lib@git+https://github.com/HazyResearch/flash-attention.git@v1.0.3#subdirectory=csrc/xentropy` ' + 'if installing from pypi, or (2) set your config model.loss_fn=torch_crossentropy.' ) elif loss_fn_config == 'torch_crossentropy': self.loss_fn = nn.CrossEntropyLoss(ignore_index=-100) else: raise ValueError( f'Specified loss_fn={self.loss_fn} not recognized. `loss_fn` must be one of [`fused_crossentropy`, `torch_crossentropy`].' ) def get_targets(self, batch: Mapping): targets = torch.roll(batch['labels'], shifts=-1) targets[:, -1] = -100 return targets def forward(self, batch: MutableMapping): if self.model.transformer.prefix_lm:
# Copyright 2022 MosaicML LLM Foundry authors # SPDX-License-Identifier: Apache-2.0 """A simple, flexible implementation of a GPT model. Inspired by https://github.com/karpathy/minGPT/blob/master/mingpt/model.py """ # NOTE: All utils are imported directly even if unused so that # HuggingFace can detect all the needed files to copy into its modules folder. # Otherwise, certain modules are missing. # isort: off try: except: pass # isort: on class MPTPreTrainedModel(PreTrainedModel): config_class = MPTConfig base_model_prefix = 'model' _no_split_modules = ['MPTBlock'] class MPTModel(MPTPreTrainedModel): def __init__(self, config: MPTConfig): config._validate_config() super().__init__(config) self.attn_impl = config.attn_config['attn_impl'] self.prefix_lm = config.attn_config['prefix_lm'] self.attn_uses_sequence_id = config.attn_config['attn_uses_sequence_id'] self.alibi = config.attn_config['alibi'] self.alibi_bias_max = config.attn_config['alibi_bias_max'] self.learned_pos_emb = config.learned_pos_emb if config.init_device == 'mixed': if dist.get_local_rank() == 0: config.init_device = 'cpu' else: config.init_device = 'meta' if config.norm_type.lower() not in NORM_CLASS_REGISTRY.keys(): norm_options = ' | '.join(NORM_CLASS_REGISTRY.keys()) raise NotImplementedError( f'Requested norm type ({config.norm_type}) is not implemented within this repo (Options: {norm_options}).' ) norm_class = NORM_CLASS_REGISTRY[config.norm_type.lower()] # CogView (https://arxiv.org/abs/2105.13290) and GLM-130B (https://arxiv.org/abs/2210.02414) # both report this helping with stabilizing training self.embedding_fraction = config.embedding_fraction self.wte = SharedEmbedding(config.vocab_size, config.d_model, device=config.init_device) if self.learned_pos_emb: self.wpe = torch.nn.Embedding(config.max_seq_len, config.d_model, device=config.init_device) self.emb_drop = nn.Dropout(config.emb_pdrop) self.blocks = nn.ModuleList([ MPTBlock( device=config.init_device, **config.to_dict(), ) for _ in range(config.n_layers) ]) self.norm_f = norm_class(config.d_model, device=config.init_device) if config.init_device != 'meta': print( f'You are using {config.init_device=}, but you can also use config.init_device="meta" with Composer + FSDP for fast initialization.' 
) self.apply(self.param_init_fn) self.is_causal = not self.prefix_lm # define attn mask self._attn_bias_initialized = False self.attn_bias = None self.attn_bias_shape = attn_bias_shape( self.attn_impl, config.n_heads, config.max_seq_len, self.alibi, prefix_lm=self.prefix_lm, causal=self.is_causal, use_sequence_id=self.attn_uses_sequence_id, ) if config.no_bias: for module in self.modules(): if hasattr(module, 'bias') and isinstance( module.bias, nn.Parameter): if config.verbose: warnings.warn( f'Removing bias ({module.bias}) from {module}.') module.register_parameter('bias', None) # Print verbose info if config.verbose and config.verbose > 2: print(self) if 'verbose' not in self.config.init_config: self.config.init_config['verbose'] = self.config.verbose if self.config.init_config['verbose'] > 1: init_fn_name = self.config.init_config['name'] warnings.warn(f'Using {init_fn_name} initialization.') def get_input_embeddings(self): return self.wte def set_input_embeddings(self, value: nn.Embedding): self.wte = value @torch.no_grad() def _attn_bias( self, device: torch.device, dtype: torch.dtype, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, ): if not self._attn_bias_initialized: if self.attn_bias_shape: self.attn_bias = torch.zeros(self.attn_bias_shape, device=device, dtype=dtype) self.attn_bias = build_attn_bias( self.attn_impl, self.attn_bias, self.config.n_heads, self.config.max_seq_len, causal=self.is_causal, alibi=self.alibi, alibi_bias_max=self.alibi_bias_max, ) self._attn_bias_initialized = True # flash does not support prefix_lm and will incorporate any # attention_mask inside the attention module if self.attn_impl == 'flash': return self.attn_bias, attention_mask if self.attn_bias is not None: # .to(*args, **kwargs) is a no-op if tensor is already on # specified device or of specificed dtype self.attn_bias = self.attn_bias.to(dtype=dtype, device=device) attn_bias = self.attn_bias # If using torch or triton, we incorporate the prefix_mask (if appropriate) if self.prefix_lm: assert isinstance(attn_bias, torch.Tensor) # pyright assert isinstance(prefix_mask, torch.Tensor) # pyright attn_bias = self._apply_prefix_mask(attn_bias, prefix_mask) # If using torch or triton, we incorporate sequence_id (if appropriate) if self.attn_uses_sequence_id and sequence_id is not None: assert isinstance(attn_bias, torch.Tensor) # pyright attn_bias = self._apply_sequence_id(attn_bias, sequence_id) # If using torch or triton, we incorporate attention_mask. This will output # None in place of attention_mask since it will not be further needed in the # attention modules. 
if attention_mask is not None: s_k = attention_mask.shape[-1] if attn_bias is None: attn_bias = torch.zeros((1, 1, 1, s_k), device=device, dtype=dtype) else: # clamp to 0 necessary for torch 2.0 compile() _s_k = max(0, attn_bias.size(-1) - s_k) attn_bias = attn_bias[:, :, :, _s_k:] if prefix_mask is not None and (attention_mask.shape != prefix_mask.shape): raise ValueError( f'attention_mask shape={attention_mask.shape} ' + f'and prefix_mask shape={prefix_mask.shape} are not equal.') min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill( ~attention_mask.view(-1, 1, 1, s_k), min_val) return attn_bias, None def _apply_prefix_mask(self, attn_bias: torch.Tensor, prefix_mask: torch.Tensor): s_k, s_q = attn_bias.shape[-2:] if (s_k != self.config.max_seq_len) or (s_q != self.config.max_seq_len): raise ValueError( 'attn_bias does not match the expected shape. ' + f'The last two dimensions should both be {self.config.max_length} ' + f'but are {s_k} and {s_q}.') seq_len = prefix_mask.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError( f'prefix_mask sequence length cannot exceed max_seq_len={self.config.max_seq_len}' ) # select seq_len subset of attn mask attn_bias = attn_bias[..., :seq_len, :seq_len] # Mix the causal max and the bidirectional mask to get the full # allowable attention (i.e. full = not accounting for padding yet) causal = torch.tril( torch.ones((seq_len, seq_len), dtype=torch.bool, device=prefix_mask.device)).view(1, 1, seq_len, seq_len) prefix = prefix_mask.view(-1, 1, 1, seq_len) cannot_attend = ~torch.logical_or(causal, prefix.bool()) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def _apply_sequence_id(self, attn_bias: torch.Tensor, sequence_id: torch.LongTensor): seq_len = sequence_id.shape[-1] if seq_len > self.config.max_seq_len: raise ValueError( f'sequence_id sequence length cannot exceed max_seq_len={self.config.max_seq_len}' ) # select seq_len subset of attn mask attn_bias = attn_bias[..., :seq_len, :seq_len] # Restrict attention to tokens that share the same value # in sequence_id cannot_attend = torch.logical_not( torch.eq( sequence_id.view(-1, seq_len, 1), sequence_id.view(-1, 1, seq_len), )).unsqueeze(1) min_val = torch.finfo(attn_bias.dtype).min attn_bias = attn_bias.masked_fill(cannot_attend, min_val) return attn_bias def forward( self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, use_cache: Optional[bool] = None, inputs_embeds: Optional[torch.Tensor] = None, ): return_dict = (return_dict if return_dict is not None else self.config.return_dict) use_cache = (use_cache if use_cache is not None else self.config.use_cache) if attention_mask is not None: attention_mask = attention_mask.bool( ) # type: ignore (TODO to figure out the right type here) if prefix_mask is not None: prefix_mask = prefix_mask.bool( ) # type: ignore (TODO to figure out the right type here) # These args are passed in by keyword in huggingface's generate function # https://github.com/huggingface/transformers/blob/68287689f2f0d8b7063c400230b3766987abf18d/src/transformers/generation/utils.py#L2201-L2206 # but have not yet been fully implemented in MPTModel if not return_dict: raise 
NotImplementedError( 'return_dict False is not implemented yet for MPT') if output_attentions: if self.attn_impl != 'torch': raise NotImplementedError( 'output_attentions is not implemented for MPT when using attn_impl `flash` or `triton`.' ) if (attention_mask is not None and attention_mask[:, 0].sum() != attention_mask.shape[0] and self.training): raise NotImplementedError( 'MPT does not support training with left padding.') if self.prefix_lm and prefix_mask is None: raise ValueError( 'prefix_mask is a required argument when MPT is configured with prefix_lm=True.' ) # Raise a not implemented error if input_embeds is not None (this is an arg in huggingface transformers and we need to support it for PEFT) if inputs_embeds is not None: raise NotImplementedError( 'inputs_embeds is not implemented for MPT.') if self.training: if self.attn_uses_sequence_id and sequence_id is None: raise ValueError( 'sequence_id is a required argument when MPT is configured with attn_uses_sequence_id=True ' + 'and the model is in train mode.') elif (self.attn_uses_sequence_id is False) and (sequence_id is not None): warnings.warn( 'MPT received non-None input for `sequence_id` but is configured with attn_uses_sequence_id=False. ' + 'This input will be ignored. If you want the model to use `sequence_id`, set attn_uses_sequence_id to True.' ) S = input_ids.size(1) assert ( S <= self.config.max_seq_len ), f'Cannot forward input with seq_len={S}, this model only supports seq_len<={self.config.max_seq_len}' tok_emb = self.wte(input_ids) # type: ignore if self.learned_pos_emb: past_position = 0 if past_key_values is not None: if len(past_key_values) != self.config.n_layers: raise ValueError( f'past_key_values must provide a past_key_value for each attention ' + f'layer in the network ({len(past_key_values)=}; {self.config.n_layers=}).' ) # For attn_impl: triton and flash the past key tensor spec is (batch, seq, dim). # For attn_impl: torch the past key tensor spec is (batch, heads, head_dim, seq). # Here we shift position embedding using the `seq` dim of the past key past_position = past_key_values[0][0].size(1) if self.attn_impl == 'torch': past_position = past_key_values[0][0].size(3) if S + past_position > self.config.max_seq_len: raise ValueError( f'Cannot forward input with past sequence length {past_position} and current sequence length ' + f'{S + 1}, this model only supports total sequence length <= {self.config.max_seq_len}.' 
) pos = torch.arange( past_position, S + past_position, dtype=torch.long, device=input_ids.device, ).unsqueeze(0) if attention_mask is not None: # adjust the position indices to account for padding tokens pos = torch.clamp( pos - torch.cumsum((~attention_mask).to(torch.int32), dim=1)[:, past_position:], min=0, ) pos_emb = self.wpe(pos) # type: ignore x = tok_emb + pos_emb else: # ALiBi and NoPE use this path (RoPE will also use this path if / when enabled) x = tok_emb if self.embedding_fraction == 1: x = self.emb_drop(x) # type: ignore else: # this implementation is proposed on page 7 of the GLM-130B paper https://arxiv.org/abs/2210.02414 x_shrunk = (x * self.embedding_fraction) + ( x.detach() * (1 - self.embedding_fraction)) assert isinstance(self.emb_drop, nn.Module) # pyright x = self.emb_drop(x_shrunk) attn_bias, attention_mask = self._attn_bias( device=x.device, dtype=torch.float32, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, ) # initialize the past key values cache if it should be used if use_cache and past_key_values is None: past_key_values = [() for _ in range(self.config.n_layers) ] # type: ignore all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None for b_idx, block in enumerate(self.blocks): # type: ignore if output_hidden_states: assert all_hidden_states is not None # pyright all_hidden_states = all_hidden_states + (x,) past_key_value = (past_key_values[b_idx] if past_key_values is not None else None) x, attn_weights, past_key_value = block( x, past_key_value=past_key_value, attn_bias=attn_bias, attention_mask=attention_mask, is_causal=self.is_causal, ) if past_key_values is not None: past_key_values[b_idx] = past_key_value if output_attentions: assert all_self_attns is not None # pyright all_self_attns = all_self_attns + (attn_weights,) x = self.norm_f(x) # type: ignore # add hidden states from the last decoder layer if output_hidden_states: assert all_hidden_states is not None # pyright all_hidden_states = all_hidden_states + (x,) return BaseModelOutputWithPast( last_hidden_state=x, past_key_values=past_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) # Param Initialization, needed for device='meta' fast initialization def param_init_fn(self, module: nn.Module): init_fn_name = self.config.init_config['name'] MODEL_INIT_REGISTRY[init_fn_name]( module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config, ) # FSDP Wrap function def fsdp_wrap_fn(self, module: nn.Module): return isinstance(module, MPTBlock) # Activation Checkpointing def activation_checkpointing_fn(self, module: nn.Module): return isinstance(module, MPTBlock) class MPTForCausalLM(MPTPreTrainedModel): def __init__(self, config: MPTConfig): super().__init__(config) if not config.tie_word_embeddings: raise ValueError( 'MPTForCausalLM only supports tied word embeddings') print(f'Instantiating an MPTForCausalLM model from {__file__}') self.transformer: MPTModel = MPTModel(config) for child in self.transformer.children(): if isinstance(child, torch.nn.ModuleList): continue if isinstance(child, torch.nn.Module): child._fsdp_wrap = True # enables scaling output logits; similar to a softmax "temperature" # PaLM paper uses scale 1/sqrt(config.d_model) self.logit_scale = None if config.logit_scale is not None: logit_scale = config.logit_scale if isinstance(logit_scale, str): if logit_scale == 'inv_sqrt_d_model': logit_scale = 1 / math.sqrt(config.d_model) else: raise 
ValueError( f"{logit_scale=} is not recognized as an option; use numeric value or 'inv_sqrt_d_model'." ) self.logit_scale = logit_scale def get_input_embeddings(self): return self.transformer.wte def set_input_embeddings(self, value: Union[SharedEmbedding, nn.Embedding]): self.transformer.wte = value def get_output_embeddings(self): return self.transformer.wte def set_output_embeddings(self, new_embeddings: Union[SharedEmbedding, nn.Embedding]): self.transformer.wte = new_embeddings def set_decoder(self, decoder: MPTModel): self.transformer = decoder def get_decoder(self): return self.transformer def forward( self, input_ids: torch.LongTensor, past_key_values: Optional[List[Tuple[torch.FloatTensor]]] = None, attention_mask: Optional[torch.ByteTensor] = None, prefix_mask: Optional[torch.ByteTensor] = None, sequence_id: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, return_dict: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, use_cache: Optional[bool] = None, inputs_embeds: Optional[torch.FloatTensor] = None, ): return_dict = (return_dict if return_dict is not None else self.config.return_dict) use_cache = (use_cache if use_cache is not None else self.config.use_cache) # if input_embeds is not none, raise a not implemented error if inputs_embeds is not None: raise NotImplementedError( 'inputs_embeds has to be None (for hf/peft support).') # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, prefix_mask=prefix_mask, sequence_id=sequence_id, return_dict=return_dict, output_attentions=output_attentions, output_hidden_states=output_hidden_states, use_cache=use_cache, ) # move outputs to same device as weights for token embedding # needed to support HF `device_map` logits = self.transformer.wte( outputs.last_hidden_state.to(self.transformer.wte.weight.device), True, ) if self.logit_scale is not None: if self.logit_scale == 0: warnings.warn( f'Multiplying logits by {self.logit_scale=}. This will produce uniform (uninformative) outputs.' 
) logits *= self.logit_scale loss = None if labels is not None: _labels = torch.roll(labels, shifts=-1) _labels[:, -1] = -100 loss = F.cross_entropy( logits.view(-1, logits.size(-1)), _labels.to(logits.device).view(-1), ) return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Param Initialization, needed for device='meta' fast initialization def param_init_fn(self, module: nn.Module): init_fn_name = self.config.init_config['name'] MODEL_INIT_REGISTRY[init_fn_name]( module=module, n_layers=self.config.n_layers, d_model=self.config.d_model, **self.config.init_config, ) # FSDP Wrap function def fsdp_wrap_fn(self, module: nn.Module): return isinstance(module, MPTBlock) # Activation Checkpointing def activation_checkpointing_fn(self, module: nn.Module): return isinstance(module, MPTBlock) def prepare_inputs_for_generation( self, input_ids: torch.Tensor, past_key_values: Optional[List[Tuple[torch.Tensor, torch.Tensor]]] = None, inputs_embeds: Optional[torch.Tensor] = None, **kwargs: Any, ): if inputs_embeds is not None: raise NotImplementedError( 'inputs_embeds is not implemented for MPT yet') attention_mask = kwargs['attention_mask'].bool() if attention_mask[:, -1].sum() != attention_mask.shape[0]: raise NotImplementedError( 'MPT does not support generation with right padding.') if self.transformer.attn_uses_sequence_id and self.training: sequence_id = torch.zeros_like(input_ids[:1]) else: sequence_id = None if past_key_values is not None: input_ids = input_ids[:, -1].unsqueeze(-1) if self.transformer.prefix_lm: # Leverage a convenience of sequential generation! prefix_mask = torch.ones_like(attention_mask) # This requires that we're using the cache if kwargs.get('use_cache') == False: raise NotImplementedError( 'MPT with prefix_lm=True does not support use_cache=False.') else: prefix_mask = None return { 'input_ids': input_ids, 'attention_mask': attention_mask, 'prefix_mask': prefix_mask, 'sequence_id': sequence_id, 'past_key_values': past_key_values, 'use_cache': kwargs.get('use_cache', True), } @staticmethod def _reorder_cache(past_key_values: List[Tuple[torch.Tensor, torch.Tensor]], beam_idx: torch.LongTensor): """Used by HuggingFace generate when using beam search with kv-caching. See https://github.com/huggingface/transformers/blob/3ec7a47664ebe40c40f4b722f6bb1cd30c3821ec/src/transformers/models/gpt2/modeling_gpt2.py#L1122-L1133 for an example in transformers. 
""" reordered_past = [] for layer_past in past_key_values: reordered_past += [ tuple( past_state.index_select(0, beam_idx) for past_state in layer_past) ] return reordered_past class ComposerMPTCausalLM(HuggingFaceModel): def __init__( self, om_model_config: DictConfig, tokenizer: Optional[PreTrainedTokenizerBase] = None, ): resolved_om_model_config = om.to_container(om_model_config, resolve=True) hf_config = MPTConfig.from_dict(resolved_om_model_config) model = MPTForCausalLM(hf_config) train_metrics = [LanguageCrossEntropy(), LanguagePerplexity()] eval_metrics = [ LanguageCrossEntropy(), LanguagePerplexity(), InContextLearningLMAccuracy(), InContextLearningMultipleChoiceAccuracy(), InContextLearningQAAccuracy(), InContextLearningLMExpectedCalibrationError(), InContextLearningMCExpectedCalibrationError(), ] super().__init__( model=model, tokenizer=tokenizer, use_logits=True, metrics=train_metrics, eval_metrics=eval_metrics, shift_labels=True, allow_embedding_resizing=True, ) self.n_active_params = sum(p.numel() for p in self.parameters()) loss_fn_config = om_model_config.get('loss_fn', 'fused_crossentropy') if loss_fn_config == 'fused_crossentropy': try: if hf_config.verbose > 1: warnings.warn('Using Fused Cross Entropy Loss.') self.loss_fn = FusedCrossEntropyLoss(ignore_index=-100) except: raise ValueError( 'Fused Cross Entropy is not installed. Either (1) have a CUDA-compatible GPU ' + 'and `pip install .[gpu]` if installing from source or `pip install xentropy-cuda-lib@git+https://github.com/HazyResearch/flash-attention.git@v1.0.3#subdirectory=csrc/xentropy` ' + 'if installing from pypi, or (2) set your config model.loss_fn=torch_crossentropy.' ) elif loss_fn_config == 'torch_crossentropy': self.loss_fn = nn.CrossEntropyLoss(ignore_index=-100) else: raise ValueError( f'Specified loss_fn={self.loss_fn} not recognized. `loss_fn` must be one of [`fused_crossentropy`, `torch_crossentropy`].' ) def get_targets(self, batch: Mapping): targets = torch.roll(batch['labels'], shifts=-1) targets[:, -1] = -100 return targets def forward(self, batch: MutableMapping): if self.model.transformer.prefix_lm:
add_bidirectional_mask_if_missing(batch)
12
2023-10-09 15:32:15+00:00
16k
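The record above ends with MPT's causal-LM loss, which rolls the labels one position to the left before cross-entropy so each logit is scored against the next token, with the final position masked by -100. A minimal standalone sketch of that pattern (illustrative only, not the repo's code):

import torch
import torch.nn.functional as F

def shifted_lm_loss(logits: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    # Shift labels left so position t is scored against token t+1, and mark
    # the last position with -100 so F.cross_entropy ignores it.
    targets = torch.roll(labels, shifts=-1, dims=-1)
    targets[:, -1] = -100
    return F.cross_entropy(
        logits.view(-1, logits.size(-1)),
        targets.view(-1),
        ignore_index=-100,
    )

# Tiny usage example with a 10-token vocabulary.
logits = torch.randn(2, 5, 10)
labels = torch.randint(0, 10, (2, 5))
print(shifted_lm_loss(logits, labels))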
jiangjiechen/auction-arena
auction_workflow.py
[ { "identifier": "Auctioneer", "path": "src/auctioneer_base.py", "snippet": "class Auctioneer(BaseModel):\n enable_discount: bool = False\n items: List[Item] = []\n cur_item: Item = None\n highest_bidder: Bidder = None\n highest_bid: int = -1\n bidding_history = defaultdict(list) # hist...
import os import time import gradio as gr import ujson as json import traceback import argparse from typing import List from tqdm import tqdm from src.auctioneer_base import Auctioneer from src.bidder_base import Bidder, bidders_to_chatbots, bidding_multithread from utils import trace_back from src.item_base import create_items from src.bidder_base import create_bidders from transformers import GPT2TokenizerFast
12,741
LOG_DIR = 'logs' enable_gr = gr.update(interactive=True) disable_gr = gr.update(interactive=False) def monitor_all(bidder_list: List[Bidder]): return sum([bidder.to_monitors() for bidder in bidder_list], [])
LOG_DIR = 'logs' enable_gr = gr.update(interactive=True) disable_gr = gr.update(interactive=False) def monitor_all(bidder_list: List[Bidder]): return sum([bidder.to_monitors() for bidder in bidder_list], [])
def parse_bid_price(auctioneer: Auctioneer, bidder: Bidder, msg: str):
0
2023-10-08 09:30:57+00:00
16k
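This record's code defines monitor_all, which flattens per-bidder monitor lists with sum(..., []). A small self-contained sketch of the same flattening idea; the Bidder class below is a hypothetical stand-in, not the repo's src.bidder_base.Bidder:

from itertools import chain
from typing import List

class Bidder:  # hypothetical stand-in for the repo's Bidder class
    def __init__(self, name: str):
        self.name = name

    def to_monitors(self) -> List[str]:
        return [f"{self.name}: budget", f"{self.name}: profit"]

def monitor_all(bidder_list: List[Bidder]) -> List[str]:
    # sum(lists, []) and chain.from_iterable both concatenate the per-bidder
    # lists into one flat list; chain avoids repeated intermediate copies.
    return list(chain.from_iterable(b.to_monitors() for b in bidder_list))

print(monitor_all([Bidder("alice"), Bidder("bob")]))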
SH1ROd/Bert-VITS2-Integration-train-txt-infer
train_ms.py
[ { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from...
import os import json import argparse import itertools import math import torch import shutil import torch.multiprocessing as mp import torch.distributed as dist import logging import commons import utils from torch import nn, optim from torch.nn import functional as F from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from torch.nn.parallel import DistributedDataParallel as DDP from torch.cuda.amp import autocast, GradScaler from tqdm import tqdm from data_utils import ( TextAudioSpeakerLoader, TextAudioSpeakerCollate, DistributedBucketSampler ) from models import ( SynthesizerTrn, MultiPeriodDiscriminator, DurationDiscriminator, ) from losses import ( generator_loss, discriminator_loss, feature_loss, kl_loss ) from mel_processing import mel_spectrogram_torch, spec_to_mel_torch from text.symbols import symbols
10,901
_, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=not hps.cont) epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 else: _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, optim_g, True) _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, optim_d, True) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval],role=role) else: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, role=role) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, role): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax 
) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
logging.getLogger('numba').setLevel(logging.WARNING) torch.backends.cudnn.benchmark = True torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True torch.set_float32_matmul_precision('medium') global_step = 0 def main(): """Assume Single Node Multi GPUs Training Only""" assert torch.cuda.is_available(), "CPU training is not allowed." n_gpus = torch.cuda.device_count() os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '65280' hps = utils.get_hparams() role='' for t in hps.data.spk2id.items(): role=t[0] if not hps.cont: folder_path = f"./logs/{role}" if not os.path.exists(folder_path): os.makedirs(folder_path) print(f"文件夹 '{role}' 已创建在 './logs/' 目录下。") else: print(f"文件夹 '{role}' 已经存在于 './logs/' 目录下。") shutil.copy('./pretrained_models/D_0.pth',f'./logs/{role}/D_0.pth') shutil.copy('./pretrained_models/G_0.pth',f'./logs/{role}/G_0.pth') shutil.copy('./pretrained_models/DUR_0.pth',f'./logs/{role}/DUR_0.pth') mp.spawn(run, nprocs=n_gpus, args=(n_gpus, hps, role)) def run(rank, n_gpus, hps, role): global global_step if rank == 0: logger = utils.get_logger(hps.model_dir) logger.info(hps) utils.check_git_hash(hps.model_dir) writer = SummaryWriter(log_dir=hps.model_dir) writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval")) dist.init_process_group(backend= 'gloo' if os.name == 'nt' else 'nccl', init_method='env://', world_size=n_gpus, rank=rank) torch.manual_seed(hps.train.seed) torch.cuda.set_device(rank) train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data) train_sampler = DistributedBucketSampler( train_dataset, hps.train.batch_size, [32, 300, 400, 500, 600, 700, 800, 900, 1000], num_replicas=n_gpus, rank=rank, shuffle=True) collate_fn = TextAudioSpeakerCollate() train_loader = DataLoader(train_dataset, num_workers=2, shuffle=False, pin_memory=True, collate_fn=collate_fn, batch_sampler=train_sampler) if rank == 0: eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data) eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False, collate_fn=collate_fn) if "use_noise_scaled_mas" in hps.model.keys() and hps.model.use_noise_scaled_mas == True: print("Using noise scaled MAS for VITS2") use_noise_scaled_mas = True mas_noise_scale_initial = 0.01 noise_scale_delta = 2e-6 else: print("Using normal MAS for VITS1") use_noise_scaled_mas = False mas_noise_scale_initial = 0.0 noise_scale_delta = 0.0 if "use_duration_discriminator" in hps.model.keys() and hps.model.use_duration_discriminator == True: print("Using duration discriminator for VITS2") use_duration_discriminator = True net_dur_disc = DurationDiscriminator( hps.model.hidden_channels, hps.model.hidden_channels, 3, 0.1, gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0, ).cuda(rank) if "use_spk_conditioned_encoder" in hps.model.keys() and hps.model.use_spk_conditioned_encoder == True: if hps.data.n_speakers == 0: raise ValueError("n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model") use_spk_conditioned_encoder = True else: print("Using normal encoder for VITS1") use_spk_conditioned_encoder = False net_g = SynthesizerTrn( len(symbols), hps.data.filter_length // 2 + 1, hps.train.segment_size // hps.data.hop_length, n_speakers=hps.data.n_speakers, mas_noise_scale_initial = mas_noise_scale_initial, noise_scale_delta = noise_scale_delta, **hps.model).cuda(rank) freeze_enc = getattr(hps.model, "freeze_enc", False) if freeze_enc: print("freeze 
encoder !!!") for param in net_g.enc_p.parameters(): param.requires_grad = False net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(rank) optim_g = torch.optim.AdamW( filter(lambda p: p.requires_grad, net_g.parameters()), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) optim_d = torch.optim.AdamW( net_d.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) if net_dur_disc is not None: optim_dur_disc = torch.optim.AdamW( net_dur_disc.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps) else: optim_dur_disc = None net_g = DDP(net_g, device_ids=[rank], find_unused_parameters=True) net_d = DDP(net_d, device_ids=[rank], find_unused_parameters=True) if net_dur_disc is not None: net_dur_disc = DDP(net_dur_disc, device_ids=[rank], find_unused_parameters=True) pretrain_dir = None if pretrain_dir is None: try: if net_dur_disc is not None: _, optim_dur_disc, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"), net_dur_disc, optim_dur_disc, skip_optimizer=not hps.cont) _, optim_g, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"), net_g, optim_g, skip_optimizer=not hps.cont) _, optim_d, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"), net_d, optim_d, skip_optimizer=not hps.cont) epoch_str = max(epoch_str, 1) global_step = (epoch_str - 1) * len(train_loader) except Exception as e: print(e) epoch_str = 1 global_step = 0 else: _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "G_*.pth"), net_g, optim_g, True) _, _, _, epoch_str = utils.load_checkpoint(utils.latest_checkpoint_path(pretrain_dir, "D_*.pth"), net_d, optim_d, True) scheduler_g = torch.optim.lr_scheduler.ExponentialLR(optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) scheduler_d = torch.optim.lr_scheduler.ExponentialLR(optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2) if net_dur_disc is not None: scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str-2) else: scheduler_dur_disc = None scaler = GradScaler(enabled=hps.train.fp16_run) for epoch in range(epoch_str, hps.train.epochs + 1): if rank == 0: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, eval_loader], logger, [writer, writer_eval],role=role) else: train_and_evaluate(rank, epoch, hps, [net_g, net_d, net_dur_disc], [optim_g, optim_d, optim_dur_disc], [scheduler_g, scheduler_d, scheduler_dur_disc], scaler, [train_loader, None], None, None, role=role) scheduler_g.step() scheduler_d.step() if net_dur_disc is not None: scheduler_dur_disc.step() def train_and_evaluate(rank, epoch, hps, nets, optims, schedulers, scaler, loaders, logger, writers, role): net_g, net_d, net_dur_disc = nets optim_g, optim_d, optim_dur_disc = optims scheduler_g, scheduler_d, scheduler_dur_disc = schedulers train_loader, eval_loader = loaders if writers is not None: writer, writer_eval = writers train_loader.batch_sampler.set_epoch(epoch) global global_step net_g.train() net_d.train() if net_dur_disc is not None: net_dur_disc.train() for batch_idx, (x, x_lengths, spec, spec_lengths, y, y_lengths, speakers, tone, language, bert) in tqdm(enumerate(train_loader)): if net_g.module.use_noise_scaled_mas: current_mas_noise_scale = 
net_g.module.mas_noise_scale_initial - net_g.module.noise_scale_delta * global_step net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0) x, x_lengths = x.cuda(rank, non_blocking=True), x_lengths.cuda(rank, non_blocking=True) spec, spec_lengths = spec.cuda(rank, non_blocking=True), spec_lengths.cuda(rank, non_blocking=True) y, y_lengths = y.cuda(rank, non_blocking=True), y_lengths.cuda(rank, non_blocking=True) speakers = speakers.cuda(rank, non_blocking=True) tone = tone.cuda(rank, non_blocking=True) language = language.cuda(rank, non_blocking=True) bert = bert.cuda(rank, non_blocking=True) with autocast(enabled=hps.train.fp16_run): y_hat, l_length, attn, ids_slice, x_mask, z_mask, \ (z, z_p, m_p, logs_p, m_q, logs_q), (hidden_x, logw, logw_) = net_g(x, x_lengths, spec, spec_lengths, speakers, tone, language, bert) mel = spec_to_mel_torch( spec, hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.mel_fmin, hps.data.mel_fmax) y_mel = commons.slice_segments(mel, ids_slice, hps.train.segment_size // hps.data.hop_length) y_hat_mel = mel_spectrogram_torch( y_hat.squeeze(1), hps.data.filter_length, hps.data.n_mel_channels, hps.data.sampling_rate, hps.data.hop_length, hps.data.win_length, hps.data.mel_fmin, hps.data.mel_fmax ) y = commons.slice_segments(y, ids_slice * hps.data.hop_length, hps.train.segment_size) # slice # Discriminator y_d_hat_r, y_d_hat_g, _, _ = net_d(y, y_hat.detach()) with autocast(enabled=False): loss_disc, losses_disc_r, losses_disc_g = discriminator_loss(y_d_hat_r, y_d_hat_g) loss_disc_all = loss_disc if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x.detach(), x_mask.detach(), logw.detach(), logw_.detach()) with autocast(enabled=False): # TODO: I think need to mean using the mask, but for now, just mean all loss_dur_disc, losses_dur_disc_r, losses_dur_disc_g = discriminator_loss(y_dur_hat_r, y_dur_hat_g) loss_dur_disc_all = loss_dur_disc optim_dur_disc.zero_grad() scaler.scale(loss_dur_disc_all).backward() scaler.unscale_(optim_dur_disc) grad_norm_dur_disc = commons.clip_grad_value_(net_dur_disc.parameters(), None) scaler.step(optim_dur_disc) optim_d.zero_grad() scaler.scale(loss_disc_all).backward() scaler.unscale_(optim_d) grad_norm_d = commons.clip_grad_value_(net_d.parameters(), None) scaler.step(optim_d) with autocast(enabled=hps.train.fp16_run): # Generator y_d_hat_r, y_d_hat_g, fmap_r, fmap_g = net_d(y, y_hat) if net_dur_disc is not None: y_dur_hat_r, y_dur_hat_g = net_dur_disc(hidden_x, x_mask, logw, logw_) with autocast(enabled=False): loss_dur = torch.sum(l_length.float()) loss_mel = F.l1_loss(y_mel, y_hat_mel) * hps.train.c_mel loss_kl = kl_loss(z_p, logs_q, m_p, logs_p, z_mask) * hps.train.c_kl
loss_fm = feature_loss(fmap_r, fmap_g)
8
2023-10-10 02:23:23+00:00
16k
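The training script in this record resumes its learning-rate schedules with ExponentialLR(..., last_epoch=epoch_str - 2). A minimal sketch of that resume pattern with made-up model, learning rate, and epoch values; when last_epoch is not -1, PyTorch expects each param group to carry an 'initial_lr' entry (normally restored together with the checkpointed optimizer), so the sketch sets it by hand:

import torch

model = torch.nn.Linear(4, 4)
optim = torch.optim.AdamW(model.parameters(), lr=2e-4)
epoch_str = 5  # pretend a checkpoint said training stopped after epoch 4

# 'initial_lr' must exist in each param group before constructing a scheduler
# with last_epoch != -1; loading real optimizer state would normally provide it.
for group in optim.param_groups:
    group.setdefault("initial_lr", group["lr"])

scheduler = torch.optim.lr_scheduler.ExponentialLR(
    optim, gamma=0.999, last_epoch=epoch_str - 2
)
print(scheduler.get_last_lr())  # learning rate the next epoch will start from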
sakemin/cog-musicgen-chord
audiocraft/modules/conditioners.py
[ { "identifier": "ChromaExtractor", "path": "audiocraft/modules/chroma.py", "snippet": "class ChromaExtractor(nn.Module):\n \"\"\"Chroma extraction and quantization.\n\n Args:\n sample_rate (int): Sample rate for the chroma extraction.\n n_chroma (int): Number of chroma bins for the c...
from collections import defaultdict from copy import deepcopy from dataclasses import dataclass, field from itertools import chain from pathlib import Path from num2words import num2words from transformers import RobertaTokenizer, T5EncoderModel, T5Tokenizer # type: ignore from torch import nn from torch.nn.utils.rnn import pad_sequence from .chroma import ChromaExtractor from .chord_chroma import ChordExtractor from .streaming import StreamingModule from .transformer import create_sin_embedding from ..data.audio import audio_read from ..data.audio_dataset import SegmentInfo from ..data.audio_utils import convert_audio from ..environment import AudioCraftEnvironment from ..quantization import ResidualVectorQuantizer from ..utils.autocast import TorchAutocast from ..utils.cache import EmbeddingCache from ..utils.utils import collate, hash_trick, length_to_mask, load_clap_state_dict, warn_once from .btc.utils import chords from demucs import pretrained from audiocraft.data.audio_dataset import AudioDataset from demucs.apply import apply_model from demucs.audio import convert_audio from demucs import pretrained from audiocraft.data.audio_dataset import AudioDataset from demucs.apply import apply_model from demucs.audio import convert_audio import logging import math import random import re import typing as tp import warnings import einops import spacy import torch import torch.nn.functional as F import numpy as np import laion_clap # type: ignore
13,735
if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). """ if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." 
total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None:
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. logger = logging.getLogger(__name__) TextCondition = tp.Optional[str] # a text condition can be a string or None (if doesn't exist) ConditionType = tp.Tuple[torch.Tensor, torch.Tensor] # condition, mask class WavCondition(tp.NamedTuple): wav: torch.Tensor length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] class WavChordTextCondition(tp.NamedTuple): wav: tp.Union[torch.Tensor,str,tp.List[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] bpm : tp.List[tp.Optional[tp.Union[int, float]]] = [] meter : tp.List[tp.Optional[int]] = [] class JointEmbedCondition(tp.NamedTuple): wav: torch.Tensor text: tp.List[tp.Optional[str]] length: torch.Tensor sample_rate: tp.List[int] path: tp.List[tp.Optional[str]] = [] seek_time: tp.List[tp.Optional[float]] = [] @dataclass class ConditioningAttributes: text: tp.Dict[str, tp.Optional[str]] = field(default_factory=dict) wav: tp.Dict[str, tp.Union[WavCondition,WavChordTextCondition]] = field(default_factory=dict) joint_embed: tp.Dict[str, JointEmbedCondition] = field(default_factory=dict) def __getitem__(self, item): return getattr(self, item) @property def text_attributes(self): return self.text.keys() @property def wav_attributes(self): return self.wav.keys() @property def joint_embed_attributes(self): return self.joint_embed.keys() @property def attributes(self): return { "text": self.text_attributes, "wav": self.wav_attributes, "joint_embed": self.joint_embed_attributes, } def to_flat_dict(self): return { **{f"text.{k}": v for k, v in self.text.items()}, **{f"wav.{k}": v for k, v in self.wav.items()}, **{f"joint_embed.{k}": v for k, v in self.joint_embed.items()} } @classmethod def from_flat_dict(cls, x): out = cls() for k, v in x.items(): kind, att = k.split(".") out[kind][att] = v return out class SegmentWithAttributes(SegmentInfo): """Base class for all dataclasses that are used for conditioning. All child classes should implement `to_condition_attributes` that converts the existing attributes to a dataclass of type ConditioningAttributes. """ def to_condition_attributes(self) -> ConditioningAttributes: raise NotImplementedError() def nullify_condition(condition: ConditionType, dim: int = 1): """Transform an input condition to a null condition. The way it is done by converting it to a single zero vector similarly to how it is done inside WhiteSpaceTokenizer and NoopTokenizer. Args: condition (ConditionType): A tuple of condition and mask (tuple[torch.Tensor, torch.Tensor]) dim (int): The dimension that will be truncated (should be the time dimension) WARNING!: dim should not be the batch dimension! Returns: ConditionType: A tuple of null condition and mask """ assert dim != 0, "dim cannot be the batch dimension!" assert isinstance(condition, tuple) and \ isinstance(condition[0], torch.Tensor) and \ isinstance(condition[1], torch.Tensor), "'nullify_condition' got an unexpected input type!" cond, mask = condition B = cond.shape[0] last_dim = cond.dim() - 1 out = cond.transpose(dim, last_dim) out = 0. 
* out[..., :1] out = out.transpose(dim, last_dim) mask = torch.zeros((B, 1), device=out.device).int() assert cond.dim() == out.dim() return out, mask def nullify_wav(cond: tp.Union[WavCondition,WavChordTextCondition]) -> tp.Union[WavCondition,WavChordTextCondition]: """Transform a WavCondition to a nullified WavCondition. It replaces the wav by a null tensor, forces its length to 0, and replaces metadata by dummy attributes. Args: cond (WavCondition): Wav condition with wav, tensor of shape [B, T]. Returns: WavCondition: Nullified wav condition. """ if not isinstance(cond, WavChordTextCondition): null_wav, _ = nullify_condition((cond.wav, torch.zeros_like(cond.wav)), dim=cond.wav.dim() - 1) return WavCondition( wav=null_wav, length=torch.tensor([0] * cond.wav.shape[0], device=cond.wav.device), sample_rate=cond.sample_rate, path=[None] * cond.wav.shape[0], seek_time=[None] * cond.wav.shape[0], ) else: return WavChordTextCondition( wav=['N']* len(cond.wav), length=torch.tensor([0] * len(cond.wav), device=cond.length.device), sample_rate=cond.sample_rate, path=[None], seek_time=[None], bpm = cond.bpm, meter = cond.meter ) def nullify_joint_embed(embed: JointEmbedCondition) -> JointEmbedCondition: """Nullify the joint embedding condition by replacing it by a null tensor, forcing its length to 0, and replacing metadata by dummy attributes. Args: cond (JointEmbedCondition): Joint embedding condition with wav and text, wav tensor of shape [B, C, T]. """ null_wav, _ = nullify_condition((embed.wav, torch.zeros_like(embed.wav)), dim=embed.wav.dim() - 1) return JointEmbedCondition( wav=null_wav, text=[None] * len(embed.text), length=torch.LongTensor([0]).to(embed.wav.device), sample_rate=embed.sample_rate, path=[None] * embed.wav.shape[0], seek_time=[0] * embed.wav.shape[0], ) class Tokenizer: """Base tokenizer implementation (in case we want to introduce more advances tokenizers in the future). """ def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: raise NotImplementedError() class WhiteSpaceTokenizer(Tokenizer): """This tokenizer should be used for natural language descriptions. For example: ["he didn't, know he's going home.", 'shorter sentence'] => [[78, 62, 31, 4, 78, 25, 19, 34], [59, 77, 0, 0, 0, 0, 0, 0]] """ PUNCTUATION = "?:!.,;" def __init__(self, n_bins: int, pad_idx: int = 0, language: str = "en_core_web_sm", lemma: bool = True, stopwords: bool = True) -> None: self.n_bins = n_bins self.pad_idx = pad_idx self.lemma = lemma self.stopwords = stopwords try: self.nlp = spacy.load(language) except IOError: spacy.cli.download(language) # type: ignore self.nlp = spacy.load(language) @tp.no_type_check def __call__(self, texts: tp.List[tp.Optional[str]], return_text: bool = False) -> tp.Tuple[torch.Tensor, torch.Tensor]: """Take a list of strings and convert them to a tensor of indices. Args: texts (list[str]): List of strings. return_text (bool, optional): Whether to return text as additional tuple item. Defaults to False. Returns: tuple[torch.Tensor, torch.Tensor]: - Indices of words in the LUT. 
- And a mask indicating where the padding tokens are """ output, lengths = [], [] texts = deepcopy(texts) for i, text in enumerate(texts): # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(torch.Tensor([self.pad_idx])) lengths.append(0) continue # convert numbers to words text = re.sub(r"(\d+)", lambda x: num2words(int(x.group(0))), text) # type: ignore # normalize text text = self.nlp(text) # type: ignore # remove stopwords if self.stopwords: text = [w for w in text if not w.is_stop] # type: ignore # remove punctuation text = [w for w in text if w.text not in self.PUNCTUATION] # type: ignore # lemmatize if needed text = [getattr(t, "lemma_" if self.lemma else "text") for t in text] # type: ignore texts[i] = " ".join(text) lengths.append(len(text)) # convert to tensor tokens = torch.Tensor([hash_trick(w, self.n_bins) for w in text]) output.append(tokens) mask = length_to_mask(torch.IntTensor(lengths)).int() padded_output = pad_sequence(output, padding_value=self.pad_idx).int().t() if return_text: return padded_output, mask, texts # type: ignore return padded_output, mask class NoopTokenizer(Tokenizer): """This tokenizer should be used for global conditioners such as: artist, genre, key, etc. The difference between this and WhiteSpaceTokenizer is that NoopTokenizer does not split strings, so "Jeff Buckley" will get it's own index. Whereas WhiteSpaceTokenizer will split it to ["Jeff", "Buckley"] and return an index per word. For example: ["Queen", "ABBA", "Jeff Buckley"] => [43, 55, 101] ["Metal", "Rock", "Classical"] => [0, 223, 51] """ def __init__(self, n_bins: int, pad_idx: int = 0): self.n_bins = n_bins self.pad_idx = pad_idx def __call__(self, texts: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: output, lengths = [], [] for text in texts: # if current sample doesn't have a certain attribute, replace with pad token if text is None: output.append(self.pad_idx) lengths.append(0) else: output.append(hash_trick(text, self.n_bins)) lengths.append(1) tokens = torch.LongTensor(output).unsqueeze(1) mask = length_to_mask(torch.IntTensor(lengths)).int() return tokens, mask class BaseConditioner(nn.Module): """Base model for all conditioner modules. We allow the output dim to be different than the hidden dim for two reasons: 1) keep our LUTs small when the vocab is large; 2) make all condition dims consistent. Args: dim (int): Hidden dim of the model. output_dim (int): Output dim of the conditioner. """ def __init__(self, dim: int, output_dim: int): super().__init__() self.dim = dim self.output_dim = output_dim self.output_proj = nn.Linear(dim, output_dim) def tokenize(self, *args, **kwargs) -> tp.Any: """Should be any part of the processing that will lead to a synchronization point, e.g. BPE tokenization with transfer to the GPU. The returned value will be saved and return later when calling forward(). """ raise NotImplementedError() def forward(self, inputs: tp.Any) -> ConditionType: """Gets input that should be used as conditioning (e.g, genre, description or a waveform). Outputs a ConditionType, after the input data was embedded as a dense vector. Returns: ConditionType: - A tensor of size [B, T, D] where B is the batch size, T is the length of the output embedding and D is the dimension of the embedding. - And a mask indicating where the padding tokens. """ raise NotImplementedError() class TextConditioner(BaseConditioner): ... class LUTConditioner(TextConditioner): """Lookup table TextConditioner. 
Args: n_bins (int): Number of bins. dim (int): Hidden dim of the model (text-encoder/LUT). output_dim (int): Output dim of the conditioner. tokenizer (str): Name of the tokenizer. pad_idx (int, optional): Index for padding token. Defaults to 0. """ def __init__(self, n_bins: int, dim: int, output_dim: int, tokenizer: str, pad_idx: int = 0): super().__init__(dim, output_dim) self.embed = nn.Embedding(n_bins, dim) self.tokenizer: Tokenizer if tokenizer == 'whitespace': self.tokenizer = WhiteSpaceTokenizer(n_bins, pad_idx=pad_idx) elif tokenizer == 'noop': self.tokenizer = NoopTokenizer(n_bins, pad_idx=pad_idx) else: raise ValueError(f"unrecognized tokenizer `{tokenizer}`.") def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Tuple[torch.Tensor, torch.Tensor]: device = self.embed.weight.device tokens, mask = self.tokenizer(x) tokens, mask = tokens.to(device), mask.to(device) return tokens, mask def forward(self, inputs: tp.Tuple[torch.Tensor, torch.Tensor]) -> ConditionType: tokens, mask = inputs embeds = self.embed(tokens) embeds = self.output_proj(embeds) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class T5Conditioner(TextConditioner): """T5-based TextConditioner. Args: name (str): Name of the T5 model. output_dim (int): Output dim of the conditioner. finetune (bool): Whether to fine-tune T5 at train time. device (str): Device for T5 Conditioner. autocast_dtype (tp.Optional[str], optional): Autocast dtype. word_dropout (float, optional): Word dropout probability. normalize_text (bool, optional): Whether to apply text normalization. """ MODELS = ["t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", "google/flan-t5-small", "google/flan-t5-base", "google/flan-t5-large", "google/flan-t5-xl", "google/flan-t5-xxl"] MODELS_DIMS = { "t5-small": 512, "t5-base": 768, "t5-large": 1024, "t5-3b": 1024, "t5-11b": 1024, "google/flan-t5-small": 512, "google/flan-t5-base": 768, "google/flan-t5-large": 1024, "google/flan-t5-3b": 1024, "google/flan-t5-11b": 1024, } def __init__(self, name: str, output_dim: int, finetune: bool, device: str, autocast_dtype: tp.Optional[str] = 'float32', word_dropout: float = 0., normalize_text: bool = False): assert name in self.MODELS, f"Unrecognized t5 model name (should in {self.MODELS})" super().__init__(self.MODELS_DIMS[name], output_dim) self.device = device self.name = name self.finetune = finetune self.word_dropout = word_dropout if autocast_dtype is None or self.device == 'cpu': self.autocast = TorchAutocast(enabled=False) if self.device != 'cpu': logger.warning("T5 has no autocast, this might lead to NaN") else: dtype = getattr(torch, autocast_dtype) assert isinstance(dtype, torch.dtype) logger.info(f"T5 will be evaluated with autocast as {autocast_dtype}") self.autocast = TorchAutocast(enabled=True, device_type=self.device, dtype=dtype) # Let's disable logging temporarily because T5 will vomit some errors otherwise. 
# thanks https://gist.github.com/simon-weber/7853144 previous_level = logging.root.manager.disable logging.disable(logging.ERROR) with warnings.catch_warnings(): warnings.simplefilter("ignore") try: self.t5_tokenizer = T5Tokenizer.from_pretrained(name) t5 = T5EncoderModel.from_pretrained(name).train(mode=finetune) finally: logging.disable(previous_level) if finetune: self.t5 = t5 else: # this makes sure that the t5 models is not part # of the saved checkpoint self.__dict__['t5'] = t5.to(device) self.normalize_text = normalize_text if normalize_text: self.text_normalizer = WhiteSpaceTokenizer(1, lemma=True, stopwords=True) def tokenize(self, x: tp.List[tp.Optional[str]]) -> tp.Dict[str, torch.Tensor]: # if current sample doesn't have a certain attribute, replace with empty string entries: tp.List[str] = [xi if xi is not None else "" for xi in x] if self.normalize_text: _, _, entries = self.text_normalizer(entries, return_text=True) if self.word_dropout > 0. and self.training: new_entries = [] for entry in entries: words = [word for word in entry.split(" ") if random.random() >= self.word_dropout] new_entries.append(" ".join(words)) entries = new_entries empty_idx = torch.LongTensor([i for i, xi in enumerate(entries) if xi == ""]) inputs = self.t5_tokenizer(entries, return_tensors='pt', padding=True).to(self.device) mask = inputs['attention_mask'] mask[empty_idx, :] = 0 # zero-out index where the input is non-existant return inputs def forward(self, inputs: tp.Dict[str, torch.Tensor]) -> ConditionType: mask = inputs['attention_mask'] with torch.set_grad_enabled(self.finetune), self.autocast: embeds = self.t5(**inputs).last_hidden_state embeds = self.output_proj(embeds.to(self.output_proj.weight)) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class WaveformConditioner(BaseConditioner): """Base class for all conditioners that take a waveform as input. Classes that inherit must implement `_get_wav_embedding` that outputs a continuous tensor, and `_downsampling_factor` that returns the down-sampling factor of the embedding model. Args: dim (int): The internal representation dimension. output_dim (int): Output dimension. device (tp.Union[torch.device, str]): Device. """ def __init__(self, dim: int, output_dim: int, device: tp.Union[torch.device, str]): super().__init__(dim, output_dim) self.device = device # if False no masking is done, used in ChromaStemConditioner when completing by periodicity a sample. self._use_masking = True def tokenize(self, x: WavCondition) -> WavCondition: wav, length, sample_rate, path, seek_time = x assert length is not None return WavCondition(wav.to(self.device), length.to(self.device), sample_rate, path, seek_time) def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Gets as input a WavCondition and returns a dense embedding.""" raise NotImplementedError() def _downsampling_factor(self): """Returns the downsampling factor of the embedding model.""" raise NotImplementedError() def forward(self, x: WavCondition) -> ConditionType: """Extract condition embedding and mask from a waveform and its metadata. Args: x (WavCondition): Waveform condition containing raw waveform and metadata. 
Returns: ConditionType: a dense vector representing the conditioning along with its mask """ wav, lengths, *_ = x with torch.no_grad(): embeds = self._get_wav_embedding(x) embeds = embeds.to(self.output_proj.weight) embeds = self.output_proj(embeds) if lengths is not None and self._use_masking: lengths = lengths / self._downsampling_factor() mask = length_to_mask(lengths, max_len=embeds.shape[1]).int() # type: ignore else: mask = torch.ones_like(embeds[..., 0]) embeds = (embeds * mask.unsqueeze(-1)) return embeds, mask class ChromaStemConditioner(WaveformConditioner): """Chroma conditioner based on stems. The ChromaStemConditioner uses DEMUCS to first filter out drums and bass, as the drums and bass often dominate the chroma leading to the chroma features not containing information about the melody. Args: output_dim (int): Output dimension for the conditioner. sample_rate (int): Sample rate for the chroma extractor. n_chroma (int): Number of chroma bins for the chroma extractor. radix2_exp (int): Size of stft window for the chroma extractor (power of 2, e.g. 12 -> 2^12). duration (int): duration used during training. This is later used for correct padding in case we are using chroma as prefix. match_len_on_eval (bool, optional): if True then all chromas are padded to the training duration. Defaults to False. eval_wavs (str, optional): path to a dataset manifest with waveform, this waveforms are used as conditions during eval (for cases where we don't want to leak test conditions like MusicCaps). Defaults to None. n_eval_wavs (int, optional): limits the number of waveforms used for conditioning. Defaults to 0. device (tp.Union[torch.device, str], optional): Device for the conditioner. **kwargs: Additional parameters for the chroma extractor. """ def __init__(self, output_dim: int, sample_rate: int, n_chroma: int, radix2_exp: int, duration: float, match_len_on_eval: bool = True, eval_wavs: tp.Optional[str] = None, n_eval_wavs: int = 0, cache_path: tp.Optional[tp.Union[str, Path]] = None, device: tp.Union[torch.device, str] = 'cpu', **kwargs): super().__init__(dim=n_chroma, output_dim=output_dim, device=device) self.autocast = TorchAutocast(enabled=device != 'cpu', device_type=self.device, dtype=torch.float32) self.sample_rate = sample_rate self.match_len_on_eval = match_len_on_eval if match_len_on_eval: self._use_masking = False self.duration = duration self.__dict__['demucs'] = pretrained.get_model('htdemucs').to(device) stem_sources: list = self.demucs.sources # type: ignore self.stem_indices = torch.LongTensor([stem_sources.index('vocals'), stem_sources.index('other')]).to(device) self.chroma = ChromaExtractor(sample_rate=sample_rate, n_chroma=n_chroma, radix2_exp=radix2_exp, **kwargs).to(device) self.chroma_len = self._get_chroma_len() self.eval_wavs: tp.Optional[torch.Tensor] = self._load_eval_wavs(eval_wavs, n_eval_wavs) self.cache = None if cache_path is not None: self.cache = EmbeddingCache(Path(cache_path) / 'wav', self.device, compute_embed_fn=self._get_full_chroma_for_cache, extract_embed_fn=self._extract_chroma_chunk) def _downsampling_factor(self) -> int: return self.chroma.winhop def _load_eval_wavs(self, path: tp.Optional[str], num_samples: int) -> tp.Optional[torch.Tensor]: """Load pre-defined waveforms from a json. These waveforms will be used for chroma extraction during evaluation. This is done to make the evaluation on MusicCaps fair (we shouldn't see the chromas of MusicCaps). 
""" if path is None: return None logger.info(f"Loading evaluation wavs from {path}") dataset: AudioDataset = AudioDataset.from_meta( path, segment_duration=self.duration, min_audio_duration=self.duration, sample_rate=self.sample_rate, channels=1) if len(dataset) > 0: eval_wavs = dataset.collater([dataset[i] for i in range(num_samples)]).to(self.device) logger.info(f"Using {len(eval_wavs)} evaluation wavs for chroma-stem conditioner") return eval_wavs else: raise ValueError("Could not find evaluation wavs, check lengths of wavs") def reset_eval_wavs(self, eval_wavs: tp.Optional[torch.Tensor]) -> None: self.eval_wavs = eval_wavs def has_eval_wavs(self) -> bool: return self.eval_wavs is not None def _sample_eval_wavs(self, num_samples: int) -> torch.Tensor: """Sample wavs from a predefined list.""" assert self.eval_wavs is not None, "Cannot sample eval wavs as no eval wavs provided." total_eval_wavs = len(self.eval_wavs) out = self.eval_wavs if num_samples > total_eval_wavs: out = self.eval_wavs.repeat(num_samples // total_eval_wavs + 1, 1, 1) return out[torch.randperm(len(out))][:num_samples] def _get_chroma_len(self) -> int: """Get length of chroma during training.""" dummy_wav = torch.zeros((1, int(self.sample_rate * self.duration)), device=self.device) dummy_chr = self.chroma(dummy_wav) return dummy_chr.shape[1] @torch.no_grad() def _get_stemmed_wav(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Get parts of the wav that holds the melody, extracting the main stems from the wav.""" with self.autocast: wav = convert_audio( wav, sample_rate, self.demucs.samplerate, self.demucs.audio_channels) # type: ignore stems = apply_model(self.demucs, wav, device=self.device) stems = stems[:, self.stem_indices] # extract relevant stems for melody conditioning mix_wav = stems.sum(1) # merge extracted stems to single waveform mix_wav = convert_audio(mix_wav, self.demucs.samplerate, self.sample_rate, 1) # type: ignore return mix_wav @torch.no_grad() def _extract_chroma(self, wav: torch.Tensor) -> torch.Tensor: """Extract chroma features from the waveform.""" with self.autocast: return self.chroma(wav) @torch.no_grad() def _compute_wav_embedding(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor: """Compute wav embedding, applying stem and chroma extraction.""" # avoid 0-size tensors when we are working with null conds if wav.shape[-1] == 1: return self._extract_chroma(wav) stems = self._get_stemmed_wav(wav, sample_rate) chroma = self._extract_chroma(stems) return chroma @torch.no_grad() def _get_full_chroma_for_cache(self, path: tp.Union[str, Path], x: WavCondition, idx: int) -> torch.Tensor: """Extract chroma from the whole audio waveform at the given path.""" wav, sr = audio_read(path) wav = wav[None].to(self.device) wav = convert_audio(wav, sr, self.sample_rate, to_channels=1) chroma = self._compute_wav_embedding(wav, self.sample_rate)[0] return chroma def _extract_chroma_chunk(self, full_chroma: torch.Tensor, x: WavCondition, idx: int) -> torch.Tensor: """Extract a chunk of chroma from the full chroma derived from the full waveform.""" wav_length = x.wav.shape[-1] seek_time = x.seek_time[idx] assert seek_time is not None, ( "WavCondition seek_time is required " "when extracting chroma chunks from pre-computed chroma.") full_chroma = full_chroma.float() frame_rate = self.sample_rate / self._downsampling_factor() target_length = int(frame_rate * wav_length / self.sample_rate) index = int(frame_rate * seek_time) out = full_chroma[index: index + target_length] out = F.pad(out[None], 
(0, 0, 0, target_length - out.shape[0]))[0] return out.to(self.device) @torch.no_grad() def _get_wav_embedding(self, x: WavCondition) -> torch.Tensor: """Get the wav embedding from the WavCondition. The conditioner will either extract the embedding on-the-fly computing it from the condition wav directly or will rely on the embedding cache to load the pre-computed embedding if relevant. """ sampled_wav: tp.Optional[torch.Tensor] = None if not self.training and self.eval_wavs is not None:
warn_once(logger, "Using precomputed evaluation wavs!")
15
2023-10-09 09:52:24+00:00
16k
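The _extract_chroma_chunk method in this record turns a seek time and a waveform length into frame indices of a pre-computed chroma matrix and right-pads the crop if it runs past the end. The same arithmetic in isolation, with made-up sample rate, hop size, and seek time (not the repo's configuration):

import torch
import torch.nn.functional as F

sample_rate, hop = 32000, 4096          # hypothetical values
frame_rate = sample_rate / hop          # chroma frames per second
full_chroma = torch.randn(500, 12)      # [frames, n_chroma]

wav_length = 10 * sample_rate           # a 10 second crop
seek_time = 55.0                        # crop starts 55 s into the track

# Convert the crop length and start time into chroma-frame units.
target_length = int(frame_rate * wav_length / sample_rate)
index = int(frame_rate * seek_time)
out = full_chroma[index: index + target_length]
# Right-pad the frame axis if the crop extends past the available frames.
out = F.pad(out[None], (0, 0, 0, target_length - out.shape[0]))[0]
print(out.shape)  # (target_length, n_chroma)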
RVC-Project/Retrieval-based-Voice-Conversion
rvc/modules/vc/modules.py
[ { "identifier": "Config", "path": "rvc/configs/config.py", "snippet": "class Config:\n def __new__(cls):\n if not hasattr(cls, \"_instance\"):\n cls._instance = super().__new__(cls)\n return cls._instance\n\n def __init__(self):\n self.device: str = \"cuda:0\"\n ...
import logging import os import traceback import numpy as np import soundfile as sf import torch from collections import OrderedDict from io import BytesIO from pathlib import Path from rvc.configs.config import Config from rvc.lib.audio import load_audio, wav2 from rvc.lib.infer_pack.models import ( SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono, SynthesizerTrnMs768NSFsid, SynthesizerTrnMs768NSFsid_nono, ) from rvc.modules.vc.pipeline import Pipeline from rvc.modules.vc.utils import *
11,833
logger: logging.Logger = logging.getLogger(__name__) class VC: def __init__(self): self.n_spk: any = None self.tgt_sr: int | None = None self.net_g = None self.pipeline: Pipeline | None = None self.cpt: OrderedDict | None = None self.version: str | None = None self.if_f0: int | None = None self.version: str | None = None self.hubert_model: any = None
logger: logging.Logger = logging.getLogger(__name__) class VC: def __init__(self): self.n_spk: any = None self.tgt_sr: int | None = None self.net_g = None self.pipeline: Pipeline | None = None self.cpt: OrderedDict | None = None self.version: str | None = None self.if_f0: int | None = None self.version: str | None = None self.hubert_model: any = None
self.config = Config()
0
2023-10-14 09:52:31+00:00
16k
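The Config class referenced in this record implements a singleton through __new__: every call to Config() hands back the same object. A stripped-down sketch of that pattern, keeping only the device attribute from the snippet:

class Config:
    def __new__(cls):
        # Create the instance once and return it for every later call.
        if not hasattr(cls, "_instance"):
            cls._instance = super().__new__(cls)
        return cls._instance

    def __init__(self):
        # __init__ still runs on every Config() call; keep it idempotent or
        # guard one-time setup explicitly.
        self.device: str = "cuda:0"

a, b = Config(), Config()
print(a is b)  # True: both names point at the single shared instance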
zhijie-group/LOVECon
video_diffusion/trainer/ddpm_trainer.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Opti...
from typing import Union from einops import rearrange from transformers import CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from ..models.unet_3d_condition import UNetPseudo3DConditionModel from video_diffusion.pipelines.stable_diffusion import SpatioTemporalStableDiffusionPipeline import torch import torch.nn.functional as F
11,723
class DDPMTrainer(SpatioTemporalStableDiffusionPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
class DDPMTrainer(SpatioTemporalStableDiffusionPipeline): def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
unet: UNetPseudo3DConditionModel,
0
2023-10-09 14:38:28+00:00
16k
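The DDPMTrainer in this record imports einops.rearrange for a spatio-temporal diffusion pipeline. As a hypothetical illustration of why (not code taken from the repo), video tensors of shape (batch, channels, frames, height, width) are commonly folded so the frame axis joins the batch axis before a per-frame 2D module, then unfolded afterwards:

import torch
from einops import rearrange

video = torch.randn(2, 4, 8, 32, 32)             # (batch, channels, frames, h, w)
# Fold frames into the batch so a 2D VAE/UNet can process each frame.
frames = rearrange(video, "b c f h w -> (b f) c h w")
print(frames.shape)                               # torch.Size([16, 4, 32, 32])
# Unfold back to the original video layout.
restored = rearrange(frames, "(b f) c h w -> b c f h w", b=2)
print(torch.equal(video, restored))               # True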
mlpc-ucsd/MaskCLIP
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "maskclip/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NA...
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import MetadataCatalog, build_detection_train_loader from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from maskclip import ( COCOInstanceNewBaselineDatasetMapper, COCOPanopticNewBaselineDatasetMapper, InstanceSegEvaluator, MaskFormerInstanceDatasetMapper, MaskFormerPanopticDatasetMapper, MaskFormerSemanticDatasetMapper, SemanticSegmentorWithTTA, add_maskformer2_config, ) import warnings import copy import itertools import logging import os import torch import detectron2.utils.comm as comm
11,525
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MaskFormer Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings('ignore', category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance":
mapper = MaskFormerInstanceDatasetMapper(cfg, True)
3
2023-10-13 02:32:25+00:00
16k
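The next_line value of this record ("mapper = MaskFormerInstanceDatasetMapper(cfg, True)") is the statement a model is expected to produce to continue the cropped code, i.e. the body of the final elif branch of build_train_loader. A minimal sketch of how such a record could be scored, assuming a hypothetical record dict with a "next_line" key like the field above; the dataset's own evaluation procedure may differ:

def score_completion(record: dict, predicted_line: str) -> bool:
    """Whitespace-normalized exact match between a predicted continuation
    and the record's gold next_line (illustrative only)."""
    gold = record["next_line"].strip()
    return predicted_line.strip() == gold

# usage sketch (hypothetical record):
# score_completion({"next_line": "mapper = MaskFormerInstanceDatasetMapper(cfg, True)"},
#                  "mapper = MaskFormerInstanceDatasetMapper(cfg, True)")  # -> True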
mlpc-ucsd/MasQCLIP
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "masqclip/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NA...
import copy
import itertools
import logging
import os
import torch
import detectron2.utils.comm as comm
import warnings
from collections import OrderedDict
from typing import Any, Dict, List, Set
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.data import MetadataCatalog, build_detection_train_loader
from detectron2.engine import (
    DefaultTrainer,
    default_argument_parser,
    default_setup,
    launch,
)
from detectron2.evaluation import (
    CityscapesInstanceEvaluator,
    CityscapesSemSegEvaluator,
    COCOEvaluator,
    COCOPanopticEvaluator,
    DatasetEvaluators,
    LVISEvaluator,
    SemSegEvaluator,
    verify_results,
)
from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler
from detectron2.solver.build import maybe_add_gradient_clipping
from detectron2.utils.logger import setup_logger
from masqclip import (
    COCOInstanceNewBaselineDatasetMapper,
    COCOPanopticNewBaselineDatasetMapper,
    InstanceSegEvaluator,
    MaskFormerInstanceDatasetMapper,
    MaskFormerPanopticDatasetMapper,
    MaskFormerSemanticDatasetMapper,
    SemanticSegmentorWithTTA,
    add_maskformer2_config,
    add_masqclip_config,
)
12,391
mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. """ return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...") model = SemanticSegmentorWithTTA(cfg, model) evaluators = [ cls.build_evaluator( cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") ) for name in cfg.DATASETS.TEST ] res = cls.test(cfg, model, evaluators) res = OrderedDict({k + "_TTA": v for k, v in res.items()}) return res def setup(args): """ Create configs and perform basic setups. 
""" cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved """ MasQCLIP Training Script. """ # MasQCLIP warnings.filterwarnings("ignore") class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to MaskFormer. """ @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if evaluator_type in ["sem_seg", "ade20k_panoptic_seg"]: evaluator_list.append( SemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, ) ) # instance segmentation if evaluator_type == "coco": evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) # panoptic segmentation if evaluator_type in [ "coco_panoptic_seg", "ade20k_panoptic_seg", "cityscapes_panoptic_seg", "mapillary_vistas_panoptic_seg", ]: if cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON: evaluator_list.append(COCOPanopticEvaluator(dataset_name, output_folder)) # COCO if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(COCOEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "coco_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Mapillary Vistas if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) if evaluator_type == "mapillary_vistas_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: evaluator_list.append(SemSegEvaluator(dataset_name, distributed=True, output_dir=output_folder)) # Cityscapes if evaluator_type == "cityscapes_instance": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesInstanceEvaluator(dataset_name) if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." return CityscapesSemSegEvaluator(dataset_name) if evaluator_type == "cityscapes_panoptic_seg": if cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." evaluator_list.append(CityscapesSemSegEvaluator(dataset_name)) if cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." 
evaluator_list.append(CityscapesInstanceEvaluator(dataset_name)) # ADE20K if evaluator_type == "ade20k_panoptic_seg" and cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON: evaluator_list.append(InstanceSegEvaluator(dataset_name, output_dir=output_folder)) # LVIS if evaluator_type == "lvis": return LVISEvaluator(dataset_name, output_dir=output_folder) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper if cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_semantic": mapper = MaskFormerSemanticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Panoptic segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_panoptic": mapper = MaskFormerPanopticDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # Instance segmentation dataset mapper elif cfg.INPUT.DATASET_MAPPER_NAME == "mask_former_instance": mapper = MaskFormerInstanceDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco instance segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_instance_lsj": mapper = COCOInstanceNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) # coco panoptic segmentation lsj new baseline elif cfg.INPUT.DATASET_MAPPER_NAME == "coco_panoptic_lsj": mapper = COCOPanopticNewBaselineDatasetMapper(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) else: mapper = None return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain(*[x["params"] for x in self.param_groups]) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test_with_TTA(cls, cfg, model): logger = logging.getLogger("detectron2.trainer") # In the end of training, run an evaluation with TTA. logger.info("Running inference with test-time augmentation ...") model = SemanticSegmentorWithTTA(cfg, model) evaluators = [ cls.build_evaluator( cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA") ) for name in cfg.DATASETS.TEST ] res = cls.test(cfg, model, evaluators) res = OrderedDict({k + "_TTA": v for k, v in res.items()}) return res def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg)
add_masqclip_config(cfg)
1
2023-10-13 02:43:53+00:00
16k
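The build_optimizer method in the record above wraps the chosen optimizer class so that every step() first clips the gradient norm over all parameter groups ("full_model" clipping). A minimal sketch of that wrapper pattern using standard PyTorch APIs; the with_full_model_grad_clipping helper name is illustrative, not the MasQCLIP code itself:

import itertools
import torch

def with_full_model_grad_clipping(optim_cls, clip_norm_val):
    # Subclass the optimizer so step() clips the global grad norm first,
    # mirroring FullModelGradientClippingOptimizer in the record above.
    class ClippedOptimizer(optim_cls):
        def step(self, closure=None):
            all_params = itertools.chain(*[g["params"] for g in self.param_groups])
            torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
            super().step(closure=closure)
    return ClippedOptimizer

# usage sketch:
# ClippedAdamW = with_full_model_grad_clipping(torch.optim.AdamW, clip_norm_val=0.01)
# optimizer = ClippedAdamW(model.parameters(), lr=1e-4)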
ielab/llm-rankers
run.py
[ { "identifier": "SearchResult", "path": "rankers/rankers.py", "snippet": "class SearchResult:\n docid: str\n score: float\n text: str" }, { "identifier": "PointwiseLlmRanker", "path": "rankers/pointwise.py", "snippet": "class PointwiseLlmRanker(LlmRanker):\n\n def __init__(se...
import logging
import ir_datasets
import argparse
import sys
import json
import time
import random
from pyserini.search.lucene import LuceneSearcher
from pyserini.search._base import get_topics
from rankers.rankers import SearchResult
from rankers.pointwise import PointwiseLlmRanker, MonoT5LlmRanker
from rankers.setwise import SetwiseLlmRanker, OpenAiSetwiseLlmRanker
from rankers.pairwise import PairwiseLlmRanker, DuoT5LlmRanker, OpenAiPairwiseLlmRanker
from rankers.listwise import OpenAiListwiseLlmRanker, ListwiseLlmRanker
from tqdm import tqdm
13,975
random.seed(929) logger = logging.getLogger(__name__) def parse_args(parser, commands): # Divide argv by commands split_argv = [[]] for c in sys.argv[1:]: if c in commands.choices: split_argv.append([c]) else: split_argv[-1].append(c) # Initialize namespace args = argparse.Namespace() for c in commands.choices: setattr(args, c, None) # Parse each command parser.parse_args(split_argv[0], namespace=args) # Without command for argv in split_argv[1:]: # Commands n = argparse.Namespace() setattr(args, argv[0], n) parser.parse_args(argv, namespace=n) return args def write_run_file(path, results, tag): with open(path, 'w') as f: for qid, _, ranking in results: rank = 1 for doc in ranking: docid = doc.docid score = doc.score f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n") rank += 1 def main(args): if args.pointwise: if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key: ranker = OpenAiListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, window_size=args.listwise.window_size, step_size=args.listwise.step_size, num_repeat=args.listwise.num_repeat) else:
random.seed(929) logger = logging.getLogger(__name__) def parse_args(parser, commands): # Divide argv by commands split_argv = [[]] for c in sys.argv[1:]: if c in commands.choices: split_argv.append([c]) else: split_argv[-1].append(c) # Initialize namespace args = argparse.Namespace() for c in commands.choices: setattr(args, c, None) # Parse each command parser.parse_args(split_argv[0], namespace=args) # Without command for argv in split_argv[1:]: # Commands n = argparse.Namespace() setattr(args, argv[0], n) parser.parse_args(argv, namespace=n) return args def write_run_file(path, results, tag): with open(path, 'w') as f: for qid, _, ranking in results: rank = 1 for doc in ranking: docid = doc.docid score = doc.score f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n") rank += 1 def main(args): if args.pointwise: if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key: ranker = OpenAiListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, window_size=args.listwise.window_size, step_size=args.listwise.step_size, num_repeat=args.listwise.num_repeat) else:
ranker = ListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
9
2023-10-14 01:39:38+00:00
16k
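write_run_file in the record above emits a TREC-style run file: one tab-separated line per retrieved document in the form qid Q0 docid rank score tag, with ranks starting at 1. A condensed restatement of that logic, assuming each ranked item exposes .docid and .score as the SearchResult dataclass in the record's context does:

def write_trec_run(path, results, tag):
    # results: iterable of (qid, _, ranking) triples, as consumed by
    # write_run_file in the record above; the middle element is ignored.
    with open(path, "w") as f:
        for qid, _, ranking in results:
            for rank, doc in enumerate(ranking, start=1):
                f.write(f"{qid}\tQ0\t{doc.docid}\t{rank}\t{doc.score}\t{tag}\n")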
amazon-science/tabsyn
baselines/tabddpm/train.py
[ { "identifier": "make_dataset", "path": "utils_train.py", "snippet": "def make_dataset(\n data_path: str,\n T: src.Transformations,\n task_type,\n change_val: bool,\n concat = True,\n):\n\n # classification\n if task_type == 'binclass' or task_type == 'multiclass':\n X_cat = ...
import os
import sys
import time
import torch
import numpy as np
import pandas as pd
import src
from copy import deepcopy
from utils_train import make_dataset, update_ema
from baselines.tabddpm.models.modules import MLPDiffusion
from baselines.tabddpm.models.gaussian_multinomial_distribution import GaussianMultinomialDiffusion
12,837
self.optimizer.zero_grad() loss_multi, loss_gauss = self.diffusion.mixed_loss(x) loss = loss_multi + loss_gauss loss.backward() self.optimizer.step() return loss_multi, loss_gauss def run_loop(self): step = 0 curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 self.print_every = 1 self.log_every = 1 best_loss = np.inf print('Steps: ', self.steps) while step < self.steps: start_time = time.time() x = next(self.train_iter)[0] batch_loss_multi, batch_loss_gauss = self._run_step(x) self._anneal_lr(step) curr_count += len(x) curr_loss_multi += batch_loss_multi.item() * len(x) curr_loss_gauss += batch_loss_gauss.item() * len(x) if (step + 1) % self.log_every == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) if np.isnan(gloss): print('Finding Nan') break if (step + 1) % self.print_every == 0: print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] np.set_printoptions(suppress=True) curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 if mloss + gloss < best_loss: best_loss = mloss + gloss torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, 'model.pt')) if (step + 1) % 10000 == 0: torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, f'model_{step+1}.pt')) # update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) step += 1 # end_time = time.time() # print('Time: ', end_time - start_time) def train( model_save_path, real_data_path, steps = 1000, lr = 0.002, weight_decay = 1e-4, batch_size = 1024, task_type = 'binclass', model_type = 'mlp', model_params = None, num_timesteps = 1000, gaussian_loss_type = 'mse', scheduler = 'cosine', T_dict = None, num_numerical_features = 0, device = torch.device('cuda:0'), seed = 0, change_val = False ): real_data_path = os.path.normpath(real_data_path) # zero.improve_reproducibility(seed) T = src.Transformations(**T_dict) dataset = make_dataset( real_data_path, T, task_type = task_type, change_val = False, ) K = np.array(dataset.get_category_sizes('train')) if len(K) == 0 or T_dict['cat_encoding'] == 'one-hot': K = np.array([0]) num_numerical_features = dataset.X_num['train'].shape[1] if dataset.X_num is not None else 0 d_in = np.sum(K) + num_numerical_features model_params['d_in'] = d_in print(d_in) print(model_params) model = get_model( model_type, model_params, num_numerical_features, category_sizes=dataset.get_category_sizes('train') ) model.to(device) print(model) train_loader = src.prepare_fast_dataloader(dataset, split='train', batch_size=batch_size)
def get_model( model_name, model_params, n_num_features, category_sizes ): print(model_name) if model_name == 'mlp': model = MLPDiffusion(**model_params) else: raise "Unknown model!" return model class Trainer: def __init__(self, diffusion, train_iter, lr, weight_decay, steps, model_save_path, device=torch.device('cuda:1')): self.diffusion = diffusion self.ema_model = deepcopy(self.diffusion._denoise_fn) for param in self.ema_model.parameters(): param.detach_() self.train_iter = train_iter self.steps = steps self.init_lr = lr self.optimizer = torch.optim.AdamW(self.diffusion.parameters(), lr=lr, weight_decay=weight_decay) self.device = device self.loss_history = pd.DataFrame(columns=['step', 'mloss', 'gloss', 'loss']) self.model_save_path = model_save_path columns = list(np.arange(5)*200) columns[0] = 1 columns = ['step'] + columns self.log_every = 50 self.print_every = 1 self.ema_every = 1000 def _anneal_lr(self, step): frac_done = step / self.steps lr = self.init_lr * (1 - frac_done) for param_group in self.optimizer.param_groups: param_group["lr"] = lr def _run_step(self, x): x = x.to(self.device) self.optimizer.zero_grad() loss_multi, loss_gauss = self.diffusion.mixed_loss(x) loss = loss_multi + loss_gauss loss.backward() self.optimizer.step() return loss_multi, loss_gauss def run_loop(self): step = 0 curr_loss_multi = 0.0 curr_loss_gauss = 0.0 curr_count = 0 self.print_every = 1 self.log_every = 1 best_loss = np.inf print('Steps: ', self.steps) while step < self.steps: start_time = time.time() x = next(self.train_iter)[0] batch_loss_multi, batch_loss_gauss = self._run_step(x) self._anneal_lr(step) curr_count += len(x) curr_loss_multi += batch_loss_multi.item() * len(x) curr_loss_gauss += batch_loss_gauss.item() * len(x) if (step + 1) % self.log_every == 0: mloss = np.around(curr_loss_multi / curr_count, 4) gloss = np.around(curr_loss_gauss / curr_count, 4) if np.isnan(gloss): print('Finding Nan') break if (step + 1) % self.print_every == 0: print(f'Step {(step + 1)}/{self.steps} MLoss: {mloss} GLoss: {gloss} Sum: {mloss + gloss}') self.loss_history.loc[len(self.loss_history)] =[step + 1, mloss, gloss, mloss + gloss] np.set_printoptions(suppress=True) curr_count = 0 curr_loss_gauss = 0.0 curr_loss_multi = 0.0 if mloss + gloss < best_loss: best_loss = mloss + gloss torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, 'model.pt')) if (step + 1) % 10000 == 0: torch.save(self.diffusion._denoise_fn.state_dict(), os.path.join(self.model_save_path, f'model_{step+1}.pt')) # update_ema(self.ema_model.parameters(), self.diffusion._denoise_fn.parameters()) step += 1 # end_time = time.time() # print('Time: ', end_time - start_time) def train( model_save_path, real_data_path, steps = 1000, lr = 0.002, weight_decay = 1e-4, batch_size = 1024, task_type = 'binclass', model_type = 'mlp', model_params = None, num_timesteps = 1000, gaussian_loss_type = 'mse', scheduler = 'cosine', T_dict = None, num_numerical_features = 0, device = torch.device('cuda:0'), seed = 0, change_val = False ): real_data_path = os.path.normpath(real_data_path) # zero.improve_reproducibility(seed) T = src.Transformations(**T_dict) dataset = make_dataset( real_data_path, T, task_type = task_type, change_val = False, ) K = np.array(dataset.get_category_sizes('train')) if len(K) == 0 or T_dict['cat_encoding'] == 'one-hot': K = np.array([0]) num_numerical_features = dataset.X_num['train'].shape[1] if dataset.X_num is not None else 0 d_in = np.sum(K) + num_numerical_features model_params['d_in'] = 
d_in print(d_in) print(model_params) model = get_model( model_type, model_params, num_numerical_features, category_sizes=dataset.get_category_sizes('train') ) model.to(device) print(model) train_loader = src.prepare_fast_dataloader(dataset, split='train', batch_size=batch_size)
diffusion = GaussianMultinomialDiffusion(
3
2023-10-10 18:06:31+00:00
16k
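Trainer._anneal_lr in the record above decays the learning rate linearly from its initial value toward zero over the configured number of steps (lr = init_lr * (1 - step / steps)). A minimal standalone sketch of that schedule, assuming a standard PyTorch optimizer; the helper name is illustrative:

def anneal_lr_linear(optimizer, init_lr, step, total_steps):
    # Linear decay: full init_lr at step 0, approaching 0 as step -> total_steps.
    frac_done = step / total_steps
    lr = init_lr * (1 - frac_done)
    for param_group in optimizer.param_groups:
        param_group["lr"] = lr
    return lr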
ThomasMrY/DisDiff
ldm/models/diffusion/ddpm_kl.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as F
import pytorch_lightning as pl
import copy
import os
import pandas as pd
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.modules.diffusionmodules.util import return_wrap
11,446
def apply_model(self, x_noisy, t, cond, return_ids=False, sampled_concept= None): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = 
[{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, sampled_concept = sampled_concept, **cond) # if isinstance(x_recon, tuple) and not return_ids: # return x_recon[0] # else: # return x_recon return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) self.ce_loss = nn.CrossEntropyLoss(reduction = "none") if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) self.register_buffer("shift_coef", - to_torch(np.sqrt(alphas)) * (1. - self.alphas_cumprod_prev) / torch.sqrt(1. - self.alphas_cumprod)) self.register_buffer("ddim_coef", -self.sqrt_one_minus_alphas_cumprod) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") self.load_epoch = sd['epoch'] self.load_step = sd["global_step"] if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.ddim_coef, t, x.shape)) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=eps_pred) elif self.parameterization == "x0": x_recon = eps_pred if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) eps_pred = return_wrap(model_out, extract_into_tensor(self.shift_coef, t, x_start.shape)) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(eps_pred, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = 
self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): pass # _, loss_dict_no_ema = self.shared_step(batch) # with self.ema_scope(): # _, loss_dict_ema = self.shared_step(batch) # loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} # self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) # self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, dis_loss_flag = False, detach_flag = False, train_enc_flag = False, dis_weight = 1.0, kl_weight = 0.0005, dis_loss_type = "IM", kl_loss_flag = False, *args, **kwargs): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) 
super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key self.dis_loss_flag = dis_loss_flag self.detach_flag = detach_flag self.train_enc_flag = train_enc_flag self.dis_weight = dis_weight self.dis_loss_type = dis_loss_type self.kl_loss_flag = kl_loss_flag self.kl_weight = kl_weight try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() # def on_train_batch_start(self, batch, batch_idx, dataloader_idx): def on_train_batch_start(self, batch, batch_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if hasattr(self.model.diffusion_model,"scale_factor"): del self.scale_factor self.register_buffer('scale_factor', self.model.diffusion_model.scale_factor) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING Pre-Trained STD-RESCALING ###") else: del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, 
device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; 
pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) else: c = xc if bs is not None: c = c[:bs] else: c = None xc = None out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. 
apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False, sampled_concept= None): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 
'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not 
isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, sampled_concept = sampled_concept, **cond) # if isinstance(x_recon, tuple) and not return_ids: # return x_recon[0] # else: # return x_recon return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_flat(kl_prior) / np.log(2.0)
5
2023-10-07 09:58:07+00:00
16k
wiio12/LEGO-Prover
lego_prover/prover.py
[ { "identifier": "IsabelleEnv", "path": "lego_prover/env/isa_bridge.py", "snippet": "class IsabelleEnv(gym.Env):\n def __init__(\n self,\n logger=None,\n isabelle_path=\"/Users/wiio/Isabelle2022\",\n working_dir=\"miniF2F\",\n interactive_file=\"miniF2F/interactive.t...
import os import random import re import time import multiprocessing as mp import tiktoken import lego_prover.utils as U import logging from lego_prover.env.isa_bridge import IsabelleEnv from .agents import ActionAgent from .agents import CurriculumAgent from .agents import SkillManager from langchain.schema import HumanMessage
11,559
class Prover: def __init__( self, rank: int = None, isabelle_path: str = None, server_port: int = 8000, model_name: str = "gpt-4", temperature: int = 0, action_agent_task_max_retries: int = 4, curriculum_task_type: str = "simple_curriculum", curriculum_agent_lock = U.WithEmpty(), skill_manager_lock = U.WithEmpty(), chroma_bridge = None, openai_api_request_timeout: int = 6000, ckpt_dir: str = "ckpt", resume: bool = False, miniF2F_tasks: mp.Queue = None, ): """ Initializes a new instance of the Prover class. Args: rank (int): The rank of the prover process. isabelle_path (str): The path to the Isabelle directory. server_port (int): The port number for the server. model_name (str): The name of the OpenAI model to use. temperature (int): The temperature for sampling the LLM. action_agent_task_max_retries (int): The maximum number of retries for an action agent task. curriculum_task_type (str): The type of curriculum task to use. curriculum_agent_lock: The lock for the curriculum agent. skill_manager_lock: The lock for the skill manager. chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse. openai_api_request_timeout (int): The timeout for OpenAI API requests. ckpt_dir (str): The directory for saving checkpoints. resume (bool): Whether to resume from the checkpoint. miniF2F_tasks (mp.Queue): The queue for miniF2F tasks. """ # init env self.rank = rank self.logger = logging.getLogger(f'prover-{rank}') self.logger.info(f"lego_prover running in rank {rank}") self.model_name = model_name self.env = IsabelleEnv( logger=self.logger, isabelle_path=isabelle_path, server_port=server_port ) self.action_agent_model_name = model_name self.tokenizer_encoder = tiktoken.encoding_for_model( self.action_agent_model_name) self.ckpt_dir = ckpt_dir self.temperature = temperature # init agents self.action_agent = ActionAgent( logger=self.logger, model_name=model_name, temperature=temperature, request_timeout=openai_api_request_timeout, ckpt_dir=ckpt_dir, ) self.action_agent_task_max_retries = action_agent_task_max_retries
class Prover: def __init__( self, rank: int = None, isabelle_path: str = None, server_port: int = 8000, model_name: str = "gpt-4", temperature: int = 0, action_agent_task_max_retries: int = 4, curriculum_task_type: str = "simple_curriculum", curriculum_agent_lock = U.WithEmpty(), skill_manager_lock = U.WithEmpty(), chroma_bridge = None, openai_api_request_timeout: int = 6000, ckpt_dir: str = "ckpt", resume: bool = False, miniF2F_tasks: mp.Queue = None, ): """ Initializes a new instance of the Prover class. Args: rank (int): The rank of the prover process. isabelle_path (str): The path to the Isabelle directory. server_port (int): The port number for the server. model_name (str): The name of the OpenAI model to use. temperature (int): The temperature for sampling the LLM. action_agent_task_max_retries (int): The maximum number of retries for an action agent task. curriculum_task_type (str): The type of curriculum task to use. curriculum_agent_lock: The lock for the curriculum agent. skill_manager_lock: The lock for the skill manager. chroma_bridge: The ChromaBridge object for controlling the keyboard and mouse. openai_api_request_timeout (int): The timeout for OpenAI API requests. ckpt_dir (str): The directory for saving checkpoints. resume (bool): Whether to resume from the checkpoint. miniF2F_tasks (mp.Queue): The queue for miniF2F tasks. """ # init env self.rank = rank self.logger = logging.getLogger(f'prover-{rank}') self.logger.info(f"lego_prover running in rank {rank}") self.model_name = model_name self.env = IsabelleEnv( logger=self.logger, isabelle_path=isabelle_path, server_port=server_port ) self.action_agent_model_name = model_name self.tokenizer_encoder = tiktoken.encoding_for_model( self.action_agent_model_name) self.ckpt_dir = ckpt_dir self.temperature = temperature # init agents self.action_agent = ActionAgent( logger=self.logger, model_name=model_name, temperature=temperature, request_timeout=openai_api_request_timeout, ckpt_dir=ckpt_dir, ) self.action_agent_task_max_retries = action_agent_task_max_retries
self.curriculum_agent = CurriculumAgent(
2
2023-10-09 04:23:43+00:00
16k
LiyaoTang/ERDA
models/build_models.py
[ { "identifier": "load_config", "path": "config/utils.py", "snippet": "def load_config(cfg_path=None, dataset_name=None, cfg_name=None, cfg_group=None, reload=True):\n # cfg from path\n if cfg_path is not None:\n update = None\n if os.path.isfile(cfg_path):\n # update on th...
import os, re, sys, copy, warnings import tensorflow as tf from collections import defaultdict from config import log_config, load_config, get_block_cfg from utils.logger import print_dict from .heads import resnet_classification_head, resnet_scene_segmentation_head, resnet_multi_part_segmentation_head from .backbone import resnet_backbone from .blocks import get_block_ops, apply_block_ops from .head import apply_head_ops from .utils import tf_scope from .basic_operators import * from ops import TF_OPS
10,804
# main_n: loss_dict.pop(main_n), **loss_dict, } head_dict['loss'] = loss_dict return loss_dict class SceneSegModel(Model): def __init__(self, flat_inputs, is_training, config, scope=None, verbose=True): self.config = config self.is_training = is_training self.scope = scope self.verbose = verbose with tf.variable_scope('inputs'): self.inputs = self.get_inputs(flat_inputs) self.num_layers = config.num_layers self.labels = self.inputs['point_labels'] self.down_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.up_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.stage_list = self.inputs['stage_list'] = {'down': self.down_list, 'up': self.up_list} self.head_dict = self.inputs['head_dict'] = {'loss': {}, 'result': {}, 'config': {}} for i, p in enumerate(self.inputs['points']): # fill points self.down_list[i]['p_out'] = p # up 0 = the most upsampled, num_layers-1 the upsampled pt from the most downsampled self.up_list[i]['p_out'] = p if i < self.num_layers - 1 else None if config.dense_by_conv: dense_layer.config = config with tf.variable_scope('model'): fdim = config.first_features_dim r = config.first_subsampling_dl * config.density_parameter features = self.inputs['features'] F = resnet_backbone(config, self.inputs, features, base_radius=r, base_fdim=fdim, bottleneck_ratio=config.bottleneck_ratio, depth=config.depth, is_training=is_training, init=config.init, weight_decay=config.weight_decay, activation_fn=config.activation_fn, bn=True, bn_momentum=config.bn_momentum, bn_eps=config.bn_eps) F_up, head = resnet_scene_segmentation_head(config, self.inputs, F, base_fdim=fdim, is_training=is_training, init=config.init, weight_decay=config.weight_decay, activation_fn=config.activation_fn, bn=True, bn_momentum=config.bn_momentum, bn_eps=config.bn_eps) for i, p in enumerate(self.inputs['points']): # fill features self.down_list[i]['f_out'] = F[i] # F_up reversed - 0 = the most upsampled, num_layers-1 the upsampled pt from the most downsampled self.up_list[i]['f_out'] = F_up[i] if i < len(F_up) else None self.up_list[-1] = self.down_list[-1] # align the most-downsampled layer if head is not None: latent, logits = head self.up_list[0]['latent'] = latent self.up_list[0]['logits'] = logits self.head_dict = self.build_head(self.config.arch_out, verbose=verbose) self.loss_dict = self.build_loss(scope) return class ModelBuilder(Model): def __init__(self, flat_inputs, is_training, config, scope=None, verbose=True): self.config = config self.is_training = is_training self.scope = scope # variable scope - potential sharing across devices (e.g. gpus) self.verbose = verbose with tf.variable_scope('inputs'): self.inputs = self.get_inputs(flat_inputs) self.num_layers = config.num_layers self.labels = self.inputs['point_labels'] with tf.variable_scope('model'): self.head_dict = self.build_model_plain_split() self.loss_dict = self.build_loss(scope=scope) return def build_model_plain_split(self): """ detect down-/up-sample via ops => architecture = [ops, ...] 
""" config = self.config self.down_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.up_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] if self.num_layers > 0 else self.down_list self.stage_list = {'down': self.down_list, 'up': self.up_list} self.head_dict = {'loss': {}, 'result': {}, 'config': {}} inputs = self.inputs inputs['stage_list'] = self.stage_list inputs['head_dict'] = self.head_dict # split arch: input -> main -> output if '__input__' in config.architecture and '__output__' in config.architecture: arch_in = config.architecture[:config.architecture.index('__input__')] arch_main = config.architecture[len(arch_in) + 1:config.architecture.index('__output__')] arch_out = config.architecture[config.architecture.index('__output__') + 1:] else: arch_in = config.arch_in arch_main = config.arch_main arch_out = config.arch_out assert len(arch_in) and len(arch_out), f'invalid split of architecture {config.architecture}' arch_in = [get_block_cfg(blk) if isinstance(blk, str) else blk for blk in arch_in] arch_main = [get_block_cfg(blk) if isinstance(blk, str) else blk for blk in arch_main] arch_out = [load_config(dataset_name='head', cfg_name=a) for a in arch_out] # arch input features = inputs['features'] self.prepare_points('', 0, inputs, config) arch_in_dims = config.arch_in_dims if config.arch_in_dims else [config.first_features_dim] * len(arch_in) if self.verbose: print(f'\n\n==== inputs') print_dict(inputs, prefix='\t', except_k=['stage_list']) print('\n\n==== arch input') for block_i, (block_cfg, d_out) in enumerate(zip(arch_in, arch_in_dims)): with tf.variable_scope(f'input/{block_cfg.name}_{block_i}'):
if tf.__version__.split('.')[0] == '2': tf = tf.compat.v1 tf.disable_v2_behavior() BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.insert(0, ROOT_DIR) class Model(object): def get_inputs(self, inputs): config = self.config if isinstance(inputs, dict): pass else: flat_inputs = inputs self.inputs = dict() self.inputs['points'] = flat_inputs[:config.num_layers] self.inputs['neighbors'] = flat_inputs[config.num_layers:2 * config.num_layers] self.inputs['pools'] = flat_inputs[2 * config.num_layers:3 * config.num_layers] self.inputs['upsamples'] = flat_inputs[3 * config.num_layers:4 * config.num_layers] ind = 4 * config.num_layers self.inputs['features'] = flat_inputs[ind] ind += 1 self.inputs['batch_weights'] = flat_inputs[ind] ind += 1 self.inputs['in_batches'] = flat_inputs[ind] ind += 1 self.inputs['out_batches'] = flat_inputs[ind] ind += 1 self.inputs['point_labels'] = flat_inputs[ind] ind += 1 self.inputs['augment_scales'] = flat_inputs[ind] ind += 1 self.inputs['augment_rotations'] = flat_inputs[ind] ind += 1 self.inputs['point_inds'] = flat_inputs[ind] ind += 1 self.inputs['cloud_inds'] = flat_inputs[ind] inputs = self.inputs for k in ['points', 'neighbors', 'pools', 'upsamples']: inputs[k] = [i if i is not None and i.shape.as_list()[0] != 0 else None for i in inputs[k]] inputs['sample_idx'] = { 'down': inputs['pools'], 'up': inputs['upsamples'] } if 'batches_len' in inputs: if 'batches_stack' not in inputs: inputs['batches_stack'] = [inputs['in_batches']] + [None] * (config.num_layers - 2) + [inputs['out_batches']] if 'batches_ind' not in inputs: inputs['batches_ind'] = [inputs['in_batch_inds']] + [None] * (config.num_layers - 1) if '_glb' not in inputs: inputs['_glb'] = {} # per-model/device global storage # inputs['assert_ops'] = [] return inputs def get_result(self): # keys=['logits', 'probs', 'labels'] # head_rst = {h: {k: d[k] for k in keys if k in d} for h, d in self.head_dict['result'].items()} head_rst = self.head_dict['result'] rst = { # {head/task: {probs, labels}, ..., 'inputs': input related} **head_rst, 'inputs': { 'point_inds': self.inputs['point_inds'], 'cloud_inds': self.inputs['cloud_inds'], } } for k in ['batches_len']: if k in self.inputs: rst['inputs'][k] = self.inputs[k] return rst def get_loss(self): return self.loss_dict """ TODO: to check - multiple keys indexing the inputs['point_labels'] should be having the same id in rst - ensure only one tensor passed from gpu to cpu <= """ @tf_scope def build_backbone(self, features, block_list, verbose=True): # building backbone blocks inputs = self.inputs config = self.config num_layers = config.num_layers def is_new_stage(blk): if any([k in blk for k in ['pool', 'strided']]): return 'down' elif any([k in blk for k in ['upsample']]): return 'up' else: return '' if 'stage_list' not in inputs: down_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(num_layers)] up_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(num_layers)] if num_layers > 0 else down_list stage_list = {'down': down_list, 'up': up_list} else: stage_list = inputs['stage_list'] down_list, up_list = stage_list['down'], stage_list['up'] inputs['stage_list'] = stage_list # backbone - init setting stage_i = 0 block_i = 0 stage_sc = 'down' F_list = down_list F_list[stage_i]['p_sample'] = inputs['points'][stage_i] F_list[stage_i]['f_sample'] = features d_out = config.architecture_dims[0] if verbose: print(f'\n\n==== 
{stage_sc}_{stage_i} - arch main') for block_cfg in block_list: block_n = block_cfg.name stage_n = is_new_stage(block_n) # change stage - indexing the stage after down/up-sampling ops if stage_n: if verbose: print('---- pts & features') print_dict(F_list[stage_i], prefix='\t') # update if stage_n == 'down': stage_i += 1 elif stage_n == 'up': stage_i -= 1 else: raise NotImplementedError(f'non supported stage name {stage_n}') # prepare block_i = 0 stage_sc = stage_n F_list = stage_list[stage_n] d_out = config.architecture_dims[stage_i] kr = config.kr_search[stage_i] self.prepare_points(stage_n, stage_i, inputs, config, name=f'{stage_sc}_{stage_i}') if verbose: print(f'\n\n==== {stage_sc}_{stage_i} - arch main') print_dict({k: v[stage_i] for k, v in inputs.items() if isinstance(v, tuple)}, prefix='\t') print(f'\td_out = {d_out}; kr = {kr}\n') if verbose: log_config(block_cfg) # special block if block_n.startswith('__') and block_n.endswith('__'): if block_n == '__up__': block_i = 0 stage_sc = 'up' F_list = up_list F_list[stage_i]['p_sample'] = inputs['points'][stage_i] F_list[stage_i]['f_sample'] = features else: raise ValueError(f'not supported special block {block_n}') # block ops else: with tf.variable_scope(f'{stage_sc}_{stage_i}/{block_n}_{block_i}'): block_ops = get_block_ops(block_n) features = block_ops(features, d_out, inputs, stage_n, stage_i, block_cfg, config, self.is_training) block_i += 1 if verbose: print(f'{block_n}_{block_i}\t{features}') # save the sampled pt/feature (1st block to sample the p_in/f_in of a stage) # NOTE update of inputs done in the ops - e.g. changing pt dyanmically based on feature & spatial sampling in inputs if stage_n: F_list[stage_i]['p_sample'] = inputs['points'][stage_i] F_list[stage_i]['f_sample'] = features # save as last block F_list[stage_i]['p_out'] = inputs['points'][stage_i] F_list[stage_i]['f_out'] = features # align most downsampled stage in up-down? 
if all(v == None for k, v in up_list[-1].items()): up_list[-1] = down_list[-1] if verbose: print('---- pts & features') print_dict(F_list[stage_i], prefix='\t') print_dict({'\nstage list =': stage_list}) return stage_list @tf_scope def prepare_points(self, stage_n, stage_i, inputs, config): # fixed sampling & searching on points - preparing inputs for next stage # (may otherwise be specified as block) stage_list = inputs['stage_list'] assert stage_n in ['up', 'down', ''], f'should not invoke prepare_points with stage_n=\'{stage_n}\'' # if config.debug: # print_dict(inputs, head=f'{stage_n}-{stage_i}') # print(stage_n == 'down' and inputs['points'][stage_i] is None and config.sample in TF_OPS.fix_sample) # print(stage_n == 'down' and inputs['neighbors'][stage_i] is None and config.search in TF_OPS.fix_search) # print(stage_n == 'down' and inputs['sample_idx']['down'][stage_i] is None and config.search in TF_OPS.fix_search) # print(stage_n == 'up' and inputs['sample_idx']['up'][stage_i] is None and config.search in TF_OPS.fix_search) # downsampling if stage_n == 'down' and inputs['points'][stage_i] is None and config.sample in TF_OPS.fix_sample: stage_last = stage_i - 1 # last downsampled stage # stage_last = len([i for i in inputs['points'] if i is not None]) points = stage_list['down'][stage_last]['p_out'] batches_len = inputs['batches_len'][stage_last] if 'batches_len' in inputs else None r = config.r_sample[stage_last] rst = TF_OPS.tf_fix_sample(points, r, config.sample, batches_len, verbose=False, name=config.sample) if 'batches_len' in inputs: inputs['points'][stage_i], inputs['batches_len'][stage_i] = rst else: inputs['points'][stage_i] = rst # neighborhood search if inputs['neighbors'][stage_i] is None and config.search in TF_OPS.fix_search: points = inputs['points'][stage_i] # current stage batches_len = inputs['batches_len'][stage_i] if 'batches_len' in inputs else None kr = config.kr_search[stage_i] inputs['neighbors'][stage_i] = TF_OPS.tf_fix_search(points, points, kr, config.search, batches_len, batches_len, name=config.search) # downsampling - pool if stage_n == 'down' and inputs['sample_idx']['down'][stage_i - 1] is None and config.search in TF_OPS.fix_search: stage_last = stage_i - 1 # last downsampled stage queries, supports = inputs['points'][stage_i], stage_list['down'][stage_last]['p_out'] queries_len = supports_len = None if 'batches_len' in inputs: queries_len, supports_len = inputs['batches_len'][stage_i], inputs['batches_len'][stage_last] kr = config.kr_sample[stage_last] inputs['sample_idx']['down'][stage_last] = TF_OPS.tf_fix_search(queries, supports, kr, config.search, queries_len, supports_len, name=f'{config.search}_down') # upsampling - unpool elif stage_n == 'up' and inputs['sample_idx']['up'][stage_i + 1] is None and config.search in TF_OPS.fix_search: stage_last = stage_i + 1 - config.num_layers # last upsampled stage # stage_last = [i for i, stage_d in enumerate(stage_list['up']) if stage_d['p_out'] is not None] # stage_last = stage_last[0] if stage_last else -1 queries = stage_list['down'][stage_i]['p_out'] supports = stage_list['up'][stage_last]['p_out'] supports = supports if supports is not None else stage_list['down'][-1]['p_out'] # or, the most downsampled queries_len = supports_len = None if 'batches_len' in inputs: queries_len, supports_len = inputs['batches_len'][stage_i], inputs['batches_len'][stage_last] kr = config.kr_sample_up[stage_last] inputs['sample_idx']['up'][stage_last] = TF_OPS.tf_fix_search(queries, supports, kr, config.search, 
queries_len, supports_len, name=f'{config.search}_up') # if self.config.debug: # print_dict(inputs, head=f'{stage_n}-{stage_i} - prepared', except_k='stage_list') # print('-' * 60) return @tf_scope def build_head(self, head_list, verbose=True): # building ouput heads & losses head_dict = self.inputs['head_dict'] if 'head_dict' in self.inputs else {'loss': {}, 'result': {}, 'config': {}} head_list = head_list if isinstance(head_list, (tuple, list)) else [head_list] head_list = [load_config(dataset_name='head', cfg_name=h) if isinstance(h, str) else h for h in head_list] if verbose: print('\n\n==== arch output') for head_cfg in head_list: if verbose: log_config(head_cfg) # if self.config.debug: # print_dict(self.inputs) with tf.variable_scope(f'output/{head_cfg.head_n}'): head_rst = apply_head_ops(self.inputs, head_cfg, self.config, self.is_training) if verbose: print_dict(head_rst) # loss head_k = head_cfg.task if head_cfg.task else head_cfg.head_n # head for specified task, or head_n as key by default loss_keys = ['loss',] for k in loss_keys: head_rst_d = head_rst[k] if isinstance(head_rst[k], dict) else {head_k: head_rst[k]} # use returned dict if provided joint = head_dict[k].keys() & head_rst_d.keys() assert len(joint) == 0, f'head rst {k} has overlapping keys {joint}' head_dict[k].update(head_rst_d) # result rst_keys = ['logits', 'probs', 'labels',] head_rst_d = {k: head_rst[k] for k in head_rst if k not in loss_keys} assert head_cfg.head_n not in head_dict['result'], f'duplicate head {head_cfg.head_n} in dict' assert set(head_rst_d.keys()).issuperset(set(rst_keys)), f'must include keys {rst_keys}, but given {head_rst_d.keys()}' head_dict['result'][head_cfg.head_n] = head_rst_d if head_k and head_k != head_cfg.head_n: # get the task head - flat & overridable if head_k in head_dict['result']: warnings.warn(f'duplicate task head {head_k} in dict, override by {head_cfg.head_n}') head_dict['result'][head_k] = {k: head_rst_d[k][head_k] if isinstance(head_rst_d[k], dict) else head_rst_d[k] for k in head_rst_d} # config head_dict['config'][head_cfg.head_n] = head_cfg head_dict['config'][head_k] = head_cfg if verbose: print('\n\n') return head_dict @tf_scope def build_loss(self, scope=None, head_dict=None): # finalizing loss_dict if head_dict is None: head_dict = self.head_dict loss_dict = head_dict['loss'] sum_fn = tf.accumulate_n if len(self.config.gpu_devices) else tf.add_n # accumulate_n seems not working with cpu-only # get the collection, filtering by 'scope' l2_loss = tf.get_collection('weight_losses', scope) if l2_loss and self.config.optimizer not in ['adamW']: loss_dict['l2_loss'] = sum_fn(l2_loss, name='l2_loss') # L2 # sum total loss loss = sum_fn(list(loss_dict.values()), name='loss') # reconstruct loss dict - reorder & incldue total loss main_n = {'seg': ['S3DIS', 'ScanNet', 'Semantic3D', 'NPM3D', 'ShapeNet', 'PartNet', 'SensatUrban', 'SemanticKITTI']} main_n = {v: k for k, lst in main_n.items() for v in lst}[self.config.dataset] loss_dict = { 'loss': loss, # # should have one and only one 'main' loss # # TODO: may introduce cls & seg head at the same time? => each task a main? 
# main_n: loss_dict.pop(main_n), **loss_dict, } head_dict['loss'] = loss_dict return loss_dict class SceneSegModel(Model): def __init__(self, flat_inputs, is_training, config, scope=None, verbose=True): self.config = config self.is_training = is_training self.scope = scope self.verbose = verbose with tf.variable_scope('inputs'): self.inputs = self.get_inputs(flat_inputs) self.num_layers = config.num_layers self.labels = self.inputs['point_labels'] self.down_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.up_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.stage_list = self.inputs['stage_list'] = {'down': self.down_list, 'up': self.up_list} self.head_dict = self.inputs['head_dict'] = {'loss': {}, 'result': {}, 'config': {}} for i, p in enumerate(self.inputs['points']): # fill points self.down_list[i]['p_out'] = p # up 0 = the most upsampled, num_layers-1 the upsampled pt from the most downsampled self.up_list[i]['p_out'] = p if i < self.num_layers - 1 else None if config.dense_by_conv: dense_layer.config = config with tf.variable_scope('model'): fdim = config.first_features_dim r = config.first_subsampling_dl * config.density_parameter features = self.inputs['features'] F = resnet_backbone(config, self.inputs, features, base_radius=r, base_fdim=fdim, bottleneck_ratio=config.bottleneck_ratio, depth=config.depth, is_training=is_training, init=config.init, weight_decay=config.weight_decay, activation_fn=config.activation_fn, bn=True, bn_momentum=config.bn_momentum, bn_eps=config.bn_eps) F_up, head = resnet_scene_segmentation_head(config, self.inputs, F, base_fdim=fdim, is_training=is_training, init=config.init, weight_decay=config.weight_decay, activation_fn=config.activation_fn, bn=True, bn_momentum=config.bn_momentum, bn_eps=config.bn_eps) for i, p in enumerate(self.inputs['points']): # fill features self.down_list[i]['f_out'] = F[i] # F_up reversed - 0 = the most upsampled, num_layers-1 the upsampled pt from the most downsampled self.up_list[i]['f_out'] = F_up[i] if i < len(F_up) else None self.up_list[-1] = self.down_list[-1] # align the most-downsampled layer if head is not None: latent, logits = head self.up_list[0]['latent'] = latent self.up_list[0]['logits'] = logits self.head_dict = self.build_head(self.config.arch_out, verbose=verbose) self.loss_dict = self.build_loss(scope) return class ModelBuilder(Model): def __init__(self, flat_inputs, is_training, config, scope=None, verbose=True): self.config = config self.is_training = is_training self.scope = scope # variable scope - potential sharing across devices (e.g. gpus) self.verbose = verbose with tf.variable_scope('inputs'): self.inputs = self.get_inputs(flat_inputs) self.num_layers = config.num_layers self.labels = self.inputs['point_labels'] with tf.variable_scope('model'): self.head_dict = self.build_model_plain_split() self.loss_dict = self.build_loss(scope=scope) return def build_model_plain_split(self): """ detect down-/up-sample via ops => architecture = [ops, ...] 
""" config = self.config self.down_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] self.up_list = [{'p_sample': None, 'f_sample': None, 'p_out': None, 'f_out': None} for i in range(self.num_layers)] if self.num_layers > 0 else self.down_list self.stage_list = {'down': self.down_list, 'up': self.up_list} self.head_dict = {'loss': {}, 'result': {}, 'config': {}} inputs = self.inputs inputs['stage_list'] = self.stage_list inputs['head_dict'] = self.head_dict # split arch: input -> main -> output if '__input__' in config.architecture and '__output__' in config.architecture: arch_in = config.architecture[:config.architecture.index('__input__')] arch_main = config.architecture[len(arch_in) + 1:config.architecture.index('__output__')] arch_out = config.architecture[config.architecture.index('__output__') + 1:] else: arch_in = config.arch_in arch_main = config.arch_main arch_out = config.arch_out assert len(arch_in) and len(arch_out), f'invalid split of architecture {config.architecture}' arch_in = [get_block_cfg(blk) if isinstance(blk, str) else blk for blk in arch_in] arch_main = [get_block_cfg(blk) if isinstance(blk, str) else blk for blk in arch_main] arch_out = [load_config(dataset_name='head', cfg_name=a) for a in arch_out] # arch input features = inputs['features'] self.prepare_points('', 0, inputs, config) arch_in_dims = config.arch_in_dims if config.arch_in_dims else [config.first_features_dim] * len(arch_in) if self.verbose: print(f'\n\n==== inputs') print_dict(inputs, prefix='\t', except_k=['stage_list']) print('\n\n==== arch input') for block_i, (block_cfg, d_out) in enumerate(zip(arch_in, arch_in_dims)): with tf.variable_scope(f'input/{block_cfg.name}_{block_i}'):
features = apply_block_ops(features, d_out, inputs, '', 0, block_cfg, config, self.is_training)
9
2023-10-13 08:03:07+00:00
16k
YingqingHe/ScaleCrafter-ptl
scripts/txt2img.py
[ { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n ra...
import argparse, os, sys import cv2 import torch import numpy as np import intel_extension_for_pytorch as ipex from omegaconf import OmegaConf from PIL import Image from tqdm import tqdm, trange from itertools import islice from einops import rearrange from torchvision.utils import make_grid from pytorch_lightning import seed_everything from torch import autocast from contextlib import nullcontext from imwatermark import WatermarkEncoder from ldm.util import instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.dpm_solver import DPMSolverSampler from tiled_decode import tiled_vae_decoding
11,428
print(f"reading prompts from {opt.from_file}") with open(opt.from_file, "r") as f: data = f.read().splitlines() data = [p for p in data for i in range(opt.repeat)] data = list(chunk(data, batch_size)) sample_path = os.path.join(outpath, "samples") os.makedirs(sample_path, exist_ok=True) sample_count = 0 base_count = len(os.listdir(sample_path)) grid_count = len(os.listdir(outpath)) - 1 start_code = None if opt.fixed_code: start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) if opt.torchscript or opt.ipex: transformer = model.cond_stage_model.model unet = model.model.diffusion_model decoder = model.first_stage_model.decoder additional_context = torch.cpu.amp.autocast() if opt.bf16 else nullcontext() shape = [opt.C, opt.H // opt.f, opt.W // opt.f] if opt.bf16 and not opt.torchscript and not opt.ipex: raise ValueError('Bfloat16 is supported only for torchscript+ipex') if opt.bf16 and unet.dtype != torch.bfloat16: raise ValueError("Use configs/stable-diffusion/intel/ configs with bf16 enabled if " + "you'd like to use bfloat16 with CPU.") if unet.dtype == torch.float16 and device == torch.device("cpu"): raise ValueError("Use configs/stable-diffusion/intel/ configs for your model if you'd like to run it on CPU.") if opt.ipex: bf16_dtype = torch.bfloat16 if opt.bf16 else None transformer = transformer.to(memory_format=torch.channels_last) transformer = ipex.optimize(transformer, level="O1", inplace=True) unet = unet.to(memory_format=torch.channels_last) unet = ipex.optimize(unet, level="O1", auto_kernel_selection=True, inplace=True, dtype=bf16_dtype) decoder = decoder.to(memory_format=torch.channels_last) decoder = ipex.optimize(decoder, level="O1", auto_kernel_selection=True, inplace=True, dtype=bf16_dtype) if opt.torchscript: with torch.no_grad(), additional_context: # get UNET scripted if unet.use_checkpoint: raise ValueError("Gradient checkpoint won't work with tracing. 
" + "Use configs/stable-diffusion/intel/ configs for your model or disable checkpoint in your config.") img_in = torch.ones(2, 4, 96, 96, dtype=torch.float32) t_in = torch.ones(2, dtype=torch.int64) context = torch.ones(2, 77, 1024, dtype=torch.float32) scripted_unet = torch.jit.trace(unet, (img_in, t_in, context)) scripted_unet = torch.jit.optimize_for_inference(scripted_unet) print(type(scripted_unet)) model.model.scripted_diffusion_model = scripted_unet # get Decoder for first stage model scripted samples_ddim = torch.ones(1, 4, 96, 96, dtype=torch.float32) scripted_decoder = torch.jit.trace(decoder, (samples_ddim)) scripted_decoder = torch.jit.optimize_for_inference(scripted_decoder) print(type(scripted_decoder)) model.first_stage_model.decoder = scripted_decoder prompts = data[0] print("Running a forward pass to initialize optimizations") uc = None if opt.scale != 1.0: uc = model.get_learned_conditioning(batch_size * [""]) if isinstance(prompts, tuple): prompts = list(prompts) with torch.no_grad(), additional_context: for _ in range(3): c = model.get_learned_conditioning(prompts) samples_ddim, _ = sampler.sample(S=5, conditioning=c, batch_size=batch_size, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code) print("Running a forward pass for decoder") for _ in range(3): x_samples_ddim = model.decode_first_stage(samples_ddim) precision_scope = autocast if opt.precision=="autocast" or opt.bf16 else nullcontext with torch.no_grad(), \ precision_scope(opt.device), \ model.ema_scope(): all_samples = list() for n in trange(opt.n_iter, desc="Sampling"): for prompts in tqdm(data, desc="data"): uc = None if opt.scale != 1.0: uc = model.get_learned_conditioning(batch_size * [""]) if isinstance(prompts, tuple): prompts = list(prompts) c = model.get_learned_conditioning(prompts) shape = [opt.C, opt.H // opt.f, opt.W // opt.f] samples, _ = sampler.sample(S=opt.steps, conditioning=c, batch_size=opt.n_samples, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code, # redilation dilate=opt.dilate, dilate_tau=opt.dilate_tau, dilate_skip=opt.dilate_skip, progressive_dilate=opt.progressive_dilate ) if opt.tiled_decoding: bb,cc,hh,ww = samples.shape
sys.path.insert(0, os.getcwd()) torch.set_grad_enabled(False) def chunk(it, size): it = iter(it) return iter(lambda: tuple(islice(it, size)), ()) def load_model_from_config(config, ckpt, device=torch.device("cuda"), verbose=False): print(f"Loading model from {ckpt}") pl_sd = torch.load(ckpt, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") sd = pl_sd["state_dict"] model = instantiate_from_config(config.model) m, u = model.load_state_dict(sd, strict=False) if len(m) > 0 and verbose: print("missing keys:") print(m) if len(u) > 0 and verbose: print("unexpected keys:") print(u) if device == torch.device("cuda"): model.cuda() elif device == torch.device("cpu"): model.cpu() model.cond_stage_model.device = "cpu" else: raise ValueError(f"Incorrect device name. Received: {device}") model.eval() return model def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--prompt", type=str, nargs="?", default="a professional photograph of an astronaut riding a triceratops", help="the prompt to render" ) parser.add_argument( "--outdir", type=str, nargs="?", help="dir to write results to", default="outputs/txt2img-samples" ) parser.add_argument( "--steps", type=int, default=50, help="number of ddim sampling steps", ) parser.add_argument( "--plms", action='store_true', help="use plms sampling", ) parser.add_argument( "--dpm", action='store_true', help="use DPM (2) sampler", ) parser.add_argument( "--fixed_code", action='store_true', help="if enabled, uses the same starting code across all samples ", ) parser.add_argument( "--ddim_eta", type=float, default=0.0, help="ddim eta (eta=0.0 corresponds to deterministic sampling", ) parser.add_argument( "--n_iter", type=int, default=3, help="sample this often", ) parser.add_argument( "--H", type=int, default=512, help="image height, in pixel space", ) parser.add_argument( "--W", type=int, default=512, help="image width, in pixel space", ) parser.add_argument( "--C", type=int, default=4, help="latent channels", ) parser.add_argument( "--f", type=int, default=8, help="downsampling factor, most often 8 or 16", ) parser.add_argument( "--n_samples", type=int, default=3, help="how many samples to produce for each given prompt. 
A.k.a batch size", ) parser.add_argument( "--n_rows", type=int, default=0, help="rows in the grid (default: n_samples)", ) parser.add_argument( "--scale", type=float, default=9.0, help="unconditional guidance scale: eps = eps(x, empty) + scale * (eps(x, cond) - eps(x, empty))", ) parser.add_argument( "--from-file", type=str, help="if specified, load prompts from this file, separated by newlines", ) parser.add_argument( "--config", type=str, default="configs/stable-diffusion/v2-inference.yaml", help="path to config which constructs model", ) parser.add_argument( "--ckpt", type=str, help="path to checkpoint of model", ) parser.add_argument( "--seed", type=int, default=42, help="the seed (for reproducible sampling)", ) parser.add_argument( "--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast" ) parser.add_argument( "--repeat", type=int, default=1, help="repeat each prompt in file this often", ) parser.add_argument( "--device", type=str, help="Device on which Stable Diffusion will be run", choices=["cpu", "cuda"], default="cpu" ) parser.add_argument( "--torchscript", action='store_true', help="Use TorchScript", ) parser.add_argument( "--ipex", action='store_true', help="Use Intel® Extension for PyTorch*", ) parser.add_argument( "--bf16", action='store_true', help="Use bfloat16", ) # redilation parser.add_argument( "--dilate", type=int, default=None, help="redilation factor", ) parser.add_argument( "--dilate_tau", type=int, default=None, help="timestep control, larger means more dilations", ) parser.add_argument( "--dilate_skip", type=int, default=None, help="layer control, larger means less dilations", ) parser.add_argument( "--progressive_dilate", action='store_true', help="Use progressive dilate", ) parser.add_argument( "--tiled_decoding", action='store_true', help="Use progressive dilate", ) parser.add_argument( "--overlap", type=int, default=24, help="length of overlapped regions", ) parser.add_argument( "--sync_gn", action='store_true', help="Use sync_gn", ) opt = parser.parse_args() return opt def put_watermark(img, wm_encoder=None): if wm_encoder is not None: img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR) img = wm_encoder.encode(img, 'dwtDct') img = Image.fromarray(img[:, :, ::-1]) return img def main(opt): seed_everything(opt.seed) config = OmegaConf.load(f"{opt.config}") device = torch.device("cuda") if opt.device == "cuda" else torch.device("cpu") if opt.tiled_decoding: config.model.params.first_stage_config.params.tiled = True if opt.sync_gn: config.model.params.first_stage_config.params.ddconfig.sync_gn = True model = load_model_from_config(config, f"{opt.ckpt}", device) if opt.plms: sampler = PLMSSampler(model, device=device) elif opt.dpm: sampler = DPMSolverSampler(model, device=device) else: sampler = DDIMSampler(model, device=device) os.makedirs(opt.outdir, exist_ok=True) outpath = opt.outdir print("Creating invisible watermark encoder (see https://github.com/ShieldMnt/invisible-watermark)...") wm = "SDV2" wm_encoder = WatermarkEncoder() wm_encoder.set_watermark('bytes', wm.encode('utf-8')) batch_size = opt.n_samples n_rows = opt.n_rows if opt.n_rows > 0 else batch_size if not opt.from_file: prompt = opt.prompt assert prompt is not None data = [batch_size * [prompt]] else: print(f"reading prompts from {opt.from_file}") with open(opt.from_file, "r") as f: data = f.read().splitlines() data = [p for p in data for i in range(opt.repeat)] data = list(chunk(data, batch_size)) sample_path = os.path.join(outpath, "samples") 
os.makedirs(sample_path, exist_ok=True) sample_count = 0 base_count = len(os.listdir(sample_path)) grid_count = len(os.listdir(outpath)) - 1 start_code = None if opt.fixed_code: start_code = torch.randn([opt.n_samples, opt.C, opt.H // opt.f, opt.W // opt.f], device=device) if opt.torchscript or opt.ipex: transformer = model.cond_stage_model.model unet = model.model.diffusion_model decoder = model.first_stage_model.decoder additional_context = torch.cpu.amp.autocast() if opt.bf16 else nullcontext() shape = [opt.C, opt.H // opt.f, opt.W // opt.f] if opt.bf16 and not opt.torchscript and not opt.ipex: raise ValueError('Bfloat16 is supported only for torchscript+ipex') if opt.bf16 and unet.dtype != torch.bfloat16: raise ValueError("Use configs/stable-diffusion/intel/ configs with bf16 enabled if " + "you'd like to use bfloat16 with CPU.") if unet.dtype == torch.float16 and device == torch.device("cpu"): raise ValueError("Use configs/stable-diffusion/intel/ configs for your model if you'd like to run it on CPU.") if opt.ipex: bf16_dtype = torch.bfloat16 if opt.bf16 else None transformer = transformer.to(memory_format=torch.channels_last) transformer = ipex.optimize(transformer, level="O1", inplace=True) unet = unet.to(memory_format=torch.channels_last) unet = ipex.optimize(unet, level="O1", auto_kernel_selection=True, inplace=True, dtype=bf16_dtype) decoder = decoder.to(memory_format=torch.channels_last) decoder = ipex.optimize(decoder, level="O1", auto_kernel_selection=True, inplace=True, dtype=bf16_dtype) if opt.torchscript: with torch.no_grad(), additional_context: # get UNET scripted if unet.use_checkpoint: raise ValueError("Gradient checkpoint won't work with tracing. " + "Use configs/stable-diffusion/intel/ configs for your model or disable checkpoint in your config.") img_in = torch.ones(2, 4, 96, 96, dtype=torch.float32) t_in = torch.ones(2, dtype=torch.int64) context = torch.ones(2, 77, 1024, dtype=torch.float32) scripted_unet = torch.jit.trace(unet, (img_in, t_in, context)) scripted_unet = torch.jit.optimize_for_inference(scripted_unet) print(type(scripted_unet)) model.model.scripted_diffusion_model = scripted_unet # get Decoder for first stage model scripted samples_ddim = torch.ones(1, 4, 96, 96, dtype=torch.float32) scripted_decoder = torch.jit.trace(decoder, (samples_ddim)) scripted_decoder = torch.jit.optimize_for_inference(scripted_decoder) print(type(scripted_decoder)) model.first_stage_model.decoder = scripted_decoder prompts = data[0] print("Running a forward pass to initialize optimizations") uc = None if opt.scale != 1.0: uc = model.get_learned_conditioning(batch_size * [""]) if isinstance(prompts, tuple): prompts = list(prompts) with torch.no_grad(), additional_context: for _ in range(3): c = model.get_learned_conditioning(prompts) samples_ddim, _ = sampler.sample(S=5, conditioning=c, batch_size=batch_size, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code) print("Running a forward pass for decoder") for _ in range(3): x_samples_ddim = model.decode_first_stage(samples_ddim) precision_scope = autocast if opt.precision=="autocast" or opt.bf16 else nullcontext with torch.no_grad(), \ precision_scope(opt.device), \ model.ema_scope(): all_samples = list() for n in trange(opt.n_iter, desc="Sampling"): for prompts in tqdm(data, desc="data"): uc = None if opt.scale != 1.0: uc = model.get_learned_conditioning(batch_size * [""]) if isinstance(prompts, tuple): prompts = list(prompts) c = 
model.get_learned_conditioning(prompts) shape = [opt.C, opt.H // opt.f, opt.W // opt.f] samples, _ = sampler.sample(S=opt.steps, conditioning=c, batch_size=opt.n_samples, shape=shape, verbose=False, unconditional_guidance_scale=opt.scale, unconditional_conditioning=uc, eta=opt.ddim_eta, x_T=start_code, # redilation dilate=opt.dilate, dilate_tau=opt.dilate_tau, dilate_skip=opt.dilate_skip, progressive_dilate=opt.progressive_dilate ) if opt.tiled_decoding: bb,cc,hh,ww = samples.shape
x_samples = tiled_vae_decoding(model, samples, window_size=hh//2, overlap=opt.overlap, sync_gn=opt.sync_gn)
4
2023-10-11 10:57:55+00:00
16k
bilibini/Lovely_Image_Downloader
py/Python38/site-packages/charset_normalizer/cd.py
[ { "identifier": "FREQUENCIES", "path": "py/Python38/site-packages/charset_normalizer/constant.py", "snippet": "FREQUENCIES: Dict[str, List[str]] = {\n \"English\": [\n \"e\",\n \"a\",\n \"t\",\n \"i\",\n \"o\",\n \"n\",\n \"s\",\n \"r\",\n ...
import importlib
from codecs import IncrementalDecoder
from collections import Counter
from functools import lru_cache
from typing import Counter as TypeCounter, Dict, List, Optional, Tuple
from .constant import (
    FREQUENCIES,
    KO_NAMES,
    LANGUAGE_SUPPORTED_COUNT,
    TOO_SMALL_SEQUENCE,
    ZH_NAMES,
)
from .md import is_suspiciously_successive_range
from .models import CoherenceMatches
from .utils import (
    is_accentuated,
    is_latin,
    is_multi_byte_encoding,
    is_unicode_range_secondary,
    unicode_range,
)
11,252
if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. """ layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
def encoding_unicode_range(iana_name: str) -> List[str]: """ Return associated unicode ranges in a single byte code page. """ if is_multi_byte_encoding(iana_name): raise IOError("Function not supported on multi-byte code page") decoder = importlib.import_module( "encodings.{}".format(iana_name) ).IncrementalDecoder p: IncrementalDecoder = decoder(errors="ignore") seen_ranges: Dict[str, int] = {} character_count: int = 0 for i in range(0x40, 0xFF): chunk: str = p.decode(bytes([i])) if chunk: character_range: Optional[str] = unicode_range(chunk) if character_range is None: continue if is_unicode_range_secondary(character_range) is False: if character_range not in seen_ranges: seen_ranges[character_range] = 0 seen_ranges[character_range] += 1 character_count += 1 return sorted( [ character_range for character_range in seen_ranges if seen_ranges[character_range] / character_count >= 0.15 ] ) def unicode_range_languages(primary_range: str) -> List[str]: """ Return inferred languages used with a unicode range. """ languages: List[str] = [] for language, characters in FREQUENCIES.items(): for character in characters: if unicode_range(character) == primary_range: languages.append(language) break return languages @lru_cache() def encoding_languages(iana_name: str) -> List[str]: """ Single-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ unicode_ranges: List[str] = encoding_unicode_range(iana_name) primary_range: Optional[str] = None for specified_range in unicode_ranges: if "Latin" not in specified_range: primary_range = specified_range break if primary_range is None: return ["Latin Based"] return unicode_range_languages(primary_range) @lru_cache() def mb_encoding_languages(iana_name: str) -> List[str]: """ Multi-byte encoding language association. Some code page are heavily linked to particular language(s). This function does the correspondence. """ if ( iana_name.startswith("shift_") or iana_name.startswith("iso2022_jp") or iana_name.startswith("euc_j") or iana_name == "cp932" ): return ["Japanese"] if iana_name.startswith("gb") or iana_name in ZH_NAMES: return ["Chinese"] if iana_name.startswith("iso2022_kr") or iana_name in KO_NAMES: return ["Korean"] return [] @lru_cache(maxsize=LANGUAGE_SUPPORTED_COUNT) def get_target_features(language: str) -> Tuple[bool, bool]: """ Determine main aspects from a supported language if it contains accents and if is pure Latin. """ target_have_accents: bool = False target_pure_latin: bool = True for character in FREQUENCIES[language]: if not target_have_accents and is_accentuated(character): target_have_accents = True if target_pure_latin and is_latin(character) is False: target_pure_latin = False return target_have_accents, target_pure_latin def alphabet_languages( characters: List[str], ignore_non_latin: bool = False ) -> List[str]: """ Return associated languages associated to given characters. 
""" languages: List[Tuple[str, float]] = [] source_have_accents = any(is_accentuated(character) for character in characters) for language, language_characters in FREQUENCIES.items(): target_have_accents, target_pure_latin = get_target_features(language) if ignore_non_latin and target_pure_latin is False: continue if target_have_accents is False and source_have_accents: continue character_count: int = len(language_characters) character_match_count: int = len( [c for c in language_characters if c in characters] ) ratio: float = character_match_count / character_count if ratio >= 0.2: languages.append((language, ratio)) languages = sorted(languages, key=lambda x: x[1], reverse=True) return [compatible_language[0] for compatible_language in languages] def characters_popularity_compare( language: str, ordered_characters: List[str] ) -> float: """ Determine if a ordered characters list (by occurrence from most appearance to rarest) match a particular language. The result is a ratio between 0. (absolutely no correspondence) and 1. (near perfect fit). Beware that is function is not strict on the match in order to ease the detection. (Meaning close match is 1.) """ if language not in FREQUENCIES: raise ValueError("{} not available".format(language)) character_approved_count: int = 0 FREQUENCIES_language_set = set(FREQUENCIES[language]) ordered_characters_count: int = len(ordered_characters) target_language_characters_count: int = len(FREQUENCIES[language]) large_alphabet: bool = target_language_characters_count > 26 for character, character_rank in zip( ordered_characters, range(0, ordered_characters_count) ): if character not in FREQUENCIES_language_set: continue character_rank_in_language: int = FREQUENCIES[language].index(character) expected_projection_ratio: float = ( target_language_characters_count / ordered_characters_count ) character_rank_projection: int = int(character_rank * expected_projection_ratio) if ( large_alphabet is False and abs(character_rank_projection - character_rank_in_language) > 4 ): continue if ( large_alphabet is True and abs(character_rank_projection - character_rank_in_language) < target_language_characters_count / 3 ): character_approved_count += 1 continue characters_before_source: List[str] = FREQUENCIES[language][ 0:character_rank_in_language ] characters_after_source: List[str] = FREQUENCIES[language][ character_rank_in_language: ] characters_before: List[str] = ordered_characters[0:character_rank] characters_after: List[str] = ordered_characters[character_rank:] before_match_count: int = len( set(characters_before) & set(characters_before_source) ) after_match_count: int = len( set(characters_after) & set(characters_after_source) ) if len(characters_before_source) == 0 and before_match_count <= 4: character_approved_count += 1 continue if len(characters_after_source) == 0 and after_match_count <= 4: character_approved_count += 1 continue if ( before_match_count / len(characters_before_source) >= 0.4 or after_match_count / len(characters_after_source) >= 0.4 ): character_approved_count += 1 continue return character_approved_count / len(ordered_characters) def alpha_unicode_split(decoded_sequence: str) -> List[str]: """ Given a decoded text sequence, return a list of str. Unicode range / alphabet separation. Ex. a text containing English/Latin with a bit a Hebrew will return two items in the resulting list; One containing the latin letters and the other hebrew. 
""" layers: Dict[str, str] = {} for character in decoded_sequence: if character.isalpha() is False: continue character_range: Optional[str] = unicode_range(character) if character_range is None: continue layer_target_range: Optional[str] = None for discovered_range in layers: if (
is_suspiciously_successive_range(discovered_range, character_range)
5
2023-10-11 09:08:57+00:00
16k
MTgeophysics/mtpy-v2
mtpy/modeling/occam1d/startup.py
[ { "identifier": "Occam1DData", "path": "mtpy/modeling/occam1d/data.py", "snippet": "class Occam1DData(object):\n \"\"\"\n reads and writes occam 1D data files\n\n ===================== =====================================================\n Attributes Description\n ===========...
from pathlib import Path
from mtpy.modeling.occam1d import Occam1DData, Occam1DModel
import time
import numpy as np
12,107
self.startup_fn = None self.rough_type = 1 self.max_iter = 20 self.target_rms = 1 self.start_rho = 100 self.description = "1D_Occam_Inv" self.start_lagrange = 5.0 self.start_rough = 1.0e7 self.debug_level = 1 self.start_iter = 0 self.start_misfit = 100 self.min_max_bounds = None self.model_step = None self._startup_fn = "OccamStartup1D" self._ss = " " * 3 for key, value in kwargs.items(): setattr(self, key, value) @property def data_fn(self): return self._data_fn @data_fn.setter def data_fn(self, fn): if fn is not None: self._data_fn = Path(fn) else: self._data_fn = None @property def model_fn(self): return self._model_fn @model_fn.setter def model_fn(self, fn): if fn is not None: self._model_fn = Path(fn) else: self._model_fn = None def write_startup_file(self, save_path=None, **kwargs): """ Make a 1D input file for Occam 1D Arguments: --------- **savepath** : full path to save input file to, if just path then saved as savepath/input **model_fn** : full path to model file, if None then assumed to be in savepath/model.mod **data_fn** : full path to data file, if None then assumed to be in savepath/TE.dat or TM.dat **rough_type** : roughness type. *default* = 0 **max_iter** : maximum number of iterations. *default* = 20 **target_rms** : target rms value. *default* = 1.0 **start_rho** : starting resistivity value on linear scale. *default* = 100 **description** : description of the inversion. **start_lagrange** : starting Lagrange multiplier for smoothness. *default* = 5 **start_rough** : starting roughness value. *default* = 1E7 **debuglevel** : something to do with how Fortran debuggs the code Almost always leave at *default* = 1 **start_iter** : the starting iteration number, handy if the starting model is from a previous run. *default* = 0 **start_misfit** : starting misfit value. *default* = 100 Returns: -------- **Occam1D.inputfn** : full path to input file. :Example: :: >>> old = occam.Occam1D() >>> old.make1DdataFile('MT01',edipath=r"/home/Line1", >>> savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> mode='TE') >>> Wrote Data File: /home/Occam1D/Line1/Inv1_TE/MT01TE.dat >>> >>> old.make1DModelFile(savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> nlayers=50,bottomlayer=10000,z1layer=50) >>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D >>> >>> old.make1DInputFile(rhostart=10,targetrms=1.5,maxiter=15) >>> Wrote Input File: /home/Occam1D/Line1/Inv1_TE/Input1D """ if save_path is not None: self.save_path = save_path if not self.save_path.is_dir(): self.save_path.mkdir() self.startup_fn = self.save_path.joinpath(self._startup_fn) # --> read data file if self.data_fn is None: raise IOError("Need to input data file name.") else: data = Occam1DData() data.read_data_file(self.data_fn) # --> read model file if self.model_fn is None: raise IOError("Need to input model file name.") else:
# -*- coding: utf-8 -*- """ Created on Mon Oct 30 13:32:42 2023 @author: jpeacock """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Occam1DStartup(object): """ read and write input files for Occam1D ====================== ==================================================== Attributes Description ====================== ==================================================== _ss string spacing _startup_fn basename of startup file *default* is OccamStartup1D data_fn full path to data file debug_level debug level *default* is 1 description description of inversion for your self *default* is 1D_Occam_Inv max_iter maximum number of iterations *default* is 20 model_fn full path to model file rough_type roughness type *default* is 1 save_path full path to save files to start_iter first iteration number *default* is 0 start_lagrange starting lagrange number on log scale *default* is 5 start_misfit starting misfit value *default* is 100 start_rho starting resistivity value (halfspace) in log scale *default* is 100 start_rough starting roughness (ignored by Occam1D) *default* is 1E7 startup_fn full path to startup file target_rms target rms *default* is 1.0 ====================== ==================================================== """ def __init__(self, data_fn=None, model_fn=None, **kwargs): self.data_fn = data_fn self.model_fn = model_fn if self.data_fn is not None: self.save_path = self.data_fn.parent elif self.model_fn is not None: self.save_path = self.model_fn.parent self.startup_fn = None self.rough_type = 1 self.max_iter = 20 self.target_rms = 1 self.start_rho = 100 self.description = "1D_Occam_Inv" self.start_lagrange = 5.0 self.start_rough = 1.0e7 self.debug_level = 1 self.start_iter = 0 self.start_misfit = 100 self.min_max_bounds = None self.model_step = None self._startup_fn = "OccamStartup1D" self._ss = " " * 3 for key, value in kwargs.items(): setattr(self, key, value) @property def data_fn(self): return self._data_fn @data_fn.setter def data_fn(self, fn): if fn is not None: self._data_fn = Path(fn) else: self._data_fn = None @property def model_fn(self): return self._model_fn @model_fn.setter def model_fn(self, fn): if fn is not None: self._model_fn = Path(fn) else: self._model_fn = None def write_startup_file(self, save_path=None, **kwargs): """ Make a 1D input file for Occam 1D Arguments: --------- **savepath** : full path to save input file to, if just path then saved as savepath/input **model_fn** : full path to model file, if None then assumed to be in savepath/model.mod **data_fn** : full path to data file, if None then assumed to be in savepath/TE.dat or TM.dat **rough_type** : roughness type. *default* = 0 **max_iter** : maximum number of iterations. *default* = 20 **target_rms** : target rms value. *default* = 1.0 **start_rho** : starting resistivity value on linear scale. *default* = 100 **description** : description of the inversion. **start_lagrange** : starting Lagrange multiplier for smoothness. *default* = 5 **start_rough** : starting roughness value. *default* = 1E7 **debuglevel** : something to do with how Fortran debuggs the code Almost always leave at *default* = 1 **start_iter** : the starting iteration number, handy if the starting model is from a previous run. *default* = 0 **start_misfit** : starting misfit value. 
*default* = 100 Returns: -------- **Occam1D.inputfn** : full path to input file. :Example: :: >>> old = occam.Occam1D() >>> old.make1DdataFile('MT01',edipath=r"/home/Line1", >>> savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> mode='TE') >>> Wrote Data File: /home/Occam1D/Line1/Inv1_TE/MT01TE.dat >>> >>> old.make1DModelFile(savepath=r"/home/Occam1D/Line1/Inv1_TE", >>> nlayers=50,bottomlayer=10000,z1layer=50) >>> Wrote Model file: /home/Occam1D/Line1/Inv1_TE/Model1D >>> >>> old.make1DInputFile(rhostart=10,targetrms=1.5,maxiter=15) >>> Wrote Input File: /home/Occam1D/Line1/Inv1_TE/Input1D """ if save_path is not None: self.save_path = save_path if not self.save_path.is_dir(): self.save_path.mkdir() self.startup_fn = self.save_path.joinpath(self._startup_fn) # --> read data file if self.data_fn is None: raise IOError("Need to input data file name.") else: data = Occam1DData() data.read_data_file(self.data_fn) # --> read model file if self.model_fn is None: raise IOError("Need to input model file name.") else:
model = Occam1DModel()
1
2023-10-11 22:24:50+00:00
16k
Jacoo-ai/HIC-Yolov5
detect.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None, inplace=True, fuse=True):\n from models.yolo import Detect, Model\n\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ens...
import argparse
import os
import sys
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import onnxruntime
import tensorflow as tf
from pathlib import Path
from models.experimental import attempt_load
from utils.datasets import LoadImages, LoadStreams
from utils.general import apply_classifier, check_img_size, check_imshow, check_requirements, check_suffix, colorstr, \
    increment_path, non_max_suppression, print_args, save_one_box, scale_coords, set_logging, \
    strip_optimizer, xyxy2xywh
from utils.plots import Annotator, colors
from utils.torch_utils import load_classifier, select_device, time_sync
11,167
t2 = time_sync() dt[0] += t2 - t1 # Inference if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: if dnn: net.setInput(img) pred = torch.tensor(net.forward()) else: pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) else: # tensorflow model (tflite, pb, saved_model) imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy if pb: pred = frozen_func(x=tf.constant(imn)).numpy() elif saved_model: pred = model(imn, training=False).numpy() elif tflite: if int8: scale, zero_point = input_details[0]['quantization'] imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) if int8: scale, zero_point = output_details[0]['quantization'] pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --source path/to/img.jpg --weights yolov5s.pt --img 640 """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5m.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=640, # inference size (pixels) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith( ('rtsp://', 'rtmp://', 'http://', 'https://')) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Initialize set_logging() device = select_device(device) half &= device.type != 'cpu' # half precision only supported on CUDA # Load model w = str(weights[0] if isinstance(weights, list) else weights) classify, suffix, suffixes = False, Path(w).suffix.lower(), ['.pt', '.onnx', '.tflite', '.pb', ''] check_suffix(w, suffixes) # check weights have acceptable suffix pt, onnx, tflite, pb, saved_model = (suffix == x for x in suffixes) # backend booleans stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults if pt: model = torch.jit.load(w) if 'torchscript' in w else attempt_load(weights, map_location=device) stride = int(model.stride.max()) # model stride names = model.module.names if hasattr(model, 'module') else model.names # get class names if half: model.half() # to FP16 if classify: # second-stage classifier modelc = load_classifier(name='resnet50', n=2) # initialize modelc.load_state_dict(torch.load('resnet50.pt', map_location=device)['model']).to(device).eval() elif onnx: if dnn: # check_requirements(('opencv-python>=4.5.4',)) net = cv2.dnn.readNetFromONNX(w) else: check_requirements(('onnx', 'onnxruntime')) session = onnxruntime.InferenceSession(w, None) else: # TensorFlow models check_requirements(('tensorflow>=2.4.1',)) if pb: # https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt def wrap_frozen_graph(gd, inputs, outputs): x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=""), []) # wrapped import return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs), 
tf.nest.map_structure(x.graph.as_graph_element, outputs)) graph_def = tf.Graph().as_graph_def() graph_def.ParseFromString(open(w, 'rb').read()) frozen_func = wrap_frozen_graph(gd=graph_def, inputs="x:0", outputs="Identity:0") elif saved_model: model = tf.keras.models.load_model(w) elif tflite: interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model interpreter.allocate_tensors() # allocate input_details = interpreter.get_input_details() # inputs output_details = interpreter.get_output_details() # outputs int8 = input_details[0]['dtype'] == np.uint8 # is TFLite quantized uint8 model imgsz = check_img_size(imgsz, s=stride) # check image size # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference if pt and device.type != 'cpu': model(torch.zeros(1, 3, *imgsz).to(device).type_as(next(model.parameters()))) # run once dt, seen = [0.0, 0.0, 0.0], 0 for path, img, im0s, vid_cap in dataset: t1 = time_sync() if onnx: img = img.astype('float32') else: img = torch.from_numpy(img).to(device) img = img.half() if half else img.float() # uint8 to fp16/32 img = img / 255.0 # 0 - 255 to 0.0 - 1.0 if len(img.shape) == 3: img = img[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference if pt: visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(img, augment=augment, visualize=visualize)[0] elif onnx: if dnn: net.setInput(img) pred = torch.tensor(net.forward()) else: pred = torch.tensor(session.run([session.get_outputs()[0].name], {session.get_inputs()[0].name: img})) else: # tensorflow model (tflite, pb, saved_model) imn = img.permute(0, 2, 3, 1).cpu().numpy() # image in numpy if pb: pred = frozen_func(x=tf.constant(imn)).numpy() elif saved_model: pred = model(imn, training=False).numpy() elif tflite: if int8: scale, zero_point = input_details[0]['quantization'] imn = (imn / scale + zero_point).astype(np.uint8) # de-scale interpreter.set_tensor(input_details[0]['index'], imn) interpreter.invoke() pred = interpreter.get_tensor(output_details[0]['index']) if int8: scale, zero_point = output_details[0]['quantization'] pred = (pred.astype(np.float32) - zero_point) * scale # re-scale pred[..., 0] *= imgsz[1] # x pred[..., 1] *= imgsz[0] # y pred[..., 2] *= imgsz[1] # w pred[..., 3] *= imgsz[0] # h pred = torch.tensor(pred) t3 = time_sync() dt[1] += t3 - t2 # NMS pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) if classify: pred = apply_classifier(pred, modelc, img, im0s) # Process predictions for i, det in enumerate(pred): # per image seen += 1 if webcam: # batch_size >= 1 p, s, im0, frame = path[i], f'{i}: ', im0s[i].copy(), dataset.count else: p, s, im0, frame = path, '', im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # img.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # img.txt s += '%gx%g ' % img.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, 
line_width=line_thickness, example=str(names)) if len(det): # Rescale boxes from img_size to im0 size det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round() # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *xyxy, conf, cls in reversed(det): if save_txt: # Write to file xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add bbox to image c = int(cls) # integer class # label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') label = None annotator.box_label(xyxy, label, color=colors(c, True)) if save_crop: save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) # Print time (inference-only) print(f'{s}Done. ({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
strip_optimizer(weights) # update model (to fix SourceChangeWarning)
15
2023-10-12 08:52:01+00:00
16k
OmicsML/scDiff
scdiff/model.py
[ { "identifier": "Decoder", "path": "scdiff/modules/diffusion_model/decoder.py", "snippet": "class Decoder(nn.Module):\n def __init__(self, dim, out_dim, dropout=0., norm_type=\"layernorm\", num_layers=1, cond_num_dict=None,\n cond_emb_dim=None, cond_mask_ratio=0., act=\"gelu\", out_ac...
import warnings
import anndata as ad
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
from contextlib import contextmanager
from functools import partial
from einops.layers.torch import Rearrange
from scipy.sparse import csr_matrix
from torch.optim.lr_scheduler import LambdaLR
from tqdm import tqdm
from scdiff.modules.diffusion_model import Decoder, Embedder, Encoder
from scdiff.evaluate import (
    denoising_eval,
    evaluate_annotation,
    perturbation_eval,
    calculate_batch_r_squared,
)
from scdiff.modules.ema import LitEma
from scdiff.modules.layers.attention import BasicTransformerBlock
from scdiff.modules.layers.basic import FeedForward
from scdiff.modules.layers.scmodel import EmbeddingDict
from scdiff.utils.diffusion import MaskedEncoderConditioner, timestep_embedding
from scdiff.utils.diffusion import make_beta_schedule
from scdiff.utils.misc import as_1d_vec, exists, count_params, instantiate_from_config
from scdiff.utils.misc import default
from scdiff.utils.modules import create_activation, create_norm
from scdiff.utils.modules import extract_into_tensor, init_weights, mean_flat, noise_like
12,115
raise ValueError(f"Unknwon condition embedder type {cond_emb_type}") else: self.cond_embed = None self.encoder = Encoder(depth, decoder_embed_dim, decoder_num_heads, decoder_dim_head, dropout=dropout, cond_type=cond_type, cond_cat_input=cond_cat_input) # self.mask_token = nn.Parameter(torch.zeros(1, decoder_embed_dim)) self.decoder_embed_type = decoder_embed_type assert decoder_embed_type in ['linear', 'embedder', 'encoder'] if decoder_embed_type == 'linear': self.decoder_embed = nn.Linear(self.in_dim, decoder_embed_dim) elif decoder_embed_type == 'embedder': self.decoder_embed = Embedder(pretrained_gene_list, decoder_embed_dim, 'layernorm', dropout=dropout) elif decoder_embed_type == 'encoder': self.decoder_embed = self.embedder self.mask_decoder_conditioner = MaskedEncoderConditioner( decoder_embed_dim, mult=4, use_ratio=mask_dec_cond_ratio, use_se=mask_dec_cond_se, use_semlp=mask_dec_cond_semlp, concat=mask_dec_cond_concat, disable=not mask_dec_cond) self.decoder_norm = create_norm(norm_layer, decoder_embed_dim) self.decoder = Decoder(decoder_embed_dim, self.in_dim, dropout, post_cond_norm, post_cond_layers, post_cond_num_dict, act=activation, cond_emb_dim=decoder_embed_dim, cond_mask_ratio=post_cond_mask_ratio) # -------------------------------------------------------------------------- self.initialize_weights() def initialize_weights(self): # initialize linear and normalization layers self.apply(init_weights) # TODO: move to DDPM and get mask from there (masking is indepdent on forward)? def random_masking(self, x): # mask: 0 keep, 1 drop cell_mask_ratio = self.cell_mask_ratio feat_mask_ratio = self.feat_mask_ratio N, D = x.shape # batch, dim if self.mask_mode == "v1": x_masked = x.clone() # apply cell masking len_keep = int(N * (1 - cell_mask_ratio)) perm = np.random.permutation(N) idx_keep = perm[:len_keep] # generate the binary mask: 0 is keep, 1 is remove mask = torch.ones([N, D], device=x.device) mask[idx_keep] = 0 # apply feature masking on the remaining part if feat_mask_ratio > 0: if self.mask_strategy == 'random': feat_mask = mask[idx_keep] feat_mask[torch.rand(len_keep, D) <= feat_mask_ratio] = 1 mask[idx_keep] = feat_mask elif self.mask_strategy == 'none_pad': for i in idx_keep: row = x_masked[i] non_padding_idx = torch.nonzero(row - self.pad_value)[0] n_mask = int(len(non_padding_idx) * feat_mask_ratio) mask_idx = np.random.choice(non_padding_idx, n_mask, replace=False) mask[i][mask_idx] = 1 else: raise NotImplementedError(f'Unsupported mask strategy: {self.mask_strategy}') x_masked[mask.bool()] = self.mask_value elif self.mask_mode == "v2": if feat_mask_ratio != 0: warnings.warn( "v2 mask disregards feat_mask_ratio, which is currently " f"set to {feat_mask_ratio!r}.", UserWarning, stacklevel=2, ) mask_ratios = torch.rand(N, 1, device=x.device) mask_ratios[torch.rand(N) < self.cell_mask_ratio] = 1 mask = torch.rand_like(x) < mask_ratios x_masked = torch.zeros_like(x).masked_scatter(~mask, x) return x_masked, mask def forward_encoder(self, x, pe_input=None, input_gene_list=None, input_gene_idx=None): # embed input input_gene_list = default(input_gene_list, self.input_gene_list) input_gene_idx = default(input_gene_idx, self.input_gene_idx) x, gene_idx = self.embedder(x, pe_input, input_gene_list, input_gene_idx) if self.blocks is None: hist = [None] * self.depth elif self.encoder_type in ("mlpparallel", "ffnparallel"): hist = [self.post_encoder_layer(blk(x)) for blk in self.blocks] else: hist = [] for blk in self.blocks: # apply context encoder blocks x = blk(x) 
hist.append(self.post_encoder_layer(x)) return hist, gene_idx def forward_decoder(self, x, context_list, timesteps=None, pe_input=None, conditions=None, input_gene_list=None, input_gene_idx=None, aug_graph=None, return_latent=False, mask=None): # embed tokens if self.decoder_embed_type == 'linear': x = self.decoder_embed(x) else: input_gene_list = default(input_gene_list, self.input_gene_list) input_gene_idx = default(input_gene_idx, self.input_gene_idx) x, _ = self.decoder_embed(x, pe_input, input_gene_list, input_gene_idx) # apply masked conditioner x = self.mask_decoder_conditioner(x, mask) # calculate time embedding if timesteps is not None and not self.no_time_embed: timesteps = timesteps.repeat(x.shape[0]) if len(timesteps) == 1 else timesteps
""" Wild mixture of: https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/facebookresearch/mae/blob/efb2a8062c206524e35e47d04501ed4f544c0ae8 Thank you! """ RESCALE_FACTOR = np.log(1e4) class DiffusionModel(nn.Module): def __init__(self, pretrained_gene_list, input_gene_list=None, dropout=0., cell_mask_ratio=0.75, mask_context=True, encoder_type='stackffn', embed_dim=1024, depth=4, dim_head=64, num_heads=4, feat_mask_ratio=0., decoder_embed_dim=512, decoder_embed_type='linear', decoder_num_heads=4, decoder_dim_head=64, cond_dim=None, cond_tokens=1, cond_type='crossattn', cond_strategy='full_mix', cond_emb_type='linear', cond_num_dict=None, cond_mask_ratio=0.5, cond_cat_input=False, post_cond_num_dict=None, post_cond_layers=2, post_cond_norm='layernorm', post_cond_mask_ratio=0.0, norm_layer='layernorm', mlp_time_embed=False, no_time_embed=False, activation='gelu', mask_strategy='random', mask_mode='v1', mask_dec_cond=False, mask_dec_cond_ratio=False, mask_dec_cond_se=False, mask_dec_cond_semlp=False, mask_dec_cond_concat=False, mask_value=0, pad_value=0, decoder_mask=None, text_emb=None, text_emb_file=None, freeze_text_emb=True, text_proj_type='linear', text_proj_act=None, stackfnn_glu_flag=False, text_proj_hidden_dim=512, text_proj_num_layers=2, text_proj_norm=None, cond_emb_norm=None, num_perts=None, gears_flag=False, gears_hidden_size=64, gears_mode="single", gears_mlp_layers=2, gears_norm=None, num_go_gnn_layers=1): super().__init__() self.depth = depth # -------------------------------------------------------------------------- # MAE masking options self.cell_mask_ratio = cell_mask_ratio self.feat_mask_ratio = feat_mask_ratio self.mask_context = mask_context self.mask_mode = mask_mode self.mask_strategy = mask_strategy self.mask_value = mask_value self.pad_value = pad_value self.decoder_mask = decoder_mask # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE encoder specifics activation = create_activation(activation) # self.in_dim = len(input_gene_list) if input_gene_list is not None else len(pretrained_gene_list) self.in_dim = len(pretrained_gene_list) if pretrained_gene_list is not None else len(input_gene_list) self.pretrained_gene_list = pretrained_gene_list self.input_gene_list = input_gene_list pretrained_gene_index = dict(zip(self.pretrained_gene_list, list(range(len(self.pretrained_gene_list))))) self.input_gene_idx = torch.tensor([ pretrained_gene_index[o] for o in self.input_gene_list if o in pretrained_gene_index ]).long() if self.input_gene_list is not None else None assert embed_dim == decoder_embed_dim # XXX: this seems to be required for MAE (see forward dec)? 
full_embed_dim = embed_dim * cond_tokens self.post_encoder_layer = Rearrange('b (n d) -> b n d', n=cond_tokens, d=embed_dim) self.embedder = Embedder(pretrained_gene_list, full_embed_dim, 'layernorm', dropout=dropout) self.encoder_type = encoder_type if encoder_type == 'attn': self.blocks = nn.ModuleList([ BasicTransformerBlock(full_embed_dim, num_heads, dim_head, self_attn=True, cross_attn=False, dropout=dropout, qkv_bias=True, final_act=activation) for _ in range(depth)]) elif encoder_type in ('mlp', 'mlpparallel'): self.blocks = nn.ModuleList([ nn.Sequential( nn.Linear(full_embed_dim, full_embed_dim), activation, create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type in ('stackffn', 'ffnparallel'): self.blocks = nn.ModuleList([ # FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout) nn.Sequential( FeedForward(full_embed_dim, mult=4, glu=False, dropout=dropout), create_norm(norm_layer, full_embed_dim), ) for _ in range(depth)]) elif encoder_type == 'none': self.blocks = None else: raise ValueError(f'Unknown encoder type {encoder_type}') # self.encoder_proj = nn.Linear(full_embed_dim, latent_dim) # self.norm = create_norm(norm_layer, full_embed_dim) # -------------------------------------------------------------------------- # -------------------------------------------------------------------------- # MAE decoder specifics self.subset_output = True self.decoder_embed_dim = decoder_embed_dim self.time_embed = nn.Sequential( nn.Linear(decoder_embed_dim, 4 * decoder_embed_dim), nn.SiLU(), nn.Linear(4 * decoder_embed_dim, decoder_embed_dim), ) if mlp_time_embed else nn.Identity() self.no_time_embed = no_time_embed self.cond_type = cond_type assert cond_strategy in ("full_mix", "pre_mix") self.cond_strategy = cond_strategy self.cond_emb_type = cond_emb_type self.cond_tokens = cond_tokens self.cond_cat_input = cond_cat_input if cond_dim is not None or cond_num_dict is not None: if cond_emb_type == 'linear': assert cond_dim is not None self.cond_embed = nn.Sequential( nn.Linear(cond_dim, decoder_embed_dim * cond_tokens), Rearrange('b (n d) -> b n d', n=cond_tokens, d=decoder_embed_dim), ) elif cond_emb_type == 'embedding': assert cond_num_dict is not None self.cond_embed = EmbeddingDict(cond_num_dict, decoder_embed_dim, depth, cond_tokens, mask_ratio=cond_mask_ratio, text_emb=text_emb, text_emb_file=text_emb_file, norm_layer=cond_emb_norm, freeze_text_emb=freeze_text_emb, text_proj_type=text_proj_type, text_proj_num_layers=text_proj_num_layers, stackfnn_glu_flag=stackfnn_glu_flag, text_proj_hidden_dim=text_proj_hidden_dim, text_proj_act=text_proj_act, text_proj_norm=text_proj_norm, # text_proj_dropout=dropout, G_go=G_go, # G_go_weight=G_go_weight, num_perts=num_perts, text_proj_dropout=dropout, gears_flag=gears_flag, num_perts=num_perts, gears_hidden_size=gears_hidden_size, gears_mode=gears_mode, gears_mlp_layers=gears_mlp_layers, gears_norm=gears_norm, num_go_gnn_layers=num_go_gnn_layers) elif cond_emb_type == 'none': self.cond_embed = None else: raise ValueError(f"Unknwon condition embedder type {cond_emb_type}") else: self.cond_embed = None self.encoder = Encoder(depth, decoder_embed_dim, decoder_num_heads, decoder_dim_head, dropout=dropout, cond_type=cond_type, cond_cat_input=cond_cat_input) # self.mask_token = nn.Parameter(torch.zeros(1, decoder_embed_dim)) self.decoder_embed_type = decoder_embed_type assert decoder_embed_type in ['linear', 'embedder', 'encoder'] if decoder_embed_type == 'linear': self.decoder_embed = nn.Linear(self.in_dim, 
decoder_embed_dim) elif decoder_embed_type == 'embedder': self.decoder_embed = Embedder(pretrained_gene_list, decoder_embed_dim, 'layernorm', dropout=dropout) elif decoder_embed_type == 'encoder': self.decoder_embed = self.embedder self.mask_decoder_conditioner = MaskedEncoderConditioner( decoder_embed_dim, mult=4, use_ratio=mask_dec_cond_ratio, use_se=mask_dec_cond_se, use_semlp=mask_dec_cond_semlp, concat=mask_dec_cond_concat, disable=not mask_dec_cond) self.decoder_norm = create_norm(norm_layer, decoder_embed_dim) self.decoder = Decoder(decoder_embed_dim, self.in_dim, dropout, post_cond_norm, post_cond_layers, post_cond_num_dict, act=activation, cond_emb_dim=decoder_embed_dim, cond_mask_ratio=post_cond_mask_ratio) # -------------------------------------------------------------------------- self.initialize_weights() def initialize_weights(self): # initialize linear and normalization layers self.apply(init_weights) # TODO: move to DDPM and get mask from there (masking is indepdent on forward)? def random_masking(self, x): # mask: 0 keep, 1 drop cell_mask_ratio = self.cell_mask_ratio feat_mask_ratio = self.feat_mask_ratio N, D = x.shape # batch, dim if self.mask_mode == "v1": x_masked = x.clone() # apply cell masking len_keep = int(N * (1 - cell_mask_ratio)) perm = np.random.permutation(N) idx_keep = perm[:len_keep] # generate the binary mask: 0 is keep, 1 is remove mask = torch.ones([N, D], device=x.device) mask[idx_keep] = 0 # apply feature masking on the remaining part if feat_mask_ratio > 0: if self.mask_strategy == 'random': feat_mask = mask[idx_keep] feat_mask[torch.rand(len_keep, D) <= feat_mask_ratio] = 1 mask[idx_keep] = feat_mask elif self.mask_strategy == 'none_pad': for i in idx_keep: row = x_masked[i] non_padding_idx = torch.nonzero(row - self.pad_value)[0] n_mask = int(len(non_padding_idx) * feat_mask_ratio) mask_idx = np.random.choice(non_padding_idx, n_mask, replace=False) mask[i][mask_idx] = 1 else: raise NotImplementedError(f'Unsupported mask strategy: {self.mask_strategy}') x_masked[mask.bool()] = self.mask_value elif self.mask_mode == "v2": if feat_mask_ratio != 0: warnings.warn( "v2 mask disregards feat_mask_ratio, which is currently " f"set to {feat_mask_ratio!r}.", UserWarning, stacklevel=2, ) mask_ratios = torch.rand(N, 1, device=x.device) mask_ratios[torch.rand(N) < self.cell_mask_ratio] = 1 mask = torch.rand_like(x) < mask_ratios x_masked = torch.zeros_like(x).masked_scatter(~mask, x) return x_masked, mask def forward_encoder(self, x, pe_input=None, input_gene_list=None, input_gene_idx=None): # embed input input_gene_list = default(input_gene_list, self.input_gene_list) input_gene_idx = default(input_gene_idx, self.input_gene_idx) x, gene_idx = self.embedder(x, pe_input, input_gene_list, input_gene_idx) if self.blocks is None: hist = [None] * self.depth elif self.encoder_type in ("mlpparallel", "ffnparallel"): hist = [self.post_encoder_layer(blk(x)) for blk in self.blocks] else: hist = [] for blk in self.blocks: # apply context encoder blocks x = blk(x) hist.append(self.post_encoder_layer(x)) return hist, gene_idx def forward_decoder(self, x, context_list, timesteps=None, pe_input=None, conditions=None, input_gene_list=None, input_gene_idx=None, aug_graph=None, return_latent=False, mask=None): # embed tokens if self.decoder_embed_type == 'linear': x = self.decoder_embed(x) else: input_gene_list = default(input_gene_list, self.input_gene_list) input_gene_idx = default(input_gene_idx, self.input_gene_idx) x, _ = self.decoder_embed(x, pe_input, input_gene_list, 
input_gene_idx) # apply masked conditioner x = self.mask_decoder_conditioner(x, mask) # calculate time embedding if timesteps is not None and not self.no_time_embed: timesteps = timesteps.repeat(x.shape[0]) if len(timesteps) == 1 else timesteps
time_embed = self.time_embed(timestep_embedding(timesteps, self.decoder_embed_dim))
12
2023-10-13 14:20:34+00:00
16k
weavel-ai/promptmodel-python
promptmodel/chat_model.py
[ { "identifier": "DevClient", "path": "promptmodel/dev_app.py", "snippet": "class DevClient:\n \"\"\"DevClient main class\"\"\"\n\n def __init__(self):\n self.function_models: List[FunctionModelInterface] = []\n self.chat_models: List[ChatModelInterface] = []\n\n def register(self,...
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Coroutine, Union
from uuid import uuid4
from litellm import ModelResponse
from promptmodel import DevClient
from promptmodel.llms.llm_proxy import LLMProxy
from promptmodel.utils import logger
from promptmodel.utils.config_utils import (
    read_config,
    upsert_config,
    check_connection_status_decorator,
)
from promptmodel.utils.async_utils import run_async_in_sync
from promptmodel.types.response import LLMStreamResponse, LLMResponse, ChatModelConfig
from promptmodel.types.enums import InstanceType
from promptmodel.types.request import ChatLogRequest
from promptmodel.apis.base import AsyncAPIClient
import sys
12,376
# Find an instance of Client among global variables for var_name, var_val in global_vars.items(): if isinstance(var_val, DevClient): return var_val return None class ChatModel(metaclass=RegisteringMeta): """ Args: name (_type_): _description_ version (Optional[ Union[str, int] ], optional): Choose which FunctionModel version to use. Defaults to "deploy". It can be "deploy", "latest", or version number. api_key (Optional[str], optional): API key for the LLM. Defaults to None. If None, use api_key in .env file. """ def __init__( self, name, session_uuid: str = None, version: Optional[Union[str, int]] = "deploy", api_key: Optional[str] = None, ): self.name = name self.api_key = api_key self.llm_proxy = LLMProxy(name, version) self.version = version self.recent_log_uuid = None if session_uuid is None: self.session_uuid = str(uuid4()) instruction, version_details, chat_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, None, version) ) config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return else: run_async_in_sync( self.llm_proxy._async_make_session_cloud( self.session_uuid, version_details["uuid"], ) ) else: self.session_uuid = session_uuid @check_connection_status_decorator def get_config( self, *args, **kwargs, ) -> ChatModelConfig: """Get config for the ChatModel. It will fetch the published prompt and version config from the Cloud. (It will be saved in cache DB, so there is no extra latency for API call.) - If you made A/B testing in Web Dashboard, it will fetch the prompt randomly by the A/B testing ratio. If dev mode is initializing, it will return None Returns: ChatModelConfig: config for the ChatModel, which contains prompts and version_detail, message_logs """ prompt, version_detail, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, self.session_uuid, self.version) ) return ChatModelConfig( system_prompt=prompt, model=version_detail["model"], name=self.name, version_uuid=str(version_detail["uuid"]), version=version_detail["version"], message_logs=message_logs, ) @check_connection_status_decorator def add_messages( self, new_messages: List[Dict[str, Any]], metadata_list: List[Optional[Dict]] = [], *args, **kwargs, ) -> None: """Add messages to the chat model. Args: new_messages (List[Dict[str, Any]]): list of messages. Each message is a dict with 'role', 'content', and 'function_call'. """ # Save messages to Cloud DB log_uuid_list = [str(uuid4()) for _ in range(len(new_messages))] run_async_in_sync( self.llm_proxy._async_chat_log_to_cloud( session_uuid=str(self.session_uuid), version_uuid=None, chat_log_request_list=[ ChatLogRequest(**{"message": message, "uuid": str(uuid4())}) for message in new_messages ], ) ) self.recent_log_uuid = log_uuid_list[-1] @check_connection_status_decorator def run( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs,
from __future__ import annotations class RegisteringMeta(type): def __call__(cls, *args, **kwargs): instance: ChatModel = super().__call__(*args, **kwargs) # Find the global client instance in the current context client = cls.find_client_instance() if client is not None: client.register_chat_model(instance.name) return instance @staticmethod def find_client_instance(): # Get the current frame frame = sys._getframe(2) # Get global variables in the current frame global_vars = frame.f_globals # Find an instance of Client among global variables for var_name, var_val in global_vars.items(): if isinstance(var_val, DevClient): return var_val return None class ChatModel(metaclass=RegisteringMeta): """ Args: name (_type_): _description_ version (Optional[ Union[str, int] ], optional): Choose which FunctionModel version to use. Defaults to "deploy". It can be "deploy", "latest", or version number. api_key (Optional[str], optional): API key for the LLM. Defaults to None. If None, use api_key in .env file. """ def __init__( self, name, session_uuid: str = None, version: Optional[Union[str, int]] = "deploy", api_key: Optional[str] = None, ): self.name = name self.api_key = api_key self.llm_proxy = LLMProxy(name, version) self.version = version self.recent_log_uuid = None if session_uuid is None: self.session_uuid = str(uuid4()) instruction, version_details, chat_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, None, version) ) config = read_config() if ( "connection" in config and "initializing" in config["connection"] and config["connection"]["initializing"] == True ): return elif ( "connection" in config and "reloading" in config["connection"] and config["connection"]["reloading"] == True ): return else: run_async_in_sync( self.llm_proxy._async_make_session_cloud( self.session_uuid, version_details["uuid"], ) ) else: self.session_uuid = session_uuid @check_connection_status_decorator def get_config( self, *args, **kwargs, ) -> ChatModelConfig: """Get config for the ChatModel. It will fetch the published prompt and version config from the Cloud. (It will be saved in cache DB, so there is no extra latency for API call.) - If you made A/B testing in Web Dashboard, it will fetch the prompt randomly by the A/B testing ratio. If dev mode is initializing, it will return None Returns: ChatModelConfig: config for the ChatModel, which contains prompts and version_detail, message_logs """ prompt, version_detail, message_logs = run_async_in_sync( LLMProxy.fetch_chat_model(self.name, self.session_uuid, self.version) ) return ChatModelConfig( system_prompt=prompt, model=version_detail["model"], name=self.name, version_uuid=str(version_detail["uuid"]), version=version_detail["version"], message_logs=message_logs, ) @check_connection_status_decorator def add_messages( self, new_messages: List[Dict[str, Any]], metadata_list: List[Optional[Dict]] = [], *args, **kwargs, ) -> None: """Add messages to the chat model. Args: new_messages (List[Dict[str, Any]]): list of messages. Each message is a dict with 'role', 'content', and 'function_call'. 
""" # Save messages to Cloud DB log_uuid_list = [str(uuid4()) for _ in range(len(new_messages))] run_async_in_sync( self.llm_proxy._async_chat_log_to_cloud( session_uuid=str(self.session_uuid), version_uuid=None, chat_log_request_list=[ ChatLogRequest(**{"message": message, "uuid": str(uuid4())}) for message in new_messages ], ) ) self.recent_log_uuid = log_uuid_list[-1] @check_connection_status_decorator def run( self, functions: Optional[List[Dict[str, Any]]] = None, tools: Optional[List[Dict[str, Any]]] = None, stream: Optional[bool] = False, *args, **kwargs,
) -> LLMResponse:
8
2023-10-09 03:35:44+00:00
16k
cambridgeltl/ClaPS
run_prune_search.py
[ { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: L...
import random import numpy as np import json import argparse import os import torch import logging from tqdm import tqdm from transformers import AutoTokenizer, set_seed from rewards.text_classification_reward import PromptedClassificationReward from utils.fsc_datasets import PromptedClassificationDataset from algs.genetics import GeneticAlgorithmTrainer, Genetics from algs.particle_swarm import ParticleSwarmOptimizer from algs.greedy import GreedyTrainer
13,773
logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def remove_special_token(text: str, special_token: str) -> str: return text.replace(special_token, "") def find_kl_dict(args, data, vocab, obj_func, prompted_dataset): premise_texts, hypothesis_texts, class_labels = prompted_dataset.get_data(data) if args["prune_type"] == "kl": default_probs = obj_func.compute_default_kl( premise_texts, hypothesis_texts, class_labels, "", True ) else: default_probs = obj_func.compute_default_reward( premise_texts, hypothesis_texts, class_labels, "", True ) collect_kl = [] kl_dict = {} for v, k in tqdm(vocab.items()): if args["prune_type"] == "kl": kl = obj_func.compute_kl( premise_texts, hypothesis_texts, class_labels, v, True, default_probs ) else: kl = obj_func.compute_reward_diff( premise_texts, hypothesis_texts, class_labels, v, True, default_probs ) collect_kl.append(kl) kl_dict[v] = kl for k, v in kl_dict.items(): kl_dict[k] = float(v) with open(args["dict_path"], "w") as fp: json.dump(kl_dict, fp, indent=4, ensure_ascii=False) collect_kl_np = [] for tensor in collect_kl: collect_kl_np.append(tensor.cpu().numpy()) return kl_dict, collect_kl_np def load_kl_dict(args): # load the KL dict from json file with open(args["dict_path"], "r") as fp: kl_dict = json.load(fp) collect_kl_np = [] for k, v in kl_dict.items(): collect_kl_np.append(v) return kl_dict, collect_kl_np def load_vocab(args): with open(args["vocab_path"], "r") as fp: vocab = json.load(fp) vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) return vocab, vocab_key, vocab_id def action_set_pruning(args, kl_dict, collect_kl_np, vocab): if not args["random_prune"]: collect_kl_np = np.array(collect_kl_np) top_10_percent = np.percentile(collect_kl_np, args["percentile"]) # filter the vocab based on the top_10_percent_idx new_vocab = { word: vocab[word] for word, value in kl_dict.items() if value > top_10_percent } vocab = new_vocab vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) logger.info(len(vocab_key)) else: # random select 10% of the vocab vocab, vocab_key, vocab_id = random_pruning(args, vocab, args["percentile"]) logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def random_pruning(args, vocab: dict, percent: int = 99): vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) length = int(len(vocab_key) * (100 - percent) / 100) pruned_index = random.sample(list(np.arange(len(vocab_key))), length) vocab_key = [vocab_key[i] for i in pruned_index] vocab_id = [vocab_id[i] for i in pruned_index] vocab = {vocab_key[i]: vocab_id[i] for i in range(len(vocab_key))} logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def main(args): print(args) set_seed(args["seed"]) revocab_flag = args["reprune_vocab"] shots = args["num_shots"] batch_size = args["train_batch_size"] args["is_mask_lm"] = False special_space = "▁" if "bert" in args["model_name"]: args["is_mask_lm"] = True special_space = "Ġ" logging.info("......Loading dataset......")
logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) def remove_special_token(text: str, special_token: str) -> str: return text.replace(special_token, "") def find_kl_dict(args, data, vocab, obj_func, prompted_dataset): premise_texts, hypothesis_texts, class_labels = prompted_dataset.get_data(data) if args["prune_type"] == "kl": default_probs = obj_func.compute_default_kl( premise_texts, hypothesis_texts, class_labels, "", True ) else: default_probs = obj_func.compute_default_reward( premise_texts, hypothesis_texts, class_labels, "", True ) collect_kl = [] kl_dict = {} for v, k in tqdm(vocab.items()): if args["prune_type"] == "kl": kl = obj_func.compute_kl( premise_texts, hypothesis_texts, class_labels, v, True, default_probs ) else: kl = obj_func.compute_reward_diff( premise_texts, hypothesis_texts, class_labels, v, True, default_probs ) collect_kl.append(kl) kl_dict[v] = kl for k, v in kl_dict.items(): kl_dict[k] = float(v) with open(args["dict_path"], "w") as fp: json.dump(kl_dict, fp, indent=4, ensure_ascii=False) collect_kl_np = [] for tensor in collect_kl: collect_kl_np.append(tensor.cpu().numpy()) return kl_dict, collect_kl_np def load_kl_dict(args): # load the KL dict from json file with open(args["dict_path"], "r") as fp: kl_dict = json.load(fp) collect_kl_np = [] for k, v in kl_dict.items(): collect_kl_np.append(v) return kl_dict, collect_kl_np def load_vocab(args): with open(args["vocab_path"], "r") as fp: vocab = json.load(fp) vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) return vocab, vocab_key, vocab_id def action_set_pruning(args, kl_dict, collect_kl_np, vocab): if not args["random_prune"]: collect_kl_np = np.array(collect_kl_np) top_10_percent = np.percentile(collect_kl_np, args["percentile"]) # filter the vocab based on the top_10_percent_idx new_vocab = { word: vocab[word] for word, value in kl_dict.items() if value > top_10_percent } vocab = new_vocab vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) logger.info(len(vocab_key)) else: # random select 10% of the vocab vocab, vocab_key, vocab_id = random_pruning(args, vocab, args["percentile"]) logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def random_pruning(args, vocab: dict, percent: int = 99): vocab_key = [] vocab_id = [] for k, v in vocab.items(): vocab_key.append(k) vocab_id.append(v) length = int(len(vocab_key) * (100 - percent) / 100) pruned_index = random.sample(list(np.arange(len(vocab_key))), length) vocab_key = [vocab_key[i] for i in pruned_index] vocab_id = [vocab_id[i] for i in pruned_index] vocab = {vocab_key[i]: vocab_id[i] for i in range(len(vocab_key))} logger.info(len(vocab_key)) return vocab, vocab_key, vocab_id def main(args): print(args) set_seed(args["seed"]) revocab_flag = args["reprune_vocab"] shots = args["num_shots"] batch_size = args["train_batch_size"] args["is_mask_lm"] = False special_space = "▁" if "bert" in args["model_name"]: args["is_mask_lm"] = True special_space = "Ġ" logging.info("......Loading dataset......")
prompt_dataset = PromptedClassificationDataset(args)
1
2023-10-08 12:39:44+00:00
16k
clessig/atmorep
atmorep/core/atmorep_model.py
[ { "identifier": "identity", "path": "atmorep/utils/utils.py", "snippet": "def identity( func, *args) :\n return func( *args)" }, { "identifier": "NetMode", "path": "atmorep/utils/utils.py", "snippet": "class NetMode( Enum) :\n indeterminate = 0\n train = 1\n test = 2" }, { "i...
import torch import numpy as np import code import atmorep.utils.utils as utils from atmorep.utils.utils import identity from atmorep.utils.utils import NetMode from atmorep.utils.utils import get_model_filename from atmorep.transformer.transformer_base import prepare_token from atmorep.transformer.transformer_base import checkpoint_wrapper from atmorep.datasets.multifield_data_sampler import MultifieldDataSampler from atmorep.transformer.transformer_encoder import TransformerEncoder from atmorep.transformer.transformer_decoder import TransformerDecoder from atmorep.transformer.tail_ensemble import TailEnsemble
11,795
def create( self, devices, load_pretrained=True) : '''Create network''' cf = self.cf self.devices = devices size_token_info = 6 self.fields_coupling_idx = [] self.fields_index = {} for ifield, field_info in enumerate(cf.fields) : self.fields_index[ field_info[0] ] = ifield # # embedding network for global/auxiliary token infos # TODO: only for backward compatibility, remove self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net) torch.nn.init.constant_( self.embed_token_info.weight, 0.0) self.embeds_token_info = torch.nn.ModuleList() for ifield, field_info in enumerate( cf.fields) : self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)) if len(field_info[1]) > 4 and load_pretrained : # TODO: inconsistent with embeds_token_info -> version that can handle both # we could imply use the file name: embed_token_info vs embeds_token_info name = 'AtmoRep' + '_embed_token_info' mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1])) self.embeds_token_info[-1].load_state_dict( mloaded) print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) ) else : # initalization torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0) self.embeds_token_info[-1].bias.data.fill_(0.0) # embedding and encoder self.embeds = torch.nn.ModuleList() self.encoders = torch.nn.ModuleList() self.masks = torch.nn.ParameterList() for field_idx, field_info in enumerate(cf.fields) : # learnabl class token if cf.learnable_mask : mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True)) self.masks.append( mask.to(devices[0])) else : self.masks.append( None) # encoder self.encoders.append( TransformerEncoder( cf, field_idx, True).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'encoder', self.encoders[-1]) self.embeds.append( self.encoders[-1].embed) # indices of coupled fields for efficient access in forward self.fields_coupling_idx.append( [field_idx]) for field_coupled in field_info[1][2] : if 'axial' in cf.encoder_att_type : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) else : for _ in range(cf.coupling_num_heads_per_field) : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) # decoder self.decoders = torch.nn.ModuleList() self.field_pred_idxs = [] for field in cf.fields_prediction : for ifield, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.field_pred_idxs.append( ifield) break self.decoders.append( TransformerDecoder( cf, field_info ) ) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'decoder', self.decoders[-1]) # tail networks self.tails = torch.nn.ModuleList() for ifield, field in enumerate(cf.fields_prediction) : field_idx = self.field_pred_idxs[ifield] field_info = cf.fields[field_idx] self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained: self.load_block( field_info, 'tail', self.tails[-1]) # set devices for field_idx, field_info in enumerate(cf.fields) : # find determined device, use default if nothing specified device = self.devices[0] if len(field_info[1]) > 3 : assert field_info[1][3] < 4, 'Only single node model parallelism supported' assert field_info[1][3] < len(devices), 'Per field device id larger than max 
devices' device = self.devices[ field_info[1][3] ] # set device if self.masks[field_idx] != None : self.masks[field_idx].to(device) self.embeds[field_idx].to(device) self.encoders[field_idx].to(device) for field_idx, field in enumerate(cf.fields_prediction) : field_info = cf.fields[ self.field_pred_idxs[field_idx] ] device = self.devices[0] if len(field_info[1]) > 3 : device = self.devices[ field_info[1][3] ] self.decoders[field_idx].to(device) self.tails[field_idx].to(device) # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove self.embeds_token_info.to(devices[0])
#################################################################################################### # # Copyright (C) 2022 # #################################################################################################### # # project : atmorep # # author : atmorep collaboration # # description : # # license : # #################################################################################################### # code.interact(local=locals()) # import horovod.torch as hvd #################################################################################################### class AtmoRepData( torch.nn.Module) : def __init__( self, net) : '''Wrapper class for AtmoRep that handles data loading''' super( AtmoRepData, self).__init__() self.data_loader_test = None self.data_loader_train = None self.data_loader_iter = None self.net = net # ensure that all data loaders have the same seed and hence load the same data self.rng_seed = net.cf.rng_seed if not self.rng_seed : self.rng_seed = int(torch.randint( 100000000, (1,))) ################################################### def load_data( self, mode : NetMode, batch_size = -1, num_loader_workers = -1) : '''Load data''' cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_max if num_loader_workers < 0 : num_loader_workers = cf.num_loader_workers if mode == NetMode.train : self.data_loader_train = self._load_data( self.dataset_train, batch_size, num_loader_workers) elif mode == NetMode.test : batch_size = cf.batch_size_test self.data_loader_test = self._load_data( self.dataset_test, batch_size, num_loader_workers) else : assert False ################################################### def _load_data( self, dataset, batch_size, num_loader_workers) : '''Private implementation for load''' dataset.load_data( batch_size) loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, 'num_workers': num_loader_workers, 'pin_memory': True} data_loader = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) return data_loader ################################################### def set_data( self, mode : NetMode, times_pos, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_data( times_pos, batch_size) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def set_global( self, mode : NetMode, times, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_global( times, batch_size, cf.token_overlap) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def set_location( self, mode : NetMode, pos, years, months, num_t_samples_per_month, batch_size = -1, num_loader_workers = -1) : cf = self.net.cf if batch_size < 0 : batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test dataset = self.dataset_train if mode == NetMode.train else self.dataset_test dataset.set_location( pos, years, months, num_t_samples_per_month, batch_size) self._set_data( dataset, mode, batch_size, num_loader_workers) ################################################### def _set_data( self, dataset, mode : NetMode, 
batch_size = -1, loader_workers = -1) : '''Private implementation for set_data, set_global''' cf = self.net.cf if loader_workers < 0 : loader_workers = cf.num_loader_workers loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, 'num_workers': loader_workers, 'pin_memory': True} if mode == NetMode.train : self.data_loader_train = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) elif mode == NetMode.test : self.data_loader_test = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) else : assert False ################################################### def normalizer( self, field, vl_idx) : if isinstance( field, str) : for fidx, field_info in enumerate(self.cf.fields) : if field == field_info[0] : break assert fidx < len(self.cf.fields), 'invalid field' normalizer = self.dataset_train.datasets[fidx].normalizer elif isinstance( field, int) : normalizer = self.dataset_train.datasets[field][vl_idx].normalizer else : assert False, 'invalid argument type (has to be index to cf.fields or field name)' return normalizer ################################################### def mode( self, mode : NetMode) : if mode == NetMode.train : self.data_loader_iter = iter(self.data_loader_train) self.net.train() elif mode == NetMode.test : self.data_loader_iter = iter(self.data_loader_test) self.net.eval() else : assert False self.cur_mode = mode ################################################### def len( self, mode : NetMode) : if mode == NetMode.train : return len(self.data_loader_train) elif mode == NetMode.test : return len(self.data_loader_test) else : assert False ################################################### def next( self) : return next(self.data_loader_iter) ################################################### def forward( self, xin) : pred = self.net.forward( xin) return pred ################################################### def get_attention( self, xin): #, field_idx) : attn = self.net.get_attention( xin) #, field_idx) return attn ################################################### def create( self, pre_batch, devices, create_net = True, pre_batch_targets = None, load_pretrained=True) : if create_net : self.net.create( devices, load_pretrained) self.pre_batch = pre_batch self.pre_batch_targets = pre_batch_targets cf = self.net.cf self.dataset_train = MultifieldDataSampler( cf.data_dir, cf.years_train, cf.fields, batch_size = cf.batch_size_start, num_t_samples = cf.num_t_samples, num_patches_per_t = cf.num_patches_per_t_train, num_load = cf.num_files_train, pre_batch = self.pre_batch, rng_seed = self.rng_seed, file_shape = cf.file_shape, smoothing = cf.data_smoothing, level_type = cf.level_type, file_format = cf.file_format, month = cf.month, time_sampling = cf.time_sampling, geo_range = cf.geo_range_sampling, fields_targets = cf.fields_targets, pre_batch_targets = self.pre_batch_targets ) self.dataset_test = MultifieldDataSampler( cf.data_dir, cf.years_test, cf.fields, batch_size = cf.batch_size_test, num_t_samples = cf.num_t_samples, num_patches_per_t = cf.num_patches_per_t_test, num_load = cf.num_files_test, pre_batch = self.pre_batch, rng_seed = self.rng_seed, file_shape = cf.file_shape, smoothing = cf.data_smoothing, level_type = cf.level_type, file_format = cf.file_format, month = cf.month, time_sampling = cf.time_sampling, geo_range = cf.geo_range_sampling, lat_sampling_weighted = cf.lat_sampling_weighted, fields_targets = cf.fields_targets, pre_batch_targets = self.pre_batch_targets ) return self 
#################################################################################################### class AtmoRep( torch.nn.Module) : def __init__(self, cf) : '''Constructor''' super( AtmoRep, self).__init__() self.cf = cf ################################################### def create( self, devices, load_pretrained=True) : '''Create network''' cf = self.cf self.devices = devices size_token_info = 6 self.fields_coupling_idx = [] self.fields_index = {} for ifield, field_info in enumerate(cf.fields) : self.fields_index[ field_info[0] ] = ifield # # embedding network for global/auxiliary token infos # TODO: only for backward compatibility, remove self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net) torch.nn.init.constant_( self.embed_token_info.weight, 0.0) self.embeds_token_info = torch.nn.ModuleList() for ifield, field_info in enumerate( cf.fields) : self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)) if len(field_info[1]) > 4 and load_pretrained : # TODO: inconsistent with embeds_token_info -> version that can handle both # we could imply use the file name: embed_token_info vs embeds_token_info name = 'AtmoRep' + '_embed_token_info' mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1])) self.embeds_token_info[-1].load_state_dict( mloaded) print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) ) else : # initalization torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0) self.embeds_token_info[-1].bias.data.fill_(0.0) # embedding and encoder self.embeds = torch.nn.ModuleList() self.encoders = torch.nn.ModuleList() self.masks = torch.nn.ParameterList() for field_idx, field_info in enumerate(cf.fields) : # learnabl class token if cf.learnable_mask : mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True)) self.masks.append( mask.to(devices[0])) else : self.masks.append( None) # encoder self.encoders.append( TransformerEncoder( cf, field_idx, True).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'encoder', self.encoders[-1]) self.embeds.append( self.encoders[-1].embed) # indices of coupled fields for efficient access in forward self.fields_coupling_idx.append( [field_idx]) for field_coupled in field_info[1][2] : if 'axial' in cf.encoder_att_type : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) else : for _ in range(cf.coupling_num_heads_per_field) : self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] ) # decoder self.decoders = torch.nn.ModuleList() self.field_pred_idxs = [] for field in cf.fields_prediction : for ifield, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.field_pred_idxs.append( ifield) break self.decoders.append( TransformerDecoder( cf, field_info ) ) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained : self.load_block( field_info, 'decoder', self.decoders[-1]) # tail networks self.tails = torch.nn.ModuleList() for ifield, field in enumerate(cf.fields_prediction) : field_idx = self.field_pred_idxs[ifield] field_info = cf.fields[field_idx] self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create()) # load pre-trained model if specified if len(field_info[1]) > 4 and load_pretrained: self.load_block( field_info, 'tail', self.tails[-1]) # set devices for field_idx, field_info in 
enumerate(cf.fields) : # find determined device, use default if nothing specified device = self.devices[0] if len(field_info[1]) > 3 : assert field_info[1][3] < 4, 'Only single node model parallelism supported' assert field_info[1][3] < len(devices), 'Per field device id larger than max devices' device = self.devices[ field_info[1][3] ] # set device if self.masks[field_idx] != None : self.masks[field_idx].to(device) self.embeds[field_idx].to(device) self.encoders[field_idx].to(device) for field_idx, field in enumerate(cf.fields_prediction) : field_info = cf.fields[ self.field_pred_idxs[field_idx] ] device = self.devices[0] if len(field_info[1]) > 3 : device = self.devices[ field_info[1][3] ] self.decoders[field_idx].to(device) self.tails[field_idx].to(device) # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove self.embeds_token_info.to(devices[0])
self.checkpoint = identity
0
2023-10-09 19:42:46+00:00
16k
NKI-AI/ahcore
ahcore/callbacks/wsi_metric_callback.py
[ { "identifier": "WriteH5Callback", "path": "ahcore/callbacks/h5_callback.py", "snippet": "class WriteH5Callback(Callback):\n def __init__(\n self,\n max_queue_size: int,\n max_concurrent_writers: int,\n dump_dir: Path,\n normalization_type: str = str(NormalizationTy...
import itertools import json import multiprocessing import time import pytorch_lightning as pl import torch from collections import namedtuple from multiprocessing.pool import Pool from pathlib import Path from typing import Any, Generator, Optional, cast from pytorch_lightning import Callback from ahcore.callbacks import WriteH5Callback from ahcore.lit_module import AhCoreLightningModule from ahcore.metrics import WSIMetricFactory from ahcore.readers import H5FileImageReader, StitchingMode from ahcore.utils.callbacks import _get_h5_output_filename, _ValidationDataset from ahcore.utils.data import DataDescription from ahcore.utils.io import get_logger from ahcore.utils.manifest import DataManager, ImageMetadata, fetch_image_metadata, get_mask_and_annotations_from_record
11,415
from __future__ import annotations logger = get_logger(__name__) class ComputeWsiMetricsCallback(Callback): def __init__(self, max_processes: int = 10, save_per_image: bool = True) -> None: """ Callback to compute metrics on whole-slide images. This callback is used to compute metrics on whole-slide images in separate processes. Parameters ---------- max_processes : int The maximum number of concurrent processes. """ self._data_description: Optional[DataDescription] = None self._reader = H5FileImageReader self._max_processes: int = max_processes self._dump_dir: Optional[Path] = None self._save_per_image = save_per_image self._filenames: dict[Path, Path] = {} self._wsi_metrics: WSIMetricFactory | None = None self._class_names: dict[int, str] = {} self._data_manager = None self._validate_filenames_gen = None self._model_name: str | None = None self._validate_metadata_gen: Generator[ImageMetadata, None, None] | None = None self._dump_list: list[dict[str, str]] = [] self._logger = get_logger(type(self).__name__) def setup( self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None, ) -> None:
from __future__ import annotations logger = get_logger(__name__) class ComputeWsiMetricsCallback(Callback): def __init__(self, max_processes: int = 10, save_per_image: bool = True) -> None: """ Callback to compute metrics on whole-slide images. This callback is used to compute metrics on whole-slide images in separate processes. Parameters ---------- max_processes : int The maximum number of concurrent processes. """ self._data_description: Optional[DataDescription] = None self._reader = H5FileImageReader self._max_processes: int = max_processes self._dump_dir: Optional[Path] = None self._save_per_image = save_per_image self._filenames: dict[Path, Path] = {} self._wsi_metrics: WSIMetricFactory | None = None self._class_names: dict[int, str] = {} self._data_manager = None self._validate_filenames_gen = None self._model_name: str | None = None self._validate_metadata_gen: Generator[ImageMetadata, None, None] | None = None self._dump_list: list[dict[str, str]] = [] self._logger = get_logger(type(self).__name__) def setup( self, trainer: pl.Trainer, pl_module: pl.LightningModule, stage: Optional[str] = None, ) -> None:
if not isinstance(pl_module, AhCoreLightningModule):
1
2023-10-14 18:04:12+00:00
16k
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/cluster/_dbscan.py
[ { "identifier": "BaseEstimator", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/base.py", "snippet": "class BaseEstimator(_MetadataRequester):\n \"\"\"Base class for all estimators in scikit-learn.\n\n Notes\n -----\n All estimators should specify all the parameters that can be se...
import warnings import numpy as np from numbers import Integral, Real from scipy import sparse from ..base import BaseEstimator, ClusterMixin, _fit_context from ..metrics.pairwise import _VALID_METRICS from ..neighbors import NearestNeighbors from ..utils._param_validation import Interval, StrOptions from ..utils.validation import _check_sample_weight from ._dbscan_inner import dbscan_inner
11,087
eps : float, default=0.5 The maximum distance between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. min_samples : int, default=5 The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : str or callable, default='minkowski' The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square during fit. X may be a :term:`sparse graph <sparse graph>`, in which case only "nonzero" elements may be considered neighbors. metric_params : dict, default=None Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, default=30 Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=2 The power of the Minkowski metric to be used to calculate distance between points. sample_weight : array-like of shape (n_samples,), default=None Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. If precomputed distance are used, parallel execution is not available and thus n_jobs will have no effect. Returns ------- core_samples : ndarray of shape (n_core_samples,) Indices of core samples. labels : ndarray of shape (n_samples,) Cluster labels for each point. Noisy samples are given the label -1. See Also -------- DBSCAN : An estimator interface for this clustering algorithm. OPTICS : A similar estimator interface clustering at multiple values of eps. Our implementation is optimized for memory usage. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). It may attract a higher memory complexity when querying these nearest neighborhoods, depending on the ``algorithm``. One way to avoid the query complexity is to pre-compute sparse neighborhoods in chunks using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``, then using ``metric='precomputed'`` here. Another way to reduce memory and computation time is to remove (near-)duplicate points and use ``sample_weight`` instead. 
:class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory usage. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise" <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_. In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." <10.1145/3068335>` ACM Transactions on Database Systems (TODS), 42(3), 19. """ est = DBSCAN( eps=eps, min_samples=min_samples, metric=metric, metric_params=metric_params, algorithm=algorithm, leaf_size=leaf_size, p=p, n_jobs=n_jobs, ) est.fit(X, sample_weight=sample_weight) return est.core_sample_indices_, est.labels_
""" DBSCAN: Density-Based Spatial Clustering of Applications with Noise """ # Author: Robert Layton <robertlayton@gmail.com> # Joel Nothman <joel.nothman@gmail.com> # Lars Buitinck # # License: BSD 3 clause def dbscan( X, eps=0.5, *, min_samples=5, metric="minkowski", metric_params=None, algorithm="auto", leaf_size=30, p=2, sample_weight=None, n_jobs=None, ): """Perform DBSCAN clustering from vector array or distance matrix. Read more in the :ref:`User Guide <dbscan>`. Parameters ---------- X : {array-like, sparse (CSR) matrix} of shape (n_samples, n_features) or \ (n_samples, n_samples) A feature array, or array of distances between samples if ``metric='precomputed'``. eps : float, default=0.5 The maximum distance between two samples for one to be considered as in the neighborhood of the other. This is not a maximum bound on the distances of points within a cluster. This is the most important DBSCAN parameter to choose appropriately for your data set and distance function. min_samples : int, default=5 The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. This includes the point itself. metric : str or callable, default='minkowski' The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by :func:`sklearn.metrics.pairwise_distances` for its metric parameter. If metric is "precomputed", X is assumed to be a distance matrix and must be square during fit. X may be a :term:`sparse graph <sparse graph>`, in which case only "nonzero" elements may be considered neighbors. metric_params : dict, default=None Additional keyword arguments for the metric function. .. versionadded:: 0.19 algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, default='auto' The algorithm to be used by the NearestNeighbors module to compute pointwise distances and find nearest neighbors. See NearestNeighbors module documentation for details. leaf_size : int, default=30 Leaf size passed to BallTree or cKDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. p : float, default=2 The power of the Minkowski metric to be used to calculate distance between points. sample_weight : array-like of shape (n_samples,), default=None Weight of each sample, such that a sample with a weight of at least ``min_samples`` is by itself a core sample; a sample with negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. n_jobs : int, default=None The number of parallel jobs to run for neighbors search. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary <n_jobs>` for more details. If precomputed distance are used, parallel execution is not available and thus n_jobs will have no effect. Returns ------- core_samples : ndarray of shape (n_core_samples,) Indices of core samples. labels : ndarray of shape (n_samples,) Cluster labels for each point. Noisy samples are given the label -1. See Also -------- DBSCAN : An estimator interface for this clustering algorithm. OPTICS : A similar estimator interface clustering at multiple values of eps. Our implementation is optimized for memory usage. Notes ----- For an example, see :ref:`examples/cluster/plot_dbscan.py <sphx_glr_auto_examples_cluster_plot_dbscan.py>`. 
This implementation bulk-computes all neighborhood queries, which increases the memory complexity to O(n.d) where d is the average number of neighbors, while original DBSCAN had memory complexity O(n). It may attract a higher memory complexity when querying these nearest neighborhoods, depending on the ``algorithm``. One way to avoid the query complexity is to pre-compute sparse neighborhoods in chunks using :func:`NearestNeighbors.radius_neighbors_graph <sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>` with ``mode='distance'``, then using ``metric='precomputed'`` here. Another way to reduce memory and computation time is to remove (near-)duplicate points and use ``sample_weight`` instead. :class:`~sklearn.cluster.OPTICS` provides a similar clustering with lower memory usage. References ---------- Ester, M., H. P. Kriegel, J. Sander, and X. Xu, `"A Density-Based Algorithm for Discovering Clusters in Large Spatial Databases with Noise" <https://www.dbs.ifi.lmu.de/Publikationen/Papers/KDD-96.final.frame.pdf>`_. In: Proceedings of the 2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996 Schubert, E., Sander, J., Ester, M., Kriegel, H. P., & Xu, X. (2017). :doi:`"DBSCAN revisited, revisited: why and how you should (still) use DBSCAN." <10.1145/3068335>` ACM Transactions on Database Systems (TODS), 42(3), 19. """ est = DBSCAN( eps=eps, min_samples=min_samples, metric=metric, metric_params=metric_params, algorithm=algorithm, leaf_size=leaf_size, p=p, n_jobs=n_jobs, ) est.fit(X, sample_weight=sample_weight) return est.core_sample_indices_, est.labels_
class DBSCAN(ClusterMixin, BaseEstimator):
0
2023-10-07 13:19:48+00:00
16k
hellloxiaotian/KDNet
test_ccpd.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distill...
import argparse import json import os import numpy as np import torch import yaml from pathlib import Path from threading import Thread from tqdm import tqdm from models.experimental import attempt_load from utils.datasets import create_dataloader from utils.general import coco80_to_coco91_class, check_dataset, check_file, check_img_size, check_requirements, \ box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, increment_path, colorstr from utils.metrics import ap_per_class, ConfusionMatrix from utils.plots import plot_images, output_to_target, plot_study_txt from utils.torch_utils import select_device, time_synchronized, TracedModel from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval
11,871
coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging - Media Panel Plots if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
def test(data, weights=None, batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, wandb_logger=None, compute_loss=None, half_precision=True, trace=False, is_coco=False, v5_metric=False): # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model gs = max(int(model.stride.max()), 32) # grid size (max stride) imgsz = check_img_size(imgsz, s=gs) # check img_size if trace: model = TracedModel(model, device, imgsz) # Half half = device.type != 'cpu' and half_precision # half precision only supported on CUDA if half: model.half() # Configure model.eval() if isinstance(data, str): is_coco = data.endswith('coco.yaml') with open(data) as f: data = yaml.load(f, Loader=yaml.SafeLoader) check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 niou = iouv.numel() # Logging log_imgs = 0 if wandb_logger and wandb_logger.wandb: log_imgs = min(wandb_logger.log_imgs, 100) # Dataloader if not training: if device.type != 'cpu': model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once task = opt.task if opt.task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, gs, opt, pad=0.5, rect=True, prefix=colorstr(f'{task}: '))[0] if v5_metric: print("Testing with YOLOv5 AP metric...") seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model t = time_synchronized() out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if compute_loss: loss += compute_loss([x.float() for x in train_out], targets)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() out = non_max_suppression(out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb, multi_label=True) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(out): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging - Media Panel Plots if len(wandb_images) < log_imgs and wandb_logger.current_epoch > 0: # Check for test operation if wandb_logger.current_epoch % wandb_logger.bbox_interval == 0: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb_logger.wandb.Image(img[si], boxes=boxes, caption=path.name)) wandb_logger.log_training_progress(predn, path, names) if wandb_logger and wandb_logger.wandb_run else None # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch(predn, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions
Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()
18
2023-10-08 13:05:58+00:00
16k
falesiani/torch_ga
torch_ga/layers.py
[ { "identifier": "BladeKind", "path": "torch_ga/blades.py", "snippet": "class BladeKind(Enum):\n \"\"\"Kind of blade depending on its degree.\"\"\"\n MV = \"mv\"\n EVEN = \"even\"\n ODD = \"odd\"\n SCALAR = \"scalar\"\n VECTOR = \"vector\"\n BIVECTOR = \"bivector\"\n TRIVECTOR = \...
from typing import List, Union from .blades import BladeKind from .torch_ga import GeometricAlgebra import numpy as np import torch import torch.nn as nn import torch.nn.init as init
10,926
"""Provides Geometric Algebra Keras layers.""" class GeometricAlgebraLayer(nn.Module): def __init__(self, algebra: GeometricAlgebra, **kwargs): self.algebra = algebra self.built = False super().__init__(**kwargs) def build(self): assert(False), "why me?" @classmethod def from_config(cls, config): # Create algebra if necessary (should only occur once, assumes that # config is actually mutable). if "algebra" not in config: assert "metric" in config config["algebra"] = GeometricAlgebra(config["metric"]) del config["metric"] return cls(**config) def get_config(self): # Store metric of the algebra. In from_config() we will recreate the # algebra from the metric. config = super().get_config() config.update({ "metric": self.algebra.metric.numpy() }) return config # @register_keras_serializable(package="TFGA") class TensorToGeometric(GeometricAlgebraLayer): """Layer for converting tensors with given blade indices to geometric algebra tensors. Args: algebra: GeometricAlgebra instance to use blade_indices: blade indices to interpret the last axis of the input tensor as """ def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int], **kwargs): super().__init__(algebra=algebra, **kwargs) self.blade_indices = torch.tensor(blade_indices, dtype=torch.int64) # self.blade_indices = blade_indices.to(dtype=torch.int64) self.built = False def compute_output_shape(self, input_shape): return [*input_shape[:-1], self.algebra.num_blades] def forward(self, inputs): if not self.build: self.build(inputs.shape) return self.algebra.from_tensor(inputs, blade_indices=self.blade_indices) def build(self,input_shape): self.built = True def get_config(self): config = super().get_config() config.update({ "blade_indices": self.blade_indices.numpy() }) return config # @register_keras_serializable(package="TFGA") class TensorWithKindToGeometric(GeometricAlgebraLayer): """Layer for converting tensors with given blade kind to geometric algebra tensors. Args: algebra: GeometricAlgebra instance to use kind: blade kind indices to interpret the last axis of the input tensor as """
"""Provides Geometric Algebra Keras layers.""" class GeometricAlgebraLayer(nn.Module): def __init__(self, algebra: GeometricAlgebra, **kwargs): self.algebra = algebra self.built = False super().__init__(**kwargs) def build(self): assert(False), "why me?" @classmethod def from_config(cls, config): # Create algebra if necessary (should only occur once, assumes that # config is actually mutable). if "algebra" not in config: assert "metric" in config config["algebra"] = GeometricAlgebra(config["metric"]) del config["metric"] return cls(**config) def get_config(self): # Store metric of the algebra. In from_config() we will recreate the # algebra from the metric. config = super().get_config() config.update({ "metric": self.algebra.metric.numpy() }) return config # @register_keras_serializable(package="TFGA") class TensorToGeometric(GeometricAlgebraLayer): """Layer for converting tensors with given blade indices to geometric algebra tensors. Args: algebra: GeometricAlgebra instance to use blade_indices: blade indices to interpret the last axis of the input tensor as """ def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int], **kwargs): super().__init__(algebra=algebra, **kwargs) self.blade_indices = torch.tensor(blade_indices, dtype=torch.int64) # self.blade_indices = blade_indices.to(dtype=torch.int64) self.built = False def compute_output_shape(self, input_shape): return [*input_shape[:-1], self.algebra.num_blades] def forward(self, inputs): if not self.build: self.build(inputs.shape) return self.algebra.from_tensor(inputs, blade_indices=self.blade_indices) def build(self,input_shape): self.built = True def get_config(self): config = super().get_config() config.update({ "blade_indices": self.blade_indices.numpy() }) return config # @register_keras_serializable(package="TFGA") class TensorWithKindToGeometric(GeometricAlgebraLayer): """Layer for converting tensors with given blade kind to geometric algebra tensors. Args: algebra: GeometricAlgebra instance to use kind: blade kind indices to interpret the last axis of the input tensor as """
def __init__(self, algebra: GeometricAlgebra, kind: BladeKind,
0
2023-10-07 13:34:07+00:00
16k
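The layers in the record above follow a Keras-style build-on-first-forward pattern ported to PyTorch, gating shape-dependent setup behind a built flag. The sketch below shows that idiom in isolation; LazyBuildLayer is a hypothetical name and the weight it creates is only a placeholder. Note the guard tests the boolean built flag rather than the build method itself, since a bound method is always truthy.

import torch
import torch.nn as nn

class LazyBuildLayer(nn.Module):
    # Shape-dependent state is created the first time an input is seen, guarded by `built`.
    def __init__(self):
        super().__init__()
        self.built = False

    def build(self, input_shape):
        # Create parameters whose shape depends on the input actually seen at runtime.
        self.weight = nn.Parameter(torch.zeros(input_shape[-1]))
        self.built = True

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        if not self.built:
            self.build(x.shape)
        return x * self.weight

layer = LazyBuildLayer()
out = layer(torch.ones(2, 4))   # build() runs exactly once here, creating a 4-dim weight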
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n ...
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
12,661
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝'
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝'
async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]):
9
2023-10-12 02:01:46+00:00
16k
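The bot in the record above pulls its tokens and database credentials from environment variables and casts several of them with int(), which fails with an opaque TypeError when a variable is missing. A small fail-fast helper along the following lines is one way to surface that earlier; require_env is a hypothetical helper of mine, and the example assumes python-dotenv is installed, as in the record's imports.

import os
from dotenv import load_dotenv   # assumes python-dotenv, as used in the record above

def require_env(name: str, cast=str):
    # Fail fast with a readable message instead of crashing later on int(None).
    value = os.getenv(name)
    if value is None:
        raise RuntimeError(f"Missing required environment variable: {name}")
    return cast(value)

load_dotenv()
bot_token = require_env("DISCORD_BOT_TOKEN")         # raises immediately if absent
guild_id = require_env("DISCORD_GUILD_TOKEN", int)   # cast applied only after the presence check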
azuline/rose
rose/tracks_test.py
[ { "identifier": "AudioTags", "path": "rose/audiotags.py", "snippet": "class AudioTags:\n id: str | None\n release_id: str | None\n title: str | None\n year: int | None\n tracknumber: str | None\n tracktotal: int | None\n discnumber: str | None\n disctotal: int | None\n album: ...
import json import pytest from pathlib import Path from rose.audiotags import AudioTags from rose.config import Config from rose.rule_parser import MetadataAction, MetadataMatcher from rose.tracks import dump_track, dump_tracks, run_actions_on_track
14,284
"producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t4", "source_path": f"{config.music_source_dir}/r3/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 1, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r3", "albumtitle": "Release 3", "releasetype": "album", "year": 2021, "new": True, "genres": [], "labels": [], "albumartists": { "main": [], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, ] @pytest.mark.usefixtures("seeded_cache") def test_dump_tracks_with_matcher(config: Config) -> None: matcher = MetadataMatcher.parse("artist:Techno Man") assert json.loads(dump_tracks(config, matcher)) == [ { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t1", "source_path": f"{config.music_source_dir}/r1/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 240, "id": "t2", "source_path": f"{config.music_source_dir}/r1/02.m4a", "tracktitle": "Track 2", "tracknumber": "02", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, ] @pytest.mark.usefixtures("seeded_cache") def test_dump_track(config: Config) -> None:
def test_run_action_on_track(config: Config, source_dir: Path) -> None: action = MetadataAction.parse("tracktitle::replace:Bop") af = AudioTags.from_file(source_dir / "Test Release 2" / "01.m4a") assert af.id is not None run_actions_on_track(config, af.id, [action]) af = AudioTags.from_file(source_dir / "Test Release 2" / "01.m4a") assert af.title == "Bop" @pytest.mark.usefixtures("seeded_cache") def test_dump_tracks(config: Config) -> None: assert json.loads(dump_tracks(config)) == [ { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t1", "source_path": f"{config.music_source_dir}/r1/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 240, "id": "t2", "source_path": f"{config.music_source_dir}/r1/02.m4a", "tracktitle": "Track 2", "tracknumber": "02", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, { "trackartists": { "composer": [], "djmixer": [], "guest": [{"alias": False, "name": "Conductor Woman"}], "main": [{"alias": False, "name": "Violin Woman"}], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t3", "source_path": f"{config.music_source_dir}/r2/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 1, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r2", "albumtitle": "Release 2", "releasetype": "album", "year": 2021, "new": False, "genres": ["Classical"], "labels": ["Native State"], "albumartists": { "main": [{"name": "Violin Woman", "alias": False}], "guest": [{"name": "Conductor Woman", "alias": False}], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t4", "source_path": f"{config.music_source_dir}/r3/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 1, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r3", "albumtitle": "Release 3", "releasetype": "album", "year": 2021, "new": True, "genres": [], "labels": [], "albumartists": { "main": [], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, ] @pytest.mark.usefixtures("seeded_cache") def test_dump_tracks_with_matcher(config: Config) -> None: matcher = MetadataMatcher.parse("artist:Techno Man") assert 
json.loads(dump_tracks(config, matcher)) == [ { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 120, "id": "t1", "source_path": f"{config.music_source_dir}/r1/01.m4a", "tracktitle": "Track 1", "tracknumber": "01", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, { "trackartists": { "composer": [], "djmixer": [], "guest": [], "main": [ {"alias": False, "name": "Techno Man"}, {"alias": False, "name": "Bass Man"}, ], "producer": [], "remixer": [], }, "discnumber": "01", "disctotal": 1, "duration_seconds": 240, "id": "t2", "source_path": f"{config.music_source_dir}/r1/02.m4a", "tracktitle": "Track 2", "tracknumber": "02", "tracktotal": 2, "added_at": "0000-01-01T00:00:00+00:00", "release_id": "r1", "albumtitle": "Release 1", "releasetype": "album", "year": 2023, "new": False, "genres": ["Techno", "Deep House"], "labels": ["Silk Music"], "albumartists": { "main": [ {"name": "Techno Man", "alias": False}, {"name": "Bass Man", "alias": False}, ], "guest": [], "remixer": [], "producer": [], "composer": [], "djmixer": [], }, }, ] @pytest.mark.usefixtures("seeded_cache") def test_dump_track(config: Config) -> None:
assert json.loads(dump_track(config, "t1")) == {
4
2023-10-09 14:42:23+00:00
16k
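The tests in the record above compare json.loads(dump_tracks(...)) against large literal dictionaries. When only a handful of fields matter, a subset assertion keeps such tests readable; the sketch below is a generic illustration, not part of rose, and the dumped string here is only a stand-in for the output of dump_track.

import json

def assert_json_superset(dumped: str, expected: dict) -> None:
    # Check only the fields under test so the assertion stays readable even though
    # the full dump carries many more keys.
    actual = json.loads(dumped)
    for key, value in expected.items():
        assert actual.get(key) == value, f"{key}: {actual.get(key)!r} != {value!r}"

# Stand-in for the JSON returned by dump_track(config, "t1"); only the checked keys are shown.
dumped = json.dumps({"id": "t1", "tracktitle": "Track 1", "tracknumber": "01", "release_id": "r1"})
assert_json_superset(dumped, {"tracktitle": "Track 1", "release_id": "r1"})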
grainseed/monitask
sam/segment_anything/automatic_mask_generator.py
[ { "identifier": "Sam", "path": "sam/segment_anything/modeling/sam.py", "snippet": "class Sam(nn.Module):\r\n mask_threshold: float = 0.0\r\n image_format: str = \"RGB\"\r\n\r\n def __init__(\r\n self,\r\n image_encoder: ImageEncoderViT,\r\n prompt_encoder: PromptEncoder,\r\...
import numpy as np import torch import cv2 # type: ignore # noqa: F401 from torchvision.ops.boxes import batched_nms, box_area # type: ignore from typing import Any, Dict, List, Optional, Tuple from .modeling import Sam from .predictor import SamPredictor from .utils.amg import ( MaskData, area_from_rle, batch_iterator, batched_mask_to_box, box_xyxy_to_xywh, build_all_layer_point_grids, calculate_stability_score, coco_encode_rle, generate_crop_boxes, is_box_near_crop_edge, mask_to_rle_pytorch, remove_small_regions, rle_to_mask, uncrop_boxes_xyxy, uncrop_masks, uncrop_points, ) from pycocotools import mask as mask_utils # type: ignore # noqa: F401
11,251
"crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray, multimask_output: bool = True) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size, multimask_output) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], multimask_output: bool = True, ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size, multimask_output) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], multimask_output: bool = True, ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=multimask_output, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold
# Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. class SamAutomaticMaskGenerator: def __init__( self, model: Sam, points_per_side: Optional[int] = 32, points_per_batch: int = 64, pred_iou_thresh: float = 0.88, stability_score_thresh: float = 0.95, stability_score_offset: float = 1.0, box_nms_thresh: float = 0.7, crop_n_layers: int = 0, crop_nms_thresh: float = 0.7, crop_overlap_ratio: float = 512 / 1500, crop_n_points_downscale_factor: int = 1, point_grids: Optional[List[np.ndarray]] = None, min_mask_region_area: int = 0, output_mode: str = "binary_mask", ) -> None: """ Using a SAM model, generates masks for the entire image. Generates a grid of point prompts over the image, then filters low quality and duplicate masks. The default settings are chosen for SAM with a ViT-H backbone. Arguments: model (Sam): The SAM model to use for mask prediction. points_per_side (int or None): The number of points to be sampled along one side of the image. The total number of points is points_per_side**2. If None, 'point_grids' must provide explicit point sampling. points_per_batch (int): Sets the number of points run simultaneously by the model. Higher numbers may be faster but use more GPU memory. pred_iou_thresh (float): A filtering threshold in [0,1], using the model's predicted mask quality. stability_score_thresh (float): A filtering threshold in [0,1], using the stability of the mask under changes to the cutoff used to binarize the model's mask predictions. stability_score_offset (float): The amount to shift the cutoff when calculated the stability score. box_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks. crop_n_layers (int): If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where each layer has 2**i_layer number of image crops. crop_nms_thresh (float): The box IoU cutoff used by non-maximal suppression to filter duplicate masks between different crops. crop_overlap_ratio (float): Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the image length. Later layers with more crops scale down this overlap. crop_n_points_downscale_factor (int): The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n. point_grids (list(np.ndarray) or None): A list over explicit grids of points used for sampling, normalized to [0,1]. The nth grid in the list is used in the nth crop layer. Exclusive with points_per_side. min_mask_region_area (int): If >0, postprocessing will be applied to remove disconnected regions and holes in masks with area smaller than min_mask_region_area. Requires opencv. output_mode (str): The form masks are returned in. Can be 'binary_mask', 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools. For large resolutions, 'binary_mask' may consume large amounts of memory. """ assert (points_per_side is None) != ( point_grids is None ), "Exactly one of points_per_side or point_grid must be provided." 
if points_per_side is not None: self.point_grids = build_all_layer_point_grids( points_per_side, crop_n_layers, crop_n_points_downscale_factor, ) elif point_grids is not None: self.point_grids = point_grids else: raise ValueError("Can't have both points_per_side and point_grid be None.") assert output_mode in [ "binary_mask", "uncompressed_rle", "coco_rle", ], f"Unknown output_mode {output_mode}." if output_mode == "coco_rle": if min_mask_region_area > 0: self.predictor = SamPredictor(model) self.points_per_batch = points_per_batch self.pred_iou_thresh = pred_iou_thresh self.stability_score_thresh = stability_score_thresh self.stability_score_offset = stability_score_offset self.box_nms_thresh = box_nms_thresh self.crop_n_layers = crop_n_layers self.crop_nms_thresh = crop_nms_thresh self.crop_overlap_ratio = crop_overlap_ratio self.crop_n_points_downscale_factor = crop_n_points_downscale_factor self.min_mask_region_area = min_mask_region_area self.output_mode = output_mode @torch.no_grad() def generate(self, image: np.ndarray, multimask_output: bool = True) -> List[Dict[str, Any]]: """ Generates masks for the given image. Arguments: image (np.ndarray): The image to generate masks for, in HWC uint8 format. Returns: list(dict(str, any)): A list over records for masks. Each record is a dict containing the following keys: segmentation (dict(str, any) or np.ndarray): The mask. If output_mode='binary_mask', is an array of shape HW. Otherwise, is a dictionary containing the RLE. bbox (list(float)): The box around the mask, in XYWH format. area (int): The area in pixels of the mask. predicted_iou (float): The model's own prediction of the mask's quality. This is filtered by the pred_iou_thresh parameter. point_coords (list(list(float))): The point coordinates input to the model to generate this mask. stability_score (float): A measure of the mask's quality. This is filtered on using the stability_score_thresh parameter. crop_box (list(float)): The crop of the image used to generate the mask, given in XYWH format. 
""" # Generate masks mask_data = self._generate_masks(image, multimask_output) # Filter small disconnected regions and holes in masks if self.min_mask_region_area > 0: mask_data = self.postprocess_small_regions( mask_data, self.min_mask_region_area, max(self.box_nms_thresh, self.crop_nms_thresh), ) # Encode masks if self.output_mode == "coco_rle": mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]] elif self.output_mode == "binary_mask": mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]] else: mask_data["segmentations"] = mask_data["rles"] # Write mask records curr_anns = [] for idx in range(len(mask_data["segmentations"])): ann = { "segmentation": mask_data["segmentations"][idx], "area": area_from_rle(mask_data["rles"][idx]), "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(), "predicted_iou": mask_data["iou_preds"][idx].item(), "point_coords": [mask_data["points"][idx].tolist()], "stability_score": mask_data["stability_score"][idx].item(), "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(), } curr_anns.append(ann) return curr_anns def _generate_masks(self, image: np.ndarray, multimask_output: bool = True) -> MaskData: orig_size = image.shape[:2] crop_boxes, layer_idxs = generate_crop_boxes( orig_size, self.crop_n_layers, self.crop_overlap_ratio ) # Iterate over image crops data = MaskData() for crop_box, layer_idx in zip(crop_boxes, layer_idxs): crop_data = self._process_crop(image, crop_box, layer_idx, orig_size, multimask_output) data.cat(crop_data) # Remove duplicate masks between crops if len(crop_boxes) > 1: # Prefer masks from smaller crops scores = 1 / box_area(data["crop_boxes"]) scores = scores.to(data["boxes"].device) keep_by_nms = batched_nms( data["boxes"].float(), scores, torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.crop_nms_thresh, ) data.filter(keep_by_nms) data.to_numpy() return data def _process_crop( self, image: np.ndarray, crop_box: List[int], crop_layer_idx: int, orig_size: Tuple[int, ...], multimask_output: bool = True, ) -> MaskData: # Crop the image and calculate embeddings x0, y0, x1, y1 = crop_box cropped_im = image[y0:y1, x0:x1, :] cropped_im_size = cropped_im.shape[:2] self.predictor.set_image(cropped_im) # Get points for this crop points_scale = np.array(cropped_im_size)[None, ::-1] points_for_image = self.point_grids[crop_layer_idx] * points_scale # Generate masks for this crop in batches data = MaskData() for (points,) in batch_iterator(self.points_per_batch, points_for_image): batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size, multimask_output) data.cat(batch_data) del batch_data self.predictor.reset_image() # Remove duplicates within this crop. 
keep_by_nms = batched_nms( data["boxes"].float(), data["iou_preds"], torch.zeros_like(data["boxes"][:, 0]), # categories iou_threshold=self.box_nms_thresh, ) data.filter(keep_by_nms) # Return to the original image frame data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box) data["points"] = uncrop_points(data["points"], crop_box) data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))]) return data def _process_batch( self, points: np.ndarray, im_size: Tuple[int, ...], crop_box: List[int], orig_size: Tuple[int, ...], multimask_output: bool = True, ) -> MaskData: orig_h, orig_w = orig_size # Run model on this batch transformed_points = self.predictor.transform.apply_coords(points, im_size) in_points = torch.as_tensor(transformed_points, device=self.predictor.device) in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device) masks, iou_preds, _ = self.predictor.predict_torch( in_points[:, None, :], in_labels[:, None], multimask_output=multimask_output, return_logits=True, ) # Serialize predictions and store in MaskData data = MaskData( masks=masks.flatten(0, 1), iou_preds=iou_preds.flatten(0, 1), points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)), ) del masks # Filter by predicted IoU if self.pred_iou_thresh > 0.0: keep_mask = data["iou_preds"] > self.pred_iou_thresh data.filter(keep_mask) # Calculate stability score data["stability_score"] = calculate_stability_score( data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset ) if self.stability_score_thresh > 0.0: keep_mask = data["stability_score"] >= self.stability_score_thresh data.filter(keep_mask) # Threshold masks and calculate boxes data["masks"] = data["masks"] > self.predictor.model.mask_threshold
data["boxes"] = batched_mask_to_box(data["masks"])
5
2023-10-14 13:45:54+00:00
16k
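The mask generator in the record above filters proposals by a stability score before running NMS. One common way to compute such a score is the IoU between the mask binarized at a raised and a lowered cutoff; the sketch below shows that idea with illustrative names and should not be read as the repository's calculate_stability_score.

import torch

def stability_score(mask_logits: torch.Tensor, mask_threshold: float, offset: float) -> torch.Tensor:
    # The high-cutoff mask is a subset of the low-cutoff mask, so their IoU reduces to a
    # ratio of areas; masks whose area barely changes with the cutoff score close to 1.
    high = (mask_logits > (mask_threshold + offset)).flatten(-2).sum(-1).float()
    low = (mask_logits > (mask_threshold - offset)).flatten(-2).sum(-1).float()
    return high / low.clamp(min=1.0)   # guard against empty low-cutoff masks

logits = torch.randn(4, 256, 256)                 # a batch of predicted mask logits
scores = stability_score(logits, mask_threshold=0.0, offset=1.0)
keep = scores >= 0.95                             # mirrors the stability_score_thresh filter above
print(scores, keep)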
zhaoyizhou1123/mbrcsl
examples/roboverse/run_mbcql_roboverse.py
[ { "identifier": "MLP", "path": "offlinerlkit/nets/mlp.py", "snippet": "class MLP(nn.Module):\n def __init__(\n self,\n input_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n output_dim: Optional[int] = None,\n activation: nn.Module = nn.ReLU,\n dropou...
import argparse import os import random import pickle import datetime import roboverse import numpy as np import torch from offlinerlkit.nets import MLP from offlinerlkit.modules import ActorProb, Critic, TanhDiagGaussian from offlinerlkit.buffer import ReplayBuffer from offlinerlkit.utils.logger import Logger, make_log_dirs from offlinerlkit.policy_trainer import MFPolicyTrainer from offlinerlkit.policy import CQLPolicy from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset, get_doubledrawer_dataset
10,931
parser.add_argument("--algo-name", type=str, default="mbcql") parser.add_argument("--task", type=str, default="pickplace") parser.add_argument('--last_eval', action='store_false', help="Show eval result for every epoch if False") # env config (pickplace) parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace") parser.add_argument('--rollout_ckpt_path', type=str, required=True, help="dir path, used to load mbrcsl rollout trajectories" ) parser.add_argument("--seed", type=int, default=0) parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256]) parser.add_argument("--actor-lr", type=float, default=1e-4) parser.add_argument("--critic-lr", type=float, default=3e-4) parser.add_argument("--gamma", type=float, default=0.99) parser.add_argument("--tau", type=float, default=0.005) parser.add_argument("--alpha", type=float, default=0.2) parser.add_argument("--target-entropy", type=int, default=None) parser.add_argument("--auto-alpha", default=True) parser.add_argument("--alpha-lr", type=float, default=1e-4) parser.add_argument("--cql-weight", type=float, default=5.0) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--max-q-backup", type=bool, default=False) parser.add_argument("--deterministic-backup", type=bool, default=True) parser.add_argument("--with-lagrange", type=bool, default=False) parser.add_argument("--lagrange-threshold", type=float, default=10.0) parser.add_argument("--cql-alpha-lr", type=float, default=3e-4) parser.add_argument("--num-repeat-actions", type=int, default=10) parser.add_argument("--epoch", type=int, default=200) parser.add_argument("--step-per-epoch", type=int, default=1000) parser.add_argument("--eval_episodes", type=int, default=100) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pickplace': env = roboverse.make('Widow250PickTray-v0') env = PickPlaceObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledraweropen': env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledrawercloseopen': env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space 
args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledrawerpickplaceopen': env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) dist = TanhDiagGaussian( latent_dim=getattr(actor_backbone, "output_dim"), output_dim=args.action_dim, unbounded=True, conditioned_sigma=True ) actor = ActorProb(actor_backbone, dist, args.device)
# MBCQL. Need rollout data from MBRCSL def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--algo-name", type=str, default="mbcql") parser.add_argument("--task", type=str, default="pickplace") parser.add_argument('--last_eval', action='store_false', help="Show eval result for every epoch if False") # env config (pickplace) parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace") parser.add_argument('--rollout_ckpt_path', type=str, required=True, help="dir path, used to load mbrcsl rollout trajectories" ) parser.add_argument("--seed", type=int, default=0) parser.add_argument("--hidden-dims", type=int, nargs='*', default=[256, 256, 256]) parser.add_argument("--actor-lr", type=float, default=1e-4) parser.add_argument("--critic-lr", type=float, default=3e-4) parser.add_argument("--gamma", type=float, default=0.99) parser.add_argument("--tau", type=float, default=0.005) parser.add_argument("--alpha", type=float, default=0.2) parser.add_argument("--target-entropy", type=int, default=None) parser.add_argument("--auto-alpha", default=True) parser.add_argument("--alpha-lr", type=float, default=1e-4) parser.add_argument("--cql-weight", type=float, default=5.0) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--max-q-backup", type=bool, default=False) parser.add_argument("--deterministic-backup", type=bool, default=True) parser.add_argument("--with-lagrange", type=bool, default=False) parser.add_argument("--lagrange-threshold", type=float, default=10.0) parser.add_argument("--cql-alpha-lr", type=float, default=3e-4) parser.add_argument("--num-repeat-actions", type=int, default=10) parser.add_argument("--epoch", type=int, default=200) parser.add_argument("--step-per-epoch", type=int, default=1000) parser.add_argument("--eval_episodes", type=int, default=100) parser.add_argument("--batch-size", type=int, default=256) parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") return parser.parse_args() def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pickplace': env = roboverse.make('Widow250PickTray-v0') env = PickPlaceObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledraweropen': env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledrawercloseopen': env = 
roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") elif args.task == 'doubledrawerpickplaceopen': env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape args.obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape args.action_dim = np.prod(args.action_shape) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] print(f"Loaded {num_traj_all} rollout trajectories") else: raise NotImplementedError env.reset(seed=args.seed) # create policy model actor_backbone = MLP(input_dim=np.prod(args.obs_shape), hidden_dims=args.hidden_dims) critic1_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) critic2_backbone = MLP(input_dim=np.prod(args.obs_shape) + args.action_dim, hidden_dims=args.hidden_dims) dist = TanhDiagGaussian( latent_dim=getattr(actor_backbone, "output_dim"), output_dim=args.action_dim, unbounded=True, conditioned_sigma=True ) actor = ActorProb(actor_backbone, dist, args.device)
critic1 = Critic(critic1_backbone, args.device)
2
2023-10-11 08:36:06+00:00
16k
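The script in the record above declares boolean options such as --max-q-backup with type=bool; argparse converts any non-empty string (including "False") to True under that setting, so boolean flags usually need either store_true/store_false actions or an explicit parser. The sketch below shows the explicit-parser variant; str2bool is a conventional helper name, not something taken from the repository.

import argparse

def str2bool(value: str) -> bool:
    # Explicit boolean parser for argparse; plain type=bool would map "False" to True.
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "1"):
        return True
    if value.lower() in ("no", "false", "f", "0"):
        return False
    raise argparse.ArgumentTypeError(f"Expected a boolean, got {value!r}")

parser = argparse.ArgumentParser()
parser.add_argument("--max-q-backup", type=str2bool, default=False)
parser.add_argument("--deterministic-backup", type=str2bool, default=True)
print(parser.parse_args(["--max-q-backup", "false"]).max_q_backup)   # False, as intended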
wilhelmagren/finq
finq/portfolio.py
[ { "identifier": "Asset", "path": "finq/asset.py", "snippet": "class Asset(object):\n \"\"\" \"\"\"\n\n def __init__(\n self,\n data: pd.Series,\n name: str,\n *,\n market: Optional[str] = None,\n index_name: Optional[str] = None,\n price_type: str =...
import logging import pandas as pd import numpy as np import scipy.optimize as scipyopt import matplotlib.pyplot as plt from functools import wraps from tqdm import tqdm from finq.asset import Asset from finq.datasets import Dataset from finq.exceptions import ( FinqError, InvalidCombinationOfArgumentsError, InvalidPortfolioWeightsError, ObjectiveFunctionError, PortfolioNotYetOptimizedError, ) from finq.formulas import ( period_returns, sharpe_ratio, weighted_returns, weighted_variance, ) from typing import ( Any, Callable, List, Dict, Tuple, Union, Optional, )
10,819
def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """ return weighted_returns(self._weights.T, self.daily_returns_mean()) @check_valid_weights def sharpe_ratio(self) -> float: """ """ r = self.expected_returns() v = self.volatility() return sharpe_ratio(r, v, self._risk_free_rate) def verify_can_optimize(self) -> Optional[FinqError]: """ """ if self._objective_function is None:
""" MIT License Copyright (c) 2023 Wilhelm Ågren Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. File created: 2023-10-20 Last updated: 2023-11-10 """ log = logging.getLogger(__name__) class Portfolio(object): """ """ # For a full list of `scipy` optimization methods and references, see the link below. # https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html _supported_optimization_methods = ( "Nelder-Mead", "Powell", "CG", "BFGS", "Newton-CG", "L-BFGS-B", "TNC", "COBYLA", "SLSQP", "trust-constr", "dogleg", "trust-ncg", "trust-exact", "trust-krylov", ) _weight_initializations = { "lognormal": np.random.lognormal, "normal": np.random.normal, "uniform": np.random.uniform, } def __init__( self, data: Union[Dataset, List[Asset], np.ndarray, pd.DataFrame], *, weights: Optional[np.ndarray] = None, names: Optional[Union[Dict[str, str], List[str]]] = None, symbols: Optional[Union[Dict[str, str], List[str]]] = None, confidence_level: float = 0.95, risk_free_rate: float = 5e-3, n_trading_days: int = 252, objective_function: Optional[Callable] = None, objective_function_args: Tuple[Any, ...] = (), objective_bounds: Optional[List[Tuple[int, ...]]] = None, objective_constraints: Optional[Tuple[Dict, ...]] = None, ): """ """ if isinstance(data, Dataset): assets = data.as_assets() data = list(assets.values()) symbols = list(assets.keys()) if not isinstance(data, list): if names is None and symbols is None and not isinstance(data, pd.DataFrame): raise InvalidCombinationOfArgumentsError( "You need to provide the names and ticker symbols of each asset that you " "want to include in your portfolio if the data you provided is neither a " "`list` of `Asset` objects or a `pd.DataFrame`. You can also try " "providing only one of the arguments `names` and `symbols`, but then as " "a dictionary of the form `key=name` `value=symbol`." 
) if isinstance(data, list): symbols = [a.name for a in data] data = np.array([a.data for a in data]) if isinstance(data, pd.DataFrame): symbols = data.columns data = data.to_numpy().T if isinstance(names, dict): symbols = list(names.values()) names = list(names.keys()) if isinstance(symbols, dict): names = list(symbols.keys()) symbols = list(symbols.values()) self._data = data self._weights = weights self._names = names self._symbols = symbols self._confidence_level = confidence_level self._risk_free_rate = risk_free_rate self._n_trading_days = n_trading_days self._random_portfolios = None self._objective_function = objective_function self._objective_function_args = objective_function_args self._objective_bounds = objective_bounds self._objective_constraints = objective_constraints def weights_are_normalized(self) -> bool: """ """ return np.allclose(self._weights.sum(), 1.0, rtol=1e-6) def initialize_random_weights( self, distribution: Union[str, Callable], *args: Tuple[Any, ...], **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) weights = distribution(*args, **kwargs) self._weights = weights / weights.sum() def check_valid_weights(func) -> Callable: """ """ @wraps(func) def _check_valid_weights(self, *args, **kwargs) -> Optional[FinqError]: """ """ if self._weights is None: raise PortfolioNotYetOptimizedError( "Portfolio weights are `None`. Perhaps you have not yet optimized it? " ) if not self.weights_are_normalized(): raise InvalidPortfolioWeightsError( "Your portfolio weights are not normalized. Make sure to normalize them " "(they sum to one) before calculating any analytical quantities. 
" ) return func(self, *args, **kwargs) return _check_valid_weights def daily_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=1) def yearly_returns(self) -> np.ndarray: """ """ return period_returns(self._data, period=self._n_trading_days) def period_returns(self, period: int) -> np.ndarray: """ """ return period_returns(self._data, period=period) def daily_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=1), axis=1) def yearly_returns_mean(self) -> float: """ """ return np.mean(period_returns(self._data, period=self._n_trading_days), axis=1) def period_returns_mean(self, period: int) -> float: """ """ return np.mean(period_returns(self._data, period=period), axis=1) def daily_covariance(self) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=1), rowvar=True) def yearly_covariance(self) -> np.ndarray: """ """ return np.cov( period_returns(self._data, period=self._n_trading_days), rowvar=True ) def period_covariance(self, period: int) -> np.ndarray: """ """ return np.cov(period_returns(self._data, period=period), rowvar=True) def set_objective_function( self, function: Callable, *args: Tuple[Any, ...], ): """ """ self._objective_function = function self._objective_function_args = args def set_objective_constraints( self, *constraints, ): """ """ self._objective_constraints = [{"type": t, "fun": c} for (t, c) in constraints] def set_objective_bounds( self, bounds: Union[Tuple[int, ...], List[Tuple[int, ...]]], ): """ """ if isinstance(bounds, tuple): bounds = [bounds for _ in range(self._data.shape[0])] self._objective_bounds = bounds def sample_random_portfolios( self, n_samples: int, *, distribution: Union[str, Callable] = "lognormal", **kwargs: Dict[str, Any], ): """ """ if isinstance(distribution, str): distribution = self._weight_initializations.get(distribution, None) if distribution is None: raise ValueError( "You provided a non valid weight initialization distribution." ) portfolios = [] for i in (bar := tqdm(range(n_samples))): if i % 10: bar.set_description( f"Sampling random portfolio {i + 1} from " f"{distribution.__name__} distribution" ) portfolio = distribution(**kwargs) portfolios.append(portfolio / portfolio.sum()) self._random_portfolios = np.transpose(np.concatenate(portfolios, axis=1)) @check_valid_weights def variance(self) -> float: """ """ return weighted_variance( self._weights.T, self.daily_covariance(), ) @check_valid_weights def volatility(self) -> float: """ """ return np.sqrt( weighted_variance( self._weights.T, self.daily_covariance(), ), ) @check_valid_weights def expected_returns(self) -> float: """ """ return weighted_returns(self._weights.T, self.daily_returns_mean()) @check_valid_weights def sharpe_ratio(self) -> float: """ """ r = self.expected_returns() v = self.volatility() return sharpe_ratio(r, v, self._risk_free_rate) def verify_can_optimize(self) -> Optional[FinqError]: """ """ if self._objective_function is None:
raise ObjectiveFunctionError
5
2023-10-09 19:02:54+00:00
16k
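The Portfolio class in the record above delegates its analytics to finq.formulas helpers such as weighted_variance and sharpe_ratio. The sketch below reproduces those formulas in plain NumPy on synthetic data to make the quantities concrete; the function bodies and the daily risk-free rate are my own assumptions, not the library's implementations.

import numpy as np

def weighted_variance(weights: np.ndarray, covariance: np.ndarray) -> float:
    # Portfolio variance as the quadratic form w^T C w.
    return float(weights @ covariance @ weights)

def sharpe_ratio(expected_return: float, volatility: float, risk_free_rate: float) -> float:
    return (expected_return - risk_free_rate) / volatility

rng = np.random.default_rng(0)
returns = rng.normal(0.0005, 0.01, size=(3, 252))    # 3 assets, one year of synthetic daily returns
weights = np.array([0.5, 0.3, 0.2])
cov = np.cov(returns, rowvar=True)                   # same rowvar convention as daily_covariance()
exp_ret = float(weights @ returns.mean(axis=1))
vol = float(np.sqrt(weighted_variance(weights, cov)))
print(sharpe_ratio(exp_ret, vol, risk_free_rate=5e-3 / 252))   # daily-ised rate, an assumption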
lmb-freiburg/ldce
scripts/ldce.py
[ { "identifier": "disabled_train", "path": "sampling_helpers.py", "snippet": "def disabled_train(self, mode=True):\n \"\"\"Overwrite model.train with this function to make sure train/eval mode\n does not change anymore.\"\"\"\n return self" }, { "identifier": "get_model", "path": "sa...
import argparse import os import psutil import yaml import copy import random import matplotlib.pyplot as plt import numpy as np import pathlib import torch import hydra import wandb import torchvision import json import sys import regex as re import open_clip from contextlib import nullcontext from torch import autocast from omegaconf import OmegaConf, open_dict from hydra.utils import instantiate from omegaconf import DictConfig, OmegaConf from torchvision import transforms, datasets from torchvision.utils import save_image from sampling_helpers import disabled_train, get_model, _unmap_img, generate_samples from sampling_helpers import load_model_hf from ldm import * from ldm.models.diffusion.cc_ddim import CCMDDIMSampler from data.imagenet_classnames import name_map, openai_imagenet_classes from utils.DecisionDensenetModel import DecisionDensenetModel from utils.preprocessor import Normalizer, CropAndNormalizer, ResizeAndNormalizer, GenericPreprocessing, Crop from utils.vision_language_wrapper import VisionLanguageWrapper from utils.madry_net import MadryNet from utils.dino_linear import LinearClassifier, DINOLinear
11,984
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = Crop(classifier_model) else: classifier_model = getattr(torchvision.models, classifier_name)(pretrained=True) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = CropAndNormalizer(classifier_model) elif "CelebAHQDataset" in cfg.data._target_: assert cfg.data.query_label in [20, 31, 39], 'Query label MUST be 20 (Gender), 31 (Smile), or 39 (Age) for CelebAHQ' ql = 0 if cfg.data.query_label in [31, 39]: ql = 1 if cfg.data.query_label == 31 else 2 classifier_model = DecisionDensenetModel(3, pretrained=False, query_label=ql) classifier_model.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location='cpu')['model_state_dict']) if cfg.classifier_model.classifier_wrapper: classifier_model = Normalizer( classifier_model, [0.5] * 3, [0.5] * 3 ) elif "Flowers102" in cfg.data._target_: # fine-tuned Dino ViT B/8: https://arxiv.org/pdf/2104.14294.pdf dino = torch.hub.load('facebookresearch/dino:main', 'dino_vits8').to(device).eval() dim = dino.embed_dim linear_classifier = LinearClassifier(dim*cfg.classifier_model.n_last_blocks, 102) linear_classifier.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location="cpu"), strict=True) linear_classifier = linear_classifier.eval().to(device)
torch.backends.cuda.matmul.allow_tf32 = True # torch.backends.cudnn.benchmark = True try: except: print("Install OpenClip via: pip install open_clip_torch") def set_seed(seed: int = 0): torch.manual_seed(seed) np.random.seed(seed) random.seed(seed) torch.cuda.manual_seed_all(seed) def blockPrint(): sys.stdout = open(os.devnull, 'w') def get_classifier(cfg, device): if "ImageNet" in cfg.data._target_: classifier_name = cfg.classifier_model.name if classifier_name == "robust_resnet50": classifier_model = MadryNet(cfg.classifier_model.ckpt, device) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = Crop(classifier_model) else: classifier_model = getattr(torchvision.models, classifier_name)(pretrained=True) if "classifier_wrapper" in cfg.classifier_model and cfg.classifier_model.classifier_wrapper: classifier_model = CropAndNormalizer(classifier_model) elif "CelebAHQDataset" in cfg.data._target_: assert cfg.data.query_label in [20, 31, 39], 'Query label MUST be 20 (Gender), 31 (Smile), or 39 (Age) for CelebAHQ' ql = 0 if cfg.data.query_label in [31, 39]: ql = 1 if cfg.data.query_label == 31 else 2 classifier_model = DecisionDensenetModel(3, pretrained=False, query_label=ql) classifier_model.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location='cpu')['model_state_dict']) if cfg.classifier_model.classifier_wrapper: classifier_model = Normalizer( classifier_model, [0.5] * 3, [0.5] * 3 ) elif "Flowers102" in cfg.data._target_: # fine-tuned Dino ViT B/8: https://arxiv.org/pdf/2104.14294.pdf dino = torch.hub.load('facebookresearch/dino:main', 'dino_vits8').to(device).eval() dim = dino.embed_dim linear_classifier = LinearClassifier(dim*cfg.classifier_model.n_last_blocks, 102) linear_classifier.load_state_dict(torch.load(cfg.classifier_model.classifier_path, map_location="cpu"), strict=True) linear_classifier = linear_classifier.eval().to(device)
classifier_model = DINOLinear(dino, linear_classifier)
16
2023-10-10 09:40:10+00:00
16k
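get_classifier in the record above wraps each classifier in preprocessing modules such as Normalizer and CropAndNormalizer so the counterfactual sampling loop can feed it raw images. A minimal version of that wrapping idea is sketched below; InputNormalizer is a hypothetical stand-in and the project's utils.preprocessor.Normalizer may differ in detail.

import torch
import torch.nn as nn
import torchvision

class InputNormalizer(nn.Module):
    # Bakes input normalization into the classifier so callers can pass images in [0, 1]
    # without a separate transform step.
    def __init__(self, classifier: nn.Module, mean, std):
        super().__init__()
        self.classifier = classifier
        self.register_buffer("mean", torch.tensor(mean).view(1, -1, 1, 1))
        self.register_buffer("std", torch.tensor(std).view(1, -1, 1, 1))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.classifier((x - self.mean) / self.std)

model = InputNormalizer(torchvision.models.resnet50(), [0.5] * 3, [0.5] * 3).eval()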
cpuimage/minSDXLTF
stable_diffusion_xl/stable_diffusion_xl.py
[ { "identifier": "SimpleTokenizer", "path": "stable_diffusion_xl/clip_tokenizer.py", "snippet": "class SimpleTokenizer:\n def __init__(self, bpe_path=None):\n bpe_path = bpe_path or tf.keras.utils.get_file(\n \"bpe_simple_vocab_16e6.txt.gz\",\n \"https://github.com/openai/...
import numpy as np import tensorflow as tf from PIL import Image from scipy.ndimage import correlate1d from .clip_tokenizer import SimpleTokenizer from .diffusion_model import DiffusionXLModel from .image_decoder import ImageDecoder from .image_encoder import ImageEncoder from .long_prompt_weighting import get_weighted_text_embeddings from .scheduler import Scheduler from .text_encoder_laion import TextEncoderLaion, TextEncoderLaionProj from .text_encoder_openai import TextEncoderOpenAi
13,264
negative_prompt = "" unconditional_context, unconditional_add_text_embeds = self.encode_text(negative_prompt) unconditional_context = self._expand_tensor(unconditional_context, batch_size) if diffusion_noise is not None: diffusion_noise = np.squeeze(diffusion_noise) if len(diffusion_noise.shape) == 3: diffusion_noise = np.repeat(np.expand_dims(diffusion_noise, axis=0), batch_size, axis=0) # Iterative reverse diffusion stage self.scheduler.set_timesteps(num_steps) timesteps = self.scheduler.timesteps[::-1] init_time = None init_latent = None input_image_array = None input_mask_array = None latent_mask_tensor = None if inpaint_mask is not None: input_mask_array, latent_mask_tensor = self.preprocessed_mask(inpaint_mask, mask_blur_strength) if input_mask_array is None or latent_mask_tensor is None: print("wrong inpaint mask:{}".format(inpaint_mask)) if reference_image is not None and (0. < reference_image_strength < 1.): input_image_array, input_image_tensor = self.preprocessed_image(reference_image) if input_image_tensor is not None: num_steps = int(num_steps * reference_image_strength + 0.5) init_time = timesteps[num_steps] init_latent = self.image_encoder.predict_on_batch(input_image_tensor) timesteps = timesteps[:num_steps] else: print("wrong reference image:{}".format(reference_image)) latent = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=init_time, seed=seed, noise=diffusion_noise) progbar = tf.keras.utils.Progbar(len(timesteps)) iteration = 0 if original_size is None: original_size = [self.img_height, self.img_width] if target_size is None: target_size = [self.img_height, self.img_width] add_time_ids = tf.expand_dims( tf.convert_to_tensor(list(list(original_size) + list(crops_coords_top_left) + list(target_size)), latent.dtype), axis=0) for index, timestep in list(enumerate(timesteps))[::-1]: latent_prev = latent # Set aside the previous latent vector time_emb = np.repeat(np.reshape(timestep, [1, -1]), batch_size, axis=0) if unconditional_guidance_scale > 0.0: unconditional_latent = self.diffusion_model.predict_on_batch( [latent, time_emb, unconditional_context, add_time_ids, tf.zeros_like(add_text_embeds)]) latent_text = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = unconditional_latent + unconditional_guidance_scale * ( latent_text - unconditional_latent) if guidance_rescale > 0.0: # Based on 3.4. in https://arxiv.org/abs/2305.08891 latent = self.rescale_noise_cfg(latent, latent_text, guidance_rescale=guidance_rescale) else: latent = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = self.scheduler.step(latent, timestep, latent_prev) if latent_mask_tensor is not None and init_latent is not None: latent_orgin = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=timestep, seed=seed, noise=diffusion_noise) latent = latent_orgin * (1. - latent_mask_tensor) + latent * latent_mask_tensor iteration += 1 if callback is not None: callback(iteration) progbar.update(iteration) # Decoding stage decoded = self.image_decoder.predict_on_batch(latent) decoded = np.array(((decoded + 1.) * 0.5), dtype=np.float32) if input_mask_array is not None and input_image_array is not None: decoded = input_image_array * (1. 
- input_mask_array) + decoded * input_mask_array return np.clip(decoded * 255., 0, 255).astype("uint8") def _expand_tensor(self, text_embedding, batch_size): """Extends a tensor by repeating it to fit the shape of the given batch size.""" text_embedding = np.squeeze(text_embedding) if len(text_embedding.shape) == 2: text_embedding = np.repeat( np.expand_dims(text_embedding, axis=0), batch_size, axis=0 ) return text_embedding @property def image_encoder(self): pass @property def text_encoder_openai(self): pass @property def text_encoder_laion(self): pass @property def text_encoder_laion_proj(self): pass @property def diffusion_model(self): pass @property def image_decoder(self): pass @property def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. Can be overriden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None:
# Copyright 2022 The KerasCV Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Keras implementation of StableDiffusionXL.""" MAX_PROMPT_LENGTH = 77 class StableDiffusionXLBase: """Base class for stable diffusion xl model.""" def __init__(self, img_height=1024, img_width=1024, jit_compile=False, active_lcm=False): self.img_height = img_height self.img_width = img_width # lazy initialize the component models and the tokenizer self._image_encoder = None self._text_encoder_laion = None self._text_encoder_laion_proj = None self._text_encoder_openai = None self._diffusion_model = None self._image_decoder = None self._tokenizer = None self.jit_compile = jit_compile self.active_lcm = active_lcm self.scheduler = Scheduler(active_lcm=active_lcm) def text_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def image_to_image( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, guidance_rescale=guidance_rescale, callback=callback) def inpaint( self, prompt, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, seed=None, reference_image=None, reference_image_strength=0.8, inpaint_mask=None, mask_blur_strength=None, original_size=None, crops_coords_top_left=(0, 0), target_size=None, guidance_rescale=0.7, callback=None): encoded_text, add_text_embeds = self.encode_text(prompt) return self.generate_image( encoded_text, add_text_embeds, negative_prompt=negative_prompt, batch_size=batch_size, num_steps=num_steps, unconditional_guidance_scale=unconditional_guidance_scale, seed=seed, reference_image=reference_image, reference_image_strength=reference_image_strength, inpaint_mask=inpaint_mask, mask_blur_strength=mask_blur_strength, original_size=original_size, crops_coords_top_left=crops_coords_top_left, target_size=target_size, 
guidance_rescale=guidance_rescale, callback=callback) def encode_text(self, prompt): """Encodes a prompt into a latent text encoding. The encoding produced by this method should be used as the `encoded_text` parameter of `StableDiffusion.generate_image`. Encoding text separately from generating an image can be used to arbitrarily modify the text encoding prior to image generation, e.g. for walking between two prompts. Args: prompt: a string to encode, must be 77 tokens or shorter. Example: ```python from keras_cv.models import StableDiffusion model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) encoded_text = model.encode_text("Tacos at dawn") img = model.generate_image(encoded_text) ``` """ # Tokenize prompt (i.e. starting context) context_openai, _ = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_openai, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=49407) context_laion, add_text_embeds = get_weighted_text_embeddings(self.tokenizer, self.text_encoder_laion, prompt, model_max_length=MAX_PROMPT_LENGTH, pad_token_id=0, text_encoder_pool=self.text_encoder_laion_proj) return np.concatenate([context_openai, context_laion], axis=-1), add_text_embeds def gaussian_blur(self, image, radius=3, h_axis=1, v_axis=2): def build_filter1d(kernel_size): if kernel_size == 1: filter1d = [1] else: triangle = [[1, 1]] for i in range(1, kernel_size - 1): cur_row = [1] prev_row = triangle[i - 1] for j in range(len(prev_row) - 1): cur_row.append(prev_row[j] + prev_row[j + 1]) cur_row.append(1) triangle.append(cur_row) filter1d = triangle[-1] filter1d = np.reshape(filter1d, (kernel_size,)) return filter1d / np.sum(filter1d) weights = build_filter1d(radius) # Apply filter horizontally blurred_image = correlate1d(image, weights, axis=h_axis, output=None, mode="reflect", cval=0.0, origin=0) # Apply filter vertically blurred_image = correlate1d(blurred_image, weights, axis=v_axis, output=None, mode="reflect", cval=0.0, origin=0) return blurred_image @staticmethod def resize(image_array, new_h=None, new_w=None): h, w, c = image_array.shape if new_h == h and new_w == w: return image_array h_bounds = 0, h - 1 w_bounds = 0, w - 1 y = np.expand_dims(np.linspace(h_bounds[0], h_bounds[1], new_h), axis=-1) x = np.expand_dims(np.linspace(w_bounds[0], w_bounds[1], new_w), axis=0) # Calculate the floor and ceiling values of x and y x_floor = np.floor(x).astype(int) x_ceil = np.ceil(x).astype(int) y_floor = np.floor(y).astype(int) y_ceil = np.ceil(y).astype(int) # Clip the values to stay within the image bounds x_floor = np.clip(x_floor, w_bounds[0], w_bounds[1]) x_ceil = np.clip(x_ceil, w_bounds[0], w_bounds[1]) y_floor = np.clip(y_floor, h_bounds[0], h_bounds[1]) y_ceil = np.clip(y_ceil, h_bounds[0], h_bounds[1]) # Calculate the fractional part of x and y dx = x - x_floor dy = y - y_floor # Get the values of the four neighboring pixels dx = np.expand_dims(dx, axis=-1) dy = np.expand_dims(dy, axis=-1) q11 = image_array[y_floor, x_floor, :] q21 = image_array[y_floor, x_ceil, :] q12 = image_array[y_ceil, x_floor, :] q22 = image_array[y_ceil, x_ceil, :] # Perform bilinear interpolation top_interp = q11 * (1.0 - dx) + q21 * dx bottom_interp = q12 * (1.0 - dx) + q22 * dx interpolated = top_interp * (1.0 - dy) + bottom_interp * dy return interpolated def preprocessed_image(self, x): if type(x) is str: x = np.array(Image.open(x).convert("RGB")) else: x = np.asarray(x) image_array = self.resize(x, self.img_height, self.img_width) image_array = np.array(image_array, dtype=np.float32) / 
255.0 input_image_array = image_array[None, ..., :3] input_image_tensor = input_image_array * 2.0 - 1.0 return input_image_array, input_image_tensor def preprocessed_mask(self, x, blur_radius=5): if type(x) is str: x = np.array(Image.open(x).convert("L")) else: x = np.asarray(x) if len(x.shape) == 2: x = np.expand_dims(x, axis=-1) mask_array = self.resize(x, self.img_height, self.img_width) if mask_array.shape[-1] != 1: mask_array = np.mean(mask_array, axis=-1, keepdims=True) input_mask_array = np.array(mask_array, dtype=np.float32) / 255.0 if blur_radius is not None: input_mask_array = self.gaussian_blur(input_mask_array, radius=blur_radius, h_axis=0, v_axis=1) latent_mask_tensor = self.resize(input_mask_array, self.img_width // 8, self.img_height // 8) return np.expand_dims(input_mask_array, axis=0), np.expand_dims(latent_mask_tensor, axis=0) def rescale_noise_cfg(self, noise_cfg, noise_pred_text, guidance_rescale=0.0, epsilon=1e-05): """ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and Sample Steps are Flawed](https://arxiv.org/abs/2305.08891). See Section 3.4 """ std_text = np.std(noise_pred_text, axis=tuple(range(1, len(noise_pred_text.shape))), keepdims=True) std_cfg = np.std(noise_cfg, axis=tuple(range(1, len(noise_cfg.shape))), keepdims=True) + epsilon # rescale the results from guidance (fixes overexposure) noise_pred_rescaled = noise_cfg * (std_text / std_cfg) # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images noise_cfg = guidance_rescale * noise_pred_rescaled + (1.0 - guidance_rescale) * noise_cfg return noise_cfg def generate_image( self, encoded_text, add_text_embeds, negative_prompt=None, batch_size=1, num_steps=50, unconditional_guidance_scale=7.5, diffusion_noise=None, seed=None, inpaint_mask=None, mask_blur_strength=None, reference_image=None, reference_image_strength=0.8, callback=None, original_size=None, crops_coords_top_left=(0, 0), guidance_rescale=0.0, target_size=None): """Generates an image based on encoded text. The encoding passed to this method should be derived from `StableDiffusion.encode_text`. Args: encoded_text: Tensor of shape (`batch_size`, 77, 768), or a Tensor of shape (77, 768). When the batch axis is omitted, the same encoded text will be used to produce every generated image. batch_size: int, number of images to generate, defaults to 1. negative_prompt: a string containing information to negatively guide the image generation (e.g. by removing or altering certain aspects of the generated image), defaults to None. num_steps: int, number of diffusion steps (controls image quality), defaults to 50. unconditional_guidance_scale: float, controlling how closely the image should adhere to the prompt. Larger values result in more closely adhering to the prompt, but will make the image noisier. Defaults to 7.5. diffusion_noise: Tensor of shape (`batch_size`, img_height // 8, img_width // 8, 4), or a Tensor of shape (img_height // 8, img_width // 8, 4). Optional custom noise to seed the diffusion process. When the batch axis is omitted, the same noise will be used to seed diffusion for every generated image. seed: integer which is used to seed the random generation of diffusion noise, only to be specified if `diffusion_noise` is None. 
Example: ```python from stable_diffusion_xl.stable_diffusion_xl import StableDiffusionXL batch_size = 8 model = StableDiffusionXL(img_height=1024, img_width=1024, jit_compile=True) e_tacos = model.encode_text("Tacos at dawn") e_watermelons = model.encode_text("Watermelons at dusk") e_interpolated = tf.linspace(e_tacos, e_watermelons, batch_size) images = model.generate_image(e_interpolated, batch_size=batch_size) ``` """ if diffusion_noise is not None and seed is not None: raise ValueError( "`diffusion_noise` and `seed` should not both be passed to " "`generate_image`. `seed` is only used to generate diffusion " "noise when it's not already user-specified." ) context = self._expand_tensor(encoded_text, batch_size) if negative_prompt is None: negative_prompt = "" unconditional_context, unconditional_add_text_embeds = self.encode_text(negative_prompt) unconditional_context = self._expand_tensor(unconditional_context, batch_size) if diffusion_noise is not None: diffusion_noise = np.squeeze(diffusion_noise) if len(diffusion_noise.shape) == 3: diffusion_noise = np.repeat(np.expand_dims(diffusion_noise, axis=0), batch_size, axis=0) # Iterative reverse diffusion stage self.scheduler.set_timesteps(num_steps) timesteps = self.scheduler.timesteps[::-1] init_time = None init_latent = None input_image_array = None input_mask_array = None latent_mask_tensor = None if inpaint_mask is not None: input_mask_array, latent_mask_tensor = self.preprocessed_mask(inpaint_mask, mask_blur_strength) if input_mask_array is None or latent_mask_tensor is None: print("wrong inpaint mask:{}".format(inpaint_mask)) if reference_image is not None and (0. < reference_image_strength < 1.): input_image_array, input_image_tensor = self.preprocessed_image(reference_image) if input_image_tensor is not None: num_steps = int(num_steps * reference_image_strength + 0.5) init_time = timesteps[num_steps] init_latent = self.image_encoder.predict_on_batch(input_image_tensor) timesteps = timesteps[:num_steps] else: print("wrong reference image:{}".format(reference_image)) latent = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=init_time, seed=seed, noise=diffusion_noise) progbar = tf.keras.utils.Progbar(len(timesteps)) iteration = 0 if original_size is None: original_size = [self.img_height, self.img_width] if target_size is None: target_size = [self.img_height, self.img_width] add_time_ids = tf.expand_dims( tf.convert_to_tensor(list(list(original_size) + list(crops_coords_top_left) + list(target_size)), latent.dtype), axis=0) for index, timestep in list(enumerate(timesteps))[::-1]: latent_prev = latent # Set aside the previous latent vector time_emb = np.repeat(np.reshape(timestep, [1, -1]), batch_size, axis=0) if unconditional_guidance_scale > 0.0: unconditional_latent = self.diffusion_model.predict_on_batch( [latent, time_emb, unconditional_context, add_time_ids, tf.zeros_like(add_text_embeds)]) latent_text = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = unconditional_latent + unconditional_guidance_scale * ( latent_text - unconditional_latent) if guidance_rescale > 0.0: # Based on 3.4. 
in https://arxiv.org/abs/2305.08891 latent = self.rescale_noise_cfg(latent, latent_text, guidance_rescale=guidance_rescale) else: latent = self.diffusion_model.predict_on_batch( [latent, time_emb, context, add_time_ids, add_text_embeds]) latent = self.scheduler.step(latent, timestep, latent_prev) if latent_mask_tensor is not None and init_latent is not None: latent_orgin = self._get_initial_diffusion_latent(batch_size=batch_size, init_latent=init_latent, init_time=timestep, seed=seed, noise=diffusion_noise) latent = latent_orgin * (1. - latent_mask_tensor) + latent * latent_mask_tensor iteration += 1 if callback is not None: callback(iteration) progbar.update(iteration) # Decoding stage decoded = self.image_decoder.predict_on_batch(latent) decoded = np.array(((decoded + 1.) * 0.5), dtype=np.float32) if input_mask_array is not None and input_image_array is not None: decoded = input_image_array * (1. - input_mask_array) + decoded * input_mask_array return np.clip(decoded * 255., 0, 255).astype("uint8") def _expand_tensor(self, text_embedding, batch_size): """Extends a tensor by repeating it to fit the shape of the given batch size.""" text_embedding = np.squeeze(text_embedding) if len(text_embedding.shape) == 2: text_embedding = np.repeat( np.expand_dims(text_embedding, axis=0), batch_size, axis=0 ) return text_embedding @property def image_encoder(self): pass @property def text_encoder_openai(self): pass @property def text_encoder_laion(self): pass @property def text_encoder_laion_proj(self): pass @property def diffusion_model(self): pass @property def image_decoder(self): pass @property def tokenizer(self): """tokenizer returns the tokenizer used for text inputs. Can be overriden for tasks like textual inversion where the tokenizer needs to be modified. """ if self._tokenizer is None:
self._tokenizer = SimpleTokenizer()
0
2023-10-14 18:40:16+00:00
16k
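In `generate_image` above, classifier-free guidance combines the unconditional and text-conditioned predictions and then optionally applies the std-matching rescale from arXiv:2305.08891 (`rescale_noise_cfg`). The sketch below restates just that arithmetic in plain NumPy so it can be checked in isolation; the function and variable names are mine, and the default `guidance_scale` / `guidance_rescale` values merely mirror the record's defaults.

```python
import numpy as np


def cfg_with_rescale(uncond, text, guidance_scale=7.5, guidance_rescale=0.7, eps=1e-5):
    """Classifier-free guidance followed by the std-matching rescale,
    restated from the record above as a standalone sketch."""
    # Standard CFG: push the prediction away from the unconditional branch.
    cfg = uncond + guidance_scale * (text - uncond)
    if guidance_rescale > 0.0:
        reduce_axes = tuple(range(1, cfg.ndim))
        std_text = np.std(text, axis=reduce_axes, keepdims=True)
        std_cfg = np.std(cfg, axis=reduce_axes, keepdims=True) + eps
        rescaled = cfg * (std_text / std_cfg)  # match the text branch's std (fixes overexposure)
        cfg = guidance_rescale * rescaled + (1.0 - guidance_rescale) * cfg
    return cfg


# e.g. on dummy latents of shape (batch, h, w, c):
# out = cfg_with_rescale(np.random.randn(1, 128, 128, 4), np.random.randn(1, 128, 128, 4))
```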
spla-tam/SplaTAM
scripts/iphone_demo.py
[ { "identifier": "relative_transformation", "path": "datasets/gradslam_datasets/geometryutils.py", "snippet": "def relative_transformation(\n trans_01: torch.Tensor, trans_02: torch.Tensor, orthogonal_rotations: bool = False\n) -> torch.Tensor:\n r\"\"\"Function that computes the relative homogenou...
import argparse
import os
import shutil
import sys
import time
import json
import cv2
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn.functional as F
import cyclonedds.idl as idl
import cyclonedds.idl.annotations as annotate
import cyclonedds.idl.types as types
from pathlib import Path
from importlib.machinery import SourceFileLoader
from tqdm import tqdm
from datasets.gradslam_datasets.geometryutils import relative_transformation
from utils.common_utils import seed_everything, save_params_ckpt, save_params
from utils.eval_helpers import report_progress
from utils.keyframe_selection import keyframe_selection_overlap
from utils.recon_helpers import setup_camera
from utils.slam_external import build_rotation, prune_gaussians, densify
from scripts.splatam import get_loss, initialize_optimizer, initialize_params, initialize_camera_pose, get_pointcloud, add_new_gaussians
from diff_gaussian_rasterization import GaussianRasterizer as Renderer
from dataclasses import dataclass
from cyclonedds.domain import DomainParticipant, Domain
from cyclonedds.core import Qos, Policy
from cyclonedds.sub import DataReader
from cyclonedds.topic import Topic
from cyclonedds.util import duration
12,930
# Depth if avaiable save_depth = None if sample.has_depth: # Save Depth Image save_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) save_depth = (save_depth*65535/float(depth_scale)).astype(np.uint16) save_depth = cv2.resize(save_depth, dsize=( sample.width, sample.height), interpolation=cv2.INTER_NEAREST) cv2.imwrite(str(depth_dir.joinpath(f"{total_frames}.png")), save_depth) # Load Depth Image for SplaTAM curr_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) else: print("No Depth Image Received. Please make sure that the NeRFCapture App \ mentions Depth Supported on the top right corner. Skipping Frame...") continue # ARKit Poses for saving dataset X_WV = np.asarray(sample.transform_matrix, dtype=np.float32).reshape((4, 4)).T frame = { "transform_matrix": X_WV.tolist(), "file_path": f"rgb/{total_frames}.png", "fl_x": sample.fl_x, "fl_y": sample.fl_y, "cx": sample.cx, "cy": sample.cy, "w": sample.width, "h": sample.height } if save_depth is not None: frame["depth_path"] = f"depth/{total_frames}.png" manifest["frames"].append(frame) # Convert ARKit Pose to GradSLAM format gt_pose = torch.from_numpy(X_WV).float() gt_pose = P @ gt_pose @ P.T if time_idx == 0: first_abs_gt_pose = gt_pose gt_pose = relative_transformation(first_abs_gt_pose.unsqueeze(0), gt_pose.unsqueeze(0), orthogonal_rotations=False) gt_w2c = torch.linalg.inv(gt_pose[0]) gt_w2c_all_frames.append(gt_w2c) # Initialize Tracking & Mapping Resolution Data color = cv2.resize(image, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_LINEAR) depth = cv2.resize(curr_depth, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_NEAREST) depth = np.expand_dims(depth, -1) color = torch.from_numpy(color).cuda().float() color = color.permute(2, 0, 1) / 255 depth = torch.from_numpy(depth).cuda().float() depth = depth.permute(2, 0, 1) if time_idx == 0: intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() intrinsics = intrinsics / config['data']['downscale_factor'] intrinsics[2, 2] = 1.0 first_frame_w2c = torch.eye(4).cuda().float() cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Densification Resolution Data densify_color = cv2.resize(image, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_LINEAR) densify_depth = cv2.resize(curr_depth, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_NEAREST) densify_depth = np.expand_dims(densify_depth, -1) densify_color = torch.from_numpy(densify_color).cuda().float() densify_color = densify_color.permute(2, 0, 1) / 255 densify_depth = torch.from_numpy(densify_depth).cuda().float() densify_depth = densify_depth.permute(2, 0, 1) if time_idx == 0: densify_intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() densify_intrinsics = densify_intrinsics / config['data']['densify_downscale_factor'] densify_intrinsics[2, 2] = 1.0 densify_cam = setup_camera(densify_color.shape[2], densify_color.shape[1], densify_intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Params for first time step 
if time_idx == 0: # Get Initial Point Cloud mask = (densify_depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) init_pt_cld, mean3_sq_dist = get_pointcloud(densify_color, densify_depth, densify_intrinsics, first_frame_w2c, mask=mask, compute_mean_sq_dist=True, mean_sq_dist_method=config['mean_sq_dist_method']) params, variables = initialize_params(init_pt_cld, num_frames, mean3_sq_dist) variables['scene_radius'] = torch.max(densify_depth)/config['scene_radius_depth_ratio'] # Initialize Mapping & Tracking for current frame iter_time_idx = time_idx curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame
""" Script to stream RGB-D data from the NeRFCapture iOS App & build a Gaussian Splat on the fly using SplaTAM. The CycloneDDS parts of this script are adapted from the Instant-NGP Repo: https://github.com/NVlabs/instant-ngp/blob/master/scripts/nerfcapture2nerf.py """ #!/usr/bin/env python3 _BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _BASE_DIR) def parse_args(): parser = argparse.ArgumentParser() parser.add_argument("--config", default="./configs/iphone/online_demo.py", type=str, help="Path to config file.") return parser.parse_args() # DDS # ================================================================================================== @dataclass @annotate.final @annotate.autoid("sequential") class SplatCaptureFrame(idl.IdlStruct, typename="SplatCaptureData.SplatCaptureFrame"): id: types.uint32 annotate.key("id") timestamp: types.float64 fl_x: types.float32 fl_y: types.float32 cx: types.float32 cy: types.float32 transform_matrix: types.array[types.float32, 16] width: types.uint32 height: types.uint32 image: types.sequence[types.uint8] has_depth: bool depth_width: types.uint32 depth_height: types.uint32 depth_scale: types.float32 depth_image: types.sequence[types.uint8] dds_config = """<?xml version="1.0" encoding="UTF-8" ?> \ <CycloneDDS xmlns="https://cdds.io/config" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://cdds.io/config https://raw.githubusercontent.com/eclipse-cyclonedds/cyclonedds/master/etc/cyclonedds.xsd"> \ <Domain id="any"> \ <Internal> \ <MinimumSocketReceiveBufferSize>10MB</MinimumSocketReceiveBufferSize> \ </Internal> \ <Tracing> \ <Verbosity>config</Verbosity> \ <OutputFile>stdout</OutputFile> \ </Tracing> \ </Domain> \ </CycloneDDS> \ """ # ================================================================================================== def dataset_capture_loop(reader: DataReader, save_path: Path, overwrite: bool, n_frames: int, depth_scale: float, config: dict): rgb_path = save_path.joinpath("rgb") if rgb_path.exists(): if overwrite: # Prompt user to confirm deletion if (input(f"warning! folder '{save_path}' will be deleted/replaced. continue? (Y/n)").lower().strip()+"y")[:1] != "y": sys.exit(1) shutil.rmtree(save_path) else: print(f"rgb_path {rgb_path} already exists. 
Please use overwrite=True in config if you want to overwrite.") sys.exit(1) print("Waiting for frames...") # Make directory images_dir = save_path.joinpath("rgb") manifest = { "fl_x": 0.0, "fl_y": 0.0, "cx": 0.0, "cy": 0.0, "w": 0.0, "h": 0.0, "frames": [] } total_frames = 0 # Total frames received time_idx = total_frames num_frames = n_frames # Total frames desired # Initialize list to keep track of Keyframes keyframe_list = [] keyframe_time_indices = [] # Init Variables to keep track of ARkit poses and runtimes gt_w2c_all_frames = [] tracking_iter_time_sum = 0 tracking_iter_time_count = 0 mapping_iter_time_sum = 0 mapping_iter_time_count = 0 tracking_frame_time_sum = 0 tracking_frame_time_count = 0 mapping_frame_time_sum = 0 mapping_frame_time_count = 0 P = torch.tensor( [ [1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1] ] ).float() # Start DDS Loop while True: sample = reader.read_next() # Get frame from NeRFCapture if sample: print(f"{total_frames + 1}/{n_frames} frames received") if total_frames == 0: save_path.mkdir(parents=True, exist_ok=True) images_dir.mkdir(exist_ok=True) manifest["w"] = sample.width manifest["h"] = sample.height manifest["cx"] = sample.cx manifest["cy"] = sample.cy manifest["fl_x"] = sample.fl_x manifest["fl_y"] = sample.fl_y manifest["integer_depth_scale"] = float(depth_scale)/65535.0 if sample.has_depth: depth_dir = save_path.joinpath("depth") depth_dir.mkdir(exist_ok=True) # RGB image = np.asarray(sample.image, dtype=np.uint8).reshape((sample.height, sample.width, 3)) cv2.imwrite(str(images_dir.joinpath(f"{total_frames}.png")), cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) # Depth if avaiable save_depth = None if sample.has_depth: # Save Depth Image save_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) save_depth = (save_depth*65535/float(depth_scale)).astype(np.uint16) save_depth = cv2.resize(save_depth, dsize=( sample.width, sample.height), interpolation=cv2.INTER_NEAREST) cv2.imwrite(str(depth_dir.joinpath(f"{total_frames}.png")), save_depth) # Load Depth Image for SplaTAM curr_depth = np.asarray(sample.depth_image, dtype=np.uint8).view( dtype=np.float32).reshape((sample.depth_height, sample.depth_width)) else: print("No Depth Image Received. Please make sure that the NeRFCapture App \ mentions Depth Supported on the top right corner. 
Skipping Frame...") continue # ARKit Poses for saving dataset X_WV = np.asarray(sample.transform_matrix, dtype=np.float32).reshape((4, 4)).T frame = { "transform_matrix": X_WV.tolist(), "file_path": f"rgb/{total_frames}.png", "fl_x": sample.fl_x, "fl_y": sample.fl_y, "cx": sample.cx, "cy": sample.cy, "w": sample.width, "h": sample.height } if save_depth is not None: frame["depth_path"] = f"depth/{total_frames}.png" manifest["frames"].append(frame) # Convert ARKit Pose to GradSLAM format gt_pose = torch.from_numpy(X_WV).float() gt_pose = P @ gt_pose @ P.T if time_idx == 0: first_abs_gt_pose = gt_pose gt_pose = relative_transformation(first_abs_gt_pose.unsqueeze(0), gt_pose.unsqueeze(0), orthogonal_rotations=False) gt_w2c = torch.linalg.inv(gt_pose[0]) gt_w2c_all_frames.append(gt_w2c) # Initialize Tracking & Mapping Resolution Data color = cv2.resize(image, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_LINEAR) depth = cv2.resize(curr_depth, dsize=( config['data']['desired_image_width'], config['data']['desired_image_height']), interpolation=cv2.INTER_NEAREST) depth = np.expand_dims(depth, -1) color = torch.from_numpy(color).cuda().float() color = color.permute(2, 0, 1) / 255 depth = torch.from_numpy(depth).cuda().float() depth = depth.permute(2, 0, 1) if time_idx == 0: intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() intrinsics = intrinsics / config['data']['downscale_factor'] intrinsics[2, 2] = 1.0 first_frame_w2c = torch.eye(4).cuda().float() cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Densification Resolution Data densify_color = cv2.resize(image, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_LINEAR) densify_depth = cv2.resize(curr_depth, dsize=( config['data']['densification_image_width'], config['data']['densification_image_height']), interpolation=cv2.INTER_NEAREST) densify_depth = np.expand_dims(densify_depth, -1) densify_color = torch.from_numpy(densify_color).cuda().float() densify_color = densify_color.permute(2, 0, 1) / 255 densify_depth = torch.from_numpy(densify_depth).cuda().float() densify_depth = densify_depth.permute(2, 0, 1) if time_idx == 0: densify_intrinsics = torch.tensor([[sample.fl_x, 0, sample.cx], [0, sample.fl_y, sample.cy], [0, 0, 1]]).cuda().float() densify_intrinsics = densify_intrinsics / config['data']['densify_downscale_factor'] densify_intrinsics[2, 2] = 1.0 densify_cam = setup_camera(densify_color.shape[2], densify_color.shape[1], densify_intrinsics.cpu().numpy(), first_frame_w2c.cpu().numpy()) # Initialize Params for first time step if time_idx == 0: # Get Initial Point Cloud mask = (densify_depth > 0) # Mask out invalid depth values mask = mask.reshape(-1) init_pt_cld, mean3_sq_dist = get_pointcloud(densify_color, densify_depth, densify_intrinsics, first_frame_w2c, mask=mask, compute_mean_sq_dist=True, mean_sq_dist_method=config['mean_sq_dist_method']) params, variables = initialize_params(init_pt_cld, num_frames, mean3_sq_dist) variables['scene_radius'] = torch.max(densify_depth)/config['scene_radius_depth_ratio'] # Initialize Mapping & Tracking for current frame iter_time_idx = time_idx curr_gt_w2c = gt_w2c_all_frames curr_data = {'cam': cam, 'im': color, 'depth':depth, 'id': iter_time_idx, 'intrinsics': intrinsics, 'w2c': first_frame_w2c, 'iter_gt_w2c_list': curr_gt_w2c} 
tracking_curr_data = curr_data # Optimization Iterations num_iters_mapping = config['mapping']['num_iters'] # Initialize the camera pose for the current frame if time_idx > 0: params = initialize_camera_pose(params, time_idx, forward_prop=config['tracking']['forward_prop']) # Tracking tracking_start_time = time.time() if time_idx > 0 and not config['tracking']['use_gt_poses']: # Reset Optimizer & Learning Rates for tracking optimizer = initialize_optimizer(params, config['tracking']['lrs'], tracking=True) # Keep Track of Best Candidate Rotation & Translation candidate_cam_unnorm_rot = params['cam_unnorm_rots'][..., time_idx].detach().clone() candidate_cam_tran = params['cam_trans'][..., time_idx].detach().clone() current_min_loss = float(1e20) # Tracking Optimization iter = 0 do_continue_slam = False num_iters_tracking = config['tracking']['num_iters'] progress_bar = tqdm(range(num_iters_tracking), desc=f"Tracking Time Step: {time_idx}") while True: iter_start_time = time.time() # Loss for current frame
loss, variables, losses = get_loss(params, tracking_curr_data, variables, iter_time_idx, config['tracking']['loss_weights'],
10
2023-11-30 20:26:47+00:00
16k
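The SplaTAM demo above converts each incoming ARKit camera pose into the tracker's convention before optimization: flip the y and z axes with a fixed matrix `P`, express the pose relative to the first frame, then invert to obtain the world-to-camera matrix. The helper below is a hedged restatement of those three steps; in particular it assumes `relative_transformation(a, b)` reduces to `inv(a) @ b`, which is the usual definition but is not verified against the repo's implementation, and the function name is mine.

```python
import torch


def arkit_pose_to_w2c(X_WV, first_pose=None):
    """Sketch of the pose handling in the record above.
    X_WV: 4x4 ARKit camera-to-world transform as a torch tensor."""
    # Fixed axis flip from the record: negates the y and z axes of the pose.
    P = torch.tensor([[1, 0, 0, 0],
                      [0, -1, 0, 0],
                      [0, 0, -1, 0],
                      [0, 0, 0, 1]], dtype=torch.float32)
    pose = P @ X_WV.float() @ P.T
    if first_pose is None:
        first_pose = pose  # frame 0 defines the world origin
    rel_c2w = torch.linalg.inv(first_pose) @ pose  # assumed relative_transformation
    w2c = torch.linalg.inv(rel_c2w)                # world-to-camera used for tracking
    return w2c, first_pose
```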
zhyever/PatchFusion
ControlNet/ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ControlNet/ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\...
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
import itertools
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager, nullcontext
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from omegaconf import ListConfig
from ControlNet.ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ControlNet.ldm.modules.ema import LitEma
from ControlNet.ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ControlNet.ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ControlNet.ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ControlNet.ldm.models.diffusion.ddim import DDIMSampler
12,180
log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if 
conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config):
model = instantiate_from_config(config)
7
2023-12-04 08:43:15+00:00
16k
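An aside on the ddpm code in the record above: its q_sample and get_v methods are direct tensor translations of the closed-form forward process and the v-prediction target. Written out with \bar{\alpha}_t denoting the cumulative product of (1 - \beta_t) (standard diffusion notation, assumed here rather than defined in the file):

\[
x_t = \sqrt{\bar{\alpha}_t}\, x_0 + \sqrt{1 - \bar{\alpha}_t}\, \epsilon,
\qquad
v_t = \sqrt{\bar{\alpha}_t}\, \epsilon - \sqrt{1 - \bar{\alpha}_t}\, x_0,
\qquad \epsilon \sim \mathcal{N}(0, I).
\]

The extract_into_tensor calls in that code simply gather the per-timestep scalars \sqrt{\bar{\alpha}_t} and \sqrt{1 - \bar{\alpha}_t} and broadcast them over the image dimensions.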
baaivision/GeoDream
extern/ldm_zero123/models/diffusion/ddpm.py
[ { "identifier": "AutoencoderKL", "path": "extern/ldm_zero123/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(\n self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"i...
import itertools import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from contextlib import contextmanager, nullcontext from functools import partial from einops import rearrange, repeat from omegaconf import ListConfig from pytorch_lightning.utilities.rank_zero import rank_zero_only from torch.optim.lr_scheduler import LambdaLR from torchvision.utils import make_grid from tqdm import tqdm from extern.ldm_zero123.models.autoencoder import ( AutoencoderKL, IdentityFirstStage, VQModelInterface, ) from extern.ldm_zero123.models.diffusion.ddim import DDIMSampler from extern.ldm_zero123.modules.attention import CrossAttention from extern.ldm_zero123.modules.diffusionmodules.util import ( extract_into_tensor, make_beta_schedule, noise_like, ) from extern.ldm_zero123.modules.distributions.distributions import ( DiagonalGaussianDistribution, normal_kl, ) from extern.ldm_zero123.modules.ema import LitEma from extern.ldm_zero123.util import ( count_params, default, exists, instantiate_from_config, isimage, ismap, log_txt_as_img, mean_flat, )
12,692
padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {"concat": "c_concat", "crossattn": "c_crossattn", "adm": "y"} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0.0, v_posterior=0.0, # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1.0, conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0.0, make_it_fit=False, ucg_training=None, ): super().__init__() assert parameterization in [ "eps", "x0", ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode" ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if ckpt_path is not None: self.init_from_ckpt( ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet ) self.register_schedule( given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, ): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule( beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s, ) alphas = 1.0 - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1.0, alphas_cumprod[:-1]) (timesteps,) = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert ( alphas_cumprod.shape[0] == self.num_timesteps ), "alphas have to be defined for each timestep" to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer("betas", to_torch(betas)) self.register_buffer("alphas_cumprod", to_torch(alphas_cumprod)) self.register_buffer("alphas_cumprod_prev", to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", to_torch(np.sqrt(alphas_cumprod))) self.register_buffer( "sqrt_one_minus_alphas_cumprod", to_torch(np.sqrt(1.0 - alphas_cumprod)) ) self.register_buffer( "log_one_minus_alphas_cumprod", to_torch(np.log(1.0 - alphas_cumprod)) ) self.register_buffer( "sqrt_recip_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod)) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", to_torch(np.sqrt(1.0 / alphas_cumprod - 1)) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * ( 1.0 - alphas_cumprod_prev ) / (1.0 - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. 
- alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer("posterior_variance", to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", to_torch(np.log(np.maximum(posterior_variance, 1e-20))), ) self.register_buffer( "posterior_mean_coef1", to_torch(betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod)), ) self.register_buffer( "posterior_mean_coef2", to_torch( (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod) ), ) if self.parameterization == "eps": lvlb_weights = self.betas**2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod) ) elif self.parameterization == "x0": lvlb_weights = ( 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2.0 * 1 - torch.Tensor(alphas_cumprod)) ) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer("lvlb_weights", lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) if self.make_it_fit: n_params = len( [ name for name, _ in itertools.chain( self.named_parameters(), self.named_buffers() ) ] ) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params, ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[ i % old_shape[0], j % old_shape[1] ] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = ( self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False) ) print( f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys" ) if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. 
:return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor( self.log_one_minus_alphas_cumprod, t, x_start.shape ) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor( self.posterior_log_variance_clipped, t, x_t.shape ) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1.0, 1.0) model_mean, posterior_variance, posterior_log_variance = self.q_posterior( x_start=x_recon, x_t=x, t=t ) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance( x=x, t=t, clip_denoised=clip_denoised ) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm( reversed(range(0, self.num_timesteps)), desc="Sampling t", total=self.num_timesteps, ): img = self.p_sample( img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised, ) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop( (batch_size, channels, image_size, image_size), return_intermediates=return_intermediates, ) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise ) def get_loss(self, pred, target, mean=True): if self.loss_type == "l1": loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == "l2": if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction="none") else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = 
self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError( f"Paramterization {self.parameterization} not yet supported" ) loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = "train" if self.training else "val" loss_dict.update({f"{log_prefix}/loss_simple": loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f"{log_prefix}/loss_vlb": loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f"{log_prefix}/loss": loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint( 0, self.num_timesteps, (x.shape[0],), device=self.device ).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, "b h w c -> b c h w") x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict( loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True ) self.log( "global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False, ) if self.use_scheduler: lr = self.optimizers().param_groups[0]["lr"] self.log( "lr_abs", lr, prog_bar=True, logger=True, on_step=True, on_epoch=False ) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + "_ema": loss_dict_ema[key] for key in loss_dict_ema} self.log_dict( loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) self.log_dict( loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True ) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), "1 -> b", b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = 
self.sample( batch_size=N, return_intermediates=True ) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__( self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, unet_trainable=True, *args, **kwargs, ): self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs["timesteps"] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = "concat" if concat_mode else "crossattn" if cond_stage_config == "__is_unconditional__": conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.unet_trainable = unet_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer("scale_factor", torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward # construct linear projection layer for concatenating image CLIP embedding and RT self.cc_projection = nn.Linear(772, 768) nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768]) nn.init.zeros_(list(self.cc_projection.parameters())[1]) self.cc_projection.requires_grad_(True) self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True def make_cond_schedule( self, ): self.cond_ids = torch.full( size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long, ) ids = torch.round( torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond) ).long() self.cond_ids[: self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if ( self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt ): assert ( self.scale_factor == 1.0 ), "rather not use custom rescaling and std-rescaling simultaneously" # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer("scale_factor", 1.0 / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule( self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, 
linear_end=2e-2, cosine_s=8e-3, ): super().register_schedule( given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s ) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != "__is_first_stage__" assert config != "__is_unconditional__" model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list( self, samples, desc="", force_no_decoder_quantization=False ): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append( self.decode_first_stage( zd.to(self.device), force_not_quantize=force_no_decoder_quantization ) ) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, "n b c h w -> b n c h w") denoise_grid = rearrange(denoise_grid, "b n c h w -> (b n) c h w") denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError( f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented" ) return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, "encode") and callable( self.cond_stage_model.encode ): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min( torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1 )[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip( weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 
1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip( L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"], ) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold( self, x, kernel_size, stride, uf=1, df=1 ): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting( kernel_size[0], kernel_size[1], Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf), ) fold = torch.nn.Fold( output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h * uf, w * uf ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx) ) elif df > 1 and uf == 1: fold_params = dict( kernel_size=kernel_size, dilation=1, padding=0, stride=stride ) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict( kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df), ) fold = torch.nn.Fold( output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2 ) weighting = self.get_weighting( kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device ).to(x.dtype) normalization = fold(weighting).view( 1, 1, h // df, w // df ) # normalizes the overlap weighting = weighting.view( (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx) ) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input( self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, uncond=0.05, ): x = super().get_input(batch, k) T = batch["T"].to(memory_format=torch.contiguous_format).float() if bs is not None: x = x[:bs] T = T[:bs].to(self.device) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() cond_key = cond_key or self.cond_stage_key xc = super().get_input(batch, cond_key).to(self.device) if bs is not None: xc = xc[:bs] cond = {} # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%. 
random = torch.rand(x.size(0), device=x.device) prompt_mask = rearrange(random < 2 * uncond, "n -> n 1 1") input_mask = 1 - rearrange( (random >= uncond).float() * (random < 3 * uncond).float(), "n -> n 1 1 1" ) null_prompt = self.get_learned_conditioning([""]) # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768] # print('=========== xc shape ===========', xc.shape) with torch.enable_grad(): clip_emb = self.get_learned_conditioning(xc).detach() null_prompt = self.get_learned_conditioning([""]).detach() cond["c_crossattn"] = [ self.cc_projection( torch.cat( [ torch.where(prompt_mask, null_prompt, clip_emb), T[:, None, :], ], dim=-1, ) ) ] cond["c_concat"] = [ input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach() ] out = [z, cond] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out # @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, "b h w c -> b c h w").contiguous() z = 1.0 / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold( z, ks, stride, uf=uf ) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view( (z.shape[0], -1, ks[0], ks[1], z.shape[-1]) ) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim
if isinstance(self.first_stage_model, VQModelInterface):
2
2023-12-01 01:59:42+00:00
16k
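The get_input method in the GeoDream ddpm.py record above implements the conditioning dropout described in its comment (drop only the text/CLIP embedding for 5% of samples, only the concatenated image latents for 5%, and both for 5%). A minimal sketch of just that masking logic, assuming only torch; the batch shapes, null prompt, and projection layer of the real code are left out:

import torch

def cfg_dropout_masks(batch_size: int, uncond: float = 0.05):
    # One uniform draw per sample decides which conditioning is kept.
    r = torch.rand(batch_size)
    drop_text = r < 2 * uncond                     # r in [0.00, 0.10): replace the CLIP embedding with the null prompt
    drop_image = (r >= uncond) & (r < 3 * uncond)  # r in [0.05, 0.15): zero the concatenated image latents
    # The overlap [0.05, 0.10) drops both, so "text only", "image only" and
    # "both" each cover 5% of the batch, matching the comment in get_input.
    return drop_text, drop_image

text_mask, image_mask = cfg_dropout_masks(8)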
lucidrains/meshgpt-pytorch
meshgpt_pytorch/trainer.py
[ { "identifier": "custom_collate", "path": "meshgpt_pytorch/data.py", "snippet": "def custom_collate(data, pad_id = -1):\n is_dict = isinstance(first(data), dict)\n\n if is_dict:\n keys = first(data).keys()\n data = [d.values() for d in data]\n\n output = []\n\n for datum in zip...
from pathlib import Path from functools import partial from packaging import version from contextlib import nullcontext, contextmanager from torch import nn, Tensor from torch.nn import Module from torch.utils.data import Dataset, DataLoader from torch.optim.lr_scheduler import _LRScheduler from pytorch_custom_utils import ( get_adam_optimizer, OptimizerWithWarmupSchedule, add_wandb_tracker_contextmanager ) from accelerate import Accelerator from accelerate.utils import DistributedDataParallelKwargs from beartype import beartype from beartype.door import is_bearable from beartype.typing import Optional, Tuple, Type, List from ema_pytorch import EMA from meshgpt_pytorch.data import custom_collate from meshgpt_pytorch.version import __version__ from meshgpt_pytorch.meshgpt_pytorch import ( MeshAutoencoder, MeshTransformer ) import torch import torch.nn.functional as F
11,743
# constants DEFAULT_DDP_KWARGS = DistributedDataParallelKwargs( find_unused_parameters = True ) # helper functions def exists(v): return v is not None def default(v, d): return v if exists(v) else d def divisible_by(num, den): return (num % den) == 0 def cycle(dl): while True: for data in dl: yield data def maybe_del(d: dict, *keys): for key in keys: if key not in d: continue del d[key] # autoencoder trainer @add_wandb_tracker_contextmanager() class MeshAutoencoderTrainer(Module): @beartype def __init__( self, model: MeshAutoencoder, dataset: Dataset, num_train_steps: int, batch_size: int, grad_accum_every: int, val_dataset: Optional[Dataset] = None, val_every: int = 100, val_num_batches: int = 5, learning_rate: float = 1e-4, weight_decay: float = 0., max_grad_norm: Optional[float] = None, ema_kwargs: dict = dict(), scheduler: Optional[Type[_LRScheduler]] = None, scheduler_kwargs: dict = dict(), accelerator_kwargs: dict = dict(), optimizer_kwargs: dict = dict(), checkpoint_every = 1000, checkpoint_folder = './checkpoints', data_kwargs: Tuple[str, ...] = ['vertices', 'faces', 'face_edges'], warmup_steps = 1000, use_wandb_tracking = False ): super().__init__() # experiment tracker self.use_wandb_tracking = use_wandb_tracking if use_wandb_tracking: accelerator_kwargs['log_with'] = 'wandb' if 'kwargs_handlers' not in accelerator_kwargs: accelerator_kwargs['kwargs_handlers'] = [DEFAULT_DDP_KWARGS] # accelerator self.accelerator = Accelerator(**accelerator_kwargs) self.model = model if self.is_main: self.ema_model = EMA(model, **ema_kwargs) self.optimizer = OptimizerWithWarmupSchedule( accelerator = self.accelerator, optimizer = get_adam_optimizer(model.parameters(), lr = learning_rate, wd = weight_decay, **optimizer_kwargs), scheduler = scheduler, scheduler_kwargs = scheduler_kwargs, warmup_steps = warmup_steps, max_grad_norm = max_grad_norm ) self.dataloader = DataLoader( dataset, batch_size = batch_size, shuffle = True, drop_last = True,
# constants DEFAULT_DDP_KWARGS = DistributedDataParallelKwargs( find_unused_parameters = True ) # helper functions def exists(v): return v is not None def default(v, d): return v if exists(v) else d def divisible_by(num, den): return (num % den) == 0 def cycle(dl): while True: for data in dl: yield data def maybe_del(d: dict, *keys): for key in keys: if key not in d: continue del d[key] # autoencoder trainer @add_wandb_tracker_contextmanager() class MeshAutoencoderTrainer(Module): @beartype def __init__( self, model: MeshAutoencoder, dataset: Dataset, num_train_steps: int, batch_size: int, grad_accum_every: int, val_dataset: Optional[Dataset] = None, val_every: int = 100, val_num_batches: int = 5, learning_rate: float = 1e-4, weight_decay: float = 0., max_grad_norm: Optional[float] = None, ema_kwargs: dict = dict(), scheduler: Optional[Type[_LRScheduler]] = None, scheduler_kwargs: dict = dict(), accelerator_kwargs: dict = dict(), optimizer_kwargs: dict = dict(), checkpoint_every = 1000, checkpoint_folder = './checkpoints', data_kwargs: Tuple[str, ...] = ['vertices', 'faces', 'face_edges'], warmup_steps = 1000, use_wandb_tracking = False ): super().__init__() # experiment tracker self.use_wandb_tracking = use_wandb_tracking if use_wandb_tracking: accelerator_kwargs['log_with'] = 'wandb' if 'kwargs_handlers' not in accelerator_kwargs: accelerator_kwargs['kwargs_handlers'] = [DEFAULT_DDP_KWARGS] # accelerator self.accelerator = Accelerator(**accelerator_kwargs) self.model = model if self.is_main: self.ema_model = EMA(model, **ema_kwargs) self.optimizer = OptimizerWithWarmupSchedule( accelerator = self.accelerator, optimizer = get_adam_optimizer(model.parameters(), lr = learning_rate, wd = weight_decay, **optimizer_kwargs), scheduler = scheduler, scheduler_kwargs = scheduler_kwargs, warmup_steps = warmup_steps, max_grad_norm = max_grad_norm ) self.dataloader = DataLoader( dataset, batch_size = batch_size, shuffle = True, drop_last = True,
collate_fn = partial(custom_collate, pad_id = model.pad_id)
0
2023-11-29 14:58:15+00:00
16k
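The MeshAutoencoderTrainer in the record above keeps a cycle(...) wrapper around its DataLoader, the usual trick for step-based rather than epoch-based training loops. A runnable toy illustration of that pattern; the dataset and step count are made up for the example and are not taken from the repository:

import torch
from torch.utils.data import DataLoader, TensorDataset

def cycle(dl):
    while True:
        for data in dl:
            yield data

loader = DataLoader(TensorDataset(torch.randn(10, 3)), batch_size=4, shuffle=True)
batches = cycle(loader)
for step in range(7):         # more steps than one pass over the data; the generator just restarts the loader
    (batch,) = next(batches)  # never raises StopIteration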
alvinliu0/HumanGaussian
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
13,769
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, )
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, )
self.mesh: Optional[Mesh] = None
6
2023-11-27 02:39:39+00:00
16k
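In the TetrahedraSDFGrid.configure method above, the SDF grid is registered either as an nn.Parameter (trainable) or as a buffer (frozen) depending on cfg.fix_geometry. A small, generic PyTorch illustration of what that choice changes; this is not code from the repository:

import torch
import torch.nn as nn

class Grid(nn.Module):
    def __init__(self, trainable: bool):
        super().__init__()
        values = torch.zeros(8, 1)
        if trainable:
            self.register_parameter("sdf", nn.Parameter(values))  # seen by the optimizer, receives gradients
        else:
            self.register_buffer("sdf", values)                    # saved in state_dict, never optimized

print(len(list(Grid(True).parameters())))   # 1
print(len(list(Grid(False).parameters())))  # 0 (the buffer still appears in state_dict())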
EricGuo5513/momask-codes
train_res_transformer.py
[ { "identifier": "ResidualTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class ResidualTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, cond_drop_prob=0.1,\n num_heads=4, dropout=0.1, clip_dim=512...
import os import torch import numpy as np from torch.utils.data import DataLoader from os.path import join as pjoin from models.mask_transformer.transformer import ResidualTransformer from models.mask_transformer.transformer_trainer import ResidualTransformerTrainer from models.vq.model import RVQVAE from options.train_option import TrainT2MOptions from utils.plot_script import plot_3d_motion from utils.motion_process import recover_from_ric from utils.get_opt import get_opt from utils.fixseed import fixseed from utils.paramUtil import t2m_kinematic_chain, kit_kinematic_chain from data.t2m_dataset import Text2MotionDataset from motion_loaders.dataset_motion_loader import get_dataset_motion_loader from models.t2m_eval_wrapper import EvaluatorModelWrapper
13,955
vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {opt.vq_name}') vq_model.to(opt.device) return vq_model, vq_opt if __name__ == '__main__': parser = TrainT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) opt.model_dir = pjoin(opt.save_root, 'model') # opt.meta_dir = pjoin(opt.save_root, 'meta') opt.eval_dir = pjoin(opt.save_root, 'animation') opt.log_dir = pjoin('./log/res/', opt.dataset_name, opt.name) os.makedirs(opt.model_dir, exist_ok=True) # os.makedirs(opt.meta_dir, exist_ok=True) os.makedirs(opt.eval_dir, exist_ok=True) os.makedirs(opt.log_dir, exist_ok=True) if opt.dataset_name == 't2m': opt.data_root = './dataset/HumanML3D' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 22 opt.max_motion_len = 55 dim_pose = 263 radius = 4 fps = 20 kinematic_chain = t2m_kinematic_chain dataset_opt_path = './checkpoints/t2m/Comp_v6_KLD005/opt.txt' elif opt.dataset_name == 'kit': #TODO opt.data_root = './dataset/KIT-ML' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 21 radius = 240 * 8 fps = 12.5 dim_pose = 251 opt.max_motion_len = 55 kinematic_chain = kit_kinematic_chain dataset_opt_path = './checkpoints/kit/Comp_v6_KLD005/opt.txt' else: raise KeyError('Dataset Does Not Exist') opt.text_dir = pjoin(opt.data_root, 'texts') vq_model, vq_opt = load_vq_model() clip_version = 'ViT-B/32' opt.num_tokens = vq_opt.nb_code opt.num_quantizers = vq_opt.num_quantizers # if opt.is_v2: res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=opt.latent_dim, ff_size=opt.ff_size, num_layers=opt.n_layers, num_heads=opt.n_heads, dropout=opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=opt.share_weight, clip_version=clip_version, opt=opt) # else: # res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, # cond_mode='text', # latent_dim=opt.latent_dim, # ff_size=opt.ff_size, # num_layers=opt.n_layers, # num_heads=opt.n_heads, # dropout=opt.dropout, # clip_dim=512, # shared_codebook=vq_opt.shared_codebook, # cond_drop_prob=opt.cond_drop_prob, # # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, # clip_version=clip_version, # opt=opt) all_params = 0 pc_transformer = sum(param.numel() for param in res_transformer.parameters_wo_clip()) print(res_transformer) # print("Total parameters of t2m_transformer net: {:.2f}M".format(pc_transformer / 1000_000)) all_params += pc_transformer print('Total parameters of all models: {:.2f}M'.format(all_params / 1000_000)) mean = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'mean.npy')) std = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'std.npy')) train_split_file = pjoin(opt.data_root, 'train.txt') val_split_file = pjoin(opt.data_root, 'val.txt') train_dataset = Text2MotionDataset(opt, mean, std, train_split_file) val_dataset = Text2MotionDataset(opt, mean, std, val_split_file) train_loader = DataLoader(train_dataset, batch_size=opt.batch_size, 
num_workers=4, shuffle=True, drop_last=True) val_loader = DataLoader(val_dataset, batch_size=opt.batch_size, num_workers=4, shuffle=True, drop_last=True) eval_val_loader, _ = get_dataset_motion_loader(dataset_opt_path, 32, 'val', device=opt.device) wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda')) eval_wrapper = EvaluatorModelWrapper(wrapper_opt)
def plot_t2m(data, save_dir, captions, m_lengths): data = train_dataset.inv_transform(data) # print(ep_curves.shape) for i, (caption, joint_data) in enumerate(zip(captions, data)): joint_data = joint_data[:m_lengths[i]] joint = recover_from_ric(torch.from_numpy(joint_data).float(), opt.joints_num).numpy() save_path = pjoin(save_dir, '%02d.mp4'%i) # print(joint.shape) plot_3d_motion(save_path, kinematic_chain, joint, title=caption, fps=20) def load_vq_model(): opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt') vq_opt = get_opt(opt_path, opt.device) vq_model = RVQVAE(vq_opt, dim_pose, vq_opt.nb_code, vq_opt.code_dim, vq_opt.output_emb_width, vq_opt.down_t, vq_opt.stride_t, vq_opt.width, vq_opt.depth, vq_opt.dilation_growth_rate, vq_opt.vq_act, vq_opt.vq_norm) ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'), map_location=opt.device) model_key = 'vq_model' if 'vq_model' in ckpt else 'net' vq_model.load_state_dict(ckpt[model_key]) print(f'Loading VQ Model {opt.vq_name}') vq_model.to(opt.device) return vq_model, vq_opt if __name__ == '__main__': parser = TrainT2MOptions() opt = parser.parse() fixseed(opt.seed) opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id)) torch.autograd.set_detect_anomaly(True) opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name) opt.model_dir = pjoin(opt.save_root, 'model') # opt.meta_dir = pjoin(opt.save_root, 'meta') opt.eval_dir = pjoin(opt.save_root, 'animation') opt.log_dir = pjoin('./log/res/', opt.dataset_name, opt.name) os.makedirs(opt.model_dir, exist_ok=True) # os.makedirs(opt.meta_dir, exist_ok=True) os.makedirs(opt.eval_dir, exist_ok=True) os.makedirs(opt.log_dir, exist_ok=True) if opt.dataset_name == 't2m': opt.data_root = './dataset/HumanML3D' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 22 opt.max_motion_len = 55 dim_pose = 263 radius = 4 fps = 20 kinematic_chain = t2m_kinematic_chain dataset_opt_path = './checkpoints/t2m/Comp_v6_KLD005/opt.txt' elif opt.dataset_name == 'kit': #TODO opt.data_root = './dataset/KIT-ML' opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs') opt.joints_num = 21 radius = 240 * 8 fps = 12.5 dim_pose = 251 opt.max_motion_len = 55 kinematic_chain = kit_kinematic_chain dataset_opt_path = './checkpoints/kit/Comp_v6_KLD005/opt.txt' else: raise KeyError('Dataset Does Not Exist') opt.text_dir = pjoin(opt.data_root, 'texts') vq_model, vq_opt = load_vq_model() clip_version = 'ViT-B/32' opt.num_tokens = vq_opt.nb_code opt.num_quantizers = vq_opt.num_quantizers # if opt.is_v2: res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, cond_mode='text', latent_dim=opt.latent_dim, ff_size=opt.ff_size, num_layers=opt.n_layers, num_heads=opt.n_heads, dropout=opt.dropout, clip_dim=512, shared_codebook=vq_opt.shared_codebook, cond_drop_prob=opt.cond_drop_prob, # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, share_weight=opt.share_weight, clip_version=clip_version, opt=opt) # else: # res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim, # cond_mode='text', # latent_dim=opt.latent_dim, # ff_size=opt.ff_size, # num_layers=opt.n_layers, # num_heads=opt.n_heads, # dropout=opt.dropout, # clip_dim=512, # shared_codebook=vq_opt.shared_codebook, # cond_drop_prob=opt.cond_drop_prob, # # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None, # clip_version=clip_version, # opt=opt) all_params = 0 pc_transformer = sum(param.numel() 
for param in res_transformer.parameters_wo_clip()) print(res_transformer) # print("Total parameters of t2m_transformer net: {:.2f}M".format(pc_transformer / 1000_000)) all_params += pc_transformer print('Total parameters of all models: {:.2f}M'.format(all_params / 1000_000)) mean = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'mean.npy')) std = np.load(pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'meta', 'std.npy')) train_split_file = pjoin(opt.data_root, 'train.txt') val_split_file = pjoin(opt.data_root, 'val.txt') train_dataset = Text2MotionDataset(opt, mean, std, train_split_file) val_dataset = Text2MotionDataset(opt, mean, std, val_split_file) train_loader = DataLoader(train_dataset, batch_size=opt.batch_size, num_workers=4, shuffle=True, drop_last=True) val_loader = DataLoader(val_dataset, batch_size=opt.batch_size, num_workers=4, shuffle=True, drop_last=True) eval_val_loader, _ = get_dataset_motion_loader(dataset_opt_path, 32, 'val', device=opt.device) wrapper_opt = get_opt(dataset_opt_path, torch.device('cuda')) eval_wrapper = EvaluatorModelWrapper(wrapper_opt)
trainer = ResidualTransformerTrainer(opt, res_transformer, vq_model)
1
2023-11-29 19:21:27+00:00
16k
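The momask training script above reports model size with a raw sum of numel() over parameters and prints it in millions. A generic version of that count; parameters_wo_clip is specific to the repository, so a plain nn.Module stands in here:

import torch.nn as nn

def count_params_millions(module: nn.Module) -> float:
    return sum(p.numel() for p in module.parameters()) / 1_000_000

print(f"{count_params_millions(nn.Linear(512, 512)):.2f}M")  # 0.26M: 512*512 weights + 512 biases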
dvlab-research/LLMGA
llmga/serve/cli-sdxl.py
[ { "identifier": "IMAGE_TOKEN_INDEX", "path": "llmga/llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "llmga/llava/constants.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN",...
import argparse import torch import requests import os import copy from llmga.llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN from llmga.llava.conversation import conv_templates, SeparatorStyle from llmga.llava.model.builder import load_pretrained_model from llmga.llava.utils import disable_torch_init from llmga.llava.mm_utils import tokenizer_image_token, get_model_name_from_path, KeywordsStoppingCriteria from PIL import Image from PIL import Image from io import BytesIO from llmga.diffusers.pipeline_stable_diffusion_xl_lpw import StableDiffusionXLPipeline
14,275
def load_image(image_file):
    if image_file.startswith('http') or image_file.startswith('https'):
        response = requests.get(image_file)
        image = Image.open(BytesIO(response.content)).convert('RGB')
    else:
        image = Image.open(image_file).convert('RGB')
    return image


def main(args):
    # Model
    disable_torch_init()

    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)
def load_image(image_file):
    if image_file.startswith('http') or image_file.startswith('https'):
        response = requests.get(image_file)
        image = Image.open(BytesIO(response.content)).convert('RGB')
    else:
        image = Image.open(image_file).convert('RGB')
    return image


def main(args):
    # Model
    disable_torch_init()

    model_name = get_model_name_from_path(args.model_path)
    tokenizer, model, image_processor, context_len = load_pretrained_model(args.model_path, args.model_base, model_name, args.load_8bit, args.load_4bit)
pipe = StableDiffusionXLPipeline.from_pretrained(
10
2023-11-27 18:46:55+00:00
16k
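The gold next line for this record opens an SDXL pipeline load. For orientation only, a hedged sketch of how such a call is commonly completed with the standard diffusers API (the model id, dtype, and prompt are assumptions, not taken from llmga/serve/cli-sdxl.py, which uses its own pipeline variant):

import torch
from diffusers import StableDiffusionXLPipeline

# Hypothetical completion of the pipeline load; the repo's actual arguments may differ.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # assumed model id
    torch_dtype=torch.float16,
)
pipe = pipe.to("cuda")

# Generate and save a single image from a text prompt.
image = pipe("a photo of an astronaut riding a horse").images[0]
image.save("out.png")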
sherwinbahmani/4dfy
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Flo...
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio
13,512
nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: raise NotImplementedError def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if 
isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitSDF):
3
2023-11-29 05:15:56+00:00
16k
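The TetrahedraSDFGrid code in this record switches between register_parameter and register_buffer depending on whether the geometry is frozen. A minimal sketch of that PyTorch distinction, with an illustrative module and shapes (not part of the record):

import torch
import torch.nn as nn

class SDFStore(nn.Module):
    def __init__(self, n_vertices: int, fix_geometry: bool):
        super().__init__()
        sdf = torch.zeros((n_vertices, 1), dtype=torch.float32)
        if fix_geometry:
            # Buffers are saved in the state_dict but are not returned by parameters(),
            # so they receive no gradients and are never optimized.
            self.register_buffer("sdf", sdf)
        else:
            # Parameters are saved in the state_dict and updated by the optimizer.
            self.register_parameter("sdf", nn.Parameter(sdf))

store = SDFStore(n_vertices=8, fix_geometry=False)
print(any(p.requires_grad for p in store.parameters()))  # True only when the SDF is trainable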
rlawjdghek/StableVITON
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n ...
import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import itertools import torchvision.transforms as T import random import torch.nn.functional as F from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager, nullcontext from functools import partial from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from omegaconf import ListConfig from torchvision.transforms.functional import resize from diffusers.models.autoencoder_kl import AutoencoderKLOutput from diffusers.models.vae import DecoderOutput from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like, zero_module, conv_nd from ldm.models.diffusion.ddim import DDIMSampler
12,955
if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def get_unconditional_conditioning(self, batch_size, null_label=None): if null_label is not None: xc = null_label if isinstance(xc, ListConfig): xc = list(xc) if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: if hasattr(xc, "to"): xc = xc.to(self.device) c = self.get_learned_conditioning(xc) else: if self.cond_stage_key in ["class_label", "cls"]: xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device) return self.get_learned_conditioning(xc) else: raise NotImplementedError("todo") if isinstance(c, list): # in case the encoder gives us a list for i in range(len(c)): c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device) else: c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device) return c @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None, use_ema_scope=True, **kwargs): ema_scope = self.ema_scope if use_ema_scope else nullcontext use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption", "txt"]: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25) log["conditioning"] = xc elif self.cond_stage_key in ['class_label', "cls"]: try: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25) log['conditioning'] = xc except KeyError: # probably no "human_label" in batch pass elif isimage(xc): log["conditioning"] = xc
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., make_it_fit=False, ucg_training=None, reset_ema=False, reset_num_ema_updates=False, l_cond_simple_weight=1.0, l_cond_recon_weight=1.0, **kwargs ): super().__init__() assert parameterization in ["eps", "x0", "v"], 'currently only supporting "eps" and "x0" and "v"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.unet_config = unet_config self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.imagenet_norm = T.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight self.l_cond_simple_weight = l_cond_simple_weight self.l_cond_recon_weight = l_cond_recon_weight if monitor is not None: self.monitor = monitor self.make_it_fit = make_it_fit if reset_ema: assert exists(ckpt_path) if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) if reset_ema: assert self.use_ema print(f"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) else: self.register_buffer('logvar', logvar) self.ucg_training = ucg_training or dict() if self.ucg_training: self.ucg_prng = np.random.RandomState() def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. - betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) elif self.parameterization == "v": lvlb_weights = torch.ones_like(self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))) else: raise NotImplementedError("mu not supported") lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") @torch.no_grad() def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] if self.make_it_fit: n_params = len([name for name, _ in itertools.chain(self.named_parameters(), self.named_buffers())]) for name, param in tqdm( itertools.chain(self.named_parameters(), self.named_buffers()), desc="Fitting old weights to new weights", total=n_params ): if not name in sd: continue old_shape = sd[name].shape new_shape = param.shape assert len(old_shape) == len(new_shape) if len(new_shape) > 2: # we only modify first two axes assert new_shape[2:] == old_shape[2:] # assumes first axis corresponds to output dim if not new_shape == old_shape: new_param = param.clone() old_param = sd[name] if len(new_shape) == 1: for i in range(new_param.shape[0]): new_param[i] = old_param[i % old_shape[0]] elif len(new_shape) >= 2: for i in range(new_param.shape[0]): for j in range(new_param.shape[1]): new_param[i, j] = old_param[i % old_shape[0], j % old_shape[1]] n_used_old = torch.ones(old_shape[1]) for j in range(new_param.shape[1]): n_used_old[j % old_shape[1]] += 1 n_used_new = torch.zeros(new_shape[1]) for j in range(new_param.shape[1]): n_used_new[j] = n_used_old[j % old_shape[1]] n_used_new = n_used_new[None, :] while len(n_used_new.shape) < len(new_shape): n_used_new = n_used_new.unsqueeze(-1) new_param /= n_used_new sd[name] = new_param missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys:\n {missing}") if len(unexpected) > 0: print(f"\nUnexpected Keys:\n {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. 
""" mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def predict_start_from_z_and_v(self, x_t, t, v): # self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) # self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * v ) def predict_eps_from_z_and_v(self, x_t, t, v): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x_t.shape) * v + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_t.shape) * x_t ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_v(self, x, noise, t): return ( extract_into_tensor(self.sqrt_alphas_cumprod, t, x.shape) * noise - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x.shape) * x ) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}_loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}_loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}_loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: 
x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x) return loss, loss_dict def training_step(self, batch, batch_idx): self.batch = batch for k in self.ucg_training: p = self.ucg_training[k]["p"] val = self.ucg_training[k]["val"] if val is None: val = "" for i in range(len(batch[k])): if self.ucg_prng.choice(2, p=[1 - p, p]): batch[k][i] = val loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, force_null_conditioning=False, *args, **kwargs): self.kwargs = kwargs self.force_null_conditioning = force_null_conditioning self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std self.cond_stage_trainable = cond_stage_trainable assert self.num_timesteps_cond <= 
kwargs['timesteps'] if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning: conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) reset_ema = kwargs.pop("reset_ema", False) reset_num_ema_updates = kwargs.pop("reset_num_ema_updates", False) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None if self.kwargs["use_imageCLIP"]: self.proj_out = nn.Linear(1024, 768) else: self.proj_out = None if self.use_pbe_weight: print("learnable vector gene") self.learnable_vector = nn.Parameter(torch.randn((1,1,768)), requires_grad=True) else: self.learnable_vector = None if self.kwargs["use_lastzc"]: # deprecated self.lastzc = zero_module(conv_nd(2, 4, 4, 1, 1, 0)) else: self.lastzc = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True if reset_ema: assert self.use_ema print( f"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.") self.model_ema = LitEma(self.model) if reset_num_ema_updates: print(" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ ") assert self.use_ema self.model_ema.reset_num_updates() def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None else: model = instantiate_from_config(config) self.cond_stage_model = model else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior elif isinstance(encoder_posterior, AutoencoderKLOutput): z = encoder_posterior.latent_dist.sample() else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = 
torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None, return_x=False, no_latent=False, is_controlnet=False): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) if no_latent: _,_,h,w = x.shape x = resize(x, (h//8, w//8)) return [x, None] encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if is_controlnet and self.lastzc is not None: z = self.lastzc(z) if self.model.conditioning_key is not None and not self.force_null_conditioning: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox', "txt"]: xc = batch[cond_key] elif cond_key in ['class_label', 'cls']: xc = batch else: xc 
= super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: if self.kwargs["use_imageCLIP"]: xc = resize(xc, (224,224)) xc = self.imagenet_norm((xc+1)/2) c = xc else: if isinstance(xc, dict) or isinstance(xc, list): c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) c = c.float() else: if self.kwargs["use_imageCLIP"]: xc = resize(xc, (224,224)) xc = self.imagenet_norm((xc+1)/2) c = xc if bs is not None: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_x: out.extend([x]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z output = self.first_stage_model.decode(z) if not isinstance(output, DecoderOutput): return output else: return output.sample def decode_first_stage_train(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): return self.first_stage_model.encode(x) def shared_step(self, batch, **kwargs): x, c = self.get_input(batch, self.first_stage_key) loss = self(x, c) return loss def forward(self, x, c, *args, **kwargs): if not self.use_pbe_weight: t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) # pbe negative condition else: t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() self.u_cond_prop=random.uniform(0, 1) c["c_crossattn"] = [self.get_learned_conditioning(c["c_crossattn"])] if self.u_cond_prop < self.u_cond_percent: c["c_crossattn"] = [self.learnable_vector.repeat(x.shape[0],1,1)] return self.p_losses(x, c, t, *args, **kwargs) def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, 
x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. """ batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): loss_dict = {} noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output, cond_output_dict = self.apply_model(x_noisy, t, cond) prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise elif self.parameterization == "v": target = self.get_v(x_start, noise, t) else: raise NotImplementedError() model_loss = None if isinstance(model_output, tuple): model_output, model_loss = model_output if self.only_agn_simple_loss: _, _, l_h, l_w = model_output.shape m_agn = F.interpolate(super().get_input(self.batch, "agn_mask"), (l_h, l_w)) loss_simple = self.get_loss(model_output * (1-m_agn), target * (1-m_agn), mean=False).mean([1, 2, 3]) else: loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'simple': loss_simple.mean()}) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() if self.original_elbo_weight != 0: loss_dict.update({f'loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) if model_loss is not None: loss += model_loss loss_dict.update({f"model loss" : model_loss}) loss_dict.update({f'{prefix}_loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out, cond_output_dict = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if isinstance(model_out, tuple): model_out, _ = model_out if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
        if quantize_denoised:
            x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
        model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
        if return_codebook_ids:
            return model_mean, posterior_variance, posterior_log_variance, logits
        elif return_x0:
            return model_mean, posterior_variance, posterior_log_variance, x_recon
        else:
            return model_mean, posterior_variance, posterior_log_variance

    @torch.no_grad()
    def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
                 return_codebook_ids=False, quantize_denoised=False, return_x0=False,
                 temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
        b, *_, device = *x.shape, x.device
        outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
                                       return_codebook_ids=return_codebook_ids,
                                       quantize_denoised=quantize_denoised,
                                       return_x0=return_x0,
                                       score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
        if return_codebook_ids:
            raise DeprecationWarning("Support dropped.")
            model_mean, _, model_log_variance, logits = outputs
        elif return_x0:
            model_mean, _, model_log_variance, x0 = outputs
        else:
            model_mean, _, model_log_variance = outputs

        noise = noise_like(x.shape, device, repeat_noise) * temperature
        if noise_dropout > 0.:
            noise = torch.nn.functional.dropout(noise, p=noise_dropout)
        # no noise when t == 0
        nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))

        if return_codebook_ids:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
        if return_x0:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
        else:
            return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

    @torch.no_grad()
    def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
                              img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
                              score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
                              log_every_t=None):
        if not log_every_t:
            log_every_t = self.log_every_t
        timesteps = self.num_timesteps
        if batch_size is not None:
            b = batch_size if batch_size is not None else shape[0]
            shape = [batch_size] + list(shape)
        else:
            b = batch_size = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=self.device)
        else:
            img = x_T
        intermediates = []
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
                        total=timesteps) if verbose else reversed(
            range(0, timesteps))
        if type(temperature) == float:
            temperature = [temperature] * timesteps

        for i in iterator:
            ts = torch.full((b,), i, device=self.device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img, x0_partial = self.p_sample(img, cond, ts,
                                            clip_denoised=self.clip_denoised,
                                            quantize_denoised=quantize_denoised, return_x0=True,
                                            temperature=temperature[i], noise_dropout=noise_dropout,
                                            score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
            if mask is not None:
                assert x0 is not None
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(x0_partial)
            if callback: callback(i)
            if img_callback: img_callback(img, i)
        return img, intermediates

    @torch.no_grad()
    def p_sample_loop(self, cond, shape, return_intermediates=False,
                      x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
                      mask=None, x0=None, img_callback=None, start_T=None,
                      log_every_t=None):
        if not log_every_t:
            log_every_t = self.log_every_t
        device = self.betas.device
        b = shape[0]
        if x_T is None:
            img = torch.randn(shape, device=device)
        else:
            img = x_T

        intermediates = [img]
        if timesteps is None:
            timesteps = self.num_timesteps

        if start_T is not None:
            timesteps = min(timesteps, start_T)
        iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
            range(0, timesteps))

        if mask is not None:
            assert x0 is not None
            assert x0.shape[2:3] == mask.shape[2:3]  # spatial size has to match

        for i in iterator:
            ts = torch.full((b,), i, device=device, dtype=torch.long)
            if self.shorten_cond_schedule:
                assert self.model.conditioning_key != 'hybrid'
                tc = self.cond_ids[ts].to(cond.device)
                cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))

            img = self.p_sample(img, cond, ts,
                                clip_denoised=self.clip_denoised,
                                quantize_denoised=quantize_denoised)
            if mask is not None:
                img_orig = self.q_sample(x0, ts)
                img = img_orig * mask + (1. - mask) * img

            if i % log_every_t == 0 or i == timesteps - 1:
                intermediates.append(img)
            if callback: callback(i)
            if img_callback: img_callback(img, i)

        if return_intermediates:
            return img, intermediates
        return img

    @torch.no_grad()
    def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
               verbose=True, timesteps=None, quantize_denoised=False,
               mask=None, x0=None, shape=None, **kwargs):
        if shape is None:
            shape = (batch_size, self.channels, self.image_size, self.image_size)
        if cond is not None:
            if isinstance(cond, dict):
                cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
                list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
            else:
                cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
        return self.p_sample_loop(cond,
                                  shape,
                                  return_intermediates=return_intermediates, x_T=x_T,
                                  verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
                                  mask=mask, x0=x0)

    @torch.no_grad()
    def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):
        if ddim:
            ddim_sampler = DDIMSampler(self)
            shape = (self.channels, self.image_size, self.image_size)
            samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,
                                                         shape, cond, verbose=False, **kwargs)
        else:
            samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
                                                 return_intermediates=True, **kwargs)

        return samples, intermediates

    @torch.no_grad()
    def get_unconditional_conditioning(self, batch_size, null_label=None):
        if null_label is not None:
            xc = null_label
            if isinstance(xc, ListConfig):
                xc = list(xc)
            if isinstance(xc, dict) or isinstance(xc, list):
                c = self.get_learned_conditioning(xc)
            else:
                if hasattr(xc, "to"):
                    xc = xc.to(self.device)
                c = self.get_learned_conditioning(xc)
        else:
            if self.cond_stage_key in ["class_label", "cls"]:
                xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
                return self.get_learned_conditioning(xc)
            else:
                raise NotImplementedError("todo")
        if isinstance(c, list):  # in case the encoder gives us a list
            for i in range(len(c)):
                c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
        else:
            c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
        return c

    @torch.no_grad()
    def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,
                   quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
                   plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
                   use_ema_scope=True, **kwargs):
        ema_scope = self.ema_scope if use_ema_scope else nullcontext
        use_ddim = ddim_steps is not None

        log = dict()
        z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
                                           return_first_stage_outputs=True,
                                           force_c_encode=True,
                                           return_original_cond=True,
                                           bs=N)
        N = min(x.shape[0], N)
        n_row = min(x.shape[0], n_row)
        log["inputs"] = x
        log["reconstruction"] = xrec
        if self.model.conditioning_key is not None:
            if hasattr(self.cond_stage_model, "decode"):
                xc = self.cond_stage_model.decode(c)
                log["conditioning"] = xc
            elif self.cond_stage_key in ["caption", "txt"]:
                xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)
                log["conditioning"] = xc
            elif self.cond_stage_key in ['class_label', "cls"]:
                try:
                    xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"], size=x.shape[2] // 25)
                    log['conditioning'] = xc
                except KeyError:
                    # probably no "human_label" in batch
                    pass
            elif isimage(xc):
                log["conditioning"] = xc
if ismap(xc):
3
2023-12-02 05:56:58+00:00
16k
AIFSH/NativeSpeaker
src/core.py
[ { "identifier": "HandleLog", "path": "src/log_helper.py", "snippet": "class HandleLog:\n \"\"\"\n 先创建日志记录器(logging.getLogger),然后再设置日志级别(logger.setLevel),\n 接着再创建日志文件,也就是日志保存的地方(logging.FileHandler),然后再设置日志格式(logging.Formatter),\n 最后再将日志处理程序记录到记录器(addHandler)\n \"\"\"\n\n def __init__(s...
import os
import torch
import soundfile as sf
import gc; gc.collect(); torch.cuda.empty_cache(); del cloner
import gc; gc.collect(); torch.cuda.empty_cache(); del diarize_model
import gc; gc.collect(); torch.cuda.empty_cache(); del whisper
from typing import Any
from tqdm import tqdm
from src.log_helper import HandleLog
from moviepy.editor import VideoFileClip,concatenate_videoclips
from pathlib import Path
from pydub import AudioSegment
from src.audio_bgm_split import AudioProcess
from src.voice_clone import VoiceCloner
from src.temp_manager import TempFileManager
from src.translator import Translator
from src.lipsync import LipSync
from src.upscale import Upscale
from src.nfsw import analyse_video
from src.third_part.whisperx import load_model,load_audio,DiarizationPipeline
10,879
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
logger = HandleLog()

class Core:
    def __init__(self, args) -> None:
        cur_path = os.path.dirname(os.path.realpath(__file__))  # current path
        self.weights_path = os.path.join(os.path.dirname(cur_path), 'weights')  # weights_path to save model
        if not os.path.exists(self.weights_path):
            os.mkdir(self.weights_path)
        #
        self.input_file = args.input_file_path
        self.output_file = args.output_file_path
        self.lang_code = args.lang_code
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.hf_token = args.hf_token
        self.temp_manager = TempFileManager()
        self.translotor = Translator()
        self.model_name = args.model_name
        self.xt_version_name = args.xt_version_name
        if analyse_video(args.input_file_path):
            raise("sorry! nativespeaker is not for you")

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        logger.critical("[Step 1] Moviepy split voice and frames from video")
        org_voice_path = os.path.join(Path(self.input_file).parent, "org_voice.wav")
        org_video_clip = VideoFileClip(self.input_file)
        org_video_clip.audio.write_audiofile(org_voice_path,codec='pcm_s16le')
        logger.info("save original voice in {}".format(org_voice_path))
        logger.critical("[Step 2] H5 Split vocal and bgm from voice")
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'
logger = HandleLog()

class Core:
    def __init__(self, args) -> None:
        cur_path = os.path.dirname(os.path.realpath(__file__))  # current path
        self.weights_path = os.path.join(os.path.dirname(cur_path), 'weights')  # weights_path to save model
        if not os.path.exists(self.weights_path):
            os.mkdir(self.weights_path)
        #
        self.input_file = args.input_file_path
        self.output_file = args.output_file_path
        self.lang_code = args.lang_code
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.hf_token = args.hf_token
        self.temp_manager = TempFileManager()
        self.translotor = Translator()
        self.model_name = args.model_name
        self.xt_version_name = args.xt_version_name
        if analyse_video(args.input_file_path):
            raise("sorry! nativespeaker is not for you")

    def __call__(self, *args: Any, **kwds: Any) -> Any:
        logger.critical("[Step 1] Moviepy split voice and frames from video")
        org_voice_path = os.path.join(Path(self.input_file).parent, "org_voice.wav")
        org_video_clip = VideoFileClip(self.input_file)
        org_video_clip.audio.write_audiofile(org_voice_path,codec='pcm_s16le')
        logger.info("save original voice in {}".format(org_voice_path))
        logger.critical("[Step 2] H5 Split vocal and bgm from voice")
audio_process = AudioProcess(15)
1
2023-12-01 12:23:19+00:00
16k
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while T...
import os
import sys
import numpy as np
import torch
import json
import imageio
import cv2
import random
from PIL import Image
from typing import NamedTuple
from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
    read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
from pathlib import Path
from plyfile import PlyData, PlyElement
from utils.sh_utils import SH2RGB
from scene.gaussian_model import BasicPointCloud
from smpl.smpl_numpy import SMPL
from smplx.body_models import SMPLX
from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
14,389
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#

class CameraInfo(NamedTuple):
    uid: int
    pose_id: int
    R: np.array
    T: np.array
    K: np.array
    FovY: np.array
    FovX: np.array
    image: np.array
    image_path: str
    image_name: str
    bkgd_mask: np.array
    bound_mask: np.array
    width: int
    height: int
    smpl_param: dict
    world_vertex: np.array
    world_bound: np.array
    big_pose_smpl_param: dict
    big_pose_world_vertex: np.array
    big_pose_world_bound: np.array

class SceneInfo(NamedTuple):
    point_cloud: BasicPointCloud
    train_cameras: list
    test_cameras: list
    nerf_normalization: dict
    ply_path: str

def getNerfppNorm(cam_info):
    def get_center_and_diag(cam_centers):
        cam_centers = np.hstack(cam_centers)
        avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
        center = avg_cam_center
        dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
        diagonal = np.max(dist)
        return center.flatten(), diagonal

    cam_centers = []
    for cam in cam_info:
        W2C = getWorld2View2(cam.R, cam.T)
        C2W = np.linalg.inv(W2C)
        cam_centers.append(C2W[:3, 3:4])

    center, diagonal = get_center_and_diag(cam_centers)
    radius = diagonal * 1.1
    translate = -center

    return {"translate": translate, "radius": radius}

def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
    cam_infos = []
    for idx, key in enumerate(cam_extrinsics):
        sys.stdout.write('\r')
        # the exact output you're looking for:
        sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
        sys.stdout.flush()

        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        height = intr.height
        width = intr.width
        uid = intr.id
#
# Copyright (C) 2023, Inria
# GRAPHDECO research group, https://team.inria.fr/graphdeco
# All rights reserved.
#
# This software is free for non-commercial, research and evaluation use
# under the terms of the LICENSE.md file.
#
# For inquiries contact george.drettakis@inria.fr
#

class CameraInfo(NamedTuple):
    uid: int
    pose_id: int
    R: np.array
    T: np.array
    K: np.array
    FovY: np.array
    FovX: np.array
    image: np.array
    image_path: str
    image_name: str
    bkgd_mask: np.array
    bound_mask: np.array
    width: int
    height: int
    smpl_param: dict
    world_vertex: np.array
    world_bound: np.array
    big_pose_smpl_param: dict
    big_pose_world_vertex: np.array
    big_pose_world_bound: np.array

class SceneInfo(NamedTuple):
    point_cloud: BasicPointCloud
    train_cameras: list
    test_cameras: list
    nerf_normalization: dict
    ply_path: str

def getNerfppNorm(cam_info):
    def get_center_and_diag(cam_centers):
        cam_centers = np.hstack(cam_centers)
        avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
        center = avg_cam_center
        dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
        diagonal = np.max(dist)
        return center.flatten(), diagonal

    cam_centers = []
    for cam in cam_info:
        W2C = getWorld2View2(cam.R, cam.T)
        C2W = np.linalg.inv(W2C)
        cam_centers.append(C2W[:3, 3:4])

    center, diagonal = get_center_and_diag(cam_centers)
    radius = diagonal * 1.1
    translate = -center

    return {"translate": translate, "radius": radius}

def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder):
    cam_infos = []
    for idx, key in enumerate(cam_extrinsics):
        sys.stdout.write('\r')
        # the exact output you're looking for:
        sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
        sys.stdout.flush()

        extr = cam_extrinsics[key]
        intr = cam_intrinsics[extr.camera_id]
        height = intr.height
        width = intr.width
        uid = intr.id
R = np.transpose(qvec2rotmat(extr.qvec))
2
2023-11-29 07:10:39+00:00
16k
emdgroup/baybe
examples/Serialization/basic_serialization.py
[ { "identifier": "Campaign", "path": "baybe/campaign.py", "snippet": "class Campaign(SerialMixin):\n \"\"\"Main class for interaction with BayBE.\n\n Campaigns define and record an experimentation process, i.e. the execution of a\n series of measurements and the iterative sequence of events invo...
import numpy as np

from baybe import Campaign
from baybe.objective import Objective
from baybe.parameters import (
    CategoricalParameter,
    NumericalDiscreteParameter,
    SubstanceParameter,
)
from baybe.recommenders import FPSRecommender, SequentialGreedyRecommender
from baybe.searchspace import SearchSpace
from baybe.strategies import TwoPhaseStrategy
from baybe.targets import NumericalTarget
11,824
### Example for the serialization of a campaign

# This example shows how to serialize and also de-serialize a campaign.
# It demonstrates and shows that the "original" and "new" objects behave the same.

# This example assumes some basic familiarity with using BayBE.
# We thus refer to [`campaign`](./../Basics/campaign.md) for a basic example.

#### Necessary imports

#### Experiment setup

parameters = [
    CategoricalParameter(
        name="Granularity",
        values=["coarse", "medium", "fine"],
        encoding="OHE",
    ),
    NumericalDiscreteParameter(
        name="Pressure[bar]",
        values=[1, 5, 10],
        tolerance=0.2,
    ),
    NumericalDiscreteParameter(
        name="Temperature[degree_C]",
        values=np.linspace(100, 200, 10),
    ),
    SubstanceParameter(
        name="Solvent",
        data={
            "Solvent A": "COC",
            "Solvent B": "CCC",
            "Solvent C": "O",
            "Solvent D": "CS(=O)C",
        },
        encoding="MORDRED",
    ),
]

#### Creating the campaign

campaign = Campaign(
    searchspace=SearchSpace.from_product(parameters=parameters, constraints=None),
    objective=Objective(
        mode="SINGLE", targets=[NumericalTarget(name="Yield", mode="MAX")]
    ),
    strategy=TwoPhaseStrategy(
### Example for the serialization of a campaign

# This example shows how to serialize and also de-serialize a campaign.
# It demonstrates and shows that the "original" and "new" objects behave the same.

# This example assumes some basic familiarity with using BayBE.
# We thus refer to [`campaign`](./../Basics/campaign.md) for a basic example.

#### Necessary imports

#### Experiment setup

parameters = [
    CategoricalParameter(
        name="Granularity",
        values=["coarse", "medium", "fine"],
        encoding="OHE",
    ),
    NumericalDiscreteParameter(
        name="Pressure[bar]",
        values=[1, 5, 10],
        tolerance=0.2,
    ),
    NumericalDiscreteParameter(
        name="Temperature[degree_C]",
        values=np.linspace(100, 200, 10),
    ),
    SubstanceParameter(
        name="Solvent",
        data={
            "Solvent A": "COC",
            "Solvent B": "CCC",
            "Solvent C": "O",
            "Solvent D": "CS(=O)C",
        },
        encoding="MORDRED",
    ),
]

#### Creating the campaign

campaign = Campaign(
    searchspace=SearchSpace.from_product(parameters=parameters, constraints=None),
    objective=Objective(
        mode="SINGLE", targets=[NumericalTarget(name="Yield", mode="MAX")]
    ),
    strategy=TwoPhaseStrategy(
recommender=SequentialGreedyRecommender(),
5
2023-11-27 17:02:40+00:00
16k
UX-Decoder/LLaVA-Grounding
llava/model/language_model/llava_llama_gd.py
[ { "identifier": "LlavaMetaModel", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config...
from typing import List, Optional, Tuple, Union
from torch.nn import CrossEntropyLoss
from transformers import AutoConfig, AutoModelForCausalLM, \
    LlamaConfig, LlamaModel, LlamaForCausalLM
from transformers.modeling_outputs import CausalLMOutputWithPast
from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM, LlavaMetaForCausalLM_gd,LlavaMetaForCausalLM_gd_interactive
import torch
import torch.nn as nn
import transformers
12,209
#    Copyright 2023 Haotian Liu
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

IGNORE_INDEX=-100

# @dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""

    # tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances,tokenizer):
        input_ids, labels = tuple([instance[key] for instance in instances]
                                  for key in ("input_ids", "labels"))
        input_ids = torch.nn.utils.rnn.pad_sequence(
            input_ids,
            batch_first=True,
            padding_value=tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels,
                                                 batch_first=True,
                                                 padding_value=IGNORE_INDEX)
        input_ids = input_ids[:, :tokenizer.model_max_length]
        labels = labels[:, :tokenizer.model_max_length]
        batch = dict(
            input_ids=input_ids,
            labels=labels,
            attention_mask=input_ids.ne(tokenizer.pad_token_id),
        )

        if 'image_clip' in instances[0]:
            images = [instance['image_clip'] for instance in instances]
            if all(x is not None and x.shape == images[0].shape for x in images):
                batch['images'] = torch.stack(images)
            else:
                batch['images'] = images

        return batch

class LlavaConfig(LlamaConfig):
    model_type = "llava"

class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
    config_class = LlavaConfig

    def __init__(self, config: LlamaConfig):
        super(LlavaLlamaModel, self).__init__(config)
#    Copyright 2023 Haotian Liu
#
#    Licensed under the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS,
#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#    See the License for the specific language governing permissions and
#    limitations under the License.

IGNORE_INDEX=-100

# @dataclass
class DataCollatorForSupervisedDataset(object):
    """Collate examples for supervised fine-tuning."""

    # tokenizer: transformers.PreTrainedTokenizer

    def __call__(self, instances,tokenizer):
        input_ids, labels = tuple([instance[key] for instance in instances]
                                  for key in ("input_ids", "labels"))
        input_ids = torch.nn.utils.rnn.pad_sequence(
            input_ids,
            batch_first=True,
            padding_value=tokenizer.pad_token_id)
        labels = torch.nn.utils.rnn.pad_sequence(labels,
                                                 batch_first=True,
                                                 padding_value=IGNORE_INDEX)
        input_ids = input_ids[:, :tokenizer.model_max_length]
        labels = labels[:, :tokenizer.model_max_length]
        batch = dict(
            input_ids=input_ids,
            labels=labels,
            attention_mask=input_ids.ne(tokenizer.pad_token_id),
        )

        if 'image_clip' in instances[0]:
            images = [instance['image_clip'] for instance in instances]
            if all(x is not None and x.shape == images[0].shape for x in images):
                batch['images'] = torch.stack(images)
            else:
                batch['images'] = images

        return batch

class LlavaConfig(LlamaConfig):
    model_type = "llava"

class LlavaLlamaModel(LlavaMetaModel, LlamaModel):
    config_class = LlavaConfig

    def __init__(self, config: LlamaConfig):
        super(LlavaLlamaModel, self).__init__(config)
class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM):
1
2023-12-04 10:59:21+00:00
16k