repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
nepali-ner | nepali-ner-master/utils/dataloader.py | #!/usr/bin/env python3
'''
NER Dataloader
Author: Oyesh Mann Singh
Date: 10/14/2019
Data format:
<WORD> <NER-tag>
'''
import os
import numpy as np
import pickle
import torch
from torchtext import data
from torchtext import vocab
from torchtext.datasets import SequenceTaggingDataset
from uniseg.graphemecluster import grapheme_clusters
class Dataloader():
    """
    Loads the train/val/test NER datasets (CoNLL-style, tab-separated
    ``<WORD>\\t<NER-tag>`` or ``<WORD>\\t<POS-tag>\\t<NER-tag>`` when
    ``config.use_pos`` is set) and builds word-, character-, grapheme- and
    label-level vocabularies plus their embedding weight matrices.
    """

    def __init__(self, config, k):
        """
        :param config: configuration object with dataset/embedding paths and flags
        :param k: fold sub-directory name, joined onto config.root_path
        """
        self.root_path = os.path.join(config.root_path, k)
        self.batch_size = config.batch_size
        self.device = config.device
        self.use_pos = config.use_pos

        # tokenize=list splits each word into its characters
        self.txt_field = data.Field(tokenize=list, use_vocab=True, unk_token='<unk>', batch_first=True)
        self.label_field = data.Field(unk_token=None, batch_first=True)
        self.char_field = data.Field(unk_token='<unk>', sequential=False)
        self.graph_field = data.Field(unk_token='<unk>', sequential=False)

        self.fields = (('TEXT', self.txt_field), ('LABEL', self.label_field))
        if config.use_pos:
            self.pos_field = data.Field(unk_token=None, batch_first=True)
            self.fields = (('TEXT', self.txt_field), ('POS', self.pos_field), ('LABEL', self.label_field))

        self.train_ds, self.val_ds, self.test_ds = SequenceTaggingDataset.splits(
            path=self.root_path,
            fields=self.fields,
            separator='\t',
            train='train.txt',
            validation='val.txt',
            test='test.txt')

        # Collect the unique characters and grapheme clusters over ALL splits.
        char_set, graph_set = set(), set()
        for example in self.train_ds.examples + self.test_ds.examples + self.val_ds.examples:
            for token in example.TEXT:
                char_set.update(token)
                graph_set.update(grapheme_clusters(token))
        self.char_list = sorted(char_set)
        self.graph_list = sorted(graph_set)

        self.embedding_dir = config.emb_dir
        self.vec = vocab.Vectors(name=config.emb_file, cache=self.embedding_dir)
        self.txt_field.build_vocab(self.train_ds, self.test_ds, self.val_ds, max_size=None, vectors=self.vec)
        self.label_field.build_vocab(self.train_ds.LABEL, self.test_ds.LABEL, self.val_ds.LABEL)

        # BUGFIX: use context managers so the pickle file handles are closed
        # (previously `pickle.dump(..., open(...))` leaked the handles).
        with open(config.vocab_file, 'wb') as vocab_fp:
            pickle.dump(self.txt_field, vocab_fp)
        with open(config.label_file, 'wb') as label_fp:
            pickle.dump(self.label_field, label_fp)

        # Build char/grapheme vocabularies once (previously they were built
        # twice: a plain build followed by this one, the second overwriting
        # the first).
        if config.char_pretrained:
            self.char_vec = vocab.Vectors(name=config.char_emb_file, cache=self.embedding_dir)
            self.graph_vec = vocab.Vectors(name=config.graph_emb_file, cache=self.embedding_dir)
            self.char_field.build_vocab(self.char_list, vectors=self.char_vec)
            self.graph_field.build_vocab(self.graph_list, vectors=self.graph_vec)
        else:
            self.char_field.build_vocab(self.char_list)
            self.graph_field.build_vocab(self.graph_list)

        self.vocab_size = len(self.txt_field.vocab)
        self.tagset_size = len(self.label_field.vocab)
        self.char_vocab_size = len(self.char_field.vocab)
        self.graph_vocab_size = len(self.graph_field.vocab)

        # Pretrained embedding matrices (None-like empty tensors when no vectors)
        self.weights = self.txt_field.vocab.vectors
        self.char_weights = self.char_field.vocab.vectors
        self.graph_weights = self.graph_field.vocab.vectors

        if config.use_pos:
            self.pos_field.build_vocab(self.train_ds.POS, self.test_ds.POS, self.val_ds.POS)
            # +2 padding of the one-hot table; original note: len(pos) = 56
            # while len(pos_field.vocab) = 55 -- TODO confirm the offset
            self.pos_size = len(self.pos_field.vocab) + 2
            self.pos_one_hot = np.eye(self.pos_size)
            self.one_hot_weight = torch.from_numpy(self.pos_one_hot).float()

        if config.verbose:
            self.print_stat()

    def tokenizer(self, x):
        """Whitespace-tokenize a raw sentence string into a list of words."""
        return x.split()

    # ------------------------------------------------------------------
    # NOTE(review): the accessor methods below are shadowed by the
    # same-named instance attributes assigned in __init__ (instance
    # attributes take precedence over class-level functions), so they are
    # unreachable through an instance; kept for interface compatibility.
    # ------------------------------------------------------------------
    def train_ds(self):
        return self.train_ds

    def val_ds(self):
        return self.val_ds

    def test_ds(self):
        return self.test_ds

    def txt_field(self):
        return self.txt_field

    def label_field(self):
        return self.label_field

    def vocab_size(self):
        return self.vocab_size

    def tagset_size(self):
        return self.tagset_size

    def pos_size(self):
        return self.pos_size

    def weights(self):
        return self.weights

    def char_weights(self):
        return self.char_weights

    def graph_weights(self):
        return self.graph_weights

    def get_char_vocab_size(self):
        return self.char_vocab_size

    def get_chars(self):
        return self.char_list

    def get_graph_vocab_size(self):
        return self.graph_vocab_size

    def get_graph(self):
        return self.graph_list

    def print_stat(self):
        """Print vocabulary-size statistics for all fields."""
        print('Length of text vocab (unique words in dataset) = ', self.vocab_size)
        print('Length of label vocab (unique tags in labels) = ', self.tagset_size)
        if self.use_pos:
            print('Length of POS vocab (unique tags in POS) = ', self.pos_size)
        print('Length of char vocab (unique characters in dataset) = ', self.char_vocab_size)
        print('Length of grapheme vocab (unique graphemes in dataset) = ', self.graph_vocab_size)

    def load_data(self, batch_size, shuffle=True):
        """
        Build BucketIterators for the three splits.

        :param batch_size: batch size for all three iterators
        :param shuffle: whether to shuffle batches (BUGFIX: previously this
                        parameter was ignored and shuffle was hard-coded True)
        :return: (train_iter, val_iter, test_iter)
        """
        train_iter, val_iter, test_iter = data.BucketIterator.splits(
            datasets=(self.train_ds, self.val_ds, self.test_ds),
            batch_sizes=(batch_size, batch_size, batch_size),
            sort_key=lambda x: len(x.TEXT),
            device=self.device,
            sort_within_batch=True,
            repeat=False,
            shuffle=shuffle)
        return train_iter, val_iter, test_iter
| 6,000 | 34.720238 | 116 | py |
nepali-ner | nepali-ner-master/utils/eval.py | '''
Writes result into the file
Author: Oyesh Mann Singh
'''
import os
import torch
from tqdm import tqdm
import utils.conlleval_perl as e
tqdm.pandas(desc='Progress')
class Evaluator:
    """
    Runs a trained NER model over the train/val/test iterators, writes
    ``<token> <gold-tag> <predicted-tag>`` result files, and evaluates the
    test file with the CoNLL scorer.
    """

    def __init__(self, config, logger, model, dataloader, model_name):
        self.config = config
        self.logger = logger
        self.model = model
        self.model_name = model_name
        self.dataloader = dataloader
        self.use_pos = config.use_pos
        # batch_size=1 so each batch corresponds to a single sentence
        self.train_dl, self.val_dl, self.test_dl = dataloader.load_data(batch_size=1, shuffle=False)

        self.results_dir = config.results_dir
        tr_file = self.model_name + '_train.txt'
        ts_file = self.model_name + '_test.txt'
        vl_file = self.model_name + '_val.txt'
        self.train_file = os.path.join(self.results_dir, tr_file)
        self.test_file = os.path.join(self.results_dir, ts_file)
        self.val_file = os.path.join(self.results_dir, vl_file)

        # conlleval options
        self.raw = config.raw
        self.delimiter = config.delimiter
        self.oTag = config.oTag
        self.latex = config.latex

    def numpy_to_sent(self, tensor):
        '''
        Returns the corresponding TEXT of given Predictions
        Returns chunks of string
        '''
        return ' '.join([self.dataloader.txt_field.vocab.itos[i] for i in tensor.cpu().data.numpy()[0]]).split()

    def pred_to_tag(self, predictions):
        '''
        Returns the corresponding TAGS of given Predictions
        Returns chunks of string
        '''
        return ' '.join([self.dataloader.label_field.vocab.itos[i] for i in predictions]).split()

    def _write_split(self, file_path, data_iter):
        """
        Run the model over one split and write '<token> <gold> <pred>' lines,
        one sentence per blank-line-separated block.

        :param file_path: output file path
        :param data_iter: iterator yielding ((X[, p], y), _) batches
        """
        with open(file_path, 'w', encoding='utf-8') as out_fp:
            self.logger.info('Writing in file: {0}'.format(file_path))
            for (k, v) in tqdm(iter(data_iter), leave=False):
                if self.use_pos:
                    (X, p, y) = k
                    pred = self.model(X, p)
                else:
                    (X, y) = k
                    pred = self.model(X, None)
                sent = self.numpy_to_sent(X)
                pred_idx = torch.max(pred, 1)[1]
                y = y.view(-1)
                true_tag = self.pred_to_tag(y.cpu().data.numpy().tolist())
                pred_tag = self.pred_to_tag(pred_idx.cpu().data.numpy().tolist())
                for s, gt, pt in zip(sent, true_tag, pred_tag):
                    out_fp.write(s + ' ' + gt + ' ' + pt + '\n')
                out_fp.write('\n')

    def write_results(self):
        """Write result files for the train, test and val splits.

        Previously this method contained three verbatim copies of the same
        loop; the shared logic now lives in ``_write_split``.
        """
        self._write_split(self.train_file, self.train_dl)
        self._write_split(self.test_file, self.test_dl)
        self._write_split(self.val_file, self.val_dl)

    def conll_eval(self):
        """
        Prints CoNLL Evaluation Report

        :return: (accuracy, precision, recall, f1) from the CoNLL scorer
        """
        acc, prec, rec, f1 = e.evaluate_conll_file(logger=self.logger,
                                                   fileName=self.test_file,
                                                   raw=self.raw,
                                                   delimiter=self.delimiter,
                                                   oTag=self.oTag,
                                                   latex=self.latex)
        return acc, prec, rec, f1

    def infer(self, sent):
        """
        Tag a raw sentence string and return the predicted tag sequence.
        """
        # Tokenize the sentence
        sent_tok = self.dataloader.tokenizer(sent)
        print(sent_tok)
        # Get index from vocab
        X = [self.dataloader.txt_field.vocab.stoi[t] for t in sent_tok]
        # Convert into torch and reshape into [batch, sent_length]
        X = torch.LongTensor(X).to(self.config.device)
        X = X.unsqueeze(0)
        # Get predictions
        pred = self.model(X, None)
        pred_idx = torch.max(pred, 1)[1]
        y_pred_val = pred_idx.cpu().data.numpy().tolist()
        pred_tag = self.pred_to_tag(y_pred_val)
        return pred_tag
frozen-in-time | frozen-in-time-main/test.py | import argparse
import pandas as pd
import torch
import transformers
from sacred import Experiment
from tqdm import tqdm
import glob
import data_loader.data_loader as module_data
import model.metric as module_metric
import model.model as module_arch
from model.model import compute_similarity
from parse_config import ConfigParser
from trainer.trainer import verbose
from utils.util import state_dict_data_parallel_fix
import numpy as np
import os
import copy
ex = Experiment('test')
def _collate_meta(meta_arr):
    """Merge a list of per-batch meta dicts into a single dict.

    List-valued entries are flattened; tensor-valued entries are concatenated.
    (This logic previously appeared twice, copy-pasted, inside run().)
    """
    collated = {key: [] for key in meta_arr[0].keys()}
    for meta in meta_arr:
        for key, val in meta.items():
            collated[key].append(val)
    for key, val in collated.items():
        if isinstance(val[0], list):
            collated[key] = [item for sublist in val for item in sublist]
        elif isinstance(val[0], torch.Tensor):
            collated[key] = torch.cat(val)
        else:
            raise NotImplementedError
    return collated


@ex.main
def run():
    """Extract text/video embeddings for the chosen split, optionally save
    them to disk in parts, and report retrieval metrics."""
    # setup data_loader instance; force deterministic, augmentation-free transforms
    config._config['data_loader']['args']['split'] = args.split
    config._config['data_loader']['args']['tsfm_split'] = 'test'  # test transforms remove augmentations
    config._config['data_loader']['args']['shuffle'] = False
    config._config['data_loader']['args']['batch_size'] = args.batch_size
    config._config['data_loader']['args']['sliding_window_stride'] = args.sliding_window_stride
    data_loader = config.initialize('data_loader', module_data)

    text_model_name = config['arch']['args']['text_params']['model']
    if "openai/clip" in text_model_name:
        tokenizer_builder = transformers.CLIPTokenizer
    else:
        tokenizer_builder = transformers.AutoTokenizer
    tokenizer = tokenizer_builder.from_pretrained(
        text_model_name,
        model_max_length=config['arch']['args']['text_params'].get('max_length', 1e6),
        TOKENIZERS_PARALLELISM=False)

    # build model architecture
    model = config.initialize('arch', module_arch)

    # get function handles of metrics
    metric_fns = [getattr(module_metric, met) for met in config['metrics']]

    if config.resume is not None:
        checkpoint = torch.load(config.resume)
        state_dict = checkpoint['state_dict']
        new_state_dict = state_dict_data_parallel_fix(state_dict, model.state_dict())
        model.load_state_dict(new_state_dict, strict=True)
    else:
        print('Using random weights')

    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)

    # prepare model for testing
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = model.to(device)
    model.eval()

    ctr = 0
    save_part = None
    if args.save_feats:
        # resume the part numbering from any previously saved chunks
        part_seq = [int(x.split('_')[-1].split('.')[0]) for x in
                    glob.glob(os.path.join(args.save_feats, "ids_test_*.csv"))]
        if len(part_seq) > 0:
            # BUGFIX: was `max() + 1` (max called with no arguments -> TypeError)
            save_part = max(part_seq) + 1
        else:
            save_part = 0
        print(F"##### WARNING SAVE_PART STARTING AT {save_part}, MAKE SURE THIS IS THE NEWEST")

    meta_arr = []
    text_embed_arr = []
    vid_embed_arr = []
    print(len(data_loader))
    with torch.no_grad():
        # BUGFIX: tqdm was nested (tqdm(tqdm(...))), producing duplicate bars
        for i, data_og in enumerate(tqdm(data_loader)):
            # deepcopy so the dataloader's batch is not mutated in place
            # (leave this for now since not doing anything on the gpu)
            data = copy.deepcopy(data_og)
            del data_og
            if tokenizer is not None:
                if args.vis_token_similarity:
                    data['meta']['tokenized_captions'] = [tokenizer.tokenize(x) for x in data['text']]
                data['text'] = tokenizer(data['text'], return_tensors='pt', padding=True, truncation=True)
                # BUGFIX: move to the selected device instead of unconditional
                # .cuda(), which crashed on CPU-only machines
                data['text'] = {key: val.to(device) for key, val in data['text'].items()}
            if isinstance(data['video'], list):
                data['video'] = [x.to(device) for x in data['video']]
            else:
                data['video'] = data['video'].to(device)

            text_embeds, vid_embeds = model(data)
            text_embed_arr.append(text_embeds.cpu().detach())
            vid_embed_arr.append(vid_embeds.cpu().detach())
            meta_arr.append(data['meta'])

            ctr += len(data['video'])
            # periodically flush embeddings to disk (every ~10k samples) to avoid OOM
            if args.save_feats is not None and ctr > 1e4:
                ctr = 0
                text_embeds = torch.cat(text_embed_arr)
                vid_embeds = torch.cat(vid_embed_arr)
                meta_arr = _collate_meta(meta_arr)
                save_feats(vid_embeds, text_embeds, meta_arr, args.save_feats, args.save_type,
                           data_loader.dataset.split, save_part=save_part)
                text_embed_arr = []
                vid_embed_arr = []
                meta_arr = []
                save_part += 1

    vid_embeds = torch.cat(vid_embed_arr)
    text_embeds = torch.cat(text_embed_arr)
    meta_arr = _collate_meta(meta_arr)
    mask = None

    if data_loader.dataset.sliding_window_stride != -1:
        # average video embeddings over the sliding-window repeats of each
        # video, and keep one text embedding per unique caption
        li_vid_embeds = [x for x in vid_embeds]
        li_txt_embeds = [x for x in text_embeds]
        videoids = pd.Series(meta_arr['paths'])
        raw_caps = pd.Series(meta_arr['raw_captions'])
        vid_df = pd.DataFrame({'videoid': videoids, 'vid_embed': li_vid_embeds,
                               'txt_embed': li_txt_embeds, 'captions': raw_caps})
        new_vid_embeds = []
        new_txt_embeds = []
        for vid in vid_df['videoid'].unique():
            tdf = vid_df[vid_df['videoid'] == vid]
            tvembeds = torch.stack(tdf['vid_embed'].values.tolist())
            new_vid_embeds.append(tvembeds.mean(dim=0))
            for cap in tdf['captions'].unique():
                cdf = vid_df[vid_df['captions'] == cap]
                ttembeds = torch.stack(cdf['txt_embed'].values.tolist())
                new_txt_embeds.append(ttembeds[0])
        vid_embeds = torch.stack(new_vid_embeds)
        text_embeds = torch.stack(new_txt_embeds)

    # BUGFIX: `chunk` used to be defined only for non-train splits (NameError
    # on --split train), and the non-chunked branch referenced an undefined
    # `text_masks`. Chunked computation is correct for every split, so it is
    # now always used.
    chunk_size = 100
    sim_row_arr = []
    for tdx in range(0, len(text_embeds), chunk_size):
        print(tdx, ' / ', len(text_embeds), ' ...')
        t_embed = text_embeds[tdx:tdx + chunk_size]
        sim_row = []
        for vdx in range(0, len(vid_embeds), chunk_size):
            v_embed = vid_embeds[vdx:vdx + chunk_size]
            sim_chunk, _ = compute_similarity(t_embed, v_embed)
            sim_row.append(sim_chunk)
        sim_row_arr.append(torch.cat(sim_row, dim=1))
    sims = torch.cat(sim_row_arr, dim=0).numpy()

    nested_metrics = {}
    for metric in metric_fns:
        metric_name = metric.__name__
        res = metric(sims, query_masks=mask)
        verbose(epoch=0, metrics=res, name="", mode=metric_name)
        nested_metrics[metric_name] = res

    if args.save_feats is not None:
        if save_part == 0:
            # nothing was flushed during the loop: save unpartitioned files
            save_part = None
        save_feats(vid_embeds, text_embeds, meta_arr, args.save_feats, args.save_type, data_loader.dataset.split,
                   save_part=save_part)
        if save_part is None:
            fn = 'meta_arr.npy'
        else:
            fn = f'meta_arr_{save_part}.npy'
        np.save(os.path.join(args.save_feats, fn), meta_arr)
def save_feats(vid_embeds, text_embeds, meta_arr, save_feats, save_type, split, save_part=None):
    """Save video and/or text embeddings plus the video-id index to disk.

    :param vid_embeds: video embedding tensor [N, D]
    :param text_embeds: text embedding tensor [N, D]
    :param meta_arr: collated meta dict; 'paths' holds one video id per row
    :param save_feats: output directory (NOTE: shadows this function's name;
                       kept for call-site compatibility)
    :param save_type: 'video', 'text' or 'both' -- which arrays to write
    :param split: dataset split name, used in the filenames
    :param save_part: optional part index appended to filenames when saving
                      in chunks; None writes unpartitioned files
    :raises ValueError: if the number of ids does not match the embeddings
    """
    vid_embeds = vid_embeds.cpu().detach().numpy()
    text_embeds = text_embeds.cpu().detach().numpy()
    if save_part is not None:
        vid_fn = f'vid_embeds_{split}_{save_part}.npy'
        txt_fn = f'txt_embeds_{split}_{save_part}.npy'
        csv_fn = f'ids_{split}_{save_part}.csv'
    else:
        vid_fn = f'vid_embeds_{split}.npy'
        txt_fn = f'txt_embeds_{split}.npy'
        csv_fn = f'ids_{split}.csv'
    vid_embeds_save_fp = os.path.join(save_feats, vid_fn)
    txt_embeds_save_fp = os.path.join(save_feats, txt_fn)
    if save_type in ['video', 'both']:
        np.save(vid_embeds_save_fp, vid_embeds)
    if save_type in ['text', 'both']:
        np.save(txt_embeds_save_fp, text_embeds)
    videoids = pd.Series(meta_arr['paths'])
    meta_df = pd.DataFrame({'0': videoids})
    meta_df.to_csv(os.path.join(save_feats, csv_fn), index=False)
    if len(videoids) != len(vid_embeds):
        # BUGFIX: replaced leftover interactive `import pdb; pdb.set_trace()`
        # with an explicit error so headless runs fail loudly instead of hanging.
        raise ValueError(f"Number of video ids ({len(videoids)}) does not match "
                         f"number of video embeddings ({len(vid_embeds)})")
if __name__ == '__main__':
    # Build the CLI; ConfigParser consumes the parser object itself.
    parser = argparse.ArgumentParser(description='PyTorch Template')
    parser.add_argument('-r', '--resume', default=None, type=str,
                        help='path to latest checkpoint (default: None)')
    parser.add_argument('-d', '--device', default=None, type=str,
                        help='indices of GPUs to enable (default: all)')
    parser.add_argument('-c', '--config', default=None, type=str,
                        help='config file path (default: None)')
    parser.add_argument('-s', '--sliding_window_stride', default=-1, type=int,
                        help='test time temporal augmentation, repeat samples with different start times.')
    parser.add_argument('--save_feats', default=None,
                        help='path to store text & video feats, this is for saving embeddings if you want to do offline retrieval.')
    parser.add_argument('--save_type', default='both', choices=['both', 'text', 'video'],
                        help='Whether to save video, text or both feats. If running on inference videos, text is just a placeholder')
    parser.add_argument('--vis_token_similarity', action='store_true')
    parser.add_argument('--split', default='test', choices=['train', 'val', 'test'],
                        help='split to evaluate on.')
    parser.add_argument('--batch_size', default=16, type=int,
                        help='size of batch')

    config = ConfigParser(parser, test=True)
    # hack to get sliding into config
    args = parser.parse_args()
    config._config['sliding_window_stride'] = args.sliding_window_stride
    ex.add_config(config.config)
    ex.run()
| 11,571 | 39.603509 | 160 | py |
frozen-in-time | frozen-in-time-main/trainer/trainer.py | import numpy as np
import torch
from torch import nn
from tqdm.auto import tqdm
from base import BaseTrainer
from model.model import sim_matrix
from utils import inf_loop
class Trainer(BaseTrainer):
    """
    Trainer class

    Trains one model on several dataloaders simultaneously by zipping their
    batches each epoch; validation iterates each validation dataloader in full.

    Note:
        Inherited from BaseTrainer.
    """

    def __init__(self, model, loss, metrics, optimizer, config, data_loader,
                 valid_data_loader=None, lr_scheduler=None, len_epoch=None, writer=None,
                 visualizer=None, tokenizer=None, max_samples_per_epoch=50000):
        super().__init__(model, loss, metrics, optimizer, config, writer)
        self.config = config
        self.data_loader = data_loader
        if len_epoch is None:
            # epoch-based training: the shortest dataloader bounds the epoch
            self.len_epoch = min(len(x) for x in data_loader)
        else:
            # iteration-based training
            self.data_loader = inf_loop(data_loader)
            self.len_epoch = len_epoch
        self.valid_data_loader = valid_data_loader
        self.do_validation = self.valid_data_loader is not None
        self.lr_scheduler = lr_scheduler
        self.visualizer = visualizer
        self.val_chunking = True
        self.batch_size = self.data_loader[0].batch_size
        self.total_batch_sum = sum(x.batch_size for x in self.data_loader)
        self.tokenizer = tokenizer
        self.max_samples_per_epoch = max_samples_per_epoch

    def _eval_metrics(self, output):
        """Apply every configured metric to `output`, optionally logging each
        value to the writer, and return the values as a numpy array."""
        acc_metrics = np.zeros(len(self.metrics))
        for i, metric in enumerate(self.metrics):
            acc_metrics[i] += metric(output)
            if self.writer is not None:
                self.writer.log_scalar('{}'.format(metric.__name__), acc_metrics[i])
        return acc_metrics

    def _train_epoch(self, epoch):
        """
        Training logic for an epoch

        :param epoch: Current training epoch.
        :return: A log that contains all information you want to save.

        Note:
            If you have additional information to record, for example:
                > additional_log = {"x": x, "y": y}
            merge it with log before return. i.e.
                > log = {**log, **additional_log}
                > return log
            The metrics in log must have the key 'metrics'.
        """
        self.model.train()
        total_loss = [0] * len(self.data_loader)
        total_iterations = self.max_samples_per_epoch // self.total_batch_sum + 1
        with tqdm(zip(*self.data_loader), desc=f"Training epoch {epoch}", total=total_iterations) as progress:
            for batch_idx, data_li in enumerate(progress):
                # cap the number of samples seen per epoch
                if (batch_idx + 1) * self.total_batch_sum > self.max_samples_per_epoch:
                    break
                for dl_idx, data in enumerate(data_li):
                    # then assume we must tokenize the input, e.g. its a string
                    if self.tokenizer is not None:
                        data['text'] = self.tokenizer(data['text'], return_tensors='pt', padding=True,
                                                      truncation=True)
                        data['text'] = {key: val.to(self.device) for key, val in data['text'].items()}
                    data['video'] = data['video'].to(self.device)

                    self.optimizer.zero_grad()
                    text_embeds, video_embeds = self.model(data)
                    output = sim_matrix(text_embeds, video_embeds)
                    loss = self.loss(output)
                    loss.backward()
                    self.optimizer.step()

                    detached_loss = loss.detach().item()
                    if self.writer is not None:
                        self.writer.log_scalar(f'loss_train_{dl_idx}', detached_loss)
                    total_loss[dl_idx] += detached_loss
                    progress.set_postfix({"dl": dl_idx, "loss": detached_loss})

                    self.optimizer.zero_grad()
                if batch_idx == self.len_epoch:
                    break

        log = {
            f'loss_{dl_idx}': total_loss[dl_idx] / self.len_epoch for dl_idx in range(len(self.data_loader))
        }

        if self.do_validation:
            val_log = self._valid_epoch(epoch)
            log.update(val_log)

        if self.lr_scheduler is not None:
            self.lr_scheduler.step()
        return log

    def _valid_epoch(self, epoch):
        """
        Validate after training an epoch

        :return: A log that contains information about validation

        Note:
            The validation metrics in log must have the key 'val_metrics'.
        """
        self.model.eval()
        total_val_loss = [0] * len(self.valid_data_loader)
        meta_arr = {x: [] for x in range(len(self.valid_data_loader))}
        text_embed_arr = {x: [] for x in range(len(self.valid_data_loader))}
        vid_embed_arr = {x: [] for x in range(len(self.valid_data_loader))}

        with torch.no_grad():
            # for validation we switch the nested loop order, because alternate batches not needed...
            # ... and dataloaders can be of different length
            for dl_idx, dl in enumerate(self.valid_data_loader):
                for data in tqdm(dl, desc=f"Validating dl{dl_idx}"):
                    meta_arr[dl_idx].append(data['meta'])
                    if self.tokenizer is not None:
                        data['text'] = self.tokenizer(data['text'], return_tensors='pt', padding=True, truncation=True)
                        data['text'] = {key: val.to(self.device) for key, val in data['text'].items()}
                    data['video'] = data['video'].to(self.device)

                    # Note that if the batch is not scattered among all the GPUs, `DataParallel` will fail because
                    # the model's mandatory argument `data` will not be passed to some of them.
                    # It can happen with the last batch of the dataset, depending on its size.
                    # It could be safely ignored during training but on validation/test we want accurate metrics.
                    # This avoids using `DataParallel` in this case, and supposes this batch fits in one GPU.
                    current_batch_size = data['video'].shape[0]
                    if isinstance(self.model, nn.DataParallel) and current_batch_size < (dl.batch_size or 1):
                        scattered_len = len(self.model.scatter([torch.empty(current_batch_size)], {},
                                                               self.model.device_ids)[0])
                        avoid_data_parallel = scattered_len < len(self.model.device_ids)
                    else:
                        avoid_data_parallel = False

                    if avoid_data_parallel:
                        text_embed, vid_embed = self.model.module(data, return_embeds=True)
                    else:
                        text_embed, vid_embed = self.model(data, return_embeds=True)
                    text_embed_arr[dl_idx].append(text_embed.cpu())
                    vid_embed_arr[dl_idx].append(vid_embed.cpu())
                    sims_batch = sim_matrix(text_embed, vid_embed)
                    loss = self.loss(sims_batch)
                    total_val_loss[dl_idx] += loss.item()

        for dl_idx in range(len(self.valid_data_loader)):
            # TODO: this needs a clean
            if self.writer is not None:
                self.writer.log_scalar(f'loss_val_{dl_idx}',
                                       total_val_loss[dl_idx] / len(self.valid_data_loader[dl_idx]))
            nested_metrics = {x: {} for x in range(len(self.valid_data_loader))}
            text_embeds = torch.cat(text_embed_arr[dl_idx])
            vid_embeds = torch.cat(vid_embed_arr[dl_idx])
            sims = sim_matrix(text_embeds, vid_embeds).detach().cpu().numpy()
            for metric in self.metrics:
                metric_name = metric.__name__
                res = metric(sims)
                verbose(epoch=epoch, metrics=res, name=self.valid_data_loader[dl_idx].dataset_name,
                        mode=metric_name)
                nested_metrics[dl_idx][metric_name] = res
                if self.writer is not None:
                    to_write = format_nested_metrics_for_writer(res, mode=metric_name,
                                                                name=self.valid_data_loader[dl_idx].dataset_name)
                    for key, val in to_write.items():
                        self.writer.log_scalar(key, val)
            if self.visualizer is not None:
                # BUGFIX: collate THIS dataloader's list of per-batch meta dicts.
                # Previously this indexed meta_arr[0] (a list, whose elements are
                # unhashable dicts) and iterated meta_arr's integer keys,
                # raising TypeError/AttributeError whenever a visualizer was set.
                meta_arr_cat = {key: [] for key in meta_arr[dl_idx][0]}
                for meta in meta_arr[dl_idx]:
                    for key, val in meta.items():
                        meta_arr_cat[key] += val
                self.visualizer.visualize_ranking(sims, epoch, meta_arr_cat, nested_metrics)

        res_dict = {f'val_loss_{dl_idx}': total_val_loss[dl_idx] / len(self.valid_data_loader[dl_idx])
                    for dl_idx in range(len(self.valid_data_loader))}
        res_dict['nested_val_metrics'] = nested_metrics
        return res_dict

    def _progress(self, batch_idx, dl_idx):
        """Format a '[current/total (pct%)]' progress string for one dataloader."""
        base = '[{}/{} ({:.0f}%)]'
        if hasattr(self.data_loader[dl_idx], 'n_samples'):
            current = batch_idx * self.data_loader[dl_idx].batch_size
            total = self.data_loader[dl_idx].n_samples
        else:
            current = batch_idx
            total = self.len_epoch
        return base.format(current, total, 100.0 * current / total)
def verbose(epoch, metrics, mode, name="TEST"):
    """Pretty-print retrieval metrics for one evaluation run.

    :param epoch: epoch number shown in the message
    :param metrics: dict with keys R1, R5, R10, R50, MedR, MeanR
    :param mode: metric/direction label, e.g. 't2v'
    :param name: dataset name shown in the message
    """
    r1, r5, r10, r50 = metrics["R1"], metrics["R5"], metrics["R10"], metrics["R50"]
    msg = f"[{mode}]{name:s} epoch {epoch}, R@1: {r1:.1f}"
    msg += f", R@5: {r5:.1f}, R@10 {r10:.1f}, R@50 {r50:.1f}"
    # BUGFIX: a ", " separator was missing, printing "R@50 40.0MedR: ..."
    msg += f", MedR: {metrics['MedR']:g}, MeanR: {metrics['MeanR']:.1f}"
    print(msg)
def format_nested_metrics_for_writer(metrics, mode, name="TEST"):
    """Flatten a metrics dict into '[mode]name_key' entries for a scalar writer."""
    return {f"[{mode}]{name}_{key}": val for key, val in metrics.items()}
| 10,116 | 43.179039 | 119 | py |
frozen-in-time | frozen-in-time-main/data_loader/transforms.py | from torchvision import transforms
def init_transform_dict(input_res=224,
                        center_crop=256,
                        randcrop_scale=(0.5, 1.0),
                        color_jitter=(0, 0, 0),
                        norm_mean=(0.485, 0.456, 0.406),
                        norm_std=(0.229, 0.224, 0.225)):
    """Build the per-split torchvision transform pipelines.

    'train' applies random-crop / horizontal-flip / color-jitter augmentation;
    'val' and 'test' use a deterministic resize + center crop. Every pipeline
    ends with channel normalization (no ToTensor step -- presumably the inputs
    are already tensors; confirm against the dataloader).

    :return: dict mapping 'train'/'val'/'test' to a transforms.Compose
    """
    normalize = transforms.Normalize(mean=norm_mean, std=norm_std)
    brightness, saturation, hue = color_jitter

    def _eval_pipeline():
        # deterministic pipeline used for both val and test
        return transforms.Compose([
            transforms.Resize(center_crop),
            transforms.CenterCrop(center_crop),
            transforms.Resize(input_res),
            normalize,
        ])

    train_pipeline = transforms.Compose([
        transforms.RandomResizedCrop(input_res, scale=randcrop_scale),
        transforms.RandomHorizontalFlip(),
        transforms.ColorJitter(brightness=brightness, saturation=saturation, hue=hue),
        normalize,
    ])
    return {
        'train': train_pipeline,
        'val': _eval_pipeline(),
        'test': _eval_pipeline(),
    }
| 1,160 | 35.28125 | 112 | py |
frozen-in-time | frozen-in-time-main/logger/visualization.py | import importlib
from utils import Timer
class TensorboardWriter:
    """Thin wrapper around a Tensorboard SummaryWriter (torch.utils.tensorboard
    or tensorboardX) that tags every logged datum with the current step and
    train/valid mode, and degrades to a silent no-op when disabled or when no
    backend is installed."""

    def __init__(self, log_dir, logger, enabled):
        self.writer = None
        self.selected_module = ""

        if enabled:
            log_dir = str(log_dir)

            # Retrieve visualization writer: the first importable backend wins.
            for module in ["torch.utils.tensorboard", "tensorboardX"]:
                try:
                    self.writer = importlib.import_module(module).SummaryWriter(log_dir)
                    succeeded = True
                    break
                except ImportError:
                    succeeded = False
                self.selected_module = module

            if not succeeded:
                message = "Warning: visualization (Tensorboard) is configured to use, but currently not installed on " \
                          "this machine. Please install either TensorboardX with 'pip install tensorboardx', upgrade " \
                          "PyTorch to version >= 1.1 for using 'torch.utils.tensorboard' or turn off the option in " \
                          "the 'config.json' file."
                logger.warning(message)

        self.step = 0
        self.mode = ''

        # SummaryWriter methods that get the step/mode tagging treatment
        self.tb_writer_ftns = {
            'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio',
            'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'
        }
        # methods whose tags must NOT be suffixed with the mode
        self.tag_mode_exceptions = {'add_histogram', 'add_embedding'}
        self.timer = Timer()

    def set_step(self, step, mode='train'):
        """Set the global step and mode; also logs steps/sec between calls."""
        self.mode = mode
        self.step = step
        if step == 0:
            self.timer.reset()
        else:
            duration = self.timer.check()
            self.add_scalar('steps_per_sec', 1 / duration)

    def __getattr__(self, name):
        """
        If visualization is configured to use:
            return add_data() methods of tensorboard with additional information (step, tag) added.
        Otherwise:
            return a blank function handle that does nothing
        """
        if name in self.tb_writer_ftns:
            add_data = getattr(self.writer, name, None)

            def wrapper(tag, data, *args, **kwargs):
                if add_data is not None:
                    # add mode(train/valid) tag
                    if name not in self.tag_mode_exceptions:
                        tag = '{}/{}'.format(tag, self.mode)
                    add_data(tag, data, self.step, *args, **kwargs)
            return wrapper
        else:
            # __getattr__ is only invoked after normal attribute lookup has
            # already failed, so any other name is genuinely missing.
            # BUGFIX: the previous code called the nonexistent
            # `object.__getattr__(name)`, whose own AttributeError was caught
            # and re-raised -- dead code with the same net effect as raising
            # directly.
            raise AttributeError("type object '{}' has no attribute '{}'".format(self.selected_module, name))
class SacredNeptuneWriter:
    """Placeholder for a Sacred/Neptune experiment writer; not implemented."""

    def __init__(self):
        raise NotImplementedError
| 2,909 | 35.375 | 120 | py |
frozen-in-time | frozen-in-time-main/base/base_model.py | import torch.nn as nn
import numpy as np
from abc import abstractmethod
class BaseModel(nn.Module):
    """
    Base class for all models
    """

    @abstractmethod
    def forward(self, *inputs):
        """
        Forward pass logic; subclasses must override.

        :return: Model output
        """
        raise NotImplementedError

    def __str__(self):
        """
        Model prints with number of trainable parameters
        """
        trainable = (p for p in self.parameters() if p.requires_grad)
        n_params = sum(np.prod(p.size()) for p in trainable)
        return '{}\nTrainable parameters: {}'.format(super().__str__(), n_params)
| 646 | 23.884615 | 79 | py |
frozen-in-time | frozen-in-time-main/base/base_trainer.py | from abc import abstractmethod
import torch
from numpy import inf
class BaseTrainer:
"""
Base class for all trainers
"""
    def __init__(self, model, loss, metrics, optimizer, config, writer=None, init_val=False):
        """
        Set up device placement, training schedule and monitoring state.

        :param model: model to train (moved to the selected device)
        :param loss: loss module (moved to the selected device)
        :param metrics: list of metric callables
        :param optimizer: optimizer instance
        :param config: config object; must expose get_logger, save_dir, resume
                       and a 'trainer' section with epochs/save_period/verbosity
        :param writer: optional scalar writer (e.g. TensorboardWriter)
        :param init_val: whether to validate before the first epoch
                         NOTE(review): this parameter is immediately overridden
                         below by cfg_trainer.get('init_val', True).
        """
        self.config = config
        self.logger = config.get_logger('trainer', config['trainer']['verbosity'])
        self.init_val = init_val
        # setup GPU device if available, move model into configured device
        self.device, device_ids = self._prepare_device(config['n_gpu'])
        self.model = model.to(self.device)
        # expose the chosen device on the model for downstream code
        self.model.device = self.device
        if len(device_ids) > 1:
            self.model = torch.nn.DataParallel(model, device_ids=device_ids)
        loss = loss.to(self.device)
        self.loss = loss
        self.metrics = metrics
        self.optimizer = optimizer
        cfg_trainer = config['trainer']
        self.epochs = cfg_trainer['epochs']
        self.save_period = cfg_trainer['save_period']
        self.monitor = cfg_trainer.get('monitor', 'off')
        # NOTE(review): overrides the init_val constructor argument with the
        # config value (default True) -- confirm which one is intended to win.
        self.init_val = cfg_trainer.get('init_val', True)
        # configuration to monitor model performance and save best
        if self.monitor == 'off':
            self.mnt_mode = 'off'
            self.mnt_best = 0
        else:
            # monitor is e.g. "min val_loss_0" -> (mode, metric key)
            self.mnt_mode, self.mnt_metric = self.monitor.split()
            assert self.mnt_mode in ['min', 'max']
            self.mnt_best = inf if self.mnt_mode == 'min' else -inf
            # NOTE(review): early_stop is only defined when monitoring is on;
            # code reading self.early_stop with monitor == 'off' would raise
            # AttributeError -- verify against train().
            self.early_stop = cfg_trainer.get('early_stop', inf)
        self.start_epoch = 1
        self.checkpoint_dir = config.save_dir
        # setup visualization writer instance
        #self.writer = TensorboardWriter(config.log_dir, self.logger, cfg_trainer['tensorboard'])
        self.writer = writer
        if config.resume is not None:
            self._resume_checkpoint(config.resume)
    @abstractmethod
    def _train_epoch(self, epoch):
        """
        Training logic for an epoch; must be implemented by subclasses.

        :param epoch: Current epoch number
        :return: a dict of values to log for this epoch
        """
        raise NotImplementedError
    @abstractmethod
    def _valid_epoch(self, epoch):
        """
        Validation logic for an epoch; must be implemented by subclasses.
        (Original docstring said "Training logic" -- a copy-paste slip.)

        :param epoch: Current epoch number
        :return: a dict of validation values to log for this epoch
        """
        raise NotImplementedError
def train(self):
"""
Full training logic
"""
not_improved_count = 0
if self.init_val:
_ = self._valid_epoch(-1)
for epoch in range(self.start_epoch, self.epochs + 1):
result = self._train_epoch(epoch)
# save logged informations into log dict
# save logged informations into log dict
log = {'epoch': epoch}
for key, value in result.items():
if key == 'metrics':
log.update({mtr.__name__: value[i]
for i, mtr in enumerate(self.metrics)})
elif key == 'val_metrics':
log.update({'val_' + mtr.__name__: value[i]
for i, mtr in enumerate(self.metrics)})
elif key == 'nested_val_metrics':
# NOTE: currently only supports two layers of nesting
for subkey, subval in value.items():
for subsubkey, subsubval in subval.items():
for subsubsubkey, subsubsubval in subsubval.items():
log[f"val_{subkey}_{subsubkey}_{subsubsubkey}"] = subsubsubval
else:
log[key] = value
# print logged informations to the screen
for key, value in log.items():
self.logger.info(' {:15s}: {}'.format(str(key), value))
# evaluate model performance according to configured metric, save best checkpoint as model_best
best = False
if self.mnt_mode != 'off':
try:
# check whether model performance improved or not, according to specified metric(mnt_metric)
improved = (self.mnt_mode == 'min' and log[self.mnt_metric] <= self.mnt_best) or \
(self.mnt_mode == 'max' and log[self.mnt_metric] >= self.mnt_best)
except KeyError:
self.logger.warning("Warning: Metric '{}' is not found. "
"Model performance monitoring is disabled.".format(self.mnt_metric))
self.mnt_mode = 'off'
improved = False
if improved:
self.mnt_best = log[self.mnt_metric]
not_improved_count = 0
best = True
else:
not_improved_count += 1
if not_improved_count > self.early_stop:
self.logger.info("Validation performance didn\'t improve for {} epochs. "
"Training stops.".format(self.early_stop))
break
if epoch % self.save_period == 0 or best:
#if best:
self._save_checkpoint(epoch, save_best=best)
def _prepare_device(self, n_gpu_use):
"""
setup GPU device if available, move model into configured device
"""
n_gpu = torch.cuda.device_count()
if n_gpu_use > 0 and n_gpu == 0:
self.logger.warning("Warning: There\'s no GPU available on this machine,"
"training will be performed on CPU.")
n_gpu_use = 0
if n_gpu_use > n_gpu:
self.logger.warning("Warning: The number of GPU\'s configured to use is {}, but only {} are available "
"on this machine.".format(n_gpu_use, n_gpu))
n_gpu_use = n_gpu
device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')
list_ids = list(range(n_gpu_use))
return device, list_ids
def _save_checkpoint(self, epoch, save_best=False):
"""
Saving checkpoints
:param epoch: current epoch number
:param log: logging information of the epoch
:param save_best: if True, rename the saved checkpoint to 'model_best.pth'
"""
arch = type(self.model).__name__
state = {
'arch': arch,
'epoch': epoch,
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'monitor_best': self.mnt_best,
'config': self.config
}
filename = str(self.checkpoint_dir / 'checkpoint-epoch{}.pth'.format(epoch))
torch.save(state, filename)
self.logger.info("Saving checkpoint: {} ...".format(filename))
if save_best:
best_path = str(self.checkpoint_dir / 'model_best.pth')
torch.save(state, best_path)
self.logger.info("Saving current best: model_best.pth ...")
def _resume_checkpoint(self, resume_path):
"""
Resume from saved checkpoints
:param resume_path: Checkpoint path to be resumed
"""
resume_path = str(resume_path)
self.logger.info("Loading checkpoint: {} ...".format(resume_path))
checkpoint = torch.load(resume_path)
self.start_epoch = checkpoint['epoch'] + 1
self.mnt_best = checkpoint['monitor_best']
# load architecture params from checkpoint.
if checkpoint['config']['arch'] != self.config['arch']:
self.logger.warning("Warning: Architecture configuration given in config file is different from that of "
"checkpoint. This may yield an exception while state_dict is being loaded.")
state_dict = checkpoint['state_dict']
load_state_dict_keys = list(state_dict.keys())
curr_state_dict_keys = list(self.model.state_dict().keys())
redo_dp = False
if not curr_state_dict_keys[0].startswith('module.') and load_state_dict_keys[0].startswith('module.'):
undo_dp = True
elif curr_state_dict_keys[0].startswith('module.') and not load_state_dict_keys[0].startswith('module.'):
redo_dp = True
undo_dp = False
else:
undo_dp = False
if undo_dp:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
# load params
elif redo_dp:
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = 'module.' + k # remove `module.`
new_state_dict[name] = v
else:
new_state_dict = state_dict
self.model.load_state_dict(new_state_dict)
# load optimizer state from checkpoint only when optimizer type is not changed.
if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:
self.logger.warning("Warning: Optimizer type given in config file is different from that of checkpoint. "
"Optimizer parameters not being resumed.")
else:
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.logger.info("Checkpoint loaded. Resume training from epoch {}".format(self.start_epoch))
| 9,470 | 38.962025 | 117 | py |
frozen-in-time | frozen-in-time-main/base/base_dataset.py | import os
import random
from abc import abstractmethod
import av
import cv2
import decord
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset, get_worker_info
from torchvision import transforms
class TextVideoDataset(Dataset):
    """Base dataset pairing videos with text captions.

    Subclasses supply the metadata table (`_load_metadata`) and the mapping
    from a metadata row to a video file (`_get_video_path`) and caption
    (`_get_caption`).
    """

    def __init__(self,
                 dataset_name,
                 text_params,
                 video_params,
                 data_dir,
                 metadata_dir=None,
                 split='train',
                 tsfms=None,
                 cut=None,
                 subsample=1,
                 sliding_window_stride=-1,
                 reader='decord'
                 ):
        self.dataset_name = dataset_name
        self.text_params = text_params
        self.video_params = video_params
        # check for environment variables
        self.data_dir = os.path.expandvars(data_dir)
        if metadata_dir is not None:
            self.metadata_dir = os.path.expandvars(metadata_dir)
        else:
            self.metadata_dir = self.data_dir
        self.split = split
        self.transforms = tsfms
        self.cut = cut
        self.subsample = subsample
        self.sliding_window_stride = sliding_window_stride
        # module-level registry mapping backend name -> frame reader function
        self.video_reader = video_reader[reader]
        self.label_type = 'caption'
        self._load_metadata()
        if self.sliding_window_stride != -1:
            if self.split != 'test':
                raise ValueError('Fixing frame sampling is for test time only. can remove but...')
            self._fix_temporal_samples()

    @abstractmethod
    def _load_metadata(self):
        raise NotImplementedError("Metadata loading must be implemented by subclass")

    @abstractmethod
    def _get_video_path(self, sample):
        raise NotImplementedError("Get video path function must be implemented by subclass")

    def _get_caption(self, sample):
        # NOTE(review): unlike its siblings this is not @abstractmethod, so a
        # missing implementation only fails at first use rather than at
        # instantiation; kept as-is for backward compatibility.
        raise NotImplementedError("Get caption function must be implemented by subclass")

    def _get_video_lens(self):
        """Return the frame count of every video in the metadata table."""
        vlen_li = []
        for idx, row in self.metadata.iterrows():
            video_path = self._get_video_path(row)[0]
            vlen_li.append(get_video_len(video_path))
        return vlen_li

    def _fix_temporal_samples(self):
        """Explode metadata so each row carries a fixed deterministic start
        frame, enabling sliding-window evaluation."""
        self.metadata['vlen'] = self._get_video_lens()
        self.metadata['frame_intervals'] = self.metadata['vlen'].apply(
            lambda x: np.linspace(start=0, stop=x, num=min(x, self.video_params['num_frames']) + 1).astype(int))
        # bug fix: the interval count is len(x) - 1; the old code computed
        # len(x - 1) (an elementwise subtraction that leaves the length
        # unchanged) and therefore divided by one interval too many
        self.metadata['fix_start'] = self.metadata['frame_intervals'].apply(
            lambda x: np.arange(0, int(x[-1] / (len(x) - 1)), self.sliding_window_stride)
        )
        self.metadata = self.metadata.explode('fix_start')

    def __len__(self):
        return len(self.metadata)

    def __getitem__(self, item):
        item = item % len(self.metadata)
        sample = self.metadata.iloc[item]
        video_fp, rel_fp = self._get_video_path(sample)
        caption = self._get_caption(sample)
        video_loading = self.video_params.get('loading', 'strict')
        frame_sample = 'rand'
        fix_start = None
        if self.split == 'test':
            frame_sample = 'uniform'
        if self.sliding_window_stride != -1:
            fix_start = sample['fix_start']
        try:
            if os.path.isfile(video_fp):
                imgs, idxs = self.video_reader(video_fp, self.video_params['num_frames'], frame_sample,
                                               fix_start=fix_start)
            else:
                print(f"Warning: missing video file {video_fp}.")
                # bug fix: raise instead of `assert False` so the failure
                # survives `python -O`; still handled by the except below
                raise FileNotFoundError(video_fp)
        except Exception as e:
            if video_loading == 'strict':
                raise ValueError(
                    f'Video loading failed for {video_fp}, video loading for this dataset is strict.') from e
            else:
                # non-strict mode: substitute a single black frame
                imgs = Image.new('RGB', (self.video_params['input_res'], self.video_params['input_res']), (0, 0, 0))
                imgs = transforms.ToTensor()(imgs).unsqueeze(0)

        if self.transforms is not None:
            imgs = self.transforms(imgs)

        # zero-pad to a fixed number of frames
        final = torch.zeros([self.video_params['num_frames'], 3, self.video_params['input_res'],
                             self.video_params['input_res']])
        final[:imgs.shape[0]] = imgs

        meta_arr = {'raw_captions': caption, 'paths': rel_fp, 'dataset': self.dataset_name}
        data = {'video': final, 'text': caption, 'meta': meta_arr}
        return data
class TextImageDataset(TextVideoDataset):
    """Image-caption variant: loads a single image and presents it as a
    one-frame "video" so downstream code is shared with the video path."""

    def __getitem__(self, item):
        index = item % len(self.metadata)
        sample = self.metadata.iloc[index]
        image_fp, rel_fp = self._get_video_path(sample)
        caption = self._get_caption(sample)
        loading_mode = self.video_params.get('loading', 'strict')

        try:
            img = Image.open(image_fp).convert("RGB")
        except:
            if loading_mode == 'strict':
                raise ValueError(f'Image loading failed for {image_fp}, image loading for this dataset is strict.')
            # non-strict mode: fall back to a black image
            img = Image.new('RGB', (self.video_params['input_res'], self.video_params['input_res']), (0, 0, 0))

        # convert to tensor because video transforms don't, expand such that its a 1-frame video.
        img = transforms.ToTensor()(img).unsqueeze(0)
        if self.transforms is not None:
            img = self.transforms(img)

        meta_arr = {'raw_captions': caption, 'paths': rel_fp, 'dataset': self.dataset_name}
        return {'video': img, 'text': caption, 'meta': meta_arr}
def sample_frames(num_frames, vlen, sample='rand', fix_start=None):
    """Choose `num_frames` frame indices out of a video of `vlen` frames.

    The video is split into equal intervals and one frame is taken from each:
    randomly ('rand'), the midpoint ('uniform'), or at a fixed offset from
    the interval start (`fix_start`, used for sliding-window evaluation).

    :param num_frames: desired number of frames
    :param vlen: total frames in the video
    :param sample: 'rand' or 'uniform' (ignored when fix_start is used)
    :param fix_start: fixed offset within each interval (takes effect when
        sample != 'rand')
    :return: list of frame indices (fewer than num_frames if vlen < num_frames)
    """
    acc_samples = min(num_frames, vlen)
    intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
    # each range is the *inclusive* (first, last) frame of one interval
    ranges = []
    for idx, interv in enumerate(intervals[:-1]):
        ranges.append((interv, intervals[idx + 1] - 1))
    if sample == 'rand':
        # bug fix: random.choice(range(lo, hi)) excluded the interval's last
        # frame and raised IndexError on empty ranges (which happens whenever
        # vlen <= num_frames); randint is inclusive of both bounds
        frame_idxs = [random.randint(x[0], x[1]) for x in ranges]
    elif fix_start is not None:
        frame_idxs = [x[0] + fix_start for x in ranges]
    elif sample == 'uniform':
        frame_idxs = [(x[0] + x[1]) // 2 for x in ranges]
    else:
        raise NotImplementedError
    return frame_idxs
def read_frames_cv2(video_path, num_frames, sample='rand', fix_start=None):
    """Read `num_frames` frames from `video_path` with OpenCV.

    :return: (frames, success_idxs) where frames is a float tensor
        (F, C, H, W) in [0, 1]; frames that fail to decode are skipped,
        so F may be smaller than num_frames.
    """
    cap = cv2.VideoCapture(video_path)
    assert (cap.isOpened())
    vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # get indexes of sampled frames
    frame_idxs = sample_frames(num_frames, vlen, sample=sample, fix_start=fix_start)
    frames = []
    success_idxs = []
    for index in frame_idxs:
        # bug fix: CAP_PROP_POS_FRAMES is the 0-based index of the frame to
        # be decoded *next*, so seek to `index` itself; seeking to `index - 1`
        # read the previous frame (and underflowed for index 0), and was
        # inconsistent with utils/video.py
        cap.set(cv2.CAP_PROP_POS_FRAMES, index)
        ret, frame = cap.read()
        if ret:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = torch.from_numpy(frame)
            # (H x W x C) to (C x H x W)
            frame = frame.permute(2, 0, 1)
            frames.append(frame)
            success_idxs.append(index)
    # NOTE(review): torch.stack raises if *every* read failed — confirm
    # whether callers rely on that hard failure
    frames = torch.stack(frames).float() / 255
    cap.release()
    return frames, success_idxs
def read_frames_av(video_path, num_frames, sample='rand', fix_start=None):
    """Decode every frame with PyAV, then subsample `num_frames` of them.

    Returns (frames, frame_idxs) with frames a float tensor (F, C, H, W)
    in [0, 1]. On decode failure the frame list stays empty.
    """
    container = av.open(video_path)
    decoded = []
    try:
        decoded = [torch.from_numpy(f.to_rgb().to_ndarray()) for f in container.decode(video=0)]
    except (RuntimeError, ZeroDivisionError) as exc:
        print('{}: WEBM reader cannot open {}. Empty '
              'list returned.'.format(type(exc).__name__, video_path))
    total = len(decoded)
    chosen = sample_frames(num_frames, total, sample=sample, fix_start=fix_start)
    stacked = torch.stack([decoded[i] for i in chosen]).float() / 255
    return stacked.permute(0, 3, 1, 2), chosen
# make decord hand back torch tensors directly
decord.bridge.set_bridge("torch")


def read_frames_decord(video_path, num_frames, sample='rand', fix_start=None):
    """Sample `num_frames` frames with decord.

    Returns (frames, frame_idxs) with frames a float tensor (F, C, H, W)
    in [0, 1].
    """
    reader = decord.VideoReader(video_path, num_threads=1)
    total_frames = len(reader)
    chosen = sample_frames(num_frames, total_frames, sample=sample, fix_start=fix_start)
    batch = reader.get_batch(chosen).float() / 255
    return batch.permute(0, 3, 1, 2), chosen
def get_video_len(video_path):
    """Return the frame count of a video, or False if it cannot be opened."""
    capture = cv2.VideoCapture(video_path)
    if not capture.isOpened():
        return False
    frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    capture.release()
    return frame_count
# Registry mapping reader-backend names to frame-extraction functions;
# selected via the `reader` argument of TextVideoDataset.
video_reader = {
    'av': read_frames_av,
    'cv2': read_frames_cv2,
    'decord': read_frames_decord
}
| 8,634 | 35.434599 | 116 | py |
frozen-in-time | frozen-in-time-main/base/base_data_loader.py | import numpy as np
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.sampler import SubsetRandomSampler
class BaseDataLoader(DataLoader):
    """
    Base class for all data loaders.

    Optionally carves a validation subset out of `dataset`
    (`validation_split` is either a fraction in (0, 1) or an absolute
    count); train and validation each get a SubsetRandomSampler over
    disjoint indices.
    """

    def __init__(self, dataset, batch_size, shuffle, validation_split, num_workers, collate_fn=default_collate,
                 prefetch_factor=2):
        self.validation_split = validation_split
        self.shuffle = shuffle

        self.batch_idx = 0
        self.n_samples = len(dataset)

        # may flip self.shuffle to False (sampler and shuffle are exclusive)
        self.sampler, self.valid_sampler = self._split_sampler(self.validation_split)

        self.init_kwargs = {
            'dataset': dataset,
            'batch_size': batch_size,
            'shuffle': self.shuffle,
            'collate_fn': collate_fn,
            'num_workers': num_workers,
            'prefetch_factor': prefetch_factor
        }
        super().__init__(sampler=self.sampler, **self.init_kwargs)

    def _split_sampler(self, split):
        """Return (train_sampler, valid_sampler); (None, None) when split == 0."""
        if split == 0.0:
            return None, None

        idx_full = np.arange(self.n_samples)

        # fixed seed so every restart produces the same split
        np.random.seed(0)
        np.random.shuffle(idx_full)

        if isinstance(split, int):
            assert split > 0
            assert split < self.n_samples, "validation set size is configured to be larger than entire dataset."
            len_valid = split
        else:
            len_valid = int(self.n_samples * split)

        valid_idx = idx_full[0:len_valid]
        train_idx = np.delete(idx_full, np.arange(0, len_valid))

        train_sampler = SubsetRandomSampler(train_idx)
        valid_sampler = SubsetRandomSampler(valid_idx)

        # turn off shuffle option which is mutually exclusive with sampler
        self.shuffle = False
        self.n_samples = len(train_idx)

        return train_sampler, valid_sampler

    def split_validation(self, diff_kwargs=None):
        """Build a DataLoader over the held-out validation indices.

        :param diff_kwargs: optional keyword overrides (e.g. a different
            batch_size) applied only to the validation loader
        :return: DataLoader, or None when no validation split was requested
        """
        if self.valid_sampler is None:
            return None
        # bug fix: the old code updated self.init_kwargs in place through
        # dict aliasing, so overrides leaked into the training loader's
        # kwargs; work on a copy instead
        init_kwargs = dict(self.init_kwargs)
        if diff_kwargs is not None:
            init_kwargs.update(diff_kwargs)
        return DataLoader(sampler=self.valid_sampler, **init_kwargs)

    def num_samples(self):
        return len(self.sampler)
class BaseDataLoaderExplicitSplit(DataLoader):
    """
    Base class for data loaders whose train/val split is decided upstream:
    each split gets its own dataset instance and its own loader.
    """

    def __init__(self, dataset, batch_size, shuffle, num_workers, collate_fn=default_collate, prefetch_factor=2):
        self.shuffle = shuffle
        self.batch_idx = 0
        self.n_samples = len(dataset)
        # kept around so validation loaders can be cloned from these kwargs
        self.init_kwargs = dict(dataset=dataset,
                                batch_size=batch_size,
                                shuffle=shuffle,
                                collate_fn=collate_fn,
                                num_workers=num_workers,
                                pin_memory=True,
                                prefetch_factor=prefetch_factor)
        super().__init__(**self.init_kwargs)
class BaseMultiDataLoader:
    """
    Round-robin wrapper over several dataloaders.

    Effectively undersamples the larger loaders: one "epoch" contains the
    same number of batches from each loader.
    """

    def __init__(self, dataloaders):
        self.dataloaders = dataloaders
        self.batch_size = self.dataloaders[0].batch_size

    def __getitem__(self, item):
        # batch i comes from loader i mod K
        loader = self.dataloaders[item % len(self.dataloaders)]
        return next(iter(loader))

    def __len__(self):
        shortest = min(len(dl) for dl in self.dataloaders)
        return shortest * len(self.dataloaders)

    def num_samples(self):
        return sum(len(dl.sampler) for dl in self.dataloaders)
| 3,447 | 30.633028 | 130 | py |
frozen-in-time | frozen-in-time-main/utils/custom_transforms.py | import numbers
from typing import List, Tuple
import torch
from torch import Tensor
from torchvision.transforms import functional_pil as F_pil, functional_tensor as F_t
from torchvision.transforms.functional import center_crop, crop
def _get_image_size(img: Tensor) -> List[int]:
    """Returns image size as [w, h], dispatching on tensor vs PIL input."""
    if not isinstance(img, torch.Tensor):
        return F_pil._get_image_size(img)
    return F_t._get_image_size(img)
def center_plus_four_crops(img: Tensor, size: List[int],
                           margin_h: int, margin_w: int) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """Crop the given image into four tiled borders and the central crop.

    :param img: PIL Image or Tensor
    :param size: (h, w) of the central crop; an int means a square crop
    :param margin_h: height of the border ring above/below the centre
    :param margin_w: width of the border ring left/right of the centre
    :return: (tl, tr, bl, br, center) crops
    :raises ValueError: if the crop plus margins does not fit in the image
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])
    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))
    # bug fix: the original only validated the width margin, so an oversized
    # height margin silently produced crops outside the image
    if crop_width + margin_w > image_width or crop_height + margin_h > image_height:
        msg = "Requested margin size {} + input {} is bigger than input size {}"
        raise ValueError(msg.format((margin_h, margin_w), size, (image_height, image_width)))

    # grid lines delimiting the margin ring around the central crop
    x11 = (image_width - crop_width - 2 * margin_w) // 2
    x12 = x11 + margin_w
    x21 = x12 + crop_width
    y11 = (image_height - crop_height - 2 * margin_h) // 2
    y12 = y11 + margin_h
    y21 = y12 + crop_height

    # crop(img, top, left, height, width)
    tl = crop(img, y11, x11, margin_h, margin_w + crop_width)
    tr = crop(img, y11, x21, margin_h + crop_height, margin_w)
    bl = crop(img, y12, x11, margin_h + crop_height, margin_w)
    br = crop(img, y21, x12, margin_h, margin_w + crop_width)
    center = center_crop(img, [crop_height, crop_width])
    return tl, tr, bl, br, center
def center_plus_twohori_crops(img: Tensor, size: List[int],
                              margin_w: int) -> Tuple[Tensor, Tensor, Tensor]:
    """Return (left, right, center): the central `size` crop plus the two
    vertical strips of width `margin_w` flanking it."""
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    elif isinstance(size, (tuple, list)) and len(size) == 1:
        size = (size[0], size[0])
    if len(size) != 2:
        raise ValueError("Please provide only two dimensions (h, w) for size.")

    image_width, image_height = _get_image_size(img)
    crop_height, crop_width = size
    if crop_width > image_width or crop_height > image_height:
        msg = "Requested crop size {} is bigger than input size {}"
        raise ValueError(msg.format(size, (image_height, image_width)))
    if crop_width + margin_w > image_width:
        msg = "Requested margin size {} + input {} is bigger than input size {}"
        raise ValueError(msg.format((0, margin_w), size, (image_height, image_width)))

    # x positions of the left strip, the central crop and the right strip
    left_x = (image_width - crop_width - 2 * margin_w) // 2
    centre_x = left_x + margin_w
    right_x = centre_x + crop_width
    top_y = (image_height - crop_height) // 2

    left = crop(img, top_y, left_x, crop_height, margin_w)
    right = crop(img, top_y, right_x, crop_height, margin_w)
    center = center_crop(img, [crop_height, crop_width])
    return left, right, center
from torch import nn
class TwoHoriCrop(nn.Module):
    """Module wrapper around `center_plus_twohori_crops`, so the crop can be
    composed inside an nn.Sequential / transform pipeline."""

    def __init__(self, size, margin_w):
        super().__init__()
        self.size = size
        self.margin_w = margin_w

    def forward(self, x):
        # returns the (left, right, center) crop tuple
        return center_plus_twohori_crops(x, self.size, self.margin_w)
if __name__ == "__main__":
    # smoke test: crop a sample image and save all the pieces
    from PIL import Image

    img = Image.open('visualisations/guitar.png')

    four = center_plus_four_crops(img, [336, 336], 112, 112)
    for tag, piece in zip(['tl', 'tr', 'bl', 'br', 'center'], four):
        piece.save(f'visualisations/guitar_{tag}.png')

    hori = center_plus_twohori_crops(img, [448, 448], 112)
    for tag, piece in zip(['left', 'right', 'center2'], hori):
        piece.save(f'visualisations/guitar_{tag}.png')
| 4,569 | 33.360902 | 109 | py |
frozen-in-time | frozen-in-time-main/utils/video.py | import random
import cv2
import numpy as np
import torch
def load_frames_from_video_path(path, num_frames, sample='rand'):
    """Read `num_frames` frames from the video at `path` with OpenCV.

    :param sample: 'rand' picks a random frame per interval, 'uniform' the
        interval midpoint
    :return: (frames, frame_idxs) where frames is a float tensor
        (F, C, H, W) in [0, 1]
    :raises ValueError: if any sampled frame fails to decode
    """
    cap = cv2.VideoCapture(path)
    assert (cap.isOpened())
    vlen = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    acc_samples = min(num_frames, vlen)
    intervals = np.linspace(start=0, stop=vlen, num=acc_samples + 1).astype(int)
    # inclusive (first, last) frame of each interval
    ranges = []
    for idx, interv in enumerate(intervals[:-1]):
        ranges.append((interv, intervals[idx + 1] - 1))
    if sample == 'rand':
        # bug fix: random.choice(range(lo, hi)) crashed on empty ranges
        # (whenever vlen <= num_frames) and excluded the interval's last
        # frame; randint is inclusive of both bounds
        frame_idxs = [random.randint(x[0], x[1]) for x in ranges]
    elif sample == 'uniform':
        frame_idxs = [(x[0] + x[1]) // 2 for x in ranges]
    else:
        raise NotImplementedError
    frames = []
    for index in frame_idxs:
        cap.set(cv2.CAP_PROP_POS_FRAMES, index)
        ret, frame = cap.read()
        if ret:
            # NOTE(review): debug dump — needs an existing `images/` dir;
            # cv2.imwrite fails silently when it is missing. Confirm this is
            # still wanted.
            cv2.imwrite(f'images/{index}.jpg', frame)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = torch.from_numpy(frame)
            # (H x W x C) to (C x H x W)
            frame = frame.permute(2, 0, 1)
            frames.append(frame)
        else:
            raise ValueError
    frames = torch.stack(frames).float() / 255
    cap.release()
    return frames, frame_idxs
| 1,264 | 29.853659 | 80 | py |
frozen-in-time | frozen-in-time-main/utils/visualisation.py | import matplotlib
import numpy as np
import torch
matplotlib.use('Agg')
def visualise_path(pred, target, window):
    """Render an alignment path as a (3, H, W) RGB image in [0, 1].

    :param pred: iterable of (clip_idx, step_idx) predicted assignments
    :param target: iterable of (clip_idx, step_idx) ground-truth assignments
    :param window: (H, W) tensor; NaN entries mark cells outside the
        alignment window and are drawn grey
    :return: (3, H, W) tensor — green: correct prediction, red: wrong
        prediction, blue: unmatched ground truth, white: background

    Note: every predicted clip_idx must also occur in `target`, otherwise
    the row lookup raises ValueError.
    """
    colour_tp = torch.Tensor((64, 191, 64))
    colour_fp = torch.Tensor((191, 64, 64))
    colour_gt = torch.Tensor((102, 153, 255))

    canvas = torch.ones_like(window).unsqueeze(0).repeat(3, 1, 1) * 255
    grey = 130 * torch.ones_like(canvas)
    canvas = torch.where(torch.isnan(window), grey, canvas)

    # rows are indexed by position within the sorted unique target clip ids
    row_ids = np.unique(np.array([t[0] for t in target])).tolist()

    for t in target:
        canvas[:, row_ids.index(t[0]), t[1]] = colour_gt

    for p in pred:
        row = row_ids.index(p[0])
        if (canvas[:, row, p[1]] == colour_gt).all():
            canvas[:, row, p[1]] = colour_tp
        else:
            canvas[:, row, p[1]] = colour_fp
    return canvas / 255
def batch_path_vis(pred_dict, target, window):
    """Render one path grid per assignment method.

    :param pred_dict: {method_name: predictions} mapping
    :return: stacked (K, 3, H, W) tensor, one image per method
    """
    window = window.cpu()
    rendered = []
    for method, pred in pred_dict.items():
        # the min-dist assignment has no banding window, so draw it on a
        # clean (all-zero) grid
        method_window = torch.zeros_like(window) if method == 'min_dist' else window
        rendered.append(visualise_path(pred, target, method_window))
    return torch.stack(rendered)
if __name__ == "__main__":
    # demo: predicted clip indices must be a subset of the target's clip
    # indices, otherwise `local_idxs.index(...)` raises ValueError — the old
    # example used clip 2, which is absent from targets {1, 3} and crashed
    pred = [[1, 1], [3, 2]]
    gt = [[1, 1], [3, 4]]
    window = torch.zeros((5, 6))
    visualise_path(pred, gt, window)
| 1,768 | 28.983051 | 104 | py |
frozen-in-time | frozen-in-time-main/utils/visualizer.py | """A simple HTML visualizer.
It is based on the Cycle-GAN codebase:
https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
"""
import os
from pathlib import Path
import numpy as np
from . import html
class RetrievalVis:
    """This class includes several functions that can display/save images.

    It uses a Python library 'visdom' for display, and a Python library 'dominate'
    (wrapped in 'HTML') for creating HTML files with images.
    """

    def __init__(self, exp_name, web_dir, src_video_dir, vis_vid_freq, num_samples=50):
        """Initialize the Visualizer class.

        Creates the web/image output directories and symlinks the source
        video directory into the web directory.
        """
        self.name = exp_name
        self.web_dir = web_dir
        self.vis_vid_freq = vis_vid_freq
        self.img_dir = os.path.join(self.web_dir, "images")
        self.num_samples = num_samples

        self.data_type = 'images'  # 'images' or 'videos'
        assert self.data_type in ('images', 'videos')

        print(f"create web directory {self.web_dir}...")
        mkdirs([self.web_dir, self.img_dir])

        # cluster specific
        if "$TMPDIR" in src_video_dir:
            src_video_dir = src_video_dir.replace("$TMPDIR", os.environ['TMPDIR'])
        src_dir = Path(src_video_dir).absolute()
        print(f"symlinking videos from {src_dir}...")
        sym_dir = (Path(self.web_dir) / "videos").absolute()
        if sym_dir.is_symlink():
            os.remove(sym_dir)
        sym_dir.symlink_to(src_dir)

    def visualize_ranking(self, sims, epoch, meta, nested_metrics):
        """Dump an HTML page of top-k retrievals every `vis_vid_freq` epochs.

        :param sims: (N, N) text-to-video similarity matrix; row i's diagonal
            entry is query i's ground-truth match
        :param meta: dict with 'raw_captions' and 'paths' aligned with sims
        """
        if not (self.vis_vid_freq and epoch % self.vis_vid_freq == 0):
            return

        dists = -sims
        np.random.seed(0)
        sorted_ranks = np.argsort(dists, axis=1)
        gt_dists = np.diag(dists)
        rankings = []
        vis_top_k = 5
        hide_gt = False
        # robustness fix: never request more samples than there are queries,
        # otherwise np.random.choice(..., replace=False) raises ValueError
        sample_size = min(self.num_samples, dists.shape[0])
        sample = np.random.choice(np.arange(dists.shape[0]), size=sample_size,
                                  replace=False)
        for ii in sample:
            ranked_idx = sorted_ranks[ii][:vis_top_k]
            gt_captions = meta["raw_captions"][ii]
            datum = {
                "gt-sim": -gt_dists[ii],
                "gt-captions": gt_captions,
                "gt-rank": np.where(sorted_ranks[ii] == ii)[0][0],
                "gt-path": meta["paths"][ii],
                "top-k-sims": -dists[ii][ranked_idx],
                "top-k-paths": np.array(meta["paths"])[ranked_idx],
                "hide-gt": hide_gt,
            }
            rankings.append(datum)
        self.display_current_results(
            rankings,
            epoch=epoch,
            metrics=nested_metrics["t2v_metrics"],
        )

    def display_current_results(self, rankings, epoch, metrics):
        """Save the current retrieval rankings to an HTML file.

        Parameters:
            rankings (list of dict) - - one entry per sampled query (see visualize_ranking)
            epoch (int)             - - the current epoch
            metrics (dict)          - - retrieval metrics (R1/R5/R10/MedR) for the header
        """
        if not Path(self.web_dir).exists():
            Path(self.web_dir).mkdir(exist_ok=True, parents=True)
        print(f"updating webpage at {self.web_dir}")
        title = f"Experiment name = {self.name}"
        refresh = True
        if not refresh:
            print("DISABLING WEB PAGE REFRESH")
        webpage = html.HTML(web_dir=self.web_dir, title=title, refresh=refresh)

        msg = f"epoch [{epoch}] - {self.name}"
        webpage.add_header(msg)
        msg = (f"R1: {metrics['R1']:.1f}, "
               f"R5: {metrics['R5']:.1f}, "
               f"R10: {metrics['R10']:.1f}, "
               f"MedR: {metrics['MedR']}")
        webpage.add_header(msg)

        print(f"Top {len(rankings[0])} retreived videos at epoch: {epoch}")

        for ranking in rankings:
            vids, txts, links = [], [], []
            gt_vid_path = os.path.join('videos', ranking["gt-path"])
            gt_captions = ranking['gt-captions']
            gt_captions = "<br>" + (gt_captions) + "<br>"

            if ranking["hide-gt"]:
                txts.append(gt_captions)
                links.append("hidden")
                vids.append("hidden")
            else:
                txt = (f"{gt_captions}<br><b>Rank: {ranking['gt-rank']}, "
                       f"Sim: {ranking['gt-sim']:.3f} [{Path(ranking['gt-path']).stem}]")
                txts.append(txt)
                links.append(gt_vid_path)
                vids.append(gt_vid_path)

            for idx, (vid_path, sim) in enumerate(zip(ranking["top-k-paths"],
                                                      ranking["top-k-sims"])):
                vid_path = Path(os.path.join('videos', vid_path))
                if ranking["hide-gt"]:
                    txt = f"choice: {idx}"
                else:
                    txt = f"<b>Rank: {idx}, Sim: {sim:.3f}, [{Path(vid_path).stem}]"
                txts.append(txt)
                vids.append(vid_path)
                links.append(vid_path)
            if self.data_type == 'videos':
                webpage.add_videos(vids, txts, links, width=200)
            elif self.data_type == 'images':
                webpage.add_images(vids, txts, links, width=200)
        print(f"added {len(vids)} videos")
        webpage.save()
def mkdirs(paths):
    """create empty directories if they don't exist

    Parameters:
        paths (str list) -- a list of directory paths, or a single path
    """
    # a str is never a list, so a single path goes through mkdir directly
    if not isinstance(paths, list):
        mkdir(paths)
    else:
        for entry in paths:
            mkdir(entry)
def mkdir(path):
    """create a single empty directory if it didn't exist

    Parameters:
        path (str) -- a single directory path
    """
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`;
    # unlike the old code it raises if `path` exists but is a regular file,
    # which surfaces a real misconfiguration instead of hiding it
    os.makedirs(path, exist_ok=True)
| 6,109 | 36.030303 | 89 | py |
frozen-in-time | frozen-in-time-main/model/loss.py | import torch
import torch.nn.functional as F
from torch import nn
class NormSoftmaxLoss(nn.Module):
    """Symmetric temperature-scaled cross-entropy over a similarity matrix,
    where the diagonal entries are the positive pairs."""

    def __init__(self, temperature=0.05):
        super().__init__()
        self.temperature = temperature

    def forward(self, x):
        "Assumes input x is similarity matrix of N x M \in [-1, 1], computed using the cosine similarity between normalised vectors"
        scaled = x / self.temperature
        row_logsm = F.log_softmax(scaled, dim=1)
        col_logsm = F.log_softmax(scaled.t(), dim=1)

        # average log-probability of the positive (diagonal) pairs,
        # in both retrieval directions
        pos_rows = torch.diag(row_logsm)
        pos_cols = torch.diag(col_logsm)
        loss_rows = pos_rows.sum() / len(pos_rows)
        loss_cols = pos_cols.sum() / len(pos_cols)

        return - loss_rows - loss_cols
class MaxMarginRankingLoss(nn.Module):
    """Hinge ranking loss over a similarity matrix: every diagonal (positive)
    entry should beat every off-diagonal entry by `margin`, in both
    directions."""

    def __init__(self, margin=1, fix_norm=True):
        super().__init__()
        self.fix_norm = fix_norm
        self.loss = nn.MarginRankingLoss(margin)  # kept for compatibility; not used in forward
        self.margin = margin

    def forward(self, x):
        n = x.size()[0]

        # positives: each row's diagonal score, broadcast against every column,
        # duplicated once per direction
        diag = torch.diag(x).unsqueeze(1).expand(n, n).contiguous().view(-1, 1)
        positives = torch.cat((diag, diag), 0)

        # negatives: all scores, row-major and column-major
        negatives = torch.cat((x.view(-1, 1),
                               x.transpose(0, 1).contiguous().view(-1, 1)), 0)

        hinge = F.relu(self.margin - (positives - negatives))

        if self.fix_norm:
            # drop the positive-vs-itself comparisons (diagonal entries)
            mask = torch.ones(x.shape) - torch.eye(x.shape[0])  # 128 x 128
            mask_rows = mask.view(-1, 1)
            mask_cols = mask.transpose(0, 1).contiguous().view(-1, 1)
            keep_idx = torch.nonzero(torch.cat((mask_rows, mask_cols), 0).flatten()).flatten()
            if positives.is_cuda:
                keep_idx = keep_idx.cuda()
            kept_pos = torch.index_select(positives, 0, keep_idx)
            kept_neg = torch.index_select(negatives, 0, keep_idx)
            hinge = F.relu(self.margin - (kept_pos - kept_neg))

        return hinge.mean()
class CrossEntropy(nn.Module):
    """Thin nn.Module wrapper around nn.CrossEntropyLoss (expects raw logits
    and integer class targets)."""

    def __init__(self):
        super().__init__()
        self.loss = nn.CrossEntropyLoss()

    def forward(self, output, target):
        criterion = self.loss
        return criterion(output, target)
def cosine_sim(im, s):
    """Cosine similarity between all the image and sentence pairs.

    Assumes both embedding matrices are already row-normalised, so the
    plain dot product equals cosine similarity.
    """
    return torch.mm(im, s.t())
def order_sim(im, s):
    """Order embeddings similarity measure $max(0, s-im)$.

    Returns an (num_images, num_sentences) score matrix; entries are the
    negated L2 norm of the positive part of (sentence - image).
    """
    # broadcasting reproduces the explicit expand of the textbook version
    diff = s.unsqueeze(1) - im.unsqueeze(0)      # (M, N, D)
    penalty = diff.clamp(min=0).pow(2).sum(2).sqrt()
    return -penalty.t()
def nll_loss(output, target):
    """Negative log-likelihood loss; `output` must contain log-probabilities."""
    loss_value = F.nll_loss(output, target)
    return loss_value
if __name__ == "__main__":
    # smoke test: run the softmax loss on random similarities in [-1, 1]
    import torch

    example_sims = (torch.rand([10, 8]) * 2) - 1
    NormSoftmaxLoss()(example_sims)
| 2,813 | 27.424242 | 132 | py |
frozen-in-time | frozen-in-time-main/model/model.py | import timm
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers import AutoModel
from base import BaseModel
from model.video_transformer import SpaceTimeTransformer
from utils.util import state_dict_data_parallel_fix
class FrozenInTime(BaseModel):
    def __init__(self,
                 video_params,
                 text_params,
                 projection_dim=256,
                 load_checkpoint=None,
                 projection='minimal',
                 load_temporal_fix='zeros'):
        """Dual-encoder joining a video transformer and a Huggingface text model.

        :param video_params: dict with 'model' and 'pretrained'; for
            SpaceTimeTransformer optionally 'num_frames', 'time_init',
            'attention_style', 'arch_config', 'vit_init'.
        :param text_params: dict with 'model' (Huggingface name) and
            'pretrained' (must be truthy).
        :param projection_dim: size of the shared embedding space.
        :param load_checkpoint: path to a full-model checkpoint; ''/None means
            initialise the video tower from timm ViT weights instead.
        :param projection: 'minimal' (ReLU+Linear / Linear heads) or ''
            (identity heads).
        :param load_temporal_fix: how to fill temporal embeddings when the
            checkpoint was trained with a different number of frames.
        """
        super().__init__()

        self.video_params = video_params
        self.text_params = text_params
        self.load_temporal_fix = load_temporal_fix
        if not text_params['pretrained']:
            raise NotImplementedError("Huggingface text models require pretrained init.")

        self.text_model = AutoModel.from_pretrained(text_params['model'])
        self.text_model.train()

        pretrained = video_params['pretrained']
        if video_params['model'] == "SpaceTimeTransformer":
            num_frames = video_params.get('num_frames', 4)
            time_init = video_params.get('time_init', 'zeros')
            attention_style = video_params.get('attention_style', 'frozen-in-time')
            arch_config = video_params.get('arch_config', 'base_patch16_224')
            # NOTE(review): vit_init is read but never used below — confirm intent
            vit_init = video_params.get('vit_init', 'imagenet-21k')
            if arch_config == 'base_patch16_224':
                # plain ViT supplies the (optionally pretrained) spatial weights
                vit_model = timm.models.vision_transformer.vit_base_patch16_224(pretrained=pretrained)
                model = SpaceTimeTransformer(num_frames=num_frames,
                                             time_init=time_init,
                                             attention_style=attention_style)
            else:
                raise NotImplementedError

            model.head = nn.Identity()
            model.pre_logits = nn.Identity()
            ftr_dim = model.embed_dim
            if load_checkpoint in ["", None]:
                # no full checkpoint: seed the video tower from the ViT weights
                # (non-strict: temporal parameters have no ViT counterpart)
                vit_checkpoint = vit_model.state_dict()
                model.load_state_dict(vit_checkpoint, strict=False)
            self.video_model = model
        else:
            raise NotImplementedError(f"{video_params['model']} not implemented")

        # for backwards compatibility (old models)
        self.video_model.fc = nn.Identity()

        # Project to a common embedding
        if projection == 'minimal':
            txt_proj = nn.Sequential(nn.ReLU(),
                                     nn.Linear(self.text_model.config.hidden_size, projection_dim),
                                     )
            vid_proj = nn.Sequential(
                nn.Linear(ftr_dim, projection_dim)
            )
        elif projection == '':
            txt_proj = nn.Identity()
            vid_proj = nn.Identity()
        else:
            raise NotImplementedError
        self.txt_proj = txt_proj
        self.vid_proj = vid_proj

        if load_checkpoint not in ["", None]:
            checkpoint = torch.load(load_checkpoint)
            state_dict = checkpoint['state_dict']
            # reconcile DataParallel 'module.' prefixes, then resize temporal
            # embeddings before the strict load
            new_state_dict = state_dict_data_parallel_fix(state_dict, self.state_dict())
            new_state_dict = self._inflate_positional_embeds(new_state_dict)
            self.load_state_dict(new_state_dict, strict=True)
def set_device(self, device):
self.device = device
def forward(self, data, return_embeds=True):
text_data = data['text']
video_data = data['video']
text_embeddings = self.compute_text(text_data)
video_embeddings = self.compute_video(video_data)
if return_embeds:
return text_embeddings, video_embeddings
return sim_matrix(text_embeddings, video_embeddings)
def compute_text(self, text_data):
if self.text_params['model'].startswith('bert'):
text_embeddings = self.text_model(text_data['input_ids'], attention_mask=text_data['attention_mask'])[
'pooler_output']
elif self.text_params['model'].startswith('distilbert'):
text_embeddings = self.text_model(**text_data).last_hidden_state[:, 0, :]
else:
raise NotImplementedError
text_embeddings = self.txt_proj(text_embeddings)
return text_embeddings
def compute_video(self, video_data):
video_embeddings = self.video_model(video_data)
video_embeddings = self.vid_proj(video_embeddings)
return video_embeddings
    def _inflate_positional_embeds(self, new_state_dict):
        """Adapt checkpoint positional embeddings to this model's configuration.

        Lets a timesformer checkpoint trained with a different ``num_frames``
        be loaded: the temporal embedding is truncated, zero-padded, or
        interpolated (per ``self.load_temporal_fix``) to match the current
        frame count. Spatial ``pos_embed`` mismatches are not supported.

        Args:
            new_state_dict: state dict about to be loaded (modified in place
                and also returned).

        Returns:
            The (possibly modified) state dict.

        Raises:
            NotImplementedError: for an unknown ``load_temporal_fix`` or a
                spatial patch-count mismatch.
        """
        # allow loading of timesformer with fewer num_frames
        curr_keys = list(self.state_dict().keys())
        if 'video_model.temporal_embed' in new_state_dict and 'video_model.temporal_embed' in curr_keys:
            load_temporal_embed = new_state_dict['video_model.temporal_embed']
            load_num_frames = load_temporal_embed.shape[1]
            curr_num_frames = self.video_params['num_frames']
            embed_dim = load_temporal_embed.shape[2]

            if load_num_frames != curr_num_frames:
                if load_num_frames > curr_num_frames:
                    # Checkpoint has MORE frames: keep only the first curr_num_frames.
                    print(f'### loaded {self.video_params["model"]} model has MORE frames than current...'
                          f'### loading weights, filling in the extras via {self.load_temporal_fix}')
                    new_temporal_embed = load_temporal_embed[:, :curr_num_frames, :]
                else:
                    print(f'### loaded {self.video_params["model"]} model has FEWER frames than current...'
                          f'### loading weights, filling in the extras via {self.load_temporal_fix}')
                    if self.load_temporal_fix == 'zeros':
                        # Pad the missing frame positions with zeros.
                        new_temporal_embed = torch.zeros([load_temporal_embed.shape[0], curr_num_frames, embed_dim])
                        new_temporal_embed[:, :load_num_frames] = load_temporal_embed
                    elif self.load_temporal_fix in ['interp', 'bilinear']:
                        # interpolate
                        # unsqueeze so pytorch thinks its an image
                        mode = 'nearest'
                        if self.load_temporal_fix == 'bilinear':
                            mode = 'bilinear'
                        load_temporal_embed = load_temporal_embed.unsqueeze(0)
                        new_temporal_embed = F.interpolate(load_temporal_embed,
                                                           (curr_num_frames, embed_dim), mode=mode).squeeze(0)
                    else:
                        raise NotImplementedError
                new_state_dict['video_model.temporal_embed'] = new_temporal_embed
        # allow loading with smaller spatial patches. assumes custom border crop, to append the
        # border patches to the input sequence
        if 'video_model.pos_embed' in new_state_dict and 'video_model.pos_embed' in curr_keys:
            load_pos_embed = new_state_dict['video_model.pos_embed']
            load_num_patches = load_pos_embed.shape[1]
            curr_pos_embed = self.state_dict()['video_model.pos_embed']
            if load_num_patches != curr_pos_embed.shape[1]:
                raise NotImplementedError(
                    'Loading models with different spatial resolution / patch number not yet implemented, sorry.')

        return new_state_dict
def sim_matrix(a, b, eps=1e-8):
    """Pairwise cosine-similarity matrix between two batches of embeddings.

    Rows of ``a`` and ``b`` are L2-normalised before the dot products; ``eps``
    acts as a floor on the norms for numerical stability (all-zero rows map to
    zero similarity instead of NaN).
    """
    a_norms = a.norm(dim=1, keepdim=True).clamp_min(eps)
    b_norms = b.norm(dim=1, keepdim=True).clamp_min(eps)
    return torch.mm(a / a_norms, (b / b_norms).t())


def compute_similarity(a, b, a_mask=None, b_mask=None, style='single', eps=1e-8, return_raw=False, temp=0.5):
    """Return the (text->video, video->text) similarity matrix pair.

    Only ``style='single'`` is supported: a plain cosine-similarity matrix and
    its transpose. The mask / ``return_raw`` / ``temp`` arguments are accepted
    for API compatibility but unused in this style.
    """
    if style != 'single':
        raise NotImplementedError
    sim = sim_matrix(a, b, eps=eps)
    return sim, sim.t()
# Import-only module: no CLI behaviour when executed directly.
if __name__ == "__main__":
    pass
| 7,958 | 43.463687 | 116 | py |
frozen-in-time | frozen-in-time-main/model/video_transformer.py | """
Implementations of Video Transformers in PyTorch
A PyTorch implementation of space-time transformer as described in
'Frozen in Time: A Joint Image and Video Encoder for End-to-End Retrieval' - https://arxiv.org/abs/2104.00650
A PyTorch implementation of timesformer as described in
'Is Space-Time Attention All You Need for Video Understanding?' - https://arxiv.org/abs/2102.05095
Acknowledgments:
- This code builds on Ross Wightman's vision_transformer code in pytorch-image-models:
https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py
- It is also inspired by lucidrains timesformer implementation:
https://github.com/lucidrains/TimeSformer-pytorch
Hacked together by Max Bain
"""
from collections import OrderedDict
from functools import partial
import torch
from einops import rearrange, repeat
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from torch import einsum, nn
def attn(q, k, v):
    """Dot-product attention (no scaling applied here; callers pre-scale q).

    ``q``/``k``/``v`` are (batch, tokens, dim); returns the attention-weighted
    values with shape (batch, q_tokens, dim).
    """
    scores = einsum('b i d, b j d -> b i j', q, k)
    weights = scores.softmax(dim=-1)
    return einsum('b i j, b j d -> b i d', weights, v)
class Mlp(nn.Module):
    """Transformer feed-forward block: linear -> activation -> dropout ->
    linear -> dropout. Hidden/output widths default to ``in_features``."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        """Apply the MLP; the same dropout module follows each linear layer."""
        for layer in (self.fc1, self.act, self.drop, self.fc2, self.drop):
            x = layer(x)
        return x
class VideoPatchEmbed(nn.Module):
    """Video to Patch Embedding.

    Each frame is split into non-overlapping patches with a strided Conv2d,
    exactly as in ViT; ``num_patches`` counts patches over all ``num_frames``
    frames.
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768,
                 num_frames=8):
        super().__init__()
        img_size = to_2tuple(img_size)
        patch_size = to_2tuple(patch_size)
        patches_per_frame = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
        self.img_size = img_size
        self.patch_size = patch_size
        self.num_patches = patches_per_frame * num_frames
        self.num_frames = num_frames
        self.embed_dim = embed_dim
        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        """(batch, frames, C, H, W) -> (batch * frames, embed_dim, H', W')."""
        batch, frames, channels, height, width = x.shape
        assert frames <= self.num_frames
        return self.proj(x.view(-1, channels, height, width))
class VarAttention(nn.Module):
    """Multi-head attention that attends along either the spatial or the
    temporal axis, selected by the einops patterns passed to ``forward``.

    With ``initialize='zeros'`` the qkv weights start at 0 and the output
    projection at 1, so a freshly added temporal-attention path starts out
    benign (see the inline comment below).
    """

    def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0.,
                 initialize='random'):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.proj = nn.Linear(dim, dim)
        if initialize == 'zeros':
            # NOTE(review): assumes qkv_bias=True when initialize='zeros';
            # with bias=False, ``self.qkv.bias`` is None and fill_ would fail.
            self.qkv.weight.data.fill_(0)
            self.qkv.bias.data.fill_(0)
            # fill proj weight with 1 here to improve training dynamics. Otherwise temporal attention inputs
            # are multiplied by 0*0, which is hard for the model to move out of.
            self.proj.weight.data.fill_(1)
            self.proj.bias.data.fill_(0)
        # NOTE(review): self.attn_drop is created but never applied in forward.
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj_drop = nn.Dropout(proj_drop)

    def forward(self, x, einops_from, einops_to, **einops_dims):
        """Attend along the axis described by ``einops_from -> einops_to``.

        The CLS token is split off and always attends over all patches across
        both space and time; the remaining tokens attend only along the
        rearranged axis (frames or patches).
        """
        h = self.num_heads
        # project x to q, k, v values
        q, k, v = self.qkv(x).chunk(3, dim=-1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        q *= self.scale

        # splice out CLS token at index 1
        (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v))

        # let CLS token attend to key / values of all patches across time and space
        cls_out = attn(cls_q, k, v)

        # rearrange across time or space
        q_, k_, v_ = map(lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims), (q_, k_, v_))

        # expand cls token keys and values across time or space and concat
        r = q_.shape[0] // cls_k.shape[0]
        cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r=r), (cls_k, cls_v))

        k_ = torch.cat((cls_k, k_), dim=1)
        v_ = torch.cat((cls_v, v_), dim=1)

        # attention
        out = attn(q_, k_, v_)

        # merge back time or space
        out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims)

        # concat back the cls token
        out = torch.cat((cls_out, out), dim=1)

        # merge back the heads
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)

        ## to out
        x = self.proj(out)
        x = self.proj_drop(x)
        return x
class SpaceTimeBlock(nn.Module):
    """One divided space-time transformer block: temporal attention, then
    spatial attention, then an MLP — each pre-normed with a residual path."""

    def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
                 drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, time_init='zeros',
                 attention_style='frozen-in-time'):
        super().__init__()
        self.norm1 = norm_layer(dim)
        self.attn = VarAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
        self.timeattn = VarAttention(
            dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop,
            initialize=time_init)
        # Stochastic depth on the residual branches (identity when drop_path == 0).
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
        self.norm3 = norm_layer(dim)
        self.attention_style = attention_style

    def forward(self, x, einops_from_space, einops_to_space, einops_from_time, einops_to_time,
                time_n, space_f):
        # Temporal attention first (pre-norm), with a plain residual.
        time_residual = x + self.timeattn(self.norm3(x), einops_from_time, einops_to_time, n=time_n)
        # Spatial attention on the time-attended features.
        space_output = self.attn(self.norm1(time_residual), einops_from_space,
                                 einops_to_space, f=space_f)
        if self.attention_style != 'frozen-in-time':
            raise NotImplementedError
        # Note: the residual is taken from the block input x, not time_residual.
        space_residual = x + self.drop_path(space_output)
        return space_residual + self.drop_path(self.mlp(self.norm2(space_residual)))
class SpaceTimeTransformer(nn.Module):
    """ Vision Transformer
    A PyTorch impl of : `Space-Time Transformer` from Frozen-in-time - by Max Bain.
        https://arxiv.org/abs/2104.00650
    Based off:
     - ViT implementation from the timm library
       [https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py]
     - lucidrains timesformer implementation [https://github.com/lucidrains/TimeSformer-pytorch].
    Notable differences:
     - allows for variable length input frames (<= num_frames)
     - allows for variable length input resolution (<= (img_size, img_size)) [UNTESTED]
     - different attention block mechanism
    """

    def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
                 num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, representation_size=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None,
                 num_frames=8, time_init='rand', attention_style='frozen-in-time'):
        """
        Args:
            img_size (int, tuple): input image size
            patch_size (int, tuple): patch size
            in_chans (int): number of input channels
            num_classes (int): number of classes for classification head
            embed_dim (int): embedding dimension
            depth (int): depth of transformer
            num_heads (int): number of attention heads
            mlp_ratio (int): ratio of mlp hidden dim to embedding dim
            qkv_bias (bool): enable bias for qkv if True
            qk_scale (float): override default qk scale of head_dim ** -0.5 if set
            representation_size (Optional[int]): enable and set representation layer (pre-logits) to this value if set
            drop_rate (float): dropout rate
            attn_drop_rate (float): attention dropout rate
            drop_path_rate (float): stochastic depth rate
            hybrid_backbone (nn.Module): CNN backbone to use in-place of PatchEmbed module
            norm_layer: (nn.Module): normalization layer
            num_frames: (int) maximum number of frames expected as input
            time_init: (str) how to initialise the time attention layer, 'zeros' allows for the timesformer to start off
                as ViT.
            attention_style: (str) how to attend to space and time.
        """
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models
        self.num_frames = num_frames
        self.embed_dim = embed_dim
        norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
        print("######USING ATTENTION STYLE: ", attention_style)
        if hybrid_backbone is not None:
            raise NotImplementedError('hybrid backbone not implemented')
        else:
            self.patch_embed = VideoPatchEmbed(
                img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, num_frames=num_frames)
        num_patches = self.patch_embed.num_patches
        self.patches_per_frame = num_patches // num_frames

        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # Spatial positions are shared across frames, hence patches_per_frame + 1.
        self.pos_embed = nn.Parameter(
            torch.zeros(1, self.patches_per_frame + 1,
                        embed_dim))  # remember to take pos_embed[1:] for tiling over time
        self.temporal_embed = nn.Parameter(torch.zeros(1, num_frames, embed_dim))

        self.pos_drop = nn.Dropout(p=drop_rate)
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            SpaceTimeBlock(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, time_init=time_init,
                attention_style=attention_style)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

        # Representation layer
        if representation_size:
            self.num_features = representation_size
            self.pre_logits = nn.Sequential(OrderedDict([
                ('fc', nn.Linear(embed_dim, representation_size)),
                ('act', nn.Tanh())
            ]))
        else:
            self.pre_logits = nn.Identity()

        # Classifier head
        self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()

        trunc_normal_(self.pos_embed, std=.02)
        trunc_normal_(self.cls_token, std=.02)

        # if num_frames > 1, then we perform ViT inflation and initialise time attention to zero so not necessary.
        if num_frames == 1:
            self.apply(self._init_weights)

        ## einops transformations
        self.einops_from_space = 'b (f n) d'
        self.einops_to_space = '(b f) n d'
        self.einops_from_time = 'b (f n) d'
        self.einops_to_time = '(b n) f d'

    def _init_weights(self, m):
        """ViT-style init: trunc-normal linear weights, unit LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        """Parameters to exclude from weight decay."""
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        """Return the classification head module."""
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        """Replace the classification head for ``num_classes`` outputs."""
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x):
        """Embed a clip (batch, frames, C, H, W) and return the CLS feature."""
        b, curr_frames, channels, _, _ = x.shape
        x = self.patch_embed(x)
        x = x.flatten(2).transpose(2, 1)
        x = x.reshape(b, -1, self.patch_embed.embed_dim)

        BF = x.shape[0]
        cls_tokens = self.cls_token.expand(BF, -1, -1)  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)
        # positional embed needs to be tiled for each frame (this does [1,2,3] --> [1,2,3,1,2,3]...)
        cls_embed = self.pos_embed[:, 0, :].unsqueeze(1)
        tile_pos_embed = self.pos_embed[:, 1:, :].repeat(1, self.num_frames, 1)
        # temporal embed needs to be repeated within each frame (this does [1,2,3] --> [1,1,1,2,2,2,3,3,3]...)
        tile_temporal_embed = self.temporal_embed.repeat_interleave(self.patches_per_frame, 1)
        total_pos_embed = tile_pos_embed + tile_temporal_embed
        total_pos_embed = torch.cat([cls_embed, total_pos_embed], dim=1)

        # Truncating the summed embedding supports clips shorter than num_frames.
        curr_patches = x.shape[1]
        x = x + total_pos_embed[:, :curr_patches]
        x = self.pos_drop(x)
        n = self.patches_per_frame
        f = curr_frames

        for blk in self.blocks:
            x = blk(x, self.einops_from_space, self.einops_to_space, self.einops_from_time,
                    self.einops_to_time,
                    time_n=n, space_f=f)

        x = self.norm(x)[:, 0]
        x = self.pre_logits(x)

        return x

    def forward(self, x):
        """Full forward pass: features followed by the classifier head."""
        x = self.forward_features(x)
        x = self.head(x)
        return x
| 14,164 | 40.784661 | 145 | py |
frozen-in-time | frozen-in-time-main/model/metric.py | """Module for computing performance metrics
"""
from pathlib import Path
import numpy as np
import scipy.stats
import torch
def t2v_metrics(sims, query_masks=None):
    """Compute text-to-video retrieval metrics from a similarity matrix.

    Args:
        sims (np.ndarray): N x M matrix of similarities between embeddings,
            where x_{i,j} = <text_embd[i], vid_embed[j]>
        query_masks (np.ndarray): mask any missing queries from the dataset
            (two videos in MSRVTT only have 19, rather than 20 captions)

    Returns:
        (dict[str:float]): retrieval metrics
    """
    assert sims.ndim == 2, "expected a matrix"
    num_queries, num_vids = sims.shape
    dists = -sims
    sorted_dists = np.sort(dists, axis=1)

    # The indices are computed such that they slice out the ground truth distances
    # from the pseudo-rectangular dist matrix.
    queries_per_video = num_queries // num_vids
    gt_idx = [[np.ravel_multi_index([ii, jj], (num_queries, num_vids))
               for ii in range(jj * queries_per_video, (jj + 1) * queries_per_video)]
              for jj in range(num_vids)]
    gt_idx = np.array(gt_idx)
    gt_dists = dists.reshape(-1)[gt_idx.reshape(-1)]
    gt_dists = gt_dists[:, np.newaxis]
    rows, cols = np.where((sorted_dists - gt_dists) == 0)  # find column position of GT

    # Tie-breaking: ties should be extremely rare but can distort the scores in
    # pathological cases (e.g. an all-zero similarity matrix). "optimistically"
    # matches prior VSE-style implementations; "averaging" averages over all
    # partial orderings implied by the ties (McSherry & Najork, ECIR 2008:
    # "Computing information retrieval performance measures efficiently in the
    # presence of tied scores").
    break_ties = "optimistically"
    if rows.size > num_queries:
        assert np.unique(rows).size == num_queries, "issue in metric evaluation"
        if break_ties == "optimistically":
            _, idx = np.unique(rows, return_index=True)
            cols = cols[idx]
        elif break_ties == "averaging":
            # fast implementation, based on https://stackoverflow.com/a/49239335
            locs = np.argwhere((sorted_dists - gt_dists) == 0)
            steps = np.diff(locs[:, 0])
            splits = np.nonzero(steps)[0] + 1
            splits = np.insert(splits, 0, 0)
            summed_cols = np.add.reduceat(locs[:, 1], splits)
            counts = np.diff(np.append(splits, locs.shape[0]))
            cols = summed_cols / counts

    # BUGFIX: the message is now actually formatted (it previously contained
    # bare "{} vs {}" placeholders); the ipdb breakpoint was removed.
    msg = "expected ranks to match queries ({} vs {})".format(cols.size, num_queries)
    assert cols.size == num_queries, msg

    if query_masks is not None:
        # remove invalid queries
        assert query_masks.size == num_queries, "invalid query mask shape"
        # BUGFIX: np.bool was removed in NumPy >= 1.24; use the builtin bool.
        cols = cols[query_masks.reshape(-1).astype(bool)]
        assert cols.size == query_masks.sum(), "masking was not applied correctly"
        # update number of queries to account for those that were missing
        num_queries = query_masks.sum()

    return cols2metrics(cols, num_queries)
def v2t_metrics(sims, query_masks=None):
    """Compute video-to-text retrieval metrics from a similarity matrix.

    Args:
        sims (np.ndarray): N x M matrix of similarities between embeddings,
            where x_{i,j} = <text_embd[i], vid_embed[j]>
        query_masks (np.ndarray): mask any missing captions from the dataset

    Returns:
        (dict[str:float]): retrieval metrics

    NOTES: We find the closest "GT caption" in the style of VSE, which
    corresponds to finding the rank of the closest relevant caption in
    embedding space:
    github.com/ryankiros/visual-semantic-embedding/blob/master/evaluation.py#L52-L56
    """
    # switch axes of text and video so rows index videos, columns index captions
    sims = sims.T

    assert sims.ndim == 2, "expected a matrix"
    num_queries, num_caps = sims.shape
    dists = -sims
    caps_per_video = num_caps // num_queries
    break_ties = "averaging"

    MISSING_VAL = 1E8
    query_ranks = []
    for ii in range(num_queries):
        row_dists = dists[ii, :]
        if query_masks is not None:
            # Set missing queries to have a distance of infinity. A missing query
            # refers to a query position `n` for a video that had less than `n`
            # captions (for example, a few MSRVTT videos only have 19 queries)
            row_dists[np.logical_not(query_masks.reshape(-1))] = MISSING_VAL

        # NOTE: Using distance subtraction to perform the ranking is easier to make
        # deterministic than using argsort, which suffers from the issue of defining
        # "stability" for equal distances. Example of distance subtraction code:
        # github.com/antoine77340/Mixture-of-Embedding-Experts/blob/master/train.py
        sorted_dists = np.sort(row_dists)

        # Rank every GT caption of this video; keep the best (smallest) rank.
        min_rank = np.inf
        for jj in range(ii * caps_per_video, (ii + 1) * caps_per_video):
            if row_dists[jj] == MISSING_VAL:
                # skip rankings of missing captions
                continue
            ranks = np.where((sorted_dists - row_dists[jj]) == 0)[0]
            if break_ties == "optimistically":
                rank = ranks[0]
            elif break_ties == "averaging":
                # NOTE: If there is more than one caption per video, its possible for the
                # method to do "worse than chance" in the degenerate case when all
                # similarities are tied. TODO(Samuel): Address this case.
                rank = ranks.mean()
            if rank < min_rank:
                min_rank = rank
        query_ranks.append(min_rank)
    query_ranks = np.array(query_ranks)

    return cols2metrics(query_ranks, num_queries)
def retrieval_as_classification(sims, query_masks=None):
    """Compute classification metrics from a similarity matrix.

    Args:
        sims (np.ndarray): label-by-query similarity scores (transposed below
            so that rows index queries and columns index labels).
        query_masks (np.ndarray): binary matrix marking the ground-truth
            label(s) of each query; also transposed below.

    Returns:
        (dict[str:float]): rank-based metrics computed over every ground-truth
        label of every query.
    """
    assert sims.ndim == 2, "expected a matrix"

    # switch axes of query-labels and video
    sims = sims.T
    query_masks = query_masks.T
    dists = -sims
    num_queries, num_labels = sims.shape
    break_ties = "averaging"

    query_ranks = []
    for ii in range(num_queries):
        row_dists = dists[ii, :]

        # NOTE: Using distance subtraction to perform the ranking is easier to make
        # deterministic than using argsort, which suffers from the issue of defining
        # "stability" for equal distances. Example of distance subtraction code:
        # github.com/antoine77340/Mixture-of-Embedding-Experts/blob/master/train.py
        sorted_dists = np.sort(row_dists)

        label_ranks = []
        for gt_label in np.where(query_masks[ii, :])[0]:
            ranks = np.where((sorted_dists - row_dists[gt_label]) == 0)[0]
            if break_ties == "optimistically":
                rank = ranks[0]
            elif break_ties == "averaging":
                # NOTE: with multiple labels per query, tied similarities can
                # make the averaged rank slightly pessimistic.
                rank = ranks.mean()
            else:
                raise ValueError(f"unknown tie-breaking method: {break_ties}")
            label_ranks.append(rank)
        # Avoid penalising for assigning higher similarity to other gt labels. This is
        # done by subtracting out the better ranked query labels. Note that this step
        # introduces a slight skew in favour of videos with lots of labels. We can
        # address this later with a normalisation step if needed.
        label_ranks = [x - idx for idx, x in enumerate(label_ranks)]
        # Include all labels in the final calculation
        query_ranks.extend(label_ranks)
    query_ranks = np.array(query_ranks)

    return cols2metrics(query_ranks, num_queries=len(query_ranks))
def cols2metrics(cols, num_queries):
    """Turn 0-indexed retrieval ranks into standard retrieval metrics.

    Args:
        cols: array of ranks, one per query (0 means the ground truth was
            retrieved first). Ranks may be fractional when ties were averaged,
            so R1 deliberately tests ``== 0`` rather than ``< 1``.
        num_queries: denominator for the recall percentages.

    Returns:
        dict with R1/R5/R10/R50 (percentages), MedR/MeanR (1-indexed ranks)
        and the geometric mean of R1, R5 and R10.
    """
    metrics = {"R1": 100 * float(np.sum(cols == 0)) / num_queries}
    for name, threshold in (("R5", 5), ("R10", 10), ("R50", 50)):
        metrics[name] = 100 * float(np.sum(cols < threshold)) / num_queries
    metrics["MedR"] = np.median(cols) + 1
    metrics["MeanR"] = np.mean(cols) + 1
    metrics["geometric_mean_R1-R5-R10"] = scipy.stats.mstats.gmean(
        [metrics[key] for key in ("R1", "R5", "R10")])
    return metrics
def mean_average_precision(sims, query_masks=None):
    """Mean average precision over queries, via an AP meter.

    NOTE(review): ``APMeter`` is neither defined nor imported anywhere in this
    module, so calling this function raises ``NameError`` as-is. It looks like
    it expects ``torchnet.meter.APMeter`` (or an equivalent) -- confirm and add
    the import before use.
    """
    ap_meter = APMeter()
    ap_meter.add(output=sims.T, target=query_masks.T)
    return {"mAP": ap_meter.value().mean()}
def acc(output, target):
    """Top-1 classification accuracy of ``output`` logits against ``target``."""
    with torch.no_grad():
        predictions = torch.argmax(output, dim=1)
        assert predictions.shape[0] == len(target)
        hits = torch.sum(predictions == target).item()
    return hits / len(target)
def my_metric2(output, target, k=3):
    """Top-k accuracy: fraction of targets among the k highest-scoring classes."""
    with torch.no_grad():
        topk_indices = torch.topk(output, k, dim=1)[1]
        assert topk_indices.shape[0] == len(target)
        hits = sum(torch.sum(topk_indices[:, i] == target).item() for i in range(k))
    return hits / len(target)
def video_precision(output, target):
    """Percentage of predicted (video, segment) pairs that match a ground-truth
    pair; both tensors are (batch, n_pairs, 2)."""
    assert output.shape[0] == target.shape[0]
    assert output.shape[2] == target.shape[2] == 2
    correct = 0
    for predicted_pairs, gt_pairs in zip(output, target):
        for pair in predicted_pairs:
            eq = torch.eq(pair, gt_pairs)
            # A pair counts when both of its entries match some gt row.
            correct += int(torch.logical_and(eq[:, 0], eq[:, 1]).any())
    return correct / (target.shape[0] * target.shape[1])
def video_precision_adj(output, target):
    """Adjusted video precision: ignores videos which have no aligning text.

    Same matching rule as ``video_precision``, but the denominator is the
    number of distinct target video ids (``target[:, :, 0]``) rather than
    every (batch, pair) slot.

    BUGFIX: the original repeated both shape asserts twice; the duplicates
    were removed (no behaviour change).
    """
    assert output.shape[0] == target.shape[0]
    assert output.shape[2] == target.shape[2] == 2
    correct = 0
    for bout, btarg in zip(output, target):
        for pair in bout:
            eq = torch.eq(pair, btarg)
            if torch.logical_and(eq[:, 0], eq[:, 1]).any():
                correct += 1
    denom = len(target[:, :, 0].unique())
    return correct / denom
# NOTE(review): this is an exact duplicate of the ``video_precision_adj``
# defined immediately above; Python keeps this later (identical) definition.
# One of the two copies can safely be deleted.
def video_precision_adj(output, target):
    """ adjusts the video precision metric by ignoring videos which have no aligning text."""
    assert output.shape[0] == target.shape[0]
    assert output.shape[2] == target.shape[2] == 2
    assert output.shape[0] == target.shape[0]
    assert output.shape[2] == target.shape[2] == 2
    correct = 0
    for bout, btarg in zip(output, target):
        for pair in bout:
            eq = torch.eq(pair, btarg)
            if torch.logical_and(eq[:, 0], eq[:, 1]).any():
                correct += 1
    denom = len(target[:, :, 0].unique())
    return correct / denom
| 14,381 | 38.839335 | 93 | py |
corrfitter | corrfitter-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# corrfitter documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 14 23:21:34 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import corrfitter
# NOTE: corrfitter must be importable at doc-build time; the version/release
# strings below are read from the installed package.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# extensions = ['sphinx.ext.autodoc','sphinx.ext.pngmath']
# extensions = ['sphinx.ext.autodoc','sphinx.ext.napoleon', 'sphinx.ext.pngmath']
extensions = ['sphinx.ext.autodoc','sphinx.ext.napoleon', 'sphinx.ext.imgmath']
# Render inline math as SVG images (sharper than PNG at any zoom level).
imgmath_image_format = 'svg' # "png"
imgmath_use_preview = True
imgmath_latex_preamble = r"\usepackage{arev}"
# imgmath_dvipng_args = ['-gamma', '0.35', '-D', '110', '-bg', 'Transparent']
# imgmath_dvipng_args = ['-gamma', '0.5', '-D', '100', '-bg', 'Transparent']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'corrfitter'
copyright = '2010-21, G.P. Lepage'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = corrfitter.__version__
# The full version, including alpha/beta/rc tags.
release = corrfitter.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'sphinxdoc'
# html_theme = 'scrolls'
# html_theme = 'agogo'
# html_theme = 'traditional'
# html_theme = 'haiku'
html_theme = 'pyramid'
# html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'corrfitterdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# Here a single manual is produced from the master document 'index'.
latex_documents = [
  ('index', 'corrfitter.tex', 'corrfitter Documentation',
   'G.P. Lepage', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
| 6,976 | 32.382775 | 81 | py |
deepgoplus-stability | deepgoplus-stability-master/modified_files/main.py | #!/usr/bin/env python
import os
from threadpoolctl import threadpool_limits, threadpool_info
import click as ck
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
from subprocess import Popen, PIPE
import time
from utils import Ontology, NAMESPACES
from aminoacids import to_onehot
import gzip
import sys
import logging
import subprocess
import math
import tensorflow as tf
import pickle
MAXLEN = 2000  # maximum sequence length the CNN accepts; longer sequences are windowed in get_data()
@ck.command()
@ck.option('--data-root', '-dr', default='data/', help='Data root folder', required=True)
@ck.option('--in-file', '-if', help='Input FASTA file', required=True)
@ck.option('--out-file', '-of', default='results.tsv', help='Output result file')
@ck.option('--go-file', '-gf', default='go.obo', help='Gene Ontology')
@ck.option('--model-file', '-mf', default='model.h5', help='Tensorflow model file')
@ck.option('--terms-file', '-tf', default='terms.pkl', help='List of predicted terms')
# NOTE(review): '-tf' below duplicates the short flag of --terms-file; '-af' was
# probably intended — confirm before changing, since it alters the CLI.
@ck.option('--annotations-file', '-tf', default='train_data.pkl', help='Experimental annotations')
@ck.option('--diamond-db', '-dd', default='train_data.dmnd', help='Diamond Database file')
@ck.option('--diamond-file', '-df', default='diamond.res', help='Diamond Mapping file')
@ck.option('--chunk-size', '-cs', default=1000, help='Number of sequences to read at a time')
@ck.option('--threshold', '-t', default=0.1, help='Prediction threshold')
@ck.option('--batch-size', '-bs', default=32, help='Batch size for prediction model')
@ck.option('--alpha', '-a', default=0.5, help='Alpha weight parameter')
def main(data_root, in_file, out_file, go_file, model_file, terms_file, annotations_file,
         diamond_db, diamond_file, chunk_size, threshold, batch_size, alpha):
    """Run the DeepGOPlus prediction pipeline on a FASTA file.

    Sequences are read in chunks, encoded via ``to_onehot`` (long sequences
    are split into overlapping windows by ``get_data``), scored by the CNN
    model, scores are propagated to all GO ancestors, and the resulting
    ``{protein_id: {go_id: score}}`` dict is pickled to ``out_file``.

    The DIAMOND similarity component is commented out in this modified
    version, so only CNN scores are used; consequently ``threshold``,
    ``alpha`` and the ``alphas`` dict below are currently unused.
    """
    # Check data folder and required files
    try:
        if os.path.exists(data_root):
            # All auxiliary files are resolved relative to the data root.
            go_file = os.path.join(data_root, go_file)
            model_file = os.path.join(data_root, model_file)
            terms_file = os.path.join(data_root, terms_file)
            annotations_file = os.path.join(data_root, annotations_file)
            diamond_db = os.path.join(data_root, diamond_db)
            diamond_file = os.path.join(data_root, diamond_file)
            if not os.path.exists(go_file):
                raise Exception(f'Gene Ontology file ({go_file}) is missing!')
            if not os.path.exists(model_file):
                raise Exception(f'Model file ({model_file}) is missing!')
            if not os.path.exists(terms_file):
                raise Exception(f'Terms file ({terms_file}) is missing!')
            if not os.path.exists(annotations_file):
                raise Exception(f'Annotations file ({annotations_file}) is missing!')
            if not os.path.exists(diamond_db):
                raise Exception(f'Diamond database ({diamond_db}) is missing!')
        else:
            raise Exception(f'Data folder {data_root} does not exist!')
    except Exception as e:
        logging.error(e)
        sys.exit(1)
    # Force single-threaded TensorFlow execution (determinism/benchmarking).
    tf.config.threading.set_inter_op_parallelism_threads(1)
    tf.config.threading.set_intra_op_parallelism_threads(1)
    # Load GO and read list of all terms
    go = Ontology(go_file, with_rels=True)
    terms_df = pd.read_pickle(terms_file)
    terms = terms_df['terms'].values.flatten()
    # Read known experimental annotations
    annotations = {}
    df = pd.read_pickle(annotations_file)
    for row in df.itertuples():
        annotations[row.proteins] = set(row.exp_annotations)
    # Generate diamond predictions
    # cmd = [
    #     "diamond", "blastp", "-d", diamond_db, "--more-sensitive", "-t", "/tmp",
    #     "-q", in_file, "--outfmt", "6", "qseqid", "sseqid", "bitscore", "-o",
    #     diamond_file]
    # proc = subprocess.run(cmd)
    # if proc.returncode != 0:
    #     logging.error('Error running diamond!')
    #     sys.exit(1)
    # diamond_preds = {}
    # mapping = {}
    # with open(diamond_file, 'r') as f:
    #     for line in f:
    #         it = line.strip().split()
    #         if it[0] not in mapping:
    #             mapping[it[0]] = {}
    #         mapping[it[0]][it[1]] = float(it[2])
    # for prot_id, sim_prots in mapping.items():
    #     annots = {}
    #     allgos = set()
    #     total_score = 0.0
    #     for p_id, score in sim_prots.items():
    #         allgos |= annotations[p_id]
    #         total_score += score
    #     allgos = list(sorted(allgos)) #modification
    #     sim = np.zeros(len(allgos), dtype=np.float32)
    #     for j, go_id in enumerate(allgos):
    #         s = 0.0
    #         for p_id, score in sim_prots.items():
    #             if go_id in annotations[p_id]:
    #                 s += score
    #         sim[j] = s / total_score
    #     for go_id, score in zip(allgos, sim):
    #         annots[go_id] = score
    #     diamond_preds[prot_id] = annots
    # Load CNN model
    model = load_model(model_file)
    # Alphas for the latest model (unused while the DIAMOND blending is commented out)
    alphas = {NAMESPACES['mf']: 0.55, NAMESPACES['bp']: 0.59, NAMESPACES['cc']: 0.46}
    # Alphas for the cafa2 model
    # alphas = {NAMESPACES['mf']: 0.63, NAMESPACES['bp']: 0.68, NAMESPACES['cc']: 0.48}
    # Limit BLAS threads so keras prediction runs single-threaded as well.
    with threadpool_limits(limits=1, user_api='blas'):
        start_time = time.time()
        total_seq = 0
        # NOTE(review): 'w' is opened eagerly and only closed at the end;
        # it is not closed if an exception occurs inside the loop.
        w = open(out_file, 'wb')
        w_dict = {}
        for prot_ids, sequences in read_fasta(in_file, chunk_size):
            total_seq += len(prot_ids)
            deep_preds = {}
            ids, data = get_data(sequences)
            preds = model.predict(data, batch_size=batch_size)
            assert preds.shape[1] == len(terms)
            # A long protein may produce several windows; keep, per GO term,
            # the maximum score over all windows of the same protein.
            for i, j in enumerate(ids):
                prot_id = prot_ids[j]
                if prot_id not in deep_preds:
                    deep_preds[prot_id] = {}
                for l in range(len(terms)):
                    #if preds[i, l] >= 0.01: #modification
                    if terms[l] not in deep_preds[prot_id]:
                        deep_preds[prot_id][terms[l]] = preds[i, l]
                    else:
                        deep_preds[prot_id][terms[l]] = max(
                            deep_preds[prot_id][terms[l]], preds[i, l])
            # Combine diamond preds and deepgo
            for prot_id in prot_ids:
                annots = {}
                #if prot_id in diamond_preds:
                #    for go_id, score in diamond_preds[prot_id].items():
                #        if go.has_term(go_id):
                #            annots[go_id] = np.float64(0) #score * alphas[go.get_namespace(go_id)] #modification
                for go_id, score in deep_preds[prot_id].items():
                    if go_id in annots:
                        annots[go_id] += np.float64(score) #modification
                        #annots[go_id] += (1 - alphas[go.get_namespace(go_id)]) * score
                    else:
                        annots[go_id] = np.float64(score) #modification
                        #annots[go_id] = (1 - alphas[go.get_namespace(go_id)]) * score
                # Propagate scores with ontology structure
                gos = list(annots.keys())
                for go_id in gos:
                    for g_id in go.get_anchestors(go_id):
                        if g_id in annots:
                            annots[g_id] = max(annots[g_id], annots[go_id])
                        else:
                            annots[g_id] = annots[go_id]
                #w.write(prot_id)
                w_dict[prot_id] = {} #modification
                for go_id, score in annots.items():
                    #if score >= threshold: #modification
                    #w.write('\t' + go_id + '|%s' % score)
                    w_dict[prot_id][go_id] = np.float64(score)
                #w.write('\n')
        pickle.dump(w_dict, w) #modification
        w.close()
        total_time = time.time() - start_time
        print('Total prediction time for %d sequences is %d' % (total_seq, total_time))
def read_fasta(filename, chunk_size):
    """Lazily read a FASTA file, yielding records in chunks.

    Args:
        filename (str): path to the FASTA file.
        chunk_size (int): maximum number of records per yielded chunk.

    Yields:
        tuple: ``(info, seqs)`` lists of equal length, where ``info[i]`` is
        the first whitespace-separated token of the i-th record header and
        ``seqs[i]`` the concatenation of that record's sequence lines.

    Fixes over the original version:
    - an empty input file no longer yields a bogus ``([''], [''])`` chunk;
    - a bare ``>`` header line no longer raises IndexError (empty id used).
    """
    seqs = list()
    info = list()
    seq = ''
    inf = ''
    with open(filename, 'r') as f:
        for line in f:
            line = line.strip()
            if line.startswith('>'):
                # Close the previous record (if any) before starting a new one.
                if seq != '':
                    seqs.append(seq)
                    info.append(inf)
                    if len(info) == chunk_size:
                        yield (info, seqs)
                        seqs = list()
                        info = list()
                seq = ''
                # Tolerate a header that is just '>': fall back to an empty id.
                parts = line[1:].split()
                inf = parts[0] if parts else ''
            else:
                seq += line
    # Emit the final record only if the file actually contained data.
    if seq != '' or inf != '':
        seqs.append(seq)
        info.append(inf)
        yield (info, seqs)
def get_data(sequences):
    """Convert protein sequences into one-hot encoded model input.

    Sequences longer than ``MAXLEN`` are cut into overlapping windows of
    length ``MAXLEN`` with stride ``MAXLEN - 128``; ``ids`` maps every
    window back to the index of its originating sequence.

    Args:
        sequences (list[str]): amino-acid sequences.

    Returns:
        tuple: ``(ids, data)`` where ``ids`` is a list of source-sequence
        indices and ``data`` is a float32 array of shape
        ``(len(ids), MAXLEN, 21)`` filled by ``to_onehot``.
    """
    windows = []
    ids = []
    for idx, seq in enumerate(sequences):
        if len(seq) <= MAXLEN:
            windows.append(seq)
            ids.append(idx)
            continue
        # Slide an overlapping window over the long sequence.
        start = 0
        while start < len(seq):
            windows.append(seq[start: start + MAXLEN])
            ids.append(idx)
            start += MAXLEN - 128
    data = np.zeros((len(windows), MAXLEN, 21), dtype=np.float32)
    for row, window in enumerate(windows):
        data[row, :, :] = to_onehot(window)
    return ids, data
if __name__ == '__main__':
    # Click parses the command line and calls main() with the option values.
    main()
| 9,185 | 37.596639 | 109 | py |
torchTT | torchTT-main/setup.py | from setuptools import setup, Extension
import platform

# ASCII banner printed at build time (kept byte-identical).
logo_ascii = """
 _ _ _____ _____
| |_ ___ _ __ ___| |_|_ _|_ _|
| __/ _ \| '__/ __| '_ \| | | |
| || (_) | | | (__| | | | | | |
\__\___/|_| \___|_| |_|_| |_|
"""

# torch must be importable because the optional C++ extension is compiled
# with torch's own build helpers.
try:
    from torch.utils.cpp_extension import BuildExtension, CppExtension
except ImportError:
    # Only catch ImportError; any other failure inside torch should surface.
    raise Exception("Torch has to be installed first")

os_name = platform.system()

print()
print(logo_ascii)
print()

# Metadata shared by both install variants (previously duplicated verbatim
# in the two setup() calls below — a maintenance hazard).
common_setup_kwargs = dict(
    name='torchTT',
    version='2.0',
    description='Tensor-Train decomposition in pytorch',
    url='https://github.com/ion-g-ion/torchTT',
    author='Ion Gabriel Ion',
    author_email='ion.ion.gabriel@gmail.com',
    license='MIT',
    packages=['torchtt'],
    install_requires=['numpy>=1.18', 'torch>=1.7', 'opt_einsum'],
    test_suite='tests',
    zip_safe=False,
)

if os_name in ('Linux', 'Darwin'):
    # On Linux/macOS the C++ extension can be compiled.
    setup(ext_modules=[
              CppExtension('torchttcpp', ['cpp/cpp_ext.cpp'], extra_compile_args=['-lblas', '-llapack', '-std=c++14', '-Wno-c++11-narrowing', '-g', '-w', '-O3']),
          ],
          cmdclass={
              'build_ext': BuildExtension
          },
          classifiers=[
              "Programming Language :: Python :: 3",
              "License :: OSI Approved :: MIT License",
              "Operating System :: OS Independent",
          ],
          **common_setup_kwargs)
else:
    import warnings
    warnings.warn("\x1B[33m\nC++ implementation not available. Using pure Python.\n\033[0m")
    # Pure-Python fallback (e.g. on Windows).
    setup(**common_setup_kwargs)
| 1,847 | 27.875 | 156 | py |
torchTT | torchTT-main/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = 'torchtt'
copyright = '2023, Ion Gabriel Ion'
author = 'Ion Gabriel Ion'
# Version string shown in the rendered documentation.
release = '2.0'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
# autodoc pulls API docs from docstrings; napoleon parses Google/NumPy style.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.napoleon', 'sphinx.ext.intersphinx']
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
# import sphinx_bootstrap_theme
#html_theme = 'bootstrap'
#html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# Read-the-Docs theme (third-party package sphinx_rtd_theme must be installed).
html_theme = 'sphinx_rtd_theme'
html_static_path = ['_static']
# Logo is fetched from GitHub rather than shipped with the docs.
html_logo = 'https://github.com/ion-g-ion/torchTT/blob/main/logo_small.png?raw=true'
# #html_logo = "logo_small.png"
#
# # Theme options are theme-specific and customize the look and feel of a
# # theme further.
# html_theme_options = {
# # Navigation bar title. (Default: ``project`` value)
# 'navbar_title': "torchTT",
#
# # Tab name for entire site. (Default: "Site")
# 'navbar_site_name': "Site",
#
# # A list of tuples containing pages or urls to link to.
# # Valid tuples should be in the following forms:
# # (name, page) # a link to a page
# # (name, "/aa/bb", 1) # a link to an arbitrary relative url
# # (name, "http://example.com", True) # arbitrary absolute url
# # Note the "1" or "True" value above as the third argument to indicate
# # an arbitrary url.
# 'navbar_links': [
# ("Installation guide", "./docs/install"),
# ("Overview", "./docs/package-overview"),
# ("Reference", "./docs/modules"),
# ("Github", "https://github.com/ion-g-ion/torchTT", True),
# ],
#
# # Render the next and previous page links in navbar. (Default: true)
# 'navbar_sidebarrel': False,
#
# # Render the current pages TOC in the navbar. (Default: true)
# 'navbar_pagenav': False,
#
# # Tab name for the current pages TOC. (Default: "Page")
# # 'navbar_pagenav_name': "Page",
#
# # Global TOC depth for "site" navbar tab. (Default: 1)
# # Switching to -1 shows all levels.
# 'globaltoc_depth': 2,
#
# # Include hidden TOCs in Site navbar?
# #
# # Note: If this is "false", you cannot have mixed ``:hidden:`` and
# # non-hidden ``toctree`` directives in the same page, or else the build
# # will break.
# #
# # Values: "true" (default) or "false"
# 'globaltoc_includehidden': "true",
#
# # HTML navbar class (Default: "navbar") to attach to <div> element.
# # For black navbar, do "navbar navbar-inverse"
# 'navbar_class': "navbar navbar-inverse",
#
# # Fix navigation bar to top of page?
# # Values: "true" (default) or "false"
# 'navbar_fixed_top': "true",
#
# # Location of link to source.
# # Options are "nav" (default), "footer" or anything else to exclude.
# # 'source_link_position': "nav",
#
# # Bootswatch (http://bootswatch.com/) theme.
# #
# # Options are nothing (default) or the name of a valid theme
# # such as "cosmo" or "sandstone".
# #
# # The set of valid themes depend on the version of Bootstrap
# # that's used (the next config option).
# #
# # Currently, the supported themes are:
# # - Bootstrap 2: https://bootswatch.com/2
# # - Bootstrap 3: https://bootswatch.com/3
# 'bootswatch_theme': "united",
#
# # Choose Bootstrap version.
# # Values: "3" (default) or "2" (in quotes)
# 'bootstrap_version': "3",
# } | 4,105 | 36.669725 | 107 | py |
torchTT | torchTT-main/examples/random_tt.py | #%% Imports
import torch as tn
import torchtt as tntt


def _report_sample_variance(shape, ranks, expected, **randn_kwargs):
    """Draw a random TT tensor, densify it and print its sample variance.

    Args:
        shape (list[int]): mode sizes of the tensor.
        ranks (list[int]): TT ranks (length ``len(shape) + 1``).
        expected (str): variance the sample estimate should be close to
            (only used in the printed message).
        **randn_kwargs: forwarded verbatim to ``tntt.randn`` (e.g. ``var=4.0``),
            so omitting ``var`` keeps the library default exactly as before.
    """
    x = tntt.randn(shape, ranks, **randn_kwargs)
    x_full = x.full()
    # std()**2 is the sample variance over all entries of the dense tensor.
    print('Var = ', tn.std(x_full).numpy()**2, ' (has to be comparable to %s)' % expected)


# The four demo cases below were previously duplicated inline; the helper
# removes the copy-paste while printing exactly the same messages.
#%% Variance 1.0 (library default)
_report_sample_variance([30]*5, [1, 8, 16, 16, 8, 1], '1.0')
#%% Variance 4.0
_report_sample_variance([30]*5, [1, 8, 16, 16, 8, 1], '4.0', var=4.0)
#%% Variance 0.01
_report_sample_variance([30]*5, [1, 8, 16, 16, 8, 1], '0.01', var=0.01)
#%% Variance 1.0 (longer train)
_report_sample_variance([10]*7, [1, 4, 4, 4, 4, 4, 4, 1], '1.0', var=1.0)
| 708 | 28.541667 | 75 | py |
torchTT | torchTT-main/examples/automatic_differentiation.py | """
# Automatic differentiation
Being based on `pytorch`, `torchtt` can handle automatic differentiation with respect to the TT cores.
"""
#%% Imports
import torch as tn
import torchtt as tntt
#%% First, a function to differentiate is created and some tensors:
N = [2,3,4,5]
A = tntt.randn([(n,n) for n in N],[1]+[2]*(len(N)-1)+[1])
y = tntt.randn(N,A.R)
x = tntt.ones(N)
def f(x,A,y):
    """Scalar test function: norm of the contraction of A(x-y) with (x-y)."""
    z = tntt.dot(A @ (x-y),(x-y))
    return z.norm()
#%% In order to compute the derivative of a scalar with respect to all cores of a TT object, the AD graph recording has to be started:
tntt.grad.watch(x)
#%% Using the `torchtt.grad.grad()` method, the gradient is computed:
val = f(x,A,y)
grad_cores = tntt.grad.grad(val, x)
#%% The variable `grad_cores` is a list of tensors representing the derivatives of `f()` with resect to the individual core entries.
# For checking, we compute the derivative of teh function with respect to one element of the core
# Central finite difference on a single entry of the second core, used as a
# reference for the AD result.
h = 1e-7
x1 = x.clone()
x1.cores[1][0,0,0] += h
x2 = x.clone()
x2.cores[1][0,0,0] -= h
derivative = (f(x1,A,y)-f(x2,A,y))/(2*h)
# Relative error between finite-difference and AD derivative (should be small).
print(tn.abs(derivative-grad_cores[1][0,0,0])/tn.abs(derivative))
# The functions `torchtt.grad.grad()` and `torchtt.grad.watch()` can take an additional list of modes `core_indices` as argument which decides which cores are watched and differentiaated with respect to.
| 1,353 | 33.717949 | 203 | py |
torchTT | torchTT-main/examples/system_solvers.py | """
Linear solvers in the TT format
This tutorial addresses solving multilinear systems $\mathsf{Ax}=\mathsf{b}$ in the TT format.
"""
#%% Imports
import torch as tn
import torchtt as tntt
import datetime
#%% Small example
# A random tensor operator $\mathsf{A}$ is created in the TT format. We create a random right-hand side $\mathsf{b} = \mathsf{Ax}$, where $\mathsf{x}$ is a random tensor in the TT format.
# This way the solution of $\mathsf{Ax}=\mathsf{b}$ is known and we can compare it as a reference. This works only for small random tensors.
A = tntt.random([(4,4),(5,5),(6,6)],[1,2,3,1])
x = tntt.random([4,5,6],[1,2,3,1])
b = A @ x
# Solve the multilinear system $\mathsf{Ax}=\mathsf{b}$ using the method torchtt.solvers.amen_solve().
xs = tntt.solvers.amen_solve(A,b, x0 = b, eps = 1e-7)
# The relative residual norm and the relative error of the solution are reported:
print(xs)
print('Relative residual error ',(A@xs-b).norm()/b.norm())
print('Relative error of the solution ',(xs-x).norm()/x.norm())
#%% Finite differences
# We now solve the problem $\Delta u = 1$ in $[0,1]^d$ with $ u = 0 $ on the entire boundary using finite differences.
# First, set the size of the problem (n is the mode size and d is the number of dimensions):
dtype = tn.float64
n = 64
d = 8
# Create the finite differences matrix corresponding to the problem. The operator is constructed directly in the TT format as it follows
# 1d second-difference stencil with homogeneous Dirichlet rows zeroed out.
L1d = -2*tn.eye(n, dtype = dtype)+tn.diag(tn.ones(n-1,dtype = dtype),-1)+tn.diag(tn.ones(n-1,dtype = dtype),1)
L1d[0,1] = 0
L1d[-1,-2] = 0
L1d *= (n-1)**2
L1d = tntt.TT(L1d, [(n,n)])
L_tt = tntt.zeros([(n,n)]*d)
# NOTE(review): the right identity factor is eye([n]*(d-2)), which gives
# i + 1 + (d-2) != d modes for i > 1; eye([n]*(d-i-1)) looks intended — verify.
for i in range(1,d-1):
    L_tt = L_tt+tntt.eye([n]*i)**L1d**tntt.eye([n]*(d-2))
L_tt = L_tt + L1d**tntt.eye([n]*(d-1)) + tntt.eye([n]*(d-1))**L1d
L_tt = L_tt.round(1e-14)
# The right hand site of the finite difference system is also computed in the TT format
b1d = tn.ones(n, dtype=dtype)
#b1d[0] = 0
#b1d[-1] = 0
b1d = tntt.TT(b1d)
b_tt = b1d
for i in range(d-1):
    b_tt = b_tt**b1d
# Solve the system
time = datetime.datetime.now()
x = tntt.solvers.amen_solve(L_tt, b_tt ,x0 = b_tt, nswp = 20, eps = 1e-7, verbose = True, preconditioner='c', use_cpp = True)
time = datetime.datetime.now() - time
print('Relative residual: ',(L_tt@x-b_tt).norm()/b_tt.norm())
print('Solver time: ',time)
# Display the structure of the TT
print(x)
#%% Try one more time on the GPU (if available).
if tn.cuda.is_available():
time = datetime.datetime.now()
x = tntt.solvers.amen_solve(L_tt.cuda(), b_tt.cuda() ,x0 = b_tt.cuda(), nswp = 20, eps = 1e-8, verbose = True, preconditioner='c')
time = datetime.datetime.now() - time
x = x.cpu()
print('Relative residual: ',(L_tt@x-b_tt).norm()/b_tt.norm())
print('Solver time: ',time)
else:
print('GPU not available...') | 2,835 | 34.898734 | 188 | py |
torchTT | torchTT-main/examples/tensor_completion.py | #%% Imports
import torchtt as tntt
import torch as tn
import numpy as np
import datetime
#%% Preparation
# create a random tensor
N = 20
target = tntt.random([N]*4,[1,4,5,3,1])
# Build a smooth low-rank target tensor on a 4d grid over [0,1]^4.
Xs = tntt.meshgrid([tn.linspace(0,1,N, dtype = tn.float64)]*4)
target = Xs[0]+1+Xs[1]+Xs[2]+Xs[3]+Xs[0]*Xs[1]+Xs[1]*Xs[2]+tntt.TT(tn.sin(Xs[0].full()))
target = target.round(1e-10)
print(target.R)
M = 2500 # number of observations
# Random multi-indices at which the tensor is observed.
indices = tn.randint(0,N,(M,4))
# observations are considered to be noisy
sigma_noise = 0.001
obs = tn.normal(target.apply_mask(indices), sigma_noise)
# define the loss function
# Squared misfit on the observed entries plus a tiny norm regularizer.
loss = lambda x: (x.apply_mask(indices)-obs).norm()**2+1e-18*x.norm()**2
#%% Manifold learning
print('Riemannian gradient descent\n')
# starting point
x = tntt.randn([N]*4,[1,3,3,3,1])
tme = datetime.datetime.now()
# iterations
for i in range(10250):
    # manifold gradient
    gr = tntt.manifold.riemannian_gradient(x,loss)
    step_size = 1.0
    R = x.R
    # step update
    # Retract back onto the fixed-rank manifold by rounding to the old ranks.
    x = (x - step_size * gr).round(0,R)
    # compute loss value
    if (i+1)%10 == 0:
        loss_value = loss(x)
        print('Iteration %4d loss value %e error %e tensor norm %e'%(i+1,loss_value.numpy(),(x-target).norm()/target.norm(), x.norm()**2))
tme = datetime.datetime.now() - tme
print('')
print('Time elapsed',tme)
print('Number of observations %d, tensor shape %s, percentage of entries observed %6.4f'%(M,str(x.N),100*M/np.prod(x.N)))
print('Number of unknowns %d, number of observations %d, DoF/observations %.6f'%(tntt.numel(x),M,tntt.numel(x)/M))
print('Rank after rounding',x.round(1e-6))
#%% Classical gradient descent w.r.t. TT-cores
# x = tnt.random([N]*4,[1,5,5,5,1])
#
# for i in range(100):
# tnt.grad.watch(x)
# loss_val =loss(x)
# cores_update = tnt.grad.grad(loss_val,x)
# tnt.grad.unwatch(x)
# x = tnt.TT([c1-0.015*c2 for c1,c2 in zip(x.cores,cores_update)])
#
# print('Iteration %4d loss value %e error %e'%(i+1,loss_val.detach().numpy(),(x-target).norm()/target.norm()))
| 2,005 | 28.5 | 138 | py |
torchTT | torchTT-main/examples/basic_nn.py | #!/usr/bin/env python
# coding: utf-8
#%% Tensor Train layers for neural networks
# In this section, the TT layers are introduced.
# Imports:
import torch as tn
import torch.nn as nn
import datetime
import torchtt as tntt
#%% We consider a linear layer $\mathcal{LTT}(\mathsf{x}) = \mathsf{Wx}+\mathsf{b}$ acting on a tensor input $\mathsf{x}$ of shape $n_1 \times \cdots \times n_d$ and returning a tensor of shape $m_1\times\cdots\times m_d$. The corresponding weight matrix $\mathsf{W}$ would have the shape $(m_1\times\cdots\times m_d) \times (n_1 \times \cdots \times n_d)$. The goal is to represent the weights tensor operator in TT format and perform the learning with respect tot the cores of the TT decomposition (ranks have to be fixed a priori).
# Due to the AD functionality of `torchtt`, the gradient with respect tot the cores can be computed for any network structure.
# TT layers can be added using `torchtt.nn.LinearLayerTT()` class.
# In the following, a neural netywork with 3 hidden layers and one linear layer is created.
# The shapes of the individual layers are
# $\mathbb{R}^{16} \times\mathbb{R}^{16} \times\mathbb{R}^{16} \times\mathbb{R}^{16} \underset{}{\longrightarrow} \mathbb{R}^8 \times\mathbb{R}^8 \times\mathbb{R}^8 \times\mathbb{R}^8 \underset{}{\longrightarrow} \mathbb{R}^4 \times\mathbb{R}^4 \times\mathbb{R}^4 \times\mathbb{R}^4 \underset{}{\longrightarrow} \mathbb{R}^2 \times\mathbb{R}^4 \times\mathbb{R}^2 \times\mathbb{R}^4 \underset{}{\longrightarrow} \mathbb{R}^{10}$.
class BasicTT(nn.Module):
    """Toy network: three TT linear layers with ReLU, then a dense head.

    Maps a 16x16x16x16 input tensor (optionally with a leading batch mode)
    to 10 output values.
    """

    def __init__(self):
        super().__init__()
        # TT-factorised linear layers; the TT ranks are fixed a priori.
        self.ttl1 = tntt.nn.LinearLayerTT([16, 16, 16, 16], [8, 8, 8, 8], [1, 3, 3, 3, 1])
        self.ttl2 = tntt.nn.LinearLayerTT([8, 8, 8, 8], [4, 4, 4, 4], [1, 2, 2, 2, 1])
        self.ttl3 = tntt.nn.LinearLayerTT([4, 4, 4, 4], [2, 4, 2, 4], [1, 2, 2, 2, 1])
        # 2*4*2*4 = 64 features feed the dense output layer.
        self.linear = nn.Linear(64, 10, dtype=tn.float32)

    def forward(self, x):
        # Apply the three TT layers, each followed by a ReLU.
        for tt_layer in (self.ttl1, self.ttl2, self.ttl3):
            x = tn.relu(tt_layer(x))
        # Flatten the remaining tensor modes before the dense head.
        return self.linear(tn.reshape(x, [-1, 64]))
#% Create the model and print the number of trainable parameters as well as the model structure.
model = BasicTT()
print('Number of trainable parameters:', len(list(model.parameters())))
print(model)
#%% A random input is created and passed as argument to the model. Batch evaluation is also possible by extending the dimensionality of the input before the leading mode.
input = tn.rand((16,16,16,16), dtype = tn.float32)
pred = model.forward(input)
# Random batch of 1000 inputs with random (dummy) labels for the demo loop.
input_batch = tn.rand((1000,16,16,16,16), dtype = tn.float32)
label_batch = tn.rand((1000,10), dtype = tn.float32)
#%% The obtained network can be trained similarily to other `torch` models.
# A loss function together with an optimizer are defined.
criterion = nn.CrossEntropyLoss()
optimizer = tn.optim.Adam(model.parameters(), lr = 0.001)
#%% A training loop is executed to exemplify the training parameters update procedure. An example where a true dataset is used is presented [here](https://github.com/ion-g-ion/torchTT/blob/main/examples/mnist_nn.ipynb).
for epoch in range(5):
    optimizer.zero_grad()
    outputs = model(input_batch)
    loss = criterion(outputs, label_batch)
    loss.backward()
    optimizer.step()
    # print statistics
    print('Epoch %d, loss %e'%(epoch+1,loss.item()))
print('Finished Training')
#%% If the GPU is available, the model can be run on it to get a speedup (should be run 2 times to see the speedup due to CUDA warm-up).
if tn.cuda.is_available():
    model_gpu = BasicTT().cuda()
    input_batch_gpu = tn.rand((400,16,16,16,16)).cuda()
    input_batch = tn.rand((400,16,16,16,16))
    # Time the CPU forward pass for comparison.
    tme = datetime.datetime.now()
    pred = model.forward(input_batch)
    tme = datetime.datetime.now() - tme
    print('Time on CPU ',tme)
    # Time the GPU forward pass; .cpu() blocks until the computation is done.
    tme = datetime.datetime.now()
    pred_gpu = model_gpu.forward(input_batch_gpu).cpu()
    tme = datetime.datetime.now() - tme
    print('Time on GPU ',tme)
| 4,041 | 42.462366 | 534 | py |
torchTT | torchTT-main/examples/mnist_nn.py | #!/usr/bin/env python
# coding: utf-8
#%% Digit recognition using TT neural networks
# The TT layer is applied to the MNIST dataset.
# Imports:
import torch as tn
import torch.nn as nn
import torchtt as tntt
from torch import optim
from torchvision import datasets
from torchvision.transforms import ToTensor
from torch.utils.data import DataLoader
# Use the GPU when available, otherwise fall back to the CPU.
device = tn.device('cuda' if tn.cuda.is_available() else 'cpu')
#%% Download the dataset and store it to a subfolder 'data'.
train_data = datasets.MNIST(root = 'downloads', train = True, transform = ToTensor(), download = True)
test_data = datasets.MNIST(root = 'downloads', train = False, transform = ToTensor())
#%% Create 2 dataloaders for the training set and the test set.
dataloader_train = tn.utils.data.DataLoader(train_data, batch_size=1000, shuffle=True, num_workers=10)
dataloader_test = tn.utils.data.DataLoader(test_data, batch_size=100, shuffle=True, num_workers=10)
#%% Define the neural network arhitecture. I contains 2 hidden TT layers (with RELU activation function) with a linear output layer. A sotmax is applied at the output.
class BasicTT(nn.Module):
    """MNIST classifier: two TT linear layers, a dense head and log-softmax."""

    def __init__(self):
        super().__init__()
        # 28x28 images are fed reshaped as 1x7x4x7x4 tensors.
        self.ttl1 = tntt.nn.LinearLayerTT([1, 7, 4, 7, 4], [8, 10, 10, 10, 10], [1, 4, 2, 2, 2, 1])
        self.ttl2 = tntt.nn.LinearLayerTT([8, 10, 10, 10, 10], [8, 3, 3, 3, 3], [1, 2, 2, 2, 2, 1])
        # 8*3*3*3*3 = 81*8 features enter the dense output layer.
        self.linear = nn.Linear(81*8, 10, dtype=tn.float32)
        self.logsoftmax = nn.LogSoftmax(1)

    def forward(self, x):
        hidden = tn.relu(self.ttl1(x))
        hidden = tn.relu(self.ttl2(hidden))
        # Flatten the tensor modes and produce class log-probabilities.
        logits = self.linear(hidden.view(-1, 81*8))
        return self.logsoftmax(logits)
#%% Instantiate the model and choose the optimizer and the loss function.
model = BasicTT().to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr = 0.001)
#%% Start the training for 30 epochs
n_epochs = 30
for epoch in range(n_epochs):
    for i,(input,label) in enumerate(dataloader_train):
        # Reshape each 1x28x28 image to the 1x7x4x7x4 TT-layer input shape.
        input = tn.reshape(input.to(device),[-1,1,7,4,7,4])
        label = label.to(device)
        optimizer.zero_grad()
        output = model(input)
        loss = loss_function(output, label)
        loss.backward()
        optimizer.step()
        print('Epoch %d/%d iteration %d/%d loss %e'%(epoch+1,n_epochs,i+1,len(dataloader_train),loss))
#%% Compute the accuracy over the test set.
n_correct = 0
n_total = 0
for (input,label) in dataloader_test:
    input = tn.reshape(input.to(device),[-1,1,7,4,7,4])
    output = model(input).cpu()
    # argmax over the class dimension gives the predicted digit.
    n_correct += tn.sum(tn.max(output,1)[1] == label)
    n_total += input.shape[0]
print('Test accuracy ',n_correct/n_total)
| 2,759 | 31.470588 | 167 | py |
torchTT | torchTT-main/examples/cuda.py | """
# GPU acceleration
The package `torchtt` can use the built-in GPU acceleration from `pytorch`.
"""
#%% Imports and check if any CUDA device is available.
import datetime
import torch as tn
try:
    import torchtt as tntt
except:
    # NOTE(review): this bare except only prints a message; if the import fails,
    # 'tntt' stays undefined and the script crashes later — confirm intent.
    print('Installing torchTT...')
    # %pip install git+https://github.com/ion-g-ion/torchTT
print('CUDA available:',tn.cuda.is_available())
print('Device name: ' + tn.cuda.get_device_name())
#%% Define a function to test. It performs 2 matrix vector products in TT-format and a rank rounding.
# The return result is a scalar.
def f(x, A, y):
    """Benchmark kernel: matvecs, rank growth, rank rounding, contraction.

    Args:
        x (tntt.TT): input TT tensor.
        A (tntt.TT): input TT matrix.
        y (tntt.TT): input TT tensor.

    Returns:
        torch.tensor: scalar result of the final contraction.
    """
    # Summing the two (identical) matvecs deliberately grows the TT rank.
    z = A @ y + A @ y
    # Rank rounding: internally QR and SVD decompositions.
    z = z.round(1e-12)
    # One more rank-growing elementwise operation.
    z = z + z * x
    # Contract everything down to a scalar.
    return tntt.dot(x, z)
#%% Generate random tensors in the TT-format (on the CPU).
x = tntt.random([200,300,400,500],[1,10,10,10,1])
y = tntt.random([200,300,400,500],[1,8,8,8,1])
A = tntt.random([(200,200),(300,300),(400,400),(500,500)],[1,8,8,8,1])
#%% Run the function f() and report the time.
tme_cpu = datetime.datetime.now()
f(x,A,y)
tme_cpu = datetime.datetime.now() - tme_cpu
print('Time on CPU: ',tme_cpu)
#%% Move the defined tensors on GPU. Similarily to pytorch tensors one can use the function cuda() to return a copy of a TT instance on the GPU.
# All the cores of the returned TT object are on the GPU.
x = x.cuda()
y = y.cuda()
A = A.cuda()
#%% The function is executed once without timing to "warm-up" the CUDA.
f(x*0,A*0,0*y).cpu()
#%% Run the function again. This time the runtime is reported.
# The return value is moved to CPU to assure blocking until all computations are done.
tme_gpu = datetime.datetime.now()
f(x,A,y).cpu()
tme_gpu = datetime.datetime.now() - tme_gpu
print('Time with CUDA: ',tme_gpu)
#%% The speedup is reported
print('Speedup: ',tme_cpu.total_seconds()/tme_gpu.total_seconds(),' times.')
#%% This time we perform the same test without using the rank rounding.
# The expected result is better since the rank rounding contains QR and SVD which are not that parallelizable.
def g(x, A, y):
    """Same benchmark kernel as ``f`` but without the rank rounding step.

    Args:
        x (tntt.TT): input TT tensor.
        A (tntt.TT): input TT matrix.
        y (tntt.TT): input TT tensor.

    Returns:
        torch.tensor: scalar result of the final contraction.
    """
    # Summing the two (identical) matvecs deliberately grows the TT rank.
    z = A @ y + A @ y
    # Another rank-growing addition (no rounding in this variant).
    z = z + (z + x)
    # Contract everything down to a scalar.
    return tntt.dot(x, z)
# put tensors on CPU
x, y, A = x.cpu(), y.cpu(), A.cpu()
# perform the test
tme_cpu = datetime.datetime.now()
g(x,A,y)
tme_cpu = datetime.datetime.now() - tme_cpu
print('Time on CPU: ',tme_cpu)
# move the tensors back to GPU
x, y, A = x.cuda(), y.cuda(), A.cuda()
# execute the function
# .cpu() on the result blocks until all GPU work is finished.
tme_gpu = datetime.datetime.now()
g(x,A,y).cpu()
tme_gpu = datetime.datetime.now() - tme_gpu
print('Time with CUDA: ',tme_gpu)
print('Speedup: ',tme_cpu.total_seconds()/tme_gpu.total_seconds(),' times.')
#%% A tensor can be copied to a differenct device using the to() method. Usage is similar to torch.tensor.to()
dev = tn.cuda.current_device()
x_cuda = x.to(dev)
x_cpu = x_cuda.to(None) | 3,398 | 30.472222 | 145 | py |
torchTT | torchTT-main/examples/basic_tutorial.py | """
Basic tutorial
This notebook is a tutorial on how to use the basic functionalities of the `torchtt` package.
"""
#%% Imports
import torch as tn
import torchtt as tntt
#%% Decomposition of a full tensor in TT format
# We now create a 4d `torch.tensor` which we will use later
tens_full = tn.reshape(tn.arange(32*16*8*10, dtype = tn.float64),[32,16,8,10])
# The TT approximation of a given tensor is $\mathsf{x}_{i_1i_2...i_d} \approx \sum\limits_{r_1,...,r_{d-1}=1}^{R_1,...,R_{d-1}} \mathsf{g}^{(1)}_{1i_1r_1}\cdots\mathsf{g}^{(d)}_{r_{d-1}i_d1} $. Using the constructor `torchtt.TT()` a full tensor can be decomposed in the TT format.
tens_tt = tntt.TT(tens_full)
# The newly instantiated object contains the cores as a list, the mode sizes and the rank.
print('TT cores', tens_tt.cores)
print('Mode size ', tens_tt.N)
print('TT rank ', tens_tt.R)
# Since the TT decomposition is not exact in most of the cases, an approximation is made. If the argument `eps` is provided to the `torchtt.TT()` function the decomposition can be performed up to the given relative accuracy.
# Moreover the maximum rank can also be provided as the argument `rmax`.
tens_full2 = tens_full+1e-5*tn.randn(tens_full.shape, dtype=tens_full.dtype)
tens_tt2 = tntt.TT(tens_full2, eps = 1e-4)
print(tens_tt2.R)
# The original tensor can be recovered using the `torchtt.TT.full()` method (also check if it equals the original full tensor):
tens_full_rec = tens_tt.full()
print(tn.linalg.norm(tens_full-tens_full_rec)/tn.linalg.norm(tens_full))
# Using the print() function, information about the newly created torchtt.TT instance can be displayed:
print(tens_tt)
#%% Tensor operators
# As a generalization of the matrix vector algebra, one can define tensor operators that act on tensors. If the tensor is $d$-dimensional, the tensor operator will be $2d$-dimensional.
# The goal is to perform a product $\mathsf{Ax}\in\mathbb{M_1\times \cdots \times M_d}$ between a tensor $\mathsf{x}\in\mathbb{R}^{N_1\times \cdots \times N_d}$ and the operator $\mathsf{A}\in \mathbb{R}^{(M_1\times \cdots \times M_d)\times(N_1\times \cdots \times N_d)}$. For the operators the following TT matrix format is used $\mathsf{A}_{i_1...i_d,j_1...j_d}\approx \sum\limits_{r_1,...,r_{d-1}=1}^{R_1,...,R_{d-1}} \mathsf{g}^{(1)}_{1i_1j_1r_1}\cdots\mathsf{g}^{(d)}_{r_{d-1}i_dj_d1}$.
# If a tensor operator needs to be decomposed from full, the additional argument `shape` of the `torchtt.TT()` constructor has to be used to provide the shape.
# If the tensor operator has the shape $(M_1\times \cdots \times M_d)\times(N_1\times \cdots \times N_d)$ the argument must be passed as `[(M1,N1),(M2,N2),(M3,N2),...]`.
A_full = tn.reshape(tn.arange(8*4*6*3*7*9, dtype = tn.float64),[8,4,6,3,7,9])
# create an instance of torchtt.TT
A_ttm = tntt.TT(A_full, eps = 1e-12, shape = [(8,3),(4,7),(6,9)])
#%% Slicing
# Slicing operation can be performed on a tensor in TT format.
# If all the dimensions are indexed with an integer and the multiindices are valid, a torch.tensor with the corresponding value is returned.
# Slices can be also used, however the returned object in this case is again a torchtt.TT instance.
print(tens_tt[1,2,3,4])
print(tens_tt[1,1:4,2,:])
#%% TT rank rounding
# In some cases the TT rank becomes too large and a reduction is desired. The goal is to perform a reduction of the rank while maintaining an accuracy.
# The problem statement of the rounding operation is: given a tensor $\mathsf{x}$ in the TT format with the TT rank $\mathbf{R}$ and an $\epsilon>0$, find a tensor $\tilde{\mathsf{x}}$ with TT rank $\tilde{\mathbf{R}}\leq \mathbf{R}$ such that $ ||\mathsf{x}-\tilde{\mathsf{x}}||_F\leq \epsilon || \mathsf{x} ||_F$.
# This is implemented using the member method of a TT object `torchtt.TT.round()`. The argument `epsilon` is passed to the function as well as the optional argument `rmax` which also restricts the rank of the rounding.
# We will create a tensor of TT rank $(1,6,6,6,1)$ in the TT format (sum of three rank-2 tensors of decreasing magnitude).
t1 = tntt.randn([10,20,30,40],[1,2,2,2,1])
t2 = tntt.randn([10,20,30,40],[1,2,2,2,1])
t3 = tntt.randn([10,20,30,40],[1,2,2,2,1])
t1, t2, t3 = t1/t1.norm(), t2/t2.norm(), t3/t3.norm()
tt = t1+1e-3*t2+1e-6*t3
t_full = tt.full()
print(tt)
# Rounding the tensor to a relative `epsilon` of 1e-5 yields.
tt1 = tt.round(1e-5)
print(tt1)
print('Error ',tn.linalg.norm(tt1.full()-tt.full())/tn.linalg.norm(tt.full()))
# This is equivalent to removing the t3 from tt and the error will be less than 1e-6.
# If a truncation with epsilon=1e-2 is done, the resulting tensor will have the rank [1,2,2,2,1].
tt1 = tt.round(1e-2)
print(tt1)
print('Error ',tn.linalg.norm(tt1.full()-tt.full())/tn.linalg.norm(tt.full()))
# The maximum rank of a truncation can also be provided as argument.
tt3 = tt.round(1e-12,2)
print(tt3)
tt4 = tt.round(1e-12,[1,2,3,2,1])
print(tt4)
#%% Special tensors
# Some tensors can be directly constructed in the TT format: the one tensor, the zeros tensor, the identity tensor operator and random tensors with a given rank.
# The one tensor can be created directly in the TT format using torchtt.ones().
print(tntt.ones([2,3,4]).full())
# The zero tensor can be created in the TT format using torchtt.zeros().
print(tntt.zeros([2,3,4]).full())
# The identity tensor operator is created using torchtt.eye().
print(tntt.eye([10,20,30]))
# Tensors with random TT cores and a given rank can be created with torchtt.random().
print(tntt.random([3,4,5,6,7],[1,2,5,5,2,1]))
print(tntt.random([(3,7),(4,6),(5,5),(6,10),(7,2)],[1,2,5,5,2,1]))
# Random tensors with a given rank and random entries with expected value 0 and given variance can be created using torchtt.randn().
# Variance 1.0
x = tntt.randn([30]*5,[1,8,16,16,8,1])
x_full = x.full()
print('Var = ',tn.std(x_full).numpy()**2,' (has to be comparable to 1.0)')
# Variance 4.0
x = tntt.randn([30]*5,[1,8,16,16,8,1],var = 4.0)
x_full = x.full()
print('Var = ',tn.std(x_full).numpy()**2,' (has to be comparable to 4.0)')
# Variance 0.001
x = tntt.randn([30]*5,[1,8,16,16,8,1], var = 0.001)
x_full = x.full()
print('Var = ',tn.std(x_full).numpy()**2,' (has to be comparable to 0.001)')
# Variance 1.0 (longer train)
x = tntt.randn([10]*7, [1,4,4,4,4,4,4,1], var = 1.0)
x_full = x.full()
print('Var = ',tn.std(x_full).numpy()**2,' (has to be comparable to 1.0)') | 6,346 | 48.976378 | 491 | py |
torchTT | torchTT-main/examples/cross_interpolation.py | """
# Cross approximation in the TT format
Using the `torchtt.TT` constructor, a TT decomposition of a given tensor can be obtained.
However, in the cases where the entries of the tensor are computed using a given function, building full tensors becomes unfeasible.
It is possible to construct a TT decomposition using only a part of the entries of the full tensor.
This is called the cross approximation method.
"""
#%% Imports
import torch as tn
import torchtt as tntt
#%% Cross interpolation of a tensor in TT format
# We want to approximate the tensor $\mathsf{x}_{i_1...i_d}=\frac{1}{2+i_1+\cdots+i_d}$.
# Since the passed indices are integers of type torch.int64, casting is used.
func1 = lambda I: 1/(2+tn.sum(I+1,1).to(dtype=tn.float64))
# Call the torchtt.interpolate.dmrg_cross() method.
N = [20]*4
x = tntt.interpolate.dmrg_cross(func1, N, eps = 1e-7)
# Compute the full tensor and compare to the reference.
Is = tntt.meshgrid([tn.arange(0,n,dtype=tn.float64) for n in N])
x_ref = 1/(2+Is[0].full()+Is[1].full()+Is[2].full()+Is[3].full()+4)
print('Relative error ',tn.linalg.norm(x.full()-x_ref)/tn.linalg.norm(x_ref))
# We consider the case $d=10$, $n_i=32$. the full tensor would contain $32^{10}$ entries.
# The total number of function calls is in this case 25000000 compared to $32^{10}$ of the total number of entries.
N = [32]*10
x = tntt.interpolate.dmrg_cross(func1, N, eps = 1e-10, verbose=True)
# The adaptive cross method used only a fraction of function calls from the original tensor.
# Check some entries (full tensor cannot be computed this time) and show the rank and the storage requirements.
print(x[1,2,3,4,5,6,7,8,9,11], ' reference ', func1(tn.tensor([[1,2,3,4,5,6,7,8,9,11]])))
print(x[12,23,17,25,30,0,7,8,9,11], ' reference ', func1(tn.tensor([[12,23,17,25,30,0,7,8,9,11]])))
print(x)
#%% Element wise application of a univariate function on a TT tensor.
# Let $f:\mathbb{R}\rightarrow\mathbb{R}$ be a function and $\mathsf{x}\in\mathbb{R}^{N_1\times\cdots\times N_d}$ be a tensor with a known TT approximation. The goal is to determine the TT approximation of $\mathsf{y}_{i_1...i_d}=f(\mathsf{x}_{i_1...i_d})$ within a prescribed relative accuracy $\epsilon$ (passed as argument).
# In this case the function is torchtt.interpolate.function_interpolate() and takes as arguments a function handle, the tensor $\mathsf{x}$, the accuracy epsilon, an initial tensor (starting point), number of sweeps (nswp) and the size of the rank enrichment (kick).
# Further arguments are the dtype of the result and the verbose flag.
# The function handle as argument gets as arguments torch vectors and has to return torch vectors of the same size.
# The following example computes the elementwise natural logarithm of a tensor. The relative error of the result is also reported.
x = tntt.TT(x_ref)
func = lambda t: tn.log(t)
y = tntt.interpolate.function_interpolate(func, x, 1e-9)
print('Relative error ',tn.linalg.norm(y.full()-func(x_ref))/tn.linalg.norm(func(x_ref)))
#%% Element wise application of a multivariate function on a TT tensor.
# Let $f:\mathbb{R}^d\rightarrow\mathbb{R}$ be a function and $\mathsf{x}^{(1)},...,\mathsf{x}^{(d)}\in\mathbb{R}^{N_1\times\cdots\times N_d}$ be tensors with a known TT approximation.
# The goal is to determine the TT approximation of $\mathsf{y}_{i_1...i_d}=f(\mathsf{x}_{i_1...i_d}^{(1)},...,\mathsf{x}^{(d)})$ within a prescribed relative accuracy $\epsilon$ (passed as argument).
# The function is the same as in the previous case torchtt.interpolate.function_interpolate(), but the second argument in this case is a list of torchtt.TT tensors. The function handle takes as argument a $M\times d$ torch.tensor and every of the $M$ lines corresponds to an evaluation of the function $f$ at a certain tensor entry. The function handle returns a torch tensor of length $M$.
# The following example computes the same tensor as in the previous case, but with the torchtt.interpolate.function_interpolate() method.
z = tntt.interpolate.function_interpolate(func1, Is)
print('Relative error ',tn.linalg.norm(z.full()-x_ref)/tn.linalg.norm(x_ref))
| 4,130 | 66.721311 | 389 | py |
torchTT | torchTT-main/examples/manifold.py |
import torch as tn
import torchtt as tntt
# mode sizes of the tensors
N = [10,11,12,13,14]
# TT rank of the (rank-truncated) target tensor
Rt = [1,3,4,5,6,1]
# TT rank of the optimization variable (the manifold the iterates live on)
Rx = [1,6,6,6,6,1]
target = tntt.randn(N,Rt).round(0)
# loss: half the squared Frobenius distance to the target
func = lambda x: 0.5*(x-target).norm(True)
x0 = tntt.randn(N,Rx)
x =x0.clone()
# Riemannian gradient descent: project the gradient on the tangent space, step, retract back to rank Rx.
for i in range(20):
    # compute riemannian gradient using AD
    gr = tntt.manifold.riemannian_gradient(x,func)
    # step size
    alpha = 1.0
    # update step
    x = (x-alpha*gr).round(0,Rx)
    print('Value ' , func(x).numpy())
# For comparison: plain gradient descent on the TT cores using AD (no manifold projection).
y = x0.detach().clone()
for i in range(10000):
    tntt.grad.watch(y)
    fval = func(y)
    deriv = tntt.grad.grad(fval,y)
    alpha = 0.00001 # for stability
    y = tntt.TT([y.cores[i].detach()-alpha*deriv[i] for i in range(len(deriv))])
print(func(y)) | 746 | 19.189189 | 80 | py |
torchTT | torchTT-main/examples/efficient_linalg.py | """
# AMEN and DMRG for fast TT operations
The torchtt package includes DMRG and AMEN schemes for fast matrix vector product and elementwise inversion in the TT format.
"""
#%% Imports
import torch as tn
import torchtt as tntt
import datetime
#%% Efficient matrix vector product
# When performing the multiplication between a a TT matrix and a TT tensor the rank of the result is the product of the ranks of the inputs.
# Therefore rank rounding has to be performed. This increases the complexity to $\mathcal{O}(Ndr^6)$.
# In order to overcome this, Oseledets proposed in "DMRG Approach to Fast Linear Algebra in the TT-Format" the DMRG optimization scheme to reduce the complexity.
# This feature is implemented in torchtt by the member function fast_matvec() of the TT class. An example is showed in the following.
# Create a random TT object and a TT matrix.
n = 4 # mode size
A = tntt.random([(n,n)]*8,[1]+7*[4]+[1]) # random array
x = tntt.random([n]*8,[1]+7*[5]+[1]) # random tensor
# Increase the rank without adding redundant information.
# The multiplication performed in this case is actually equivalent to $32\mathbf{\mathsf{Ax}}$.
A = A + A + A + A - A + A - A + A
x = x + x + x + x + x + x + x + x - x + x - x + x
print(A)
print(x)
# Perform the TT matvec directly and round the result. The runtime is reported.
tme = datetime.datetime.now()
y = (A @ x).round(1e-12)
tme = datetime.datetime.now() - tme
print('Time classic ', tme)
# This time run the fast matvec routine.
tme = datetime.datetime.now()
yf = A.fast_matvec(x)
tme = datetime.datetime.now() - tme
print('Time DMRG ', tme)
# Check if the error is the same (debugging purpose).
print('Relative error ',(y-yf).norm().numpy()/y.norm().numpy())
#%% Elementwise division in the TT format
# One other basic linear algebra function that cannot be done without optimization is the elementwise division of two tensors in the TT format.
# In contrast to the elementwise multiplication (where the resulting TT cores can be explicitly computed), the elementwise inversion has to be solved by means of an optimization problem (the method of choice is AMEN).
# The operator "/" can be used for elementwise division between tensors. Moreover one can use "/" between a scalar and a torchtt.TT instance.
# Create 2 tensors:
# - $\mathsf{x}_{i_1i_2i_3i_4} = 2 + i_1$
# - $\mathsf{y}_{i_1i_2i_3i_4} = i_1^2+i_2+i_3+1$
# and express them in the TT format. For both of them a TT decomposition of the elementwise inverse cannot be explicitly formed.
N = [32,50,44,64]
I = tntt.meshgrid([tn.arange(n,dtype = tn.float64) for n in N])
x = 2+I[0]
x = x.round(1e-15)
y = I[0]*I[0]+I[1]+I[2]+I[3]+1
y = y.round(1e-15)
# Perform $\mathsf{z}_{\mathbf{i}} = \frac{\mathsf{x}_{\mathbf{i}}}{\mathsf{y}_{\mathbf{i}}}$ and report the relative error.
z = x/y
print('Relative error', tn.linalg.norm(z.full()-x.full()/y.full())/tn.linalg.norm(z.full()))
# Perform $\mathsf{u}_{\mathbf{i}} = \frac{1}{\mathsf{y}_{\mathbf{i}}}$ and report the relative error.
u = 1/y
print('Relative error', tn.linalg.norm(u.full()-1/y.full())/tn.linalg.norm(u.full()))
# Following are also possible:
# - scalar (float, int) divided elementwise by a tensor in the TT format.
# - torch.tensor with 1 element divided elementwise by a tensor in the TT format.
w = 1.0/y
a = tn.tensor(1.0)/y
| 3,337 | 42.921053 | 217 | py |
torchTT | torchTT-main/examples/basic_linalg.py | """
# Basic linear algebra in torchTT
This notebook is an introduction to the basic linear algebra operations that can be performed using the `torchtt` package.
The basic operations such as +,-,*,@,norm,dot product can be performed between `torchtt.TT` instances without computing the full format by computing the TT cores of the result.
One exception is the elementwise division between TT objects. For this, no explicit form of the resulting TT cores can be derived and therefore optimization techniques have to be employed (see the notebook `fast_tt_operations.ipynb`).
"""
#%% Imports
import torch as tn
import torchtt as tntt
#%% We will create a couple of tensors for the operations that follow
N = [10,10,10,10]
o = tntt.ones(N)
x = tntt.randn(N,[1,4,4,4,1])
y = tntt.TT(tn.reshape(tn.arange(N[0]*N[1]*N[2]*N[3], dtype = tn.float64),N))
A = tntt.randn([(n,n) for n in N],[1,2,3,4,1])
B = tntt.randn([(n,n) for n in N],[1,2,3,4,1])
#%% Addition
# The TT class has the "+" operator implemented. It performs the addition between TT objects (must have compatible shape and type) and it returns a TT object. One can also add scalars to a TT object (float/int/torch.tensor with 1d).
# The TT rank of the result is the sum of the ranks of the inputs. This is usually an overshoot and rounding can decrease the rank while maintaining the accuracy.
# Here are a few examples:
z = x+y
print(z)
# adding scalars is also possible
z = 1+x+1.0
z = z+tn.tensor(1.0)
# it works for the TT matrices too
M = A+A+1
print(M)
# Broadcasting is also available and is similar to the `PyTorch` [broadcasting](https://pytorch.org/docs/stable/notes/broadcasting.html).
# Tensors in the TT-format can be added even if their shapes are different. The rule is that the number of dimensions of the first operand must be greater or equal to the number of dimensions of the second operand.
# In the following example a `(4,5)` tensor is added to a `(2,3,4,5)` tensor:
xx = tntt.random([2,3,4,5],[1,2,3,4,1])
yy = tntt.random([4,5],[1,2,1])
print(xx+yy)
# The mode sizes should match starting from the end or the mode size of the second tensor can be 1:
xx = tntt.random([2,3,4,5],[1,2,3,4,1])
yy = tntt.random([1,1,4,5],[1,2,2,2,1])
print(xx+yy)
#%% Subtraction
# The "-" operator is also implemented in the `torchtt.TT` class. It can be used similarly to "+" between 2 `torchtt.TT` objects and between a `torchtt.TT` and a scalar.
# It can also be used as a negation.
v = x-y-1-0.5
C = A-B-3.14
w = -x+x
print(tn.linalg.norm(w.full()))
# Broadcasting is available for the "-" operation as well.
#%% Multiplication (elementwise)
# One can perform the elementwise multiplication $\mathsf{u}_{i_1...i_d} = \mathsf{x}_{i_1...i_d} \mathsf{y}_{i_1...i_d}$ between 2 tensors in the TT format without going to full format.
# The main issues of this is that the rank of the result is the product of the ranks of the input TT tensors.
u = x*y
print(u)
M2 = A*A
# Broadcasting is available for the "*" operation as well.
#%% Matrix vector product and matrix matrix product
# * TT matrix and TT tensor: $(\mathsf{Ax})_{i_1...i_d} = \sum\limits_{j_1...j_d}\mathsf{A}_{i_1...i_d,j_1...j_d} \mathsf{x}_{j_1...j_d}$
# * TT matrix and TT matrix: $(\mathsf{AB})_{i_1...i_d,k_1...k_d} = \sum\limits_{j_1...j_d}\mathsf{A}_{i_1...i_d,j_1...j_d} \mathsf{B}_{j_1...j_d,k_1...k_d}$
print(A@x)
print(A@B)
print(A@B@x)
# Multiplication can be performed between a TT operator and a full tensor (in torch.tensor format) the result in this case is a full tn.tensor
print(A@tn.rand(A.N, dtype = tn.float64))
#%% Kronecker product
# For computing the Kronecker product one can either use the "**" operator or the method torchtt.kron().
print(x**y)
print(A**A)
#%% Norm
# Frobenius norm of a tensor $||\mathsf{x}||_F^2 = \sum\limits_{i_1,...,i_d} \mathsf{x}_{i_1...i_d}^2$ can be directly computed from a TT decomposition.
print(y.norm())
print(A.norm())
#%% Dot product and summing along modes
# One can sum along dimensions in torchtt. The function is torchtt.TT.sum() and can be used without arguments to sum along all dimensions, returning a scalar:
print('sum() result ', y.sum())
print('Must be equal to ', tn.sum(y.full()))
# If a list of modes is additionally provided, the summing will be performed along the given modes and a torchtt.TT object is returned.
print(x.sum(1))
print(x.sum([0,1,3]))
print(A.sum([1,2]))
# Dot product between 2 tensors is also possible using the function torchtt.dot().
print(tntt.dot(y,y))
# Dot product can be performed between 2 tensors of different mode lengths. The modes along which the dot product is performed must be equal. And they are given as a list of integers as an additional argument. The modes given are relative to the first tensor. The returned value is a torchtt.TT instance.
t1 = tntt.randn([4,5,6,7,8,9],[1,2,4,4,4,4,1])
t2 = tntt.randn([5,7,9],[1,3,3,1])
print(tntt.dot(t1,t2,[1,3,5]))
#%% Reshaping
# Given a tensor in the TT format, one can reshape it similarly as in pytorch or numpy.
# The method is torchtt.reshape() and it takes as argument a torchtt.TT object, the new shape, the relative accuracy epsilon and a maximum rank. The last 2 are optional.
# The method also performs rounding up to the desired accuracy.
q = tntt.TT(tn.reshape(tn.arange(2*3*4*5*7*3, dtype = tn.float64),[2,3,4,5,7,3]))
# perform a series of reshapes
w = tntt.reshape(q,[12,10,21])
print(w)
w = tntt.reshape(w,[360,7])
print(w)
w = tntt.reshape(w,[2,3,4,5,7,3])
print('Error ',(w-q).norm()/q.norm())
# Reshape works also for TT matrices. However there are some restrictions such as the merging or splitting of the dimensions must happen within the same core for both row/column indices.
A = tntt.randn([(4,8),(6,4),(5,6),(8,8)],[1,2,3,2,1])
B = tntt.reshape(A,[(2,4),(6,4),(10,12),(8,8)])
print(B)
B = tntt.reshape(B,[(60,32),(16,48)])
print(B)
B = tntt.reshape(B,[(4,8),(6,4),(5,6),(8,8)])
print('Error ',(B-A).norm()/A.norm())
# this will not work: tntt.reshape(A,[(24,4),(5,16),(8,24)])
| 6,009 | 44.530303 | 299 | py |
torchTT | torchTT-main/tests/test_decomposition.py | import unittest
import torchtt as tntt
import torch as tn
import numpy as np
err_rel = lambda t, ref : tn.linalg.norm(t-ref).numpy() / tn.linalg.norm(ref).numpy() if ref.shape == t.shape else np.inf
class TestDecomposition(unittest.TestCase):
    # dtype used for every random core/tensor built in these tests
    basic_dtype = tn.complex128
    def test_init(self):
        """
        Check the constructor and the TT.full() function.
        A list of cores is passed and the recomposed tensor is compared
        against the direct contraction of the cores.
        """
        cores = [tn.rand([1,20,3],dtype = self.basic_dtype),tn.rand([3,10,4], dtype = self.basic_dtype),tn.rand([4,5,1], dtype = self.basic_dtype)]
        T = tntt.TT(cores)
        Tfull = T.full()
        # reference: contract the 3 cores and squeeze the size-1 boundary modes
        T_ref = tn.squeeze(tn.einsum('ijk,klm,mno->ijlno',cores[0],cores[1],cores[2]))
        self.assertTrue(err_rel(Tfull,T_ref) < 1e-14)
    def test_decomposition_random(self):
        '''
        Perform a TT decomposition of a full random tensor and check that the
        decomposition reproduces it accurately.
        '''
        T_ref = tn.rand([10,20,30,5], dtype = self.basic_dtype)
        T = tntt.TT(T_ref,eps = 1e-19,rmax = 1000)
        Tfull = T.full()
        self.assertTrue(err_rel(Tfull,T_ref) < 1e-12)
    def test_decomposition_lowrank(self):
        """
        Check the decomposition of a tensor which is already in the low rank format.
        """
        cores = [tn.rand([1,200,30], dtype = self.basic_dtype), tn.rand([30,100,4], dtype = self.basic_dtype), tn.rand([4,50,1], dtype = self.basic_dtype)]
        T_ref = tn.squeeze(tn.einsum('ijk,klm,mno->ijlno',cores[0],cores[1],cores[2]))
        T = tntt.TT(T_ref,eps = 1e-19)
        Tfull = T.full()
        self.assertTrue(err_rel(Tfull,T_ref)<1e-12)
    def test_decomposition_highd(self):
        """
        Decompose a 20d tensor with all modes 2.
        """
        cores = [tn.rand([1,2,16], dtype = self.basic_dtype)] + [tn.rand([16,2,16], dtype = self.basic_dtype) for i in range(18)] + [tn.rand([16,2,1], dtype = self.basic_dtype)]
        T_ref = tntt.TT(cores).full()
        T = tntt.TT(T_ref,eps = 1e-12)
        Tfull = T.full()
        self.assertTrue(err_rel(Tfull,T_ref)<1e-12)
    def test_decomposition_ttm(self):
        """
        Decompose a TT-matrix: the full tensor of shape (10,11,12,15,17,19) is
        interpreted as an operator with row modes (10,11,12) and column modes (15,17,19).
        """
        T_ref = tn.rand([10,11,12,15,17,19], dtype = self.basic_dtype)
        T = tntt.TT(T_ref, shape = [(10,15),(11,17),(12,19)], eps = 1e-19, rmax = 1000)
        Tfull = T.full()
        self.assertTrue(err_rel(Tfull,T_ref)<1e-12)
    def test_decomposition_orthogonal(self):
        """
        Check the lr_orthogonal/rl_orthogonal functions: the reconstructed tensor
        must stay the same and the cores must be left (resp. right) orthogonal.
        """
        cores = [tn.rand([1,20,3], dtype = self.basic_dtype), tn.rand([3,10,4], dtype = self.basic_dtype), tn.rand([4,5,20], dtype = self.basic_dtype), tn.rand([20,5,2], dtype = self.basic_dtype), tn.rand([2,10,1], dtype = self.basic_dtype)]
        T = tntt.TT(cores)
        # NOTE(review): the T built from `cores` above is immediately overwritten here.
        T = tntt.random([3,4,5,3,8,7,10,3,5,6],[1,20,12,34,3,50,100,12,2,80,1], dtype = self.basic_dtype)
        T_ref = T.full()
        cores, R = tntt._decomposition.lr_orthogonal(T.cores, T.R, T.is_ttm)
        Tfull = tntt.TT(cores).full()
        self.assertTrue(err_rel(Tfull,T_ref)<1e-12,'Left to right ortho error too high.')
        for i in range(len(cores)):
            c = cores[i]
            L = tn.reshape(c,[-1,c.shape[-1]]).numpy()
            # every core except the last must satisfy L^T conj(L) = I
            self.assertTrue(np.linalg.norm(L.T @ np.conj(L) - np.eye(L.shape[1])) < 1e-12 or i==len(cores)-1,'Cores are not left orthogonal after LR orthogonalization.')
        cores, R = tntt._decomposition.rl_orthogonal(T.cores, T.R, T.is_ttm)
        Tfull = tntt.TT(cores).full()
        self.assertTrue(err_rel(Tfull,T_ref)<1e-12,'Right to left ortho error too high.')
        for i in range(len(cores)):
            c = cores[i]
            # NOTE(review): this R (reshaped core) shadows the rank list R returned above.
            R = tn.reshape(c,[c.shape[0],-1]).numpy()
            # every core except the first must satisfy conj(R) R^T = I
            self.assertTrue(np.linalg.norm(np.conj(R) @ R.T - np.eye(R.shape[0])) < 1e-12 or i==0)
    def test_decomposition_orthogonal_ttm(self):
        """
        Test the lr and rl orthogonal functions for a TT matrix.
        """
        T = tntt.random([(3,4),(5,6),(7,8),(9,4)],[1,2,3,4,1], dtype = self.basic_dtype)
        T_ref = T.full()
        cores, R = tntt._decomposition.lr_orthogonal(T.cores, T.R, T.is_ttm)
        Tfull = tntt.TT(cores).full()
        self.assertTrue(err_rel(Tfull,T_ref)<1e-12,'Left to right ortho error too high.')
        for i in range(len(cores)):
            c = cores[i]
            L = tn.reshape(c,[-1,c.shape[-1]]).numpy()
            self.assertTrue(np.linalg.norm(L.T @ np.conj(L) - np.eye(L.shape[1])) < 1e-12 or i==len(cores)-1,'Cores are not left orthogonal after LR orthogonalization.')
        cores, R = tntt._decomposition.rl_orthogonal(T.cores, T.R, T.is_ttm)
        Tfull = tntt.TT(cores).full()
        self.assertTrue(err_rel(Tfull,T_ref)<1e-12,'Right to left ortho error too high.')
        for i in range(len(cores)):
            c = cores[i]
            R = tn.reshape(c,[c.shape[0],-1]).numpy()
            self.assertTrue(np.linalg.norm(np.conj(R) @ R.T - np.eye(R.shape[0])) < 1e-12 or i==0)
    def test_decomposition_rounding(self):
        """
        Test the rounding of a TT-tensor.
        A rank-4 tensor (sum of 4 rank-1 terms of decreasing magnitude) is constructed
        and successive approximations with decreasing accuracy are performed; the
        resulting ranks and errors are checked.
        """
        T1 = tn.einsum('i,j,k->ijk',tn.rand([20], dtype = self.basic_dtype),tn.rand([30], dtype = self.basic_dtype),tn.rand([32], dtype = self.basic_dtype))
        T2 = tn.einsum('i,j,k->ijk',tn.rand([20], dtype = self.basic_dtype),tn.rand([30], dtype = self.basic_dtype),tn.rand([32], dtype = self.basic_dtype))
        T3 = tn.einsum('i,j,k->ijk',tn.rand([20], dtype = self.basic_dtype),tn.rand([30], dtype = self.basic_dtype),tn.rand([32], dtype = self.basic_dtype))
        T4 = tn.einsum('i,j,k->ijk',tn.rand([20], dtype = self.basic_dtype),tn.rand([30], dtype = self.basic_dtype),tn.rand([32], dtype = self.basic_dtype))
        T_ref = T1 / tn.linalg.norm(T1) + 1e-3*T2 / tn.linalg.norm(T2) + 1e-6*T3 / tn.linalg.norm(T3) + 1e-9*T4 / tn.linalg.norm(T4)
        T3 = T1 / tn.linalg.norm(T1) + 1e-3*T2 / tn.linalg.norm(T2) + 1e-6*T3 / tn.linalg.norm(T3)
        T2 = T1 / tn.linalg.norm(T1) + 1e-3*T2 / tn.linalg.norm(T2)
        T1 = T1 / tn.linalg.norm(T1)
        T = tntt.TT(T_ref)
        T = T.round(1e-9)
        Tfull = T.full()
        self.assertEqual(T.R,[1,3,3,1],'Case 1: Ranks not equal')
        self.assertTrue(err_rel(Tfull,T_ref) < 1e-9,'Case 1: error too high')
        T = tntt.TT(T_ref)
        T = T.round(1e-6)
        Tfull = T.full()
        self.assertEqual(T.R,[1,2,2,1],'Case 2: Ranks not equal')
        self.assertTrue(err_rel(Tfull,T_ref) < 1e-6,'Case 1: error too high')
        T = tntt.TT(T_ref)
        T = T.round(1e-3)
        Tfull = T.full()
        self.assertEqual(T.R,[1,1,1,1],'Case 3: Ranks not equal')
        self.assertTrue(err_rel(Tfull,T_ref) < 1e-3,'Case 1: error too high')
    def test_dimension_permute(self):
        """
        Test the permute function for both TT tensors and TT matrices.
        """
        x_tt = tntt.random([5,6,7,8,9],[1,2,3,4,2,1])
        x_ref = x_tt.full()
        xp_tt = tntt.permute(x_tt, [4,3,2,1,0], 1e-10)
        xp_ref = tn.permute(x_ref, [4,3,2,1,0])
        self.assertEqual(tuple(xp_tt.N), tuple(xp_ref.shape), 'Permute modex of a TT tensor: shape mismatch.')
        self.assertTrue(err_rel(xp_tt.full(), xp_ref) < 1e-10,'Permute modex of a TT tensor: error too high.')
        # Test for TT matrices
        A_tt = tntt.random([(2,3),(4,5),(3,2),(6,7),(5,3)], [1,2,3,4,2,1])
        A_ref = A_tt.full()
        Ap_tt = tntt.permute(A_tt, [3,2,4,0,1])
        # the full TT-matrix permutation acts on row modes [0..4] and column modes [5..9]
        Ap_ref = tn.permute(A_ref, [3,2,4,0,1,8,7,9,5,6])
        self.assertEqual(Ap_tt.M, [6,3,5,2,4], 'Permute modex of a TT matrix: shape mismatch.')
        self.assertEqual(Ap_tt.N, [7,2,3,3,5], 'Permute modex of a TT matrix: shape mismatch.')
        self.assertTrue(err_rel(Ap_tt.full(), Ap_ref) < 1e-10,'Permute modex of a TT tensor: error too high.')
if __name__ == '__main__':
    # Run the test suite when the module is executed directly.
    unittest.main()
| 9,242 | 37.836134 | 241 | py |
torchTT | torchTT-main/tests/test_solvers.py | """
Test the multilinear solvers.
"""
import unittest
import torchtt
import torch as tn
import numpy as np
err_rel = lambda t, ref : tn.linalg.norm(t-ref).numpy() / tn.linalg.norm(ref).numpy() if ref.shape == t.shape else np.inf
class TestSolvers(unittest.TestCase):
    # dtype of the class attribute; individual tests below build float64 systems
    basic_dtype = tn.complex128
    @unittest.skipUnless(torchtt.solvers.cpp_enabled(), "C++ extension must be present.")
    def test_amen_solve(self):
        """
        Test the AMEN solve on a small example (C++ backend, no preconditioner).
        A random system A x = b with known x is built and the residual of the
        computed solution is checked.
        """
        A = torchtt.random([(4,4),(5,5),(6,6)],[1,2,3,1], dtype = tn.float64)
        x = torchtt.random([4,5,6],[1,2,3,1], dtype = tn.float64)
        b = A @ x
        xx = torchtt.solvers.amen_solve(A,b,verbose = False, eps=1e-10, preconditioner=None, use_cpp=True)
        err = (A@xx-b).norm()/b.norm() # error residual
        self.assertLess(err.numpy(),5*1e-8,"AMEN solve failed.")
    @unittest.skipUnless(torchtt.solvers.cpp_enabled(), "C++ extension must be present.")
    def test_amen_solve_cprec(self):
        """
        Test AMEN with central Jacobi preconditioner (C++ backend).
        """
        A = torchtt.random([(4,4),(5,5),(6,6)],[1,2,3,1], dtype = tn.float64)
        x = torchtt.random([4,5,6],[1,2,3,1], dtype = tn.float64)
        b = (A @ x).round(1e-16)
        xx = torchtt.solvers.amen_solve(A,b,verbose = False, eps=1e-10, preconditioner='c', use_cpp=True)
        err = (A@xx-b).norm()/b.norm() # error residual
        self.assertLess(err.numpy(),5*1e-8,"AMEN solve failed (c preconditioner).")
    @unittest.skipUnless(torchtt.solvers.cpp_enabled(), "C++ extension must be present.")
    def test_amen_solve_rprec(self):
        """
        Test AMEN with the right Jacobi preconditioner (C++ backend).
        """
        A = torchtt.random([(4,4),(5,5),(6,6)],[1,2,3,1], dtype = tn.float64)
        x = torchtt.random([4,5,6],[1,2,3,1], dtype = tn.float64)
        b = A @ x
        xx = torchtt.solvers.amen_solve(A,b,verbose = False, eps=1e-10, preconditioner='r', use_cpp=True)
        err = (A@xx-b).norm()/b.norm() # error residual
        self.assertLess(err.numpy(),5*1e-8,"AMEN solve failed (right preconditioner).")
    def test_amen_solve_cprec_nocpp(self):
        """
        Test AMEN with central Jacobi preconditioner with the pure Python fallback (no C++).
        """
        A = torchtt.random([(4,4),(5,5),(6,6)],[1,2,3,1], dtype = tn.float64)
        x = torchtt.random([4,5,6],[1,2,3,1], dtype = tn.float64)
        b = A @ x
        xx = torchtt.solvers.amen_solve(A,b,verbose = False, eps=1e-10, preconditioner='c', use_cpp=False)
        err = (A@xx-b).norm()/b.norm() # error residual
        self.assertLess(err.numpy(),5*1e-8,"AMEN solve failed (c preconditioner, without C++).")
    def test_amen_solve_rprec_nocpp(self):
        """
        Test AMEN with the right Jacobi preconditioner with the pure Python fallback (no C++).
        """
        A = torchtt.random([(4,4),(5,5),(6,6)],[1,2,3,1], dtype = tn.float64)
        x = torchtt.random([4,5,6],[1,2,3,1], dtype = tn.float64)
        b = A @ x
        xx = torchtt.solvers.amen_solve(A, b, verbose = False, eps=1e-10, nswp = 40, preconditioner='r', use_cpp=False)
        err = (A@xx-b).norm()/b.norm() # error residual
        self.assertLess(err.numpy(),5*1e-8,"AMEN solve failed (right preconditioner).")
    def test_amen_solve_nocpp(self):
        """
        Test the AMEN solve on a small example with the C++ backend disabled
        (both without preconditioner and with the central Jacobi preconditioner).
        """
        A = torchtt.random([(4,4),(5,5),(6,6)],[1,2,3,1], dtype = tn.float64)
        x = torchtt.random([4,5,6],[1,2,3,1], dtype = tn.float64)
        b = A @ x
        xx = torchtt.solvers.amen_solve(A,b,verbose = False, eps=1e-10, preconditioner=None, use_cpp = False)
        err = (A@xx-b).norm()/b.norm() # error residual
        self.assertLess(err.numpy(),5*1e-8,"AMEN solve failed.")
        xx = torchtt.solvers.amen_solve(A,b,verbose = False, eps=1e-10, preconditioner='c', use_cpp = False)
        err = (A@xx-b).norm()/b.norm() # error residual
        self.assertLess(err.numpy(),5*1e-8,"AMEN solve failed (c preconditioner).")
if __name__ == '__main__':
    # Run the test suite when the module is executed directly.
    unittest.main()
| 4,164 | 41.938144 | 122 | py |
torchTT | torchTT-main/tests/test_algebra_2.py | """
Test the advanced multilinear algebra operations between torchtt.TT objects.
Some operations (matvec for large ranks and elementwise division) can only be computed using optimization (AMEN and DMRG).
"""
import unittest
import torchtt as tntt
import torch as tn
import numpy as np
err_rel = lambda t, ref : tn.linalg.norm(t-ref).numpy() / tn.linalg.norm(ref).numpy() if ref.shape == t.shape else np.inf
class TestLinalgAdvanced(unittest.TestCase):
basic_dtype = tn.float64
def test_dmrg_hadamard(self):
"""
Test hadamard product using DMRG.
"""
n = 32
z = tntt.random([n]*8,[1]+7*[3]+[1], dtype = tn.float64)
zm = z + z
x = tntt.random([n]*8,[1]+7*[5]+[1], dtype = tn.float64)
xm = x + x
xm = xm + xm
# conventional method
y = 8 * (z * x).round(1e-12)
yf = tntt.dmrg_hadamard(zm, xm, eps = 1e-12, verb = False)
rel_error = (y-yf).norm().numpy()/y.norm().numpy()
self.assertLess(rel_error,1e-12,"DMRG elementwise multiplication.")
    def test_dmrg_matvec(self):
        """
        Test the fast matrix vector product using DMRG iterations.
        """
        n = 32
        # square TT matrix with artificially inflated ranks (Am = 2*A, xm = 4*x)
        A = tntt.random([(n,n)]*8,[1]+7*[3]+[1], dtype = tn.complex128)
        Am = A + A
        x = tntt.random([n]*8,[1]+7*[5]+[1], dtype = tn.complex128)
        xm = x + x
        xm = xm + xm
        # conventional method
        y = 8 * (A @ x).round(1e-12)
        # dmrg matvec
        yf = Am.fast_matvec(xm)
        rel_error = (y-yf).norm().numpy()/y.norm().numpy()
        self.assertLess(rel_error,1e-12,"DMRG matrix vector problem: square matrix.")
        # repeat with a rectangular TT matrix (n+2 rows, n columns per mode)
        n = 32
        A = tntt.random([(n+2,n)]*8,[1]+7*[3]+[1], dtype = tn.complex128)
        Am = A + A
        x = tntt.random([n]*8,[1]+7*[5]+[1], dtype = tn.complex128)
        xm = x + x
        xm = xm + xm
        # conventional method
        y = 8 * (A @ x).round(1e-12)
        # dmrg matvec
        yf = Am.fast_matvec(xm)
        rel_error = (y-yf).norm().numpy()/y.norm().numpy()
        self.assertLess(rel_error,1e-12,"DMRG matrix vector problem: not square matrix.")
def test_amen_division(self):
"""
Test the division between tensors performed with AMEN optimization.
"""
N = [7,8,9,10]
xs = tntt.meshgrid([tn.linspace(0,1,n, dtype = self.basic_dtype) for n in N])
x = xs[0]+xs[1]+xs[2]+xs[3]+xs[1]*xs[2]+(1-xs[3])*xs[2]+1
x = x.round(0)
y = tntt.ones(x.N, dtype = self.basic_dtype)
a = y/x
b = 1/x
c = tn.tensor(1.0)/x
self.assertLess(err_rel(a.full(),y.full()/x.full()),1e-11,"AMEN division problem: TT and TT.")
self.assertLess(err_rel(b.full(),1/x.full()),1e-11,"AMEN division problem: scalar and TT.")
self.assertLess(err_rel(c.full(),1/x.full()),1e-11,"AMEN division problem: scalar and TT part 2.")
def test_amen_division_preconditioned(self):
"""
Test the elemntwise division using AMEN (use preconditioner for the local subsystem).
"""
N = [7,8,9,10]
xs = tntt.meshgrid([tn.linspace(0,1,n, dtype = self.basic_dtype) for n in N])
x = xs[0]+xs[1]+xs[2]+xs[3]+xs[1]*xs[2]+(1-xs[3])*xs[2]+1
x = x.round(0)
y = tntt.ones(x.N)
a = tntt.elementwise_divide(y,x,preconditioner = 'c')
self.assertLess(err_rel(a.full(),y.full()/x.full()),1e-11,"AMEN division problem (preconditioner): TT and TT.")
if __name__ == '__main__':
unittest.main() | 3,746 | 31.868421 | 122 | py |
torchTT | torchTT-main/tests/test_ad.py | """
Test all the AD related functions.
@author: ion
"""
import torch as tn
import torchtt
import unittest
err_rel = lambda t, ref : tn.linalg.norm(t-ref).numpy() / tn.linalg.norm(ref).numpy() if ref.shape == t.shape else np.inf
class TestAD(unittest.TestCase):
    def test_manifold(self):
        """
        Compare the result of the manifold projection and the manifold gradient computed using AD.
        """
        target = torchtt.randn([10,12,14,16],[1,8,8,7,1])
        # cost 0.5*(x-target).norm(True); its Euclidean gradient is presumably
        # x-target (assumes norm(True) is the squared norm -- TODO confirm)
        func = lambda x: 0.5*(x-target).norm(True)
        R = [1,3,4,6,1]
        x = torchtt.randn(target.N,R.copy())
        gr_ad = torchtt.manifold.riemannian_gradient(x, func)
        gr_proj = torchtt.manifold.riemannian_projection(x, (x-target))
        # tangent-space elements are expected to have doubled interior TT ranks
        self.assertListEqual(gr_ad.R,[2*r if r!=1 else 1 for r in R],"TT manifold: Riemannian gradient error: ranks mismatch.")
        self.assertListEqual(gr_proj.R,[2*r if r!=1 else 1 for r in R],"TT manifold: Riemannian projection error: ranks mismatch.")
        self.assertLess(err_rel(gr_ad.full(),gr_proj.full()),1e-12,"TT manifold: Riemannian gradient and projected gradient differ.")
    def test_mainfold_matrix(self):
        """
        Test the manifold gradient and the manifold projection for the TT matrix case.

        NOTE(review): method name has a typo ("mainfold" -> "manifold"); kept
        as-is because test runners discover tests by name.
        """
        A = torchtt.randn([(2,3),(4,5),(6,7),(4,2)],[1,2,3,2,1])
        X = torchtt.randn([(2,3),(4,5),(6,7),(4,2)],[1,3,2,2,1])
        # cost 0.5*(x-A).norm(True); the projected direction below is X-A
        func = lambda x: 0.5*(x-A).norm(True)
        gr_ad = torchtt.manifold.riemannian_gradient(X, func)
        gr_proj = torchtt.manifold.riemannian_projection(X, (X-A))
        # tangent-space elements are expected to have doubled interior TT ranks
        self.assertListEqual(gr_ad.R,[2*r if r!=1 else 1 for r in X.R],"TT manifold: Riemannian gradient error: ranks mismatch.")
        self.assertListEqual(gr_proj.R,[2*r if r!=1 else 1 for r in X.R],"TT manifold: Riemannian projection error: ranks mismatch.")
        self.assertLess(err_rel(gr_ad.full(),gr_proj.full()),1e-12,"TT manifold: Riemannian gradient and projected gradient differ.")
def test_ad(self):
"""
Test the AD functionality.
"""
N = [2,3,4,5]
A = torchtt.randn([(n,n) for n in N],[1]+[2]*(len(N)-1)+[1])
y = torchtt.randn(N,A.R)
x = torchtt.ones(N)
def f(x,A,y):
z = torchtt.dot(A @ (x-y),(x-y))
return z.norm()
torchtt.grad.watch(x)
val = f(x,A,y)
grad_cores = torchtt.grad.grad(val, x)
torchtt.grad.watch(A)
val = f(x,A,y)
grad_cores_A = torchtt.grad.grad(val, A)
self.assertListEqual([c.shape for c in grad_cores],[c.shape for c in x.cores],"TT AD: problem for grad w.r.t. TT tensor.")
self.assertListEqual([c.shape for c in grad_cores_A],[c.shape for c in A.cores],"TT AD: problem for grad w.r.t. TT matrix.")
# h = 1e-7
# x1 = x.clone()
# x1.cores[1][0,0,0] += h
# x2 = x.clone()
# x2.cores[1][0,0,0] -= h
# derivative = (f(x1,A,y)-f(x2,A,y))/(2*h)
# print(tn.abs(derivative-grad_cores[1][0,0,0])/tn.abs(derivative))
if __name__ == '__main__':
unittest.main() | 3,188 | 34.831461 | 151 | py |
torchTT | torchTT-main/tests/test_linalg.py | """
Test the basic multilinear algebra operations between torchtt.TT objects.
"""
import unittest
import torchtt as tntt
import torch as tn
import numpy as np
err_rel = lambda t, ref : (tn.linalg.norm(t-ref).numpy() / tn.linalg.norm(ref).numpy() if tn.linalg.norm(ref).numpy()>0 else tn.linalg.norm(t-ref).numpy() ) if ref.shape == t.shape else np.inf
class TestLinalg(unittest.TestCase):
basic_dtype = tn.complex128
def test_add(self):
'''
Test the addition operator
'''
N = [10,8,6,9,12]
x = tntt.random(N,[1,3,4,5,6,1], dtype = self.basic_dtype)
y = tntt.random(N,[1,2,4,5,4,1], dtype = self.basic_dtype)
z = tntt.random(N,[1,2,2,2,2,1], dtype = self.basic_dtype)
const = 3.1415926535
X = x.full()
Y = y.full()
Z = z.full()
w = x+y+z
t = const+(const+x)+const
W = X+Y+Z
T = const+(const+X)+const
self.assertTrue(err_rel(w.full(),W)<1e-14,'Addition error 1')
self.assertTrue(err_rel(t.full(),T)<1e-14,'Addition error 2')
M = tntt.random([(5,6),(7,8),(9,10)],[1,5,5,1])
P = tntt.random([(5,6),(7,8),(9,10)],[1,2,20,1])
Q = M+P+P+M
Qr = M.full()+P.full()+P.full()+M.full()
self.assertTrue(err_rel(Q.full(),Qr)<1e-14,'Addition error 2: TT-matrix')
# test broadcasting
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([ 4,5,6],[1,2,2,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x+y
zr = xr+yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Addition broadcasting error 1: TT-tensors.")
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([ 1,1,6],[1,2,2,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x+y
zr = xr+yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Addition broadcasting error 2: TT-tensors.")
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([ 1],[1,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x+y
zr = xr+yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Addition broadcasting error 3: TT-tensors.")
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([1,1,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x+y
zr = xr+yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Addition broadcasting error 4: TT-tensors.")
def test_sub(self):
'''
Test the subtraction operator.
'''
N = [10,8,6,9,12]
x = tntt.random(N,[1,3,4,5,6,1], dtype = self.basic_dtype)
y = tntt.random(N,[1,2,4,5,4,1], dtype = self.basic_dtype)
z = tntt.random(N,[1,2,2,2,2,1], dtype = self.basic_dtype)
const = 3.1415926535
X = x.full()
Y = y.full()
Z = z.full()
w = -x+y-z
t = const+(const-x)-const
W = -X+Y-Z
T = const+(const-X)-const
self.assertTrue(err_rel(w.full(),W)<1e-14,'Subtraction error 1')
self.assertTrue(err_rel(t.full(),T)<1e-14,'Subtraction error 2')
M = tntt.random([(5,6),(7,8),(9,10)],[1,5,5,1])
P = tntt.random([(5,6),(7,8),(9,10)],[1,2,20,1])
Q = -M+P-P-M
Qr = -M.full()+P.full()-P.full()-M.full()
self.assertTrue(err_rel(Q.full(),Qr)<1e-14,'Subtraction error 2: TT-matrix')
# test broadcasting
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([ 4,5,6],[1,2,2,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x-y
zr = xr-yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Subtraction broadcasting error 1: TT-tensors.")
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([ 1,1,6],[1,2,2,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x-y
zr = xr-yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Subtraction broadcasting error 2: TT-tensors.")
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([ 1],[1,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x-y
zr = xr-yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Subtraction broadcasting error 3: TT-tensors.")
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([1,1,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x-y
zr = xr-yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Subtraction broadcasting error 4: TT-tensors.")
def test_mult(self):
"""
Test the pointwise multiplication between TT-objects.
"""
A = tntt.random([(5,6),(7,8),(9,10),(4,5)],[1,5,5,3,1], dtype = self.basic_dtype)
B = tntt.random([(5,6),(7,8),(9,10),(4,5)],[1,5,5,3,1], dtype = self.basic_dtype)
Ar = A.full()
Br = B.full()
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([2,3,4,5,6],[1,2,5,6,2,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
c = 2.5
z = c*x*(-y*c)
zr = c*xr*(-yr*c)
self.assertLess(err_rel(z.full(), zr), 1e-13, "Multiplication error: TT-tensors.")
C = c*A*(B*c)
Cr = c*Ar*(Br*c)
self.assertLess(err_rel(C.full(),Cr), 1e-13, "Multiplication error: TT-matrices.")
z = 0*x
zr = 0*xr
self.assertLess(err_rel(z.full(),zr), 1e-13, "Multiplication error: TT-tensor 0 with scalar.")
self.assertEqual(z.R, [1,1,1,1,1,1], "Multiplication error: TT-tensor 0 with scalar.")
C = 0*A
Cr = 0*Ar
self.assertLess(err_rel(C.full(),Cr), 1e-13, "Multiplication error: TT-matrix 0 with scalar.")
self.assertEqual(C.R, [1,1,1,1,1], "Multiplication error: TT-matrix 0 with scalar.")
# test broadcasting
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([ 4,5,6],[1,2,2,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x*y
zr = xr*yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Multiplication broadcasting error 1: TT-tensors.")
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([ 1,1,6],[1,2,2,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x*y
zr = xr*yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Multiplication broadcasting error 2: TT-tensors.")
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([ 1],[1,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x*y
zr = xr*yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Multiplication broadcasting error 3: TT-tensors.")
x = tntt.random([2,3,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
y = tntt.random([1,1,4,5,6],[1,2,4,8,4,1], dtype = self.basic_dtype)
xr = x.full()
yr = y.full()
z = x*y
zr = xr*yr
self.assertLess(err_rel(z.full(),zr),1e-13,"Multiplication broadcasting error 4: TT-tensors.")
def test_matmult(self):
"""
Test the matrix multiplication operations.
"""
A = tntt.random([(5,6),(7,8),(4,5)],[1,5,3,1], dtype = self.basic_dtype)
B = tntt.random([(5,6),(7,8),(4,5)],[1,5,3,1], dtype = self.basic_dtype).t()
x = tntt.randn([5,7,4],[1,3,2,1], dtype = self.basic_dtype)
y = tntt.randn([6,8,5],[1,1,2,1], dtype = self.basic_dtype)
A_ref = A.full()
B_ref = B.full()
x_ref = x.full()
y_ref = y.full()
# matrix matrix
C = A@B
C_ref = tn.einsum('ijkabc,abcmno->ijkmno', A_ref, B_ref)
self.assertLess(err_rel(C.full(),C_ref),1e-13,"torchtt.TT.__matmul__() error: 2 TT matrices.")
# matrix vector
z = B@x
z_ref = tn.einsum('abcijk,ijk->abc', B_ref, x_ref)
self.assertLess(err_rel(C.full(),C_ref),1e-13,"torchtt.TT.__matmul__() error: TT matrix with TT vecotr.")
def test_matvecdense(self):
"""
Test the multiplication between a TT-matrix and a dense tensor
"""
A = tntt.random([(5,6),(7,8),(9,10),(4,5)],[1,5,5,3,1], dtype = self.basic_dtype)
x = tn.rand([6,8,10,5], dtype = self.basic_dtype)
y = A @ x
yr = tn.einsum('abcdijkl,ijkl->abcd', A.full(), x)
self.assertLess(err_rel(y,yr),1e-14,'Dense matvec error 1.')
x = tn.rand([32,4,33,6,8,10,5], dtype = self.basic_dtype)
y = A @ x
yr = tn.einsum('abcdijkl,mnoijkl->mnoabcd',A.full(), x)
self.assertEqual(y.shape,yr.shape,'Dense matvec shape mismatch.')
self.assertLess(err_rel(y,yr),1e-14,'Dense matvec error 2.')
x = tn.rand([1,22,6,8,10,5], dtype = self.basic_dtype)
y = A @ x
yr = tn.einsum('abcdijkl,nmijkl->nmabcd',A.full(), x)
self.assertEqual(y.shape,yr.shape,'Dense matvec shape mismatch.')
self.assertLess(err_rel(y,yr),1e-14,'Dense matvec error 2.')
    def test_mode_product(self):
        """
        Test the n-mode tensor product.
        """
        x = tntt.randn([2,3,4,5,6], [1,3,3,3,3,1], dtype = self.basic_dtype)
        M1 = tn.rand((8,3), dtype = self.basic_dtype)
        M2 = tn.rand((7,2), dtype = self.basic_dtype)
        M3 = tn.rand((10,5), dtype = self.basic_dtype)
        # single matrix applied along mode 1
        y = x.mprod(M1, 1)
        yr = tn.einsum('ijklm,aj->iaklm',x.full(),M1)
        self.assertLess(err_rel(y.full(),yr),1e-14,'torchtt.tt.mprod() error: case 1.')
        # several matrices applied along several modes at once
        z = x.mprod([M2, M3], [0, 3])
        zr = tn.einsum('ijklm,ai,bl->ajkbm', x.full(), M2, M3)
        self.assertLess(err_rel(z.full(),zr),1e-14,'torchtt.tt.mprod() error: case 2.')
    def test_dot(self):
        '''
        Test the dot product between TT tensors.
        '''
        a = tntt.random([4,5,6,7,8,9],[1,2,10,16,20,7,1], dtype = self.basic_dtype)
        b = tntt.random([4,5,6,7,8,9],[1,3,4,10,10,4,1], dtype = self.basic_dtype)
        c = tntt.random([5,7,9],[1,2,7,1], dtype = self.basic_dtype)
        d = tntt.random([4,5,9],[1,2,2,1], dtype = self.basic_dtype)
        # full contraction (scalar result) of equally sized tensors
        x = tntt.dot(a,b)
        # partial contraction over the listed modes of the first argument
        y = tntt.dot(a,c,[1,3,5])
        z = tntt.dot(b,d,[0,1,5])
        self.assertLess(err_rel(x,tn.einsum('abcdef,abcdef->',a.full(),tn.conj(b.full()))), 1e-12, 'Dot product error. Test: equal sized tensors.')
        self.assertLess(err_rel(y.full(),tn.einsum('abcdef,bdf->ace',a.full(),tn.conj(c.full()))), 1e-12, 'Dot product error. Test: different sizes 1.')
        self.assertLess(err_rel(z.full(),tn.einsum('abcdef,abf->cde',b.full(),tn.conj(d.full()))), 1e-12, 'Dot product error. Test: different sizes 2.')
def test_sum(self):
'''
Test the sum method.
'''
a = tntt.random([4,5,6,7,8],[1,2,10,16,7,1], dtype = self.basic_dtype)
afull = a.full()
self.assertLess(err_rel(a.sum(),afull.sum()), 1e-13, "Test TT.sum() error 1.")
    def test_kron(self):
        '''
        Test the Kronecker product.
        '''
        a = tntt.random([5,7,9],[1,2,7,1], dtype = self.basic_dtype)
        b = tntt.random([4,5,9],[1,2,2,1], dtype = self.basic_dtype)
        # Kronecker product of two TT tensors (** operator)
        c = a**b
        self.assertLess(err_rel(c.full(),tn.einsum('abc,def->abcdef',a.full(),b.full())), 1e-12, 'Kronecker product error: 2 tensors.')
        A = tntt.random([(2,3),(4,5)],[1,2,1], dtype = self.basic_dtype)
        B = tntt.random([(3,3),(4,2)],[1,3,1], dtype = self.basic_dtype)
        # Kronecker product of two TT matrices
        C = A**B
        self.assertLess(err_rel(C.full(),tn.einsum('abcd,mnop->abmncdop',A.full(),B.full())), 1e-12, 'Kronecker product error: 2 tensor operators.')
        # None and the empty ones-tensor act as identity elements for **
        c = a**None
        self.assertLess(err_rel(a.full(),c.full()),1e-14,'Kronecker product error: tensor and None.')
        c = a**tntt.ones([])
        self.assertLess(err_rel(a.full(),c.full()),1e-14,'Kronecker product error: tensor and None.')
def test_combination(self):
'''
Test sequence of linear algebra operations.
'''
x = tntt.random([4,7,13,14,19],[1,2,10,13,10,1], dtype = self.basic_dtype)
y = tntt.random([4,7,13,14,19],[1,2,4,2,4,1], dtype = self.basic_dtype)
x = x/x.norm()
y = y/y.norm()
z = x*x-2*x*y+y*y
u = (x-y)*(x-y)
norm = (z-u).norm()
self.assertLess(norm.numpy(),1e-14,"Error: Multiple operations. Part 1 fails.")
def test_slicing(self):
'''
Test the slicing operator.
'''
# print('Testing: Slicing of a tensor.')
# TT-tensor
cores = [tn.rand([1,9,3], dtype = self.basic_dtype),tn.rand([3,10,4], dtype = self.basic_dtype),tn.rand([4,15,5], dtype = self.basic_dtype),tn.rand([5,15,1], dtype = self.basic_dtype)]
Att = tntt.TT(cores)
A = Att.full()
self.assertLess(err_rel(A[1,2,3,4],Att[1,2,3,4]), 1e-15 , "Tensor slicing error: slice to a scalar.")
self.assertLess(err_rel(A[1:3,2:4,3:10,4],Att[1:3,2:4,3:10,4].full()), 1e-15 , "Tensor slicing error: eliminate some dimension.")
self.assertLess(err_rel(A[1,:,3,4],Att[1,:,3,4].full()), 1e-15, "Tensor slicing error: slice to 1d." )
self.assertLess(err_rel(A[None,:,2,:,None,4,None],Att[None,:,2,:,None,4,None].full()) , 1e-15 , "Tensor slicing error: add dimensions.")
self.assertLess(err_rel(A[None,None,1,2,2,None,4,None,None],Att[None,None,1,2,2,None,4,None,None].full()) , 1e-15 , "Tensor slicing error: add more dimensions.")
self.assertLess(err_rel(A[...,1,1],Att[...,1,1].full()) , 1e-15 , "Tensor slicing error: Ellipsis in the beginning.")
self.assertLess(err_rel(A[1,...],Att[1,...].full()) , 1e-15 , "Tensor slicing error: ellipsis in the end.")
self.assertLess(err_rel(A[...],Att[...].full()) , 1e-15 , "Tensor slicing error: ellipsis only.")
self.assertLess(err_rel(A[None,None,...],Att[None,None,...].full()) , 1e-15 , "Tensor slicing error: ellipsis and None only.")
self.assertLess(err_rel(A[...,None],Att[...,None].full()) , 1e-15 , "Tensor slicing error: None and ellipsis only.")
# TT-matrix
cores = [tn.rand([1,9,8,3], dtype = self.basic_dtype),tn.rand([3,10,9,4], dtype = self.basic_dtype),tn.rand([4,15,14,5], dtype = self.basic_dtype),tn.rand([5,15,10,1], dtype = self.basic_dtype)]
Att = tntt.TT(cores)
A = Att.full()
self.assertLess(err_rel(A[1,2,3,4,5,4,3,2],Att[1,2,3,4,5,4,3,2]), 1e-15 , "Tensor slicing error: TT matrix slice to scalar." )
self.assertLess(err_rel(A[1:3,1:3,1:3,1:3,5:6,1:3,1:3,1:3],Att[1:3,1:3,1:3,1:3,5:6,1:3,1:3,1:3].full()) , 1e-15 , "Tensor slicing error: TT-matrix.")
self.assertLess(err_rel(A[1,1:3,1:3,1:3,2,1:3,1:3,1:3],Att[1,1:3,1:3,1:3,2,1:3,1:3,1:3].full()), 1e-15 , "Tensor slicing error: TT-matrix eliminate some dimension." )
self.assertLess(err_rel(A[None,None,1,1:3,None,1:3,1:3,None,None,None,2,1:3,None,1:3,1:3,None],Att[None,None,1,1:3,None,1:3,1:3,None,None,None,2,1:3,None,1:3,1:3,None].full()), 1e-15 , "Tensor slicing error: TT matrix add dimensions." )
#### TODO: More testing for the TT-matrix case.
def test_qtt(self):
'''
Test case for the QTT functions.
'''
N = [16,8,64,128]
R = [1,2,10,12,1]
x = tntt.random(N,R, dtype = self.basic_dtype)
x_qtt = x.to_qtt()
x_full = x.full()
self.assertTrue(err_rel(tn.reshape(x_qtt.full(),x.N),x_full)<1e-12,'Tensor to QTT failed.')
x = tntt.random([256,128,1024,128],[1,40,50,20,1], dtype = self.basic_dtype)
# x = tntt.random([16,8,4,16],[1,10,12,4,1], dtype = self.basic_dtype)
N = x.N
xq = x.to_qtt()
xx = xq.qtt_to_tens(N)
self.assertTrue(np.abs((x-xx).norm(True)/x.norm(True))<1e-12,'TT to QTT and back not working.')
def test_reshape(self):
'''
Test the reshape function.
'''
T = tntt.ones([3,2], dtype = self.basic_dtype)
Tf = T.full()
Tr = tntt.reshape(T,[6])
self.assertLess(tn.linalg.norm(tn.reshape(Tf,Tr.N)-Tr.full()).numpy(),1e-12,'TT-tensor reshape fail: test 1')
T = tntt.random([6,8,9],[1,4,5,1], dtype = self.basic_dtype)
Tf = T.full()
Tr = tntt.reshape(T,[2,6,12,3])
self.assertLess(tn.linalg.norm(tn.reshape(Tf,Tr.N)-Tr.full()).numpy(),1e-12,'TT-tensor reshape fail: test 2')
T = tntt.random([6,8,9],[1,4,5,1], dtype = self.basic_dtype)
Tf = T.full()
Tr = tntt.reshape(T,[2,3,4,2,3,3])
self.assertLess(tn.linalg.norm(tn.reshape(Tf,Tr.N)-Tr.full()).numpy(),1e-12,'TT-tensor reshape fail: test 3')
T = tntt.random([2,3,4,2,3,2,5],[1,2,3,4,4,5,2,1], dtype = self.basic_dtype)
Tf = T.full()
Tr = tntt.reshape(T,[6,24,10])
self.assertLess(tn.linalg.norm(tn.reshape(Tf,Tr.N)-Tr.full()).numpy(),1e-11,'TT-tensor reshape fail: test 4')
# test TT-matrix
A = tntt.random([(9,4),(16,6)],[1,4,1], dtype = self.basic_dtype)
Af = A.full()
Ar = tntt.reshape(A,[(3,2),(3,2),(4,2),(4,3)])
self.assertLess(tn.linalg.norm(tn.reshape(Af,Ar.M+Ar.N)-Ar.full()).numpy(),1e-12,'TT-matrix reshape fail: test 1')
A = tntt.random([(9,4),(16,6),(3,5)],[1,4,5,1], dtype = self.basic_dtype)
Af = A.full()
Ar = tntt.reshape(A,[(3,2),(6,6),(24,10)])
self.assertLess(err_rel(Ar.full(),tn.reshape(Af,Ar.M+Ar.N)),1e-13,'TT-matrix reshape fail: test 2')
A = tntt.random([(4,8),(16,12),(2,8),(6,4)],[1,4,7,2,1], dtype = self.basic_dtype)
T = tntt.random([8,12,8,4],[1,3,9,3,1], dtype = self.basic_dtype)
Ar = tntt.reshape(A,[(2,4),(4,6),(4,2),(8,32),(3,2)])
Tr = tntt.reshape(T,[4,6,2,32,2])
Af = A.full()
Tf = T.full()
Ur = Ar@Tr
U = A@T
self.assertLess(err_rel(Ur.full(),tn.reshape(U.full(),Ur.N)),1e-13,'TT-matrix reshape fail: test 3')
A = tntt.random([(2,2),(4,2),(2,2)],[1,3,4,1])
Af = A.full()
Ar = tntt.reshape(A, [(2,2),(2,2),(2,1),(2,2)])
self.assertLess(err_rel(Ar.full(),tn.reshape(Af,Ar.M+Ar.N)),1e-13,'TT-matrix reshape fail: test 4')
def test_mask(self):
"""
Test the apply_mask() method.
"""
indices = tn.randint(0,20,(1000,4))
x = tntt.random([21,22,23,21],[1,10,10,10,1], dtype = self.basic_dtype)
xf = x.full()
vals = x.apply_mask(indices)
vals_ref = 0*vals
for i in range(len(indices)):
vals_ref[i] = xf[tuple(indices[i])]
self.assertLess(tn.linalg.norm(vals-vals_ref), 1e-12, "Mask method error.")
def test_bilinear(self):
"""
Test the method torchtt.bilinear_form()
"""
A = tntt.random([(5,6),(7,8),(2,3),(4,5)],[1,5,5,3,1], dtype = self.basic_dtype)
x = tntt.randn([5,7,2,4],[1,2,3,4,1], dtype = self.basic_dtype)
y = tntt.randn([6,8,3,5],[1,6,5,4,1], dtype = self.basic_dtype)
res = tntt.bilinear_form(x,A,y)
res_ref = tn.einsum('abcd,abcdijkl,ijkl->',tn.conj(x.full()),A.full(),y.full())
self.assertLess(err_rel(res,res_ref),5e-13,"torchtt.bilinear_form() failed.")
def test_conj(self):
"""
Test the conjugate.
"""
A = tntt.random([(5,6),(7,8),(2,3),(4,5)],[1,5,5,3,1], dtype = self.basic_dtype)
x = tntt.randn([5,7,2,4],[1,2,3,4,1], dtype = self.basic_dtype)
self.assertLess(err_rel(x.conj().full(),tn.conj(x.full())),5e-13,"torchtt.TT.conj() failed.")
self.assertLess(err_rel(A.conj().full(),tn.conj(A.full())),5e-13,"torchtt.TT.conj() failed fpr TT matrix.")
def test_cat(self):
"""
Test the concatenation of tensors.
"""
a1 = tntt.randn((3,4,2,6,7), [1,2,3,2,4,1])
a2 = tntt.randn((3,4,8,6,7), [1,3,2,2,1,1])
a3 = tntt.randn((3,4,15,6,7), [1,3,7,7,5,1])
a = tntt.cat((a1,a2,a3),2)
af = tn.cat((a1.full(), a2.full(), a3.full()), 2)
self.assertLess(tn.linalg.norm(a.full()-af)/tn.linalg.norm(af), 1e-14, "torchtt.cat() failed.")
def test_pad(self):
"""
Test the tensor padding in TT.
"""
A = tntt.randn((5,6,7,8),(1,2,3,4,1))
Ap = tntt.pad(A, ((1,2),(1,4),(3,5),(2,1)), value = 1/2)
self.assertLess(tn.linalg.norm(A.full()-Ap.full()[1:6,1:7,3:10,2:10])/Ap.norm(), 1e-15, "torchtt.pad() fail 1.")
self.assertLess(abs(tn.mean(Ap.full()[6:,7:,10:,10:])-1/2), 1e-15, "torchtt.pad() fail 2.")
self.assertLess(abs(tn.mean(Ap.full()[:1,:1,:3,:2])-1/2), 1e-15, "torchtt.pad() fail 3.")
# TTM case
M = tntt.randn(((3,2),(4,4),(5,2)),(1,3,2,1))
Mp = tntt.pad(M, ((1,2),(1,4),(3,5)), value = 1/2)
err = tn.linalg.norm(M.full() - Mp.full()[1:4,1:5,3:8,1:3,1:5,3:5])
self.assertLess(err/M.norm(), 1e-15, "torchtt.pad() TTM fail 1.")
n = 3
err = tn.linalg.norm(tn.reshape(Mp.full()[:1,:1,:3,:1,:1,:3],[n,n]) - 1/2*tn.eye(n))
self.assertLess(err/n, 1e-15, "torchtt.pad() TTM fail 2.")
n = 40
err = tn.linalg.norm(tn.reshape(Mp.full()[4:, 5:, 8:, 3:, 5:, 5:],[n,n])-tn.eye(n)/2)
self.assertLess(err/n, 1e-15, "torchtt.pad() TTM fail 3.")
def test_diag(self):
"""
Test the torchtt.diag() function.
"""
n = [3,4,5]
I = tntt.eye(n, dtype = tn.float64)
dg = tntt.diag(I)
self.assertLess(err_rel(dg.full(), tntt.ones(n).full()), 1e-13, "torchtt.diag() TTM->TT failed.")
o = tntt.ones(n)
E = tntt.diag(o)
self.assertLess(err_rel(E.full(), tntt.eye(n).full() ), 1e-13, "torchtt.diag() TT->TTM failed.")
if __name__ == '__main__':
unittest.main() | 23,011 | 38.00339 | 244 | py |
torchTT | torchTT-main/tests/test_cross.py | """
Test the cross approximation method.
"""
import unittest
import torchtt as tntt
import torch as tn
import numpy as np
err_rel = lambda t, ref : tn.linalg.norm(t-ref).numpy() / tn.linalg.norm(ref).numpy() if ref.shape == t.shape else np.inf
class TestCrossApproximation(unittest.TestCase):
    def test_dmrg_corss_interpolation(self):
        """
        Test the DMRG cross interpolation method.

        NOTE(review): method name has a typo ("corss" -> "cross"); kept as-is
        because test runners discover tests by name.
        """
        # multivariate function of the integer multi-index I (one row per point)
        func1 = lambda I: 1/(2+tn.sum(I+1,1).to(dtype=tn.float64))
        N = [20]*4
        x = tntt.interpolate.dmrg_cross(func1, N, eps = 1e-7)
        Is = tntt.meshgrid([tn.arange(0,n,dtype=tn.float64) for n in N])
        # reference assembled from the meshgrid: 1/(2 + sum_k (i_k + 1))
        x_ref = 1/(2+Is[0].full()+Is[1].full()+Is[2].full()+Is[3].full()+4)
        self.assertLess(err_rel(x.full(),x_ref),1e-6,"Error dmrg cross interpolation")
    def test_function_interpolate_multivariable(self):
        """
        Test the cross interpolation method for multivariate function approximation.
        """
        # multivariate function of the integer multi-index I (one row per point)
        func1 = lambda I: 1/(2+tn.sum(I+1,1).to(dtype=tn.float64))
        N = [20]*4
        Is = tntt.meshgrid([tn.arange(0,n,dtype=tn.float64) for n in N])
        # reference assembled from the meshgrid: 1/(2 + sum_k (i_k + 1))
        x_ref = 1/(2+Is[0].full()+Is[1].full()+Is[2].full()+Is[3].full()+4)
        y = tntt.interpolate.function_interpolate(func1, Is, 1e-8)
        self.assertLess(err_rel(y.full(),x_ref),1e-7,"Error dmrg cross interpolation")
    def test_function_interpolate_univariate(self):
        """
        Test the cross interpolation method applied elementwise to an existing TT tensor.
        """
        N = [20]*4
        Is = tntt.meshgrid([tn.arange(0,n,dtype=tn.float64) for n in N])
        x_ref = 1/(2+Is[0].full()+Is[1].full()+Is[2].full()+Is[3].full()+4)
        x = tntt.TT(x_ref)
        # apply log() elementwise to the TT tensor x via cross interpolation
        y = tntt.interpolate.function_interpolate(lambda x : tn.log(x), x, eps = 1e-7)
        self.assertLess(err_rel(y.full(),tn.log(x_ref)),1e-6,"Error dmrg cross interpolation")
torchTT | torchTT-main/torchtt/_dmrg.py | """
DMRG implementation for fast matvec product.
Inspired by TT-Toolbox from MATLAB.
@author: ion
"""
import torchtt
import torch as tn
from torchtt._decomposition import rank_chop, QR, SVD
import datetime
import opt_einsum as oe
try:
import torchttcpp
_flag_use_cpp = True
except:
import warnings
warnings.warn("\x1B[33m\nC++ implementation not available. Using pure Python.\n\033[0m")
_flag_use_cpp = False
def dmrg_matvec(A, x, y0 = None,nswp = 20, eps = 1e-12, rmax = 32768, kickrank = 4, verb = False, use_cpp = True):
    """
    Perform fast matrix vector multiplication `y = Ax` in the TT using the DMRG algorithm.
    Uses C++ backend if available.

    Args:
        A (TT): TT matrix
        x (TT): TT tensor
        y0 (TT, optional): initial guess of the result (if None is provided a random tensor is generated as a guess). Defaults to None.
        nswp (int, optional): number of sweeps. Defaults to 20.
        eps (float, optional): relative accuracy. Defaults to 1e-12.
        rmax (int, optional): maximum rank. Defaults to 32768.
        kickrank (int, optional): kickrank. Defaults to 4.
        verb (bool, optional): show debug info. Defaults to False.
        use_cpp (bool, optional): flag to choose between the python and C++ implementation (if available). Defaults to True.

    Returns:
        TT: the result.
    """
    if _flag_use_cpp and use_cpp:
        # dispatch to the compiled kernel; empty lists signal a missing initial guess
        return torchtt.TT(torchttcpp.dmrg_mv(A.cores, x.cores, [] if y0 is None else y0.cores, A.M, A.N, x.R, [] if y0 is None else y0.R, nswp, eps, rmax, kickrank, verb))
        #return dmrg_matvec_python(A, x, y0, nswp, eps, rmax, kickrank, verb)
    else:
        return dmrg_matvec_python(A, x, y0, nswp, eps, rmax, kickrank, verb)
def dmrg_matvec_python(A, x, y0 = None, nswp = 20, eps = 1e-12, rmax = 32768, kickrank = 4, verb = False):
    """
    Perform fast matrix vector multiplication `y = Ax` in the TT using the DMRG algorithm.

    Args:
        A (TT): TT matrix
        x (TT): TT tensor
        y0 (TT, optional): initial guess of the result (if None is provided a random tensor is generated as a guess). Defaults to None.
        nswp (int, optional): number of sweeps. Defaults to 20.
        eps (float, optional): relative accuracy. Defaults to 1e-12.
        rmax (int, optional): maximum rank. Defaults to 32768.
        kickrank (int, optional): kickrank. Defaults to 4.
        verb (bool, optional): show debug info. Defaults to False.

    Returns:
        TT: the result.
    """
    if y0 == None:
        # random initial guess with uniform rank 2 on the row shape of A
        y0 = torchtt.random(A.M,2, dtype=A.cores[0].dtype, device = A.cores[0].device)
    y_cores = y0.cores
    Ry = y0.R.copy()
    d = len(x.N)
    if isinstance(rmax,int):
        # a scalar rmax applies to every interior rank
        rmax = [1] + [rmax]*(d-1) + [1]
    N = x.N
    M = A.M
    r_enlarge = [2]*d
    # boundary interface tensors; interior entries are filled during the sweeps
    Phis = [tn.ones((1,1,1),dtype=A.cores[0].dtype, device = A.cores[0].device)] + [None]*(d-1) + [tn.ones((1,1,1),dtype=A.cores[0].dtype, device = A.cores[0].device)]
    delta_cores = [1.0]*(d-1)
    delta_cores_prev = [1.0]*(d-1)
    last = False
    for i in range(nswp):
        if verb: print('sweep ',i)
        # TME = datetime.datetime.now()
        # right-to-left pass: orthogonalize y and build the right interfaces Phis[k]
        for k in range(d-1,0,-1):
            core = y_cores[k]
            core = tn.reshape(tn.permute(core,[1,2,0]),[M[k]*Ry[k+1],Ry[k]])
            Q, R = QR(core)
            rnew = min([core.shape[0],core.shape[1]])
            # update current core
            y_cores[k] = (tn.reshape(Q.T,[rnew,M[k],-1]))
            Ry[k] = rnew
            # and the k-1 one
            core_next = tn.reshape(y_cores[k-1],[y_cores[k-1].shape[0]*y_cores[k-1].shape[1],y_cores[k-1].shape[2]]) @ R.T
            y_cores[k-1] = (tn.reshape(core_next,[-1,M[k-1],rnew]))
            # update Phi
            Phi = tn.einsum('ijk,mnk->ijmn',Phis[k+1],tn.conj(x.cores[k])) # shape rk x rAk x rxk-1 x Nk
            Phi = tn.einsum('ijkl,mlnk->ijmn',tn.conj(A.cores[k]),Phi) # shape rAk-1 x Nk x rk x rxk-1
            Phi = tn.einsum('ijkl,mjk->mil',Phi,y_cores[k]) # shape rk-1 x rAk-1 x rxk-1
            # Phi = tn.einsum('YAX,amnA,ymY,xnX->yax', Phis[k+1], tn.conj(A.cores[k]), y_cores[k], x.cores[k])
            Phis[k] = Phi
        # TME = datetime.datetime.now()-TME
        # print('first ',TME.total_seconds())
        # DMRG
        # left-to-right pass: optimize supercores of two neighbouring TT cores
        for k in range(d-1):
            if verb: print('\tcore ',k)
            W_prev = tn.einsum('ijk,klm->ijlm',y_cores[k],y_cores[k+1])
            # TME = datetime.datetime.now()
            if not last:
                # from left
                W1 = tn.einsum('ijk,klm->ijlm',Phis[k],tn.conj(x.cores[k])) # shape rk-1 x rAk-1 x Nk x rxk
                W1 = tn.einsum('ijkl,mikn->mjln',tn.conj(A.cores[k]),W1) # shape rk-1 x Mk x rAk x rxk
                # from right
                W2 = tn.einsum('ijk,mnk->njmi',Phis[k+2],tn.conj(x.cores[k+1])) # shape Nk+1 x rAk+1 x rxk x rk+1
                W2 = tn.einsum('ijkl,klmn->ijmn',tn.conj(A.cores[k+1]),W2) # shape rAk x Mk+1 x rxk x rk+1
                # new supercore
                W = tn.einsum('ijkl,kmln->ijmn',W1,W2)
            else:
                W = tn.conj(W_prev)
            # relative change of the supercore since the previous sweep
            b = tn.linalg.norm(W)
            if b != 0:
                a = tn.linalg.norm(W-tn.conj(W_prev))
                delta_cores[k] = (a/b).cpu().numpy()
            else:
                delta_cores[k] = 0
            # adapt the rank-enlargement heuristic based on the convergence trend
            if delta_cores[k]/delta_cores_prev[k] >= 1 and delta_cores[k]>eps:
                r_enlarge[k] += 1
            if delta_cores[k]/delta_cores_prev[k] < 0.1 and delta_cores[k]<eps:
                r_enlarge[k] = max(1,r_enlarge[k]-1)
            # SVD
            U, S, V = SVD(tn.reshape(W,[W.shape[0]*W.shape[1],-1]))
            # new rank is...
            r_new = rank_chop(S.cpu().numpy(),(b.cpu()*eps/(d**(0.5 if last else 1.5))).numpy())
            # enlarge ranks
            if not last: r_new += r_enlarge[k]
            # ranks must remain valid
            r_new = min([r_new,S.shape[0],rmax[k+1]])
            r_new = max(1,r_new)
            # truncate the SVD matrices and spit into 2 cores
            W1 = U[:,:r_new]
            W2 = ( V[:r_new,:].T @ tn.diag(S[:r_new]))
            # TME = datetime.datetime.now()
            if i < nswp-1:
                # kick-rank
                W1, Rmat = QR(tn.cat((W1,tn.randn((W1.shape[0],kickrank),dtype=W1.dtype,device=A.cores[0].device)),axis=1))
                W2 = tn.cat((W2,tn.zeros((W2.shape[0],kickrank),dtype=W2.dtype, device = W2.device)),axis=1)
                W2 = tn.einsum('ij,kj->ki',W2,Rmat)
                r_new = W1.shape[1]
            else:
                W2 = W2.t()
            # TME = datetime.datetime.now()-TME
            # print('\t\t ',TME.total_seconds())
            # TME = datetime.datetime.now()
            if verb: print('\tcore ',k,': delta ',delta_cores[k],' rank ',Ry[k+1],' ->',r_new)
            Ry[k+1] = r_new
            # print(k,W1.shape,W2.shape,Ry,N)
            # write the two halves of the supercore back into the TT
            y_cores[k] = tn.conj(tn.reshape(W1,[Ry[k],M[k],r_new]))
            y_cores[k+1] = tn.conj(tn.reshape(W2,[r_new,M[k+1],Ry[k+2]]))
            #Wc = tn.einsum('ijk,klm->ijlm', tn.conj(y_cores[k]), tn.conj(y_cores[k+1]))
            # print('decomposition ',tn.linalg.norm(Wc-W)/tn.linalg.norm(W))
            # advance the left interface to position k+1
            Phi_next = tn.einsum('ijk,kmn->ijmn',Phis[k],tn.conj(x.cores[k])) # shape rk-1 x rAk-1 x Nk x rxk
            Phi_next = tn.einsum('ijkl,jmkn->imnl',Phi_next,tn.conj(A.cores[k])) # shape rk-1 x Mk x rAk x rxk
            Phi_next = tn.einsum('ijm,ijkl->mkl',y_cores[k],Phi_next) # shape rk x rAk x rxk
            Phis[k+1] = Phi_next+0
        # TME = datetime.datetime.now()-TME
        # print('\t\t ',TME.total_seconds())
        # after a converged sweep, run one final truncation-only sweep and stop
        if last : break
        if max(delta_cores) < eps:
            last = True
        delta_cores_prev = delta_cores.copy()
    return torchtt.TT(y_cores)
def dmrg_hadamard(x, y, z0 = None, nswp = 20, eps = 1e-12, rmax = 32768, kickrank = 4, verb = False, use_cpp = True):
    """
    Perform fast elementwise (Hadamard) multiplication ``z = x * y`` in the TT format using the DMRG algorithm.

    The C++ backend does not implement this operation yet, therefore the python
    implementation is always used. The ``use_cpp`` flag is kept for interface
    compatibility and future use.

    Args:
        x (TT): TT tensor.
        y (TT): TT tensor.
        z0 (TT, optional): initial guess of the result (if None is provided a random tensor is generated as a guess). Defaults to None.
        nswp (int, optional): number of sweeps. Defaults to 20.
        eps (float, optional): relative accuracy. Defaults to 1e-12.
        rmax (int, optional): maximum rank. Defaults to 32768.
        kickrank (int, optional): kickrank. Defaults to 4.
        verb (bool, optional): show debug info. Defaults to False.
        use_cpp (bool, optional): flag to choose between the python and C++ implementation (currently ignored, see above). Defaults to True.

    Returns:
        TT: the result.
    """
    # NOTE: the C++ dispatch branch that used to live here was dead code
    # (guarded by `if False and ...`) and referenced undefined names (`A`,
    # `y0`, `torchttcpp`); dispatch directly to the python implementation.
    return dmrg_hadamard_python(x, y, z0, nswp, eps, rmax, kickrank, verb)
def dmrg_hadamard_python(z, x, y0 = None, nswp = 20, eps = 1e-12, rmax = 32768, kickrank = 4, verb = False):
    """
    Perform fast elementwise (Hadamard) multiplication `y = z * x` of two TT tensors using the DMRG algorithm.

    Args:
        z (TT): TT tensor.
        x (TT): TT tensor.
        y0 (TT, optional): initial guess of the result (if None is provided a random tensor is generated as a guess). Defaults to None.
        nswp (int, optional): number of sweeps. Defaults to 20.
        eps (float, optional): relative accuracy. Defaults to 1e-12.
        rmax (int, optional): maximum rank. Defaults to 32768.
        kickrank (int, optional): kickrank. Defaults to 4.
        verb (bool, optional): show debug info. Defaults to False.

    Returns:
        TT: the result.
    """
    # start from a rank-2 random guess if none was supplied
    if y0 == None:
        y0 = torchtt.random(z.N, 2, dtype = z.cores[0].dtype, device = z.cores[0].device)
    y_cores = y0.cores
    Ry = y0.R.copy()
    d = len(x.N)
    if isinstance(rmax,int):
        rmax = [1] + [rmax]*(d-1) + [1]
    N = x.N
    M = z.N
    # per-position rank enrichment; adapted during the sweeps
    r_enlarge = [2]*d
    # boundary interfaces are 1x1x1 ones; interior entries filled during sweeps
    Phis = [tn.ones((1,1,1), dtype=z.cores[0].dtype, device = z.cores[0].device)] + [None]*(d-1) + [tn.ones((1,1,1),dtype=z.cores[0].dtype, device = z.cores[0].device)]
    delta_cores = [1.0]*(d-1)
    delta_cores_prev = [1.0]*(d-1)
    last = False
    for i in range(nswp):
        if verb: print('sweep ',i)
        # right-to-left orthogonalization pass; also builds the right interfaces Phis[k]
        for k in range(d-1,0,-1):
            core = y_cores[k]
            core = tn.reshape(tn.permute(core,[1,2,0]),[M[k]*Ry[k+1],Ry[k]])
            Q, R = QR(core)
            rnew = min([core.shape[0],core.shape[1]])
            # update current core
            y_cores[k] = (tn.reshape(Q.T,[rnew,M[k],-1]))
            Ry[k] = rnew
            # and the k-1 one (absorb the R factor to the left)
            core_next = tn.reshape(y_cores[k-1],[y_cores[k-1].shape[0]*y_cores[k-1].shape[1],y_cores[k-1].shape[2]]) @ R.T
            y_cores[k-1] = (tn.reshape(core_next,[-1,M[k-1],rnew]))
            # update Phi
            Phi = tn.einsum('ijk,mnk->ijmn',Phis[k+1],tn.conj(x.cores[k])) # shape rk x rAk x rxk-1 x Nk
            Phi = tn.einsum('ikl,mlnk->ikmn',tn.conj(z.cores[k]),Phi) # shape rAk-1 x Nk x rk x rxk-1
            Phi = tn.einsum('ijkl,mjk->mil',Phi,y_cores[k]) # shape rk-1 x rAk-1 x rxk-1
            Phis[k] = Phi
        # DMRG sweep left to right: optimize two neighboring cores at once
        for k in range(d-1):
            if verb: print('\tcore ',k)
            # current supercore; used to measure the size of the update
            W_prev = tn.einsum('ijk,klm->ijlm',y_cores[k],y_cores[k+1])
            if not last:
                # from left
                W1 = tn.einsum('ijk,klm->ijlm',Phis[k],tn.conj(x.cores[k])) # shape rk-1 x rAk-1 x Nk x rxk
                W1 = tn.einsum('ikl,mikn->mkln',tn.conj(z.cores[k]),W1) # shape rk-1 x Mk x rAk x rxk
                # from right
                W2 = tn.einsum('ijk,mnk->njmi',Phis[k+2],tn.conj(x.cores[k+1])) # shape Nk+1 x rAk+1 x rxk x rk+1
                W2 = tn.einsum('ikl,klmn->ikmn',tn.conj(z.cores[k+1]),W2) # shape rAk x Mk+1 x rxk x rk+1
                # new supercore
                W = tn.einsum('ijkl,kmln->ijmn',W1,W2)
            else:
                W = tn.conj(W_prev)
            # relative change of the supercore
            b = tn.linalg.norm(W)
            if b != 0:
                a = tn.linalg.norm(W-tn.conj(W_prev))
                delta_cores[k] = (a/b).cpu().numpy()
            else:
                delta_cores[k] = 0
            # adapt the rank enrichment depending on the convergence behavior
            if delta_cores[k]/delta_cores_prev[k] >= 1 and delta_cores[k]>eps:
                r_enlarge[k] += 1
            if delta_cores[k]/delta_cores_prev[k] < 0.1 and delta_cores[k]<eps:
                r_enlarge[k] = max(1,r_enlarge[k]-1)
            # SVD
            U, S, V = SVD(tn.reshape(W,[W.shape[0]*W.shape[1],-1]))
            # new rank is...
            r_new = rank_chop(S.cpu().numpy(),(b.cpu()*eps/(d**(0.5 if last else 1.5))).numpy())
            # enlarge ranks
            if not last: r_new += r_enlarge[k]
            # ranks must remain valid
            r_new = min([r_new,S.shape[0],rmax[k+1]])
            r_new = max(1,r_new)
            # truncate the SVD matrices and split into 2 cores
            W1 = U[:,:r_new]
            W2 = ( V[:r_new,:].T @ tn.diag(S[:r_new]))
            if i < nswp-1:
                # kick-rank: enrich the subspace with random directions
                W1, Rmat = QR(tn.cat((W1,tn.randn((W1.shape[0],kickrank),dtype=W1.dtype,device=z.cores[0].device)),axis=1))
                W2 = tn.cat((W2,tn.zeros((W2.shape[0],kickrank),dtype=W2.dtype, device = W2.device)),axis=1)
                W2 = tn.einsum('ij,kj->ki',W2,Rmat)
                r_new = W1.shape[1]
            else:
                W2 = W2.t()
            if verb: print('\tcore ',k,': delta ',delta_cores[k],' rank ',Ry[k+1],' ->',r_new)
            Ry[k+1] = r_new
            y_cores[k] = tn.conj(tn.reshape(W1,[Ry[k],M[k],r_new]))
            y_cores[k+1] = tn.conj(tn.reshape(W2,[r_new,M[k+1],Ry[k+2]]))
            # advance the left interface using the freshly updated core
            Phi_next = tn.einsum('ijk,kmn->ijmn',Phis[k],tn.conj(x.cores[k])) # shape rk-1 x rAk-1 x Nk x rxk
            Phi_next = tn.einsum('ijkl,jkn->iknl',Phi_next,tn.conj(z.cores[k])) # shape rk-1 x Mk x rAk x rxk
            Phi_next = tn.einsum('ijm,ijkl->mkl',y_cores[k],Phi_next) # shape rk x rAk x rxk
            Phis[k+1] = Phi_next+0
        if last : break
        # after convergence, run one extra sweep without enrichment to recompress
        if max(delta_cores) < eps:
            last = True
        delta_cores_prev = delta_cores.copy()
    return torchtt.TT(y_cores)
| 16,355 | 42.384615 | 171 | py |
torchTT | torchTT-main/torchtt/_aux_ops.py | """
Additional operations.
@author: ion
"""
import torch as tn
def apply_mask(cores, R, indices):
    """
    Evaluate a tensor given by its TT cores at a set of multi-indices.

    Args:
        cores (list[torch.tensor]): the TT cores, core k having shape r_k x N_k x r_{k+1}.
        R (list[int]): the TT ranks (not used; kept for interface compatibility).
        indices (torch.tensor): integer tensor of shape M x d; row m holds the
            multi-index of the m-th entry to evaluate.

    Returns:
        torch.tensor: vector with the M evaluated entries.
    """
    d = len(cores)
    dt = cores[0].dtype
    M = len(indices)
    # Contract the cores left to right, keeping one row of the running rank
    # product per requested entry. The accumulator is created on the cores'
    # device so evaluation also works for tensors stored on GPU (the original
    # omitted `device`, which fails for CUDA cores).
    result = tn.ones((M,1), dtype = dt, device = cores[0].device)
    for i in range(d):
        result = tn.einsum('ij,jik->ik',result,cores[i][:,indices[:,i],:])
    return tn.squeeze(result)
def dense_matvec(cores, other):
    """
    Apply a TT-matrix (given by its cores) to a dense tensor.

    Supports broadcasting over leading batch dimensions: for a TT-matrix of
    shape (M1 x ... x Md) x (N1 x ... x Nd) and an input of shape
    B1 x ... x Bn x N1 x ... x Nd, the output has shape B1 x ... x Bn x M1 x ... x Md.

    Args:
        cores (list[torch.tensor]): TT cores of the operator, core k having shape r_k x M_k x N_k x r_{k+1}.
        other (torch.tensor): the dense input tensor.

    Returns:
        torch.tensor: the dense result.
    """
    d = len(cores)
    batch_rank = len(other.shape) - d
    # the trailing singleton plays the role of the leading TT rank (equal to 1)
    out = other[..., None]
    for core in cores:
        # contract the current input mode with the column index of the core and
        # the running rank dimension with the core's left rank
        out = tn.tensordot(out, core, ([batch_rank, -1], [2, 0]))
    return tn.squeeze(out, -1)
def bilinear_form_aux(x_cores, A_cores, y_cores, d):
    """
    Compute the bilinear form x^T A y given the TT cores of all three operands.

    Args:
        x_cores (list[torch.tensor]): the TT cores of x.
        A_cores (list[torch.tensor]): the TT cores of the operator A.
        y_cores (list[torch.tensor]): the TT cores of y.
        d (int): number of modes.

    Returns:
        torch.tensor: the result as a scalar torch.tensor.
    """
    acc = tn.ones((1, 1, 1), dtype = A_cores[0].dtype, device = A_cores[0].device)
    # sweep over the modes, absorbing one core of each operand per step;
    # x is conjugated (for the real case this is exactly x^T A y)
    for xc, Ac, yc in zip(x_cores[:d], A_cores[:d], y_cores[:d]):
        tmp = tn.einsum('lsr,lmL->srmL', acc, tn.conj(xc))
        tmp = tn.einsum('srmL,smnS->LSrn', tmp, Ac)
        acc = tn.einsum('LSrn,rnR->LSR', tmp, yc)
    return tn.squeeze(acc)
| 2,198 | 25.817073 | 135 | py |
torchTT | torchTT-main/torchtt/errors.py | """
Contains the errors used in the `torchtt` package.
"""
class ShapeMismatch(Exception):
    """Raised when the shapes of the involved tensors do not match each other."""
class RankMismatch(Exception):
    """Raised when the TT-ranks of the inputs are incompatible."""
class IncompatibleTypes(Exception):
    """Raised when the argument kinds are incompatible.

    Usually means that a TT matrix was passed where a TT tensor was expected
    (or vice versa).
    """
class InvalidArguments(Exception):
    """Raised when arguments of an invalid type or value are passed."""
torchTT | torchTT-main/torchtt/nn.py | """
Implements a basic TT layer for constructing deep TT networks.
"""
import torch as tn
import torch.nn as nn
import torchtt
from ._aux_ops import dense_matvec
from .errors import *
class LinearLayerTT(nn.Module):
    """
    Basic class for TT layers. See `Tensorizing Neural Networks <https://arxiv.org/abs/1509.06569>`_ for a detailed description.
    It can be used similarily to any layer from `torch.nn`.
    The output of the layer is :math:`\\mathcal{LTT}(\\mathsf{x}) =\\mathsf{Wx}+\\mathsf{b}`, where the tensor operator :math:`\\mathsf{W}` is represented in the TT format (with a fixed prescribed rank).
    """
    def __init__(self, size_in, size_out, rank, dtype = tn.float32, initializer = 'He'):
        """
        The constructor of the TT layer class takes as arguments the input shape and the output shape for the layer, the rank as well as the dtype and the initializer.

        Possible initializers are:

        * ``'He'`` for He Normal (He-et-al) initialization.
        * ``'Glo'`` for Glorot initialization.

        Args:
            size_in (list[int]): the size of the input tensor.
            size_out (list[int]): the size of the output tensor.
            rank (list[int]): the rank of the tensor operator.
            dtype (torch.dtype, optional): the dtype of the layer. Defaults to torch.float32.
            initializer (str, optional): the initializer for the weights and biases. Defaults to 'He'.

        Raises:
            InvalidArguments: Initializer not defined. Possible choices are 'He' and 'Glo'.
        """
        super().__init__()
        self.size_in, self.size_out, self.rank = size_in, size_out, rank
        # the two initializers differ only in the variance of the weights, so
        # compute the variance first and share the construction code
        n_in = tn.prod(tn.tensor([s for s in size_in]))
        if initializer == 'He':
            var = 2 / n_in
        elif initializer == 'Glo':
            var = 1 / (n_in + tn.prod(tn.tensor([s for s in size_out])))
        else:
            raise InvalidArguments('Initializer not defined. Possible choices are \'He\' and \'Glo\'.')
        t = torchtt.randn([(s2,s1) for s1,s2 in zip(size_in,size_out)], rank, dtype=dtype, var = var)
        self.cores = nn.ParameterList([nn.Parameter(c) for c in t.cores])
        # bias initialized to zero
        self.bias = nn.Parameter(tn.zeros(size_out, dtype = dtype))

    @tn.jit.export
    def forward(self, x):
        """
        Computes the output of the layer for the given input.

        Supports trailing dimensions broadcasting: if the input size of the layer is
        ``[M1,...,Md]`` and a tensor of shape ``[...,M1,...,Md]`` is provided, the
        contraction is performed along the last d dimensions.

        Args:
            x (torch.tensor): input of the layer.

        Returns:
            torch.tensor: output of the layer.
        """
        result = tn.unsqueeze(x,-1)
        d = len(self.size_in)
        D = len(x.shape)
        for c in self.cores:
            result = tn.tensordot(result,c,([D-d,-1],[2,0]))
        result = tn.squeeze(result,-1)
        return result+self.bias
| 3,607 | 41.447059 | 222 | py |
torchTT | torchTT-main/torchtt/_extras.py | """
This file implements additional functions that are visible in the module.
"""
import torch as tn
import torch.nn.functional as tnf
from torchtt._decomposition import mat_to_tt, to_tt, lr_orthogonal, round_tt, rl_orthogonal, QR, SVD, rank_chop
from torchtt._division import amen_divide
import numpy as np
import math
from torchtt._dmrg import dmrg_matvec
from torchtt._aux_ops import apply_mask, dense_matvec, bilinear_form_aux
from torchtt.errors import *
#from ._tt_base import TT
import torchtt._tt_base
import sys
def eye(shape, dtype=tn.float64, device = None):
    """
    Construct the TT decomposition of a multidimensional identity matrix.
    All the TT ranks are 1.

    Args:
        shape (list[int]): the mode sizes.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Returns:
        torchtt.TT: the TT identity matrix.
    """
    cores = []
    for s in list(shape):
        # wrap each identity factor with singleton rank dimensions
        factor = tn.eye(s, dtype=dtype, device=device)
        cores.append(factor[None, :, :, None])
    return torchtt._tt_base.TT(cores)
def zeros(shape, dtype=tn.float64, device = None):
    """
    Construct a tensor that contains only zeros.
    The shape can be a list of ints or a list of tuples of ints. The second case creates a TT matrix.

    Args:
        shape (list[int] | list[tuple[int]]): the shape.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Shape must be a list.

    Returns:
        torchtt.TT: the zero tensor.
    """
    if not isinstance(shape, list):
        raise InvalidArguments('Shape must be a list.')
    if len(shape) == 0:
        # consistent with ones(): an empty shape yields an empty TT object
        # (the original raised IndexError here)
        return torchtt._tt_base.TT(None)
    if isinstance(shape[0], tuple):
        # we create a TT-matrix
        cores = [tn.zeros([1, m, n, 1], dtype=dtype, device=device) for m, n in shape]
    else:
        # we create a TT-tensor
        cores = [tn.zeros([1, n, 1], dtype=dtype, device=device) for n in shape]
    return torchtt._tt_base.TT(cores)
def kron(first, second):
    """
    Computes the tensor Kronecker product of two TT objects.

    If None is provided as one input, the result is a copy of the other tensor.
    If A is N_1 x ... x N_d and B is M_1 x ... x M_p, then kron(A,B) is
    N_1 x ... x N_d x M_1 x ... x M_p.

    Args:
        first (torchtt.TT | None): first argument.
        second (torchtt.TT | None): second argument.

    Raises:
        IncompatibleTypes: Incompatible data types (make sure both are either TT-matrices or TT-tensors).
        InvalidArguments: Invalid arguments.

    Returns:
        torchtt.TT: the result.
    """
    if first == None and isinstance(second, torchtt._tt_base.TT):
        return torchtt._tt_base.TT([c.clone() for c in second.cores])
    if second == None and isinstance(first, torchtt._tt_base.TT):
        return torchtt._tt_base.TT([c.clone() for c in first.cores])
    if isinstance(first, torchtt._tt_base.TT) and isinstance(second, torchtt._tt_base.TT):
        if first.is_ttm != second.is_ttm:
            raise IncompatibleTypes('Incompatible data types (make sure both are either TT-matrices or TT-tensors).')
        # the cores of the product are simply the concatenated core lists
        return torchtt._tt_base.TT([c.clone() for c in first.cores] + [c.clone() for c in second.cores])
    raise InvalidArguments('Invalid arguments.')
def ones(shape, dtype=tn.float64, device = None):
    """
    Construct a tensor that contains only ones.
    The shape can be a list of ints or a list of tuples of ints. The second case creates a TT matrix.

    Args:
        shape (list[int] or list[tuple[int]]): the shape.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Shape must be a list.

    Returns:
        torchtt.TT: the one tensor.
    """
    if not isinstance(shape, list):
        raise InvalidArguments('Shape must be a list.')
    if len(shape) == 0:
        return torchtt._tt_base.TT(None)
    if isinstance(shape[0], tuple):
        # we create a TT-matrix
        cores = [tn.ones([1, m, n, 1], dtype=dtype, device=device) for m, n in shape]
    else:
        # we create a TT-tensor
        cores = [tn.ones([1, n, 1], dtype=dtype, device=device) for n in shape]
    return torchtt._tt_base.TT(cores)
def xfun(shape, dtype = tn.float64, device = None):
    """
    Construct a tensor whose entries count from 0 to tn.prod(shape)-1 along the multiindex.
    The shape must be a list of ints.

    Args:
        shape (list[int]): the shape.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Shape must be a list.

    Returns:
        torchtt.TT: the xfun tensor (the interior TT ranks are all 2).
    """
    if isinstance(shape, list):
        d = len(shape)
        if d == 0:
            return torchtt._tt_base.TT(None)
        if d == 1:
            return torchtt._tt_base.TT(tn.arange(shape[0], dtype = dtype, device = device))
        else:
            # Explicit rank-2 construction: one rank channel accumulates the
            # offsets ni*i_k, the other passes the partial result through
            # (identity slices).
            cores = []
            firstcore = tn.ones(1, shape[0], 2, dtype = dtype, device = device)
            firstcore[0, :, 0] = tn.arange(shape[0], dtype = dtype, device = device)
            cores.append(firstcore)
            # ni is the product of the mode sizes processed so far
            ni = tn.tensor(shape[0], dtype = dtype, device = device)
            for i in range(1, d - 1):
                core = tn.zeros((2, shape[i], 2), dtype = dtype, device = device)
                for j in range(shape[i]):
                    core[:, j, :] = tn.eye(2, dtype = dtype, device = device)
                core[1, :, 0] = ni * tn.arange(shape[i], dtype = dtype, device = device)
                ni *= shape[i]
                cores.append(core)
            core = tn.ones((2, shape[d - 1], 1), dtype = dtype, device = device)
            core[1, :, 0] = ni * tn.arange(shape[d - 1], dtype = dtype, device = device)
            cores.append(core)
    else:
        raise InvalidArguments('Shape must be a list.')
    return torchtt._tt_base.TT(cores)
def linspace(shape = [1], a = 0.0, b = 0.0, dtype = tn.float64, device = None):
    """
    Construct an evenly spaced tensor from a to b with a given shape in the TT format.
    The shape must be a list of ints.

    Args:
        shape (list[int]): the shape.
        a (float): start value.
        b (float): end value.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Shape must be a list.

    Returns:
        torchtt.TT: a linspace tensor.
    """
    if not isinstance(shape, list):
        raise InvalidArguments('Shape must be a list.')
    d = len(shape)
    if d == 0:
        return torchtt._tt_base.TT(None)
    if d == 1:
        # torch.linspace takes (start, end, steps); the original passed
        # (shape[0], a, b) which is the wrong argument order
        return torchtt._tt_base.TT(tn.linspace(a, b, shape[0], dtype = dtype, device = device))
    # build the tensor as a + stepsize * xfun(shape); dtype/device are
    # propagated to the building blocks
    x = xfun(shape, dtype = dtype, device = device)
    oneTensor = ones(shape, dtype = dtype, device = device)
    # math.prod avoids the original tn.prod(..., device=...) call, which passed
    # an unsupported keyword argument to torch.prod
    N = math.prod(shape)
    stepsize = (b - a) * 1.0 / (N - 1)
    T = a * oneTensor + x * stepsize
    return T.round(1e-15)
def arange(shape = [1], a = 0, b = 0, step = 1, dtype = tn.float64, device = None):
    """
    Construct a TT tensor with evenly spaced values within a given interval.

    The flat vector tn.arange(a, b, step) is reshaped (in the TT format) to the
    given shape, which therefore has to match its length.

    Args:
        shape (list[int]): the shape.
        a (float): start value.
        b (float): end value.
        step (int): step size.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Shape must be a list.

    Returns:
        torchtt.TT: an evenly spaced tensor within the given interval.
    """
    if not isinstance(shape, list):
        raise InvalidArguments('Shape must be a list.')
    d = len(shape)
    if d == 0:
        return torchtt._tt_base.TT(None)
    values = tn.arange(a, b, step, dtype = dtype, device = device)
    if d == 1:
        return torchtt._tt_base.TT(values)
    # for more than one mode, fold the flat vector into the requested shape
    return reshape(torchtt._tt_base.TT(values), shape)
def random(N, R, dtype = tn.float64, device = None):
    """
    Returns a tensor of shape N with random cores of rank R.
    Each core entry is normal distributed with mean 0 and variance 1.
    Check also torchtt.randn() for better random tensors in the TT format.

    Args:
        N (list[int] or list[tuple[int]]): the shape of the tensor. If the elements are tuples of integers, we deal with a TT-matrix.
        R (list[int] or int): can be a list if the exact rank is specified or an integer if the maximum rank is specified.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Check if N and R are right.

    Returns:
        torchtt.TT: the result.
    """
    if isinstance(R, int):
        # a scalar rank is expanded to a full rank vector with rank-1 borders
        R = [1] + [R] * (len(N) - 1) + [1]
    elif len(N) + 1 != len(R) or R[0] != 1 or R[-1] != 1 or len(N) == 0:
        raise InvalidArguments('Check if N and R are right.')
    cores = []
    for i, n in enumerate(N):
        sz = [R[i], n[0], n[1], R[i+1]] if isinstance(n, tuple) else [R[i], n, R[i+1]]
        cores.append(tn.randn(sz, dtype = dtype, device = device))
    return torchtt._tt_base.TT(cores)
def randn(N, R, var = 1.0, dtype = tn.float64, device = None):
    """
    A torchtt.TT tensor of shape N = [N1 x ... x Nd] and rank R is returned.
    The entries of the full tensor are almost normal distributed with the variance var.

    Args:
        N (list[int]): the shape.
        R (list[int]): the rank.
        var (float, optional): the variance. Defaults to 1.0.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Returns:
        torchtt.TT: the result.
    """
    d = len(N)
    # distribute the total variance evenly over the cores
    core_var = (var / np.prod(R)) ** (1 / d)
    scale = np.sqrt(core_var)
    cores = []
    for i in range(d):
        sz = [R[i], N[i][0], N[i][1], R[i+1]] if isinstance(N[i], tuple) else [R[i], N[i], R[i+1]]
        cores.append(tn.randn(sz, dtype = dtype, device = device) * scale)
    return torchtt._tt_base.TT(cores)
def reshape(tens, shape, eps = 1e-16, rmax = sys.maxsize):
    """
    Reshapes a torchtt.TT tensor in the TT format.
    A rounding is also performed.

    Args:
        tens (torchtt.TT): the input tensor.
        shape (list[int] or list[tuple[int]]): the desired shape. In the case of a TT operator the shape has to be given as list of tuples of ints [(M1,N1),...,(Md,Nd)].
        eps (float, optional): relative accuracy. Defaults to 1e-16.
        rmax (int, optional): maximum rank. Defaults to the maximum possible integer.

    Raises:
        ShapeMismatch: The product of modes should remain equal. Check the given shape.

    Returns:
        torchtt.TT: the resulting tensor.
    """
    dfin = len(shape)
    # bring the cores in right-to-left orthogonal form first
    cores, R = rl_orthogonal(tens.cores, tens.R, tens.is_ttm)
    if tens.is_ttm:
        # TT matrix case: separate the row and column mode sizes
        M = []
        N = []
        for t in shape:
            M.append(t[0])
            N.append(t[1])
        if np.prod(tens.N)!=np.prod(N) or np.prod(tens.M)!=np.prod(M):
            raise ShapeMismatch('The product of modes should remain equal. Check the given shape.')
        core = cores[0]
        cores_new = []
        idx = 0        # index into the original cores
        idx_shape = 0  # index into the target shape
        while True:
            if core.shape[1] % M[idx_shape] == 0 and core.shape[2] % N[idx_shape] == 0:
                if core.shape[1] // M[idx_shape] > 1 or core.shape[2] // N[idx_shape] > 1:
                    # the working core is larger than the target mode: split it
                    m1 = M[idx_shape]
                    m2 = core.shape[1] // m1
                    n1 = N[idx_shape]
                    n2 = core.shape[2] // n1
                    r1 = core.shape[0]
                    r2 = core.shape[-1]
                    tmp = tn.reshape(core,[r1*m1,m2,n1,n2*r2])
                    crz,_ = mat_to_tt(tmp, [r1*m1,m2], [n1,n2*r2], eps/np.sqrt(dfin-1), rmax)
                    cores_new.append(tn.reshape(crz[0],[r1,m1,n1,-1]))
                    core = tn.reshape(crz[1],[-1,m2,n2,r2])
                else:
                    # the working core matches the target mode exactly: keep it
                    cores_new.append(core+0)
                    if idx == len(cores)-1:
                        break
                    else:
                        idx+=1
                        core = cores[idx]
                idx_shape += 1
                if idx_shape == len(shape):
                    break
            else:
                # the working core is too small: merge it with the next one
                idx += 1
                if idx>=len(cores):
                    break
                core = tn.einsum('ijkl,lmno->ijmkno',core,cores[idx])
                core = tn.reshape(core,[core.shape[0],core.shape[1]*core.shape[2],-1,core.shape[-1]])
    else:
        if np.prod(tens.N)!=np.prod(shape):
            raise ShapeMismatch('The product of modes should remain equal. Check the given shape.')
        core = cores[0]
        cores_new = []
        idx = 0        # index into the original cores
        idx_shape = 0  # index into the target shape
        while True:
            if core.shape[1] % shape[idx_shape] == 0:
                if core.shape[1] // shape[idx_shape] > 1:
                    # the working core is larger than the target mode: split it
                    s1 = shape[idx_shape]
                    s2 = core.shape[1] // s1
                    r1 = core.shape[0]
                    r2 = core.shape[2]
                    tmp = tn.reshape(core,[r1*s1,s2*r2])
                    crz,_ = to_tt(tmp,tmp.shape,eps/np.sqrt(dfin-1),rmax)
                    cores_new.append(tn.reshape(crz[0],[r1,s1,-1]))
                    core = tn.reshape(crz[1],[-1,s2,r2])
                else:
                    # the working core matches the target mode exactly: keep it
                    cores_new.append(core+0)
                    if idx == len(cores)-1:
                        break
                    else:
                        idx+=1
                        core = cores[idx]
                idx_shape += 1
                if idx_shape == len(shape):
                    break
            else:
                # the working core is too small: merge it with the next one
                idx += 1
                if idx>=len(cores):
                    break
                core = tn.einsum('ijk,klm->ijlm',core,cores[idx])
                core = tn.reshape(core,[core.shape[0],-1,core.shape[-1]])
    return torchtt._tt_base.TT(cores_new).round(eps)
def meshgrid(vectors):
    """
    Creates a meshgrid of torchtt.TT objects. Similar to numpy.meshgrid or torch.meshgrid.
    The input is a list of d torch.tensor vectors of sizes N_1, ... ,N_d.
    The result is a list of torchtt.TT instances of shapes N1 x ... x Nd.

    Args:
        vectors (list[torch.tensor]): the vectors (1d tensors).

    Returns:
        list[TT]: the resulting meshgrid.
    """
    dtype = vectors[0].dtype
    grids = []
    for i, vec in enumerate(vectors):
        # rank-1 tensor: all-ones cores except at position i, which carries vec
        cores = [tn.ones((1, v.shape[0], 1), dtype=dtype) for v in vectors]
        cores[i] = tn.reshape(vec, [1, -1, 1])
        grids.append(torchtt._tt_base.TT(cores))
    return grids
def dot(a,b,axis=None):
    """
    Computes the dot product between 2 tensors in TT format.
    If both a and b have identical mode sizes the result is the dot product.
    If a and b have inequal mode sizes, the function performs index contraction.
    The number of dimensions of a must be greater or equal as b.
    The modes of the tensor a along which the index contraction with b is performed are given in axis.
    For the complex case (a,b) = b^H . a.

    Examples:

    .. code-block:: python

        a = torchtt.randn([3,4,5,6,7],[1,2,2,2,2,1])
        b = torchtt.randn([3,4,5,6,7],[1,2,2,2,2,1])
        c = torchtt.randn([3,5,6],[1,2,2,1])
        print(torchtt.dot(a,b))
        print(torchtt.dot(a,c,[0,2,3]))

    Args:
        a (torchtt.TT): the first tensor.
        b (torchtt.TT): the second tensor.
        axis (list[int], optional): the mode indices for index contraction. Defaults to None.

    Raises:
        InvalidArguments: Both operands should be TT instances.
        NotImplementedError: Operation not implemented for TT-matrices.
        ShapeMismatch: Operands are not the same size.
        ShapeMismatch: Number of the modes of the first tensor must be equal with the second.

    Returns:
        float or torchtt.TT: the result. If no axis index is provided the result is a scalar otherwise a torchtt.TT object.
    """
    if not isinstance(a, torchtt._tt_base.TT) or not isinstance(b, torchtt._tt_base.TT):
        raise InvalidArguments('Both operands should be TT instances.')

    if axis == None:
        # treat first the full dot product
        # faster than partial projection
        if a.is_ttm or b.is_ttm:
            raise NotImplementedError('Operation not implemented for TT-matrices.')
        if a.N != b.N:
            raise ShapeMismatch('Operands are not the same size.')
        # sweep over the cores, contracting mode and rank indices as we go
        result = tn.tensor([[1.0]],dtype = a.cores[0].dtype, device=a.cores[0].device)
        for i in range(len(a.N)):
            result = tn.einsum('ab,aim,bin->mn',result, a.cores[i], tn.conj(b.cores[i]))
        result = tn.squeeze(result)
    else:
        # partial case
        if a.is_ttm or b.is_ttm:
            raise NotImplementedError('Operation not implemented for TT-matrices.')
        if len(a.N)<len(b.N):
            raise ShapeMismatch('Number of the modes of the first tensor must be equal with the second.')

        k = 0 # index for the tensor b
        cores_new = []
        rank_left = 1
        for i in range(len(a.N)):
            if i in axis:
                # contracted mode: take the (conjugated) core of b
                cores_new.append(tn.conj(b.cores[k]))
                rank_left = b.cores[k].shape[2]
                k+=1
            else:
                # free mode: insert a rank-carrying all-ones core so that the
                # ranks of b are transported across the uncontracted modes
                rank_right = b.cores[k].shape[0] if i+1 in axis else rank_left
                cores_new.append(tn.conj(tn.einsum('ik,j->ijk',tn.eye(rank_left,rank_right,dtype=a.cores[0].dtype),tn.ones([a.N[i]],dtype=a.cores[0].dtype))))
        result = (a*torchtt._tt_base.TT(cores_new)).sum(axis)
    return result
def bilinear_form(x,A,y):
    """
    Computes the bilinear form x^T A y for TT tensors.

    Args:
        x (torchtt.TT): TT tensor.
        A (torchtt.TT): TT matrix.
        y (torchtt.TT): TT tensor.

    Raises:
        InvalidArguments: Inputs must be torchtt.TT instances.
        IncompatibleTypes: x and y must be TT tensors and A must be TT matrix.
        ShapeMismatch: Check the shapes. Required is x.N == A.M and y.N == A.N.

    Returns:
        torch.tensor: the result of the bilinear form as tensor with 1 element.
    """
    if not all(isinstance(t, torchtt._tt_base.TT) for t in (x, A, y)):
        raise InvalidArguments("Inputs must be torchtt.TT instances.")
    if x.is_ttm or y.is_ttm or not A.is_ttm:
        raise IncompatibleTypes("x and y must be TT tensors and A must be TT matrix.")
    if x.N != A.M or y.N != A.N:
        raise ShapeMismatch("Check the shapes. Required is x.N == A.M and y.N == A.N.")
    # delegate the actual core contraction to the helper
    return bilinear_form_aux(x.cores, A.cores, y.cores, len(x.N))
def elementwise_divide(x, y, eps = 1e-12, starting_tensor = None, nswp = 50, kick = 4, local_iterations = 40, resets = 2, preconditioner = None, verbose = False):
    """
    Perform the elementwise division x/y of two tensors in the TT format using the AMEN method.
    Use this method if different AMEN arguments are needed.
    This method does not check the validity of the inputs.

    Args:
        x (torchtt.TT or scalar): first tensor (can also be scalar of type float, int, torch.tensor with shape (1)).
        y (torchtt.TT): second tensor.
        eps (float, optional): relative accuracy. Defaults to 1e-12.
        starting_tensor (torchtt.TT or None, optional): initial guess of the result (None for random initial guess). Defaults to None.
        nswp (int, optional): number of iterations. Defaults to 50.
        kick (int, optional): size of rank enrichment. Defaults to 4.
        local_iterations (int, optional): the number of iterations for the local iterative solver. Defaults to 40.
        resets (int, optional): the number of restarts in the GMRES solver. Defaults to 2.
        preconditioner (string, optional): use preconditioner for the local solver (possible values None, 'c'). Defaults to None.
        verbose (bool, optional): display debug info. Defaults to False.

    Returns:
        torchtt.TT: the result.
    """
    # amen_divide solves y * result = x, i.e. result = x / y elementwise
    result_cores = amen_divide(y, x, nswp, starting_tensor, eps,
                               rmax = 1000, kickrank = kick,
                               local_iterations = local_iterations,
                               resets = resets, verbose = verbose,
                               preconditioner = preconditioner)
    return torchtt._tt_base.TT(result_cores)
def rank1TT(elements):
    """
    Compute the rank-1 TT from a list of vectors (or matrices).

    Args:
        elements (list[torch.tensor]): the list of vectors (or matrices in case a TT matrix should be created).

    Returns:
        torchtt.TT: the resulting TT object.
    """
    # wrap every factor with singleton rank dimensions on both sides
    cores = [tn.unsqueeze(tn.unsqueeze(e, 0), -1) for e in elements]
    return torchtt._tt_base.TT(cores)
def numel(tensor):
    """
    Return the number of entries needed to store the TT cores of the given tensor.

    Args:
        tensor (torchtt.TT): the TT representation of the tensor.

    Returns:
        int: number of floats stored for the TT decomposition.
    """
    total = 0
    for i in range(len(tensor.N)):
        total += tn.numel(tensor.cores[i])
    return total
def diag(input):
    """
    Creates a diagonal TT matrix from a TT tensor or extracts the diagonal of a TT matrix.

    * If a TT matrix is provided the result is a TT tensor representing the diagonal :math:` \\mathsf{x}_{i_1...i_d} = \\mathsf{A}_{i_1...i_d,i_1...i_d} `
    * If a TT tensor is provided the result is a diagonal TT matrix with the entries :math:` \\mathsf{A}_{i_1...i_d,j_1...j_d} = \\mathsf{x}_{i_1...i_d} \\delta_{i_1}^{j_1} \\cdots \\delta_{i_d}^{j_d} `

    Args:
        input (TT): the input.

    Raises:
        InvalidArguments: Input must be a torchtt.TT instance.

    Returns:
        torchtt.TT: the result.
    """
    if not isinstance(input, torchtt._tt_base.TT):
        raise InvalidArguments("Input must be a torchtt.TT instance.")
    if input.is_ttm:
        # extract the diagonal of every core: r x n x n x r' -> r x n x r'
        return torchtt._tt_base.TT([tn.diagonal(c, dim1 = 1, dim2 = 2).permute([0,2,1]) for c in input.cores])
    else:
        # blow every core up to a diagonal one. The identity factor is created
        # with the core's dtype/device: the original tn.eye(c.shape[1]) produced
        # a float32 CPU tensor, which makes the einsum fail for float64 cores
        # (and for cores on GPU).
        return torchtt._tt_base.TT([tn.einsum('ijk,jm->ijmk', c, tn.eye(c.shape[1], dtype = c.dtype, device = c.device)) for c in input.cores])
def permute(input, dims, eps = 1e-12):
    """
    Permutes the dimensions of the tensor. Works similarily to ``torch.permute``.
    Works like a bubble sort for both TT tensors and TT matrices: whenever two
    neighbouring modes are out of order, the corresponding cores are contracted,
    the mode indices are transposed, and the pair is split again with a
    truncated SVD.

    Examples:

        .. code-block:: python

            x_tt = torchtt.random([5,6,7,8,9],[1,2,3,4,2,1])
            xp_tt = torchtt.permute(x_tt, [4,3,2,1,0], 1e-10)
            print(xp_tt) # the shape of this tensor should be [9,8,7,6,5]

    Args:
        input (torchtt.TT): the input tensor.
        dims (list[int]): the order of the indices in the new tensor.
        eps (float, optional): the relative accuracy of the decomposition. Defaults to 1e-12.

    Raises:
        InvalidArguments: The input must be a TT tensor dims must be a list of integers or a tple of integers.
        ShapeMismatch: `dims` must be the length of the number of dimensions.
        InvalidArguments: Duplicate dims are not allowed.
        InvalidArguments: Dims should only contain integers from 0 to d-1.

    Returns:
        torchtt.TT: the resulting tensor.
    """
    if not isinstance(input, torchtt._tt_base.TT) :
        raise InvalidArguments("The input must be a TT tensor dims must be a list of integers or a tple of integers.")
    if len(dims) != len(input.N):
        raise ShapeMismatch("`dims` must be the length of the number of dimensions.")
    if len(dims) != len(set(dims)):
        raise InvalidArguments("Duplicate dims are not allowed.")
    if min(dims) != 0 or max(dims) != len(input.N)-1:
        raise InvalidArguments("Dims should only contain integers from 0 to d-1.")

    # bring the cores into right-to-left orthogonal form before sweeping
    cores, R = rl_orthogonal(input.cores, input.R, input.is_ttm)

    d = len(cores)
    # distribute the accuracy budget over the individual SVD splits
    eps = eps/(d**1.5)
    indices = list(range(d))

    last_idx = 0
    inversions = True
    # bubble-sort style sweeps: repeat until no neighbouring pair is inverted
    while inversions:
        inversions = False

        for i in range(d-1):
            i1 = indices[i]
            i2 = indices[i+1]
            if dims.index(i1)>dims.index(i2):
                # inverion in the index permutation => the cores must be swapped.
                inversions = True
                indices[i] = i2
                indices[i+1] = i1
                # print(indices,' permute ', i1, i2)
                last_idx = i
                if input.is_ttm:
                    #reorthonormalize
                    # NOTE(review): `last_idx` is set to `i` just above, so
                    # `range(last_idx, i)` is empty and this loop never runs —
                    # confirm whether the `last_idx` from the previous swap was
                    # intended here.
                    for k in range(last_idx, i):
                        # NOTE(review): `Q, R = QR(...)` shadows the rank list
                        # `R` with the QR R-factor, and the einsum below uses a
                        # 3d subscript pattern although this is the TT-matrix
                        # (4d core) branch — if this loop ever executed it would
                        # likely fail; review needed.
                        Q, R = QR(tn.reshape(cores[k],[cores[k].shape[0]*cores[k].shape[1]*cores[k].shape[2], cores[k].shape[3]]))
                        R[k+1] = Q.shape[1]
                        cores[k] = tn.reshape(Q, [cores[k].shape[0], cores[k].shape[1], cores[k].shape[2], -1])
                        cores[k+1] = tn.einsum('ij,jkl->ikl',R,cores[k+1])
                    # contract the neighbouring cores, swap their mode indices
                    # and split again with a truncated SVD
                    n2 = [cores[i].shape[1], cores[i].shape[2]]
                    core = tn.einsum('ijkl,lmno->ijkmno',cores[i],cores[i+1])
                    core = tn.permute(core, [0,3,4,1,2,5])
                    U,S,V = SVD(tn.reshape(core, [core.shape[0]*core.shape[1]*core.shape[2],-1]))
                    if S.is_cuda:
                        r_now = min([rank_chop(S.cpu().numpy(),tn.linalg.norm(S).cpu().numpy()*eps)])
                    else:
                        r_now = min([rank_chop(S.numpy(),tn.linalg.norm(S).numpy()*eps)])
                    US = U[:,:r_now]@tn.diag(S[:r_now])
                    V = V[:r_now,:]
                    cores[i] = tn.reshape(US,[cores[i].shape[0],cores[i+1].shape[1],cores[i+1].shape[2],-1])
                    # NOTE(review): for a 4d TT-matrix core the new rank sits at
                    # shape[3], not shape[2] — looks copy-pasted from the TT
                    # branch; `R` is not used after this point, so it appears
                    # harmless. Confirm.
                    R[i+1] = cores[i].shape[2]
                    cores[i+1] = tn.reshape(V, [-1]+ n2 +[cores[i+1].shape[3]])
                else:
                    #reorthonormalize
                    # NOTE(review): same as above — `range(last_idx, i)` is
                    # always empty here, and `Q, R = QR(...)` would shadow the
                    # rank list `R` if it ever ran.
                    for k in range(last_idx, i):
                        Q, R = QR(tn.reshape(cores[k],[cores[k].shape[0]*cores[k].shape[1], cores[k].shape[2]]))
                        R[k+1] = Q.shape[1]
                        cores[k] = tn.reshape(Q, [cores[k].shape[0], cores[k].shape[1],-1])
                        cores[k+1] = tn.einsum('ij,jkl->ikl',R,cores[k+1])
                    # contract, transpose the two mode indices, split via SVD
                    n2 = cores[i].shape[1]
                    core = tn.einsum('ijk,klm->ijlm',cores[i],cores[i+1])
                    core = tn.permute(core, [0,2,1,3])
                    U,S,V = SVD(tn.reshape(core, [core.shape[0]*core.shape[1],-1]))
                    if S.is_cuda:
                        r_now = min([rank_chop(S.cpu().numpy(),tn.linalg.norm(S).cpu().numpy()*eps)])
                    else:
                        r_now = min([rank_chop(S.numpy(),tn.linalg.norm(S).numpy()*eps)])
                    US = U[:,:r_now]@tn.diag(S[:r_now])
                    V = V[:r_now,:]
                    cores[i] = tn.reshape(US,[cores[i].shape[0],cores[i+1].shape[1],-1])
                    R[i+1] = cores[i].shape[2]
                    cores[i+1] = tn.reshape(V, [-1, n2, cores[i+1].shape[2]])

    return torchtt._tt_base.TT(cores)
def save(tensor, path):
    """
    Save a `torchtt.TT` object in a file.

    Examples:

        .. code-block:: python

            import torchtt
            #generate a TT object
            A = torchtt.randn([10,20,30,40,4,5],[1,6,5,4,3,2,1])
            # save the TT object
            torchtt.save(A,"./test.TT")
            # load the TT object
            B = torchtt.load("./test.TT")
            # the loaded should be the same
            print((A-B).norm()/A.norm())

    Args:
        tensor (torchtt.TT): the tensor to be saved.
        path (str): the file name.

    Raises:
        InvalidArguments: First argument must be a torchtt.TT instance.
    """
    if not isinstance(tensor, torchtt._tt_base.TT):
        raise InvalidArguments("First argument must be a torchtt.TT instance.")

    # TT matrices additionally carry their row mode sizes M
    if tensor.is_ttm:
        payload = {"is_ttm": tensor.is_ttm, "R": tensor.R, "M": tensor.M, "N": tensor.N, "cores": tensor.cores}
    else:
        payload = {"is_ttm": tensor.is_ttm, "R": tensor.R, "N": tensor.N, "cores": tensor.cores}
    tn.save(payload, path)
def load(path):
    """
    Load a torchtt.TT object from a file.

    Examples:

        .. code-block:: python

            import torchtt
            #generate a TT object
            A = torchtt.randn([10,20,30,40,4,5],[1,6,5,4,3,2,1])
            # save the TT object
            torchtt.save(A,"./test.TT")
            # load the TT object
            B = torchtt.load("./test.TT")
            # the loaded should be the same
            print((A-B).norm()/A.norm())

    Args:
        path (str): the file name.

    Returns:
        torchtt.TT: the tensor.
    """
    # only the cores are needed to rebuild the TT object
    checkpoint = tn.load(path)
    return torchtt._tt_base.TT(checkpoint['cores'])
def cat(tensors, dim = 0):
    """
    Concatenate tensors in the TT format along a given dimension `dim`. Only works for TT tensors and not TT matrices.

    Examples:

        .. code-block:: python

            import torchtt
            import torch
            a1 = torchtt.randn((3,4,2,6,7), [1,2,3,4,2,1])
            a2 = torchtt.randn((3,4,8,6,7), [1,3,1,7,5,1])
            a3 = torchtt.randn((3,4,15,6,7), [1,3,10,2,4,1])
            a = torchtt.cat((a1,a2,a3),2)
            af = torch.cat((a1.full(), a2.full(), a3.full()), 2)
            print(torch.linalg.norm(a.full()-af))

    Args:
        tensors (tuple[TT]): the tensors to be concatenated. Their mode sizes must match for all modes except the concatenating dimension.
        dim (int, optional): The dimension to be concatenated after. Defaults to 0.

    Raises:
        InvalidArguments: Not implemented for tensor matrices.
        InvalidArguments: The tensors must have the same number of dimensions.
        InvalidArguments: The mode sizes must be the same on the nonconcatenated dimensions for all the provided tensors.

    Returns:
        torchtt.TT: the result.
    """
    if(len(tensors) == 0):
        return None
    if tensors[0].is_ttm:
        raise InvalidArguments("Not implemented for tensor matrices.")

    # validate the inputs and collect the individual rank lists
    Rs = [tensors[0].R]
    for i in range(1, len(tensors)):
        if tensors[i].is_ttm:
            raise InvalidArguments("Not implemented for tensor matrices.")
        if len(tensors[i].N) != len(tensors[0].N):
            raise InvalidArguments("The tensors must have the same number of dimensions.")
        # BUGFIX: the original check combined the two comparisons with `and`,
        # so a mismatch on only one side of `dim` was silently accepted.
        if tensors[i].N[:dim] != tensors[0].N[:dim] or tensors[i].N[(dim+1):] != tensors[0].N[(dim+1):]:
            raise InvalidArguments("The mode sizes must be the same on the nonconcatenated dimensions for all the provided tensors.")
        Rs.append(tensors[i].R)

    # ranks of the result: sum of the individual ranks (boundary ranks stay 1)
    r_sum = [1]
    for i in range(1, len(tensors[0].N)):
        r_sum.append(sum(Rs[k][i] for k in range(len(tensors))))
    r_sum.append(1)

    cores = []
    for i in range(len(tensors[0].N)):
        # allocate the block-diagonal core for mode i
        if i == dim:
            n = sum(t.N[dim] for t in tensors)
            cores.append(tn.zeros((r_sum[i], n, r_sum[i+1]), device = tensors[0].cores[0].device, dtype = tensors[0].cores[0].dtype))
        else:
            cores.append(tn.zeros((r_sum[i], tensors[0].N[i], r_sum[i+1]), device = tensors[0].cores[0].device, dtype = tensors[0].cores[0].dtype))

        # copy every tensor's core into its own block; only the concatenated
        # mode advances the mode offset, and the boundary ranks (first/last
        # core) are not offset since they stay equal to 1
        offset1 = 0
        offset2 = 0
        offset3 = 0
        for t in tensors:
            if i == dim:
                cores[i][offset1:(offset1+t.cores[i].shape[0]), offset2:(offset2+t.cores[i].shape[1]), offset3:(offset3+t.cores[i].shape[2])] = t.cores[i]
                if i > 0: offset1 += t.cores[i].shape[0]
                offset2 += t.cores[i].shape[1]
                if i < len(tensors[0].N)-1: offset3 += t.cores[i].shape[2]
            else:
                cores[i][offset1:(offset1+t.cores[i].shape[0]), :, offset3:(offset3+t.cores[i].shape[2])] = t.cores[i]
                if i > 0: offset1 += t.cores[i].shape[0]
                if i < len(tensors[0].N)-1: offset3 += t.cores[i].shape[2]

    return torchtt._tt_base.TT(cores)
def pad(tensor, padding, value = 0.0):
    """
    Pad a tensor in the TT format.

    The `padding` argument is a tuple of tuples `((b1, a1), (b2, a2), ... , (bd, ad))`.
    Each dimension is padded with `bk` at the beginning and `ak` at the end. The padding value is constant and is given as the argument `value`.
    If fewer pairs than dimensions are given, they are applied to the trailing dimensions.
    In case of a TT operator, diagonal padding is performed. On the diagonal, the provided `value` is inserted.

    Args:
        tensor (TT): the tensor to be padded.
        padding (tuple(tuple(int))): the paddings.
        value (float, optional): the value to pad. Defaults to 0.0.

    Raises:
        InvalidArguments: The number of paddings should not exceed the number of dimensions of the tensor.

    Returns:
        TT: the result.
    """
    if(len(padding) > len(tensor.N)):
        raise InvalidArguments("The number of paddings should not exceed the number of dimensions of the tensor.")
    if tensor.is_ttm:
        cores = [c.clone() for c in tensor.cores]
        # walk the dimensions from the last one backwards (zip pairs the
        # paddings with the trailing modes); inner cores additionally get an
        # extra rank slice on each side to carry the diagonal blocks
        for pad,k in zip(reversed(padding),reversed(range(len(tensor.N)))):
            cores[k] = tnf.pad(cores[k],(1 if k < len(tensor.N)-1 else 0,1 if k < len(tensor.N)-1 else 0,pad[0],pad[1],pad[0],pad[1],1 if k>0 else 0,1 if k>0 else 0),value = 0)
            # leading diagonal block (padding inserted before the modes)
            cores[k][0,:pad[0],:pad[0],0] = value*tn.eye(pad[0], device = cores[k].device, dtype = cores[k].dtype)
            # trailing diagonal block (padding inserted after the modes)
            cores[k][-1,(pad[0]+tensor.M[k]):,(pad[0]+tensor.N[k]):,-1] = value*tn.eye(pad[1], device = cores[k].device, dtype = cores[k].dtype)
            # only the first processed core carries `value`; the remaining
            # cores contribute a factor of 1 so the product stays `value`
            value = 1
    else:
        # NOTE(review): `value` is divided by the product of all ranks so that
        # the rank contraction of the constant padding blocks reproduces the
        # requested constant — verify this for nontrivial ranks.
        rprod = np.prod(tensor.R)
        value = value/rprod
        cores = [c.clone() for c in tensor.cores]
        for pad,k in zip(reversed(padding),reversed(range(len(tensor.N)))):
            cores[k] = tnf.pad(cores[k],(0,0,pad[0],pad[1],0,0),value = value)
            value = 1 if value != 0 else 0
    return torchtt._tt_base.TT(cores)
def shape_tuple_to_mn(shape):
    """
    Convert the shape of a TTM from tuple format to row and column shapes.

    Args:
        shape (list[tuple[int]]): the shape as a list of (row, column) tuples.

    Returns:
        tuple[list[int],list[int]]: the row shapes M and the column shapes N.
    """
    M = []
    N = []
    for s in shape:
        M.append(s[0])
        N.append(s[1])
    return M, N
def shape_mn_to_tuple(M, N):
    """
    Convert the shape of a TTM from row/column format to tuple format.

    Args:
        M (list[int]): row shapes.
        N (list[int]): column shapes.

    Returns:
        list[tuple[int]]: the shape as a list of (row, column) tuples.
    """
    # zip pairs the row and column sizes elementwise
    return list(zip(M, N))
torchTT | torchTT-main/torchtt/manifold.py | """
Manifold gradient module.
"""
import torch as tn
from torchtt._decomposition import mat_to_tt, to_tt, lr_orthogonal, round_tt, rl_orthogonal
from . import TT
from torchtt.errors import *
def _delta2cores(tt_cores, R, Sds, is_ttm = False, ortho = None):
    """
    Convert the delta notation to TT.

    Implements Algorithm 5.1 from "AUTOMATIC DIFFERENTIATION FOR RIEMANNIAN
    OPTIMIZATION ON LOW-RANK MATRIX AND TENSOR-TRAIN MANIFOLDS".

    Args:
        tt_cores (list[torch.tensor]): the TT cores.
        R (list[int]): the rank of the tensor (used only when ``ortho`` is None).
        Sds (list[torch.tensor]): the delta cores.
        is_ttm (bool, optional): is TT matrix or not. Defaults to False.
        ortho (list[list[torch.tensor]], optional): precomputed [left, right]
            orthogonal cores of ``tt_cores``. Computed on the fly if None.

    Returns:
        list[torch.tensor]: the resulting TT cores.
    """
    if ortho is None:
        left, _ = lr_orthogonal(tt_cores, R, is_ttm)
        right, _ = rl_orthogonal(tt_cores, R, is_ttm)
    else:
        left, right = ortho

    d = len(tt_cores)
    # mode axes end at index 2 for TT tensors and at 3 for TT matrices
    cat_dim = 3 if is_ttm else 2

    # first core: [delta_1, U_1]
    result = [tn.cat((Sds[0], left[0]), cat_dim)]

    # middle cores: [[V_k, 0], [delta_k, U_k]]
    for k in range(1, d - 1):
        zero_block = tn.zeros(right[k].shape, dtype=left[0].dtype, device=left[0].device)
        top = tn.cat((right[k], zero_block), cat_dim)
        bottom = tn.cat((Sds[k], left[k]), cat_dim)
        result.append(tn.cat((top, bottom), 0))

    # last core: [V_d; delta_d]
    result.append(tn.cat((right[-1], Sds[-1]), 0))
    return result
def riemannian_gradient(x,func):
    """
    Compute the Riemannian gradient using AD.

    Args:
        x (torchtt.TT): the point on the manifold where the gradient is computed.
        func: function that has to be differentiated. The function takes as only
            argument `torchtt.TT` instances and returns a scalar.

    Returns:
        torchtt.TT: the gradient projected on the tangent space of x.
    """
    left_ortho, _ = lr_orthogonal(x.cores, x.R, x.is_ttm)
    right_ortho, _ = rl_orthogonal(left_ortho, x.R, x.is_ttm)
    ranks = x.R
    d = len(x.N)

    # first delta is the first right-orthogonal core, the rest start at zero
    deltas = [right_ortho[0]] + [c * 0 for c in x.cores[1:]]

    # track gradients with respect to the deltas
    for delta in deltas:
        delta.requires_grad_(True)

    # AD part: evaluate func on the tangent-space representative and backprop
    augmented_cores = _delta2cores(x.cores, ranks, deltas, is_ttm = x.is_ttm, ortho = [left_ortho, right_ortho])
    value = func(TT(augmented_cores))
    value.backward()
    grads = [delta.grad for delta in deltas]

    # project every delta gradient (except the last one) onto the orthogonal
    # complement of the corresponding left-orthogonal core
    for k in range(d - 1):
        G = tn.reshape(grads[k], [-1, ranks[k + 1]])
        U = tn.reshape(left_ortho[k], [-1, ranks[k + 1]])
        grads[k] = tn.reshape(G - U @ (U.T @ G), left_ortho[k].shape)

    # convert the deltas back to a TT representation
    return TT(_delta2cores(x.cores, ranks, grads, x.is_ttm, ortho = [left_ortho, right_ortho]))
def riemannian_projection(Xspace,z):
    """
    Project the tensor z onto the tangent space defined at Xspace.

    Args:
        Xspace (torchtt.TT): the target where the tensor should be projected.
        z (torchtt.TT): the tensor that should be projected.

    Raises:
        IncompatibleTypes: Both must be of same type.

    Returns:
        torchtt.TT: the projection.
    """
    if Xspace.is_ttm != z.is_ttm:
        raise IncompatibleTypes('Both must be of same type.')
    is_ttm = Xspace.is_ttm

    # left- and right-orthogonal cores of the projection target
    l_cores,R = lr_orthogonal(Xspace.cores, Xspace.R, Xspace.is_ttm)
    r_cores,_ = rl_orthogonal(l_cores, R, Xspace.is_ttm)

    d = len(Xspace.N)
    N = Xspace.N

    # left interfaces: Pleft[k] contracts cores 0..k of the left-orthogonal
    # representation with the corresponding cores of z (size rk x sk)
    Pleft = []
    tmp = tn.ones((1,1),dtype=Xspace.cores[0].dtype, device = Xspace.cores[0].device)
    for k in range(d-1):
        if is_ttm:
            tmp = tn.einsum('rs,rijR,sijS->RS',tmp,l_cores[k],z.cores[k]) # size rk x sk
        else:
            tmp = tn.einsum('rs,riR,siS->RS',tmp,l_cores[k],z.cores[k]) # size rk x sk
        Pleft.append(tmp)

    # right interfaces: Pright[k] contracts cores k+1..d-1 of the
    # right-orthogonal representation with the corresponding cores of z
    Pright = []
    tmp = tn.ones((1,1), dtype = Xspace.cores[0].dtype, device = Xspace.cores[0].device)
    for k in range(d-1,0,-1):
        if is_ttm:
            tmp = tn.einsum('RS,rijR,sijS->rs',tmp,r_cores[k],z.cores[k]) # size rk x sk
        else:
            tmp = tn.einsum('RS,riR,siS->rs',tmp,r_cores[k],z.cores[k]) # size rk x sk
        Pright.append(tmp)
    Pright = Pright[::-1]

    # compute elements of the tangent space (the delta cores)
    Sds = []
    for k in range(d):
        if k==0:
            L = tn.ones((1,1),dtype=Xspace.cores[0].dtype, device = Xspace.cores[0].device)
        else:
            L = Pleft[k-1]
        if k==d-1:
            # last delta: only the left interface is applied
            if is_ttm:
                Sds.append(tn.einsum('rs,sjiS->rjiS',L,z.cores[k]))
            else:
                Sds.append(tn.einsum('rs,siS->riS',L,z.cores[k]))
        else:
            # NOTE(review): this rebinds `R` (previously the rank list) to the
            # right interface matrix; the rank list is later passed to
            # _delta2cores, which appears to ignore it when `ortho` is
            # provided — confirm it stays unused downstream.
            R = Pright[k]
            if is_ttm:
                # project out the component along the left-orthogonal core,
                # then apply the right interface
                tmp1 = tn.einsum('rs,sijS->rijS',L,z.cores[k])
                tmp2 = tn.einsum('rijR,RS->rijS',l_cores[k],tn.einsum('rs,rijR,sijS->RS',L,l_cores[k],z.cores[k]))
                Sds.append(tn.einsum('rijS,RS->rijR',tmp1-tmp2,R))
            else:
                tmp1 = tn.einsum('rs,siS->riS',L,z.cores[k])
                tmp2 = tn.einsum('riR,RS->riS',l_cores[k],tn.einsum('rs,riR,siS->RS',L,l_cores[k],z.cores[k]))
                Sds.append(tn.einsum('riS,RS->riR',tmp1-tmp2,R))

    # convert Sds to TT
    grad_cores = _delta2cores(Xspace.cores, R, Sds, Xspace.is_ttm,ortho = [l_cores,r_cores])
    return TT(grad_cores)
| 5,672 | 31.603448 | 138 | py |
torchTT | torchTT-main/torchtt/__init__.py |
r"""
Provides Tensor-Train (TT) decomposition using `pytorch` as backend.
Contains routines for computing the TT decomposition and all the basisc linear algebra in the TT format. Additionally, GPU support can be used thanks to the `pytorch` backend.
It also has linear solvers in TT and cross approximation as well as automatic differentiation.
.. include:: INTRO.md
"""
from ._tt_base import TT
from ._extras import eye, zeros, kron, ones, random, randn, reshape, meshgrid , dot, elementwise_divide, numel, rank1TT, bilinear_form, diag, permute, load, save, cat, pad, shape_mn_to_tuple, shape_tuple_to_mn
# from .torchtt import TT, eye, zeros, kron, ones, random, randn, reshape, meshgrid , dot, elementwise_divide, numel, rank1TT, bilinear_form, diag, permute, load, save, cat, pad
from ._dmrg import dmrg_hadamard
__all__ = ['TT', 'eye', 'zeros', 'kron', 'ones', 'random', 'randn', 'reshape', 'meshgrid', 'dot', 'elementwise_divide', 'numel', 'rank1TT', 'bilinear_form', 'diag', 'permute', 'load', 'save', 'cat', 'pad', 'shape_mn_to_tuple', 'shape_tuple_to_mn', 'dmrg_hadamard']
from . import solvers
from . import grad
# from .grad import grad, watch, unwatch
from . import manifold
from . import interpolate
from . import nn
from . import cpp
# from .errors import *
| 1,287 | 46.703704 | 264 | py |
torchTT | torchTT-main/torchtt/_decomposition.py | """
Basic decomposition and orthogonalization.
@author: ion
"""
import torch as tn
import numpy as np
def QR(mat):
    """
    Compute the reduced QR decomposition of a matrix.

    Args:
        mat (torch.tensor): the matrix to factorize.

    Returns:
        tuple[torch.tensor, torch.tensor]: the orthogonal factor Q and the
        upper triangular factor R such that ``Q @ R`` equals ``mat``.
    """
    q_factor, r_factor = tn.linalg.qr(mat)
    return q_factor, r_factor
def SVD(mat):
    """
    Compute the thin singular value decomposition of a matrix.

    For very tall matrices (more than 10x as many rows as columns) the SVD is
    computed on the transpose and the factors are swapped back. If
    ``torch.linalg.svd`` fails, a numpy fallback is used.

    Args:
        mat (torch.tensor): the matrix.

    Returns:
        tuple[torch.tensor, torch.tensor, torch.tensor]: the factors U, S, V
        such that ``U @ diag(S) @ V`` equals ``mat``.
    """
    if mat.shape[0] < 10*mat.shape[1]:
        try:
            u, s, v = tn.linalg.svd(mat, full_matrices=False)
            s = s.to(v.dtype)
            return u, s, v
        except Exception:
            # NOTE(review): this fallback assumes `mat` lives on the CPU; a GPU
            # tensor would need `.cpu()` before `.numpy()` — confirm.
            u, s, v = np.linalg.svd(mat.numpy(), full_matrices=False)
            return tn.tensor(u, dtype = mat.dtype, device = mat.device), tn.tensor(s, dtype = mat.dtype, device = mat.device), tn.tensor(v, dtype = mat.dtype, device = mat.device)
    else:
        # very tall matrix: decompose the transpose and swap the factors back
        try:
            u, s, v = tn.linalg.svd(mat.t(), full_matrices=False)
            s = s.to(v.dtype)
            return v.t(), s, u.t()
        except Exception:
            u, s, v = np.linalg.svd((mat.t()).numpy(), full_matrices=False)
            # BUGFIX: u and v are numpy arrays here; numpy arrays have the `.T`
            # property but no `.t()` method, so the original raised
            # AttributeError whenever this fallback was reached.
            return tn.tensor(v.T, dtype = mat.dtype, device = mat.device), tn.tensor(s, dtype = mat.dtype, device = mat.device), tn.tensor(u.T, dtype = mat.dtype, device = mat.device)
def lr_orthogonal(tt_cores, R, is_ttm, no_gpu = False):
    """
    Orthogonalize the TT-cores left to right.

    The rank list ``R`` is updated in place to the ranks of the orthogonalized
    representation.

    Args:
        tt_cores (list[torch.tensor]): the TT cores.
        R (list[int]): the TT ranks (length d+1); modified in place.
        is_ttm (bool): True if the cores belong to a TT matrix (4d cores).
        no_gpu (bool, optional): unused; kept for interface compatibility.

    Returns:
        tuple[list[torch.tensor], list[int]]: the orthogonal cores and the
        updated ranks.
    """
    # (removed the dead local `rank_next`, which was never used)
    d = len(tt_cores)
    core_now = tt_cores[0]
    cores_new = d*[None]

    for i in range(d-1):
        # flatten the current core to a matrix (rank and mode indices as rows)
        if is_ttm:
            mode_shape = [core_now.shape[1],core_now.shape[2]]
            core_now = tn.reshape(core_now,[core_now.shape[0]*core_now.shape[1]*core_now.shape[2],-1])
        else:
            mode_shape = [core_now.shape[1]]
            core_now = tn.reshape(core_now,[core_now.shape[0]*core_now.shape[1],-1])

        # perform QR
        Qmat, Rmat = QR(core_now)
        core_now = Qmat

        # absorb the R factor into the next core
        core_next = tt_cores[i+1]
        shape_next = list(core_next.shape[1:])
        core_next = tn.reshape(core_next,[core_next.shape[0],-1])
        core_next = Rmat @ core_next
        core_next = tn.reshape(core_next,[core_now.shape[1]]+shape_next)

        # update the cores and the rank between them
        cores_new[i] = tn.reshape(core_now,[R[i]]+mode_shape+[-1])
        R[i+1] = core_now.shape[1]
        cores_new[i+1] = core_next
        core_now = core_next

    return cores_new, R
def rl_orthogonal(tt_cores, R, is_ttm, no_gpu = False):
    """
    Orthogonalize the TT-cores right to left.

    The rank list ``R`` is updated in place to the ranks of the orthogonalized
    representation.

    Args:
        tt_cores (list[torch.tensor]): the TT cores.
        R (list[int]): the TT ranks (length d+1); modified in place.
        is_ttm (bool): True if the cores belong to a TT matrix (4d cores).
        no_gpu (bool, optional): unused; kept for interface compatibility.

    Returns:
        tuple[list[torch.tensor], list[int]]: the orthogonal cores and the
        updated ranks.
    """
    d = len(tt_cores)
    cores_new = d*[None]
    cores_new[-1] = tt_cores[-1]+0

    for i in range(d-1,0,-1):
        # flatten the current core (mode indices in the rows after transposing)
        if is_ttm:
            mode_shape = [cores_new[i].shape[1],cores_new[i].shape[2]]
            core_now = tn.reshape(cores_new[i],[cores_new[i].shape[0],cores_new[i].shape[2]*cores_new[i].shape[3]*cores_new[i].shape[1]]).t()
        else:
            mode_shape = [cores_new[i].shape[1]]
            core_now = tn.reshape(cores_new[i],[cores_new[i].shape[0],cores_new[i].shape[1]*cores_new[i].shape[2]]).t()

        # perform QR
        Qmat, Rmat = QR(core_now)
        # the new rank is given by the R factor
        # (removed the dead `rnew = min(...)` assignment that was immediately
        # overwritten in the original)
        rnew = Rmat.shape[0]

        # update the current core with the transposed orthogonal factor
        cores_new[i] = tn.reshape(Qmat.T,[rnew]+mode_shape+[-1])
        R[i] = cores_new[i].shape[0]

        # absorb the R factor into the previous core
        if is_ttm:
            mode_shape = [tt_cores[i-1].shape[1],tt_cores[i-1].shape[2]]
            core_next = tn.reshape(tt_cores[i-1],[tt_cores[i-1].shape[0]*tt_cores[i-1].shape[1]*tt_cores[i-1].shape[2],tt_cores[i-1].shape[3]]) @ Rmat.T
        else:
            mode_shape = [tt_cores[i-1].shape[1]]
            core_next = tn.reshape(tt_cores[i-1],[tt_cores[i-1].shape[0]*tt_cores[i-1].shape[1],tt_cores[i-1].shape[2]]) @ Rmat.T
        cores_new[i-1] = tn.reshape(core_next,[tt_cores[i-1].shape[0]]+mode_shape+[-1])

    return cores_new, R
def round_tt(tt_cores,R,eps,Rmax,is_ttm=False):
    """
    Rounds a TT-tensor: the cores are first left-right orthogonalized, then a
    right-to-left sweep of truncated SVDs reduces the ranks so that the
    relative error stays below ``eps`` and the ranks below ``Rmax``.

    Parameters
    ----------
    tt_cores : list of torch tensors.
        The TT cores.
    R : list of integers of length d+1.
        ranks of the TT-decomposition (updated in place).
    eps : double.
        desired rounding accuracy.
    Rmax : list of integers
        the maximum rank that is allowed at each position.
    is_ttm : bool, optional
        True for TT matrices (4d cores). The default is False.

    Returns
    -------
    tt_cores : list of torch tensors.
        The TT-cores of the rounded tensor.
    R : list of inteders of length d+1.
        rounded ranks.
    """
    d = len(tt_cores)
    if d == 1:
        # a single core has no internal ranks to truncate
        tt_cores = [tt_cores[0].clone()]
        return tt_cores, R
    tt_cores, R = lr_orthogonal(tt_cores, R, is_ttm)
    core_now = tt_cores[-1]
    # distribute the tolerance over the d-1 truncation steps
    eps = eps / np.sqrt(d-1)
    for i in range(d-1,0,-1):
        core_next = tt_cores[i-1]

        core_now = tn.reshape(core_now,[R[i],-1])
        core_next = tn.reshape(core_next,[-1,R[i]])

        # truncated SVD across the bond between cores i-1 and i
        U, S, V = SVD(core_now)
        if S.is_cuda:
            r_now = min([Rmax[i],rank_chop(S.cpu().numpy(),tn.linalg.norm(S).cpu().numpy()*eps)])
        else:
            r_now = min([Rmax[i],rank_chop(S.numpy(),tn.linalg.norm(S).numpy()*eps)])

        U = U[:,:r_now]
        S = S[:r_now]
        V = V[:r_now,:]

        # absorb U*diag(S) into the previous core; V becomes the current core
        U = U @ tn.diag(S)
        R[i] = r_now

        core_next = core_next @ U
        core_now = V
        tt_cores[i] = tn.reshape(core_now,[R[i]]+list(tt_cores[i].shape[1:-1])+[R[i+1]])
        tt_cores[i-1] = tn.reshape(core_next,[R[i-1]]+list(tt_cores[i-1].shape[1:-1])+[R[i]])
        core_now = core_next
    return tt_cores, R
def mat_to_tt(A,M,N,eps,rmax = 1000,is_sparse=False):
    """
    Compute the TT-matrix decomposition of A. A has the shape M x N, where M
    and N are of length d.

    Args:
        A (torch.tensor): the array (full tensor of shape ``M + N`` or a 2d
            matrix of shape ``prod(M) x prod(N)``).
        M (list[int]): the row mode sizes.
        N (list[int]): the column mode sizes.
        eps (float): the desired relative accuracy.
        rmax (int, optional): the maximum rank. Defaults to 1000.
        is_sparse (bool, optional): whether A is in sparse format (currently
            not handled). Defaults to False.

    Raises:
        ValueError: Dimension mismatch.

    Returns:
        tuple[list[torch.tensor], list[int]]: the 4d cores of the TT-matrix
        decomposition and the TT ranks.
    """
    d = len(M)
    if len(M) != len(N):
        # BUGFIX: the original `raise('Dimension mismatch')` raised a plain
        # string, which is itself a TypeError in Python 3; the `return`
        # following it was unreachable.
        raise ValueError('Dimension mismatch')

    if is_sparse:
        # sparse input is currently not handled
        pass
    else:
        # interleave row and column indices: (m1,...,md,n1,...,nd) -> (m1,n1,...,md,nd)
        A = tn.reshape(A, M+N)
        permute = tuple( np.arange(2*d).reshape([2,d]).transpose().flatten() )
        A = tn.permute(A,permute)
        # fuse every (m_k, n_k) pair into a single mode of size m_k*n_k
        A = tn.reshape(A, [i[0]*i[1] for i in zip(M,N)])

    # TT decomposition of the tensor with fused modes
    ttv, R = to_tt(A,eps=eps,rmax=rmax)

    cores = []
    # split the fused modes again: each core rIr' becomes rijr'
    for i in range(d):
        tmp = tn.permute(ttv[i], [1,0,2])
        tmp = tn.reshape(tmp,[M[i],N[i],tmp.shape[1],tmp.shape[2]])
        tmp = tn.permute(tmp, [2,0,1,3])
        cores.append(tmp)

    return cores, R
def rank_chop(s, eps):
    """
    Determine the truncation rank for a vector of singular values.

    The rank is the smallest index at which the squared norm of the discarded
    tail of singular values drops below ``eps**2`` (at least 1); if even the
    last singular value exceeds the tolerance, the full size is returned.

    Parameters
    ----------
    s : numpy vector
        Vector of singular values.
    eps : double
        Desired accuracy.

    Returns
    -------
    R : int
        Rank.
    """
    # an all-zero spectrum is represented with rank 1
    if np.linalg.norm(s) == 0.0:
        return 1
    # non-positive tolerance means no truncation at all
    if eps <= 0.0:
        return s.size
    # tail_norms[k] = sum of |s_j|^2 for j >= k (non-increasing)
    tail_norms = np.cumsum(np.abs(s[::-1]) ** 2)[::-1]
    # first position where the discarded tail is small enough
    rank = np.argmax(tail_norms < eps ** 2)
    if rank <= 0:
        rank = 1
    # even the last value is above tolerance: keep everything
    if tail_norms[-1] > eps ** 2:
        rank = s.size
    return rank
def to_tt(A,N=None,eps=1e-14,rmax=100,is_sparse=False):
    """
    Compute the TT cores of a full tensor A given the tolerance eps and the
    maximum rank. The TT-cores are returned as a list.

    Args:
        A (torch.tensor): the tensor to decompose.
        N (list[int], optional): the mode sizes; taken from ``A.shape`` if None.
        eps (float, optional): the desired relative accuracy. Defaults to 1e-14.
        rmax (int | list[int], optional): the maximum rank, either as a scalar
            or as a list of length d+1. Defaults to 100.
        is_sparse (bool, optional): unused flag for sparse input. Defaults to
            False.

    Returns:
        tuple[list[torch.tensor], list[int]]: the TT cores and the TT ranks.
    """
    # idiom fix: `N == None` replaced by an identity check
    if N is None:
        N = list(A.shape)
    d = len(N)
    r = [1]*(d+1)

    # normalize rmax to a list of length d+1 (boundary ranks are always 1)
    if not isinstance(rmax,list):
        rmax = [1] + (d-1)*[rmax] + [1]

    C = A
    cores = []
    # distribute the accuracy over the d-1 truncation steps
    ep = eps/np.sqrt(d-1)

    for i in range(d-1):
        m = N[i]*r[i]
        # reshape the remainder to a matrix and split one mode off via SVD
        C = tn.reshape(C, [m,-1])
        u, s, v = SVD(C)

        # choose the rank according to the eps tolerance and the rank cap
        r1 = rank_chop(s.cpu().numpy(), ep*tn.linalg.norm(s).cpu().numpy())
        r1 = min([r1,rmax[i+1]])
        u = u[:,:r1]
        s = s[:r1]
        r[i+1] = r1

        # the truncated left factor becomes the next core
        cores.append(tn.reshape(u,[r[i],N[i],r1]))

        # carry the remaining factors (diag(s) @ v) to the next step
        v = v[:r1,:]
        v = tn.diag(s) @ v
        C = v

    cores.append(tn.reshape(C,[r[-2],N[-1],-1]))
    return cores, r
| 10,713 | 25.324324 | 188 | py |
torchTT | torchTT-main/torchtt/_division.py | """
Elementwise division using AMEN
@author: ion
"""
import torch as tn
import numpy as np
import datetime
from torchtt._decomposition import QR, SVD, rl_orthogonal, lr_orthogonal
from torchtt._iterative_solvers import BiCGSTAB_reset, gmres_restart
import opt_einsum as oe
def local_product(Phi_right, Phi_left, coreA, core, shape):
    """
    Compute the local matrix-vector product used in the AMEN sweeps.

    Args:
        Phi_right (torch.tensor): the right interface tensor, shape r x R x r.
        Phi_left (torch.tensor): the left interface tensor, shape lp x Rp x lp.
        coreA (torch.tensor): the current core of the operator A, shape rp x N x r.
        core (torch.tensor): the current core of x, shape rp x N x r.
        shape (torch.Size): the shape of x (unused; kept for interface
            compatibility).

    Returns:
        torch.tensor: the result of the contraction.
    """
    w = oe.contract('lsr,smS,LSR,rmR->lmL',Phi_left,coreA,Phi_right,core)
    return w
class LinearOp():
    """
    Local linear operator for the AMEN micro-iterations.

    Wraps the contraction of the operator core with the left/right interface
    tensors and optionally applies a diagonal ('c') preconditioner.
    """

    def __init__(self, Phi_left, Phi_right, coreA, shape, prec):
        self.Phi_left = Phi_left
        self.Phi_right = Phi_right
        self.coreA = coreA
        self.shape = shape
        self.prec = prec
        if prec == 'c':
            # inverse of the diagonal of the local operator, used as a
            # Jacobi-style preconditioner
            left_diag = tn.diagonal(Phi_left, 0, 0, 2)
            right_diag = tn.diagonal(Phi_right, 0, 0, 2)
            jac = tn.einsum('dmS,SD->dmD', tn.einsum('sd,smS->dmS', left_diag, self.coreA), right_diag)
            self.J = 1 / jac

    def apply_prec(self, x):
        # elementwise multiplication with the inverse diagonal
        if self.prec == 'c':
            return x * self.J

    def matvec(self, x, apply_prec = True):
        use_prec = self.prec is not None and apply_prec
        if use_prec and self.prec != 'c':
            raise Exception('Preconditioner '+str(self.prec)+' not defined.')

        vec = tn.reshape(x, self.shape)
        if use_prec:
            vec = self.apply_prec(vec)

        # contraction 'lsr,smS,LSR,rmR->lmL' performed as two tensordots
        # followed by a single einsum
        tmp_left = tn.tensordot(self.coreA, self.Phi_left, ([0], [1]))   # smS,lsr->mSlr
        tmp_right = tn.tensordot(vec, self.Phi_right, ([2], [2]))        # rmR,LSR->rmLS
        result = tn.einsum('rmLS,mSlr->lmL', tmp_right, tmp_left)
        return tn.reshape(result, [-1, 1])
def amen_divide(a, b, nswp = 22, x0 = None, eps = 1e-10,rmax = 100, max_full = 500, kickrank = 4, kick2 = 0, trunc_norm = 'res', local_iterations = 40, resets = 2, verbose = True, preconditioner = None):
if verbose: time_total = datetime.datetime.now()
dtype = a.cores[0].dtype
device = a.cores[0].device
rank_search = 1 # binary rank search
damp = 2
rA = a.R
N = b.N
d = len(N)
if x0 == None:
rx = [1] + (d-1)*[2] + [1]
x_cores = [ tn.ones([rx[k],N[k],rx[k+1]], dtype = dtype, device = device) for k in range(d)]
else:
x = x0
x_cores = x.cores.copy()
rx = x.R.copy()
# check if rmax is a list
if isinstance(rmax, int):
rmax = [1] + (d-1) * [rmax] + [1]
# z cores
rz = [1]+(d-1)*[kickrank+kick2]+[1]
z_cores = [ tn.randn([rz[k],N[k],rz[k+1]], dtype = dtype, device = device) for k in range(d)]
z_cores, rz = rl_orthogonal(z_cores, rz, False)
norms = np.zeros(d)
Phiz = [tn.ones((1,1,1), dtype = dtype, device = device)] + [None] * (d-1) + [tn.ones((1,1,1), dtype = dtype, device = device)] # size is rzk x Rk x rxk
Phiz_b = [tn.ones((1,1), dtype = dtype, device = device)] + [None] * (d-1) + [tn.ones((1,1), dtype = dtype, device = device)] # size is rzk x rzbk
Phis = [tn.ones((1,1,1), dtype = dtype, device = device)] + [None] * (d-1) + [tn.ones((1,1,1), dtype = dtype, device = device)] # size is rk x Rk x rk
Phis_b = [tn.ones((1,1), dtype = dtype, device = device)] + [None] * (d-1) + [tn.ones((1,1), dtype = dtype, device = device)] # size is rk x rbk
last = False
normA = np.ones((d-1))
normb = np.ones((d-1))
normx = np.ones((d-1))
nrmsc = 1.0
for swp in range(nswp):
tme_sweep = datetime.datetime.now()
# right to left orthogonalization
if verbose:
print()
print('Starting sweep %d %s...'%(swp,"(last one) " if last else ""))
tme_sweep = datetime.datetime.now()
tme = datetime.datetime.now()
for k in range(d-1,0,-1):
# update the z part (ALS) update
if not last:
if swp > 0:
czA = local_product(Phiz[k+1],Phiz[k],a.cores[k],x_cores[k],x_cores[k].shape) # shape rzp x N x rz
czy = tn.einsum('br,bnB,BR->rnR',Phiz_b[k],b.cores[k],Phiz_b[k+1]) # shape is rzp x N x rz
cz_new = czy*nrmsc - czA
_,_,vz = SVD(tn.reshape(cz_new,[cz_new.shape[0],-1]))
cz_new = vz[:min(kickrank,vz.shape[0]),:].t() # truncate to kickrank
if k < d-1: # extend cz_new with random elements
cz_new = tn.cat((cz_new,tn.randn((cz_new.shape[0],kick2), dtype = dtype, device = device)),1)
else:
cz_new = tn.reshape(z_cores[k],[rz[k],-1]).t()
qz, _ = QR(cz_new)
rz[k] = qz.shape[1]
z_cores[k] = tn.reshape(qz.t(),[rz[k],N[k],rz[k+1]])
# norm correction ?
if swp > 0: nrmsc = nrmsc * normA[k-1] * normx[k-1] / normb[k-1]
core = tn.reshape(x_cores[k],[rx[k],N[k]*rx[k+1]]).t()
Qmat, Rmat = QR(core)
core_prev = tn.einsum('ijk,km->ijm',x_cores[k-1],Rmat.T)
rx[k] = Qmat.shape[1]
current_norm = tn.linalg.norm(core_prev)
if current_norm>0:
core_prev /= current_norm
else:
current_norm = 1.0
normx[k-1] = normx[k-1]*current_norm
x_cores[k] = tn.reshape(Qmat.t(),[rx[k],N[k],rx[k+1]])
x_cores[k-1] = core_prev[:]
# update phis (einsum)
# print(x_cores[k].shape,A.cores[k].shape,x_cores[k].shape)
Phis[k] = compute_phi_bck_A(Phis[k+1],x_cores[k],a.cores[k],x_cores[k])
Phis_b[k] = compute_phi_bck_rhs(Phis_b[k+1],b.cores[k],x_cores[k])
# ... and norms
norm = tn.linalg.norm(Phis[k])
norm = norm if norm>0 else 1.0
normA[k-1] = norm
Phis[k] /= norm
norm = tn.linalg.norm(Phis_b[k])
norm = norm if norm>0 else 1.0
normb[k-1] = norm
Phis_b[k] /= norm
# norm correction
nrmsc = nrmsc * normb[k-1]/ (normA[k-1] * normx[k-1])
# compute phis_z
if not last:
Phiz[k] = compute_phi_bck_A(Phiz[k+1], z_cores[k], a.cores[k], x_cores[k]) / normA[k-1]
Phiz_b[k] = compute_phi_bck_rhs(Phiz_b[k+1], b.cores[k], z_cores[k]) / normb[k-1]
# start loop
max_res = 0
max_dx = 0
for k in range(d):
if verbose: print('\tCore',k)
previous_solution = tn.reshape(x_cores[k],[-1,1])
# assemble rhs
rhs = tn.einsum('br,bmB,BR->rmR',Phis_b[k] , b.cores[k] * nrmsc, Phis_b[k+1])
rhs = tn.reshape(rhs,[-1,1])
norm_rhs = tn.linalg.norm(rhs)
#residuals
real_tol = (eps/np.sqrt(d))/damp
# solve the local system
use_full = rx[k]*N[k]*rx[k+1] < max_full
if use_full:
# solve the full system
if verbose: print('\t\tChoosing direct solver (local size %d)....'%(rx[k]*N[k]*rx[k+1]))
Bp = tn.einsum('smS,LSR->smRL',a.cores[k],Phis[k+1]) # shape is Rp x N x N x r x r
#B = tn.einsum('lsr,smnRL->rmRlnL',Phis[k],Bp)
B = oe.contract('lsr,smRL,mn->lmLrnR',Phis[k],Bp,tn.eye(N[k],dtype=dtype,device=device))
B = tn.reshape(B,[rx[k]*N[k]*rx[k+1],rx[k]*N[k]*rx[k+1]])
solution_now = tn.linalg.solve(B,rhs)
res_old = tn.linalg.norm(B@previous_solution-rhs)/norm_rhs
res_new = tn.linalg.norm(B@solution_now-rhs)/norm_rhs
else:
# iterative solver
if verbose:
print('\t\tChoosing iterative solver (local size %d)....'%(rx[k]*N[k]*rx[k+1]))
time_local = datetime.datetime.now()
shape_now = [rx[k],N[k],rx[k+1]]
Op = LinearOp(Phis[k],Phis[k+1],a.cores[k],shape_now, preconditioner)
# solution_now, flag, nit, res_new = BiCGSTAB_reset(Op, rhs,previous_solution[:], eps_local, local_iterations)
eps_local = real_tol * norm_rhs
drhs = Op.matvec(previous_solution, False)
drhs = rhs-drhs
eps_local = eps_local / tn.linalg.norm(drhs)
solution_now, flag, nit = gmres_restart(Op, drhs, previous_solution*0, rhs.shape[0], local_iterations+1, eps_local, resets)
if preconditioner != None:
solution_now = Op.apply_prec(tn.reshape(solution_now,shape_now))
solution_now = tn.reshape(solution_now,[-1,1])
solution_now = previous_solution + solution_now
res_old = tn.linalg.norm(Op.matvec(previous_solution, False)-rhs)/norm_rhs
res_new = tn.linalg.norm(Op.matvec(solution_now, False)-rhs)/norm_rhs
if verbose:
print('\t\tFinished with flag %d after %d iterations with relres %g (from %g)'%(flag,nit,res_new,eps_local))
time_local = datetime.datetime.now() - time_local
print('\t\tTime needed ',time_local)
# residual damp check
if res_old/res_new < damp and res_new > real_tol:
if verbose: print('WARNING: residual increases. res_old %g, res_new %g, real_tol %g'%(res_old,res_new,real_tol)) # warning (from tt toolbox)
# compute residual and step size
dx = tn.linalg.norm(solution_now-previous_solution)/tn.linalg.norm(solution_now)
if verbose:
print('\t\tdx = %g, res_now = %g, res_old = %g'%(dx,res_new,res_old))
max_dx = max(dx,max_dx)
max_res = max(max_res,res_old)
solution_now = tn.reshape(solution_now,[rx[k]*N[k],rx[k+1]])
# truncation
if k<d-1:
u, s, v = SVD(solution_now)
# print('\t\tTruncation of solution of shape',[rx[k]*N[k],rx[k+1]],' into u', u.shape, ' and v ',v.shape)
if trunc_norm == 'fro':
pass
else:
# search for a rank such that offeres small enough residuum
# TODO: binary search?
r = 0
for r in range(u.shape[1]-1,0,-1):
solution = u[:,:r] @ tn.diag(s[:r]) @ v[:r,:] # solution has the same size
# res = tn.linalg.norm(tn.reshape(local_product(Phis[k+1],Phis[k],a.cores[k],tn.reshape(solution,[rx[k],N[k],rx[k+1]]),solution_now.shape),[-1,1]) - rhs)/norm_rhs
if use_full:
res = tn.linalg.norm(B@tn.reshape(solution,[-1,1])-rhs)/norm_rhs
else:
# res = tn.linalg.norm(tn.reshape(local_product(Phis[k+1],Phis[k],a.cores[k],tn.reshape(solution,[rx[k],N[k],rx[k+1]]),solution_now.shape),[-1,1]) - rhs)/norm_rhs
res = tn.linalg.norm(Op.matvec(solution)-rhs)/norm_rhs
if res > max(real_tol*damp,res_new):
break
r += 1
r = min([r,tn.numel(s),rmax[k+1]])
else:
u, v = QR(solution_now)
# v = v.t()
r = u.shape[1]
s = tn.ones(r, dtype = dtype, device = device)
u = u[:,:r]
v = tn.diag(s[:r]) @ v[:r,:]
v = v.t()
if not last:
czA = local_product(Phiz[k+1], Phiz[k], a.cores[k], tn.reshape(u@v.t(),[rx[k],N[k],rx[k+1]]), [rx[k],N[k],rx[k+1]]) # shape rzp x N x rz
czy = tn.einsum('br,bnB,BR->rnR',Phiz_b[k],b.cores[k]*nrmsc,Phiz_b[k+1]) # shape is rzp x N x rz
cz_new = czy - czA
# print('Phiz_b',[plm.shape for plm in Phiz_b])
# print('czA',czA.shape,' czy',czy.shape)
# print('rz',rz)
# print('rx',rx)
uz,_,_ = SVD(tn.reshape(cz_new, [rz[k]*N[k],rz[k+1]]))
cz_new = uz[:,:min(kickrank,uz.shape[1])] # truncate to kickrank
if k < d-1: # extend cz_new with random elements
cz_new = tn.cat((cz_new,tn.randn((cz_new.shape[0],kick2), dtype = dtype, device = device)),1)
qz,_ = QR(cz_new)
rz[k+1] = qz.shape[1]
z_cores[k] = tn.reshape(qz,[rz[k],N[k],rz[k+1]])
if k < d-1:
if not last:
left_res = local_product(Phiz[k+1],Phis[k],a.cores[k],tn.reshape(u@v.t(),[rx[k],N[k],rx[k+1]]),[rx[k],N[k],rx[k+1]])
left_b = tn.einsum('br,bmB,BR->rmR',Phis_b[k],b.cores[k]*nrmsc,Phiz_b[k+1])
uk = left_b - left_res # rx_k x N_k x rz_k+1
u, Rmat = QR(tn.cat((u,tn.reshape(uk,[u.shape[0],-1])),1))
r_add = uk.shape[2]
v = tn.cat((v,tn.zeros([rx[k+1],r_add], dtype = dtype, device = device)), 1)
v = v @ Rmat.t()
r = u.shape[1]
# print(u.shape,v.shape,x_cores[k+1].shape)
v = tn.einsum('ji,jkl->ikl',v,x_cores[k+1])
# remove norm correction
nrmsc = nrmsc * normA[k] * normx[k] / normb[k]
norm_now = tn.linalg.norm(v)
if norm_now>0:
v = v / norm_now
else:
norm_now = 1.0
normx[k] *= norm_now
x_cores[k] = tn.reshape(u, [rx[k],N[k],r])
x_cores[k+1] = tn.reshape(v, [r,N[k+1],rx[k+2]])
rx[k+1] = r
# next phis with norm correction
Phis[k+1] = compute_phi_fwd_A(Phis[k], x_cores[k], a.cores[k], x_cores[k])
Phis_b[k+1] = compute_phi_fwd_rhs(Phis_b[k], b.cores[k],x_cores[k])
# ... and norms
norm = tn.linalg.norm(Phis[k+1])
norm = norm if norm>0 else 1.0
normA[k] = norm
Phis[k+1] /= norm
norm = tn.linalg.norm(Phis_b[k+1])
norm = norm if norm>0 else 1.0
normb[k] = norm
Phis_b[k+1] /= norm
# norm correction
nrmsc = nrmsc * normb[k] / ( normA[k] * normx[k] )
# next phiz
if not last:
Phiz[k+1] = compute_phi_fwd_A(Phiz[k], z_cores[k], a.cores[k], x_cores[k]) / normA[k]
Phiz_b[k+1] = compute_phi_fwd_rhs(Phiz_b[k], b.cores[k],z_cores[k]) / normb[k]
else:
x_cores[k] = tn.reshape(u@tn.diag(s[:r]) @ v[:r,:].t(),[rx[k],N[k],rx[k+1]])
if verbose:
print('Solution rank is',rx)
print('Maxres ',max_res)
tme_sweep = datetime.datetime.now()-tme_sweep
print('Time ',tme_sweep)
if last:
break
if max_res < eps:
last = True
if verbose:
time_total = datetime.datetime.now() - time_total
print()
print('Finished after' ,swp,' sweeps and ',time_total)
print()
normx = np.exp(np.sum(np.log(normx))/d)
for k in range(d):
x_cores[k] *= normx
return x_cores
def compute_phi_bck_A(Phi_now, core_left, core_A, core_right):
    """
    Compute the backward phi for the bilinear form dot(left, A @ right).

    Note: in this solver the operator core is 3d (it carries a single mode
    index, i.e. the operator acts diagonally/elementwise along the mode
    dimension), not the 4d core of a general TT matrix.

    Args:
        Phi_now (torch.tensor): The current phi. Has shape r1_k+1 x R_k+1 x r2_k+1
        core_left (torch.tensor): the core on the left. Has shape r1_k x N_k x r1_k+1
        core_A (torch.tensor): the core of the operator. Has shape R_k x N_k x R_k+1
        core_right (torch.tensor): the core to the right. Has shape r2_k x N_k x r2_k+1

    Returns:
        torch.tensor: The previous phi (backward). Has shape r1_k x R_k x r2_k
    """
    # One fused contraction; torch.einsum optimizes the contraction path for
    # multi-operand expressions (delegating to opt_einsum when available), so
    # the extra opt_einsum call is not needed for this small fixed contraction.
    Phi = tn.einsum('LSR,lML,sMS,rMR->lsr', Phi_now, core_left, core_A, core_right)
    return Phi
def compute_phi_fwd_A(Phi_now, core_left, core_A, core_right):
    """
    Compute the forward phi for the bilinear form dot(left, A @ right).

    Note: in this solver the operator core is 3d (it carries a single mode
    index, i.e. the operator acts diagonally/elementwise along the mode
    dimension), not the 4d core of a general TT matrix.

    Args:
        Phi_now (torch.tensor): The current phi. Has shape r1_k x R_k x r2_k
        core_left (torch.tensor): the core on the left. Has shape r1_k x N_k x r1_k+1
        core_A (torch.tensor): the core of the operator. Has shape R_k x N_k x R_k+1
        core_right (torch.tensor): the core to the right. Has shape r2_k x N_k x r2_k+1

    Returns:
        torch.tensor: The following phi (forward). Has shape r1_k+1 x R_k+1 x r2_k+1
    """
    # oe.contract picks an optimized contraction order; it was measured faster
    # here than a chain of pairwise einsum calls, so it is kept deliberately.
    Phi_next = oe.contract('lsr,lML,sMS,rMR->LSR', Phi_now, core_left, core_A, core_right)
    return Phi_next
def compute_phi_bck_rhs(Phi_now, core_b, core):
    """
    Compute the backward phi for the linear form dot(x, b).

    Args:
        Phi_now (torch.tensor): The current phi. Has shape rb_k+1 x r_k+1
        core_b (torch.tensor): The current core of the rhs. Has shape rb_k x N_k x rb_k+1
        core (torch.tensor): The current core. Has shape r_k x N_k x r_k+1

    Returns:
        torch.tensor: The backward phi corresponding to the rhs. Has shape rb_k x r_k
    """
    # Single fused 3-operand contraction (torch.einsum optimizes the path).
    Phi = tn.einsum('BR,bnB,rnR->br', Phi_now, core_b, core)
    return Phi
def compute_phi_fwd_rhs(Phi_now, core_rhs, core):
    """
    Compute the forward phi for the linear form dot(x, b).

    Args:
        Phi_now (torch.tensor): The current phi. Has shape rb_k x r_k
        core_rhs (torch.tensor): The current core of the rhs. Has shape rb_k x N_k x rb_k+1
        core (torch.tensor): The current core. Has shape r_k x N_k x r_k+1

    Returns:
        torch.tensor: The forward computed phi for the rhs. Has shape rb_k+1 x r_k+1
    """
    # Single fused 3-operand contraction (torch.einsum optimizes the path).
    Phi_next = tn.einsum('br,bnB,rnR->BR', Phi_now, core_rhs, core)
    return Phi_next
| 20,269 | 41.494759 | 203 | py |
torchTT | torchTT-main/torchtt/_tt_base.py | """
This file implements the core TT class.
"""
import torch as tn
import torch.nn.functional as tnf
from torchtt._decomposition import mat_to_tt, to_tt, lr_orthogonal, round_tt, rl_orthogonal, QR, SVD, rank_chop
from torchtt._division import amen_divide
import numpy as np
import math
from torchtt._dmrg import dmrg_matvec
from torchtt._aux_ops import apply_mask, dense_matvec, bilinear_form_aux
from torchtt.errors import *
import torchtt._extras
import sys
class TT():
#cores : list[tn.tensor]
#""" The TT cores as a list of `torch.tensor` instances."""
@property
def is_ttm(self):
"""
Check whether the instance is a TT operator or not.
Returns:
bool: the flag.
"""
return self.__is_ttm
@property
def M(self):
"""
Return the "row" shape in case of TT matrices.
Raises:
IncompatibleTypes: The field is_ttm is defined only for TT matrices.
Returns:
list[int]: the shape.
"""
if not self.__is_ttm:
raise IncompatibleTypes("The field is_ttm is defined only for TT matrices.")
return self.__M.copy()
@property
def N(self):
"""
Return the shape of a tensor or the "column" shape of a TT operator.
Returns:
list[int]: the shape.
"""
return self.__N.copy()
@property
def R(self):
"""
The rank of the TT decomposition.
It's length should be ``len(R)==len(N)+1``.
Returns:
list[int]: the rank.
"""
return self.__R.copy()
def __init__(self, source, shape=None, eps=1e-10, rmax=sys.maxsize):
"""
Constructor of the TT class. Can convert full tensor in the TT-format (from `torch.tensor` or `numpy.array`).
In the case of tensor operators of full shape `M1 x ... Md x N1 x ... x Nd`, the shape must be specified as a list of tuples `[(M1,N1),...,(Md,Nd)]`.
A TT-object can also be computed from cores if the list of cores is passed as argument.
If None is provided, an empty tensor is created.
The TT decomposition of a tensor is
:math:`\\mathsf{x}=\\sum\\limits_{r_1...r_{d-1}=1}^{R_1,...,R_{d-1}} \\mathsf{x}^{(1)}_{1i_1r_1}\\cdots\\mathsf{x}^{(d)}_{r_{d-1}i_d1},`
where :math::`\\{\\mathsf{x}^{(k)}\\}_{k=1}^d` are the TT cores and ::math:`\\mathbf{R}=(1,R_1,...,R_{d-1},1)` is the TT rank.
Using the constructor, a TT decomposition of a tensor can be computed. The TT cores are stored as a list in ``torchtt.TT.cores``.
This class implements basic operators such as `+,-,*,/,@,**` (add, subtract, elementwise multiplication, elementwise division, matrix vector product and Kronecker product) between TT instances.
The `examples\` folder server as a tutorial for all the possibilities of the toolbox.
Examples:
.. code-block:: python
import torchtt
import torch
x = torch.reshape(torch.arange(0,128,dtype = torch.float64),[8,4,4])
xtt = torchtt.TT(x)
ytt = torchtt.TT(torch.squeeze(x),[8,4,4])
# create a TT matrix
A = torch.reshape(torch.arange(0,20160,dtype = torch.float64),[3,5,7,4,6,8])
Att = torchtt.TT(A,[(3,4),(5,6),(7,8)])
print(Att)
Args:
source (torch.tensor ot list[torch.tensor] or numpy.array or None): the input tensor in full format or the cores. If a `torch.tensor` or `numpy.array` is provided
shape (list[int] or list[tuple[int]], optional): the shape (if it differs from the one provided). For the TT-matrix case is mandatory. Defaults to None.
eps (float, optional): tolerance of the TT approximation. Defaults to 1e-10.
rmax (int or list[int], optional): maximum rank (either a list of integer or an integer). Defaults to the maximum possible integer.
Raises:
RankMismatch: Ranks of the given cores do not match (change the spaces of the cores).
InvalidArguments: Invalid input: TT-cores have to be either 4d or 3d.
InvalidArguments: Check the ranks and the mode size.
NotImplementedError: Function only implemented for torch tensors, numpy arrays, list of cores as torch tensors and None
"""
if source is None:
# empty TT
self.cores = []
self.__M = []
self.__N = []
self.__R = [1,1]
self.__is_ttm = False
elif isinstance(source, list):
# tt cores were passed directly
# check if sizes are consistent
prev = 1
N = []
M = []
R = [source[0].shape[0]]
d = len(source)
for i in range(len(source)):
s = source[i].shape
if s[0] != R[-1]:
raise RankMismatch("Ranks of the given cores do not match: for core number %d previous rank is %d and and current rank is %d."%(i,R[-1],s[0]))
if len(s) == 3:
R.append(s[2])
N.append(s[1])
elif len(s)==4:
R.append(s[3])
M.append(s[1])
N.append(s[2])
else:
raise InvalidArguments("Invalid input: TT-cores have to be either 4d or 3d.")
if len(N) != d or len(R) != d+1 or R[0] != 1 or R[-1] != 1 or (len(M)!=0 and len(M)!=len(N)) :
raise InvalidArguments("Check the ranks and the mode size.")
self.cores = source
self.__R = R
self.__N = N
if len(M) == len(N):
self.__M = M
self.__is_ttm = True
else:
self.__is_ttm = False
self.shape = [ (m,n) for m,n in zip(self.__M,self.__N) ] if self.__is_ttm else [n for n in self.N]
elif tn.is_tensor(source):
if shape == None:
# no size is given. Deduce it from the tensor. No TT-matrix in this case.
self.__N = list(source.shape)
if len(self.__N)>1:
self.cores, self.__R = to_tt(source,self.__N,eps,rmax,is_sparse=False)
else:
self.cores = [tn.reshape(source,[1,self.__N[0],1])]
self.__R = [1,1]
self.__is_ttm = False
elif isinstance(shape,list) and isinstance(shape[0],tuple):
# if the size contains tuples, we have a TT-matrix.
if len(shape) > 1:
self.__M = [s[0] for s in shape]
self.__N = [s[1] for s in shape]
self.cores, self.__R = mat_to_tt(source, self.__M, self.__N, eps, rmax)
self.__is_ttm = True
else:
self.__M = [shape[0][0]]
self.__N = [shape[0][1]]
self.cores, self.__R = [tn.reshape(source,[1,shape[0][0],shape[0][1],1])], [1,1]
self.__is_ttm = True
else:
# TT-decomposition with prescribed size
# perform reshape first
self.__N = shape
self.cores, self.__R = to_tt(tn.reshape(source,shape),self.__N,eps,rmax,is_sparse=False)
self.__is_ttm = False
self.shape = [ (m,n) for m,n in zip(self.__M,self.__N) ] if self.__is_ttm else [n for n in self.N]
elif isinstance(source, np.ndarray):
source = tn.tensor(source)
if shape == None:
# no size is given. Deduce it from the tensor. No TT-matrix in this case.
self.__N = list(source.shape)
if len(self.__N)>1:
self.cores, self.__R = to_tt(source,self.__N,eps,rmax,is_sparse=False)
else:
self.cores = [tn.reshape(source,[1,self.__N[0],1])]
self.__R = [1,1]
self.__is_ttm = False
elif isinstance(shape,list) and isinstance(shape[0],tuple):
# if the size contains tuples, we have a TT-matrix.
self.__M = [s[0] for s in shape]
self.__N = [s[1] for s in shape]
self.cores, self.__R = mat_to_tt(source, self.__M, self.__N, eps, rmax)
self.__is_ttm = True
else:
# TT-decomposition with prescribed size
# perform reshape first
self.__N = shape
self.cores, self.__R = to_tt(tn.reshape(source,shape),self.__N,eps,rmax,is_sparse=False)
self.__is_ttm = False
self.shape = [ (m,n) for m,n in zip(self.__M,self.__N) ] if self.__is_ttm else [n for n in self.N]
else:
raise NotImplementedError("Function only implemented for torch tensors, numpy arrays, list of cores as torch tensors and None.")
def cuda(self, device = None):
"""
Return a torchtt.TT object on the CUDA device by cloning all the cores on the GPU.
Args:
device (torch.device, optional): The CUDA device (None for CPU). Defaults to None.
Returns:
torchtt.TT: The TT-object. The TT-cores are on CUDA.
"""
t = TT([ c.cuda(device) for c in self.cores])
return t
def cpu(self):
"""
Retrive the cores from the GPU.
Returns:
torchtt.TT: The TT-object on CPU.
"""
return TT([ c.cpu() for c in self.cores])
def is_cuda(self):
"""
Return True if the tensor is on GPU.
Returns:
bool: Is the torchtt.TT on GPU or not.
"""
return all([c.is_cuda for c in self.core])
def to(self, device = None, dtype = None):
"""
Moves the TT instance to the given device with the given dtype.
Args:
device (torch.device, optional): The desired device. If none is provided, the device is the CPU. Defaults to None.
dtype (torch.dtype, optional): The desired dtype (torch.float64, torch.float32,...). If None is provided the dtype is not changed. Defaults to None.
"""
return TT( [ c.to(device=device,dtype=dtype) for c in self.cores])
def detach(self):
"""
Detaches the TT tensor. Similar to ``torch.tensor.detach()``.
Returns:
torchtt.TT: the detached tensor.
"""
return TT([c.detach() for c in self.cores])
def clone(self):
"""
Clones the torchtt.TT instance. Similar to torch.tensor.clone().
Returns:
torchtt.TT: the cloned TT object.
"""
return TT([c.clone() for c in self.cores])
def full(self):
"""
Return the full tensor.
In case of a TTM, the result has the shape ``M1 x M2 x ... x Md x N1 x N2 x ... x Nd``.
Returns:
torch.tensor: the full tensor.
"""
if self.__is_ttm:
# the case of tt-matrix
tfull = self.cores[0][0,:,:,:]
for i in range(1,len(self.cores)-1) :
tfull = tn.einsum('...i,ijkl->...jkl',tfull,self.cores[i])
if len(self.__N) != 1:
tfull = tn.einsum('...i,ijk->...jk',tfull,self.cores[-1][:,:,:,0])
tfull = tn.permute(tfull,list(np.arange(len(self.__N))*2)+list(np.arange(len(self.N))*2+1))
else:
tfull = tfull[:,:,0]
else:
# the case of a normal tt
tfull = self.cores[0][0,:,:]
for i in range(1,len(self.cores)-1) :
tfull = tn.einsum('...i,ijk->...jk',tfull,self.cores[i])
if len(self.__N) != 1:
tfull = tn.einsum('...i,ij->...j',tfull,self.cores[-1][:,:,0])
else:
tfull = tn.squeeze(tfull)
return tfull
def numpy(self):
"""
Return the full tensor as a numpy.array.
In case of a TTM, the result has the shape ``M1 x M2 x ... x Md x N1 x N2 x ... x Nd``.
If it is involved in an AD graph, an error will occur.
Returns:
numpy.array: the full tensor in numpy.
"""
return self.full().cpu().numpy()
def __repr__(self):
"""
Show the information as a string
Returns:
string: the string representation of a torchtt.TT
"""
if self.__is_ttm:
output = 'TT-matrix'
output += ' with sizes and ranks:\n'
output += 'M = ' + str(self.__M) + '\nN = ' + str(self.__N) + '\n'
output += 'R = ' + str(self.__R) + '\n'
output += 'Device: '+str(self.cores[0].device)+', dtype: '+str(self.cores[0].dtype)+'\n'
entries = sum([tn.numel(c) for c in self.cores])
output += '#entries ' + str(entries) +' compression ' + str(entries/np.prod(np.array(self.__N,dtype=np.float64)*np.array(self.__M,dtype=np.float64))) + '\n'
else:
output = 'TT'
output += ' with sizes and ranks:\n'
output += 'N = ' + str(self.__N) + '\n'
output += 'R = ' + str(self.__R) + '\n\n'
output += 'Device: '+str(self.cores[0].device)+', dtype: '+str(self.cores[0].dtype)+'\n'
entries = sum([tn.numel(c) for c in self.cores])
output += '#entries ' + str(entries) +' compression ' + str(entries/np.prod(np.array(self.__N,dtype=np.float64))) + '\n'
return output
def __radd__(self,other):
"""
Addition in the TT format. Implements the "+" operator. This function is called in the case a non-torchtt.TT object is added to the left.
Args:
other (float | int | torch.tensor): the first operand. If a `torch.tensor` is provided, it must have 1 element.
Returns:
torchtt.TT: the result.
"""
return self.__add__(other)
def __add__(self,other):
"""
Addition in the TT format. Implements the "+" operator. The following type pairs are supported:
- both operands are TT-tensors.
- both operands are TT-matrices.
- first operand is a TT-tensor or a TT-matrix and the second is a scalar (either torch.tensor scalar or int or float).
The broadcasting rules from `torch` apply here.
Args:
other (torchtt.TT | float | int | torch.tensor): the second operand. If a `torch.tensor` is provided, it must have 1 element.
Raises:
ShapeMismatch: Dimension mismatch.
IncompatibleTypes: Addition between a tensor and a matrix is not defined.
Returns:
torchtt.TT: the result.
"""
if np.isscalar(other) or ( tn.is_tensor(other) and tn.numel(other) == 1):
# the second term is a scalar
cores = []
for i in range(len(self.__N)):
if self.__is_ttm:
pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 , 0,0 if i==0 else 1)
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0,0 , 0 if i==0 else self.R[i],0)
othr = tn.ones([1,1,1,1],dtype=self.cores[i].dtype) * (other if i ==0 else 1)
else:
pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 if i==0 else 1)
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
othr = tn.ones([1,1,1],dtype=self.cores[i].dtype) * (other if i ==0 else 1)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(othr,pad2))
result = TT(cores)
elif isinstance(other,TT):
#second term is TT object
if self.__is_ttm and other.is_ttm:
# both are TT-matrices
if self.__M != self.M or self.__N != self.N:
raise ShapeMismatch("Shapes are incompatible: first operand is %s x %s, second operand is %s x %s."%(str(self.M), str(self.N), str(other.M), str(other.N)))
cores = []
for i in range(len(self.__N)):
pad1 = (0,0 if i == len(self.__N)-1 else other.R[i+1], 0,0 , 0,0 , 0,0 if i==0 else other.R[i])
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0,0 , 0 if i==0 else self.R[i],0)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(other.cores[i],pad2))
result = TT(cores)
elif self.__is_ttm==False and other.is_ttm==False:
# normal tensors in TT format.
if self.__N == other.N:
cores = []
for i in range(len(self.__N)):
pad1 = (0,0 if i == len(self.__N)-1 else other.R[i+1] , 0,0 , 0,0 if i==0 else other.R[i])
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(other.cores[i],pad2))
else:
if len(self.__N) < len(other.N):
raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
cores = []
for i in range(len(self.cores)-len(other.cores)):
pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 if i==0 else 1)
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(tn.ones((1,self.__N[i],1), device = self.cores[i].device),pad2))
for k,i in zip(range(len(other.cores)), range(len(self.cores)-len(other.cores), len(self.cores))):
if other.N[k] == self.__N[i]:
pad1 = (0,0 if i == len(self.__N)-1 else other.R[k+1] , 0,0 , 0,0 if i==0 else other.R[k])
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(other.cores[k],pad2))
elif other.N[k] == 1:
pad1 = (0,0 if i == len(self.__N)-1 else other.R[k+1] , 0,0 , 0,0 if i==0 else other.R[k])
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(tn.tile(other.cores[k],(1,self.__N[i],1)),pad2))
else:
raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
result = TT(cores)
else:
# incompatible types
raise IncompatibleTypes('Addition between a tensor and a matrix is not defined.')
else:
InvalidArguments('Second term is incompatible.')
return result
def __rsub__(self,other):
"""
Subtract 2 tensors in the TT format. Implements the "-" operator.
Args:
other (torchtt.TT | float | int | torch.tensor): the first operand. If a `torch.tensor` is provided, it must have 1 element.
Returns:
torchtt.TT: the result.
"""
T = self.__sub__(other)
T.cores[0] = -T.cores[0]
return T
def __sub__(self,other):
"""
Subtract 2 tensors in the TT format. Implements the "-" operator.
Possible second operands are: torchtt.TT, float, int, torch.tensor with 1 element.
Broadcasting rules from `torch` apply for this operation as well.
Args:
other (torchtt.TT | float | int | torch.tensor): the second operand. If a `torch.tensor` is provided, it must have 1 element.
Raises:
ShapeMismatch: Both dimensions of the TT matrix should be equal.
ShapeMismatch: Dimension mismatch.
IncompatibleTypes: Addition between a tensor and a matrix is not defined.
InvalidArguments: Second term is incompatible (must be either torchtt.TT or int or float or torch.tensor with 1 element).
Returns:
torchtt.TT: the result.
"""
if np.isscalar(other) or ( tn.is_tensor(other) and other.shape == []):
# the second term is a scalar
cores = []
for i in range(len(self.__N)):
if self.__is_ttm:
pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 , 0,0 if i==0 else 1)
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0,0 , 0 if i==0 else self.R[i],0)
othr = tn.ones([1,1,1,1],dtype=self.cores[i].dtype) * (-other if i ==0 else 1)
else:
pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 if i==0 else 1)
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
othr = tn.ones([1,1,1],dtype=self.cores[i].dtype) * (-other if i ==0 else 1)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(othr,pad2))
result = TT(cores)
elif isinstance(other,TT):
#second term is TT object
if self.__is_ttm and other.is_ttm:
# both are TT-matrices
if self.__M != self.M or self.__N != self.N:
raise ShapeMismatch("Shapes are incompatible: first operand is %s x %s, second operand is %s x %s."%(str(self.M), str(self.N), str(other.M), str(other.N)))
cores = []
for i in range(len(self.__N)):
pad1 = (0,0 if i == len(self.__N)-1 else other.R[i+1] , 0,0 , 0,0 , 0,0 if i==0 else other.R[i])
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0,0 , 0 if i==0 else self.R[i],0)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(-other.cores[i] if i==0 else other.cores[i],pad2))
result = TT(cores)
elif self.__is_ttm==False and other.is_ttm==False:
# normal tensors in TT format.
if self.__N == other.N:
cores = []
for i in range(len(self.__N)):
pad1 = (0,0 if i == len(self.__N)-1 else other.R[i+1] , 0,0 , 0,0 if i==0 else other.R[i])
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
cores.append(tnf.pad(self.cores[i], pad1)+tnf.pad(-other.cores[i] if i==0 else other.cores[i],pad2))
else:
if len(self.__N) < len(other.N):
raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
cores = []
for i in range(len(self.cores)-len(other.cores)):
pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 if i==0 else 1)
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad((-1 if i==0 else 1)*tn.ones((1,self.__N[i],1), device = self.cores[i].device),pad2))
for k,i in zip(range(len(other.cores)), range(len(self.cores)-len(other.cores), len(self.cores))):
if other.N[k] == self.__N[i]:
pad1 = (0,0 if i == len(self.__N)-1 else other.R[k+1] , 0,0 , 0,0 if i==0 else other.R[k])
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(-other.cores[k] if i==0 else other.cores[k],pad2))
elif other.N[k] == 1:
pad1 = (0,0 if i == len(self.__N)-1 else other.R[k+1] , 0,0 , 0,0 if i==0 else other.R[k])
pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(tn.tile(-other.cores[k] if i==0 else other.cores[k],(1,self.__N[i],1)),pad2))
else:
raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
result = TT(cores)
else:
# incompatible types
raise IncompatibleTypes('Addition between a tensor and a matrix is not defined.')
else:
InvalidArguments('Second term is incompatible (must be either torchtt.TT or int or float or torch.tensor with 1 element).')
return result
def __rmul__(self,other):
"""
Elementwise multiplication in the TT format.
This implements the "*" operator when the left operand is not torchtt.TT.
Following are supported:
* TT tensor and TT tensor
* TT matrix and TT matrix
* TT tensor and scalar(int, float or torch.tensor scalar)
Args:
other (torchtt.TT | float | int | torch.tensor): the first operand. If a `torch.tensor` is provided, it must have 1 element.
Raises:
ShapeMismatch: Shapes must be equal.
IncompatibleTypes: Second operand must be the same type as the fisrt (both should be either TT matrices or TT tensors).
InvalidArguments: Second operand must be of type: torchtt.TT, float, int of torch.tensor.
Returns:
torchtt.TT: [description]
"""
return self.__mul__(other)
def __mul__(self,other):
"""
Elementwise multiplication in the TT format.
This implements the "*" operator.
Following are supported:
- TT tensor and TT tensor
- TT matrix and TT matrix
- TT tensor and scalar(int, float or torch.tensor scalar)
The broadcasting rules are the same as in torch (see [here](https://pytorch.org/docs/stable/notes/broadcasting.html)).
Args:
other (torchtt.TT | float | int | torch.tensor): the second operand. If a `torch.tensor` is provided, it must have 1 element.
Raises:
ShapeMismatch: Shapes are incompatible (see the broadcasting rules).
IncompatibleTypes: Second operand must be the same type as the fisrt (both should be either TT matrices or TT tensors).
InvalidArguments: Second operand must be of type: torchtt.TT, float, int of torch.tensor.
Returns:
torchtt.TT: the result.
"""
# elementwise multiplication
if isinstance(other, TT):
if self.__is_ttm and other.is_ttm:
if self.__N == other.N and self.__M == other.M:
# raise ShapeMismatch('Shapes must be equal.')
cores_new = []
for i in range(len(self.cores)):
core = tn.reshape(tn.einsum('aijb,mijn->amijbn',self.cores[i],other.cores[i]),[self.__R[i]*other.R[i],self.__M[i],self.__N[i],self.R[i+1]*other.R[i+1]])
cores_new.append(core)
else:
raise ShapeMismatch("Shapes are incompatible: first operand is %s x %s, second operand is %s x %s."%(str(self.M), str(self.N), str(other.M), str(other.N)))
# if len(self.__N) < len(other.N):
# raise ShapeMismatch("Shapes are incompatible: first operand is %s x %s, second operand is %s x %s."%(str(self.M), str(self.N), str(other.M), str(other.N)))
# cores_new = []
# raise NotImplementedError("Not yet implemented.")
elif self.__is_ttm == False and other.is_ttm == False:
# broadcasting rul;es have to be applied. Sperate if else to make the non-broadcasting case the fastest.
if self.__N == other.N:
cores_new = []
for i in range(len(self.cores)):
core = tn.reshape(tn.einsum('aib,min->amibn',self.cores[i],other.cores[i]),[self.__R[i]*other.R[i],self.__N[i],self.R[i+1]*other.R[i+1]])
cores_new.append(core)
else:
if len(self.__N) < len(other.N):
raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
cores_new = []
for i in range(len(self.cores)-len(other.cores)):
cores_new.append(self.cores[i]*1)
for k,i in zip(range(len(other.cores)), range(len(self.cores)-len(other.cores), len(self.cores))):
if other.N[k] == self.__N[i]:
core = tn.reshape(tn.einsum('aib,min->amibn',self.cores[i],other.cores[k]),[self.__R[i]*other.R[k],self.__N[i],self.R[i+1]*other.R[k+1]])
elif other.N[k] == 1:
core = tn.reshape(tn.einsum('aib,mn->amibn',self.cores[i],other.cores[k][:,0,:]),[self.__R[i]*other.R[k],self.__N[i],self.R[i+1]*other.R[k+1]])
else:
raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
cores_new.append(core)
else:
raise IncompatibleTypes('Second operand must be the same type as the fisrt (both should be either TT matrices or TT tensors).')
result = TT(cores_new)
elif isinstance(other,int) or isinstance(other,float) or isinstance(other,tn.tensor):
if other != 0:
cores_new = [c+0 for c in self.cores]
cores_new[0] *= other
result = TT(cores_new)
else:
result = TT([tn.zeros((1,self.M[i],self.N[i],1) if self.is_ttm else (1,self.N[i],1), device = self.cores[0].device, dtype = self.cores[0].dtype) for i in range(len(self.N))])
# result = zeros([(m,n) for m,n in zip(self.M,self.N)] if self.is_ttm else self.N, device=self.cores[0].device)
else:
raise InvalidArguments('Second operand must be of type: TT, float, int of tensorflow Tensor.')
return result
    def __matmul__(self,other):
        """
        Matrix multiplication in TT-format (implements the "@" operator).

        Supported operands:
            - TT-matrix @ TT-tensor -> TT-tensor: y_i = A_ij * x_j
            - TT-tensor @ TT-matrix -> TT-tensor: y_j = x_i * A_ij
            - TT-matrix @ TT-matrix -> TT-matrix: Y_ij = A_ik * B_kj
            - TT-matrix @ torch.tensor -> torch.tensor: y_bi = A_ij * x_bj

        In the last case, the multiplication is performed along the last modes and a full torch.tensor is returned.

        Args:
            other (torchtt.TT | torch.tensor): the second operand.

        Raises:
            ShapeMismatch: Shapes do not match.
            InvalidArguments: Wrong arguments.

        Returns:
            torchtt.TT | torch.tensor: the result. Can be full tensor if the second operand is full tensor.
        """
        if self.__is_ttm and tn.is_tensor(other):
            # TT-matrix @ dense tensor: the trailing len(N) modes of `other` must
            # match the column modes of the operator; result is a dense tensor.
            if self.__N != list(other.shape)[-len(self.N):]:
                raise ShapeMismatch("Shapes do not match.")
            result = dense_matvec(self.cores,other)
            return result
        elif self.__is_ttm and other.is_ttm == False:
            # matrix-vector multiplication
            if self.__N != other.N:
                raise ShapeMismatch("Shapes do not match.")
            cores_new = []
            for i in range(len(self.cores)):
                # contract the column index k of A's core with the mode index of x's
                # core; the resulting core has rank R_A[i]*R_x[i] on each side.
                core = tn.reshape(tn.einsum('ijkl,mkp->imjlp',self.cores[i],other.cores[i]),[self.cores[i].shape[0]*other.cores[i].shape[0],self.cores[i].shape[1],self.cores[i].shape[3]*other.cores[i].shape[2]])
                cores_new.append(core)
        elif self.__is_ttm and other.is_ttm:
            # multiplication between 2 TT-matrices
            if self.__N != other.M:
                raise ShapeMismatch("Shapes do not match.")
            cores_new = []
            for i in range(len(self.cores)):
                # contract A's column index k with B's row index; the new core is a
                # 4d TTM core of shape (R_A*R_B) x M_A x N_B x (R_A'*R_B').
                core = tn.reshape(tn.einsum('ijkl,mknp->imjnlp',self.cores[i],other.cores[i]),[self.cores[i].shape[0]*other.cores[i].shape[0],self.cores[i].shape[1],other.cores[i].shape[2],self.cores[i].shape[3]*other.cores[i].shape[3]])
                cores_new.append(core)
        elif self.__is_ttm == False and other.is_ttm:
            # vector-matrix multiplication
            if self.__N != other.M:
                raise ShapeMismatch("Shapes do not match.")
            cores_new = []
            for i in range(len(self.cores)):
                # contract x's mode index k with A's row index.
                core = tn.reshape(tn.einsum('mkp,ikjl->imjlp',self.cores[i],other.cores[i]),[self.cores[i].shape[0]*other.cores[i].shape[0],other.cores[i].shape[2],self.cores[i].shape[2]*other.cores[i].shape[3]])
                cores_new.append(core)
        else:
            raise InvalidArguments("Wrong arguments.")
        result = TT(cores_new)
        return result
    def fast_matvec(self,other, eps = 1e-12, initial = None, nswp = 20, verb = False, use_cpp = True):
        """
        Fast matrix vector multiplication A@x using DMRG iterations. Faster than traditional matvec + rounding.

        Args:
            other (torchtt.TT): the TT tensor.
            eps (float, optional): relative accuracy for DMRG. Defaults to 1e-12.
            initial (None|torchtt.TT, optional): an approximation of the product (None means random initial guess). Defaults to None.
            nswp (int, optional): number of DMRG iterations. Defaults to 20.
            verb (bool, optional): show info for debug. Defaults to False.
            use_cpp (bool, optional): use the C++ implementation if available. Defaults to True.

        Raises:
            InvalidArguments: Second operand has to be TT object.
            IncompatibleTypes: First operand should be a TT matrix and second a TT vector.

        Returns:
            torchtt.TT: the result.
        """
        if not isinstance(other,TT):
            raise InvalidArguments('Second operand has to be TT object.')
        # only TTM @ TT is supported by the DMRG matvec
        if not self.__is_ttm or other.is_ttm:
            raise IncompatibleTypes('First operand should be a TT matrix and second a TT vector.')
        return dmrg_matvec(self, other, y0 = initial, eps = eps, verb = verb, nswp = nswp, use_cpp = use_cpp)
def apply_mask(self,indices):
"""
Evaluate the tensor on the given index list.
Examples:
.. code-block:: python
x = torchtt.random([10,12,14],[1,4,5,1])
indices = torch.tensor([[0,0,0],[1,2,3],[1,1,1]])
val = x.apply_mask(indices)
Args:
indices (list[list[int]]): the index list where the tensor should be evaluated. Length is M.
Returns:
torch.tensor: the values of the tensor
"""
result = apply_mask(self.cores,self.__R,indices)
return result
def __truediv__(self,other):
"""
This function implements the "/" operator.
This operation is performed using the AMEN solver. The number of sweeps and rthe relative accuracy are fixed.
For most cases it is sufficient but sometimes it can fail.
Check the function torchtt.elementwise_divide() if you want to change the arguments of the AMEN solver.
Args:
other (torchtt.TT | float | int | torch.tensor): the second operand. If a `torch.tensor` is provided, it must have 1 element.
Raises:
IncompatibleTypes: Operands should be either TT or TTM.
ShapeMismatch: Both operands should have the same shape.
InvalidArguments: Operand not permitted. A TT-object can be divided only with scalars.
Returns:
torchtt.TT: the result.
"""
if isinstance(other,int) or isinstance(other,float) or tn.is_tensor(other):
# divide by a scalar
cores_new = self.cores.copy()
cores_new[0] /= other
result = TT(cores_new)
elif isinstance(other,TT):
if self.__is_ttm != other.is_ttm:
raise IncompatibleTypes('Operands should be either TT or TTM.')
if self.__N != other.N or (self.__is_ttm and self.__M != other.M):
raise ShapeMismatch("Both operands should have the same shape.")
result = TT(amen_divide(other,self,50,None,1e-12,500,verbose=False))
else:
raise InvalidArguments('Operand not permitted. A TT-object can be divided only with scalars.')
return result
def __rtruediv__(self,other):
"""
Right true division. this function is called when a non TT object is divided by a TT object.
This operation is performed using the AMEN solver. The number of sweeps and rthe relative accuracy are fixed.
For most cases it is sufficient but sometimes it can fail.
Check the function torchtt.elementwise_divide() if you want to change the arguments of the AMEN solver.
Example:
.. code-block:: python
z = 1.0/x # x is TT instance
Args:
other (torchtt.TT | float | int | torch.tensor): the first operand. If a `torch.tensor` is provided, it must have 1 element.
Raises:
InvalidArguments: The first operand must be int, float or 1d torch.tensor.
Returns:
torchtt.TT: the result.
"""
if isinstance(other,int) or isinstance(other,float) or ( tn.is_tensor(other) and other.numel()==1):
o = TT([tn.ones((1,n,1),dtype=self.cores[0].dtype,device = self.cores[0].device) for n in self.__N])# ones(self.__N,dtype=self.cores[0].dtype,device = self.cores[0].device)
o.cores[0] *= other
cores_new = amen_divide(self,o,50,None,1e-12,500,verbose=False)
else:
raise InvalidArguments("The first operand must be int, float or 1d torch.tensor.")
return TT(cores_new)
def t(self):
"""
Returns the transpose of a given TT matrix.
Returns:
torchtt.TT: the transpose.
Raises:
InvalidArguments: Has to be TT matrix.
"""
if not self.__is_ttm:
raise InvalidArguments('Has to be TT matrix.')
cores_new = [tn.permute(c,[0,2,1,3]) for c in self.cores]
return TT(cores_new)
    def norm(self,squared=False):
        """
        Computes the Frobenius norm of a TT object.

        Two code paths exist: if any core takes part in an autograd graph, the
        norm is computed by contracting the tensor with its own conjugate
        (differentiable); otherwise a left-to-right QR sweep is performed and
        the norm of the last factor is returned.

        Args:
            squared (bool, optional): returns the square of the norm if True. Defaults to False.

        Returns:
            torch.tensor: the norm.
        """
        if any([c.requires_grad or c.grad_fn != None for c in self.cores]):
            # differentiable path: accumulate <x, conj(x)> core by core
            norm = tn.tensor([[1.0]],dtype = self.cores[0].dtype, device=self.cores[0].device)
            if self.__is_ttm:
                for i in range(len(self.__N)):
                    norm = tn.einsum('ab,aijm,bijn->mn',norm, self.cores[i], tn.conj(self.cores[i]))
                norm = tn.squeeze(norm)
            else:
                for i in range(len(self.__N)):
                    norm = tn.einsum('ab,aim,bin->mn',norm, self.cores[i], tn.conj(self.cores[i]))
                norm = tn.squeeze(norm)
            if squared:
                return norm
            else:
                # abs() guards against tiny negative values from round-off (and
                # complex dtypes) before the square root
                return tn.sqrt(tn.abs(norm))
        else:
            # non-AD path: left-to-right orthogonalization; after the sweep the
            # whole norm is concentrated in the last factor
            d = len(self.cores)
            core_now = self.cores[0]
            for i in range(d-1):
                if self.__is_ttm:
                    mode_shape = [core_now.shape[1],core_now.shape[2]]
                    core_now = tn.reshape(core_now,[core_now.shape[0]*core_now.shape[1]*core_now.shape[2],-1])
                else:
                    mode_shape = [core_now.shape[1]]
                    core_now = tn.reshape(core_now,[core_now.shape[0]*core_now.shape[1],-1])
                # perform QR
                Qmat, Rmat = QR(core_now)
                # take next core
                core_next = self.cores[i+1]
                shape_next = list(core_next.shape[1:])
                core_next = tn.reshape(core_next,[core_next.shape[0],-1])
                # push the non-orthogonal factor R into the next core
                core_next = Rmat @ core_next
                core_next = tn.reshape(core_next,[Qmat.shape[1]]+shape_next)
                # update the cores
                core_now = core_next
            if squared:
                return tn.linalg.norm(core_next)**2
            else:
                return tn.linalg.norm(core_next)
def sum(self,index = None):
"""
Contracts a tensor in the TT format along the given indices and retuyrns the resulting tensor in the TT format.
If no index list is given, the sum over all indices is performed.
Examples:
.. code-block:: python
a = torchtt.ones([3,4,5,6,7])
print(a.sum())
print(a.sum([0,2,4]))
print(a.sum([1,2]))
print(a.sum([0,1,2,3,4]))
Args:
index (int | list[int] | None, optional): the indices along which the summation is performed. None selects all of them. Defaults to None.
Raises:
InvalidArguments: Invalid index.
Returns:
torchtt.TT/torch.tensor: the result.
"""
if index != None and isinstance(index,int):
index = [index]
if not isinstance(index,list) and index != None:
raise InvalidArguments('Invalid index.')
if index == None:
# the case we need to sum over all modes
if self.__is_ttm:
C = tn.reduce_sum(self.cores[0],[0,1,2])
for i in range(1,len(self.__N)):
C = tn.sum(tn.einsum('i,ijkl->jkl',C,self.cores[i]),[0,1])
S = tn.sum(C)
else:
C = tn.sum(self.cores[0],[0,1])
for i in range(1,len(self.__N)):
C = tn.sum(tn.einsum('i,ijk->jk',C,self.cores[i]),0)
S = tn.sum(C)
else:
# we return the TT-tensor with summed indices
cores = []
if self.__is_ttm:
tmp = [1,2]
else:
tmp = [1]
for i in range(len(self.__N)):
if i in index:
C = tn.sum(self.cores[i], tmp, keepdim = True)
cores.append(C)
else:
cores.append(self.cores[i])
S = TT(cores)
S.reduce_dims()
if len(S.cores)==1 and tn.numel(S.cores[0])==1:
S = tn.squeeze(S.cores[0])
return S
def to_ttm(self):
"""
Converts a TT-tensor to the TT-matrix format. In the tensor has the shape N1 x ... x Nd, the result has the shape
N1 x ... x Nd x 1 x ... x 1.
Returns:
torch.TT: the result
"""
cores_new = [tn.reshape(c,(c.shape[0],c.shape[1],1,c.shape[2])) for c in self.cores]
return TT(cores_new)
    def reduce_dims(self, exclude = []):
        """
        Reduces the size 1 modes of the TT-object (in place).

        A size-1 core is removed by absorbing its rank-connecting matrix into a
        neighbouring core: into the left neighbour if the left rank is larger
        (or the core is the last one), otherwise into the right neighbour.
        At least one mode should be larger than 1.
        Updates cores, the internal mode sizes/ranks and the ``shape`` field.

        Note: the mutable default ``exclude=[]`` is only read, never mutated,
        so the shared-default pitfall does not apply here.

        Args:
            exclude (list, optional): Indices to exclude. Defaults to [].
        """
        # TODO: implement a version that reduces the rank also. by spliting the cores with modes 1 into 2 using the SVD.
        if self.__is_ttm:
            cores_new = []
            for i in range(len(self.__N)):
                # a TTM core can be removed only if both its row and column mode are 1
                if self.cores[i].shape[1] == 1 and self.cores[i].shape[2] == 1 and not i in exclude:
                    if self.cores[i].shape[0] > self.cores[i].shape[3] or i == len(self.__N)-1:
                        # multiply to the left
                        if len(cores_new) > 0:
                            cores_new[-1] = tn.einsum('ijok,kl->ijol',cores_new[-1], self.cores[i][:,0,0,:])
                        else:
                            # there is no core to the left. Multiply right.
                            if i != len(self.__N)-1:
                                self.cores[i+1] = tn.einsum('ij,jkml->ikml', self.cores[i][:,0,0,:],self.cores[i+1])
                            else:
                                # single remaining core: keep it
                                cores_new.append(self.cores[i])
                    else:
                        # multiply to the right. Set the carry
                        self.cores[i+1] = tn.einsum('ij,jkml->ikml',self.cores[i][:,0,0,:],self.cores[i+1])
                else:
                    cores_new.append(self.cores[i])
            # update the cores and ranks and shape
            self.__N = []
            self.__M = []
            self.__R = [1]
            for i in range(len(cores_new)):
                self.__N.append(cores_new[i].shape[2])
                self.__M.append(cores_new[i].shape[1])
                self.__R.append(cores_new[i].shape[3])
            self.cores = cores_new
        else:
            cores_new = []
            for i in range(len(self.__N)):
                if self.cores[i].shape[1] == 1 and not i in exclude:
                    if self.cores[i].shape[0] > self.cores[i].shape[2] or i == len(self.__N)-1:
                        # multiply to the left
                        if len(cores_new) > 0:
                            cores_new[-1] = tn.einsum('ijk,kl->ijl',cores_new[-1], self.cores[i][:,0,:])
                        else:
                            # there is no core to the left. Multiply right.
                            if i != len(self.__N)-1:
                                self.cores[i+1] = tn.einsum('ij,jkl->ikl', self.cores[i][:,0,:],self.cores[i+1])
                            else:
                                # single remaining core: keep it
                                cores_new.append(self.cores[i])
                    else:
                        # multiply to the right. Set the carry
                        self.cores[i+1] = tn.einsum('ij,jkl->ikl',self.cores[i][:,0,:],self.cores[i+1])
                else:
                    cores_new.append(self.cores[i])
            # update the cores and ranks and shape
            self.__N = []
            self.__R = [1]
            for i in range(len(cores_new)):
                self.__N.append(cores_new[i].shape[1])
                self.__R.append(cores_new[i].shape[2])
            self.cores = cores_new
        self.shape = [ (m,n) for m,n in zip(self.__M,self.__N) ] if self.__is_ttm else [n for n in self.N]
    def __getitem__(self,index):
        """
        Performs slicing of a TT object.

        Both TT matrix and TT tensor are supported. Similar to pytorch or numpy
        slicing. For a TT matrix the index tuple is split in halves: the first
        half addresses the row modes, the second half the column modes.
        ``None`` entries insert a new size-1 mode.

        Args:
            index (tuple[slice] | tuple[int] | int | Ellipsis | slice): the slicing.

        Raises:
            NotImplementedError: Ellipsis are not supported.
            InvalidArguments: Slice size is invalid.
            InvalidArguments: Slice arguments not valid. They have to be either int, slice or None.
            InvalidArguments: Invalid slice. Tensor is not 1d.

        Returns:
            torchtt.TT | torch.tensor: the result. If all the indices are fixed, a scalar torch.tensor is returned otherwise a torchtt.TT.
        """
        # slicing function
        ##### TODO: include Ellipsis support for tensor operators.
        # if a slice containg integers is passed, an element is returned
        # if ranged slices are used, a TT-object has to be returned.
        # positions of inserted (None) modes, skipped later by reduce_dims
        exclude = []
        if isinstance(index,tuple):
            # check if more than two Ellipsis are to be found.
            if index.count(Ellipsis) > 1 or (self.is_ttm and index.count(Ellipsis) > 0):
                raise NotImplementedError('Ellipsis are not supported more than once of for tensor operators.')
            if self.__is_ttm:
                cores_new = []
                k=0
                # the first len(index)//2 entries index rows, the rest columns
                for i in range(len(index)//2):
                    idx1 = index[i]
                    idx2 = index[i+len(index)//2]
                    if isinstance(idx1,slice) and isinstance(idx2,slice):
                        cores_new.append(self.cores[k][:,idx1,idx2,:])
                        k+=1
                    elif idx1==None and idx2==None:
                        # extend the tensor
                        tmp = tn.eye(cores_new[-1].shape[-1] if len(cores_new)!=0 else 1, device = self.cores[0].device, dtype = self.cores[0].dtype)[:,None,None,:]
                        cores_new.append(tmp)
                        exclude.append(i)
                    elif isinstance(idx1, int) and isinstance(idx2,int):
                        # fixed entry: keep a 1x1 mode so reduce_dims can absorb it
                        cores_new.append(tn.reshape(self.cores[k][:,idx1,idx2,:],[self.__R[k],1,1,self.R[k+1]]))
                        k+=1
                    else:
                        raise InvalidArguments("Slice carguments not valid. They have to be either int, slice or None.")
                if k<len(self.cores):
                    raise InvalidArguments('Slice size is invalid.')
            else:
                # if len(index) != len(self.__N):
                #     raise InvalidArguments('Slice size is invalid.')
                # expand a single Ellipsis into the right number of full slices
                num_none = sum([i is None for i in index])
                if index[0] == Ellipsis:
                    index = (slice(None, None, None),)*(len(self.__N)-len(index)+1+num_none) + index[1:]
                elif index[-1] == Ellipsis:
                    index = index[:-1] + (slice(None, None, None),)*(len(self.__N)-len(index)+1+num_none)
                cores_new = []
                k = 0
                for i,idx in enumerate(index):
                    if isinstance(idx,slice):
                        cores_new.append(self.cores[k][:,idx,:])
                        k+=1
                    elif idx is None:
                        # extend the tensor
                        tmp = tn.eye(cores_new[-1].shape[-1] if len(cores_new)!=0 else 1, device = self.cores[0].device, dtype = self.cores[0].dtype)[:,None,:]
                        cores_new.append(tmp)
                        exclude.append(i)
                    elif isinstance(idx, int):
                        cores_new.append(tn.reshape(self.cores[k][:,idx,:],[self.__R[k],-1,self.R[k+1]]))
                        k+=1
                    else:
                        raise InvalidArguments("Slice carguments not valid. They have to be either int, slice or None.")
                if k<len(self.cores):
                    raise InvalidArguments('Slice size is invalid.')
            sliced = TT(cores_new)
            # absorb the size-1 modes created by integer indexing
            sliced.reduce_dims(exclude)
            if (sliced.is_ttm == False and sliced.N == [1]) or (sliced.is_ttm and sliced.N == [1] and sliced.M == [1]):
                # all indices fixed: return a scalar
                sliced = tn.squeeze(sliced.cores[0])
            # cores = None
        elif isinstance(index,int):
            # tensor is 1d and one element is retrived
            if len(self.__N) == 1:
                sliced = self.cores[0][0,index,0]
            else:
                raise InvalidArguments('Invalid slice. Tensor is not 1d.')
            ## TODO
        elif index == Ellipsis:
            # return a copy of the tensor
            sliced = TT([c.clone() for c in self.cores])
        elif isinstance(index,slice):
            # tensor is 1d and one slice is extracted
            if len(self.__N) == 1:
                sliced = TT(self.cores[0][:,index,:])
            else:
                raise InvalidArguments('Invalid slice. Tensor is not 1d.')
            ## TODO
        else:
            raise InvalidArguments('Invalid slice.')
        return sliced
def __pow__(self, other):
"""
Computes the tensor Kronecker product.
This implements the "**" operator.
If None is provided as input the reult is the other tensor.
If A is N_1 x ... x N_d and B is M_1 x ... x M_p, then kron(A,B) is N_1 x ... x N_d x M_1 x ... x M_p
Args:
first (torchtt.TT or None): first argument.
second (torchtt.TT or none): second argument.
Raises:
IncompatibleTypes: Incompatible data types (make sure both are either TT-matrices or TT-tensors).
InvalidArguments: Invalid arguments.
Returns:
torchtt.TT: the result.
"""
if other == None:
cores_new = [c.clone() for c in self.cores]
result = TT(cores_new)
elif isinstance(other,TT):
if self.is_ttm != other.is_ttm:
raise IncompatibleTypes('Incompatible data types (make sure both are either TT-matrices or TT-tensors).')
# concatenate the result
cores_new = [c.clone() for c in self.cores] + [c.clone() for c in other.cores]
result = TT(cores_new)
else:
raise InvalidArguments('Invalid arguments.')
return result
def __rpow__(self,other):
"""
Computes the tensor Kronecker product.
This implements the "**" operator.
If None is provided as input the reult is the other tensor.
If A is N_1 x ... x N_d and B is M_1 x ... x M_p, then kron(A,B) is N_1 x ... x N_d x M_1 x ... x M_p
Args:
first (torchtt.TT or None): first argument.
second (torchtt.TT or none): second argument.
Raises:
IncompatibleTypes: Incompatible data types (make sure both are either TT-matrices or TT-tensors).
InvalidArguments: Invalid arguments.
Returns:
torchtt.TT: the result.
"""
result = kron(self,other)
return result
def __neg__(self):
"""
Returns the negative of a given TT tensor.
This implements the unery operator "-"
Returns:
torchtt.TT: the negated tensor.
"""
cores_new = [c.clone() for c in self.cores]
cores_new[0] = -cores_new[0]
return TT(cores_new)
def __pos__(self):
"""
Implements the unary "+" operator returning a copy o the tensor.
Returns:
torchtt.TT: the tensor clone.
"""
cores_new = [c.clone() for c in self.cores]
return TT(cores_new)
def round(self, eps=1e-12, rmax = sys.maxsize):
"""
Implements the rounding operations within a given tolerance epsilon.
The maximum rank is also provided.
Args:
eps (float, optional): the relative accuracy. Defaults to 1e-12.
rmax (int, optional): the maximum rank. Defaults to the maximum possible integer.
Returns:
torchtt.TT: the result.
"""
# rmax is not list
if not isinstance(rmax,list):
rmax = [1] + len(self.__N)*[rmax] + [1]
# call the round function
tt_cores, R = round_tt(self.cores, self.__R.copy(), eps, rmax,self.__is_ttm)
# creates a new TT and return it
T = TT(tt_cores)
return T
def to_qtt(self, eps = 1e-12, mode_size = 2, rmax = sys.maxsize):
"""
Converts a tensor to the QTT format: N1 x N2 x ... x Nd -> mode_size x mode_size x ... x mode_size.
The product of the mode sizes should be a power of mode_size.
The tensor in QTT can be converted back using the qtt_to_tens() method.
Examples:
.. code-block:: python
x = torchtt.random([16,8,64,128],[1,2,10,12,1])
x_qtt = x.to_qtt()
print(x_qtt)
xf = x_qtt.qtt_to_tens(x.N) # a TT-rounding is recommended.
Args:
eps (float,optional): the accuracy. Defaults to 1e-12.
mode_size (int, optional): the size of the modes. Defaults to 2.
rmax (int): the maximum rank. Defaults to the maximum possible integer.
Raises:
ShapeMismatch: Only quadratic TTM can be tranformed to QTT.
ShapeMismatch: Reshaping error: check if the dimensions are powers of the desired mode size.
Returns:
torchtt.TT: the resulting reshaped tensor.
"""
cores_new = []
if self.__is_ttm:
shape_new = []
for i in range(len(self.__N)):
if self.__N[i]!=self.__M[i]:
raise ShapeMismatch('Only quadratic TTM can be tranformed to QTT.')
if self.__N[i]==mode_size**int(math.log(self.N[i],mode_size)):
shape_new += [(mode_size,mode_size)]*int(math.log(self.__N[i],mode_size))
else:
raise ShapeMismatch('Reshaping error: check if the dimensions are powers of the desired mode size:\r\ncore size '+str(list(self.cores[i].shape))+' cannot be reshaped.')
result = torchtt._extras.reshape(self, shape_new, eps, rmax)
else:
for core in self.cores:
if int(math.log(core.shape[1],mode_size))>1:
Nnew = [core.shape[0]*mode_size]+[mode_size]*(int(math.log(core.shape[1],mode_size))-2)+[core.shape[2]*mode_size]
try:
core = tn.reshape(core,Nnew)
except:
raise ShapeMismatch('Reshaping error: check if the dimensions care powers of the desired mode size:\r\ncore size '+str(list(core.shape))+' cannot be reshaped to '+str(Nnew))
cores,_ = to_tt(core,Nnew,eps,rmax,is_sparse=False)
cores_new.append(tn.reshape(cores[0],[-1,mode_size,cores[0].shape[-1]]))
cores_new += cores[1:-1]
cores_new.append(tn.reshape(cores[-1],[cores[-1].shape[0],mode_size,-1]))
else:
cores_new.append(core)
result = TT(cores_new)
return result
def qtt_to_tens(self, original_shape):
"""
Transform a tensor back from QTT.
Args:
original_shape (list): the original shape.
Raises:
InvalidArguments: Original shape must be a list.
ShapeMismatch: Mode sizes do not match.
Returns:
torchtt.TT: the folded tensor.
"""
if not isinstance(original_shape,list):
raise InvalidArguments("Original shape must be a list.")
core = None
cores_new = []
if self.__is_ttm:
pass
else:
k = 0
for c in self.cores:
if core==None:
core = c
so_far = core.shape[1]
else:
core = tn.einsum('...i,ijk->...jk',core,c)
so_far *= c.shape[1]
if so_far==original_shape[k]:
core = tn.reshape(core,[core.shape[0],-1,core.shape[-1]])
cores_new.append(core)
core = None
k += 1
if k!= len(original_shape):
raise ShapeMismatch('Mode sizes do not match.')
return TT(cores_new)
def mprod(self, factor_matrices, mode):
"""
n-mode product.
Args:
factor_matrices (torch.tensor or list[torch.tensor]): either a single matrix is directly provided or a list of matrices for product along multiple modes.
mode (int or list[int]): the mode for the product. If factor_matrices is a torch.tensor then mode is an integer and the multiplication will be performed along a single mode.
If factor_matrices is a list, the mode has to be list[int] of equal size.
Raises:
InvalidArguments: Invalid arguments.
ShapeMismatch: The n-th mode of the tensor must be equal with the 2nd mode of the matrix.
IncompatibleTypes: n-model product works only with TT-tensors and not TT matrices.
Returns:
torchtt.TT: the result
"""
if self.__is_ttm:
raise IncompatibleTypes("n-model product works only with TT-tensors and not TT matrices.")
if isinstance(factor_matrices,list) and isinstance(mode, list):
cores_new = [c.clone() for c in self.cores]
for i in range(len(factor_matrices)):
if cores_new[mode[i]].shape[1] != factor_matrices[i].shape[1]:
raise ShapeMismatch("The n-th mode of the tensor must be equal with the 2nd mode of the matrix.")
cores_new[mode[i]] = tn.einsum('ijk,lj->ilk',cores_new[mode[i]],factor_matrices[i]) # if self.__is_ttm else tn.einsum('ijk,lj->ilk',cores_new[mode[i]],factor_matrices[i])
elif isinstance(mode, int) and tn.is_tensor(factor_matrices):
cores_new = [c.clone() for c in self.cores]
if cores_new[mode].shape[1] != factor_matrices.shape[1]:
raise ShapeMismatch("The n-th mode of the tensor must be equal with the 2nd mode of the matrix.")
cores_new[mode] = tn.einsum('ijk,lj->ilk',cores_new[mode],factor_matrices) # if self.__is_ttm else tn.einsum('ijk,lj->ilk',cores_new[mode],factor_matrices)
else:
raise InvalidArguments('Invalid arguments.')
return TT(cores_new)
def conj(self):
"""
Return the complex conjugate of a tensor in TT format.
Returns:
torchtt.TT: the complex conjugated tensor.
"""
return TT([tn.conj(c) for c in self.cores])
| 64,632 | 42.818983 | 237 | py |
torchTT | torchTT-main/torchtt/_torchtt.py | """
Basic class for TT decomposition.
It contains the base TT class as well as additional functions.
The TT class implements tensors in the TT format as well as tensor operators in TT format. Once in the TT format, linear algebra operations (`+`, `-`, `*`, `@`, `/`) can be performed without resorting to the full format. The format and the operations are similar to those implemented in `torch`.
"""
import torch as tn
import torch.nn.functional as tnf
from torchtt._decomposition import mat_to_tt, to_tt, lr_orthogonal, round_tt, rl_orthogonal, QR, SVD, rank_chop
from torchtt._division import amen_divide
import numpy as np
import math
from torchtt._dmrg import dmrg_matvec
from torchtt._aux_ops import apply_mask, dense_matvec, bilinear_form_aux
from torchtt.errors import *
# import torchttcpp
# from torchtt._aux_ops import save, load
import sys
class TT():
#cores : list[tn.tensor]
#""" The TT cores as a list of `torch.tensor` instances."""
@property
def is_ttm(self):
"""
Check whether the instance is a TT operator or not.
Returns:
bool: the flag.
"""
return self.__is_ttm
@property
def M(self):
"""
Return the "row" shape in case of TT matrices.
Raises:
IncompatibleTypes: The field is_ttm is defined only for TT matrices.
Returns:
list[int]: the shape.
"""
if not self.__is_ttm:
raise IncompatibleTypes("The field is_ttm is defined only for TT matrices.")
return self.__M.copy()
@property
def N(self):
"""
Return the shape of a tensor or the "column" shape of a TT operator.
Returns:
list[int]: the shape.
"""
return self.__N.copy()
@property
def R(self):
"""
The rank of the TT decomposition.
It's length should be `len(R)==len(N)+1`.
Returns:
list[int]: the rank.
"""
return self.__R.copy()
def __init__(self, source, shape=None, eps=1e-10, rmax=sys.maxsize):
"""
Constructor of the TT class. Can convert full tensor in the TT-format (from `torch.tensor` or `numpy.array`).
In the case of tensor operators of full shape `M1 x ... Md x N1 x ... x Nd`, the shape must be specified as a list of tuples `[(M1,N1),...,(Md,Nd)]`.
A TT-object can also be computed from cores if the list of cores is passed as argument.
If None is provided, an empty tensor is created.
The TT decomposition of a tensor is
\(\\mathsf{x}=\\sum\\limits_{r_1...r_{d-1}=1}^{R_1,...,R_{d-1}} \\mathsf{x}^{(1)}_{1i_1r_1}\\cdots\\mathsf{x}^{(d)}_{r_{d-1}i_d1}\),
where \(\\{\\mathsf{x}^{(k)}\\}_{k=1}^d\) are the TT cores and \(\\mathbf{R}=(1,R_1,...,R_{d-1},1)\) is the TT rank.
Using the constructor, a TT decomposition of a tensor can be computed. The TT cores are stored as a list in `torchtt.TT.cores`.
This class implements basic operators such as `+,-,*,/,@,**` (add, subtract, elementwise multiplication, elementwise division, matrix vector product and Kronecker product) between TT instances.
The `examples\` folder server as a tutorial for all the possibilities of the toolbox.
Examples:
```
import torchtt
import torch
x = torch.reshape(torch.arange(0,128,dtype = torch.float64),[8,4,4])
xtt = torchtt.TT(x)
ytt = torchtt.TT(torch.squeeze(x),[8,4,4])
# create a TT matrix
A = torch.reshape(torch.arange(0,20160,dtype = torch.float64),[3,5,7,4,6,8])
Att = torchtt.TT(A,[(3,4),(5,6),(7,8)])
print(Att)
```
Args:
source (torch.tensor ot list[torch.tensor] or numpy.array or None): the input tensor in full format or the cores. If a `torch.tensor` or `numpy.array` is provided
shape (list[int] or list[tuple[int]], optional): the shape (if it differs from the one provided). For the TT-matrix case is mandatory. Defaults to None.
eps (float, optional): tolerance of the TT approximation. Defaults to 1e-10.
rmax (int or list[int], optional): maximum rank (either a list of integer or an integer). Defaults to the maximum possible integer.
Raises:
RankMismatch: Ranks of the given cores do not match (change the spaces of the cores).
InvalidArguments: Invalid input: TT-cores have to be either 4d or 3d.
InvalidArguments: Check the ranks and the mode size.
NotImplementedError: Function only implemented for torch tensors, numpy arrays, list of cores as torch tensors and None
"""
if source is None:
# empty TT
self.cores = []
self.__M = []
self.__N = []
self.__R = [1,1]
self.__is_ttm = False
elif isinstance(source, list):
# tt cores were passed directly
# check if sizes are consistent
prev = 1
N = []
M = []
R = [source[0].shape[0]]
d = len(source)
for i in range(len(source)):
s = source[i].shape
if s[0] != R[-1]:
raise RankMismatch("Ranks of the given cores do not match: for core number %d previous rank is %d and and current rank is %d."%(i,R[-1],s[0]))
if len(s) == 3:
R.append(s[2])
N.append(s[1])
elif len(s)==4:
R.append(s[3])
M.append(s[1])
N.append(s[2])
else:
raise InvalidArguments("Invalid input: TT-cores have to be either 4d or 3d.")
if len(N) != d or len(R) != d+1 or R[0] != 1 or R[-1] != 1 or (len(M)!=0 and len(M)!=len(N)) :
raise InvalidArguments("Check the ranks and the mode size.")
self.cores = source
self.__R = R
self.__N = N
if len(M) == len(N):
self.__M = M
self.__is_ttm = True
else:
self.__is_ttm = False
self.shape = [ (m,n) for m,n in zip(self.__M,self.__N) ] if self.__is_ttm else [n for n in self.N]
elif tn.is_tensor(source):
if shape == None:
# no size is given. Deduce it from the tensor. No TT-matrix in this case.
self.__N = list(source.shape)
if len(self.__N)>1:
self.cores, self.__R = to_tt(source,self.__N,eps,rmax,is_sparse=False)
else:
self.cores = [tn.reshape(source,[1,self.__N[0],1])]
self.__R = [1,1]
self.__is_ttm = False
elif isinstance(shape,list) and isinstance(shape[0],tuple):
# if the size contains tuples, we have a TT-matrix.
if len(shape) > 1:
self.__M = [s[0] for s in shape]
self.__N = [s[1] for s in shape]
self.cores, self.__R = mat_to_tt(source, self.__M, self.__N, eps, rmax)
self.__is_ttm = True
else:
self.__M = [shape[0][0]]
self.__N = [shape[0][1]]
self.cores, self.__R = [tn.reshape(source,[1,shape[0][0],shape[0][1],1])], [1,1]
self.__is_ttm = True
else:
# TT-decomposition with prescribed size
# perform reshape first
self.__N = shape
self.cores, self.__R = to_tt(tn.reshape(source,shape),self.__N,eps,rmax,is_sparse=False)
self.__is_ttm = False
self.shape = [ (m,n) for m,n in zip(self.__M,self.__N) ] if self.__is_ttm else [n for n in self.N]
elif isinstance(source, np.ndarray):
source = tn.tensor(source)
if shape == None:
# no size is given. Deduce it from the tensor. No TT-matrix in this case.
self.__N = list(source.shape)
if len(self.__N)>1:
self.cores, self.__R = to_tt(source,self.__N,eps,rmax,is_sparse=False)
else:
self.cores = [tn.reshape(source,[1,self.__N[0],1])]
self.__R = [1,1]
self.__is_ttm = False
elif isinstance(shape,list) and isinstance(shape[0],tuple):
# if the size contains tuples, we have a TT-matrix.
self.__M = [s[0] for s in shape]
self.__N = [s[1] for s in shape]
self.cores, self.__R = mat_to_tt(source, self.__M, self.__N, eps, rmax)
self.__is_ttm = True
else:
# TT-decomposition with prescribed size
# perform reshape first
self.__N = shape
self.cores, self.__R = to_tt(tn.reshape(source,shape),self.__N,eps,rmax,is_sparse=False)
self.__is_ttm = False
self.shape = [ (m,n) for m,n in zip(self.__M,self.__N) ] if self.__is_ttm else [n for n in self.N]
else:
raise NotImplementedError("Function only implemented for torch tensors, numpy arrays, list of cores as torch tensors and None.")
def cuda(self, device = None):
"""
Return a torchtt.TT object on the CUDA device by cloning all the cores on the GPU.
Args:
device (torch.device, optional): The CUDA device (None for CPU). Defaults to None.
Returns:
torchtt.TT: The TT-object. The TT-cores are on CUDA.
"""
t = TT([ c.cuda(device) for c in self.cores])
return t
def cpu(self):
"""
Retrive the cores from the GPU.
Returns:
torchtt.TT: The TT-object on CPU.
"""
return TT([ c.cpu() for c in self.cores])
def is_cuda(self):
"""
Return True if the tensor is on GPU.
Returns:
bool: Is the torchtt.TT on GPU or not.
"""
return all([c.is_cuda for c in self.core])
def to(self, device = None, dtype = None):
"""
Moves the TT instance to the given device with the given dtype.
Args:
device (torch.device, optional): The desired device. If none is provided, the device is the CPU. Defaults to None.
dtype (torch.dtype, optional): The desired dtype (torch.float64, torch.float32,...). If None is provided the dtype is not changed. Defaults to None.
"""
return TT( [ c.to(device=device,dtype=dtype) for c in self.cores])
def detach(self):
"""
Detaches the TT tensor. Similar to torch.tensor.detach().
Returns:
torchtt.TT: the detached tensor.
"""
return TT([c.detach() for c in self.cores])
def clone(self):
"""
Clones the torchtt.TT instance. Similar to torch.tensor.clone().
Returns:
torchtt.TT: the cloned TT object.
"""
return TT([c.clone() for c in self.cores])
def full(self):
    """
    Return the full (dense) tensor obtained by contracting all TT-cores.
    In case of a TTM, the result has the shape M1 x M2 x ... x Md x N1 x N2 x ... x Nd.

    Returns:
        torch.tensor: the full tensor.
    """
    if self.__is_ttm:
        # the case of tt-matrix: strip the leading rank-1 index of the first core,
        # then contract over the TT-ranks left to right
        tfull = self.cores[0][0,:,:,:]
        for i in range(1,len(self.cores)-1) :
            tfull = tn.einsum('...i,ijkl->...jkl',tfull,self.cores[i])
        if len(self.__N) != 1:
            tfull = tn.einsum('...i,ijk->...jk',tfull,self.cores[-1][:,:,:,0])
            # interleaved axes (m1,n1,m2,n2,...) are permuted to (m1..md, n1..nd)
            # NOTE(review): mixes self.N and self.__N — presumably `N` is a public property mirroring `__N`; verify.
            tfull = tn.permute(tfull,list(np.arange(len(self.__N))*2)+list(np.arange(len(self.N))*2+1))
        else:
            # single-core TTM: just drop the trailing rank-1 index
            tfull = tfull[:,:,0]
    else:
        # the case of a normal tt
        tfull = self.cores[0][0,:,:]
        for i in range(1,len(self.cores)-1) :
            tfull = tn.einsum('...i,ijk->...jk',tfull,self.cores[i])
        if len(self.__N) != 1:
            tfull = tn.einsum('...i,ij->...j',tfull,self.cores[-1][:,:,0])
        else:
            # 1d tensor: remove the size-1 rank axes
            tfull = tn.squeeze(tfull)
    return tfull
def numpy(self):
    """
    Return the full tensor as a numpy.array.
    In case of a TTM, the result has the shape M1 x M2 x ... x Md x N1 x N2 x ... x Nd.
    If the tensor is part of an AD graph, an error will occur.

    Returns:
        numpy.array: the full tensor in numpy.
    """
    dense = self.full()
    return dense.cpu().numpy()
def __repr__(self):
    """
    Build the human-readable description of the TT object: sizes, ranks,
    device/dtype of the cores and the storage/compression statistics.

    Returns:
        string: the string representation of a torchtt.TT.
    """
    entries = sum([tn.numel(core) for core in self.cores])
    device_str = str(self.cores[0].device)
    dtype_str = str(self.cores[0].dtype)
    if self.__is_ttm:
        # denominator of the compression ratio: product of all M_i * N_i
        dense_size = np.prod(np.array(self.__N,dtype=np.float64)*np.array(self.__M,dtype=np.float64))
        output = 'TT-matrix with sizes and ranks:\n'
        output += 'M = ' + str(self.__M) + '\nN = ' + str(self.__N) + '\n'
        output += 'R = ' + str(self.__R) + '\n'
    else:
        dense_size = np.prod(np.array(self.__N,dtype=np.float64))
        output = 'TT with sizes and ranks:\n'
        output += 'N = ' + str(self.__N) + '\n'
        output += 'R = ' + str(self.__R) + '\n\n'
    output += 'Device: ' + device_str + ', dtype: ' + dtype_str + '\n'
    output += '#entries ' + str(entries) + ' compression ' + str(entries/dense_size) + '\n'
    return output
def __radd__(self,other):
    """
    Reflected addition in the TT format. This is called when a non-torchtt.TT
    object appears on the left of "+"; since the addition implemented in
    __add__ is symmetric, simply delegate to it.

    Args:
        other (int or float or torch.tensor scalar): the left operand.

    Returns:
        torchtt.TT: the result.
    """
    return self + other
def __add__(self,other):
    """
    Addition in the TT format. Implements the "+" operator. The following type pairs are supported:
    - both operands are TT-tensors.
    - both operands are TT-matrices.
    - first operand is a TT-tensor or a TT-matrix and the second is a scalar (either torch.tensor scalar or int or float).
    The broadcasting rules from `torch` apply here.

    Args:
        other (torchtt.TT or float or int or torch.tensor with 1 element): second operand.

    Raises:
        ShapeMismatch: Dimension mismatch.
        IncompatibleTypes: Addition between a tensor and a matrix is not defined.
        InvalidArguments: Second term is incompatible.

    Returns:
        torchtt.TT: the result.
    """
    if np.isscalar(other) or ( tn.is_tensor(other) and tn.numel(other) == 1):
        # the second term is a scalar: pad each core with a rank-1 block that carries
        # the scalar in the first core and ones in the rest
        cores = []
        for i in range(len(self.__N)):
            if self.__is_ttm:
                pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 , 0,0 if i==0 else 1)
                pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0,0 , 0 if i==0 else self.R[i],0)
                othr = tn.ones([1,1,1,1],dtype=self.cores[i].dtype) * (other if i ==0 else 1)
            else:
                pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 if i==0 else 1)
                pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
                othr = tn.ones([1,1,1],dtype=self.cores[i].dtype) * (other if i ==0 else 1)
            cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(othr,pad2))
        result = TT(cores)
    elif isinstance(other,TT):
        #second term is TT object
        if self.__is_ttm and other.is_ttm:
            # both are TT-matrices
            # BUGFIX: previously compared self against itself (self.__M != self.M),
            # so incompatible operands were never detected; compare against `other`.
            if self.__M != other.M or self.__N != other.N:
                raise ShapeMismatch("Shapes are incompatible: first operand is %s x %s, second operand is %s x %s."%(str(self.M), str(self.N), str(other.M), str(other.N)))
            cores = []
            for i in range(len(self.__N)):
                pad1 = (0,0 if i == len(self.__N)-1 else other.R[i+1], 0,0 , 0,0 , 0,0 if i==0 else other.R[i])
                pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0,0 , 0 if i==0 else self.R[i],0)
                cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(other.cores[i],pad2))
            result = TT(cores)
        elif self.__is_ttm==False and other.is_ttm==False:
            # normal tensors in TT format.
            if self.__N == other.N:
                # equal shapes: block-diagonal concatenation of the ranks
                cores = []
                for i in range(len(self.__N)):
                    pad1 = (0,0 if i == len(self.__N)-1 else other.R[i+1] , 0,0 , 0,0 if i==0 else other.R[i])
                    pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
                    cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(other.cores[i],pad2))
            else:
                # broadcasting: the shorter operand is aligned to the trailing modes
                if len(self.__N) < len(other.N):
                    raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
                cores = []
                for i in range(len(self.cores)-len(other.cores)):
                    # leading modes of `self` get a rank-1 block of ones from `other`
                    pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 if i==0 else 1)
                    pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
                    cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(tn.ones((1,self.__N[i],1), device = self.cores[i].device),pad2))
                for k,i in zip(range(len(other.cores)), range(len(self.cores)-len(other.cores), len(self.cores))):
                    if other.N[k] == self.__N[i]:
                        pad1 = (0,0 if i == len(self.__N)-1 else other.R[k+1] , 0,0 , 0,0 if i==0 else other.R[k])
                        pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
                        cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(other.cores[k],pad2))
                    elif other.N[k] == 1:
                        # mode of size 1 in `other` is tiled up to the size of `self`
                        pad1 = (0,0 if i == len(self.__N)-1 else other.R[k+1] , 0,0 , 0,0 if i==0 else other.R[k])
                        pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
                        cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(tn.tile(other.cores[k],(1,self.__N[i],1)),pad2))
                    else:
                        raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
            result = TT(cores)
        else:
            # incompatible types
            raise IncompatibleTypes('Addition between a tensor and a matrix is not defined.')
    else:
        # BUGFIX: the exception was constructed but never raised, which led to an
        # UnboundLocalError on `result` below instead of a meaningful error.
        raise InvalidArguments('Second term is incompatible.')
    return result
def __rsub__(self,other):
    """
    Reflected subtraction in the TT format: computes other - self.
    Implemented as -(self - other) by negating the first core of the difference.

    Args:
        other (int or float or torch.tensor with 1 element): the first operand.

    Returns:
        torchtt.TT: the result.
    """
    difference = self - other
    difference.cores[0] = -difference.cores[0]
    return difference
def __sub__(self,other):
    """
    Subtract 2 tensors in the TT format. Implements the "-" operator.
    Possible second operands are: torchtt.TT, float, int, torch.tensor with 1 element.
    Broadcasting rules from `torch` apply for this operation as well.

    Args:
        other (torchtt.TT or float or int or torch.tensor with 1 element): the second operand.

    Raises:
        ShapeMismatch: Dimension mismatch.
        IncompatibleTypes: Addition between a tensor and a matrix is not defined.
        InvalidArguments: Second term is incompatible (must be either torchtt.TT or int or float or torch.tensor with 1 element).

    Returns:
        torchtt.TT: the result.
    """
    # BUGFIX: the scalar check used `other.shape == []`, which misses 1-element
    # tensors of shape [1] and compares a torch.Size against a list; use the same
    # test as __add__ for consistency.
    if np.isscalar(other) or ( tn.is_tensor(other) and tn.numel(other) == 1):
        # the second term is a scalar: add the negated scalar as a rank-1 padding block
        cores = []
        for i in range(len(self.__N)):
            if self.__is_ttm:
                pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 , 0,0 if i==0 else 1)
                pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0,0 , 0 if i==0 else self.R[i],0)
                othr = tn.ones([1,1,1,1],dtype=self.cores[i].dtype) * (-other if i ==0 else 1)
            else:
                pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 if i==0 else 1)
                pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
                othr = tn.ones([1,1,1],dtype=self.cores[i].dtype) * (-other if i ==0 else 1)
            cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(othr,pad2))
        result = TT(cores)
    elif isinstance(other,TT):
        #second term is TT object
        if self.__is_ttm and other.is_ttm:
            # both are TT-matrices
            # BUGFIX: previously compared self against itself (self.__M != self.M),
            # so incompatible operands were never detected; compare against `other`.
            if self.__M != other.M or self.__N != other.N:
                raise ShapeMismatch("Shapes are incompatible: first operand is %s x %s, second operand is %s x %s."%(str(self.M), str(self.N), str(other.M), str(other.N)))
            cores = []
            for i in range(len(self.__N)):
                pad1 = (0,0 if i == len(self.__N)-1 else other.R[i+1] , 0,0 , 0,0 , 0,0 if i==0 else other.R[i])
                pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0,0 , 0 if i==0 else self.R[i],0)
                # the negation is applied only to the first core of `other`
                cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(-other.cores[i] if i==0 else other.cores[i],pad2))
            result = TT(cores)
        elif self.__is_ttm==False and other.is_ttm==False:
            # normal tensors in TT format.
            if self.__N == other.N:
                cores = []
                for i in range(len(self.__N)):
                    pad1 = (0,0 if i == len(self.__N)-1 else other.R[i+1] , 0,0 , 0,0 if i==0 else other.R[i])
                    pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
                    cores.append(tnf.pad(self.cores[i], pad1)+tnf.pad(-other.cores[i] if i==0 else other.cores[i],pad2))
            else:
                # broadcasting: the shorter operand is aligned to the trailing modes
                if len(self.__N) < len(other.N):
                    raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
                cores = []
                for i in range(len(self.cores)-len(other.cores)):
                    pad1 = (0,0 if i == len(self.__N)-1 else 1 , 0,0 , 0,0 if i==0 else 1)
                    pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
                    cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad((-1 if i==0 else 1)*tn.ones((1,self.__N[i],1), device = self.cores[i].device),pad2))
                for k,i in zip(range(len(other.cores)), range(len(self.cores)-len(other.cores), len(self.cores))):
                    if other.N[k] == self.__N[i]:
                        pad1 = (0,0 if i == len(self.__N)-1 else other.R[k+1] , 0,0 , 0,0 if i==0 else other.R[k])
                        pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
                        cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(-other.cores[k] if i==0 else other.cores[k],pad2))
                    elif other.N[k] == 1:
                        # mode of size 1 in `other` is tiled up to the size of `self`
                        pad1 = (0,0 if i == len(self.__N)-1 else other.R[k+1] , 0,0 , 0,0 if i==0 else other.R[k])
                        pad2 = (0 if i == len(self.__N)-1 else self.__R[i+1],0 , 0,0 , 0 if i==0 else self.R[i],0)
                        cores.append(tnf.pad(self.cores[i],pad1)+tnf.pad(tn.tile(-other.cores[k] if i==0 else other.cores[k],(1,self.__N[i],1)),pad2))
                    else:
                        raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
            result = TT(cores)
        else:
            # incompatible types
            raise IncompatibleTypes('Addition between a tensor and a matrix is not defined.')
    else:
        # BUGFIX: the exception was constructed but never raised, which led to an
        # UnboundLocalError on `result` below instead of a meaningful error.
        raise InvalidArguments('Second term is incompatible (must be either torchtt.TT or int or float or torch.tensor with 1 element).')
    return result
def __rmul__(self,other):
    """
    Reflected elementwise multiplication in the TT format.
    This is called for "*" when the left operand is not a torchtt.TT; since the
    multiplication implemented in __mul__ is symmetric, delegate to it.
    Supported pairs:
    * TT tensor and TT tensor
    * TT matrix and TT matrix
    * TT tensor and scalar (int, float or torch.tensor scalar)

    Args:
        other (torchtt.TT or float or int or torch.tensor with 1 element): the left operand.

    Raises:
        ShapeMismatch: Shapes must be equal.
        IncompatibleTypes: Second operand must be the same type as the first (both should be either TT matrices or TT tensors).
        InvalidArguments: Second operand must be of type: torchtt.TT, float, int or torch.tensor.

    Returns:
        torchtt.TT: the result.
    """
    return self * other
def __mul__(self,other):
    """
    Elementwise multiplication in the TT format.
    This implements the "*" operator.
    Following are supported:
    - TT tensor and TT tensor
    - TT matrix and TT matrix
    - TT tensor and scalar (int, float or torch.tensor scalar)
    The broadcasting rules are the same as in torch (see [here](https://pytorch.org/docs/stable/notes/broadcasting.html)).

    Args:
        other (torchtt.TT or float or int or torch.tensor with 1 element): the second operand.

    Raises:
        ShapeMismatch: Shapes are incompatible (see the broadcasting rules).
        IncompatibleTypes: Second operand must be the same type as the first (both should be either TT matrices or TT tensors).
        InvalidArguments: Second operand must be of type: torchtt.TT, float, int or torch.tensor.

    Returns:
        torchtt.TT: the result.
    """
    # elementwise multiplication
    if isinstance(other, TT):
        if self.__is_ttm and other.is_ttm:
            if self.__N == other.N and self.__M == other.M:
                # elementwise product: the new ranks are products of the operand ranks
                cores_new = []
                for i in range(len(self.cores)):
                    core = tn.reshape(tn.einsum('aijb,mijn->amijbn',self.cores[i],other.cores[i]),[self.__R[i]*other.R[i],self.__M[i],self.__N[i],self.R[i+1]*other.R[i+1]])
                    cores_new.append(core)
            else:
                raise ShapeMismatch("Shapes are incompatible: first operand is %s x %s, second operand is %s x %s."%(str(self.M), str(self.N), str(other.M), str(other.N)))
        elif self.__is_ttm == False and other.is_ttm == False:
            # broadcasting rules have to be applied. Separate if/else keeps the
            # non-broadcasting case the fastest.
            if self.__N == other.N:
                cores_new = []
                for i in range(len(self.cores)):
                    core = tn.reshape(tn.einsum('aib,min->amibn',self.cores[i],other.cores[i]),[self.__R[i]*other.R[i],self.__N[i],self.R[i+1]*other.R[i+1]])
                    cores_new.append(core)
            else:
                # broadcasting: the shorter operand is aligned to the trailing modes
                if len(self.__N) < len(other.N):
                    raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
                cores_new = []
                for i in range(len(self.cores)-len(other.cores)):
                    cores_new.append(self.cores[i]*1)
                for k,i in zip(range(len(other.cores)), range(len(self.cores)-len(other.cores), len(self.cores))):
                    if other.N[k] == self.__N[i]:
                        core = tn.reshape(tn.einsum('aib,min->amibn',self.cores[i],other.cores[k]),[self.__R[i]*other.R[k],self.__N[i],self.R[i+1]*other.R[k+1]])
                    elif other.N[k] == 1:
                        # mode of size 1 in `other` multiplies every slice of `self`
                        core = tn.reshape(tn.einsum('aib,mn->amibn',self.cores[i],other.cores[k][:,0,:]),[self.__R[i]*other.R[k],self.__N[i],self.R[i+1]*other.R[k+1]])
                    else:
                        raise ShapeMismatch("Shapes are incompatible: first operand is %s, second operand is %s."%(str(self.N), str(other.N)))
                    cores_new.append(core)
        else:
            raise IncompatibleTypes('Second operand must be the same type as the fisrt (both should be either TT matrices or TT tensors).')
        result = TT(cores_new)
    # BUGFIX: `isinstance(other, tn.tensor)` raised TypeError because torch.tensor
    # is a factory function, not a class; use tn.is_tensor as everywhere else.
    elif isinstance(other,(int,float)) or tn.is_tensor(other):
        if other != 0:
            # scaling by a scalar: only the first core is multiplied
            cores_new = [c+0 for c in self.cores]
            cores_new[0] *= other
            result = TT(cores_new)
        else:
            # multiplication by zero yields a rank-1 zero tensor
            result = zeros([(m,n) for m,n in zip(self.M,self.N)] if self.is_ttm else self.N, device=self.cores[0].device)
    else:
        # BUGFIX: the error message referenced "tensorflow Tensor" (copy-paste leftover).
        raise InvalidArguments('Second operand must be of type: torchtt.TT, float, int or torch.tensor.')
    return result
def __matmul__(self,other):
    """
    Matrix-vector multiplication in TT-format.
    Supported operands:
    - TT-matrix @ TT-tensor -> TT-tensor: y_i = A_ij * x_j
    - TT-tensor @ TT-matrix -> TT-tensor: y_j = x_i * A_ij
    - TT-matrix @ TT-matrix -> TT-matrix: Y_ij = A_ik * B_kj
    - TT-matrix @ torch.tensor -> torch.tensor: y_bi = A_ij * x_bj
    In the last case, the multiplication is performed along the last modes and a full torch.tensor is returned.

    Args:
        other (torchtt.TT or torch.tensor): the second operand.

    Raises:
        ShapeMismatch: Shapes do not match.
        InvalidArguments: Wrong arguments.

    Returns:
        torchtt.TT or torch.tensor: the result. Can be full tensor if the second operand is full tensor.
    """
    if self.__is_ttm and tn.is_tensor(other):
        # TT-matrix times dense tensor: contract over the trailing modes of `other`
        if self.__N != list(other.shape)[-len(self.N):]:
            raise ShapeMismatch("Shapes do not match.")
        result = dense_matvec(self.cores,other)
        return result
    elif self.__is_ttm and other.is_ttm == False:
        # matrix-vector multiplication; new ranks are products of the operand ranks
        if self.__N != other.N:
            raise ShapeMismatch("Shapes do not match.")
        cores_new = []
        for i in range(len(self.cores)):
            core = tn.reshape(tn.einsum('ijkl,mkp->imjlp',self.cores[i],other.cores[i]),[self.cores[i].shape[0]*other.cores[i].shape[0],self.cores[i].shape[1],self.cores[i].shape[3]*other.cores[i].shape[2]])
            cores_new.append(core)
    elif self.__is_ttm and other.is_ttm:
        # multiplication between 2 TT-matrices: contract the column modes of self
        # with the row modes of other
        if self.__N != other.M:
            raise ShapeMismatch("Shapes do not match.")
        cores_new = []
        for i in range(len(self.cores)):
            core = tn.reshape(tn.einsum('ijkl,mknp->imjnlp',self.cores[i],other.cores[i]),[self.cores[i].shape[0]*other.cores[i].shape[0],self.cores[i].shape[1],other.cores[i].shape[2],self.cores[i].shape[3]*other.cores[i].shape[3]])
            cores_new.append(core)
    elif self.__is_ttm == False and other.is_ttm:
        # vector-matrix multiplication (row vector times matrix)
        if self.__N != other.M:
            raise ShapeMismatch("Shapes do not match.")
        cores_new = []
        for i in range(len(self.cores)):
            core = tn.reshape(tn.einsum('mkp,ikjl->imjlp',self.cores[i],other.cores[i]),[self.cores[i].shape[0]*other.cores[i].shape[0],other.cores[i].shape[2],self.cores[i].shape[2]*other.cores[i].shape[3]])
            cores_new.append(core)
    else:
        raise InvalidArguments("Wrong arguments.")
    result = TT(cores_new)
    return result
def fast_matvec(self,other, eps = 1e-12, nswp = 20, verb = False):
    """
    Fast matrix-vector multiplication A@x using DMRG iterations. Faster than traditional matvec + rounding.

    Args:
        other (torchtt.TT): the TT tensor x.
        eps (float, optional): relative accuracy for DMRG. Defaults to 1e-12.
        nswp (int, optional): number of DMRG iterations. Defaults to 20.
        verb (bool, optional): show info for debug. Defaults to False.

    Raises:
        InvalidArguments: Second operand has to be TT object.
        IncompatibleTypes: First operand should be a TT matrix and second a TT vector.

    Returns:
        torchtt.TT: the result.
    """
    if not isinstance(other,TT):
        raise InvalidArguments('Second operand has to be TT object.')
    if not self.__is_ttm or other.is_ttm:
        raise IncompatibleTypes('First operand should be a TT matrix and second a TT vector.')
    return dmrg_matvec(self, other, eps = eps, verb = verb, nswp = nswp)
def apply_mask(self,indices):
    """
    Evaluate the tensor on the given index list.
    Delegates to the module-level apply_mask() helper.
    Examples:
        ```
        x = torchtt.random([10,12,14],[1,4,5,1])
        indices = torch.tensor([[0,0,0],[1,2,3],[1,1,1]])
        val = x.apply_mask(indices)
        ```

    Args:
        indices (list[list[int]]): the index list where the tensor should be evaluated. Length is M.

    Returns:
        torch.tensor: the values of the tensor.
    """
    return apply_mask(self.cores, self.__R, indices)
def __truediv__(self,other):
    """
    This function implements the "/" operator.
    For TT/TT the division is performed using the AMEN solver. The number of sweeps and the relative accuracy are fixed.
    For most cases it is sufficient but sometimes it can fail.
    Check the function torchtt.elementwise_divide() if you want to change the arguments of the AMEN solver.

    Args:
        other (torchtt.TT or float or int or torch.tensor with 1 element): the second operand.

    Raises:
        IncompatibleTypes: Operands should be either TT or TTM.
        ShapeMismatch: Both operands should have the same shape.
        InvalidArguments: Operand not permitted. A TT-object can be divided only with scalars.

    Returns:
        torchtt.TT: the result.
    """
    if isinstance(other,int) or isinstance(other,float) or tn.is_tensor(other):
        # divide by a scalar
        cores_new = self.cores.copy()
        # BUGFIX: `cores_new[0] /= other` performed an in-place division and,
        # because list.copy() is shallow, silently mutated self.cores[0] as well
        # (and would error on cores that require grad). Build a new tensor instead.
        cores_new[0] = cores_new[0] / other
        result = TT(cores_new)
    elif isinstance(other,TT):
        if self.__is_ttm != other.is_ttm:
            raise IncompatibleTypes('Operands should be either TT or TTM.')
        if self.__N != other.N or (self.__is_ttm and self.__M != other.M):
            raise ShapeMismatch("Both operands should have the same shape.")
        # elementwise division solved as a linear system with AMEN
        result = TT(amen_divide(other,self,50,None,1e-12,500,verbose=False))
    else:
        raise InvalidArguments('Operand not permitted. A TT-object can be divided only with scalars.')
    return result
def __rtruediv__(self,other):
    """
    Reflected true division: called when a non-TT object is divided by a TT object.
    The elementwise inverse is computed with the AMEN solver using a fixed number
    of sweeps and a fixed relative accuracy. For most cases this is sufficient but
    it can sometimes fail; see torchtt.elementwise_divide() to tune the solver.
    Example:
        ```
        z = 1.0/x # x is TT instance
        ```

    Args:
        other (torchtt.TT or float or int or torch.tensor with 1 element): the first operand.

    Raises:
        InvalidArguments: The first operand must be int, float or 1d torch.tensor.

    Returns:
        torchtt.TT: the result.
    """
    scalar_ok = isinstance(other,(int,float)) or ( tn.is_tensor(other) and other.numel()==1)
    if not scalar_ok:
        raise InvalidArguments("The first operand must be int, float or 1d torch.tensor.")
    # numerator: a rank-1 tensor of ones scaled by the scalar
    numerator = ones(self.__N,dtype=self.cores[0].dtype,device = self.cores[0].device)
    numerator.cores[0] = numerator.cores[0] * other
    cores_new = amen_divide(self,numerator,50,None,1e-12,500,verbose=False)
    return TT(cores_new)
def t(self):
    """
    Return the transpose of a TT matrix: the row and column modes of every core
    are swapped.

    Returns:
        torchtt.TT: the transpose.

    Raises:
        InvalidArguments: Has to be TT matrix.
    """
    if not self.__is_ttm:
        raise InvalidArguments('Has to be TT matrix.')
    transposed_cores = [tn.permute(core,(0,2,1,3)) for core in self.cores]
    return TT(transposed_cores)
def norm(self,squared=False):
    """
    Computes the Frobenius norm of a TT object.

    Args:
        squared (bool, optional): returns the square of the norm if True. Defaults to False.

    Returns:
        torch.tensor: the norm.
    """
    if any([c.requires_grad or c.grad_fn != None for c in self.cores]):
        # AD-friendly path: contract <T, conj(T)> core by core so the autograd
        # graph is preserved (no QR, which would break differentiability here)
        norm = tn.tensor([[1.0]],dtype = self.cores[0].dtype, device=self.cores[0].device)
        if self.__is_ttm:
            for i in range(len(self.__N)):
                norm = tn.einsum('ab,aijm,bijn->mn',norm, self.cores[i], tn.conj(self.cores[i]))
            norm = tn.squeeze(norm)
        else:
            for i in range(len(self.__N)):
                norm = tn.einsum('ab,aim,bin->mn',norm, self.cores[i], tn.conj(self.cores[i]))
            norm = tn.squeeze(norm)
        if squared:
            return norm
        else:
            # abs() guards against tiny negative values from rounding errors
            return tn.sqrt(tn.abs(norm))
    else:
        # fast path: left-orthogonalize via QR sweeps; the norm of the whole
        # tensor then equals the norm of the last core
        d = len(self.cores)
        core_now = self.cores[0]
        for i in range(d-1):
            if self.__is_ttm:
                # NOTE(review): mode_shape is computed but never used — presumably a leftover
                mode_shape = [core_now.shape[1],core_now.shape[2]]
                core_now = tn.reshape(core_now,[core_now.shape[0]*core_now.shape[1]*core_now.shape[2],-1])
            else:
                mode_shape = [core_now.shape[1]]
                core_now = tn.reshape(core_now,[core_now.shape[0]*core_now.shape[1],-1])
            # perform QR
            Qmat, Rmat = QR(core_now)
            # take next core
            core_next = self.cores[i+1]
            shape_next = list(core_next.shape[1:])
            core_next = tn.reshape(core_next,[core_next.shape[0],-1])
            # push the non-orthogonal R factor into the next core
            core_next = Rmat @ core_next
            core_next = tn.reshape(core_next,[Qmat.shape[1]]+shape_next)
            # update the cores
            core_now = core_next
        if squared:
            return tn.linalg.norm(core_next)**2
        else:
            return tn.linalg.norm(core_next)
def sum(self,index = None):
    """
    Contracts a tensor in the TT format along the given indices and returns the resulting tensor in the TT format.
    If no index list is given, the sum over all indices is performed.
    Examples:
        ```
        a = torchtt.ones([3,4,5,6,7])
        print(a.sum())
        print(a.sum([0,2,4]))
        print(a.sum([1,2]))
        print(a.sum([0,1,2,3,4]))
        ```

    Args:
        index (int or list[int] or None, optional): the indices along which the summation is performed. None selects all of them. Defaults to None.

    Raises:
        InvalidArguments: Invalid index.

    Returns:
        torchtt.TT or torch.tensor: the result.
    """
    # normalize a single integer index to a list
    if isinstance(index,int):
        index = [index]
    if index is not None and not isinstance(index,list):
        raise InvalidArguments('Invalid index.')
    if index is None:
        # the case we need to sum over all modes -> a scalar is returned
        if self.__is_ttm:
            # BUGFIX: tn.reduce_sum does not exist in torch (TensorFlow leftover);
            # use tn.sum with a list of dims, like the TT-tensor branch below.
            C = tn.sum(self.cores[0],[0,1,2])
            for i in range(1,len(self.__N)):
                C = tn.sum(tn.einsum('i,ijkl->jkl',C,self.cores[i]),[0,1])
            S = tn.sum(C)
        else:
            C = tn.sum(self.cores[0],[0,1])
            for i in range(1,len(self.__N)):
                C = tn.sum(tn.einsum('i,ijk->jk',C,self.cores[i]),0)
            S = tn.sum(C)
    else:
        # we return the TT-tensor with summed indices (kept as size-1 modes,
        # then removed by reduce_dims)
        cores = []
        if self.__is_ttm:
            tmp = [1,2]
        else:
            tmp = [1]
        for i in range(len(self.__N)):
            if i in index:
                C = tn.sum(self.cores[i], tmp, keepdim = True)
                cores.append(C)
            else:
                cores.append(self.cores[i])
        S = TT(cores)
        S.reduce_dims()
        # if everything was summed away, return a scalar torch.tensor
        if len(S.cores)==1 and tn.numel(S.cores[0])==1:
            S = tn.squeeze(S.cores[0])
    return S
def to_ttm(self):
    """
    Convert a TT-tensor to the TT-matrix format by inserting singleton column
    modes: a tensor of shape N1 x ... x Nd becomes N1 x ... x Nd x 1 x ... x 1.

    Returns:
        torchtt.TT: the result.
    """
    cores_ttm = []
    for core in self.cores:
        r_left, n, r_right = core.shape
        cores_ttm.append(tn.reshape(core,(r_left,n,1,r_right)))
    return TT(cores_ttm)
def reduce_dims(self, exclude = []):
    """
    Reduces the size 1 modes of the TT-object by absorbing their cores into a
    neighboring core. At least one mode should be larger than 1.
    Mutates the instance in place (cores, ranks and shape are updated).

    Args:
        exclude (list, optional): Indices to exclude. Defaults to [].
    """
    # TODO: implement a version that reduces the rank also. by spliting the cores with modes 1 into 2 using the SVD.
    if self.__is_ttm:
        cores_new = []
        for i in range(len(self.__N)):
            # a core with both mode sizes equal to 1 carries only a rank matrix
            if self.cores[i].shape[1] == 1 and self.cores[i].shape[2] == 1 and not i in exclude:
                # absorb into the smaller-rank side (or left if this is the last core)
                if self.cores[i].shape[0] > self.cores[i].shape[3] or i == len(self.__N)-1:
                    # multiply to the left
                    if len(cores_new) > 0:
                        cores_new[-1] = tn.einsum('ijok,kl->ijol',cores_new[-1], self.cores[i][:,0,0,:])
                    else:
                        # there is no core to the left. Multiply right.
                        if i != len(self.__N)-1:
                            self.cores[i+1] = tn.einsum('ij,jkml->ikml', self.cores[i][:,0,0,:],self.cores[i+1])
                        else:
                            # single remaining core: keep it
                            cores_new.append(self.cores[i])
                else:
                    # multiply to the right. Set the carry
                    self.cores[i+1] = tn.einsum('ij,jkml->ikml',self.cores[i][:,0,0,:],self.cores[i+1])
            else:
                cores_new.append(self.cores[i])
        # update the cores and ranks and shape
        self.__N = []
        self.__M = []
        self.__R = [1]
        for i in range(len(cores_new)):
            self.__N.append(cores_new[i].shape[2])
            self.__M.append(cores_new[i].shape[1])
            self.__R.append(cores_new[i].shape[3])
        self.cores = cores_new
    else:
        cores_new = []
        for i in range(len(self.__N)):
            if self.cores[i].shape[1] == 1 and not i in exclude:
                # absorb into the smaller-rank side (or left if this is the last core)
                if self.cores[i].shape[0] > self.cores[i].shape[2] or i == len(self.__N)-1:
                    # multiply to the left
                    if len(cores_new) > 0:
                        cores_new[-1] = tn.einsum('ijk,kl->ijl',cores_new[-1], self.cores[i][:,0,:])
                    else:
                        # there is no core to the left. Multiply right.
                        if i != len(self.__N)-1:
                            self.cores[i+1] = tn.einsum('ij,jkl->ikl', self.cores[i][:,0,:],self.cores[i+1])
                        else:
                            # single remaining core: keep it
                            cores_new.append(self.cores[i])
                else:
                    # multiply to the right. Set the carry
                    self.cores[i+1] = tn.einsum('ij,jkl->ikl',self.cores[i][:,0,:],self.cores[i+1])
            else:
                cores_new.append(self.cores[i])
        # update the cores and ranks and shape
        self.__N = []
        self.__R = [1]
        for i in range(len(cores_new)):
            self.__N.append(cores_new[i].shape[1])
            self.__R.append(cores_new[i].shape[2])
        self.cores = cores_new
    # refresh the public shape attribute (pairs (m,n) for TTM, plain sizes for TT)
    self.shape = [ (m,n) for m,n in zip(self.__M,self.__N) ] if self.__is_ttm else [n for n in self.N]
def __getitem__(self,index):
    """
    Performs slicing of a TT object.
    Both TT matrix and TT tensor are supported.
    Similar to pytorch or numpy slicing.

    Args:
        index (tuple[slice] or tuple[int] or int or Ellipsis or slice): the slicing.

    Raises:
        NotImplementedError: Ellipsis are not supported.
        InvalidArguments: Slice size is invalid.
        InvalidArguments: Slice arguments not valid. They have to be either int, slice or None.
        InvalidArguments: Invalid slice. Tensor is not 1d.

    Returns:
        torchtt.TT or torch.tensor: the result. If all the indices are fixed, a scalar torch.tensor is returned otherwise a torchtt.TT.
    """
    # slicing function
    ##### TODO: include Ellipsis support for tensor operators.
    # if a slice containg integers is passed, an element is returned
    # if ranged slices are used, a TT-object has to be returned.
    exclude = []
    if isinstance(index,tuple):
        # check if more than two Ellipsis are to be found.
        if index.count(Ellipsis) > 1 or (self.is_ttm and index.count(Ellipsis) > 0):
            raise NotImplementedError('Ellipsis are not supported more than once of for tensor operators.')
        if self.__is_ttm:
            # TTM indexing: first half of the tuple addresses the row modes,
            # second half the column modes
            cores_new = []
            k=0
            for i in range(len(index)//2):
                idx1 = index[i]
                idx2 = index[i+len(index)//2]
                if isinstance(idx1,slice) and isinstance(idx2,slice):
                    cores_new.append(self.cores[k][:,idx1,idx2,:])
                    k+=1
                elif idx1==None and idx2==None:
                    # extend the tensor: insert a new singleton mode (identity core)
                    tmp = tn.eye(cores_new[-1].shape[-1] if len(cores_new)!=0 else 1, device = self.cores[0].device, dtype = self.cores[0].dtype)[:,None,None,:]
                    cores_new.append(tmp)
                    exclude.append(i)
                elif isinstance(idx1, int) and isinstance(idx2,int):
                    # fixed index pair: keep a size-1 x 1 mode (removed by reduce_dims)
                    cores_new.append(tn.reshape(self.cores[k][:,idx1,idx2,:],[self.__R[k],1,1,self.R[k+1]]))
                    k+=1
                else:
                    raise InvalidArguments("Slice carguments not valid. They have to be either int, slice or None.")
            if k<len(self.cores):
                raise InvalidArguments('Slice size is invalid.')
        else:
            # if len(index) != len(self.__N):
            #    raise InvalidArguments('Slice size is invalid.')
            # expand a single Ellipsis into full slices
            if index[0] == Ellipsis:
                index = (slice(None, None, None),)*(len(self.__N)-len(index)+1) + index[1:]
            elif index[-1] == Ellipsis:
                index = index[:-1] + (slice(None, None, None),)*(len(self.__N)-len(index)+1)
            cores_new = []
            k = 0
            for i,idx in enumerate(index):
                if isinstance(idx,slice):
                    cores_new.append(self.cores[k][:,idx,:])
                    k+=1
                elif idx==None:
                    # extend the tensor: insert a new singleton mode (identity core)
                    tmp = tn.eye(cores_new[-1].shape[-1] if len(cores_new)!=0 else 1, device = self.cores[0].device, dtype = self.cores[0].dtype)[:,None,:]
                    cores_new.append(tmp)
                    exclude.append(i)
                elif isinstance(idx, int):
                    # fixed index: keep a size-1 mode (removed by reduce_dims)
                    cores_new.append(tn.reshape(self.cores[k][:,idx,:],[self.__R[k],-1,self.R[k+1]]))
                    k+=1
                else:
                    raise InvalidArguments("Slice carguments not valid. They have to be either int, slice or None.")
            if k<len(self.cores):
                raise InvalidArguments('Slice size is invalid.')
        sliced = TT(cores_new)
        sliced.reduce_dims(exclude)
        # if everything collapsed to a single entry, return a scalar
        if (sliced.is_ttm == False and sliced.N == [1]) or (sliced.is_ttm and sliced.N == [1] and sliced.M == [1]):
            sliced = tn.squeeze(sliced.cores[0])
        # cores = None
    elif isinstance(index,int):
        # tensor is 1d and one element is retrived
        if len(self.__N) == 1:
            sliced = self.cores[0][0,index,0]
        else:
            raise InvalidArguments('Invalid slice. Tensor is not 1d.')
        ## TODO
    elif index == Ellipsis:
        # return a copy of the tensor
        sliced = TT([c.clone() for c in self.cores])
    elif isinstance(index,slice):
        # tensor is 1d and one slice is extracted
        if len(self.__N) == 1:
            sliced = TT(self.cores[0][:,index,:])
        else:
            raise InvalidArguments('Invalid slice. Tensor is not 1d.')
        ## TODO
    else:
        raise InvalidArguments('Invalid slice.')
    return sliced
def __pow__(self,other):
    """
    Computes the tensor Kronecker product via the "**" operator.
    If None is provided as operand the result is the other tensor.
    If A is N_1 x ... x N_d and B is M_1 x ... x M_p, then kron(A,B) is N_1 x ... x N_d x M_1 x ... x M_p.

    Args:
        other (torchtt.TT or None): the second operand.

    Raises:
        IncompatibleTypes: Incompatible data types (make sure both are either TT-matrices or TT-tensors).
        InvalidArguments: Invalid arguments.

    Returns:
        torchtt.TT: the result.
    """
    return kron(self, other)
def __rpow__(self,other):
    """
    Reflected Kronecker product: implements `other ** self` when the left
    operand is not a torchtt.TT (in practice only None, for which
    kron(None, A) returns A).
    If A is N_1 x ... x N_d and B is M_1 x ... x M_p, then kron(A,B) is N_1 x ... x N_d x M_1 x ... x M_p.

    Args:
        other (torchtt.TT or None): the left operand.

    Raises:
        IncompatibleTypes: Incompatible data types (make sure both are either TT-matrices or TT-tensors).
        InvalidArguments: Invalid arguments.

    Returns:
        torchtt.TT: the result.
    """
    # BUGFIX: __rpow__ implements `other ** self`, so the left operand must come
    # first. Previously kron(self, other) was used; for the only reachable
    # non-TT left operand (None) both orders return the same tensor, but this
    # is the mathematically correct argument order.
    result = kron(other, self)
    return result
def __neg__(self):
    """
    Unary "-" operator: returns the negative of the TT tensor.
    Only the first core needs to be negated.

    Returns:
        torchtt.TT: the negated tensor.
    """
    negated = [core.clone() for core in self.cores]
    negated[0] = -negated[0]
    return TT(negated)
def __pos__(self):
    """
    Unary "+" operator: returns a copy of the tensor.

    Returns:
        torchtt.TT: the tensor clone.
    """
    return TT([core.clone() for core in self.cores])
def round(self, eps=1e-12, rmax = sys.maxsize):
    """
    Perform TT-rounding within the given relative tolerance and maximum rank.

    Args:
        eps (float, optional): the relative accuracy. Defaults to 1e-12.
        rmax (int or list[int], optional): the maximum rank (a scalar bound or one bound per rank). Defaults to the maximum possible integer.

    Returns:
        torchtt.TT: the result.
    """
    # a scalar rmax is expanded to one bound per bond (boundary ranks stay 1)
    if not isinstance(rmax,list):
        rmax = [1] + [rmax]*len(self.__N) + [1]
    # delegate the actual rounding to the module-level routine
    rounded_cores, _ = round_tt(self.cores, self.__R.copy(), eps, rmax,self.__is_ttm)
    return TT(rounded_cores)
def to_qtt(self, eps = 1e-12, mode_size = 2, rmax = sys.maxsize):
    """
    Converts a tensor to the QTT format: N1 x N2 x ... x Nd -> mode_size x mode_size x ... x mode_size.

    The product of the mode sizes should be a power of mode_size.
    The tensor in QTT can be converted back using the qtt_to_tens() method.

    Examples:
        ```
        x = torchtt.random([16,8,64,128],[1,2,10,12,1])
        x_qtt = x.to_qtt()
        print(x_qtt)
        xf = x_qtt.qtt_to_tens(x.N) # a TT-rounding is recommended.
        ```

    Args:
        eps (float, optional): the accuracy. Defaults to 1e-12.
        mode_size (int, optional): the size of the modes. Defaults to 2.
        rmax (int, optional): the maximum rank. Defaults to the maximum possible integer.

    Raises:
        ShapeMismatch: Only quadratic TTM can be tranformed to QTT.
        ShapeMismatch: Reshaping error: check if the dimensions are powers of the desired mode size.

    Returns:
        torchtt.TT: the resulting reshaped tensor.
    """
    cores_new = []
    if self.__is_ttm:
        # TT matrix: every mode must be square and a power of mode_size;
        # the actual splitting is delegated to torchtt.reshape().
        shape_new = []
        for i in range(len(self.__N)):
            if self.__N[i]!=self.__M[i]:
                raise ShapeMismatch('Only quadratic TTM can be tranformed to QTT.')
            # power-of-mode_size check: N[i] must equal mode_size**log_{mode_size}(N[i])
            if self.__N[i]==mode_size**int(math.log(self.N[i],mode_size)):
                shape_new += [(mode_size,mode_size)]*int(math.log(self.__N[i],mode_size))
            else:
                raise ShapeMismatch('Reshaping error: check if the dimensions are powers of the desired mode size:\r\ncore size '+str(list(self.cores[i].shape))+' cannot be reshaped.')
        result = reshape(self, shape_new, eps, rmax)
    else:
        # TT tensor: each core r1 x N x r2 with N = mode_size**k is split into
        # k QTT cores via a local TT decomposition.
        for core in self.cores:
            if int(math.log(core.shape[1],mode_size))>1:
                # fold the boundary ranks into the first/last new modes
                Nnew = [core.shape[0]*mode_size]+[mode_size]*(int(math.log(core.shape[1],mode_size))-2)+[core.shape[2]*mode_size]
                try:
                    core = tn.reshape(core,Nnew)
                except:
                    raise ShapeMismatch('Reshaping error: check if the dimensions care powers of the desired mode size:\r\ncore size '+str(list(core.shape))+' cannot be reshaped to '+str(Nnew))
                cores,_ = to_tt(core,Nnew,eps,rmax,is_sparse=False)
                # restore the original boundary ranks on the first/last pieces
                cores_new.append(tn.reshape(cores[0],[-1,mode_size,cores[0].shape[-1]]))
                cores_new += cores[1:-1]
                cores_new.append(tn.reshape(cores[-1],[cores[-1].shape[0],mode_size,-1]))
            else:
                # mode is already mode_size (or smaller): keep the core as is
                cores_new.append(core)
        result = TT(cores_new)
    return result
def qtt_to_tens(self, original_shape):
    """
    Transform a tensor back from QTT.

    Args:
        original_shape (list): the original shape.

    Raises:
        InvalidArguments: Original shape must be a list.
        ShapeMismatch: Mode sizes do not match.

    Returns:
        torchtt.TT: the folded tensor.
    """
    if not isinstance(original_shape, list):
        raise InvalidArguments("Original shape must be a list.")
    core = None
    cores_new = []
    if self.__is_ttm:
        # NOTE(review): folding back TT matrices is not implemented here;
        # the result would be built from an empty core list - confirm intent.
        pass
    else:
        k = 0
        for c in self.cores:
            # Use an identity check: `core == None` on a torch.Tensor would
            # attempt an (element-wise) comparison instead of testing for None.
            if core is None:
                core = c
                so_far = core.shape[1]
            else:
                # absorb the next QTT core into the running group
                core = tn.einsum('...i,ijk->...jk', core, c)
                so_far *= c.shape[1]
            if so_far == original_shape[k]:
                # the group now covers one original mode -> flatten and emit it
                core = tn.reshape(core, [core.shape[0], -1, core.shape[-1]])
                cores_new.append(core)
                core = None
                k += 1
        if k != len(original_shape):
            raise ShapeMismatch('Mode sizes do not match.')
    return TT(cores_new)
def mprod(self, factor_matrices, mode):
    """
    n-mode product.

    Args:
        factor_matrices (torch.tensor or list[torch.tensor]): either a single matrix is directly provided or a list of matrices for product along multiple modes.
        mode (int or list[int]): the mode for the product. If factor_matrices is a torch.tensor then mode is an integer and the multiplication will be performed along a single mode.
            If factor_matrices is a list, the mode has to be list[int] of equal size.

    Raises:
        InvalidArguments: Invalid arguments.
        ShapeMismatch: The n-th mode of the tensor must be equal with the 2nd mode of the matrix.
        IncompatibleTypes: n-model product works only with TT-tensors and not TT matrices.

    Returns:
        torchtt.TT: the result.
    """
    if self.__is_ttm:
        raise IncompatibleTypes("n-model product works only with TT-tensors and not TT matrices.")
    if isinstance(factor_matrices,list) and isinstance(mode, list):
        # multiple modes: contract each listed core with its factor matrix
        cores_new = [c.clone() for c in self.cores]
        for i in range(len(factor_matrices)):
            if cores_new[mode[i]].shape[1] != factor_matrices[i].shape[1]:
                raise ShapeMismatch("The n-th mode of the tensor must be equal with the 2nd mode of the matrix.")
            # replace the mode dimension j of the core by the matrix row index l
            cores_new[mode[i]] = tn.einsum('ijk,lj->ilk',cores_new[mode[i]],factor_matrices[i])
    elif isinstance(mode, int) and tn.is_tensor(factor_matrices):
        # single mode: same contraction on one core only
        cores_new = [c.clone() for c in self.cores]
        if cores_new[mode].shape[1] != factor_matrices.shape[1]:
            raise ShapeMismatch("The n-th mode of the tensor must be equal with the 2nd mode of the matrix.")
        cores_new[mode] = tn.einsum('ijk,lj->ilk',cores_new[mode],factor_matrices)
    else:
        raise InvalidArguments('Invalid arguments.')
    return TT(cores_new)
def conj(self):
    """
    Return the complex conjugate of a tensor in TT format.

    Returns:
        torchtt.TT: the complex conjugated tensor.
    """
    conj_cores = [core.conj() for core in self.cores]
    return TT(conj_cores)
def eye(shape, dtype=tn.float64, device=None):
    """
    Construct the TT decomposition of a multidimensional identity matrix.

    All the TT ranks are 1.

    Args:
        shape (list[int]): the shape.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Returns:
        torchtt.TT: the identity TT matrix.
    """
    cores = []
    for n in list(shape):
        # each rank-1 core is a 1 x n x n x 1 identity slice
        cores.append(tn.eye(n, dtype=dtype, device=device)[None, :, :, None])
    return TT(cores)
def zeros(shape, dtype=tn.float64, device = None):
    """
    Construct a tensor that contains only zeros.

    The shape can be a list of ints or a list of tuples of ints. The second
    case creates a TT matrix.

    Args:
        shape (list[int] or list[tuple[int]]): the shape.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Shape must be a list.

    Returns:
        torchtt.TT: the zero tensor.
    """
    if not isinstance(shape, list):
        raise InvalidArguments('Shape must be a list.')
    d = len(shape)
    # empty shape -> empty tensor, consistent with torchtt.ones()
    if d == 0:
        return TT(None)
    if isinstance(shape[0], tuple):
        # we create a TT-matrix: cores are 1 x M_i x N_i x 1
        cores = [tn.zeros([1, shape[i][0], shape[i][1], 1], dtype=dtype, device=device) for i in range(d)]
    else:
        # we create a TT-tensor: cores are 1 x N_i x 1
        cores = [tn.zeros([1, shape[i], 1], dtype=dtype, device=device) for i in range(d)]
    return TT(cores)
def kron(first, second):
    """
    Computes the tensor Kronecker product.

    If None is provided as input the result is the other tensor.
    If A is N_1 x ... x N_d and B is M_1 x ... x M_p, then kron(A,B) is
    N_1 x ... x N_d x M_1 x ... x M_p.

    Args:
        first (torchtt.TT or None): first argument.
        second (torchtt.TT or None): second argument.

    Raises:
        IncompatibleTypes: Incompatible data types (make sure both are either TT-matrices or TT-tensors).
        InvalidArguments: Invalid arguments.

    Returns:
        torchtt.TT: the result.
    """
    # `is None` instead of `== None`: avoids invoking any (possibly
    # element-wise) equality operator on the TT operands.
    if first is None and isinstance(second, TT):
        result = TT([c.clone() for c in second.cores])
    elif second is None and isinstance(first, TT):
        result = TT([c.clone() for c in first.cores])
    elif isinstance(first, TT) and isinstance(second, TT):
        if first.is_ttm != second.is_ttm:
            raise IncompatibleTypes('Incompatible data types (make sure both are either TT-matrices or TT-tensors).')
        # the Kronecker product in TT format is just core concatenation
        cores_new = [c.clone() for c in first.cores] + [c.clone() for c in second.cores]
        result = TT(cores_new)
    else:
        raise InvalidArguments('Invalid arguments.')
    return result
def ones(shape, dtype=tn.float64, device=None):
    """
    Construct a tensor that contains only ones.

    The shape can be a list of ints or a list of tuples of ints. The second
    case creates a TT matrix.

    Args:
        shape (list[int] or list[tuple[int]]): the shape.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Shape must be a list.

    Returns:
        torchtt.TT: the one tensor.
    """
    if not isinstance(shape, list):
        raise InvalidArguments('Shape must be a list.')
    d = len(shape)
    if d == 0:
        # empty shape -> empty tensor
        return TT(None)
    if isinstance(shape[0], tuple):
        # TT matrix: cores are 1 x M_i x N_i x 1
        cores = [tn.ones([1, s[0], s[1], 1], dtype=dtype, device=device) for s in shape]
    else:
        # TT tensor: cores are 1 x N_i x 1
        cores = [tn.ones([1, s, 1], dtype=dtype, device=device) for s in shape]
    return TT(cores)
def xfun(shape, dtype = tn.float64, device = None):
    """
    Construct a tensor from 0 to tn.prod(shape)-1.

    The shape must be a list of ints.

    Args:
        shape (list[int]): the shape.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Shape must be a list.

    Returns:
        torchtt.TT: the xfun tensor.
    """
    if isinstance(shape, list):
        d = len(shape)
        if d == 0:
            return TT(None)
        if d == 1:
            # one mode: the tensor is just 0..N-1
            return TT(tn.arange(shape[0], dtype = dtype, device = device))
        else:
            # rank-2 explicit construction: each core carries the running
            # linear-index contribution of its mode (stride ni).
            cores = []
            firstcore = tn.ones(1, shape[0], 2, dtype = dtype, device = device)
            firstcore[0, :, 0] = tn.arange(shape[0], dtype = dtype, device = device)
            cores.append(firstcore)
            # ni accumulates the product of the mode sizes processed so far
            ni = tn.tensor(shape[0], dtype = dtype, device = device)
            for i in range(1, d - 1):
                core = tn.zeros((2, shape[i], 2), dtype = dtype, device = device)
                for j in range(shape[i]):
                    core[:, j, :] = tn.eye(2, dtype = dtype, device = device)
                core[1, :, 0] = ni * tn.arange(shape[i], dtype = dtype, device = device)
                ni *= shape[i]
                cores.append(core)
            core = tn.ones((2, shape[d - 1], 1), dtype = dtype, device = device)
            core[1, :, 0] = ni * tn.arange(shape[d - 1], dtype = dtype, device = device)
            cores.append(core)
    else:
        raise InvalidArguments('Shape must be a list.')
    return TT(cores)
def linspace(shape = [1], a = 0.0, b = 0.0, dtype = tn.float64, device = None):
    """
    Construct an evenly spaced tensor from a to b with a given shape in TT decomposition.

    The shape must be a list of ints.

    Args:
        shape (list[int]): the shape.
        a (float): start value.
        b (float): end value.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Shape must be a list.

    Returns:
        torchtt.TT: a linspace tensor.
    """
    if not isinstance(shape, list):
        raise InvalidArguments('Shape must be a list.')
    d = len(shape)
    if d == 0:
        return TT(None)
    if d == 1:
        # torch.linspace signature is (start, end, steps): the previous call
        # tn.linspace(shape[0], a, b) passed the arguments in the wrong order.
        return TT(tn.linspace(a, b, shape[0], dtype = dtype, device = device))
    # multi-dimensional case: a + stepsize * (0, 1, ..., N-1) folded to `shape`;
    # forward dtype/device so the helper tensors match the requested ones.
    x = xfun(shape, dtype = dtype, device = device)
    oneTensor = ones(shape, dtype = dtype, device = device)
    # torch.prod accepts dtype but no device keyword
    N = tn.prod(tn.tensor(shape), dtype = dtype).numpy()
    stepsize = (b - a) * 1.0 / (N - 1)
    T = a * oneTensor + x * stepsize
    return T.round(1e-15)
def arange(shape = [1], a = 0, b = 0, step = 1, dtype = tn.float64, device = None):
    """
    Construct a tensor of size (a-b)/step with a given shape, if possible.

    The shape must be a list of int and the vector has to fit the shape.

    Args:
        shape (list[int] or list[tuple[int]]): the shape.
        a (float): start value.
        b (float): end value.
        step (int): stepsize.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Shape must be a list.

    Returns:
        torchtt.TT: an evenly spaced tensor within a given interval.
    """
    if isinstance(shape,list):
        d = len(shape)
        if d == 0:
            return TT(None)
        if d == 1:
            # one mode: the plain vector is already the result
            return TT(tn.arange(a, b, step, dtype = dtype, device = device))
    else:
        raise InvalidArguments('Shape must be a list.')
    # multi-dimensional: build the vector and fold it to the requested shape
    return reshape(TT(tn.arange(a, b, step, dtype = dtype, device = device)), shape)
def random(N, R, dtype = tn.float64, device = None):
    """
    Returns a tensor of shape N with random cores of rank R.

    Each core entry is normal distributed with mean 0 and variance 1.
    Check also the method torchtt.randn() for better random tensors in the TT format.

    Args:
        N (list[int] or list[tuple[int]]): the shape of the tensor. If the elements are tuples of integers, we deal with a TT-matrix.
        R (list[int] or int): can be a list if the exact rank is specified or an integer if the maximum rank is secified.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Raises:
        InvalidArguments: Check if N and R are right.

    Returns:
        torchtt.TT: the result.
    """
    # a scalar rank is expanded to a full rank list with border ranks 1
    if isinstance(R, int):
        R = [1] + [R] * (len(N) - 1) + [1]
    elif len(N) + 1 != len(R) or R[0] != 1 or R[-1] != 1 or len(N) == 0:
        raise InvalidArguments('Check if N and R are right.')
    cores = []
    for i, n in enumerate(N):
        # tuple mode -> 4d TT-matrix core, int mode -> 3d TT-tensor core
        size = [R[i], n[0], n[1], R[i + 1]] if isinstance(n, tuple) else [R[i], n, R[i + 1]]
        cores.append(tn.randn(size, dtype = dtype, device = device))
    return TT(cores)
def randn(N, R, var = 1.0, dtype = tn.float64, device = None):
    """
    A torchtt.TT tensor of shape N = [N1 x ... x Nd] and rank R is returned.

    The entries of the full tensor are almost normal distributed with the
    variance var.

    Args:
        N (list[int]): the shape.
        R (list[int]): the rank.
        var (float, optional): the variance. Defaults to 1.0.
        dtype (torch.dtype, optional): the dtype of the returned tensor. Defaults to tn.float64.
        device (torch.device, optional): the device where the TT cores are created (None means CPU). Defaults to None.

    Returns:
        torchtt.TT: the result.
    """
    d = len(N)
    # split the target variance evenly across the d cores
    core_var = (var / np.prod(R)) ** (1 / d)
    scale = np.sqrt(core_var)
    cores = []
    for i, n in enumerate(N):
        size = [R[i], n[0], n[1], R[i + 1]] if isinstance(n, tuple) else [R[i], n, R[i + 1]]
        cores.append(tn.randn(size, dtype = dtype, device = device) * scale)
    return TT(cores)
def reshape(tens, shape, eps = 1e-16, rmax = sys.maxsize):
    """
    Reshapes a torchtt.TT tensor in the TT format.

    A rounding is also performed.

    Args:
        tens (torchtt.TT): the input tensor.
        shape (list[int] or list[tuple[int]]): the desired shape. In the case of a TT operator the shape has to be given as list of tuples of ints [(M1,N1),...,(Md,Nd)].
        eps (float, optional): relative accuracy. Defaults to 1e-16.
        rmax (int, optional): maximum rank. Defaults to the maximum possible integer.

    Raises:
        ShapeMismatch: The product of modes should remain equal. Check the given shape.

    Returns:
        torchtt.TT: the resulting tensor.
    """
    if tens.is_ttm:
        # TT-matrix case: split the row/column mode lists
        M = []
        N = []
        for t in shape:
            M.append(t[0])
            N.append(t[1])
        if np.prod(tens.N)!=np.prod(N) or np.prod(tens.M)!=np.prod(M):
            raise ShapeMismatch('The product of modes should remain equal. Check the given shape.')
        core = tens.cores[0]
        cores_new = []
        idx = 0          # index into the original cores
        idx_shape = 0    # index into the target shape
        # Walk the cores: split the running core when its modes are larger
        # than the target, merge with the next core when they are smaller.
        while True:
            if core.shape[1] % M[idx_shape] == 0 and core.shape[2] % N[idx_shape] == 0:
                if core.shape[1] // M[idx_shape] > 1 and core.shape[2] // N[idx_shape] > 1:
                    # split: peel off a (m1,n1) mode via a local 2-core TTM decomposition
                    m1 = M[idx_shape]
                    m2 = core.shape[1] // m1
                    n1 = N[idx_shape]
                    n2 = core.shape[2] // n1
                    r1 = core.shape[0]
                    r2 = core.shape[-1]
                    tmp = tn.reshape(core,[r1*m1,m2,n1,n2*r2])
                    crz,_ = mat_to_tt(tmp, [r1*m1,m2], [n1,n2*r2], eps, rmax)
                    cores_new.append(tn.reshape(crz[0],[r1,m1,n1,-1]))
                    core = tn.reshape(crz[1],[-1,m2,n2,r2])
                else:
                    # exact match: emit the core (copy) and advance to the next one
                    cores_new.append(core+0)
                    if idx == len(tens.cores)-1:
                        break
                    else:
                        idx+=1
                        core = tens.cores[idx]
                idx_shape += 1
                if idx_shape == len(shape):
                    break
            else:
                # target mode larger than current: merge with the next core
                idx += 1
                if idx>=len(tens.cores):
                    break
                core = tn.einsum('ijkl,lmno->ijmkno',core,tens.cores[idx])
                core = tn.reshape(core,[core.shape[0],core.shape[1]*core.shape[2],-1,core.shape[-1]])
    else:
        if np.prod(tens.N)!=np.prod(shape):
            raise ShapeMismatch('The product of modes should remain equal. Check the given shape.')
        core = tens.cores[0]
        cores_new = []
        idx = 0          # index into the original cores
        idx_shape = 0    # index into the target shape
        # same walk as above for TT tensors (3d cores)
        while True:
            if core.shape[1] % shape[idx_shape] == 0:
                if core.shape[1] // shape[idx_shape] > 1:
                    # split: peel off one target mode via a local TT decomposition
                    s1 = shape[idx_shape]
                    s2 = core.shape[1] // s1
                    r1 = core.shape[0]
                    r2 = core.shape[2]
                    tmp = tn.reshape(core,[r1*s1,s2*r2])
                    crz,_ = to_tt(tmp,tmp.shape,eps,rmax)
                    cores_new.append(tn.reshape(crz[0],[r1,s1,-1]))
                    core = tn.reshape(crz[1],[-1,s2,r2])
                else:
                    # exact match: emit and advance
                    cores_new.append(core+0)
                    if idx == len(tens.cores)-1:
                        break
                    else:
                        idx+=1
                        core = tens.cores[idx]
                idx_shape += 1
                if idx_shape == len(shape):
                    break
            else:
                # target mode larger than current: merge with the next core
                idx += 1
                if idx>=len(tens.cores):
                    break
                core = tn.einsum('ijk,klm->ijlm',core,tens.cores[idx])
                core = tn.reshape(core,[core.shape[0],-1,core.shape[-1]])
    return TT(cores_new).round(eps)
def meshgrid(vectors):
    """
    Creates a meshgrid of torchtt.TT objects. Similar to numpy.meshgrid or torch.meshgrid.

    The input is a list of d torch.tensor vectors of sizes N_1, ... ,N_d.
    The result is a list of torchtt.TT instances of shapes N1 x ... x Nd.

    Args:
        vectors (list[torch.tensor]): the vectors (1d tensors).

    Returns:
        list[TT]: the resulting meshgrid.
    """
    dtype = vectors[0].dtype
    grids = []
    for i, v in enumerate(vectors):
        # rank-1 tensor: ones everywhere except the i-th core, which holds v
        cores = [tn.ones((1, w.shape[0], 1), dtype=dtype) for w in vectors]
        cores[i] = tn.reshape(v, [1, -1, 1])
        grids.append(TT(cores))
    return grids
def dot(a, b, axis=None):
    """
    Computes the dot product between 2 tensors in TT format.

    If both a and b have identical mode sizes the result is the dot product.
    If a and b have unequal mode sizes, the function performs index contraction:
    the modes of the tensor a along which the index contraction with b is
    performed are given in axis. The number of dimensions of a must be greater
    or equal to that of b. For the complex case (a,b) = b^H . a.

    Examples:
        ```
        a = torchtt.randn([3,4,5,6,7],[1,2,2,2,2,1])
        b = torchtt.randn([3,4,5,6,7],[1,2,2,2,2,1])
        c = torchtt.randn([3,5,6],[1,2,2,1])
        print(torchtt.dot(a,b))
        print(torchtt.dot(a,c,[0,2,3]))
        ```

    Args:
        a (torchtt.TT): the first tensor.
        b (torchtt.TT): the second tensor.
        axis (list[int], optional): the mode indices for index contraction. Defaults to None.

    Raises:
        InvalidArguments: Both operands should be TT instances.
        NotImplementedError: Operation not implemented for TT-matrices.
        ShapeMismatch: Operands are not the same size.
        ShapeMismatch: Number of the modes of the first tensor must be equal with the second.

    Returns:
        float or torchtt.TT: the result. If no axis index is provided the result is a scalar otherwise a torchtt.TT object.
    """
    if not isinstance(a, TT) or not isinstance(b, TT):
        raise InvalidArguments('Both operands should be TT instances.')
    # identity check (`is None`), not `== None`
    if axis is None:
        # treat first the full dot product
        # faster than partial projection
        if a.is_ttm or b.is_ttm:
            raise NotImplementedError('Operation not implemented for TT-matrices.')
        if a.N != b.N:
            raise ShapeMismatch('Operands are not the same size.')
        # sweep left to right, carrying the r_a x r_b boundary matrix
        result = tn.tensor([[1.0]],dtype = a.cores[0].dtype, device=a.cores[0].device)
        for i in range(len(a.N)):
            result = tn.einsum('ab,aim,bin->mn',result, a.cores[i], tn.conj(b.cores[i]))
        result = tn.squeeze(result)
    else:
        # partial case
        if a.is_ttm or b.is_ttm:
            raise NotImplementedError('Operation not implemented for TT-matrices.')
        if len(a.N)<len(b.N):
            raise ShapeMismatch('Number of the modes of the first tensor must be equal with the second.')
        k = 0 # index for the tensor b
        cores_new = []
        rank_left = 1
        # build a TT tensor that embeds conj(b) on the contracted modes and
        # identity connectors on the remaining modes of a
        for i in range(len(a.N)):
            if i in axis:
                cores_new.append(tn.conj(b.cores[k]))
                rank_left = b.cores[k].shape[2]
                k+=1
            else:
                rank_right = b.cores[k].shape[0] if i+1 in axis else rank_left
                cores_new.append(tn.conj(tn.einsum('ik,j->ijk',tn.eye(rank_left,rank_right,dtype=a.cores[0].dtype),tn.ones([a.N[i]],dtype=a.cores[0].dtype))))
        result = (a*TT(cores_new)).sum(axis)
    return result
def bilinear_form(x, A, y):
    """
    Computes the bilinear form x^T A y for TT tensors.

    Args:
        x (torchtt.TT): the first tensor.
        A (torchtt.TT): the operator (must be TT matrix).
        y (torchtt.TT): the second tensor.

    Raises:
        InvalidArguments: Inputs must be torchtt.TT instances.
        IncompatibleTypes: x and y must be TT tensors and A must be TT matrix.
        ShapeMismatch: Check the shapes. Required is x.N == A.M and y.N == A.N.

    Returns:
        torch.tensor: the result of the bilinear form as tensor with 1 element.
    """
    if not (isinstance(x, TT) and isinstance(A, TT) and isinstance(y, TT)):
        raise InvalidArguments("Inputs must be torchtt.TT instances.")
    if x.is_ttm or y.is_ttm or not A.is_ttm:
        raise IncompatibleTypes("x and y must be TT tensors and A must be TT matrix.")
    if x.N != A.M or y.N != A.N:
        raise ShapeMismatch("Check the shapes. Required is x.N == A.M and y.N == A.N.")
    # the actual sweep is implemented in the helper
    return bilinear_form_aux(x.cores, A.cores, y.cores, len(x.N))
def elementwise_divide(x, y, eps = 1e-12, starting_tensor = None, nswp = 50, kick = 4, local_iterations = 40, resets = 2, preconditioner = None, verbose = False):
    """
    Perform the elementwise division x/y of two tensors in the TT format using the AMEN method.

    Use this method if different AMEN arguments are needed.
    This method does not check the validity of the inputs.

    Args:
        x (torchtt.TT or scalar): first tensor (can also be scalar of type float, int, torch.tensor with shape (1)).
        y (torchtt.TT): second tensor.
        eps (float, optional): relative accuracy. Defaults to 1e-12.
        starting_tensor (torchtt.TT or None, optional): initial guess of the result (None for random initial guess). Defaults to None.
        nswp (int, optional): number of iterations. Defaults to 50.
        kick (int, optional): size of rank enrichment. Defaults to 4.
        local_iterations (int, optional): the number of iterations for the local iterative solver. Defaults to 40.
        resets (int, optional): the number of restarts in the GMRES solver. Defaults to 2.
        preconditioner (string, optional): Use preconditioner for the local solver (possible values None, 'c'). Defaults to None.
        verbose (bool, optional): display debug info. Defaults to False.

    Returns:
        torchtt.TT: the result.
    """
    # delegate to the AMEN solver, which solves diag(y) z = x for z = x / y
    result_cores = amen_divide(y, x, nswp, starting_tensor, eps, rmax=1000,
                               kickrank=kick,
                               local_iterations=local_iterations,
                               resets=resets, verbose=verbose,
                               preconditioner=preconditioner)
    return TT(result_cores)
def rank1TT(elements):
    """
    Compute the rank 1 TT from a list of vectors (or matrices).

    Args:
        elements (list[torch.tensor]): the list of vectors (or matrices in case a TT matrix should be created).

    Returns:
        torchtt.TT: the resulting TT object.
    """
    cores = []
    for element in elements:
        # add singleton rank dimensions in front and at the back
        cores.append(tn.unsqueeze(tn.unsqueeze(element, 0), -1))
    return TT(cores)
def numel(tensor):
    """
    Return the number of entries needed to store the TT cores for the given tensor.

    Args:
        tensor (torchtt.TT): the TT representation of the tensor.

    Returns:
        int: number of floats stored for the TT decomposition.
    """
    total = 0
    for index in range(len(tensor.N)):
        total += tn.numel(tensor.cores[index])
    return total
def diag(input):
    """
    Creates diagonal TT matrix from TT tensor or extracts the diagonal of a TT matrix.

    * If a TT matrix is provided the result is a TT tensor representing the diagonal \( \\mathsf{x}_{i_1...i_d} = \\mathsf{A}_{i_1...i_d,i_1...i_d} \)
    * If a TT tensor is provided the result is a diagonal TT matrix with the entries \( \\mathsf{A}_{i_1...i_d,j_1...j_d} = \\mathsf{x}_{i_1...i_d} \\delta_{i_1}^{j_1} \\cdots \\delta_{i_d}^{j_d} \)

    Args:
        input (TT): the input.

    Raises:
        InvalidArguments: Input must be a torchtt.TT instance.

    Returns:
        torchtt.TT: the result.
    """
    if not isinstance(input, TT):
        raise InvalidArguments("Input must be a torchtt.TT instance.")
    if input.is_ttm:
        # NOTE(review): tn.diagonal appends the diagonal as the LAST dimension,
        # i.e. cores become r1 x r2 x N - confirm TT() accepts this layout.
        return TT([tn.diagonal(c, dim1 = 1, dim2 = 2) for c in input.cores])
    else:
        # Expand each core r1 x N x r2 into a diagonal core r1 x N x N x r2.
        # The identity must match the core's dtype/device: tn.eye defaults to
        # float32 on CPU, which breaks float64 (library default) or GPU cores.
        return TT([tn.einsum('ijk,jm->ijmk', c, tn.eye(c.shape[1], dtype = c.dtype, device = c.device)) for c in input.cores])
def permute(input, dims, eps = 1e-12):
    """
    Permutes the dimensions of the tensor. Works similarily to `torch.permute`.

    Works like a bubble sort for both TT tensors and TT matrices.

    Examples:
        ```
        x_tt = torchtt.random([5,6,7,8,9],[1,2,3,4,2,1])
        xp_tt = torchtt.permute(x_tt, [4,3,2,1,0], 1e-10)
        print(xp_tt) # the shape of this tensor should be [9,8,7,6,5]
        ```

    Args:
        input (torchtt.TT): the input tensor.
        dims (list[int]): the order of the indices in the new tensor.
        eps (float, optional): the relative accuracy of the decomposition. Defaults to 1e-12.

    Raises:
        InvalidArguments: The input must be a TT tensor dims must be a list of integers or a tple of integers.
        ShapeMismatch: dims must be the length of the number of dimensions.

    Returns:
        torch.TT: the resulting tensor.
    """
    if not isinstance(input, TT) :
        raise InvalidArguments("The input must be a TT tensor dims must be a list of integers or a tple of integers.")
    if len(dims) != len(input.N):
        raise ShapeMismatch("dims must be the length of the number of dimensions.")
    # R holds the rank bookkeeping returned by the orthogonalization
    cores, R = rl_orthogonal(input.cores, input.R, input.is_ttm)
    d = len(cores)
    # distribute the accuracy over all the (at most d**1.5) swaps
    eps = eps/(d**1.5)
    indices = list(range(d))
    last_idx = 0
    inversions = True
    # bubble sort: swap neighbouring cores until `indices` matches `dims`
    while inversions:
        inversions = False
        for i in range(d-1):
            i1 = indices[i]
            i2 = indices[i+1]
            if dims.index(i1)>dims.index(i2):
                # inverion in the index permutation => the cores must be swapped.
                inversions = True
                indices[i] = i2
                indices[i+1] = i1
                # print(indices,' permute ', i1, i2)
                last_idx = i
                if input.is_ttm:
                    #reorthonormalize
                    # NOTE(review): `last_idx = i` just above makes
                    # range(last_idx, i) empty, so this loop never runs as
                    # written; also `Q, R = QR(...)` rebinds/shadows the rank
                    # list R with the triangular factor - confirm intent.
                    for k in range(last_idx, i):
                        Q, R = QR(tn.reshape(cores[k],[cores[k].shape[0]*cores[k].shape[1]*cores[k].shape[2], cores[k].shape[3]]))
                        R[k+1] = Q.shape[1]
                        cores[k] = tn.reshape(Q, [cores[k].shape[0], cores[k].shape[1], cores[k].shape[2], -1])
                        cores[k+1] = tn.einsum('ij,jkl->ikl',R,cores[k+1])
                    # merge the two neighbouring 4d cores, exchange their
                    # (row,col) mode pairs and split again with a truncated SVD
                    n2 = [cores[i].shape[1], cores[i].shape[2]]
                    core = tn.einsum('ijkl,lmno->ijkmno',cores[i],cores[i+1])
                    core = tn.permute(core, [0,3,4,1,2,5])
                    U,S,V = SVD(tn.reshape(core, [core.shape[0]*core.shape[1]*core.shape[2],-1]))
                    if S.is_cuda:
                        r_now = min([rank_chop(S.cpu().numpy(),tn.linalg.norm(S).cpu().numpy()*eps)])
                    else:
                        r_now = min([rank_chop(S.numpy(),tn.linalg.norm(S).numpy()*eps)])
                    US = U[:,:r_now]@tn.diag(S[:r_now])
                    V = V[:r_now,:]
                    cores[i] = tn.reshape(US,[cores[i].shape[0],cores[i+1].shape[1],cores[i+1].shape[2],-1])
                    R[i+1] = cores[i].shape[2]
                    cores[i+1] = tn.reshape(V, [-1]+ n2 +[cores[i+1].shape[3]])
                else:
                    #reorthonormalize
                    # NOTE(review): same concerns as above (empty range and
                    # shadowing of R by the QR factor).
                    for k in range(last_idx, i):
                        Q, R = QR(tn.reshape(cores[k],[cores[k].shape[0]*cores[k].shape[1], cores[k].shape[2]]))
                        R[k+1] = Q.shape[1]
                        cores[k] = tn.reshape(Q, [cores[k].shape[0], cores[k].shape[1],-1])
                        cores[k+1] = tn.einsum('ij,jkl->ikl',R,cores[k+1])
                    # merge the two 3d cores, swap their modes and split again
                    n2 = cores[i].shape[1]
                    core = tn.einsum('ijk,klm->ijlm',cores[i],cores[i+1])
                    core = tn.permute(core, [0,2,1,3])
                    U,S,V = SVD(tn.reshape(core, [core.shape[0]*core.shape[1],-1]))
                    if S.is_cuda:
                        r_now = min([rank_chop(S.cpu().numpy(),tn.linalg.norm(S).cpu().numpy()*eps)])
                    else:
                        r_now = min([rank_chop(S.numpy(),tn.linalg.norm(S).numpy()*eps)])
                    US = U[:,:r_now]@tn.diag(S[:r_now])
                    V = V[:r_now,:]
                    cores[i] = tn.reshape(US,[cores[i].shape[0],cores[i+1].shape[1],-1])
                    R[i+1] = cores[i].shape[2]
                    cores[i+1] = tn.reshape(V, [-1, n2, cores[i+1].shape[2]])
    return TT(cores)
def save(tensor, path):
    """
    Save a `torchtt.TT` object in a file.

    Examples:
        ```
        import torchtt
        #generate a TT object
        A = torchtt.randn([10,20,30,40,4,5],[1,6,5,4,3,2,1])
        # save the TT object
        torchtt.save(A,"./test.TT")
        # load the TT object
        B = torchtt.load("./test.TT")
        # the loaded should be the same
        print((A-B).norm()/A.norm())
        ```

    Args:
        tensor (torchtt.TT): the tensor to be saved.
        path (str): the file name.

    Raises:
        InvalidArguments: First argument must be a torchtt.TT instance.
    """
    if not isinstance(tensor, TT):
        raise InvalidArguments("First argument must be a torchtt.TT instance.")
    # TT matrices additionally store the row mode sizes M
    dct = {"is_ttm": tensor.is_ttm, "R": tensor.R}
    if tensor.is_ttm:
        dct["M"] = tensor.M
    dct["N"] = tensor.N
    dct["cores"] = tensor.cores
    tn.save(dct, path)
def load(path):
    """
    Load a torchtt.TT object from a file.

    Examples:
        ```
        import torchtt
        #generate a TT object
        A = torchtt.randn([10,20,30,40,4,5],[1,6,5,4,3,2,1])
        # save the TT object
        torchtt.save(A,"./test.TT")
        # load the TT object
        B = torchtt.load("./test.TT")
        # the loaded should be the same
        print((A-B).norm()/A.norm())
        ```

    Args:
        path (str): the file name.

    Returns:
        torchtt.TT: the tensor.
    """
    # only the cores are needed to rebuild the TT object
    checkpoint = tn.load(path)
    return TT(checkpoint['cores'])
def cat(tensors, dim = 0):
    """
    Concatenate tensors in the TT format along a given dimension `dim`. Only works for TT tensors and not TT matrices.

    Examples:
        ```
        import torchtt
        import torch
        a1 = torchtt.randn((3,4,2,6,7), [1,2,3,4,2,1])
        a2 = torchtt.randn((3,4,8,6,7), [1,3,1,7,5,1])
        a3 = torchtt.randn((3,4,15,6,7), [1,3,10,2,4,1])
        a = torchtt.cat((a1,a2,a3),2)
        print(torch.linalg.norm(a.full()-af))
        ```

    Args:
        tensors (tuple[TT]): the tensors to be concatenated. Their mode sizes must match for all modex except the concatenating dimension.
        dim (int, optional): The dimension to be concatenated after. Defaults to 0.

    Raises:
        InvalidArguments: Not implemented for tensor matrices.
        InvalidArguments: The mode sizes must be the same on the nonconcatenated dimensions for all the provided tensors.
        InvalidArguments: The tensors must have the same number of dimensions.

    Returns:
        torchtt.TT: the result.
    """
    if(len(tensors) == 0):
        return None
    if tensors[0].is_ttm:
        raise InvalidArguments("Not implemented for tensor matrices.")
    # collect the rank lists of all operands
    Rs = [tensors[0].R]
    for i in range(1, len(tensors)):
        if tensors[i].is_ttm:
            raise InvalidArguments("Not implemented for tensor matrices.")
        # NOTE(review): the `and` means this only raises when BOTH the left
        # and the right mode slices mismatch - looks like it should be `or`.
        if tensors[i].N[:dim] != tensors[0].N[:dim] and tensors[i].N[(dim+1):] != tensors[0].N[(dim+1):]:
            raise InvalidArguments("The mode sizes must be the same on the nonconcatenated dimensions for all the provided tensors.")
        if len(tensors[i].N) != len(tensors[0].N):
            raise InvalidArguments("The tensors must have the same number of dimensions.")
        Rs.append(tensors[i].R)
    cores = []
    if tensors[0].is_ttm:
        # unreachable: TT matrices were rejected above
        pass
    else:
        # the ranks of the result are the sums of the operands' ranks
        # (border ranks stay 1)
        r_sum = [1]
        for i in range(1,len(tensors[0].N)):
            r_sum.append(sum([Rs[k][i] for k in range(len(tensors))]))
        r_sum.append(1)
        for i in range(len(tensors[0].N)):
            # allocate the block-diagonal core for mode i
            if i == dim:
                n = sum([t.N[dim] for t in tensors])
                cores.append(tn.zeros((r_sum[i], n, r_sum[i+1]), device = tensors[0].cores[0].device, dtype = tensors[0].cores[0].dtype))
            else:
                cores.append(tn.zeros((r_sum[i], tensors[0].N[i], r_sum[i+1]), device = tensors[0].cores[0].device, dtype = tensors[0].cores[0].dtype))
            # copy each operand's core into its diagonal block; the mode
            # offset (offset2) advances only along the concatenated dimension
            offset1 = 0
            offset2 = 0
            offset3 = 0
            for t in tensors:
                if i==dim:
                    cores[i][offset1:(offset1+t.cores[i].shape[0]),offset2:(offset2+t.cores[i].shape[1]),offset3:(offset3+t.cores[i].shape[2])] = t.cores[i]
                    if i>0: offset1 += t.cores[i].shape[0]
                    offset2 += t.cores[i].shape[1]
                    if i<len(tensors[0].N)-1: offset3 += t.cores[i].shape[2]
                else:
                    cores[i][offset1:(offset1+t.cores[i].shape[0]),:,offset3:(offset3+t.cores[i].shape[2])] = t.cores[i]
                    if i>0: offset1 += t.cores[i].shape[0]
                    if i<len(tensors[0].N)-1: offset3 += t.cores[i].shape[2]
    return TT(cores)
def pad(tensor, padding, value = 0.0):
    """
    Pad a tensor in the TT format.

    The `padding` argument is a tuple of tuples `((b1, a1), (b2, a2), ... , (bd, ad))`.
    Each dimension is padded with `bk` at the beginning and `ak` at the end. The padding value is constant and is given as the argument `value`.
    In case of a TT operator, diagonal padding is performed. On the diagonal, the provided `value` is inserted.

    Args:
        tensor (TT): the tensor to be padded.
        padding (tuple(tuple(int))): the paddings.
        value (float, optional): the value to pad. Defaults to 0.0.

    Raises:
        InvalidArguments: The number of paddings should not exceed the number of dimensions of the tensor.

    Returns:
        TT: the result.
    """
    if(len(padding) > len(tensor.N)):
        raise InvalidArguments("The number of paddings should not exceed the number of dimensions of the tensor.")
    if tensor.is_ttm:
        # TT matrix: extend each core with zero rows/columns and place
        # `value`-scaled identity blocks in the padded diagonal corners.
        # The padding is applied to the trailing modes (zip with reversed()).
        cores = [c.clone() for c in tensor.cores]
        for pad,k in zip(reversed(padding),reversed(range(len(tensor.N)))):
            # tnf.pad order: last dim first; interior ranks also grow by 1
            cores[k] = tnf.pad(cores[k],(1 if k < len(tensor.N)-1 else 0,1 if k < len(tensor.N)-1 else 0,pad[0],pad[1],pad[0],pad[1],1 if k>0 else 0,1 if k>0 else 0),value = 0)
            cores[k][0,:pad[0],:pad[0],0] = value*tn.eye(pad[0], device = cores[k].device, dtype = cores[k].dtype)
            cores[k][-1,(pad[0]+tensor.M[k]):,(pad[0]+tensor.N[k]):,-1] = value*tn.eye(pad[1], device = cores[k].device, dtype = cores[k].dtype)
            # only the first processed core carries `value`; the rest get 1
            # so the rank-1 padding contribution multiplies to `value`
            value = 1
    else:
        # TT tensor: spread `value` over the rank product so that the padded
        # entries of the full tensor multiply back to `value`
        rprod = np.prod(tensor.R)
        value = value/rprod
        cores = [c.clone() for c in tensor.cores]
        for pad,k in zip(reversed(padding),reversed(range(len(tensor.N)))):
            cores[k] = tnf.pad(cores[k],(0,0,pad[0],pad[1],0,0),value = value)
            value = 1
    return TT(cores)
| 97,941 | 40.448159 | 297 | py |
torchTT | torchTT-main/torchtt/solvers.py | """
System solvers in the TT format.
"""
import torch as tn
import numpy as np
import torchtt
import datetime
from torchtt._decomposition import QR, SVD, lr_orthogonal, rl_orthogonal
from torchtt._iterative_solvers import BiCGSTAB_reset, gmres_restart
import opt_einsum as oe
from .errors import *
try:
import torchttcpp
_flag_use_cpp = True
except:
import warnings
warnings.warn("\x1B[33m\nC++ implementation not available. Using pure Python.\n\033[0m")
_flag_use_cpp = False
def cpp_enabled():
    """
    Check whether the compiled C++ backend was successfully imported.

    Returns:
        bool: True if the C++ implementation is available, False otherwise.
    """
    return bool(_flag_use_cpp)
def _local_product(Phi_right, Phi_left, coreA, core, shape):
    """
    Compute the local matrix-vector product for a single TT core.

    Args:
        Phi_right (torch.tensor): right interface tensor of shape r x R x r.
        Phi_left (torch.tensor): left interface tensor of shape lp x Rp x lp.
        coreA (torch.tensor): current core of A, shape is rp x N x N x r.
        core (torch.tensor): the current core of x, shape is rp x N x r.
        shape (torch.Size): the shape of x (kept for interface compatibility; unused).

    Returns:
        torch.tensor: the result of the contraction, shape lp x N x r.
    """
    return oe.contract('lsr,smnS,LSR,rnR->lmL', Phi_left, coreA, Phi_right, core)
class _LinearOp():
    """
    Local linear operator used in the micro-iterations of AMEn.

    Applies the contraction Phi_left x coreA x Phi_right to a vectorized core,
    optionally combined with a central ('c') or right ('r') Jacobi-like
    preconditioner whose blocks are inverted once in the constructor.
    """
    def __init__(self,Phi_left,Phi_right,coreA,shape,prec):
        """
        Args:
            Phi_left (torch.tensor): left interface, shape l x s x r.
            Phi_right (torch.tensor): right interface, shape L x S x R.
            coreA (torch.tensor): operator core, shape s x m x n x S.
            shape (list[int]): shape of the local solution core [r, n, R].
            prec (str or None): preconditioner flag: None, 'c' or 'r'.
        """
        self.Phi_left = Phi_left
        self.Phi_right = Phi_right
        self.coreA = coreA
        self.shape = shape
        self.prec = prec
        # For large local problems a precompiled opt_einsum contraction (which
        # fuses the preconditioner application) is faster; otherwise fall back
        # to explicit tensordot calls in matvec.
        self.contraction = None
        if prec == 'c':
            # central preconditioner: invert the n x n diagonal blocks
            Jl = tn.einsum('sd,smnS->dmnS',tn.diagonal(Phi_left,0,0,2),coreA)
            Jr = tn.diagonal(Phi_right,0,0,2)
            J = tn.einsum('dmnS,SD->dDmn',Jl,Jr)
            self.J = tn.linalg.inv(J)
            if shape[0]*shape[1]*shape[2] > 1e5:
                self.contraction = oe.contract_expression('lsr,smnS,LSR,raR,rRna->lmL', Phi_left.shape, coreA.shape, Phi_right.shape, shape, self.J.shape)
        elif prec == 'r':
            # right preconditioner: invert the (n*R) x (n*R) blocks
            Jl = tn.einsum('sd,smnS->dmnS',tn.diagonal(Phi_left,0,0,2),coreA)
            J = tn.einsum('dmnS,LSR->dmLnR',Jl,Phi_right)
            sh = J.shape
            J = tn.reshape(J, [-1,J.shape[1]*J.shape[2], J.shape[3]*J.shape[4]])
            self.J = tn.reshape(tn.linalg.inv(J), sh)
            if shape[0]*shape[1]*shape[2] > 2*1e4:
                self.contraction = oe.contract_expression('lsr,smnS,LSR,rab,rnRab->lmL', Phi_left.shape, coreA.shape, Phi_right.shape, shape, self.J.shape)

    def apply_prec(self,x):
        """Apply the inverted preconditioner blocks to the core x (shape r x n x R)."""
        if self.prec == 'c':
            return tn.einsum('rnR,rRmn->rmR',x,self.J)
        elif self.prec == 'r':
            return tn.einsum('rnR,rmLnR->rmL', x, self.J)

    def matvec(self, x, apply_prec = True):
        """
        Apply the operator (and, if configured, the preconditioner) to the
        vectorized core x. Returns a column vector of shape (-1, 1).
        """
        if self.prec is None or not apply_prec:
            x = tn.reshape(x,self.shape)
            w = tn.tensordot(x,self.Phi_left,([0],[2]))      # rnR,lsr->nRls
            w = tn.tensordot(w,self.coreA,([0,3],[2,0]))     # nRls,smnS->RlmS
            w = tn.tensordot(w,self.Phi_right,([0,3],[2,1])) # RlmS,LSR->lmL
        elif self.prec == 'c' or self.prec == 'r':
            x = tn.reshape(x,self.shape)
            if not self.contraction is None:
                # fused preconditioner + operator contraction
                w = self.contraction(self.Phi_left, self.coreA, self.Phi_right, x, self.J)
            else:
                x = self.apply_prec(x)
                w = tn.tensordot(x,self.Phi_left,([0],[2]))      # rnR,lsr->nRls
                w = tn.tensordot(w,self.coreA,([0,3],[2,0]))     # nRls,smnS->RlmS
                w = tn.tensordot(w,self.Phi_right,([0,3],[2,1])) # RlmS,LSR->lmL
        else:
            # BUGFIX: raise the project-specific exception instead of a bare Exception
            # (consistent with the rest of the module; still an Exception subclass).
            raise InvalidArguments('Preconditioner '+str(self.prec)+' not defined.')
        return tn.reshape(w,[-1,1])
def amen_solve(A, b, nswp = 22, x0 = None, eps = 1e-10,rmax = 32768, max_full = 500, kickrank = 4, kick2 = 0, trunc_norm = 'res', local_solver = 1, local_iterations = 40, resets = 2, verbose = False, preconditioner = None, use_cpp = True, use_single_precision = False):
    """
    Solve a multilinear system :math:`\\mathsf{Ax} = \\mathsf{b}` in the Tensor Train format.

    This method implements the algorithm from `Sergey V Dolgov, Dmitry V Savostyanov, Alternating minimal energy methods for linear systems in higher dimensions <https://epubs.siam.org/doi/abs/10.1137/140953289>`_.

    Example:

        .. code-block:: python

            import torchtt
            A = torchtt.random([(4,4),(5,5),(6,6)],[1,2,3,1]) # create random matrix
            x = torchtt.random([4,5,6],[1,2,3,1]) # invent a random solution
            b = A @ x # compute the rhs
            xx = torchtt.solvers.amen_solve(A,b) # solve
            print((xx-x).norm()/x.norm()) # error

    Args:
        A (torchtt.TT): the system matrix in TT.
        b (torchtt.TT): the right hand side in TT.
        nswp (int, optional): number of sweeps. Defaults to 22.
        x0 (torchtt.TT, optional): initial guess. If None is provided the initial guess is a ones tensor. Defaults to None.
        eps (float, optional): relative residual. Defaults to 1e-10.
        rmax (int, optional): maximum rank. Defaults to 32768.
        max_full (int, optional): the maximum size of the core until direct solver is used for the local subproblem. Defaults to 500.
        kickrank (int, optional): rank enrichment. Defaults to 4.
        kick2 (int, optional): additional (random) rank enrichment. Defaults to 0.
        trunc_norm (str, optional): norm used for the rank truncation; 'res' truncates w.r.t. the local residual. Defaults to 'res'.
        local_solver (int, optional): choose local iterative solver: 1 for GMRES and 2 for BiCGSTAB. Defaults to 1.
        local_iterations (int, optional): number of GMRES iterations for the local subproblems. Defaults to 40.
        resets (int, optional): number of resets in the GMRES. Defaults to 2.
        verbose (bool, optional): choose whether to display or not additional information during the runtime. Defaults to False.
        preconditioner (str, optional): choose the preconditioner for the local system. Possible values are None, 'c' (central Jacobi preconditioner) and 'r' (right Jacobi preconditioner). No preconditioner is used if None is provided. Defaults to None.
        use_cpp (bool, optional): use the C++ implementation of AMEn (if it was compiled). Defaults to True.
        use_single_precision (bool, optional): solve the local subproblems in single precision (Python implementation only). Defaults to False.

    Raises:
        InvalidArguments: A and b must be TT instances.
        InvalidArguments: Invalid preconditioner.
        IncompatibleTypes: A must be TT-matrix and b must be vector.
        ShapeMismatch: A is not quadratic.
        ShapeMismatch: Dimension mismatch.

    Returns:
        torchtt.TT: the approximation of the solution in TT format.
    """
    # perform checks of the input data
    if not (isinstance(A,torchtt.TT) and isinstance(b,torchtt.TT)):
        raise InvalidArguments('A and b must be TT instances.')
    if not (A.is_ttm and not b.is_ttm):
        raise IncompatibleTypes('A must be TT-matrix and b must be vector.')
    if A.M != A.N:
        raise ShapeMismatch('A is not quadratic.')
    if A.N != b.N:
        raise ShapeMismatch('Dimension mismatch.')

    if use_cpp and _flag_use_cpp:
        # BUGFIX: compare with `is None` (identity) instead of `== None`, which
        # would invoke any overloaded equality on the TT object.
        if x0 is None:
            x_cores = []
            x_R = [1]*(1+len(A.N))
        else:
            x_cores = x0.cores
            x_R = x0.R
        if preconditioner is None:
            prec = 0
        elif preconditioner == 'c':
            prec = 1
        elif preconditioner == 'r':
            prec = 2
        else:
            raise InvalidArguments("Invalid preconditioner.")
        cores = torchttcpp.amen_solve(A.cores, b.cores, x_cores, b.N, A.R, b.R, x_R, nswp, eps, rmax, max_full, kickrank, kick2, local_iterations, resets, verbose, prec)
        return torchtt.TT(list(cores))
    else:
        return _amen_solve_python(A, b, nswp, x0, eps,rmax, max_full, kickrank, kick2, trunc_norm, local_solver, local_iterations, resets, verbose, preconditioner, use_single_precision)
def _amen_solve_python(A, b, nswp = 22, x0 = None, eps = 1e-10,rmax = 1024, max_full = 500, kickrank = 4, kick2 = 0, trunc_norm = 'res', local_solver = 1, local_iterations = 40, resets = 2, verbose = False, preconditioner = None, use_single_precision = False):
    """
    Pure Python implementation of the AMEn solver for Ax = b in the TT format.

    See `amen_solve` for the meaning of the arguments. The routine sweeps over
    the TT cores, solving each local system either directly (small cores) or
    iteratively (GMRES/BiCGSTAB), and enriches the ranks using a low-rank
    approximation z of the residual (kickrank + kick2 extra directions).
    """
    if verbose: time_total = datetime.datetime.now()
    dtype = A.cores[0].dtype
    device = A.cores[0].device
    rank_search = 1 # binary rank search
    damp = 2

    # NOTE(review): `x0 == None` should probably be `x0 is None` — `==` may hit
    # an overloaded equality on TT objects; confirm with TT's operator overloads.
    if x0 == None:
        x = torchtt.ones(b.N, dtype = dtype, device = device)
    else:
        x = x0

    # kkt = torchttcpp.amen_solve(A.cores, b.cores, x.cores, b.N, A.R, b.R, x.R, nswp, eps, rmax, max_full, kickrank, kick2, local_iterations, resets, verbose, 0)

    rA = A.R
    N = b.N
    d = len(N)
    x_cores = x.cores.copy()
    rx = x.R.copy()
    # check if rmax is a list; expand a scalar bound to one bound per TT rank
    if isinstance(rmax, int):
        rmax = [1] + (d-1) * [rmax] + [1]

    # z cores: random residual-enrichment tensor, right-to-left orthogonalized
    rz = [1]+(d-1)*[kickrank+kick2]+[1]
    z_tt = torchtt.random(N,rz,dtype,device = device)
    z_cores = z_tt.cores
    z_cores, rz = rl_orthogonal(z_cores, rz, False)

    norms = np.zeros(d)
    Phiz = [tn.ones((1,1,1), dtype = dtype, device = device)] + [None] * (d-1) + [tn.ones((1,1,1), dtype = dtype, device = device)] # size is rzk x Rk x rxk
    Phiz_b = [tn.ones((1,1), dtype = dtype, device = device)] + [None] * (d-1) + [tn.ones((1,1), dtype = dtype, device = device)] # size is rzk x rzbk

    Phis = [tn.ones((1,1,1), dtype = dtype, device = device)] + [None] * (d-1) + [tn.ones((1,1,1), dtype = dtype, device = device)] # size is rk x Rk x rk
    Phis_b = [tn.ones((1,1), dtype = dtype, device = device)] + [None] * (d-1) + [tn.ones((1,1), dtype = dtype, device = device)] # size is rk x rbk

    last = False

    # running norm corrections (per bond) to keep the interfaces well scaled
    normA = np.ones((d-1))
    normb = np.ones((d-1))
    normx = np.ones((d-1))
    nrmsc = 1.0

    if verbose:
        print('Starting AMEn solve with:\n\tepsilon: %g\n\tsweeps: %d\n\tlocal iterations: %d\n\tresets: %d\n\tpreconditioner: %s'%(eps, nswp, local_iterations, resets, str(preconditioner)))
        print()

    for swp in range(nswp):
        # right to left orthogonalization
        if verbose:
            print()
            print('Starting sweep %d %s...'%(swp+1,"(last one) " if last else ""))
            tme_sweep = datetime.datetime.now()

        tme = datetime.datetime.now()
        for k in range(d-1,0,-1):

            # update the z part (ALS) update
            if not last:
                if swp > 0:
                    czA = _local_product(Phiz[k+1],Phiz[k],A.cores[k],x_cores[k],x_cores[k].shape) # shape rzp x N x rz
                    czy = tn.einsum('br,bnB,BR->rnR',Phiz_b[k],b.cores[k],Phiz_b[k+1]) # shape is rzp x N x rz
                    cz_new = czy*nrmsc - czA
                    _,_,vz = SVD(tn.reshape(cz_new,[cz_new.shape[0],-1]))
                    cz_new = vz[:min(kickrank,vz.shape[0]),:].t() # truncate to kickrank
                    if k < d-1: # extend cz_new with random elements
                        cz_new = tn.cat((cz_new,tn.randn((cz_new.shape[0],kick2), dtype = dtype, device = device)),1)
                else:
                    cz_new = tn.reshape(z_cores[k],[rz[k],-1]).t()
                qz, _ = QR(cz_new)
                rz[k] = qz.shape[1]
                z_cores[k] = tn.reshape(qz.t(),[rz[k],N[k],rz[k+1]])

            # norm correction ?
            if swp > 0: nrmsc = nrmsc * normA[k-1] * normx[k-1] / normb[k-1]

            # orthogonalize the current x core and absorb the R factor leftwards
            core = tn.reshape(x_cores[k],[rx[k],N[k]*rx[k+1]]).t()
            Qmat, Rmat = QR(core)
            core_prev = tn.einsum('ijk,km->ijm',x_cores[k-1],Rmat.T)
            rx[k] = Qmat.shape[1]

            current_norm = tn.linalg.norm(core_prev)
            if current_norm>0:
                core_prev = core_prev / current_norm
            else:
                current_norm = 1.0
            normx[k-1] = normx[k-1]*current_norm

            x_cores[k] = tn.reshape(Qmat.t(),[rx[k],N[k],rx[k+1]])
            x_cores[k-1] = core_prev[:]

            # update phis (einsum)
            Phis[k] = _compute_phi_bck_A(Phis[k+1],x_cores[k],A.cores[k],x_cores[k])
            Phis_b[k] = _compute_phi_bck_rhs(Phis_b[k+1],b.cores[k],x_cores[k])

            # ... and norms
            norm = tn.linalg.norm(Phis[k])
            norm = norm if norm>0 else 1.0
            normA[k-1] = norm
            Phis[k] = Phis[k] / norm
            norm = tn.linalg.norm(Phis_b[k])
            norm = norm if norm>0 else 1.0
            normb[k-1] = norm
            Phis_b[k] = Phis_b[k]/norm

            # norm correction
            nrmsc = nrmsc * normb[k-1]/ (normA[k-1] * normx[k-1])

            # compute phis_z
            if not last:
                Phiz[k] = _compute_phi_bck_A(Phiz[k+1], z_cores[k], A.cores[k], x_cores[k]) / normA[k-1]
                Phiz_b[k] = _compute_phi_bck_rhs(Phiz_b[k+1], b.cores[k], z_cores[k]) / normb[k-1]

        # start loop (left-to-right sweep with local solves and rank adaption)
        max_res = 0
        max_dx = 0

        for k in range(d):
            if verbose: print('\tCore',k)
            previous_solution = tn.reshape(x_cores[k],[-1,1])

            # assemble rhs
            rhs = tn.einsum('br,bmB,BR->rmR',Phis_b[k] , b.cores[k] * nrmsc, Phis_b[k+1])
            rhs = tn.reshape(rhs,[-1,1])
            norm_rhs = tn.linalg.norm(rhs)

            # residuals
            real_tol = (eps/np.sqrt(d))/damp

            # solve the local system
            use_full = rx[k]*N[k]*rx[k+1] < max_full
            if use_full:
                # solve the full system
                if verbose: print('\t\tChoosing direct solver (local size %d)....'%(rx[k]*N[k]*rx[k+1]))
                Bp = tn.einsum('smnS,LSR->smnRL',A.cores[k],Phis[k+1]) # shape is Rp x N x N x r x r
                B = tn.einsum('lsr,smnRL->lmLrnR',Phis[k],Bp)
                B = tn.reshape(B,[rx[k]*N[k]*rx[k+1],rx[k]*N[k]*rx[k+1]])

                solution_now = tn.linalg.solve(B,rhs)

                res_old = tn.linalg.norm(B@previous_solution-rhs)/norm_rhs
                res_new = tn.linalg.norm(B@solution_now-rhs)/norm_rhs
            else:
                # iterative solver
                if verbose:
                    print('\t\tChoosing iterative solver %s (local size %d)....'%('GMRES' if local_solver==1 else 'BiCGSTAB_reset', rx[k]*N[k]*rx[k+1]))
                    time_local = datetime.datetime.now()
                shape_now = [rx[k],N[k],rx[k+1]]

                if use_single_precision:
                    # solve the correction equation in float32, correct in full precision
                    Op = _LinearOp(Phis[k].to(tn.float32),Phis[k+1].to(tn.float32),A.cores[k].to(tn.float32),shape_now, preconditioner)

                    eps_local = real_tol * norm_rhs
                    drhs = Op.matvec(previous_solution.to(tn.float32), False)
                    drhs = rhs.to(tn.float32)-drhs
                    eps_local = eps_local / tn.linalg.norm(drhs)
                    if local_solver == 1:
                        solution_now, flag, nit = gmres_restart(Op, drhs, previous_solution.to(tn.float32)*0, rhs.shape[0], local_iterations+1, eps_local, resets)
                    elif local_solver == 2:
                        solution_now, flag, nit, _ = BiCGSTAB_reset(Op, drhs, previous_solution.to(tn.float32)*0, eps_local, local_iterations)
                    else:
                        raise InvalidArguments('Solver not implemented.')

                    if preconditioner != None:
                        solution_now = Op.apply_prec(tn.reshape(solution_now,shape_now))
                        solution_now = tn.reshape(solution_now,[-1,1])

                    solution_now = previous_solution + solution_now.to(dtype)
                    res_old = tn.linalg.norm(Op.matvec(previous_solution.to(tn.float32), False).to(dtype)-rhs)/norm_rhs
                    res_new = tn.linalg.norm(Op.matvec(solution_now.to(tn.float32), False).to(dtype)-rhs)/norm_rhs
                else:
                    Op = _LinearOp(Phis[k],Phis[k+1],A.cores[k],shape_now, preconditioner)

                    eps_local = real_tol * norm_rhs
                    drhs = Op.matvec(previous_solution, False)
                    drhs = rhs-drhs
                    eps_local = eps_local / tn.linalg.norm(drhs)
                    if local_solver == 1:
                        solution_now, flag, nit = gmres_restart(Op, drhs, previous_solution*0, rhs.shape[0], local_iterations+1, eps_local, resets)
                    elif local_solver == 2:
                        solution_now, flag, nit, _ = BiCGSTAB_reset(Op, drhs, previous_solution*0, eps_local, local_iterations)
                    else:
                        raise InvalidArguments('Solver not implemented.')

                    if preconditioner != None:
                        solution_now = Op.apply_prec(tn.reshape(solution_now,shape_now))
                        solution_now = tn.reshape(solution_now,[-1,1])

                    solution_now = previous_solution + solution_now
                    res_old = tn.linalg.norm(Op.matvec(previous_solution, False)-rhs)/norm_rhs
                    res_new = tn.linalg.norm(Op.matvec(solution_now, False)-rhs)/norm_rhs

                if verbose:
                    print('\t\tFinished with flag %d after %d iterations with relres %g (from %g)'%(flag,nit,res_new, real_tol * norm_rhs))
                    time_local = datetime.datetime.now() - time_local
                    print('\t\tTime needed ',time_local)
            # residual damp check
            if res_old/res_new < damp and res_new > real_tol:
                if verbose: print('WARNING: residual increases. res_old %g, res_new %g, real_tol %g'%(res_old,res_new,real_tol)) # warning (from tt toolbox)

            # compute residual and step size
            dx = tn.linalg.norm(solution_now-previous_solution)/tn.linalg.norm(solution_now)
            if verbose:
                print('\t\tdx = %g, res_now = %g, res_old = %g'%(dx,res_new,res_old))

            max_dx = max(dx,max_dx)
            max_res = max(max_res,res_old)

            solution_now = tn.reshape(solution_now,[rx[k]*N[k],rx[k+1]])
            # truncation
            if k<d-1:
                u, s, v = SVD(solution_now)
                if trunc_norm == 'fro':
                    pass
                else:
                    # search for a rank such that offeres small enough residuum
                    # TODO: binary search?
                    r = 0
                    for r in range(u.shape[1]-1,0,-1):
                        solution = u[:,:r] @ tn.diag(s[:r]) @ v[:r,:] # solution has the same size
                        if use_full:
                            res = tn.linalg.norm(B@tn.reshape(solution,[-1,1])-rhs)/norm_rhs
                        else:
                            res = tn.linalg.norm(Op.matvec(solution.to(tn.float32 if use_single_precision else dtype)).to(dtype)-rhs)/norm_rhs
                        if res > max(real_tol*damp,res_new):
                            break
                    r += 1
                    r = min([r,tn.numel(s),rmax[k+1]])
            else:
                u, v = QR(solution_now)
                r = u.shape[1]
                s = tn.ones(r, dtype = dtype, device = device)

            u = u[:,:r]
            v = tn.diag(s[:r]) @ v[:r,:]
            v = v.t()

            if not last:
                # update the z part: project the residual and truncate
                czA = _local_product(Phiz[k+1], Phiz[k], A.cores[k], tn.reshape(u@v.t(),[rx[k],N[k],rx[k+1]]), [rx[k],N[k],rx[k+1]]) # shape rzp x N x rz
                czy = tn.einsum('br,bnB,BR->rnR',Phiz_b[k],b.cores[k]*nrmsc,Phiz_b[k+1]) # shape is rzp x N x rz
                cz_new = czy - czA
                uz,_,_ = SVD(tn.reshape(cz_new, [rz[k]*N[k],rz[k+1]]))
                cz_new = uz[:,:min(kickrank,uz.shape[1])] # truncate to kickrank
                if k < d-1: # extend cz_new with random elements
                    cz_new = tn.cat((cz_new,tn.randn((cz_new.shape[0],kick2), dtype = dtype, device = device)),1)
                qz,_ = QR(cz_new)
                rz[k+1] = qz.shape[1]
                z_cores[k] = tn.reshape(qz,[rz[k],N[k],rz[k+1]])

            if k < d-1:
                if not last:
                    # rank enrichment: append the projected residual directions to u
                    left_res = _local_product(Phiz[k+1],Phis[k],A.cores[k],tn.reshape(u@v.t(),[rx[k],N[k],rx[k+1]]),[rx[k],N[k],rx[k+1]])
                    left_b = tn.einsum('br,bmB,BR->rmR',Phis_b[k],b.cores[k]*nrmsc,Phiz_b[k+1])
                    uk = left_b - left_res # rx_k x N_k x rz_k+1
                    u, Rmat = QR(tn.cat((u,tn.reshape(uk,[u.shape[0],-1])),1))
                    r_add = uk.shape[2]
                    v = tn.cat((v,tn.zeros([rx[k+1],r_add], dtype = dtype, device = device)), 1)
                    v = v @ Rmat.t()

                r = u.shape[1]
                v = tn.einsum('ji,jkl->ikl',v,x_cores[k+1])
                # remove norm correction
                nrmsc = nrmsc * normA[k] * normx[k] / normb[k]

                norm_now = tn.linalg.norm(v)

                if norm_now>0:
                    v = v / norm_now
                else:
                    norm_now = 1.0
                normx[k] = normx[k] * norm_now

                x_cores[k] = tn.reshape(u, [rx[k],N[k],r])
                x_cores[k+1] = tn.reshape(v, [r,N[k+1],rx[k+2]])
                rx[k+1] = r

                # next phis with norm correction
                Phis[k+1] = _compute_phi_fwd_A(Phis[k], x_cores[k], A.cores[k], x_cores[k])
                Phis_b[k+1] = _compute_phi_fwd_rhs(Phis_b[k], b.cores[k],x_cores[k])

                # ... and norms
                norm = tn.linalg.norm(Phis[k+1])
                norm = norm if norm>0 else 1.0
                normA[k] = norm
                Phis[k+1] = Phis[k+1] / norm
                norm = tn.linalg.norm(Phis_b[k+1])
                norm = norm if norm>0 else 1.0
                normb[k] = norm
                Phis_b[k+1] = Phis_b[k+1] / norm

                # norm correction
                nrmsc = nrmsc * normb[k] / ( normA[k] * normx[k] )

                # next phiz
                if not last:
                    Phiz[k+1] = _compute_phi_fwd_A(Phiz[k], z_cores[k], A.cores[k], x_cores[k]) / normA[k]
                    Phiz_b[k+1] = _compute_phi_fwd_rhs(Phiz_b[k], b.cores[k],z_cores[k]) / normb[k]
            else:
                x_cores[k] = tn.reshape(u@tn.diag(s[:r]) @ v[:r,:].t(),[rx[k],N[k],rx[k+1]])

        if verbose:
            print('Solution rank is',rx)
            print('Maxres ',max_res)
            tme_sweep = datetime.datetime.now()-tme_sweep
            print('Time ',tme_sweep)

        if last:
            break

        if max_res < eps:
            last = True

    if verbose:
        time_total = datetime.datetime.now() - time_total
        print()
        print('Finished after' ,swp+1,' sweeps and ',time_total)
        print()
    # spread the accumulated norm correction evenly over the cores
    normx = np.exp(np.sum(np.log(normx))/d)

    for k in range(d):
        x_cores[k] = x_cores[k] * normx

    x = torchtt.TT(x_cores)

    return x
def _compute_phi_bck_A(Phi_now,core_left,core_A,core_right):
    """
    Backward update of the interface tensor for the bilinear form dot(left, A @ right).

    Args:
        Phi_now (torch.tensor): the current phi, shape r1_k+1 x R_k+1 x r2_k+1.
        core_left (torch.tensor): the core on the left, shape r1_k x N_k x r1_k+1.
        core_A (torch.tensor): the core of the matrix, shape R_k x N_k x N_k x R_k.
        core_right (torch.tensor): the core to the right, shape r2_k x N_k x r2_k+1.

    Returns:
        torch.tensor: the next (backward) phi, shape r1_k x R_k x r2_k.
    """
    return oe.contract('LSR,lML,sMNS,rNR->lsr',Phi_now,core_left,core_A,core_right)
def _compute_phi_fwd_A(Phi_now, core_left, core_A, core_right):
    """
    Forward update of the interface tensor for the bilinear form dot(left, A @ right).

    Args:
        Phi_now (torch.tensor): the current phi, shape r1_k x R_k x r2_k.
        core_left (torch.tensor): the core on the left, shape r1_k x N_k x r1_k+1.
        core_A (torch.tensor): the core of the matrix, shape R_k x N_k x N_k x R_k.
        core_right (torch.tensor): the core to the right, shape r2_k x N_k x r2_k+1.

    Returns:
        torch.tensor: the next (forward) phi, shape r1_k+1 x R_k+1 x r2_k+1.
    """
    return oe.contract('lsr,lML,sMNS,rNR->LSR',Phi_now,core_left,core_A,core_right)
def _compute_phi_bck_rhs(Phi_now,core_b,core):
    """
    Backward update of the interface tensor for the right-hand side.

    Args:
        Phi_now (torch.tensor): the current phi, shape rb_k+1 x r_k+1.
        core_b (torch.tensor): the current core of the rhs, shape rb_k x N_k x rb_k+1.
        core (torch.tensor): the current core, shape r_k x N_k x r_k+1.

    Returns:
        torch.tensor: the backward phi corresponding to the rhs, shape rb_k x r_k.
    """
    return oe.contract('BR,bnB,rnR->br',Phi_now,core_b,core)
def _compute_phi_fwd_rhs(Phi_now,core_rhs,core):
    """
    Forward update of the interface tensor for the right-hand side.

    Args:
        Phi_now (torch.tensor): the current phi, shape rb_k x r_k.
        core_rhs (torch.tensor): the current core of the rhs, shape rb_k x N_k x rb_k+1.
        core (torch.tensor): the current core, shape r_k x N_k x r_k+1.

    Returns:
        torch.tensor: the forward phi for the rhs, shape rb_k+1 x r_k+1.
    """
    return oe.contract('br,bnB,rnR->BR', Phi_now, core_rhs, core)
| 30,074 | 44.022455 | 269 | py |
torchTT | torchTT-main/torchtt/grad.py | """
Adds AD functionality to torchtt.
"""
import torch as tn
from torchtt import TT
def watch(tens, core_indices = None):
    """
    Mark the TT-cores of a given tensor for gradient tracking.

    Necessary for autograd.

    Args:
        tens (torchtt.TT): the TT-object to be watched.
        core_indices (list[int], optional): the list of core indices to be watched. If None is provided, all the cores are watched. Defaults to None.
    """
    # `is None` instead of `== None` (identity comparison); a single loop
    # replaces the duplicated branches.
    indices = range(len(tens.cores)) if core_indices is None else core_indices
    for i in indices:
        tens.cores[i].requires_grad_(True)
def watch_list(tensors):
    """
    Mark the TT-cores of multiple tensors (given in a list) for gradient tracking.

    Necessary for autograd.

    Args:
        tensors (list[torchtt.TT]): the list of tensors to be watched for autograd.
    """
    # iterate directly over the objects instead of `range(len(...))`
    for tensor in tensors:
        for core in tensor.cores:
            core.requires_grad_(True)
def unwatch(tens):
    """
    Cancel the autograd graph recording for all cores of the tensor.

    Args:
        tens (torchtt.TT): the tensor.
    """
    # iterate directly over the cores instead of `range(len(...))`
    for core in tens.cores:
        core.requires_grad_(False)
def grad(val, tens, core_indices = None):
    """
    Compute the gradient w.r.t. the cores of the given TT-tensor (or TT-matrix).

    Args:
        val (torch.tensor): scalar tensor that has to be differentiated.
        tens (torchtt.TT): the given tensor.
        core_indices (list[int], optional): the list of cores for which the gradient is returned. If None is provided, all the cores are used. Defaults to None.

    Returns:
        list[torch.tensor]: the list of cores representing the derivative of the expression w.r.t. the tensor.
    """
    val.retain_grad()
    val.backward()
    # `is None` instead of `== None`; comprehensions instead of append loops
    if core_indices is None:
        return [c.grad for c in tens.cores]
    return [tens.cores[idx].grad for idx in core_indices]
def grad_list(val, tensors, all_in_one = True):
    """
    Compute the gradient w.r.t. the cores of several given TT-tensors (or TT-operators).

    `watch` must be called on all of them beforehand.

    Args:
        val (torch.tensor): scalar tensor to be differentiated.
        tensors (list[torchtt.TT]): the tensors with respect to which the differentiation is made.
        all_in_one (bool, optional): put all the cores in one flat list (True) or create a list of per-tensor core lists (False). Defaults to True.

    Returns:
        list: the resulting derivatives (a flat list of cores, or a list of lists, depending on `all_in_one`).
    """
    val.backward()
    # comprehensions instead of manual accumulation loops
    if all_in_one:
        return [c.grad for t in tensors for c in t.cores]
    return [[c.grad for c in t.cores] for t in tensors]
torchTT | torchTT-main/torchtt/_iterative_solvers.py | """
Contains iteratiove solvers like GMRES and BiCGSTAB
@author: ion
"""
import torch as tn
import datetime
import numpy as np
def BiCGSTAB(Op, rhs, x0, eps=1e-6, nmax = 40):
    # NOTE(review): unimplemented stub — returns None. The working implementation
    # used by the solvers is BiCGSTAB_reset below.
    pass
def BiCGSTAB_reset(Op,rhs,x0,eps=1e-6,nmax=40):
    """
    BiCGSTAB solver with resetting of the shadow residual on near-breakdown.

    Args:
        Op: linear operator providing a `matvec(x)` method (x is a column vector).
        rhs (torch.tensor): right-hand side, shape (n, 1).
        x0 (torch.tensor): initial guess, shape (n, 1).
        eps (float, optional): relative residual tolerance. Defaults to 1e-6.
        nmax (int, optional): maximum number of iterations. Defaults to 40.

    Returns:
        tuple: (solution, converged flag, number of iterations, relative residual).
    """
    # initial residual
    r = rhs - Op.matvec(x0)

    # choose a shadow residual r0' with (r, r0') != 0
    # BUGFIX: create it on the same device as x0 (previously CPU-only)
    r0p = tn.rand(r.shape, dtype = x0.dtype, device = x0.device)
    while tn.dot(r.squeeze(),r0p.squeeze()) == 0:
        r0p = tn.rand(r.shape, dtype = x0.dtype, device = x0.device)

    p = r
    x = x0
    x_n = x0  # robustness: defined even if nmax == 0
    norm_rhs = tn.linalg.norm(rhs)
    r_nn = tn.linalg.norm(r)
    nit = 0
    converged = False
    for k in range(nmax):
        nit += 1
        Ap = Op.matvec(p)
        alpha = tn.dot(r.squeeze(),r0p.squeeze()) / tn.dot(Ap.squeeze(),r0p.squeeze())
        s = r - alpha * Ap
        if tn.linalg.norm(s) < eps:
            # the half step is already accurate enough
            x_n = x + alpha*p
            r_nn = tn.linalg.norm(s)
            converged = True
            break
        As = Op.matvec(s)
        omega = tn.dot(As.squeeze(),s.squeeze()) / tn.dot(As.squeeze(),As.squeeze())
        x_n = x + alpha*p + omega*s
        r_n = s - omega*As
        r_nn = tn.linalg.norm(r_n)
        if r_nn < eps * norm_rhs:
            converged = True
            break
        beta = (alpha/omega)*tn.dot(r_n.squeeze(),r0p.squeeze())/tn.dot(r.squeeze(),r0p.squeeze())
        p = r_n + beta*(p - omega*Ap)
        if abs(tn.dot(r_n.squeeze(),r0p.squeeze())) < 1e-6:
            # near-breakdown: reset the shadow residual and restart the recurrence
            # BUGFIX: the original assigned the dead variable `p_n` instead of `p`
            r0p = r_n
            p = r_n
        # updates
        r = r_n
        x = x_n
    # BUGFIX: the original computed `flag = False if k==nmax else True`, which was
    # always True because k never reaches nmax; report actual convergence instead.
    flag = converged
    relres = r_nn/norm_rhs
    return x_n,flag,nit,relres
def gmres_restart(LinOp, b, x0 , N, max_iterations, threshold, resets = 4):
    """
    GMRES with restarts: run GMRES up to `resets` times, reusing the latest
    iterate as the new initial guess, and stop early on convergence.

    Returns:
        tuple: (solution, converged flag, total number of inner iterations).
    """
    total_its = 0
    for _ in range(resets):
        x0, ok, inner_its = gmres(LinOp, b, x0, N, max_iterations, threshold)
        total_its += inner_its
        if ok:
            return x0, True, total_its
    return x0, False, total_its
def gmres( LinOp, b, x0, N, max_iterations, threshold):
    """
    GMRES (Arnoldi + Givens rotations) for LinOp x = b.

    Args:
        LinOp: linear operator providing `matvec` (returns a column vector).
        b (torch.tensor): right-hand side, shape (N, 1).
        x0 (torch.tensor): initial guess, shape (N, 1).
        N (int): problem size (number of rows).
        max_iterations (int): maximum Krylov dimension.
        threshold (float): relative residual tolerance.

    Returns:
        tuple: (solution, converged flag, index of the last iteration).
    """
    converged = False
    r = b - LinOp.matvec(x0)

    b_norm = tn.linalg.norm(b)
    error = tn.linalg.norm(r) / b_norm

    # sines/cosines of the Givens rotations and the rotated rhs e1
    sn = tn.zeros((max_iterations), dtype = b.dtype, device = b.device)
    cs = tn.zeros((max_iterations), dtype = b.dtype, device = b.device)
    e1 = tn.zeros((max_iterations+1), dtype = b.dtype, device = b.device)
    e1[0] = 1
    err = [error]
    r_norm = tn.linalg.norm(r)
    if not r_norm>0:
        # x0 is already an exact solution
        return x0, True, 0
    Q = tn.zeros((N,max_iterations+1), dtype = b.dtype, device = b.device)
    Q[:,0] = r[:,0] / r_norm
    H = tn.zeros((max_iterations+1,max_iterations), dtype = b.dtype, device = b.device)
    beta = r_norm * e1

    for k in range(max_iterations):
        # Arnoldi step: orthogonalize A q_k against the previous basis vectors
        q = LinOp.matvec(Q[:,k])
        for i in range(k+1):
            H[i,k] = tn.dot(q.squeeze(),Q[:,i])
            q = q - tn.reshape(H[i,k]*Q[:,i],[-1,1])
        h = tn.linalg.norm(q)

        q = q / h
        H[k+1,k] = h
        Q[:,k+1] = q[:,0]
        # apply the accumulated Givens rotations to the new Hessenberg column
        # (the `+0` makes a copy so H is only updated through the assignment below)
        h, c, s = apply_givens_rotation(H[:(k+2),k]+0,cs,sn,k+1)
        H[:(k+2),k] = h
        cs[k] = c
        sn[k] = s

        beta[k+1] = -sn[k]*beta[k]
        beta[k] = cs[k]*beta[k]
        error = tn.abs(beta[k+1]) / b_norm
        err.append(error)

        if error <= threshold:
            converged = True
            break

    # solve the (rotated) triangular least-squares system and assemble x
    y = tn.linalg.solve(H[:k+1,:k+1],tn.reshape(beta[:k+1],[-1,1]))
    x = x0 + Q[:,:k+1] @ y

    return x, converged, k
def apply_givens_rotation(h, cs, sn, k):
    """
    Apply the previously computed Givens rotations to the new Hessenberg column
    and compute the rotation that annihilates its last entry.

    Returns:
        tuple: (rotated column, cosine, sine) as torch tensors on h's device.
    """
    device = h.device
    h_np = h.cpu().numpy()
    cs_np = cs.cpu().numpy()
    sn_np = sn.cpu().numpy()
    # apply the k-1 stored rotations to consecutive pairs of entries
    for i in range(k-1):
        rotated = cs_np[i]*h_np[i] + sn_np[i]*h_np[i+1]
        h_np[i+1] = -sn_np[i]*h_np[i] + cs_np[i]*h_np[i+1]
        h_np[i] = rotated
    # new rotation zeroing the subdiagonal entry h[k]
    cs_k, sn_k = givens_rotation(h_np[k-1], h_np[k])
    h_np[k-1] = cs_k*h_np[k-1] + sn_k*h_np[k]
    h_np[k] = 0.0
    return tn.tensor(h_np).to(device), tn.tensor(cs_k).to(device), tn.tensor(sn_k).to(device)
def givens_rotation(v1,v2):
    """
    Compute the Givens rotation (cosine, sine) that zeroes the second component
    of the pair (v1, v2) when applied to it.
    """
    nrm = np.sqrt(v1*v1 + v2*v2)
    return v1/nrm, v2/nrm
# class Lop():
# def __init__(self):
# n = 30
# self.n = n # mode size
# self.A = -2*tn.eye(n, dtype = tn.float64)+tn.diag(tn.ones(n-1,dtype = tn.float64),-1)+tn.diag(tn.ones(n-1,dtype = tn.float64),1)
# self.A[0,1] = 0
# self.A[-1,-2] = 0
# self.b = tn.ones((n,1),dtype=tn.float64)
# self.b[0,0] = 0
# self.b[-1,0] = 0
# def matvec(self, x):
# return tn.reshape(self.A@x,[-1,1])
# lop = Lop()
# x,flag,nit = gmres(lop,lop.b,lop.b,lop.n,40,1e-7)
# x_n,flag,nit,relres = BiCGSTAB_reset(lop,lop.b,lop.b) | 5,472 | 26.094059 | 138 | py |
torchTT | torchTT-main/torchtt/interpolate.py | """
Implements the cross approximation methods (DMRG).
"""
import torch as tn
import numpy as np
import torchtt
import datetime
from torchtt._decomposition import QR, SVD, rank_chop, lr_orthogonal, rl_orthogonal
from torchtt._iterative_solvers import BiCGSTAB_reset, gmres_restart
import opt_einsum as oe
def _LU(M):
    """
    LU decomposition with partial pivoting; returns L, U and the row
    permutation encoded as an integer index vector.

    Args:
        M (torch.tensor): the matrix to factor.

    Returns:
        tuple[torch.tensor,torch.tensor,torch.tensor]: L, U, P (permutation indices).
    """
    lu_data, pivots = tn.linalg.lu_factor(M)
    Pmat, L, U = tn.lu_unpack(lu_data, pivots)
    # convert the permutation matrix into an index vector
    arange_col = tn.reshape(tn.arange(Pmat.shape[1], dtype=Pmat.dtype, device=Pmat.device), [-1,1])
    perm = Pmat @ arange_col
    return L, U, tn.squeeze(perm).to(tn.int64)
def _max_matrix(M):
    """
    Return the largest entry of M together with its (row, col) position.

    Returns:
        tuple: (values tensor of length 1, list with one (row, col) index tuple).
    """
    vals, flat_idx = M.flatten().topk(1)
    positions = [np.unravel_index(i, M.shape) for i in flat_idx]
    return vals, positions
def _maxvol(M):
    """
    Greedy maxvol algorithm: find the row indices of a quasi-maximum-volume
    square submatrix of M (used for cross interpolation).

    Args:
        M (torch.tensor): input matrix.

    Returns:
        torch.tensor: indices of the rows of the maxvol submatrix.
    """
    if M.shape[1] >= M.shape[0]:
        # more cols than row -> return all the row indices
        idx = tn.tensor(range(M.shape[0]),dtype = tn.int64)
        return idx
    else:
        # initial guess: pivot rows from the LU factorization
        L, U, P = _LU(M)
        idx = P[:M.shape[1]]

        Msub = M[idx,:]

        # coefficient matrix M @ Msub^{-1}
        Mat = tn.linalg.solve(Msub.T,M.T).t()

        for i in range(100):
            val_max, idx_max = _max_matrix(tn.abs(Mat))
            idx_max = idx_max[0]
            if val_max<=1+5e-2:
                # all coefficients close to 1 -> submatrix is quasi-optimal
                idx = tn.sort(idx)[0]
                return idx
            # swap the rows and apply the corresponding rank-1 update to Mat
            Mat += tn.outer(Mat[:,idx_max[1]],Mat[idx[idx_max[1]]]-Mat[idx_max[0],:])/Mat[idx_max[0],idx_max[1]]
            idx[idx_max[1]]=idx_max[0]
    return idx
def function_interpolate(function, x, eps = 1e-9, start_tens = None, nswp = 20, kick = 2, dtype = tn.float64, verbose = False):
    """
    Application of a nonlinear function on a tensor in the TT format (using DMRG). Two cases are distinguished:

    * Univariate interpolation:
    Let :math:`f:\\mathbb{R}\\rightarrow\\mathbb{R}` be a function and :math:`\\mathsf{x}\\in\\mathbb{R}^{N_1\\times\\cdots\\times N_d}` be a tensor with a known TT approximation.
    The goal is to determine the TT approximation of :math:`\\mathsf{y}_{i_1...i_d}=f(\\mathsf{x}_{i_1...i_d})` within a prescribed relative accuracy `eps`.

    * Multivariate interpolation
    Let :math:`f:\\mathbb{R}\\rightarrow\\mathbb{R}` be a function and :math:`\\mathsf{x}^{(1)},...,\\mathsf{x}^{(d)}\\in\\mathbb{R}^{N_1\\times\\cdots\\times N_d}` be tensors with a known TT approximation. The goal is to determine the TT approximation of :math:`\\mathsf{y}_{i_1...i_d}=f(\\mathsf{x}_{i_1...i_d}^{(1)},...,\\mathsf{x}^{(d)})_{i_1...i_d}` within a prescribed relative accuracy `eps`.

    Example:

    * Univariate interpolation:

        .. code-block:: python

            func = lambda t: torch.log(t)
            y = tntt.interpolate.function_interpolate(func, x, 1e-9) # the tensor x is chosen such that y has an affordable low rank structure

    * Multivariate interpolation:

        .. code-block:: python

            xs = tntt.meshgrid([tn.arange(0,n,dtype=torch.float64) for n in N])
            func = lambda x: 1/(2+tn.sum(x,1).to(dtype=torch.float64))
            z = tntt.interpolate.function_interpolate(func, xs)

    Args:
        function (Callable): function handle. If the argument `x` is a `torchtt.TT` instance, the function handle has to be applicable elementwise on torch tensors.
        If a list is passed as `x`, the function handle takes as argument a :math:`M\times d` torch.tensor and every of the :math:`M` lines corresponds to an evaluation of the function :math:`f` at a certain tensor entry. The function handle returns a torch tensor of length M.
        x (torchtt.TT or list[torchtt.TT]): the argument/arguments of the function.
        eps (float, optional): the relative accuracy. Defaults to 1e-9.
        start_tens (torchtt.TT, optional): initial approximation of the output tensor (None corresponds to random initialization). Defaults to None.
        nswp (int, optional): number of iterations. Defaults to 20.
        kick (int, optional): enrichment rank. Defaults to 2.
        dtype (torch.dtype, optional): the dtype of the result. Defaults to tn.float64.
        verbose (bool, optional): display debug information to the console. Defaults to False.

    Returns:
        torchtt.TT: the result.
    """
    # eval_mv: True means multivariate mode (x is a list/tuple of TT tensors).
    if isinstance(x,list) or isinstance(x,tuple):
        eval_mv = True
        N = x[0].N
    else:
        eval_mv = False
        N = x.N
    # NOTE(review): device is hard-coded to None (CPU); the cores of x are not
    # consulted for their device -- confirm this is intended.
    device = None
    # 1d tensors: evaluate the function on the dense tensor directly.
    if not eval_mv and len(N)==1:
        return torchtt.TT(function(x.full())).to(device)
    if eval_mv and len(N)==1:
        return torchtt.TT(function(x[0].full())).to(device)
    d = len(N)
    #random init of the tensor
    if start_tens == None:
        rank_init = 2
        cores = torchtt.random(N,rank_init, dtype, device).cores
        rank = [1]+[rank_init]*(d-1)+[1]
    else:
        rank = start_tens.R.copy()
        cores = [c+0 for c in start_tens.cores]
    # cores = (ones(N,dtype=dtype)).cores
    cores, rank = rl_orthogonal(cores,rank,False)
    cores, rank = lr_orthogonal(cores,rank,False)
    Mats = []*(d+1)  # NOTE(review): [] * (d+1) is just []; 'Mats' is unused below
    # Ps[k]: interface matrices between the sampled bases; 1x1 ones at the borders.
    Ps = [tn.ones((1,1),dtype=dtype,device=device)]+(d-1)*[None] + [tn.ones((1,1),dtype=dtype,device=device)]
    # ortho
    Rm = tn.ones((1,1),dtype=dtype,device=device)
    # Idx[k]: multi-indices selected by maxvol on each side of position k.
    Idx = [tn.zeros((1,0),dtype=tn.int64)]+(d-1)*[None] + [tn.zeros((0,1),dtype=tn.int64)]
    # Right-to-left orthogonalization + maxvol index selection (initial setup).
    for k in range(d-1,0,-1):
        tmp = tn.einsum('ijk,kl->ijl',cores[k],Rm)
        tmp = tn.reshape(tmp,[rank[k],-1]).t()
        core, Rmat = QR(tmp)
        rnew = min(N[k]*rank[k+1], rank[k])
        Jk = _maxvol(core)
        # print(Jk)
        tmp = np.unravel_index(Jk[:rnew],(rank[k+1],N[k]))
        #if k==d-1:
        #    idx_new = tn.tensor(tmp[1].reshape([1,-1]))
        # else:
        idx_new = tn.tensor(np.vstack( ( tmp[1].reshape([1,-1]),Idx[k+1][:,tmp[0]] ) ))
        Idx[k] = idx_new+0
        Rm = core[Jk,:]
        core = tn.linalg.solve(Rm.T,core.T)
        Rm = (Rm@Rmat).t()
        cores[k] = tn.reshape(core,[rnew,N[k],rank[k+1]])
        core = tn.reshape(core,[-1,rank[k+1]]) @ Ps[k+1]
        core = tn.reshape(core,[rank[k],-1]).t()
        _,Ps[k] = QR(core)
    cores[0] = tn.einsum('ijk,kl->ijl',cores[0],Rm)
    # for p in Ps:
    #     print(p)
    # for i in Idx:
    #     print(i)
    # return
    n_eval = 0
    for swp in range(nswp):
        max_err = 0.0
        if verbose:
            print('Sweep %d: '%(swp+1))
        #left to right
        for k in range(d-1):
            if verbose: print('\tLR supercore %d,%d'%(k+1,k+2))
            # Build the full index set of the (k,k+1) supercore: left maxvol
            # indices x mode k x mode k+1 x right maxvol indices.
            I1 = tn.reshape(tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.arange(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),[-1,1])
            I2 = tn.reshape(tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.arange(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),[-1,1])
            I3 = Idx[k][tn.kron(tn.kron(tn.arange(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),:]
            I4 = Idx[k+2][:,tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.arange(rank[k+2],dtype=tn.int64)))].t()
            eval_index = tn.concat((I3, I1, I2, I4),1)
            eval_index = tn.reshape(eval_index,[-1,d]).to(dtype=tn.int64)
            if verbose: print('\t\tnumber evaluations',eval_index.shape[0])
            # Evaluate the argument tensor(s) at the sampled entries and apply
            # the nonlinear function to get the supercore.
            if eval_mv:
                ev = tn.zeros((eval_index.shape[0],0),dtype = dtype)
                for j in range(d):
                    core = x[j].cores[0][0,eval_index[:,0],:]
                    for i in range(1,d):
                        core = tn.einsum('ij,jil->il',core,x[j].cores[i][:,eval_index[:,i],:])
                    core = tn.reshape(core[...,0],[-1,1])
                    ev = tn.hstack((ev,core))
                supercore = tn.reshape(function(ev),[rank[k],N[k],N[k+1],rank[k+2]])
                n_eval += core.shape[0]
            else:
                core = x.cores[0][0,eval_index[:,0],:]
                for i in range(1,d):
                    core = tn.einsum('ij,jil->il',core,x.cores[i][:,eval_index[:,i],:])
                core = core[...,0]
                supercore = tn.reshape(function(core),[rank[k],N[k],N[k+1],rank[k+2]])
                n_eval += core.shape[0]
            # multiply with P_k left and right
            supercore = tn.einsum('ij,jklm,mn->ikln',Ps[k],supercore.to(dtype=dtype),Ps[k+2])
            rank[k] = supercore.shape[0]
            rank[k+2] = supercore.shape[3]
            supercore = tn.reshape(supercore,[supercore.shape[0]*supercore.shape[1],-1])
            # split the super core with svd
            U,S,V = SVD(supercore)
            rnew = rank_chop(S.cpu().numpy(),tn.linalg.norm(S).cpu().numpy()*eps/np.sqrt(d-1))+1
            rnew = min(S.shape[0],rnew)
            U = U[:,:rnew]
            S = S[:rnew]
            V = V[:rnew,:]
            # print('kkt new',tn.linalg.norm(supercore-U@tn.diag(S)@V))
            # kick the rank
            V = tn.diag(S) @ V
            UK = tn.randn((U.shape[0],kick), dtype = dtype, device = device)
            U, Rtemp = QR( tn.cat( (U,UK) , 1) )
            radd = Rtemp.shape[1] - rnew
            if radd>0:
                V = tn.cat( (V,tn.zeros((radd,V.shape[1]), dtype = dtype, device = device)) , 0 )
                V = Rtemp @ V
            # print('kkt new',tn.linalg.norm(supercore-U@V))
            # compute err (dx)
            super_prev = tn.einsum('ijk,kmn->ijmn',cores[k],cores[k+1])
            super_prev = tn.einsum('ij,jklm,mn->ikln',Ps[k],super_prev,Ps[k+2])
            err = tn.linalg.norm(supercore.flatten()-super_prev.flatten())/tn.linalg.norm(supercore)
            max_err = max(max_err,err)
            # update the rank
            if verbose:
                print('\t\trank updated %d -> %d, local error %e'%(rank[k+1],U.shape[1],err))
            rank[k+1] = U.shape[1]
            # Undo the interface transforms before storing the cores.
            U = tn.linalg.solve(Ps[k],tn.reshape(U,[rank[k],-1]))
            V = tn.linalg.solve(Ps[k+2].t(),tn.reshape(V,[rank[k+1]*N[k+1],rank[k+2]]).t()).t()
            # U = tn.einsum('ij,jkl->ikl',tn.linalg.inv(Ps[k]),tn.reshape(U,[rank[k],N[k],-1]))
            # V = tn.einsum('ijk,kl->ijl',tn.reshape(V,[-1,N[k+1],rank[k+2]]),tn.linalg.inv(Ps[k+2]))
            V = tn.reshape(V,[rank[k+1],-1])
            U = tn.reshape(U,[-1,rank[k+1]])
            # split cores
            Qmat, Rmat = QR(U)
            idx = _maxvol(Qmat)
            Sub = Qmat[idx,:]
            core = tn.linalg.solve(Sub.T,Qmat.T).t()
            core_next = Sub@Rmat@V
            cores[k] = tn.reshape(core,[rank[k],N[k],rank[k+1]])
            cores[k+1] = tn.reshape(core_next,[rank[k+1],N[k+1],rank[k+2]])
            # calc Ps
            tmp = tn.einsum('ij,jkl->ikl',Ps[k],cores[k])
            _,Ps[k+1] = QR(tn.reshape(tmp,[rank[k]*N[k],rank[k+1]]))
            # calc Idx
            tmp = np.unravel_index(idx[:rank[k+1]],(rank[k],N[k]))
            idx_new = tn.tensor(np.hstack( ( Idx[k][tmp[0],:] , tmp[1].reshape([-1,1]) ) ))
            Idx[k+1] = idx_new+0
        #right to left
        for k in range(d-2,-1,-1):
            if verbose: print('\tRL supercore %d,%d'%(k+1,k+2))
            # Same supercore sampling as in the LR half-sweep.
            I1 = tn.reshape(tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.arange(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),[-1,1])
            I2 = tn.reshape(tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.arange(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),[-1,1])
            I3 = Idx[k][tn.kron(tn.kron(tn.arange(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),:]
            I4 = Idx[k+2][:,tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.arange(rank[k+2],dtype=tn.int64)))].t()
            eval_index = tn.concat((I3, I1, I2, I4),1)
            eval_index = tn.reshape(eval_index,[-1,d]).to(dtype=tn.int64)
            if verbose: print('\t\tnumber evaluations',eval_index.shape[0])
            if eval_mv:
                ev = tn.zeros((eval_index.shape[0],0),dtype = dtype)
                for j in range(d):
                    core = x[j].cores[0][0,eval_index[:,0],:]
                    for i in range(1,d):
                        core = tn.einsum('ij,jil->il',core,x[j].cores[i][:,eval_index[:,i],:])
                    core = tn.reshape(core[...,0],[-1,1])
                    ev = tn.hstack((ev,core))
                supercore = tn.reshape(function(ev),[rank[k],N[k],N[k+1],rank[k+2]])
                n_eval += core.shape[0]
            else:
                core = x.cores[0][0,eval_index[:,0],:]
                for i in range(1,d):
                    core = tn.einsum('ij,jil->il',core,x.cores[i][:,eval_index[:,i],:])
                core = core[...,0]
                supercore = tn.reshape(function(core),[rank[k],N[k],N[k+1],rank[k+2]])
                n_eval +=core.shape[0]
            # multiply with P_k left and right
            supercore = tn.einsum('ij,jklm,mn->ikln',Ps[k],supercore.to(dtype=dtype),Ps[k+2])
            rank[k] = supercore.shape[0]
            rank[k+2] = supercore.shape[3]
            supercore = tn.reshape(supercore,[supercore.shape[0]*supercore.shape[1],-1])
            # split the super core with svd
            U,S,V = SVD(supercore)
            rnew = rank_chop(S.cpu().numpy(),tn.linalg.norm(S).cpu().numpy()*eps/np.sqrt(d-1))+1
            rnew = min(S.shape[0],rnew)
            U = U[:,:rnew]
            S = S[:rnew]
            V = V[:rnew,:]
            # print('kkt new',tn.linalg.norm(supercore-U@tn.diag(S)@V))
            #kick the rank
            # print('u before', U.shape)
            U = U @ tn.diag(S)
            VK = tn.randn((kick,V.shape[1]) , dtype=dtype, device = device)
            # print('V enrich', V.shape)
            V, Rtemp = QR( tn.cat( (V,VK) , 0).t() )
            radd = Rtemp.shape[1] - rnew
            # print('V after QR',V.shape,Rtemp.shape,radd)
            if radd>0:
                U = tn.cat( (U,tn.zeros((U.shape[0],radd), dtype = dtype, device = device)) , 1 )
                U = U @ Rtemp.T
            V = V.t()
            # print('kkt new',tn.linalg.norm(supercore-U@V))
            # compute err (dx)
            super_prev = tn.einsum('ijk,kmn->ijmn',cores[k],cores[k+1])
            super_prev = tn.einsum('ij,jklm,mn->ikln',Ps[k],super_prev,Ps[k+2])
            err = tn.linalg.norm(supercore.flatten()-super_prev.flatten())/tn.linalg.norm(supercore)
            max_err = max(max_err,err)
            # update the rank
            if verbose:
                print('\t\trank updated %d -> %d, local error %e'%(rank[k+1],U.shape[1],err))
            rank[k+1] = U.shape[1]
            U = tn.linalg.solve(Ps[k],tn.reshape(U,[rank[k],-1]))
            V = tn.linalg.solve(Ps[k+2].t(),tn.reshape(V,[rank[k+1]*N[k+1],rank[k+2]]).t()).t()
            # U = tn.einsum('ij,jkl->ikl',tn.linalg.inv(Ps[k]),tn.reshape(U,[rank[k],N[k],-1]))
            # V = tn.einsum('ijk,kl->ijl',tn.reshape(V,[-1,N[k+1],rank[k+2]]),tn.linalg.inv(Ps[k+2]))
            V = tn.reshape(V,[rank[k+1],-1])
            U = tn.reshape(U,[-1,rank[k+1]])
            # split cores
            Qmat, Rmat = QR(V.T)
            idx = _maxvol(Qmat)
            Sub = Qmat[idx,:]
            core_next = tn.linalg.solve(Sub.T,Qmat.T)
            core =U@(Sub@Rmat).t()
            cores[k] = tn.reshape(core,[rank[k],N[k],-1])
            cores[k+1] = tn.reshape(core_next,[-1,N[k+1],rank[k+2]])
            # calc Ps
            tmp = tn.einsum('ijk,kl->ijl',cores[k+1],Ps[k+2])
            _,tmp = QR(tn.reshape(tmp,[rank[k+1],-1]).t())
            Ps[k+1] = tmp
            # calc Idx
            tmp = np.unravel_index(idx[:rank[k+1]],(N[k+1],rank[k+2]))
            idx_new = tn.tensor(np.vstack( ( tmp[0].reshape([1,-1]),Idx[k+2][:,tmp[1]] ) ))
            Idx[k+1] = idx_new+0
        #xxx = TT(cores)
        #print('# ',xxx[1,2,3,4])
        # exit condition
        if max_err<eps:
            if verbose: print('Max error %e < %e ----> DONE'%(max_err,eps))
            break
        else:
            if verbose: print('Max error %g'%(max_err))
    if verbose:
        print('number of function calls ',n_eval)
        print()
    return torchtt.TT(cores)
def dmrg_cross(function, N, eps = 1e-9, nswp = 10, x_start = None, kick = 2, dtype = tn.float64, device = None, eval_vect = True, verbose = False):
    """
    Approximate a tensor in the TT format given that the individual entries are given using a function.
    The function is given as a function handle taking as arguments a matrix of integer indices.

    Example:

        .. code-block:: python

            func = lambda I: 1/(2+I[:,0]+I[:,1]+I[:,2]+I[:,3]).to(dtype=torch.float64)
            N = [20]*4
            x = torchtt.interpolate.dmrg_cross(func, N, eps = 1e-7)

    Args:
        function (Callable): function handle. Takes a M x d integer index matrix and returns a length-M torch tensor of entries.
        N (list[int]): the shape of the tensor.
        eps (float, optional): the relative accuracy. Defaults to 1e-9.
        nswp (int, optional): number of iterations. Defaults to 10.
        x_start (torchtt.TT, optional): initial approximation of the output tensor (None corresponds to random initialization). Defaults to None.
        kick (int, optional): enrichment rank. Defaults to 2.
        dtype (torch.dtype, optional): the dtype of the result. Defaults to tn.float64.
        device (torch.device, optional): the device where the approximation will be stored. Defaults to None.
        eval_vect (bool, optional): must be True (batched index evaluation); False is not implemented. Defaults to True.
        verbose (bool, optional): display debug information to the console. Defaults to False.

    Returns:
        torchtt.TT: the result.
    """
    d = len(N)
    # random init of the tensor (or start from the provided initial guess)
    if x_start == None:
        rank_init = 2
        cores = torchtt.random(N,rank_init, dtype, device).cores
        rank = [1]+[rank_init]*(d-1)+[1]
    else:
        rank = x_start.R.copy()
        cores = [c+0 for c in x_start.cores]
    cores, rank = lr_orthogonal(cores,rank,False)
    # Ps[k]: interface matrices between the sampled bases; 1x1 ones at the borders.
    Ps = [tn.ones((1,1),dtype=dtype,device=device)]+(d-1)*[None] + [tn.ones((1,1),dtype=dtype,device=device)]
    Rm = tn.ones((1,1),dtype=dtype,device=device)
    # Idx[k]: multi-indices selected by maxvol on each side of position k.
    Idx = [tn.zeros((1,0),dtype=tn.int64)]+(d-1)*[None] + [tn.zeros((0,1),dtype=tn.int64)]
    # Right-to-left orthogonalization + maxvol index selection (initial setup).
    for k in range(d-1,0,-1):
        tmp = tn.einsum('ijk,kl->ijl',cores[k],Rm)
        tmp = tn.reshape(tmp,[rank[k],-1]).t()
        core, Rmat = QR(tmp)
        rnew = min(N[k]*rank[k+1], rank[k])
        Jk = _maxvol(core)
        tmp = np.unravel_index(Jk[:rnew],(rank[k+1],N[k]))
        idx_new = tn.tensor(np.vstack( ( tmp[1].reshape([1,-1]),Idx[k+1][:,tmp[0]] ) ))
        Idx[k] = idx_new+0
        Rm = core[Jk,:]
        core = tn.linalg.solve(Rm.T,core.T)
        Rm = (Rm@Rmat).t()
        cores[k] = tn.reshape(core,[rnew,N[k],rank[k+1]])
        core = tn.reshape(core,[-1,rank[k+1]]) @ Ps[k+1]
        core = tn.reshape(core,[rank[k],-1]).t()
        _,Ps[k] = QR(core)
    cores[0] = tn.einsum('ijk,kl->ijl',cores[0],Rm)
    n_eval = 0
    for swp in range(nswp):
        max_err = 0.0
        if verbose:
            print('Sweep %d: '%(swp+1))
        #left to right
        for k in range(d-1):
            if verbose: print('\tLR supercore %d,%d'%(k+1,k+2))
            # Build the full index set of the (k,k+1) supercore: left maxvol
            # indices x mode k x mode k+1 x right maxvol indices.
            I1 = tn.reshape(tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.arange(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),[-1,1])
            I2 = tn.reshape(tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.arange(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),[-1,1])
            I3 = Idx[k][tn.kron(tn.kron(tn.arange(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),:]
            I4 = Idx[k+2][:,tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.arange(rank[k+2],dtype=tn.int64)))].t()
            eval_index = tn.concat((I3, I1, I2, I4),1)
            eval_index = tn.reshape(eval_index,[-1,d]).to(dtype=tn.int64)
            if verbose: print('\t\tnumber evaluations',eval_index.shape[0])
            if eval_vect:
                supercore = tn.reshape(function(eval_index),[rank[k],N[k],N[k+1],rank[k+2]])
                n_eval += eval_index.shape[0]
            # multiply with P_k left and right
            supercore = tn.einsum('ij,jklm,mn->ikln',Ps[k],supercore.to(dtype=dtype),Ps[k+2])
            rank[k] = supercore.shape[0]
            rank[k+2] = supercore.shape[3]
            supercore = tn.reshape(supercore,[supercore.shape[0]*supercore.shape[1],-1])
            # split the super core with svd and chop the rank at accuracy eps
            U,S,V = SVD(supercore)
            rnew = rank_chop(S.cpu().numpy(),tn.linalg.norm(S).cpu().numpy()*eps/np.sqrt(d-1))+1
            rnew = min(S.shape[0],rnew)
            U = U[:,:rnew]
            S = S[:rnew]
            V = V[:rnew,:]
            # kick the rank: enrich U with random columns and re-orthogonalize.
            V = tn.diag(S) @ V
            UK = tn.randn((U.shape[0],kick), dtype = dtype, device = device)
            U, Rtemp = QR( tn.cat( (U,UK) , 1) )
            # fix: radd must come from Rtemp (rows of the padded V), not from
            # U.shape[1], which is smaller whenever QR truncates -- otherwise
            # Rtemp @ V below fails with a shape mismatch (cf. the same code in
            # function_interpolate).
            radd = Rtemp.shape[1] - rnew
            if radd>0:
                V = tn.cat( (V,tn.zeros((radd,V.shape[1]), dtype = dtype, device = device)) , 0 )
            # Apply Rtemp unconditionally so U @ V reproduces the supercore
            # even when kick == 0 (radd == 0).
            V = Rtemp @ V
            # compute err (dx) against the previous iterate of this supercore
            super_prev = tn.einsum('ijk,kmn->ijmn',cores[k],cores[k+1])
            super_prev = tn.einsum('ij,jklm,mn->ikln',Ps[k],super_prev,Ps[k+2])
            err = tn.linalg.norm(supercore.flatten()-super_prev.flatten())/tn.linalg.norm(supercore)
            max_err = max(max_err,err)
            # update the rank
            if verbose:
                print('\t\trank updated %d -> %d, local error %e'%(rank[k+1],U.shape[1],err))
            rank[k+1] = U.shape[1]
            # Undo the interface transforms before storing the cores.
            U = tn.linalg.solve(Ps[k],tn.reshape(U,[rank[k],-1]))
            V = tn.linalg.solve(Ps[k+2].t(),tn.reshape(V,[rank[k+1]*N[k+1],rank[k+2]]).t()).t()
            V = tn.reshape(V,[rank[k+1],-1])
            U = tn.reshape(U,[-1,rank[k+1]])
            # split cores
            Qmat, Rmat = QR(U)
            idx = _maxvol(Qmat)
            Sub = Qmat[idx,:]
            core = tn.linalg.solve(Sub.T,Qmat.T).t()
            core_next = Sub@Rmat@V
            cores[k] = tn.reshape(core,[rank[k],N[k],rank[k+1]])
            cores[k+1] = tn.reshape(core_next,[rank[k+1],N[k+1],rank[k+2]])
            # calc Ps
            tmp = tn.einsum('ij,jkl->ikl',Ps[k],cores[k])
            _,Ps[k+1] = QR(tn.reshape(tmp,[rank[k]*N[k],rank[k+1]]))
            # calc Idx
            tmp = np.unravel_index(idx[:rank[k+1]],(rank[k],N[k]))
            idx_new = tn.tensor(np.hstack( ( Idx[k][tmp[0],:] , tmp[1].reshape([-1,1]) ) ))
            Idx[k+1] = idx_new+0
        #right to left
        for k in range(d-2,-1,-1):
            if verbose: print('\tRL supercore %d,%d'%(k+1,k+2))
            # Same supercore sampling as in the LR half-sweep.
            I1 = tn.reshape(tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.arange(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),[-1,1])
            I2 = tn.reshape(tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.arange(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),[-1,1])
            I3 = Idx[k][tn.kron(tn.kron(tn.arange(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.ones(rank[k+2],dtype=tn.int64))),:]
            I4 = Idx[k+2][:,tn.kron(tn.kron(tn.ones(rank[k],dtype=tn.int64), tn.ones(N[k],dtype=tn.int64)), tn.kron(tn.ones(N[k+1],dtype=tn.int64), tn.arange(rank[k+2],dtype=tn.int64)))].t()
            eval_index = tn.concat((I3, I1, I2, I4),1)
            eval_index = tn.reshape(eval_index,[-1,d]).to(dtype=tn.int64)
            if verbose: print('\t\tnumber evaluations',eval_index.shape[0])
            if eval_vect:
                supercore = tn.reshape(function(eval_index).to(dtype=dtype),[rank[k],N[k],N[k+1],rank[k+2]])
                n_eval += eval_index.shape[0]
            # multiply with P_k left and right
            supercore = tn.einsum('ij,jklm,mn->ikln',Ps[k],supercore.to(dtype=dtype),Ps[k+2])
            rank[k] = supercore.shape[0]
            rank[k+2] = supercore.shape[3]
            supercore = tn.reshape(supercore,[supercore.shape[0]*supercore.shape[1],-1])
            # split the super core with svd and chop the rank at accuracy eps
            U,S,V = SVD(supercore)
            rnew = rank_chop(S.cpu().numpy(),tn.linalg.norm(S).cpu().numpy()*eps/np.sqrt(d-1))+1
            rnew = min(S.shape[0],rnew)
            U = U[:,:rnew]
            S = S[:rnew]
            V = V[:rnew,:]
            # kick the rank: enrich V with random rows and re-orthogonalize.
            U = U @ tn.diag(S)
            VK = tn.randn((kick,V.shape[1]) , dtype=dtype, device = device)
            V, Rtemp = QR( tn.cat( (V,VK) , 0).t() )
            # fix: radd must come from Rtemp, not V.shape[1] (see LR half-sweep).
            radd = Rtemp.shape[1] - rnew
            if radd>0:
                U = tn.cat( (U,tn.zeros((U.shape[0],radd), dtype = dtype, device = device)) , 1 )
            # Apply Rtemp unconditionally (correct also for kick == 0).
            U = U @ Rtemp.T
            V = V.t()
            # compute err (dx) against the previous iterate of this supercore
            super_prev = tn.einsum('ijk,kmn->ijmn',cores[k],cores[k+1])
            super_prev = tn.einsum('ij,jklm,mn->ikln',Ps[k],super_prev,Ps[k+2])
            err = tn.linalg.norm(supercore.flatten()-super_prev.flatten())/tn.linalg.norm(supercore)
            max_err = max(max_err,err)
            # update the rank
            if verbose:
                print('\t\trank updated %d -> %d, local error %e'%(rank[k+1],U.shape[1],err))
            rank[k+1] = U.shape[1]
            # Undo the interface transforms before storing the cores.
            U = tn.linalg.solve(Ps[k],tn.reshape(U,[rank[k],-1]))
            V = tn.linalg.solve(Ps[k+2].t(),tn.reshape(V,[rank[k+1]*N[k+1],rank[k+2]]).t()).t()
            V = tn.reshape(V,[rank[k+1],-1])
            U = tn.reshape(U,[-1,rank[k+1]])
            # split cores
            Qmat, Rmat = QR(V.T)
            idx = _maxvol(Qmat)
            Sub = Qmat[idx,:]
            core_next = tn.linalg.solve(Sub.T,Qmat.T)
            core =U@(Sub@Rmat).t()
            cores[k] = tn.reshape(core,[rank[k],N[k],-1])
            cores[k+1] = tn.reshape(core_next,[-1,N[k+1],rank[k+2]])
            # calc Ps
            tmp = tn.einsum('ijk,kl->ijl',cores[k+1],Ps[k+2])
            _,tmp = QR(tn.reshape(tmp,[rank[k+1],-1]).t())
            Ps[k+1] = tmp
            # calc Idx
            tmp = np.unravel_index(idx[:rank[k+1]],(N[k+1],rank[k+2]))
            idx_new = tn.tensor(np.vstack( ( tmp[0].reshape([1,-1]),Idx[k+2][:,tmp[1]] ) ))
            Idx[k+1] = idx_new+0
        # exit condition: stop as soon as every local update is below eps
        if max_err<eps:
            if verbose: print('Max error %e < %e ----> DONE'%(max_err,eps))
            break
        else:
            if verbose: print('Max error %g'%(max_err))
    if verbose:
        print('number of function calls ',n_eval)
        print()
    return torchtt.TT(cores)
| 29,837 | 43.139053 | 399 | py |
torchTT | torchTT-main/torchtt/cpp.py | """
Module for the C++ backend.
"""
import warnings
try:
import torchttcpp
_cpp_available = True
except:
warnings.warn("\x1B[33m\nC++ implementation not available. Using pure Python.\n\033[0m")
_cpp_available = False
def cpp_avaible():
    """Report whether the compiled ``torchttcpp`` backend was imported successfully.

    Returns:
        bool: ``True`` when the C++ backend can be used, ``False`` when the
        package fell back to the pure Python implementation.
    """
    return _cpp_available
| 430 | 16.958333 | 92 | py |
plotnine | plotnine-main/doc/conf.py | #
# plotnine documentation build configuration file, created by
# sphinx-quickstart on Wed Dec 23 22:32:29 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
from pathlib import Path
on_rtd = os.environ.get("READTHEDOCS") == "True"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here.
CUR_PATH = Path(__file__).parent
PROJECT_PATH = CUR_PATH.parent
sys.path.insert(0, str(CUR_PATH))
sys.path.insert(0, str(PROJECT_PATH))
if on_rtd:
from pprint import pprint
from unittest import mock
MOCK_MODULES = []
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = mock.Mock()
pprint(os.environ)
pprint(sys.path)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "6.1.0"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.insert(0, str(Path.cwd()))
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.autosummary",
"sphinxext.examples_and_gallery",
"sphinxext.inline_code_highlight",
"nbsphinx",
"numpydoc",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "plotnine"
copyright = "2023, Hassan Kibirige"
github_repo_url = f"https://github.com/has2k1/{project}"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
try:
from importlib.metadata import version as _version
finally:
version = _version("plotnine")
# 1. remove +dirty if readthedocs modifies the repo,
# 2. remove the 0.0 version created by setuptools_scm when clone is too shallow
if on_rtd:
import re
p1 = re.compile(r"\.d\d{8}$")
if p1.match(version):
version = p1.sub("", version)
p2 = re.compile(r"^0\.0\.post\d+\+g")
if p2.match(version):
commit = p2.sub("", version)
version = f"Commit: {commit}"
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = [
"_build",
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# https://github.com/ryan-roemer/sphinx-bootstrap-theme
html_theme_options = {
"navbar_title": "plotnine",
"globaltoc_depth": 2,
"globaltoc_includehidden": "true",
"source_link_position": "footer",
"navbar_sidebarrel": False,
"navbar_links": [
("API", "api"),
("Gallery", "gallery"),
("Tutorials", "tutorials"),
],
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/logo-32.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "images/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
html_sidebars = {
# Default to no sidebar
"**": [],
# local table of contents for the API page
"api": ["localtoc.html"],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "plotninedoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
"papersize": "a4paper",
# The font size ('10pt', '11pt' or '12pt').
"pointsize": "12pt",
# Additional stuff for the LaTeX preamble.
"preamble": r"""
\usepackage{charter}
\usepackage[defaultsans]{lato}
\usepackage{inconsolata}
""",
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
"index",
"plotnine.tex",
"plotnine Documentation",
"Hassan Kibirige",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "plotnine", "plotnine Documentation", ["Hassan Kibirige"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. Each entry is a tuple of
# (source start file, target name, title, author, dir menu entry,
# description, category).
texinfo_documents = [
    (
        "index",
        "plotnine",
        "plotnine Documentation",
        "Hassan Kibirige",
        "plotnine",
        "One line description of project.",
        "Miscellaneous",
    ),
]

# Documents appended as an appendix to all manuals.
# texinfo_appendices = []

# Set to False to suppress the module index.
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# Set to True to omit the @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core metadata.
epub_title = "plotnine"
epub_author = "Hassan Kibirige"
epub_publisher = "Hassan Kibirige"
epub_copyright = "2023, Hassan Kibirige"

# Basename for the epub file; defaults to the project name.
# epub_basename = 'plotnine'

# HTML theme for the epub output. The default HTML themes are not optimized
# for small screens, so the dedicated 'epub' theme usually works better.
# epub_theme = 'epub'

# Language of the text; defaults to the language option or 'en'.
# epub_language = ''

# Identifier scheme; typical schemes are ISBN or URL.
# epub_scheme = ''

# Unique identifier of the text (an ISBN number or the project homepage).
# epub_identifier = ''

# A unique identification for the text.
# epub_uid = ''

# (cover image, cover page html template) filenames.
# epub_cover = ()

# Sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()

# HTML files inserted before the pages created by sphinx,
# as a list of (path, title) tuples.
# epub_pre_files = []

# HTML files inserted after the pages created by sphinx,
# as a list of (path, title) tuples.
# epub_post_files = []

# Files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]

# Depth of the table of contents in toc.ncx.
# epub_tocdepth = 3

# Allow duplicate toc entries.
# epub_tocdup = True

# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'

# Fix unsupported image types using PIL.
# epub_fix_images = False

# Scale large images.
# epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'

# Set to False to skip index generation.
# epub_use_index = True
# Intersphinx: map external project names to their online object inventories
# so cross-references (e.g. to the Python standard library or pandas classes)
# resolve to the right upstream documentation.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3/", None),
    "matplotlib": ("https://matplotlib.org/stable", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy", None),
    "statsmodels": ("https://www.statsmodels.org/stable/", None),
    "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
    "sklearn": ("https://scikit-learn.org/stable/", None),
    "skmisc": ("https://has2k1.github.io/scikit-misc/stable/", None),
    "adjustText": ("https://adjusttext.readthedocs.io/en/latest/", None),
    "patsy": ("https://patsy.readthedocs.io/en/stable", None),
}
# -- Extension configuration ----------------------------------------------
autodoc_member_order = "bysource"
autosummary_generate = True
# Shorthand roles for linking to the project's GitHub tracker,
# e.g. :issue:`123` or :pr:`456`.
extlinks = {
    "issue": (f"{github_repo_url}/issues/%s", "#%s"),
    # BUG FIX: GitHub pull-request URLs use the path "/pull/", not "/pul/";
    # the previous value produced 404 links for every :pr: role.
    "pr": (f"{github_repo_url}/pull/%s", "PR #%s"),
}
# numpydoc configuration
numpydoc_show_class_members = False
numpydoc_class_members_toctree = False
numpydoc_xref_param_type = True

# Map informal type names used in docstrings to proper cross-reference
# targets, grouped by the library each alias belongs to.
numpydoc_xref_aliases = {
    # python
    "sequence": ":term:`python:sequence`",
    "iterable": ":term:`python:iterable`",
    "string": "str",
    "tuples": "tuple",
    "boolean": "bool",
    # numpy
    "array": "numpy.ndarray",
    "np.array": "numpy.ndarray",
    "ndarray": "numpy.ndarray",
    "array-like": ":term:`array-like<numpy:array_like>`",
    "array_like": ":term:`numpy:array_like`",
    # pandas
    "dataframe": "pandas.DataFrame",
    "DataFrame": "pandas.DataFrame",
    "Series": "pandas.Series",
    "series": "pandas.Series",
    # plotnine
    "geom": ":term:`geom`",
    "stat": ":term:`stat`",
    "position": ":term:`position`",
    "expression": ":term:`expression`",
    "aes": "plotnine.aes",
    "ggplot": "plotnine.ggplot",
    "element_line": "plotnine.themes.element_line",
    "element_rect": "plotnine.themes.element_rect",
    "element_text": "plotnine.themes.element_text",
}

# Words that look like types in docstrings but should not be cross-referenced.
numpydoc_xref_ignore = {"type", "optional", "default"}
def link_to_tutorials():
    """Symlink the tutorial notebooks (and their images) into doc/tutorials.

    nbsphinx does not handle linking to a directory well, so every file is
    linked individually instead.
    """
    from importlib_resources import files as _files

    src_notebooks = _files("plotnine_examples.tutorials")
    src_images = src_notebooks / "images"
    dst_notebooks = _files("plotnine") / "../doc/tutorials"
    dst_images = dst_notebooks / "images"
    dst_notebooks.mkdir(parents=True, exist_ok=True)
    dst_images.mkdir(parents=True, exist_ok=True)

    def _relink(src_dir, dst_dir, pattern):
        # Drop stale links from a previous build before linking afresh.
        for stale in dst_dir.glob(pattern):
            stale.unlink()
        for src in src_dir.glob(pattern):
            (dst_dir / Path(src).name).symlink_to(src)

    _relink(src_notebooks, dst_notebooks, "*.ipynb")
    _relink(src_images, dst_images, "*.png")
def setup(app):
    # Sphinx extension entry point, called once at build start-up.
    # Make the tutorial notebooks available under doc/tutorials first,
    # then register our stylesheet overrides on top of the theme CSS.
    link_to_tutorials()
    app.add_css_file("custom.css")
| 15,504 | 29.581854 | 79 | py |
VQA_LSTM_CNN | VQA_LSTM_CNN-master/prepro.py | """
Preoricess a raw json dataset into hdf5/json files.
Caption: Use spaCy or NLTK or split function to get tokens.
"""
import copy
from random import shuffle, seed
import sys
import os.path
import argparse
import glob
import numpy as np
from scipy.misc import imread, imresize
import scipy.io
import pdb
import string
import h5py
from nltk.tokenize import word_tokenize
import json
import spacy.en
import re
def tokenize(sentence):
    """Regex tokenizer: split *sentence* on punctuation/whitespace, keeping
    the punctuation marks as their own tokens while dropping empty strings,
    spaces and newlines."""
    pieces = re.split(r"([-.\"',:? !\$#@~()*&\^%;\[\]/\\\+<>\n=])", sentence)
    return [tok for tok in pieces if tok not in ('', ' ', '\n')]
def prepro_question(imgs, params):
    """Tokenize every question in *imgs* in place.

    Depending on params['token_method'] ('nltk', 'spacy' or anything else
    for the regex tokenizer above), the token list is stored under each
    record's 'processed_tokens' key. Returns the same list, mutated.
    NOTE(review): Python 2 source (print statements).
    """
    # preprocess all the question
    print 'example processed tokens:'
    for i,img in enumerate(imgs):
        s = img['question']
        if params['token_method'] == 'nltk':
            txt = word_tokenize(str(s).lower())
        elif params['token_method'] == 'spacy':
            # params['spacy'] holds the loaded spaCy pipeline (see main()).
            txt = [token.norm_ for token in params['spacy'](s)]
        else:
            txt = tokenize(s)
        img['processed_tokens'] = txt
        # Echo the first few examples for a sanity check.
        if i < 10: print txt
        if i % 1000 == 0:
            # Lightweight progress indicator (overwrites the same line).
            sys.stdout.write("processing %d/%d (%.2f%% done)   \r" %  (i, len(imgs), i*100.0/len(imgs)) )
            sys.stdout.flush()
    return imgs
def build_vocab_question(imgs, params):
    """Build the question vocabulary from the training tokens.

    Words occurring at most params['word_count_threshold'] times are mapped
    to the special 'UNK' token. Each record gets a 'final_question' key with
    the UNK-mapped token list. Returns (imgs, vocab).
    NOTE(review): Python 2 source (print statements, iteritems).
    """
    # build vocabulary for question and answers.
    count_thr = params['word_count_threshold']
    # count up the number of words
    counts = {}
    for img in imgs:
        for w in img['processed_tokens']:
            counts[w] = counts.get(w, 0) + 1
    cw = sorted([(count,w) for w,count in counts.iteritems()], reverse=True)
    print 'top words and their counts:'
    print '\n'.join(map(str,cw[:20]))
    # print some stats
    total_words = sum(counts.itervalues())
    print 'total words:', total_words
    bad_words = [w for w,n in counts.iteritems() if n <= count_thr]
    vocab = [w for w,n in counts.iteritems() if n > count_thr]
    bad_count = sum(counts[w] for w in bad_words)
    print 'number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts))
    print 'number of words in vocab would be %d' % (len(vocab), )
    print 'number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words)
    # lets now produce the final annotation
    # additional special UNK token we will use below to map infrequent words to
    print 'inserting the special UNK token'
    vocab.append('UNK')
    for img in imgs:
        txt = img['processed_tokens']
        # Replace rare words by 'UNK' so the vocab stays closed.
        question = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
        img['final_question'] = question
    return imgs, vocab
def apply_vocab_question(imgs, wtoi):
    """Map test-set tokens through the training vocabulary *wtoi*, replacing
    any out-of-vocabulary token with 'UNK'. Mutates and returns *imgs*."""
    for record in imgs:
        tokens = record['processed_tokens']
        # wtoi values are 1..len(wtoi), so membership is the same test as the
        # original sentinel comparison against len(wtoi)+1.
        record['final_question'] = [t if t in wtoi else 'UNK' for t in tokens]
    return imgs
def get_top_answers(imgs, params):
    """Return the params['num_ans'] most frequent answers, most common first.

    NOTE(review): Python 2 source (print statements, iteritems). Raises
    IndexError if fewer than num_ans distinct answers exist.
    """
    counts = {}
    for img in imgs:
        ans = img['ans']
        counts[ans] = counts.get(ans, 0) + 1
    # Sort (count, answer) pairs descending so the most frequent come first.
    cw = sorted([(count,w) for w,count in counts.iteritems()], reverse=True)
    print 'top answer and their counts:'
    print '\n'.join(map(str,cw[:20]))
    vocab = []
    for i in range(params['num_ans']):
        vocab.append(cw[i][1])
    # The slice is redundant (vocab already has num_ans entries) but harmless.
    return vocab[:params['num_ans']]
def encode_question(imgs, params, wtoi):
    """Encode tokenized questions into fixed-width index arrays.

    Returns (label_arrays, label_length, question_id): a (N, max_length)
    uint32 matrix of vocab indices (zero-padded, clipped at max_length),
    the clipped token count per question, and the original question ids.
    """
    max_length = params['max_length']
    n_questions = len(imgs)
    label_arrays = np.zeros((n_questions, max_length), dtype='uint32')
    label_length = np.zeros(n_questions, dtype='uint32')
    question_id = np.zeros(n_questions, dtype='uint32')
    for row, img in enumerate(imgs):
        tokens = img['final_question']
        question_id[row] = img['ques_id']
        # Record the (possibly clipped) sequence length.
        label_length[row] = min(max_length, len(tokens))
        for col, tok in enumerate(tokens[:max_length]):
            label_arrays[row, col] = wtoi[tok]
    return label_arrays, label_length, question_id
def encode_answer(imgs, atoi):
    """Return a uint32 vector with the answer index of every question.

    Assumes every answer is present in *atoi* (the data was filtered by
    filter_question beforehand); a missing answer raises KeyError.
    """
    encoded = np.zeros(len(imgs), dtype='uint32')
    for row, img in enumerate(imgs):
        encoded[row] = atoi[img['ans']]
    return encoded
def encode_mc_answer(imgs, atoi):
    """Encode the 18 multiple-choice candidate answers of each question.

    Candidates missing from *atoi* map to index 0. Returns a (N, 18)
    uint32 matrix.
    """
    encoded = np.zeros((len(imgs), 18), dtype='uint32')
    for row, img in enumerate(imgs):
        for col, candidate in enumerate(img['MC_ans']):
            encoded[row, col] = atoi.get(candidate, 0)
    return encoded
def filter_question(imgs, atoi):
    """Keep only questions whose answer appears in the top-answer map *atoi*.

    NOTE(review): Python 2 source (print statement). The get() sentinel
    len(atoi)+1 can never be a real value in atoi, so the comparison is a
    plain membership test.
    """
    new_imgs = []
    for i, img in enumerate(imgs):
        if atoi.get(img['ans'],len(atoi)+1) != len(atoi)+1:
            new_imgs.append(img)
    print 'question number reduce from %d to %d '%(len(imgs), len(new_imgs))
    return new_imgs
def get_unqiue_img(imgs):
    """Deduplicate the image paths referenced by the questions.

    Returns (unique_img, img_pos): the list of distinct image paths and,
    for every question, the 1-based index of its image in that list
    (1-based because the downstream Torch code indexes from 1).
    """
    occurrences = {}
    for img in imgs:
        occurrences[img['img_path']] = occurrences.get(img['img_path'], 0) + 1
    unique_img = list(occurrences)
    path_to_index = {path: i + 1 for i, path in enumerate(unique_img)}
    img_pos = np.zeros(len(imgs), dtype='uint32')
    for row, img in enumerate(imgs):
        img_pos[row] = path_to_index[img['img_path']]
    return unique_img, img_pos
def main(params):
    """End-to-end preprocessing: read raw train/test jsons, build the answer
    and question vocabularies, encode everything to numpy arrays and write
    the hdf5 + json outputs named in *params*.

    NOTE(review): Python 2 source (print statements); file handles opened
    inline are left to the interpreter to close.
    """
    if params['token_method'] == 'spacy':
        print 'loading spaCy tokenizer for NLP'
        params['spacy'] = spacy.en.English(data_dir=params['spacy_data'])
    imgs_train = json.load(open(params['input_train_json'], 'r'))
    imgs_test = json.load(open(params['input_test_json'], 'r'))
    # get top answers
    top_ans = get_top_answers(imgs_train, params)
    atoi = {w:i+1 for i,w in enumerate(top_ans)}
    itoa = {i+1:w for i,w in enumerate(top_ans)}
    # filter question, which isn't in the top answers.
    imgs_train = filter_question(imgs_train, atoi)
    seed(123) # make reproducible
    shuffle(imgs_train) # shuffle the order
    # tokenization and preprocessing training question
    imgs_train = prepro_question(imgs_train, params)
    # tokenization and preprocessing testing question
    imgs_test = prepro_question(imgs_test, params)
    # create the vocab for question
    imgs_train, vocab = build_vocab_question(imgs_train, params)
    itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
    wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
    ques_train, ques_length_train, question_id_train = encode_question(imgs_train, params, wtoi)
    # Test questions reuse the training vocab; unseen words become UNK.
    imgs_test = apply_vocab_question(imgs_test, wtoi)
    ques_test, ques_length_test, question_id_test = encode_question(imgs_test, params, wtoi)
    # get the unique image for train and test
    unique_img_train, img_pos_train = get_unqiue_img(imgs_train)
    unique_img_test, img_pos_test = get_unqiue_img(imgs_test)
    # get the answer encoding.
    A = encode_answer(imgs_train, atoi)
    MC_ans_test = encode_mc_answer(imgs_test, atoi)
    # create output h5 file for training set.
    N = len(imgs_train)
    f = h5py.File(params['output_h5'], "w")
    f.create_dataset("ques_train", dtype='uint32', data=ques_train)
    f.create_dataset("ques_length_train", dtype='uint32', data=ques_length_train)
    f.create_dataset("answers", dtype='uint32', data=A)
    f.create_dataset("question_id_train", dtype='uint32', data=question_id_train)
    f.create_dataset("img_pos_train", dtype='uint32', data=img_pos_train)
    f.create_dataset("ques_test", dtype='uint32', data=ques_test)
    f.create_dataset("ques_length_test", dtype='uint32', data=ques_length_test)
    f.create_dataset("question_id_test", dtype='uint32', data=question_id_test)
    f.create_dataset("img_pos_test", dtype='uint32', data=img_pos_test)
    f.create_dataset("MC_ans_test", dtype='uint32', data=MC_ans_test)
    f.close()
    print 'wrote ', params['output_h5']
    # create output json file
    out = {}
    out['ix_to_word'] = itow # encode the (1-indexed) vocab
    out['ix_to_ans'] = itoa
    out['unique_img_train'] = unique_img_train
    out['unique_img_test'] = unique_img_test
    json.dump(out, open(params['output_json'], 'w'))
    print 'wrote ', params['output_json']
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_train_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--input_test_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--num_ans', required=True, type=int, help='number of top answers for the final classifications.')
parser.add_argument('--output_json', default='data_prepro.json', help='output json file')
parser.add_argument('--output_h5', default='data_prepro.h5', help='output h5 file')
# options
parser.add_argument('--max_length', default=26, type=int, help='max length of a caption, in number of words. captions longer than this get clipped.')
parser.add_argument('--word_count_threshold', default=0, type=int, help='only words that occur more than this number of times will be put in vocab')
parser.add_argument('--num_test', default=0, type=int, help='number of test images (to withold until very very end)')
parser.add_argument('--token_method', default='nltk', help='token method. set "spacy" for unigram paraphrasing')
parser.add_argument('--spacy_data', default='spacy_data', help='location of spacy NLP model')
parser.add_argument('--batch_size', default=10, type=int)
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print 'parsed input parameters:'
print json.dumps(params, indent = 2)
main(params)
| 9,689 | 35.156716 | 153 | py |
JOELIN | JOELIN-master/train_shared.py | #!/usr/bin/env python3
import os
import csv
import time
import copy
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn import metrics
from prediction import new_data_predict
from transformers import (
AutoTokenizer, AutoConfig,
AdamW, get_linear_schedule_with_warmup)
import torch
from model import MultiTaskBertForCovidEntityClassificationShare
from preprocessing.utils import (
make_dir_if_not_exists, format_time, log_list, plot_train_loss,
saveToJSONFile)
from torch.utils import data as torch_data
from prediction import prediction_to_submission
from preprocessing.loadData import loadData, loadNewData
import logging
import json
import h5py
# The five annotated COVID-19 tweet event categories; data and subtask heads
# are loaded per event in train().
EVENT_LIST = ['positive', 'negative', 'can_not_test', 'death', 'cure_and_prevention']
# Show all columns when printing pandas tables (metric summaries).
pd.set_option('display.max_columns', None)
################### util ####################
def parse_arg():
    """Define and parse the command-line options of the shared trainer."""
    ap = argparse.ArgumentParser()
    ap.add_argument("-o", "--output_dir", help="Path to the output directory", type=str, default='./results/global_1')
    ap.add_argument("-rt", "--retrain", help="True if the model needs to be retrained", action="store_false", default=True)
    ap.add_argument("-bs", "--batch_size", help="Train batch size for BERT model", type=int, default=16)
    ap.add_argument("-e", "--n_epochs", help="Number of epochs", type=int, default=30)
    ap.add_argument("-E", "--embedding_type",
                    help=("Type of Embedding, 0 for last, 1 for Sum L4 and 2 for concat L4,"
                          "3 for multihead concat"),
                    type=int, default=0)
    ap.add_argument("-lr", "--learning_rate", help="learning rate", type=float, default=2e-5)
    ap.add_argument("-d", "--device", help="Device for running the code", type=str, default="cuda")
    ap.add_argument("-pm", "--pretrained_model", help="pretrained model version", type=str, default="digitalepidemiologylab/covid-twitter-bert")
    ap.add_argument("-w", "--weighting", help="weighting for classes, 10 means 0.1:1, 5 means 0.2:1", type=int, default=10)
    ap.add_argument("-fl", "--f1_loss", help="using F1 loss", type=str_to_bool, default=False)
    ap.add_argument("-bu", "--batch_size_update", type=int, default=32)
    ap.add_argument("-new", "--new", type=str_to_bool, default=True)
    # data-cleaning options
    ap.add_argument("-ca", "--clean_all", action="store_true", default=True)
    return ap.parse_args()

def str_to_bool(value):
    """Parse common textual truthy/falsy spellings into a bool (argparse type)."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in {'true', 't', '1', 'yes', 'y'}:
        return True
    if lowered in {'false', 'f', '0', 'no', 'n'}:
        return False
    raise ValueError('{} is not a valid boolean value'.format(value))
################### training functions ####################
# dataset
# testing/val script
def evaluation(model, dataloader, device, threshold=0.5, save_sample_flg=False):
    """Evaluate *model* on *dataloader* and compute per-subtask metrics.

    threshold may be a single float applied to all subtasks, or a per-subtask
    sequence indexed like model.subtasks. Returns (precision, recall, f1,
    prediction, confusion_matrix, classification_report), each indexed per
    subtask; confusion_matrix rows are (TN, FP, FN, TP) from ravel().
    """
    model.eval()
    total_preds, total_labels, total_batch_data = prepare_for_prediction(
        model, dataloader, device)
    # Scalar threshold: one cut-off for every subtask; otherwise apply the
    # subtask-specific cut-offs column by column.
    if type(threshold) in {float, np.float32, np.float64}:
        prediction = (total_preds > threshold).astype(int)
    else:
        prediction = np.vstack(
            [(total_preds[:,subtask_idx] > threshold[subtask_idx]).astype(int)
             for subtask_idx in range(len(model.subtasks))]).T
    if save_sample_flg:
        # Dump TN/FP/FN/TP samples per subtask for error analysis.
        save_sample_to_file(prediction, total_labels, total_batch_data,
                            model.subtasks, "shared")
    # Calculating metrics
    precision = np.array(
        [metrics.precision_score(total_labels[:,idx], prediction[:,idx], zero_division=0)
         for idx in range(total_labels.shape[1])])
    recall = np.array(
        [metrics.recall_score(total_labels[:,idx], prediction[:,idx], zero_division=0)
         for idx in range(total_labels.shape[1])])
    f1 = np.array(
        [metrics.f1_score(total_labels[:,idx], prediction[:,idx], zero_division=0)
         for idx in range(total_labels.shape[1])])
    # f1_micro = np.array(
    #     [metrics.f1_score(total_labels[:,idx], prediction[:,idx], zero_division=0, average='micro')
    #      for idx in range(total_labels.shape[1])])
    confusion_matrix = np.array(
        [metrics.confusion_matrix(total_labels[:,idx], prediction[:,idx], labels=[0, 1]).ravel()
         for idx in range(total_labels.shape[1])])
    classification_report = [
        metrics.classification_report(total_labels[:, idx], prediction[:,idx], output_dict=True, zero_division=0)
        for idx in range(total_labels.shape[1])]
    return precision, recall, f1, prediction, confusion_matrix, classification_report
def save_sample_to_file(total_preds, total_labels, total_batch_data,
                        subtask_list, event):
    """Bucket every sample of each subtask into TN/FP/FN/TP and write each
    bucket to ./test-samples/<event>/<subtask>-<bucket>.jsonl."""
    out_dir = os.path.join('./test-samples', event)
    make_dir_if_not_exists(out_dir)
    assert len(subtask_list) == total_preds.shape[1]
    for col, subtask in enumerate(subtask_list):
        out_files = {
            (0, 0): os.path.join(out_dir, f"{subtask}-TN.jsonl"),
            (0, 1): os.path.join(out_dir, f"{subtask}-FP.jsonl"),
            (1, 0): os.path.join(out_dir, f"{subtask}-FN.jsonl"),
            (1, 1): os.path.join(out_dir, f"{subtask}-TP.jsonl"),
        }
        buckets = {(0, 0): [], (0, 1): [], (1, 0): [], (1, 1): []}
        labels_col = total_labels[:, col]
        preds_col = total_preds[:, col]
        for row, (label, pred) in enumerate(zip(labels_col, preds_col)):
            buckets[(label, pred)].append(total_batch_data[row])
        for key, samples in buckets.items():
            saveToJSONFile(samples, out_files[key])
    return True
def prepare_for_prediction(model, dataloader, device):
    """Run *model* over every batch of *dataloader* without gradients.

    Each batch is (input_ids, entity_start_positions, labels). Returns
    (total_preds, total_labels, total_batch_data): stacked sigmoid
    probabilities, stacked gold labels, and an (currently always empty)
    list of raw batch records.
    """
    pred_chunks = []
    label_chunks = []
    batch_records = []  # kept for interface parity; never populated here
    with torch.no_grad():
        for batch in dataloader:
            logits, _ = model(
                input_ids=batch[0].to(device),
                entity_start_positions=batch[1].to(device),
                y=batch[2].to(device),
            )
            # Convert logits to probabilities before aggregating.
            pred_chunks.append(torch.sigmoid(logits).detach().cpu().numpy())
            label_chunks.append(batch[2].cpu().numpy())
    return np.vstack(pred_chunks), np.vstack(label_chunks), batch_records
# prediction script
# NOTE We didn't use it
# def make_prediction(model, dataloader, device, threshold=0.5):
# # run model and predict without having "y" label
# # only return the prediction
# model.eval()
# dev_logits = []
# for step, batch in enumerate(dataloader):
# input_dict = {"input_ids": batch["input_ids"].to(device),
# "entity_start_positions": batch["entity_start_positions"].to(device)}
# logits, _ = model(**input_dict)
# # Post-model subtask information aggregation.
# logits = list(logits.detach().cpu().numpy())
# dev_logits += logits
# dev_logits = np.array(dev_logits)
# # Assessment on the results according to labels and logits.
# if type(threshold) == float:
# prediction = (dev_logits > threshold).astype(int)
# else:
# prediction = np.vstack([(dev_logits[:,subtask_idx] > threshold[subtask_idx]).astype(int) for subtask_idx in range(len(model.subtasks))])
# return prediction
def result_to_tsv(results, model_config, taskname, output_dir):
    """Flatten the evaluation *results* dict and append one row per subtask
    to <output_dir>/result.tsv.

    *results* maps subtask names to metric dicts and additionally carries the
    aggregate keys "best_dev_threshold", "best_dev_F1s" and "dev_t_F1_P_Rs"
    (as produced by post_processing); those aggregates are folded back into
    each subtask's entry before writing.
    """
    # We will save the classifier results and model config for each subtask in this dictionary
    all_subtasks_results_and_model_configs = dict()
    all_task_results_and_model_configs = dict()
    all_task_question_tags = dict()
    tested_tasks = list()
    for key in results:
        # Every key that is not an aggregate key is a subtask name.
        if key not in ["best_dev_threshold", "best_dev_F1s", "dev_t_F1_P_Rs"]:
            tested_tasks.append(key)
            results[key]["best_dev_threshold"] = results["best_dev_threshold"][key]
            results[key]["best_dev_F1"] = results["best_dev_F1s"][key]
            results[key]["dev_t_F1_P_Rs"] = results["dev_t_F1_P_Rs"][key]
            all_subtasks_results_and_model_configs[key] = results[key], model_config
    all_task_results_and_model_configs[taskname] = all_subtasks_results_and_model_configs
    all_task_question_tags[taskname] = tested_tasks
    # Read the results for each task and save them in csv file
    # NOTE: Appends to (not overwrites) result.tsv; no header row is written.
    results_tsv_save_file = os.path.join(output_dir, "result.tsv")
    with open(results_tsv_save_file, "a") as tsv_out:
        writer = csv.writer(tsv_out, delimiter='\t')
        # header = ["Event", "Sub-task", "model name", "accuracy", "CM", "pos. F1", "dev_threshold", "dev_N",
        #           "dev_F1", "dev_P", "dev_R", "dev_TP", "dev_FP", "dev_FN", "N", "F1", "P", "R", "TP", "FP", "FN"]
        # writer.writerow(header)
        for taskname, question_tags in all_task_question_tags.items():
            current_task_results_and_model_configs = all_task_results_and_model_configs[taskname]
            for question_tag in question_tags:
                results_sub, model_config = current_task_results_and_model_configs[question_tag]
                # Extract results_sub
                classification_report = results_sub["Classification Report"]
                positive_f1_classification_report = classification_report['1.0']['f1-score']
                accuracy = classification_report['accuracy']
                CM = results_sub["CM"]
                # Best threshold and dev F1
                best_dev_threshold = results_sub["best_dev_threshold"]
                dev_t_F1_P_Rs = results_sub["dev_t_F1_P_Rs"]
                best_dev_threshold_index = [info[0] for info in dev_t_F1_P_Rs].index(best_dev_threshold)
                # Each entry in dev_t_F1_P_Rs is of the format t, dev_F1, dev_P, dev_R, dev_TP + dev_FN, dev_TP, dev_FP, dev_FN
                t, dev_F1, dev_P, dev_R, dev_N, dev_TP, dev_FP, dev_FN = dev_t_F1_P_Rs[best_dev_threshold_index]
                # Test-set metrics
                F1 = results_sub["F1"]
                P = results_sub["P"]
                R = results_sub["R"]
                TP = results_sub["TP"]
                FP = results_sub["FP"]
                FN = results_sub["FN"]
                N = results_sub["N"]
                # Extract model config
                model_name = model_config["model"]
                row = [taskname, question_tag, model_name, accuracy, CM,
                       positive_f1_classification_report, best_dev_threshold,
                       dev_N, dev_F1, dev_P, dev_R, dev_TP, dev_FP, dev_FN,
                       N, F1, P, R, TP, FP, FN]
                writer.writerow(row)
def cal_micro_f1(confusion_matrix):
    """Micro-averaged F1 over stacked per-subtask confusion matrices.

    Each row of *confusion_matrix* is (TN, FP, FN, TP), i.e. the ravel()
    output of sklearn's confusion_matrix. TP/FP/FN counts are pooled over
    all rows before precision/recall/F1 are formed.
    """
    tp = fp = fn = 0.0
    for row_idx in range(confusion_matrix.shape[0]):
        _, row_fp, row_fn, row_tp = confusion_matrix[row_idx, :].ravel()
        tp += row_tp
        fp += row_fp
        fn += row_fn
        # Recomputed each iteration; only the final (fully pooled) value
        # survives to the return statement, matching the original flow.
        precision = tp / (tp + fp) if (tp + fp) != 0 else 0.0
        recall = tp / (tp + fn) if (tp + fn) != 0 else 0.0
        micro_f1 = (2.0 * precision * recall / (precision + recall)
                    if (precision + recall) != 0 else 0.0)
    return micro_f1
def post_processing(args, model, valid_dataloader, test_dataloader, output_dir, device, verbose=False):
    """Sweep decision thresholds on the dev set, pick the best per subtask,
    evaluate the test set at those thresholds, and save results + config
    as JSON under *output_dir*. Returns (results, model_config).
    """
    # Save the model name in the model_config file
    model_config = dict()
    results = dict()
    model_config["model"] = "MultiTaskBertForCovidEntityClassificationShare"
    model_config["epochs"] = args.n_epochs
    # model_config["device"] = device
    # Check different thresholds
    #thresholds = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
    thresholds = np.arange(0, 1, 0.05)[1:] ## TODO change
    #thresholds = np.arange(0, 1, 0.2)[1:]
    # Evaluate on different thresholds in dev set
    dev_results = []
    for i, t in enumerate(thresholds):
        # \x1b[2K\r erases the line so the progress counter updates in place.
        print(f"\x1b[2K\rPost Processing {i} / {thresholds.shape[0]}"
              f" [{100.0*i/thresholds.shape[0]}%]", end="")
        dev_results.append(evaluation(model, valid_dataloader, device, t))
    print()
    # Finding out the best threshold using dev set
    # Getting the f1 scores on different thresholds and in every subtasks
    dev_f1_scores = np.array([dev_results[t_idx][2] for t_idx in range(len(thresholds))])  # (thresholds_idx, subtask_idx)
    # Calculating the best thresholds indices on different subtasks
    best_dev_thresholds_idx = np.argmax(dev_f1_scores, axis=0)
    best_dev_F1s = {}
    best_dev_thresholds = {}
    dev_subtasks_t_F1_P_Rs = {subtask: list() for subtask in model.subtasks}
    for subtask_idx in range(best_dev_thresholds_idx.size):
        # Find the subtasks using index
        subtask = model.subtasks[subtask_idx]
        # Find the thresholds of that task using index
        best_thresholds_idx = best_dev_thresholds_idx[subtask_idx]
        # Find the best F1 of that task using index
        best_dev_F1s[subtask] = dev_f1_scores[best_thresholds_idx, subtask_idx]
        # Find the best threshold of that task using index
        best_dev_thresholds[subtask] = thresholds[best_thresholds_idx]
        # Log all results in output formats
        for t_idx in range(len(thresholds)):
            dev_P = dev_results[t_idx][0][subtask_idx]
            dev_R = dev_results[t_idx][1][subtask_idx]
            dev_F1 = dev_results[t_idx][2][subtask_idx]
            dev_TN, dev_FP, dev_FN, dev_TP = dev_results[t_idx][4][subtask_idx]
            dev_subtasks_t_F1_P_Rs[subtask].append((thresholds[t_idx], dev_F1, dev_P, dev_R, dev_TP + dev_FN, dev_TP, dev_FP, dev_FN))
    results["best_dev_threshold"] = best_dev_thresholds
    results["best_dev_F1s"] = best_dev_F1s
    results["dev_t_F1_P_Rs"] = dev_subtasks_t_F1_P_Rs
    # Apply to testset
    # TODO: Squad style and raw style scores are not evaluated here yet.
    logging.info("Testing on test dataset")
    # Per-subtask thresholds in subtask order, as evaluation() expects.
    best_thresholds = [best_dev_thresholds[subtask] for subtask in model.subtasks]
    # Getting test results
    test_result = evaluation(model, test_dataloader, device, best_thresholds,
                             save_sample_flg=False)
    for subtask_idx in range(len(best_thresholds)):
        subtask = model.subtasks[subtask_idx]
        results[subtask] = {}
        P = test_result[0][subtask_idx]
        R = test_result[1][subtask_idx]
        F1 = test_result[2][subtask_idx]
        TN, FP, FN, TP = test_result[4][subtask_idx]
        classification_report = test_result[5][subtask_idx]
        results[subtask]["CM"] = [TN, FP, FN, TP]  # Storing it as list of lists instead of numpy.ndarray
        results[subtask]["Classification Report"] = classification_report
        results[subtask]["F1"] = F1
        results[subtask]["P"] = P
        results[subtask]["R"] = R
        results[subtask]["TN"] = TN
        results[subtask]["TP"] = TP
        results[subtask]["FP"] = FP
        results[subtask]["FN"] = FN
        N = TP + FN
        results[subtask]["N"] = N
        if verbose:
            print(results)
        # NOTE(review): indentation of this logging group relative to the
        # `if verbose:` guard is ambiguous in the source dump — confirm.
        logging.info("New evaluation scores:")
        logging.info(f"F1: {F1}")
        logging.info(f"Precision: {P}")
        logging.info(f"Recall: {R}")
        logging.info(f"True Positive: {TP}")
        logging.info(f"False Positive: {FP}")
        logging.info(f"False Negative: {FN}")
    # Save model_config and results
    model_config_file = os.path.join(output_dir, "model_config.json")
    results_file = os.path.join(output_dir, "results.json")
    logging.info(f"Saving model config at {model_config_file}")
    saveToJSONFile(model_config, model_config_file)
    logging.info(f"Saving results at {results_file}")
    saveToJSONFile(results, results_file)
    return results, model_config
def output_precision_recall_f1(precision, recall, f1, subtask_list=None):
    """Print and return a 3xK DataFrame (Precision/Recall/F1 x subtask).

    When *subtask_list* is omitted, columns are named "Label 0", "Label 1", ...
    """
    if subtask_list is None:
        subtask_list = [f"Label {i}" for i in np.arange(f1.shape[0])]
    table = pd.DataFrame(np.vstack([precision, recall, f1]),
                         columns=subtask_list,
                         index=["Precision", "Recall", "F1"])
    print(table)
    return table
def h5_load(filename, data_list, dtype=None, verbose=False):
    """Load the named datasets from an HDF5 file into numpy arrays.

    Each name in *data_list* is read with read_direct into a freshly
    allocated array (cast to *dtype* when given, otherwise keeping the
    stored dtype). Returns the arrays in the same order as the names.
    """
    with h5py.File(filename, 'r') as infile:
        arrays = []
        for name in data_list:
            dataset = infile[name]
            target_dtype = dtype if dtype is not None else dataset.dtype
            buf = np.empty(dataset.shape, dtype=target_dtype)
            dataset.read_direct(buf)
            arrays.append(buf)
        if verbose:
            print("\n".join(
                "{} = {} [{}]".format(name, str(arr.shape), str(arr.dtype))
                for name, arr in zip(data_list, arrays)
            ))
            print()
        return arrays
class SharedDataset(torch_data.Dataset):
    """Dataset over parallel sequences of token ids (*x*), entity start
    positions (*pos*) and label vectors (*y*); items are returned as
    (x, pos, y) triples."""

    def __init__(self, x, pos, y):
        self.x = x
        self.pos = pos
        self.y = y

    def __len__(self):
        # The three sequences are assumed parallel; x defines the length.
        return len(self.x)

    def __getitem__(self, index):
        sample = (self.x[index], self.pos[index], self.y[index])
        return sample
def my_collate_fn(data):
    """Collate (input_ids, entity_pos, label) samples into batch tensors.

    Returns (x, pos, y): x is a zero-padded LongTensor of token ids,
    pos pairs each row index with its entity start offset as an (N, 2)
    LongTensor, and y stacks the label vectors as a FloatTensor.
    """
    max_len = max(sample[0].shape[0] for sample in data)
    # Right-pad every id sequence with zeros up to the batch maximum.
    padded = np.zeros((len(data), max_len), dtype=np.int64)
    for row, sample in enumerate(data):
        padded[row, :sample[0].shape[0]] = sample[0]
    labels = np.vstack([sample[2] for sample in data])
    # Pair each row index with its entity start offset: (N, 2).
    offsets = np.hstack([sample[1] for sample in data])
    pos = np.vstack([np.arange(offsets.shape[0]), offsets]).T
    return (torch.LongTensor(padded),
            torch.LongTensor(pos),
            torch.FloatTensor(labels))
def train(logging, args):
    """Train the shared multi-task classifier over all events, validate
    periodically, post-process decision thresholds, and emit per-event
    submission files for the unlabeled "new" data.

    Args:
        logging: the logging module, pre-configured by main().
        args: parsed CLI namespace (batch size, lr, device, flags, ...).
    """
    event_output_dir = os.path.join(args.output_dir)
    make_dir_if_not_exists(event_output_dir)
    # parameter setting
    max_len = 100 # TODO: compute the statistic of the length
    # subtask_num = 5 # might need to re-assign value after loading the data
    # pretrained_bert_version = "bert-base-cased"
    # pretrained_bert_version = "digitalepidemiologylab/covid-twitter-bert"
    # pretrained_bert_version = "roberta-base"
    pretrained_bert_version = args.pretrained_model
    if torch.cuda.is_available():
        device = torch.device(args.device)
        logging.info(f"Using {args.device} -- GPU{torch.cuda.get_device_name(0)} to train")
    else:
        device = torch.device("cpu")
        logging.info(f"Using CPU to train")
    # load data
    # NOTE(review): only clean_all AND new selects the "clean" folder; the
    # other three combinations all resolve to "normal" -- confirm intended.
    if args.clean_all:
        if args.new:
            data_folder = os.path.join("temp", "clean")
        else:
            data_folder = os.path.join("temp", "normal")
    else:
        if args.new:
            data_folder = os.path.join("temp", "normal")
        else:
            data_folder = os.path.join("temp", "normal")
    subtask_list = []
    # Per-phase accumulators; each event appends its own arrays, merged below.
    data = {
        "train":{"input_ids":[], "entity_start_positions":[], "labels":[]},
        "valid":{"input_ids":[], "entity_start_positions":[], "labels":[]},
        "test":{"input_ids":[], "entity_start_positions":[], "labels":[]},
        "new":{"input_ids":[], "entity_start_positions":[], "labels":[]},
    }
    for event in EVENT_LIST:
        print(f"loading {event} data")
        # load subtask list
        with open(os.path.join(data_folder, f"{event}_subtask.json"), 'r', encoding='utf-8') as infile:
            event_subtask_list = json.load(infile)
        subtask_list.extend(f"{event}_{t}" for t in event_subtask_list)
        # load input_ids, positions, and labels
        for phase in ["train", "valid", "test", "new"]:
            table = pd.read_parquet(os.path.join(data_folder, f"{event}_{phase}.parquet"))
            data[phase]["input_ids"].extend(table["input_ids"].to_list())
            entity_start_positions, labels = h5_load(os.path.join(data_folder, f"{event}_{phase}.h5"), data_list=["entity_start_positions", "labels"])
            data[phase]["entity_start_positions"].append(entity_start_positions)
            data[phase]["labels"].append(labels)
    for phase in ["train", "valid", "test", "new"]:
        data[phase]["entity_start_positions"] = np.hstack(data[phase]["entity_start_positions"])
        # Merge per-event label matrices into one block-diagonal matrix:
        # rows stack across events, columns are the union of all subtasks.
        num_row = sum([l.shape[0] for l in data[phase]["labels"]])
        num_col = sum([l.shape[1] for l in data[phase]["labels"]])
        new_labels = np.zeros([num_row, num_col], dtype=np.int32)
        print(f"{phase} new_labels.shape = {new_labels.shape}")
        offset_row = 0
        offset_col = 0
        for l in data[phase]["labels"]:
            new_labels[offset_row:offset_row+l.shape[0], offset_col:offset_col+l.shape[1]] = l
            offset_row += l.shape[0]
            offset_col += l.shape[1]
        data[phase]["labels"] = new_labels
    print(subtask_list)
    # build dataloader
    train_dataloader = torch_data.DataLoader(
        SharedDataset(data["train"]["input_ids"], data["train"]["entity_start_positions"], data["train"]["labels"]),
        num_workers=2,
        batch_size=args.batch_size,
        shuffle=True,
        collate_fn=my_collate_fn,
    )
    valid_dataloader = torch_data.DataLoader(
        SharedDataset(data["valid"]["input_ids"], data["valid"]["entity_start_positions"], data["valid"]["labels"]),
        num_workers=2,
        batch_size=128,
        shuffle=False,
        collate_fn=my_collate_fn,
    )
    test_dataloader = torch_data.DataLoader(
        SharedDataset(data["test"]["input_ids"], data["test"]["entity_start_positions"], data["test"]["labels"]),
        num_workers=2,
        batch_size=128,
        shuffle=False,
        collate_fn=my_collate_fn,
    )
    new_dataloader = torch_data.DataLoader(
        SharedDataset(data["new"]["input_ids"], data["new"]["entity_start_positions"], data["new"]["labels"]),
        num_workers=2,
        batch_size=128,
        shuffle=False,
        collate_fn=my_collate_fn,
    )
    # data loading
    tokenizer = AutoTokenizer.from_pretrained(pretrained_bert_version)
    # Special markers: <E>/</E> wrap the candidate entity span in the input.
    tokenizer.add_tokens(["<E>", "</E>", "<URL>", "@USER"])
    tokenizer.save_pretrained(event_output_dir)
    entity_start_token_id = tokenizer.convert_tokens_to_ids(["<E>"])[0]
    # Hidden states are only needed when pooling over the last four layers.
    output_hidden_states = True if args.embedding_type > 0 else False
    config = AutoConfig.from_pretrained(
        pretrained_bert_version, output_hidden_states=output_hidden_states)
    config.subtasks = subtask_list
    config.device = args.device
    config.f1_loss = args.f1_loss ##f1 loss flag
    config.weighting = args.weighting
    config.embedding_type = args.embedding_type
    model = MultiTaskBertForCovidEntityClassificationShare(pretrained_bert_version, config=config)
    model.resize_token_embeddings(len(tokenizer))
    model.to(args.device) ## TODO old model move classifier
    # init optimizer and scheduler
    optimizer = AdamW(model.parameters(), lr=args.learning_rate, eps=1e-8)
    total_steps = len(train_dataloader) * args.n_epochs
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
    best_model = None
    best_score = 0.0
    best_epoch = 0
    training_stats = []
    epoch_train_loss = list()
    # start training
    logging.info(f"Initiating training loop for {args.n_epochs} epochs...")
    total_start_time = time.time()
    if args.batch_size_update != -1:
        # Gradient accumulation: effective batch size = batch_size_update.
        accumulation_steps = args.batch_size_update // args.batch_size
    if args.retrain:
        for epoch in range(args.n_epochs):
            model.train()
            pbar = tqdm(train_dataloader)
            # Reset the total loss for each epoch.
            total_train_loss = 0
            avg_train_loss = 0
            train_loss_trajectory = list()
            dev_log_frequency = 5
            n_steps = len(train_dataloader)
            dev_steps = int(n_steps / dev_log_frequency)
            start_time = time.time()
            for step, batch in enumerate(pbar):
                input_dict = {"input_ids": batch[0].to(device),
                              "entity_start_positions": batch[1].to(device),
                              "y": batch[2].to(device)}
                # Forward
                # x = x.to(device)
                # y = y.to(device)
                # subtask = subtask.to(device)
                # entity_position = entity_position.to(device)
                logits, loss = model(**input_dict)
                loss.backward()
                total_train_loss += loss.item()
                avg_train_loss = total_train_loss/(step+1)
                elapsed = format_time(time.time() - start_time)
                # NOTE(review): duplicate of the assignment two lines above.
                avg_train_loss = total_train_loss / (step + 1)
                # keep track of changing avg_train_loss
                train_loss_trajectory.append(avg_train_loss)
                pbar.set_description(
                    f"Epoch:{epoch+1}|Avg. Loss:{avg_train_loss:.4f}|Loss:{loss.item():.4f}")
                if args.batch_size_update == -1:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                    optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                elif (step+1) % accumulation_steps == 0:
                    # Accumulated-gradient path: update only every N steps.
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
                    optimizer.step()
                    scheduler.step()
                    model.zero_grad()
                # Update the learning rate.
                pbar.update()
                # run validation
                if (step) % dev_steps == 0:
                    # model.eval()
                    precision, recall, f1, prediction, confusion_matrix, classification_report = evaluation(model, valid_dataloader, device=device)
                    print("\nValidation Result.")
                    output_precision_recall_f1(precision, recall, f1, model.subtasks)
                    mf1 = cal_micro_f1(confusion_matrix)
                    print(f"Micro F1 for each task: {mf1}")
                    f1 = np.mean(f1)
                    print(f"Macro F1: {f1}")
                    f1 = mf1 ## comment this if using macro-F1 for sub task
                    # Keep the checkpoint with the best dev (micro) F1 so far.
                    if f1 >= best_score:
                        best_model = copy.deepcopy(model.state_dict())
                        best_score = f1
                        best_epoch = epoch
                    model.train()
            # Calculate the average loss over all of the batches.
            avg_train_loss = total_train_loss / len(train_dataloader)
            training_time = format_time(time.time() - start_time)
            # Record all statistics from this epoch.
            training_stats.append({
                'epoch': epoch + 1,
                'Training Loss': avg_train_loss,
                'Training Time': training_time})
            # Save the loss trajectory
            epoch_train_loss.append(train_loss_trajectory)
    # finish training
    print("Finished Training!")
    logging.info(f"Training complete with total Train time:{format_time(time.time() - total_start_time)}")
    log_list(training_stats)
    print(f"Best Validation Score = {best_score} at {best_epoch}")
    model.load_state_dict(best_model)
    # #model.save_pretrained(event_output_dir)
    torch.save(best_model, os.path.join(event_output_dir, "model.bin"))
    # Plot the train loss trajectory in a plot
    train_loss_trajectory_plot_file = os.path.join(args.output_dir, "train_loss_trajectory.png")
    logging.info(f"Saving the Train loss trajectory at {train_loss_trajectory_plot_file}")
    plot_train_loss(epoch_train_loss, train_loss_trajectory_plot_file)
    # running testing
    # (1) probing threshold for each subtask
    # (2) save threshold
    # precision, recall, f1, prediction = evaluation(model, test_dataloader, device=device)
    print("Testing Result without Post-processing")
    precision, recall, f1, prediction, confusion_matrix, classification_report = evaluation(model, test_dataloader, device=device)
    mf1 = cal_micro_f1(confusion_matrix)
    print(f"Micro F1 for each task: {mf1}")
    f1 = np.mean(f1)
    print(f"Macro F1: {f1}")
    print("post_processing!")
    results, model_config = post_processing(args, model, valid_dataloader, test_dataloader, event_output_dir, device=device)
    print("generating tsv files!")
    # NOTE(review): `event` here still holds the LAST value of the earlier
    # data-loading loop -- confirm result_to_tsv really wants only that event.
    result_to_tsv(results, model_config, event, args.output_dir)
    total_preds, total_labels, total_batch_data = prepare_for_prediction(model,new_dataloader,device)
    threshold = results["best_dev_threshold"]
    # Binarize predictions with the per-subtask thresholds tuned on dev.
    prediction = np.vstack( ### (28898, 33)
        [(total_preds[:, i] > threshold[subtask]).astype(int)
         for i,subtask in enumerate(model.subtasks)]).T
    # total_batch_data = [] ##(28898
    start = 0
    for event in EVENT_LIST:
        total_batch_data = []
        with open(os.path.join(data_folder, f"{event}_new_data.json"), 'r', encoding='utf-8') as infile:
            total_batch_data_tmp = json.load(infile)
        total_batch_data.extend(total_batch_data_tmp)
        # Select only this event's subtask columns from the global prediction.
        subtask_list_event = [i for i in model.subtasks if event in i]
        # print(subtask_list_event)
        preds_index = []
        for i,subtask in enumerate(model.subtasks):
            if subtask in subtask_list_event:
                preds_index.append(i)
                # print(i,j)
        preds_index = np.array(preds_index)
        event_prediction = prediction[start:start+len(total_batch_data), preds_index]
        # Strip the "<event>_" prefix back off the subtask names.
        subtask_list_event = [i[len(event)+1:] for i in subtask_list_event]
        prediction_to_submission(event_prediction,total_batch_data,event,subtask_list_event)
        start += len(total_batch_data)
    print("generating submission files!")
    # for event in EVENT_LIST:
    #     new_data_predict(event, logging, args, model)
def main():
    """Entry point: reset the result sheet, configure logging, and train."""
    args = parse_arg()
    make_dir_if_not_exists(args.output_dir)
    results_tsv_save_file = os.path.join(args.output_dir, "result.tsv")
    # Start from a clean result sheet if one is left over from a prior run.
    if os.path.exists(results_tsv_save_file):
        os.remove(results_tsv_save_file)
    else:
        print("Can not delete the file as it doesn't exists")
    header = ["Event", "Sub-task", "model name", "accuracy", "CM", "pos. F1", "dev_threshold", "dev_N",
              "dev_F1", "dev_P", "dev_R", "dev_TP", "dev_FP", "dev_FN", "N", "F1", "P", "R", "TP", "FP", "FN"]
    with open(results_tsv_save_file, "a") as tsv_out:
        csv.writer(tsv_out, delimiter='\t').writerow(header)
    # Drop any handlers installed by earlier imports/runs before reconfiguring.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logfile = os.path.join(args.output_dir, "train_output.log")
    logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s",
                        handlers=[logging.FileHandler(logfile, mode='w'), logging.StreamHandler()])
    train(logging, args)
# Script entry point.
if __name__ == "__main__":
    main()
| 32,010 | 41.511288 | 174 | py |
JOELIN | JOELIN-master/extract_data.py | #!/usr/bin/env python3
import os
import csv
import time
import copy
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn import metrics
from pprint import pprint
from transformers import (
BertTokenizerFast, BertPreTrainedModel, BertModel, BertConfig,
AutoTokenizer, AutoModel, AutoConfig,
RobertaConfig, RobertaModel,
AdamW, get_linear_schedule_with_warmup)
import torch
from torch.utils.data import Dataset, DataLoader
from preprocessing.loadData import loadData, loadNewData
from preprocessing.processText import getTextProcessingFuncList
from preprocessing.utils import (
make_dir_if_not_exists, format_time, log_list, plot_train_loss,
saveToJSONFile, loadFromJSONFile)
from model import (MultiTaskBertForCovidEntityClassification,
MultiTaskBertForCovidEntityClassificationNew)
import logging
import h5py
import json
# The five COVID-19 Twitter event categories handled by this pipeline.
EVENT_LIST = ['positive', 'negative', 'can_not_test', 'death', 'cure_and_prevention']
# Show all columns when printing pandas tables (metric reports are wide).
pd.set_option('display.max_columns', None)
################### util ####################
def parse_arg():
    """Build and parse the command-line arguments for data extraction.

    Returns:
        argparse.Namespace with output paths, model choice, and
        text-preprocessing flags.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--output_dir", help="Path to the output directory", type=str, default='./results/debug_chacha')
    # NOTE(review): action="store_false" with default=True means passing -rt
    # DISABLES retraining -- confirm the flag name matches the intent.
    parser.add_argument("-rt", "--retrain", help="True if the model needs to be retrained", action="store_false", default=True)
    parser.add_argument("-bs", "--batch_size", help="Train batch size for BERT model", type=int, default=32)
    parser.add_argument("-e", "--n_epochs", help="Number of epochs", type=int, default=8)
    parser.add_argument("-lr", "--learning_rate", help="learning rate", type=float, default=2e-5)
    parser.add_argument("-d", "--device", help="Device for running the code", type=str, default="cuda")
    parser.add_argument("-pm", "--pretrained_model", help="pretrained model version", type=str, default="bert-base-cased")
    parser.add_argument("-w", "--weighting", help="weighting for classes, 10 means 0.1:1, 5 means 0.2:1", type=int, default=None)
    parser.add_argument("-fl", "--f1_loss", help="using F1 loss", type=str_to_bool, default=False)
    parser.add_argument("-bu", "--batch_size_update", type=int, default=-1)
    # Add Data Clean Options
    parser.add_argument("-ca", "--clean_all", action="store_true", default=False)
    # parser.add_argument("--replace_tags", action="store_true", default=False)
    # NOTE Placeholder, dependent if we want to move them out from preprocessing
    # parser.add_argument(
    #     '--replace_usernames', default=False, help='Replace usernames with filler',
    #     action="store_true")
    # parser.add_argument('--replace_urls', default=False, help='Replace URLs with filler',
    #                     action="store_true")
    # parser.add_argument('--replace_multiple_usernames', default=False,
    #                     help='Replace "@user @user" with "2 <username_filler>"', action="store_true")
    # parser.add_argument('--replace_multiple_urls', default=False,
    #                     help='Replace "http://... http://.." with "2 <url_filler>"',
    #                     action="store_true")
    parser.add_argument(
        '--force_lower_case', default=False, action="store_true",
        help='Convert text to lower case (not included in clean_all)')
    parser.add_argument(
        '--asciify_emojis', default=False, help='Asciifyi emojis', action="store_true")
    parser.add_argument(
        '--standardize_punctuation', default=False,
        help='Standardize (asciifyi, action="store_true") special punctuation',
        action="store_true")
    parser.add_argument(
        '--remove_unicode_symbols', default=False,
        help='After preprocessing remove characters which belong to unicode category "So"',
        action="store_true")
    parser.add_argument(
        '--remove_accented_characters', default=False,
        help='Remove accents/asciify everything. Probably not recommended.',
        action="store_true")
    parser.add_argument(
        '--replace_tags', default=False,
        help='After preprocessing remove characters which belong to unicode category "So"',
        action="store_true")
    # NOTE(review): store_true with default=True means --new_data can never
    # be turned off from the CLI -- confirm intended.
    parser.add_argument(
        '--new_data', default=True,
        help='set to True if processing new data',
        action="store_true")
    return parser.parse_args()
def str_to_bool(value):
    """Interpret a boolean-ish CLI value.

    Accepts actual bools unchanged; otherwise matches common true/false
    spellings case-insensitively. Raises ValueError for anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in {'true', 't', '1', 'yes', 'y'}:
        return True
    if lowered in {'false', 'f', '0', 'no', 'n'}:
        return False
    raise ValueError('{} is not a valid boolean value'.format(value))
def extract_data(event):
    """Tokenize one event's tweets and dump model-ready arrays to disk.

    Writes, under temp/<version>/ (version is "clean" or "normal"):
      - "<event>_<phase>.h5": entity start positions + label matrix,
      - "<event>_<phase>.parquet": input token-id sequences,
      - "<event>_<phase>_data.json": raw batch data,
      - "<event>_subtask.json": the event's subtask names.
    """
    args = parse_arg()
    if args.clean_all:
        version = "clean"
    else:
        version = "normal"
    pretrained_bert_version = args.pretrained_model
    #subtask_list = ['age', 'close_contact', 'employer', 'gender_male', 'gender_female', 'name', 'recent_travel', 'relation', 'when', 'where']
    # data loading
    #train_dataloader = torch.load("temp/train_dataloader.bin")
    #valid_dataloader = torch.load("temp/valid_dataloader.bin")
    #test_dataloader = torch.load("temp/test_dataloader.bin")
    tokenizer = AutoTokenizer.from_pretrained(pretrained_bert_version)
    # Special markers: <E>/</E> wrap the candidate entity span in the input.
    tokenizer.add_tokens(["<E>", "</E>", "<URL>", "@USER"])
    entity_start_token_id = tokenizer.convert_tokens_to_ids(["<E>"])[0]
    input_text_processing_func_list = getTextProcessingFuncList(args)
    if args.new_data:
        (train_dataloader, valid_dataloader, test_dataloader, subtask_list) = loadNewData(
            event, entity_start_token_id, tokenizer,
            batch_size=args.batch_size, train_ratio=0.6, dev_ratio=0.15,
            shuffle_train_data_flg=False, num_workers=0,
            input_text_processing_func_list=input_text_processing_func_list)
    #print(train_dataloader.dataset)
    #print(valid_dataloader.dataset)
    #print(test_dataloader.dataset)
    #torch.save(train_dataloader, "temp/train_dataloader.bin")
    #torch.save(valid_dataloader, "temp/valid_dataloader.bin")
    #torch.save(test_dataloader, "temp/test_dataloader.bin")
    # extract batch
    folder_path = os.path.join("temp", version)
    if not os.path.isdir(folder_path):
        os.makedirs(folder_path)
    if args.new_data:
        # for dataloader, phase in zip(
        #         [train_dataloader, valid_dataloader, test_dataloader],
        #         ["train", "valid", "test"]
        # ):
        for dataloader, phase in zip(
                [train_dataloader],
                ["new"]
        ):
            all_input_ids = []
            all_entity_start_positions = []
            all_labels = []
            all_data = []
            for batch in dataloader:
                input_ids = batch["input_ids"].cpu().numpy()
                print(input_ids)
                print()
                print(tokenizer.decode(input_ids[0]))
                print()
                print(batch["batch_data"][0])
                # NOTE(review): debug early-exit -- aborts the whole process
                # after the first batch, so the save code below never runs
                # while this is present. Remove to actually extract data.
                quit()
                entity_start_positions = batch["entity_start_positions"].numpy()[:, 1]
                labels = np.vstack([batch["gold_labels"][subtask].numpy() for subtask in subtask_list]).T
                all_data.extend(batch["batch_data"])
                # Strip trailing padding (token id 0) from each sequence.
                for input_id in input_ids:
                    input_id = input_id[input_id!=0]
                    all_input_ids.append(input_id)
                all_entity_start_positions.append(entity_start_positions)
                all_labels.append(labels)
            all_entity_start_positions = np.hstack(all_entity_start_positions)
            all_labels = np.vstack(all_labels)
            print(phase)
            print(f"input_ids.shape = {len(all_input_ids)}")
            print(f"entity_start_positions.shape = {all_entity_start_positions.shape}")
            print(f"labels.shape = {all_labels.shape}")
            # save
            with h5py.File(os.path.join(folder_path, f"{event}_{phase}.h5"), 'w') as outfile:
                outfile.create_dataset("entity_start_positions", data=all_entity_start_positions)
                outfile.create_dataset("labels", data=all_labels)
            table = pd.DataFrame({"input_ids":all_input_ids}, index=np.arange(len(all_input_ids)))
            table.to_parquet(os.path.join(folder_path, f"{event}_{phase}.parquet"))
            with open(os.path.join(folder_path, f"{event}_{phase}_data.json"), 'w', encoding='utf-8') as outfile:
                json.dump(all_data, outfile, indent=2)
        with open(os.path.join(folder_path, f"{event}_subtask.json"), 'w', encoding='utf-8') as outfile:
            json.dump(subtask_list, outfile, indent=2)
def main():
    """Run data extraction for every event category in turn."""
    for event in EVENT_LIST:
        print(event)
        extract_data(event)

# Script entry point.
if __name__ == "__main__":
    main()
| 8,815 | 41.796117 | 142 | py |
JOELIN | JOELIN-master/model.py | from transformers import BertTokenizer, BertTokenizerFast, BertPreTrainedModel, BertModel, BertConfig, AdamW, get_linear_schedule_with_warmup
from transformers import AutoTokenizer, AutoModel, AutoConfig
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
import torch
class F1_Loss(nn.Module):
    '''Calculate F1 score. Can work with gpu tensors
    The original implmentation is written by Michal Haltuf on Kaggle.
    Returns
    -------
    torch.Tensor
        `ndim` == 1. epsilon <= val <= 1
    Reference
    ---------
    - https://www.kaggle.com/rejpalcz/best-loss-function-for-f1-score-metric
    - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html#sklearn.metrics.f1_score
    - https://discuss.pytorch.org/t/calculating-precision-recall-and-f1-score-in-case-of-multi-label-classification/28265/6
    - http://www.ryanzhang.info/python/writing-your-own-loss-function-module-for-pytorch/
    '''
    def __init__(self, epsilon=1e-7):
        super().__init__()
        self.epsilon = epsilon  # guards against division by zero

    def forward(self, y_pred, y_true):
        """Soft (differentiable) macro F1 loss for one binary subtask.

        Args:
            y_pred: 1-D tensor of raw logits (sigmoid is applied here).
            y_true: 1-D tensor of 0/1 targets (any numeric dtype).

        Returns:
            Scalar tensor: 1 - mean soft-F1 over the two classes.
        """
        assert y_pred.ndim == 1
        assert y_true.ndim == 1
        # FIX: follow the input tensor's device instead of hard-coded .cuda(),
        # so the loss also works on CPU and on non-default CUDA devices.
        device = y_pred.device
        y_true = F.one_hot(y_true.to(torch.int64), 2).to(torch.float32).to(device)
        y_pred = torch.sigmoid(y_pred)  # torch.sigmoid: F.sigmoid is deprecated
        # Expand the single positive probability into a 2-column
        # (negative, positive) probability matrix.
        probs = torch.zeros(y_true.shape, dtype=torch.float32, device=device)
        probs[:, 1] = y_pred
        probs[:, 0] = 1 - y_pred
        # Soft confusion counts per class (true negatives are not needed).
        tp = (y_true * probs).sum(dim=0).to(torch.float32)
        fp = ((1 - y_true) * probs).sum(dim=0).to(torch.float32)
        fn = (y_true * (1 - probs)).sum(dim=0).to(torch.float32)
        precision = tp / (tp + fp + self.epsilon)
        recall = tp / (tp + fn + self.epsilon)
        f1 = 2 * (precision * recall) / (precision + recall + self.epsilon)
        # Clamp keeps the loss in (0, 1) and the gradient finite.
        f1 = f1.clamp(min=self.epsilon, max=1 - self.epsilon)
        return 1 - f1.mean()
class MultiTaskBertForCovidEntityClassification(BertPreTrainedModel):
    """BERT encoder with one independent linear head per subtask.

    The embedding of the <E> entity-start marker is pooled out of the last
    hidden layer and fed to every subtask head; training sums per-subtask
    cross-entropy losses.
    """

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.subtasks = config.subtasks
        # FIX: use nn.ModuleDict instead of a plain dict so the per-subtask
        # heads are registered as submodules -- moved by .to(device), seen by
        # .parameters() (and hence the optimizer), and saved in state_dict().
        # A plain dict left them invisible to all of those. Indexing with
        # self.classifiers[subtask] is unchanged.
        self.classifiers = nn.ModuleDict(
            {subtask: nn.Linear(config.hidden_size, config.num_labels)
             for subtask in self.subtasks})
        self.init_weights()

    def forward(
        self,
        input_ids,
        entity_start_positions,  # (row, position) index pairs of the <E> token
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
    ):
        """Encode, pool the entity-start embedding, classify per subtask.

        Args:
            input_ids: token-id batch for BERT.
            entity_start_positions: [batch, 2] tensor of (row, column)
                indices of the entity-start marker in the sequence output.
            labels: optional dict {subtask: class-index tensor}; when given,
                the summed cross-entropy loss is appended to the outputs.

        Returns:
            Tuple: (hidden_states/attentions if configured), logits dict,
            and the loss last when labels were provided.
        """
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
        )
        # Pool the hidden state at the entity-start marker rather than the
        # [CLS] pooled output: outputs[0] is [batch, seq, hidden].
        pooled_output = outputs[0][entity_start_positions[:, 0], entity_start_positions[:, 1], :]
        pooled_output = self.dropout(pooled_output)  # [batch_size, hidden]
        # One logits tensor per subtask from its dedicated head.
        logits = {subtask: self.classifiers[subtask](pooled_output) for subtask in self.subtasks}
        outputs = outputs[2:] + (logits,)  # add hidden states and attention if they are here
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()
            # Sum the cross-entropy losses over all subtasks.
            for i, subtask in enumerate(self.subtasks):
                if i == 0:
                    loss = loss_fct(logits[subtask].view(-1, self.num_labels), labels[subtask].view(-1))
                else:
                    loss += loss_fct(logits[subtask].view(-1, self.num_labels), labels[subtask].view(-1))
            outputs = outputs + (loss,)
        return outputs  # (loss), logits, (hidden_states), (attentions)
class MultiTaskBertForCovidEntityClassificationNew(nn.Module):
    """Single-event multi-task classifier: one shared linear head whose
    output width equals the number of subtasks (one sigmoid logit each),
    with selectable pooling over the encoder's hidden layers.
    """
    def __init__(self, auto_model_version, config):
        super(MultiTaskBertForCovidEntityClassificationNew, self).__init__()
        self.num_labels = config.num_labels
        self.f1_loss = config.f1_loss
        self.device = config.device
        self.f1loss = F1_Loss().to(self.device)
        self.event = config.event
        self.embedding_type = config.embedding_type
        self.subtasks = config.subtasks
        config.num_labels = len(config.subtasks) ##TODO
        self.weighting = config.weighting
        self.encoder = AutoModel.from_pretrained(auto_model_version, config=config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        if self.embedding_type == 2: # Concat
            # Projects the concatenation of the last four layers back down.
            self.fc = nn.Linear(config.hidden_size * 4, config.hidden_size)
        if self.embedding_type == 3: # multi-head-concat
            self.fc1 = nn.Linear(config.hidden_size, config.hidden_size // 4)
            self.fc2 = nn.Linear(config.hidden_size, config.hidden_size // 4)
            self.fc3 = nn.Linear(config.hidden_size, config.hidden_size // 4)
            self.fc4 = nn.Linear(config.hidden_size, config.hidden_size // 4)
            self.fc_final = nn.Linear(config.hidden_size, config.hidden_size)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        ## input [batch size, hidden size]
        ## output [batch size, #subtask]
    def resize_token_embeddings(self, length):
        # Delegate to the underlying encoder (needed after adding tokens).
        self.encoder.resize_token_embeddings(length)
    @staticmethod
    def build_loss_weight(y, factor=10):
        # Per-element BCE weights: positives keep weight 1, negatives 1/factor.
        weight = (y*(factor-1) + 1) / factor
        return weight
    def forward(self,
                input_ids,
                entity_start_positions,
                labels=None):
        """Encode, pool the entity-start embedding per the configured
        embedding_type, and emit one logit per subtask; when ``labels``
        (a dict {subtask: 0/1 tensor}) is given, also return the loss."""
        outputs = self.encoder(input_ids)
        if self.embedding_type == 0: # last layer
            pooled_output = outputs[0][entity_start_positions[:, 0], entity_start_positions[:, 1], :]
        elif self.embedding_type == 1: # sum
            # Sum of the entity-start embedding over the last four layers.
            hidden_states = torch.stack(
                [x[entity_start_positions[:, 0], entity_start_positions[:, 1], :]
                 for x in outputs[2][-4:]], dim=0)
            pooled_output = torch.sum(hidden_states, dim=0)
        elif self.embedding_type == 2: # Concat
            hidden_states = tuple([x[entity_start_positions[:, 0], entity_start_positions[:, 1], :]
                                   for x in outputs[2][-4:]])
            pooled_output = self.fc(torch.cat(hidden_states, dim=1))
        elif self.embedding_type == 3:
            # NOTE(review): all four heads read the SAME hidden layer
            # (outputs[2][-4]); a multi-head over the last four layers would
            # index -4, -3, -2, -1 -- confirm which was intended.
            hidden_states = []
            hidden_states.append(
                self.fc1(outputs[2][-4][entity_start_positions[:, 0], entity_start_positions[:, 1], :]))
            hidden_states.append(
                self.fc2(outputs[2][-4][entity_start_positions[:, 0], entity_start_positions[:, 1], :]))
            hidden_states.append(
                self.fc3(outputs[2][-4][entity_start_positions[:, 0], entity_start_positions[:, 1], :]))
            hidden_states.append(
                self.fc4(outputs[2][-4][entity_start_positions[:, 0], entity_start_positions[:, 1], :]))
            pooled_output = self.fc_final(torch.cat(hidden_states, dim=1))
        pooled_output = self.dropout(pooled_output) ## [batch_size, 768]
        # Get logits for each subtask
        all_logits = self.classifier(pooled_output) #[batch size, # subtask]
        if labels is not None:
            # Stack the per-subtask label vectors into a [batch, #subtask] matrix.
            y = torch.stack([labels[subtask] for subtask in labels.keys()], dim =1).type(torch.float)
            if self.weighting:
                weight = self.build_loss_weight(y)
            else:
                weight = None
            # TODO: currently, weight is only applicable to BCE loss
            if self.f1_loss:
                loss = 0
                for i in range(len(self.subtasks)):
                    loss += self.f1loss(all_logits[:,i],y[:,i])
            else:
                loss = F.binary_cross_entropy(torch.sigmoid(all_logits), y, weight=weight)
            output = (all_logits, loss)
        else:
            output = (all_logits, )
        return output # logits, (loss)
class MultiTaskBertForCovidEntityClassificationShare(nn.Module):
    """Shared-across-events variant: one encoder and one linear head over the
    union of ALL events' subtasks; the target ``y`` is passed directly as a
    [batch, #subtask] float matrix instead of a per-subtask label dict.
    """
    def __init__(self, auto_model_version, config):
        super(MultiTaskBertForCovidEntityClassificationShare, self).__init__()
        self.num_labels = config.num_labels
        self.f1_loss = config.f1_loss
        self.device = config.device
        self.f1loss = F1_Loss().to(self.device)
        self.embedding_type = config.embedding_type
        self.subtasks = config.subtasks
        config.num_labels = len(config.subtasks) ##TODO
        self.weighting = config.weighting
        self.encoder = AutoModel.from_pretrained(auto_model_version, config=config)
        # .get with a default tolerates configs without hidden_dropout_prob
        # (e.g. non-BERT architectures).
        self.dropout = nn.Dropout(config.__dict__.get("hidden_dropout_prob", 0.1))
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)
        if self.embedding_type == 2: # Concat
            # Projects the concatenation of the last four layers back down.
            self.fc = nn.Linear(config.hidden_size * 4, config.hidden_size)
        if self.embedding_type == 3: # multi-head-concat
            self.fc1 = nn.Linear(config.hidden_size, config.hidden_size // 4)
            self.fc2 = nn.Linear(config.hidden_size, config.hidden_size // 4)
            self.fc3 = nn.Linear(config.hidden_size, config.hidden_size // 4)
            self.fc4 = nn.Linear(config.hidden_size, config.hidden_size // 4)
            self.fc_final = nn.Linear(config.hidden_size, config.hidden_size)
    def resize_token_embeddings(self, length):
        # Delegate to the underlying encoder (needed after adding tokens).
        self.encoder.resize_token_embeddings(length)
    def build_loss_weight(self, y, factor=10):
        # Per-element BCE weights: positives keep weight 1, negatives 1/factor.
        weight = (y*(factor-1) + 1) / factor
        return weight
    def forward(self,
                input_ids,
                entity_start_positions,
                y=None):
        """Encode, pool the entity-start embedding per embedding_type, and
        emit one logit per subtask; when ``y`` ([batch, #subtask] floats)
        is given, also return the loss."""
        outputs = self.encoder(input_ids)
        if self.embedding_type == 0: # last layer
            pooled_output = outputs[0][entity_start_positions[:, 0], entity_start_positions[:, 1], :]
        elif self.embedding_type == 1: # sum
            # Sum of the entity-start embedding over the last four layers.
            hidden_states = torch.stack(
                [x[entity_start_positions[:, 0], entity_start_positions[:, 1], :]
                 for x in outputs[2][-4:]], dim=0)
            pooled_output = torch.sum(hidden_states, dim=0)
        elif self.embedding_type == 2: # Concat
            hidden_states = tuple([x[entity_start_positions[:, 0], entity_start_positions[:, 1], :]
                                   for x in outputs[2][-4:]])
            pooled_output = self.fc(torch.cat(hidden_states, dim=1))
        elif self.embedding_type == 3:
            # NOTE(review): all four heads read the SAME hidden layer
            # (outputs[2][-4]); a multi-head over the last four layers would
            # index -4, -3, -2, -1 -- confirm which was intended.
            hidden_states = []
            hidden_states.append(
                self.fc1(outputs[2][-4][entity_start_positions[:, 0], entity_start_positions[:, 1], :]))
            hidden_states.append(
                self.fc2(outputs[2][-4][entity_start_positions[:, 0], entity_start_positions[:, 1], :]))
            hidden_states.append(
                self.fc3(outputs[2][-4][entity_start_positions[:, 0], entity_start_positions[:, 1], :]))
            hidden_states.append(
                self.fc4(outputs[2][-4][entity_start_positions[:, 0], entity_start_positions[:, 1], :]))
            pooled_output = self.fc_final(torch.cat(hidden_states, dim=1))
        pooled_output = self.dropout(pooled_output) ## [batch_size, 768]
        # Get logits for each subtask
        all_logits = self.classifier(pooled_output) #[batch size, # subtask]
        if y is not None:
            if self.weighting:
                weight = self.build_loss_weight(y)
            else:
                weight = None
            # TODO: currently, weight is only applicable to BCE loss
            if self.f1_loss:
                loss = 0
                for i in range(len(self.subtasks)):
                    loss += self.f1loss(all_logits[:,i],y[:,i])
            else:
                loss = F.binary_cross_entropy(torch.sigmoid(all_logits), y, weight=weight)
            output = (all_logits, loss)
        else:
            output = (all_logits, )
        return output # logits, (loss)
| 13,239 | 42.267974 | 141 | py |
JOELIN | JOELIN-master/prediction_shared.py | #!/usr/bin/env python3
import os
import csv
import time
import copy
import argparse
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn import metrics
# from prediction import new_data_predict
from transformers import (
AutoTokenizer, AutoConfig,
AdamW, get_linear_schedule_with_warmup)
import torch
from model import MultiTaskBertForCovidEntityClassificationShare
from preprocessing.utils import (
make_dir_if_not_exists, format_time, log_list, plot_train_loss,
saveToJSONFile)
from torch.utils import data as torch_data
from prediction import prediction_to_submission
from preprocessing.loadData import loadData, loadNewData
import logging
import json
import h5py
import pickle
# The five COVID-19 Twitter event categories handled by this pipeline.
EVENT_LIST = ['positive', 'negative', 'can_not_test', 'death', 'cure_and_prevention']
# Show all columns when printing pandas tables (metric reports are wide).
pd.set_option('display.max_columns', None)
################### util ####################
def parse_arg():
    """Build and parse command-line arguments for shared-model training
    and prediction.

    Returns:
        argparse.Namespace with model/output paths and training options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-m", "--model_dir", help="Name of the saved model directory", type=str,
                        default='global_model_e30_w10_concatL4')
    parser.add_argument("-o", "--output_dir", help="Path to the output directory", type=str,
                        default='./results/global_1')
    # NOTE(review): action="store_false" with default=True means passing -rt
    # DISABLES retraining -- confirm the flag name matches the intent.
    parser.add_argument("-rt", "--retrain", help="True if the model needs to be retrained", action="store_false",
                        default=True)
    parser.add_argument("-bs", "--batch_size", help="Train batch size for BERT model", type=int, default=16)
    parser.add_argument("-E", "--embedding_type",
                        help=("Type of Embedding, 0 for last, 1 for Sum L4 and 2 for concat L4,"
                              "3 for multihead concat"),
                        type=int, default=2)
    parser.add_argument("-lr", "--learning_rate", help="learning rate", type=float, default=2e-5)
    parser.add_argument("-d", "--device", help="Device for running the code", type=str, default="cuda")
    parser.add_argument("-pm", "--pretrained_model", help="pretrained model version", type=str,
                        default="digitalepidemiologylab/covid-twitter-bert")
    parser.add_argument("-w", "--weighting", help="weighting for classes, 10 means 0.1:1, 5 means 0.2:1", type=int,
                        default=10)
    parser.add_argument("-fl", "--f1_loss", help="using F1 loss", type=str_to_bool, default=False)
    parser.add_argument("-bu", "--batch_size_update", type=int, default=32)
    parser.add_argument("-new", "--new", type=str_to_bool, default=True)
    # Add Data Clean Options
    parser.add_argument("-ca", "--clean_all", action="store_true", default=False)
    return parser.parse_args()
def str_to_bool(value):
    """Interpret a boolean-ish CLI value.

    Accepts actual bools unchanged; otherwise matches common true/false
    spellings case-insensitively. Raises ValueError for anything else.
    """
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in {'true', 't', '1', 'yes', 'y'}:
        return True
    if lowered in {'false', 'f', '0', 'no', 'n'}:
        return False
    raise ValueError('{} is not a valid boolean value'.format(value))
################### training functions ####################
# dataset
# testing/val script
def evaluation(model, dataloader, device, threshold=0.5, save_sample_flg=False):
    """Run the model over `dataloader` and compute per-subtask binary metrics.

    Args:
        model: multi-task classifier exposing `.subtasks` (list of task names).
        dataloader: yields (input_ids, entity_start_positions, y) batches.
        device: torch device to move batches onto.
        threshold: scalar cut-off shared by all subtasks, or a per-subtask
            sequence indexed like `model.subtasks`.
        save_sample_flg: when True, dump TP/FP/TN/FN samples to disk.

    Returns:
        (precision, recall, f1, prediction, confusion_matrix,
        classification_report) — all indexed per label column.
    """
    model.eval()
    total_preds, total_labels, total_batch_data = prepare_for_prediction(
        model, dataloader, device)
    # Scalar threshold applies to every subtask at once; otherwise threshold
    # is per-subtask (e.g. tuned on the dev set).
    if type(threshold) in {float, np.float32, np.float64}:
        prediction = (total_preds > threshold).astype(int)
    else:
        prediction = np.vstack(
            [(total_preds[:, subtask_idx] > threshold[subtask_idx]).astype(int)
             for subtask_idx in range(len(model.subtasks))]).T
    if save_sample_flg:
        # NOTE(review): total_batch_data is empty (see prepare_for_prediction),
        # so the dumped JSONL files will contain empty lists — verify.
        save_sample_to_file(prediction, total_labels, total_batch_data,
                            model.subtasks, "shared")
    # Calculating metrics (one score per label column; zero_division=0 avoids
    # warnings for subtasks with no positive predictions/labels).
    precision = np.array(
        [metrics.precision_score(total_labels[:, idx], prediction[:, idx], zero_division=0)
         for idx in range(total_labels.shape[1])])
    recall = np.array(
        [metrics.recall_score(total_labels[:, idx], prediction[:, idx], zero_division=0)
         for idx in range(total_labels.shape[1])])
    f1 = np.array(
        [metrics.f1_score(total_labels[:, idx], prediction[:, idx], zero_division=0)
         for idx in range(total_labels.shape[1])])
    # f1_micro = np.array(
    #     [metrics.f1_score(total_labels[:,idx], prediction[:,idx], zero_division=0, average='micro')
    #      for idx in range(total_labels.shape[1])])
    # Each row is the flattened 2x2 matrix (TN, FP, FN, TP) for one subtask;
    # labels=[0, 1] keeps the shape fixed even when a class is absent.
    confusion_matrix = np.array(
        [metrics.confusion_matrix(total_labels[:, idx], prediction[:, idx], labels=[0, 1]).ravel()
         for idx in range(total_labels.shape[1])])
    # if confusion_matrix.size!=36:
    #     print('not 36')
    classification_report = [
        metrics.classification_report(total_labels[:, idx], prediction[:, idx], output_dict=True, zero_division=0)
        for idx in range(total_labels.shape[1])]
    return precision, recall, f1, prediction, confusion_matrix, classification_report
def save_sample_to_file(total_preds, total_labels, total_batch_data,
                        subtask_list, event):
    """Bucket samples into TN/FP/FN/TP per subtask and dump each as JSONL.

    Writes ./test-samples/<event>/<subtask>-{TN,FP,FN,TP}.jsonl, keyed by the
    (gold label, predicted label) pair of every sample. Returns True.
    """
    save_dir = os.path.join('./test-samples', event)
    make_dir_if_not_exists(save_dir)
    assert len(subtask_list) == total_preds.shape[1]
    for subtask_idx, subtask in enumerate(subtask_list):
        # (gold, pred) -> destination file for that confusion-matrix cell.
        filename_dict = {
            (0, 0): os.path.join(save_dir, f"{subtask}-TN.jsonl"),
            (0, 1): os.path.join(save_dir, f"{subtask}-FP.jsonl"),
            (1, 0): os.path.join(save_dir, f"{subtask}-FN.jsonl"),
            (1, 1): os.path.join(save_dir, f"{subtask}-TP.jsonl"),
        }
        batch_data_dict = {(0, 0): [], (0, 1): [], (1, 0): [], (1, 1): []}
        for data_idx, (label, pred) in enumerate(
                zip(total_labels[:, subtask_idx], total_preds[:, subtask_idx])):
            batch_data_dict[(label, pred)].append(total_batch_data[data_idx])
        for label_pred_tuple, batch_data_list in batch_data_dict.items():
            saveToJSONFile(batch_data_list, filename_dict[label_pred_tuple])
    return True
def prepare_for_prediction(model, dataloader, device):
    """Run inference over a dataloader and collect sigmoid scores and labels.

    Returns:
        total_preds: (num_samples, num_subtasks) sigmoid probabilities.
        total_labels: matching array of gold labels.
        total_batch_data: NOTE(review) always returned empty — the
            `batch['batch_data']` accumulation below is commented out.
    """
    total_preds = []
    total_labels = []
    total_batch_data = []
    model.eval()
    with torch.no_grad():
        for step, batch in enumerate(dataloader):
            # Batches come from my_collate_fn as (input_ids, positions, y).
            input_dict = {"input_ids": batch[0].to(device),
                          "entity_start_positions": batch[1].to(device),
                          "y": batch[2].to(device)}
            logits, _ = model(**input_dict)
            # Post-model subtask information aggregation.
            preds = torch.sigmoid(logits)
            preds = preds.detach().cpu().numpy()
            total_preds.append(preds)
            total_labels.append(batch[2].cpu().numpy())
            # total_batch_data += batch['batch_data']
    total_preds = np.vstack(total_preds)
    total_labels = np.vstack(total_labels)
    return total_preds, total_labels, total_batch_data
# prediction script
# NOTE We didn't use it
# def make_prediction(model, dataloader, device, threshold=0.5):
# # run model and predict without having "y" label
# # only return the prediction
# model.eval()
# dev_logits = []
# for step, batch in enumerate(dataloader):
# input_dict = {"input_ids": batch["input_ids"].to(device),
# "entity_start_positions": batch["entity_start_positions"].to(device)}
# logits, _ = model(**input_dict)
# # Post-model subtask information aggregation.
# logits = list(logits.detach().cpu().numpy())
# dev_logits += logits
# dev_logits = np.array(dev_logits)
# # Assessment on the results according to labels and logits.
# if type(threshold) == float:
# prediction = (dev_logits > threshold).astype(int)
# else:
# prediction = np.vstack([(dev_logits[:,subtask_idx] > threshold[subtask_idx]).astype(int) for subtask_idx in range(len(model.subtasks))])
# return prediction
def result_to_tsv(results, model_config, taskname, output_dir):
    """Append one TSV row per subtask to <output_dir>/result.tsv.

    Args:
        results: dict with per-subtask metric dicts plus the aggregate keys
            "best_dev_threshold", "best_dev_F1s", "dev_t_F1_P_Rs" (each keyed
            by subtask). NOTE: mutated in place — the aggregate values are
            copied into each subtask's own dict.
        model_config: dict with at least a "model" name entry.
        taskname: event name used as the first TSV column.
        output_dir: directory containing result.tsv (opened in append mode;
            the header is written once by main()).
    """
    # results = loadFromJSONFile(results_file)
    # model_config = loadFromJSONFile(model_config_file)
    # We will save the classifier results and model config for each subtask in this dictionary
    all_subtasks_results_and_model_configs = dict()
    all_task_results_and_model_configs = dict()
    all_task_question_tags = dict()
    tested_tasks = list()
    for key in results:
        # Every key that is not an aggregate entry is a subtask name.
        if key not in ["best_dev_threshold", "best_dev_F1s", "dev_t_F1_P_Rs"]:
            tested_tasks.append(key)
            results[key]["best_dev_threshold"] = results["best_dev_threshold"][key]
            results[key]["best_dev_F1"] = results["best_dev_F1s"][key]
            results[key]["dev_t_F1_P_Rs"] = results["dev_t_F1_P_Rs"][key]
            all_subtasks_results_and_model_configs[key] = results[key], model_config
    all_task_results_and_model_configs[taskname] = all_subtasks_results_and_model_configs
    all_task_question_tags[taskname] = tested_tasks
    # Read the results for each task and save them in csv file
    # results_tsv_save_file = os.path.join("results", "all_experiments_multitask_bert_entity_classifier_results.tsv")
    # NOTE: After fixing the USER and URL tags
    results_tsv_save_file = os.path.join(output_dir, "result.tsv")
    with open(results_tsv_save_file, "a") as tsv_out:
        writer = csv.writer(tsv_out, delimiter='\t')
        # header = ["Event", "Sub-task", "model name", "accuracy", "CM", "pos. F1", "dev_threshold", "dev_N",
        #           "dev_F1", "dev_P", "dev_R", "dev_TP", "dev_FP", "dev_FN", "N", "F1", "P", "R", "TP", "FP", "FN"]
        # writer.writerow(header)
        for taskname, question_tags in all_task_question_tags.items():
            current_task_results_and_model_configs = all_task_results_and_model_configs[taskname]
            for question_tag in question_tags:
                results_sub, model_config = current_task_results_and_model_configs[question_tag]
                # Extract results_sub
                classification_report = results_sub["Classification Report"]
                positive_f1_classification_report = classification_report['1.0']['f1-score']
                accuracy = classification_report['accuracy']
                CM = results_sub["CM"]
                # Best threshold and dev F1
                best_dev_threshold = results_sub["best_dev_threshold"]
                dev_t_F1_P_Rs = results_sub["dev_t_F1_P_Rs"]
                best_dev_threshold_index = [info[0] for info in dev_t_F1_P_Rs].index(best_dev_threshold)
                # Each entry in dev_t_F1_P_Rs is of the format t, dev_F1, dev_P, dev_R, dev_TP + dev_FN, dev_TP, dev_FP, dev_FN
                t, dev_F1, dev_P, dev_R, dev_N, dev_TP, dev_FP, dev_FN = dev_t_F1_P_Rs[best_dev_threshold_index]
                # Alan's metrics
                F1 = results_sub["F1"]
                P = results_sub["P"]
                R = results_sub["R"]
                TP = results_sub["TP"]
                FP = results_sub["FP"]
                FN = results_sub["FN"]
                N = results_sub["N"]
                # Extract model config
                model_name = model_config["model"]
                row = [taskname, question_tag, model_name, accuracy, CM,
                       positive_f1_classification_report, best_dev_threshold,
                       dev_N, dev_F1, dev_P, dev_R, dev_TP, dev_FP, dev_FN,
                       N, F1, P, R, TP, FP, FN]
                writer.writerow(row)
def cal_micro_f1(confusion_matrix):
    """Compute micro-averaged F1 by pooling counts over all subtasks.

    Args:
        confusion_matrix: array of shape (num_subtasks, 4) where each row is
            a flattened binary confusion matrix (TN, FP, FN, TP), as built in
            evaluation() via sklearn's confusion_matrix(...).ravel().

    Returns:
        float: micro F1 computed from the pooled TP/FP/FN. Returns 0.0 when a
        denominator is zero (including an empty matrix, which previously
        raised NameError).
    """
    TP, FP, FN = 0.0, 0.0, 0.0
    len_subtasks = confusion_matrix.shape[0]
    for i in range(len_subtasks):
        cur_confusion_matrix = confusion_matrix[i, :]
        TN_tmp, FP_tmp, FN_tmp, TP_tmp = cur_confusion_matrix.ravel()
        TP += TP_tmp
        FP += FP_tmp
        FN += FN_tmp
    # Precision/recall/F1 are derived once from the pooled counts. (The
    # original recomputed them on every loop iteration and only the final
    # values were used; it also kept an unused micro_f1_list.)
    P = TP / (TP + FP) if TP + FP > 0 else 0.0
    R = TP / (TP + FN) if TP + FN > 0 else 0.0
    F1 = 2.0 * P * R / (P + R) if P + R > 0 else 0.0
    return F1
def output_precision_recall_f1(precision, recall, f1, subtask_list=None):
    """Print and return a 3xK DataFrame of precision/recall/F1 per subtask.

    When subtask_list is None, columns are auto-named "Label 0..K-1".
    """
    if subtask_list is None:
        subtask_list = [f"Label {i}" for i in np.arange(f1.shape[0])]
    stacked = np.vstack([precision, recall, f1])
    table = pd.DataFrame(stacked,
                         index=["Precision", "Recall", "F1"],
                         columns=subtask_list)
    print(table)
    return table
def h5_load(filename, data_list, dtype=None, verbose=False):
    """Load the named datasets from an HDF5 file into numpy arrays.

    Args:
        filename: path to the .h5 file.
        data_list: dataset names to read, in order.
        dtype: optional dtype override applied to every dataset; when None,
            each dataset keeps its stored dtype.
        verbose: print "name = shape [dtype]" for every loaded array.

    Returns:
        list of numpy arrays, parallel to data_list.
    """
    loaded = []
    with h5py.File(filename, 'r') as infile:
        for name in data_list:
            target_dtype = dtype if dtype is not None else infile[name].dtype
            buffer = np.empty(infile[name].shape, dtype=target_dtype)
            # read_direct avoids an extra intermediate copy.
            infile[name].read_direct(buffer)
            loaded.append(buffer)
        if verbose:
            report = "\n".join(
                "{} = {} [{}]".format(name, str(arr.shape), str(arr.dtype))
                for name, arr in zip(data_list, loaded)
            )
            print(report)
            print()
    return loaded
class SharedDataset(torch_data.Dataset):
    """Dataset of parallel (input_ids, entity position, label) sequences.

    The three sequences must have equal length; __getitem__ returns the
    index-aligned triple.
    """

    def __init__(self, x, pos, y):
        # Store the three parallel sequences as-is; no copying or validation.
        self.x = x
        self.pos = pos
        self.y = y

    def __len__(self):
        return len(self.x)

    def __getitem__(self, index):
        return (self.x[index], self.pos[index], self.y[index])
def my_collate_fn(data):
    """Collate (input_ids, entity_pos, label) samples into batch tensors.

    input_ids are right-padded with 0 to the longest sample in the batch.
    The returned `pos` pairs each batch row index with its entity start
    offset, matching the (row, column) index format the model expects.

    Returns:
        (x LongTensor, pos LongTensor, y FloatTensor)
    """
    max_len = max(sample[0].shape[0] for sample in data)
    padded = np.zeros((len(data), max_len), dtype=np.int64)
    for row, sample in enumerate(data):
        ids = sample[0]
        padded[row, :ids.shape[0]] = ids
    labels = np.vstack([sample[2] for sample in data])
    # Pair each row index with its entity start offset: shape (batch, 2).
    offsets = np.hstack([sample[1] for sample in data])
    positions = np.vstack([np.arange(offsets.shape[0]), offsets]).T
    return (torch.LongTensor(padded),
            torch.LongTensor(positions),
            torch.FloatTensor(labels))
def train(logging, args):
    """Load data + a saved shared model, evaluate it, and emit submissions.

    Despite the name, no training happens here (the training loop is
    commented out): the function loads the preprocessed test/new data for
    every event, restores the saved model from args.model_dir, reports
    test-set micro/macro F1, then thresholds predictions on the "new" split
    and writes per-event submission files.

    Args:
        logging: the configured `logging` module (passed in from main()).
        args: parsed command-line namespace from parse_arg().
    """
    # parameter setting
    max_len = 100  # TODO: compute the statistic of the length
    # subtask_num = 5 # might need to re-assign value after loading the data
    # pretrained_bert_version = "bert-base-cased"
    # pretrained_bert_version = "digitalepidemiologylab/covid-twitter-bert"
    # pretrained_bert_version = "roberta-base"
    pretrained_bert_version = args.pretrained_model
    if torch.cuda.is_available():
        device = torch.device(args.device)
        logging.info(f"Using {args.device} -- GPU{torch.cuda.get_device_name(0)} to train")
    else:
        device = torch.device("cpu")
        logging.info(f"Using CPU to train")
    # load data
    # NOTE(review): both branches of the inner else pick "normal", so
    # clean data is only used when clean_all AND new — confirm intended.
    if args.clean_all:
        if args.new:
            data_folder = os.path.join("temp", "clean")
        else:
            data_folder = os.path.join("temp", "normal")
    else:
        if args.new:
            data_folder = os.path.join("temp", "normal")
        else:
            data_folder = os.path.join("temp", "normal")
    subtask_list = []
    # Per-phase accumulators; only "test" and "new" are filled below.
    data = {
        "train": {"input_ids": [], "entity_start_positions": [], "labels": []},
        "valid": {"input_ids": [], "entity_start_positions": [], "labels": []},
        "test": {"input_ids": [], "entity_start_positions": [], "labels": []},
        "new": {"input_ids": [], "entity_start_positions": [], "labels": []},
    }
    for event in EVENT_LIST:
        print(f"loading {event} data")
        # load subtask list
        with open(os.path.join(data_folder, f"{event}_subtask.json"), 'r', encoding='utf-8') as infile:
            event_subtask_list = json.load(infile)
        # Subtasks are globally namespaced as "<event>_<subtask>".
        subtask_list.extend(f"{event}_{t}" for t in event_subtask_list)
        # load input_ids, positions, and labels
        for phase in ["test", "new"]:
            table = pd.read_parquet(os.path.join(data_folder, f"{event}_{phase}.parquet"))
            data[phase]["input_ids"].extend(table["input_ids"].to_list())
            entity_start_positions, labels = h5_load(os.path.join(data_folder, f"{event}_{phase}.h5"),
                                                     data_list=["entity_start_positions", "labels"])
            data[phase]["entity_start_positions"].append(entity_start_positions)
            data[phase]["labels"].append(labels)
            print(event, phase, table.shape)
    # Merge per-event label matrices into one block-diagonal matrix: each
    # event occupies its own row band (samples) and column band (subtasks).
    for phase in ["test", "new"]:
        data[phase]["entity_start_positions"] = np.hstack(data[phase]["entity_start_positions"])
        num_row = sum([l.shape[0] for l in data[phase]["labels"]])
        num_col = sum([l.shape[1] for l in data[phase]["labels"]])
        new_labels = np.zeros([num_row, num_col], dtype=np.int32)
        print(f"{phase} new_labels.shape = {new_labels.shape}")
        offset_row = 0
        offset_col = 0
        for l in data[phase]["labels"]:
            new_labels[offset_row:offset_row + l.shape[0], offset_col:offset_col + l.shape[1]] = l
            offset_row += l.shape[0]
            offset_col += l.shape[1]
        data[phase]["labels"] = new_labels
    print(subtask_list)
    # build dataloader
    test_dataloader = torch_data.DataLoader(
        SharedDataset(data["test"]["input_ids"], data["test"]["entity_start_positions"], data["test"]["labels"]),
        num_workers=2,
        batch_size=128,
        shuffle=False,
        collate_fn=my_collate_fn,
    )
    new_dataloader = torch_data.DataLoader(
        SharedDataset(data["new"]["input_ids"], data["new"]["entity_start_positions"], data["new"]["labels"]),
        num_workers=2,
        batch_size=128,
        shuffle=False,
        collate_fn=my_collate_fn,
    )
    # data loading
    tokenizer = AutoTokenizer.from_pretrained(pretrained_bert_version)
    # Special tokens must match the ones used when the data was preprocessed.
    tokenizer.add_tokens(["<E>", "</E>", "<URL>", "@USER"])
    # tokenizer.save_pretrained(event_output_dir)
    entity_start_token_id = tokenizer.convert_tokens_to_ids(["<E>"])[0]
    output_hidden_states = True if args.embedding_type > 0 else False
    config = AutoConfig.from_pretrained(
        pretrained_bert_version, output_hidden_states=output_hidden_states)
    config.subtasks = subtask_list
    config.device = args.device
    config.f1_loss = args.f1_loss  ##f1 loss flag
    config.weighting = args.weighting
    config.embedding_type = args.embedding_type
    model = MultiTaskBertForCovidEntityClassificationShare(pretrained_bert_version, config=config)
    model.resize_token_embeddings(len(tokenizer))
    model.to(args.device)  ## TODO old model move classifier
    # init optimizer and scheduler
    # optimizer = AdamW(model.parameters(), lr=args.learning_rate, eps=1e-8)
    # total_steps = len(train_dataloader) * args.n_epochs
    # scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0, num_training_steps=total_steps)
    # best_model = None
    # best_score = 0.0
    # best_epoch = 0
    # training_stats = []
    # epoch_train_loss = list()
    # start training
    # logging.info(f"Initiating training loop for {args.n_epochs} epochs...")
    # total_start_time = time.time()
    # if args.batch_size_update != -1:
    #     accumulation_steps = args.batch_size_update // args.batch_size
    # finish training
    # print("Finished Training!")
    # logging.info(f"Training complete with total Train time:{format_time(time.time() - total_start_time)}")
    # log_list(training_stats)
    # print(f"Best Validation Score = {best_score} at {best_epoch}")
    # model_dir = './results/global_model_e30_w10_concatL4'
    #model_dir = os.path.join('results',args.model_dir)
    model_dir = os.path.join(args.model_dir)
    # Restore classifier-head weights only: BERT weights come from the
    # pretrained checkpoint, so drop them (and the position_ids buffer).
    weights = torch.load(os.path.join(model_dir, "model.bin"))
    weights = {n:w for n, w in weights.items() if n.split(".")[0] != "bert"}
    weights = {n:w for n, w in weights.items() if n != "encoder.embeddings.position_ids"}
    model.load_state_dict(weights)
    # model.load_state_dict(best_model)
    # #model.save_pretrained(event_output_dir)
    # torch.save(best_model, os.path.join(event_output_dir, "model.bin"))
    # Plot the train loss trajectory in a plot
    # train_loss_trajectory_plot_file = os.path.join(args.output_dir, "train_loss_trajectory.png")
    # logging.info(f"Saving the Train loss trajectory at {train_loss_trajectory_plot_file}")
    # plot_train_loss(epoch_train_loss, train_loss_trajectory_plot_file)
    # running testing
    # (1) probing threshold for each subtask
    # (2) save threshold
    # precision, recall, f1, prediction = evaluation(model, test_dataloader, device=device)
    print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
    print("Testing Result without Post-processing")
    precision, recall, f1, prediction, confusion_matrix, classification_report = evaluation(model, test_dataloader,
                                                                                            device=device)
    mf1 = cal_micro_f1(confusion_matrix)
    print(f"Micro F1 for each task: {mf1}")
    f1 = np.mean(f1)
    print(f"Macro F1: {f1}")
    # print("post_processing!")
    # results, model_config = post_processing(args, model, valid_dataloader, test_dataloader, event_output_dir,
    #                                         device=device)
    print("loading results.json!")
    # result_to_tsv(results, model_config, event, args.output_dir)
    # Per-subtask thresholds were saved alongside the model at training time.
    with open(os.path.join(model_dir,'results.json'), "r") as read_file:
        results = json.load(read_file)
    total_preds, total_labels, total_batch_data = prepare_for_prediction(model, new_dataloader, device)
    with open("debug_new.pkl", 'wb') as outfile:
        pickle.dump({"preds":total_preds, "labels":total_labels, "data":total_batch_data}, outfile)
    #total_preds, total_labels, total_batch_data = prepare_for_prediction(model, test_dataloader, device)
    #with open("debug_test.pkl", 'wb') as outfile:
    #    pickle.dump({"preds":total_preds, "labels":total_labels, "data":total_batch_data}, outfile)
    threshold = results["best_dev_threshold"]
    prediction = np.vstack(  ### (28898, 33)
        [(total_preds[:, i] > threshold[subtask]).astype(int)
         for i, subtask in enumerate(model.subtasks)]).T
    # total_batch_data = [] ##(28898
    # Rows of `prediction` are grouped per event in EVENT_LIST order; slice
    # out each event's band and write its submission file.
    start = 0
    for event in EVENT_LIST:
        total_batch_data = []
        with open(os.path.join(data_folder, f"{event}_new_data.json"), 'r', encoding='utf-8') as infile:
            total_batch_data_tmp = json.load(infile)
        total_batch_data.extend(total_batch_data_tmp)
        # NOTE(review): substring matching — relies on no event name being a
        # substring of another event's subtask names.
        subtask_list_event = [i for i in model.subtasks if event in i]
        # print(subtask_list_event)
        preds_index = []
        for i, subtask in enumerate(model.subtasks):
            if subtask in subtask_list_event:
                preds_index.append(i)
                # print(i,j)
        preds_index = np.array(preds_index)
        event_prediction = prediction[start:start + len(total_batch_data), preds_index]
        print("==========================================")
        print(event)
        print(preds_index)
        print(start, start+len(total_batch_data))
        print("prediction.shape", prediction.shape)
        print("event_prediction.shape", event_prediction.shape)
        # Strip the "<event>_" prefix back off for the submission format.
        subtask_list_event = [i[len(event) + 1:] for i in subtask_list_event]
        prediction_to_submission(event_prediction, total_batch_data, event, subtask_list_event, suffix=args.model_dir.split("/")[-1])
        start += len(total_batch_data)
    print("generating submission files!")
    # for event in EVENT_LIST:
    #     new_data_predict(event, logging, args, model)
def main():
    """Entry point: prepare output dir and result.tsv, set up logging, run train()."""
    args = parse_arg()
    make_dir_if_not_exists(args.output_dir)
    results_tsv_save_file = os.path.join(args.output_dir, "result.tsv")
    # Start each run from a fresh result.tsv with just the header row.
    if os.path.exists(results_tsv_save_file):
        os.remove(results_tsv_save_file)
    else:
        print("Can not delete the file as it doesn't exists")
    with open(results_tsv_save_file, "a") as tsv_out:
        writer = csv.writer(tsv_out, delimiter='\t')
        header = ["Event", "Sub-task", "model name", "accuracy", "CM", "pos. F1", "dev_threshold", "dev_N",
                  "dev_F1", "dev_P", "dev_R", "dev_TP", "dev_FP", "dev_FN", "N", "F1", "P", "R", "TP", "FP", "FN"]
        writer.writerow(header)
    # Remove any previously installed handlers so basicConfig takes effect,
    # then log to both a file and the console.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logfile = os.path.join(args.output_dir, "train_output.log")
    logging.basicConfig(level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s",
                        handlers=[logging.FileHandler(logfile, mode='w'), logging.StreamHandler()])
    # The configured `logging` module itself is handed to train() as its logger.
    train(logging, args)
# Script entry point.
if __name__ == "__main__":
    main()
| 24,679 | 40.689189 | 146 | py |
JOELIN | JOELIN-master/preprocessing/loadData.py |
from transformers import BertTokenizer, BertTokenizerFast
import torch
from torch.utils.data import Dataset, DataLoader
import logging
import os
from preprocessing.preprocessData import splitDatasetIntoTrainDevTest, preprocessDataAndSave
from preprocessing.utils import loadFromPickleFile
from preprocessing import const
class COVID19TaskDataset(Dataset):
    """Thin torch Dataset wrapper over a pre-built list of instances."""

    def __init__(self, instance_list):
        super().__init__()
        # Instances are stored as-is; batching/tokenization happens in the
        # DataLoader's collate function.
        self.instance_list = instance_list

    def __len__(self):
        return len(self.instance_list)

    def __getitem__(self, index):
        return self.instance_list[index]
class TokenizeCollator():
    """DataLoader collate function: tokenize a batch and gather gold labels.

    Each raw batch item is (input_text, subtask_label_dict, tweet_id,
    token_text); __call__ returns a dict of model-ready tensors plus the
    original batch for bookkeeping.
    """
    def __init__(self, tokenizer, subtask_list, entity_start_token_id):
        self.tokenizer = tokenizer
        self.subtask_list = subtask_list
        # Token id of "<E>", used to locate entity start positions below.
        self.entity_start_token_id = entity_start_token_id
    def __call__(self, batch):
        # Prepare Result
        gold_label_dict_batch = {subtask: [] for subtask in self.subtask_list}
        input_text_list_batch = []
        tweet_id_batch = []
        token_batch = []
        #print(batch[0])
        for input_text, subtask_label_dict, tweet_id, token_text in batch:
            input_text_list_batch.append(input_text)
            tweet_id_batch.append(tweet_id)
            token_batch.append(token_text)
            for subtask in self.subtask_list:
                gold_label_dict_batch[subtask].append(subtask_label_dict[subtask][1])  # 0 is gold chunk
        # Send to BERT's tokenizer
        # NOTE(review): pad_to_max_length is deprecated in newer transformers
        # (use padding=True) — confirm the pinned library version accepts it.
        tokenized_input_text_list_batch = self.tokenizer.batch_encode_plus(
            input_text_list_batch, pad_to_max_length=True, return_tensors='pt')
        input_ids = tokenized_input_text_list_batch['input_ids']
        # Not needed for RobertaModel
        if 'token_type_ids' in tokenized_input_text_list_batch:
            token_type_ids = tokenized_input_text_list_batch['token_type_ids']
        else:
            token_type_ids = None
        # NOTE(review): attention_mask is read but never used or returned.
        attention_mask = tokenized_input_text_list_batch['attention_mask']
        # Further processing
        # entity_start_positions = (input_ids == self.entity_start_token_id).nonzero()
        # (row, column) index of every "<E>" token in the padded batch.
        entity_start_positions = torch.nonzero(input_ids == self.entity_start_token_id, as_tuple=False)
        input_label_dict = {
            subtask: torch.LongTensor(gold_label_list)
            for subtask, gold_label_list in gold_label_dict_batch.items()
        }
        if entity_start_positions.size(0) == 0:
            # Send entity_start_positions to [CLS]'s position i.e. 0
            entity_start_positions = torch.zeros(input_ids.size(0), 2).long()
        # DEBUG
        for subtask in self.subtask_list:
            assert input_ids.size(0) == input_label_dict[subtask].size(0)
        return {
            'input_ids': input_ids,
            'entity_start_positions': entity_start_positions,
            'token_type_ids': token_type_ids,
            'gold_labels': input_label_dict,
            'batch_data': batch,
            'tweet_id': tweet_id_batch
        }
def loadData (event,
              entity_start_token_id,
              tokenizer,
              batch_size = 8,
              train_ratio = 0.6, dev_ratio = 0.15,
              shuffle_train_data_flg = True, num_workers = 0,
              input_text_processing_func_list=[]):
    """Return DataLoader for train/dev/test and subtask_list

    Input:
        event -- name of event, one of
            ['positive', 'negative', 'can_not_test', 'death', 'cure_and_prevention']
        entity_start_token_id -- tokenizer id of the "<E>" marker token
        tokenizer
    Keyword Arguments:
        batch_size -- [default 8]
        train_ratio -- [default 0.6]
        dev_ratio -- [default 0.15]
        shuffle_train_data_flg -- whether shuffle train DataLoader [default True]
        num_workers -- [default 0]
        input_text_processing_func_list -- functions applied in order to each
            input text before batching. NOTE(review): mutable default list —
            safe only if callers never mutate it.
    """
    # Init Tokenizer
    # entity_start_token_id = tokenizer.convert_tokens_to_ids(["<E>"])[0]
    # Load Data
    preprocessed_data_file = os.path.join(const.DATA_FOLDER, f'{event}-preprocessed-data.pkl')
    #
    # if not os.path.isfile(preprocessed_data_file):
    # TODO use logging module
    # NOTE(review): the isfile guard above is commented out, so preprocessing
    # always reruns (and the "doesn't exist" message always prints).
    print(f"File {preprocessed_data_file} doesn't exist, generating...")
    preprocessDataAndSave(event)
    #
    subtask_list, raw_input_text_and_label_list = loadFromPickleFile(preprocessed_data_file)
    if input_text_processing_func_list:
        tmp_list = []
        print("Processing Input Text")
        for tweet_text, input_text, subtask_label_dict, tweet_id, token_text in raw_input_text_and_label_list:
            # Apply the processing functions left-to-right to input_text only.
            for processing_func in input_text_processing_func_list:
                input_text = processing_func(input_text)
            print(tweet_text, input_text, subtask_label_dict, tweet_id)
            tmp_list.append((tweet_text, input_text, subtask_label_dict, tweet_id, token_text))
        raw_input_text_and_label_list = tmp_list
    (train_instance_list,
     dev_instance_list,
     test_instance_list) = splitDatasetIntoTrainDevTest(
        raw_input_text_and_label_list, train_ratio=train_ratio, dev_ratio=dev_ratio)
    # TODO move to logging
    print(f"Dataset Size Report: {len(train_instance_list)} / "
          f"{len(dev_instance_list)} / {len(test_instance_list)} (train/dev/test)")
    train_dataset = COVID19TaskDataset(train_instance_list)
    dev_dataset = COVID19TaskDataset(dev_instance_list)
    test_dataset = COVID19TaskDataset(test_instance_list)
    # One collator instance shared by all three loaders.
    collate_fn = TokenizeCollator(tokenizer, subtask_list, entity_start_token_id)
    train_dataloader = DataLoader(
        train_dataset, batch_size=batch_size, shuffle=shuffle_train_data_flg, num_workers=num_workers,
        collate_fn = collate_fn)
    dev_dataloader = DataLoader(
        dev_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn = collate_fn)
    test_dataloader = DataLoader(
        test_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn = collate_fn)
    return train_dataloader, dev_dataloader, test_dataloader, subtask_list
def loadNewData(event,
                entity_start_token_id,
                tokenizer,
                batch_size=8,
                train_ratio=0.6, dev_ratio=0.15,
                shuffle_train_data_flg=True, num_workers=0,
                input_text_processing_func_list=[]):
    """Return DataLoader for train/dev/test and subtask_list

    Variant of loadData() for the unlabeled "new" split: reads from
    const.NEW_DATA_FOLDER and forces the whole dataset into the train
    loader (train_ratio=1, dev_ratio=0 below — the passed ratios are ignored).

    Input:
        event -- name of event, one of
            ['positive', 'negative', 'can_not_test', 'death', 'cure_and_prevention']
        entity_start_token_id -- tokenizer id of the "<E>" marker token
        tokenizer
    Keyword Arguments:
        batch_size -- [default 8]
        train_ratio -- [default 0.6] (unused; split is hard-coded to 1/0)
        dev_ratio -- [default 0.15] (unused)
        shuffle_train_data_flg -- whether shuffle train DataLoader [default True]
        num_workers -- [default 0]
        input_text_processing_func_list -- functions applied in order to each
            input text. NOTE(review): mutable default list.
    """
    # Init Tokenizer
    # entity_start_token_id = tokenizer.convert_tokens_to_ids(["<E>"])[0]
    # Load Data
    preprocessed_data_file = os.path.join(const.NEW_DATA_FOLDER, f'{event}-preprocessed-data.pkl')
    #
    # NOTE(review): as in loadData, there is no isfile guard, so the data is
    # regenerated on every call.
    print(f"File {preprocessed_data_file} doesn't exist, generating...")
    preprocessDataAndSave(event)
    #
    subtask_list, raw_input_text_and_label_list = loadFromPickleFile(preprocessed_data_file)
    if input_text_processing_func_list:
        tmp_list = []
        print("Processing Input Text")
        for tweet_text, input_text, subtask_label_dict, tweet_id, token_text in raw_input_text_and_label_list:
            for processing_func in input_text_processing_func_list:
                input_text = processing_func(input_text)
            # print(tweet_text, input_text, subtask_label_dict, tweet_id)
            tmp_list.append((tweet_text, input_text, subtask_label_dict, tweet_id, token_text))
        raw_input_text_and_label_list = tmp_list
    # All instances go to "train"; dev/test come back empty.
    (train_instance_list,
     dev_instance_list,
     test_instance_list) = splitDatasetIntoTrainDevTest(
        raw_input_text_and_label_list, train_ratio=1, dev_ratio=0)
    # TODO move to logging
    print(f"Dataset Size Report: {len(train_instance_list)} / "
          f"{len(dev_instance_list)} / {len(test_instance_list)} (train/dev/test)")
    train_dataset = COVID19TaskDataset(train_instance_list)
    dev_dataset = COVID19TaskDataset(dev_instance_list)
    test_dataset = COVID19TaskDataset(test_instance_list)
    collate_fn = TokenizeCollator(tokenizer, subtask_list, entity_start_token_id)
    train_dataloader = DataLoader(
        train_dataset, batch_size=batch_size, shuffle=shuffle_train_data_flg, num_workers=num_workers,
        collate_fn=collate_fn)
    dev_dataloader = DataLoader(
        dev_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
    test_dataloader = DataLoader(
        test_dataset, batch_size=batch_size, num_workers=num_workers, collate_fn=collate_fn)
    return train_dataloader, dev_dataloader, test_dataloader, subtask_list
| 8,848 | 36.655319 | 110 | py |
bayesmix | bayesmix-master/docs/conf.py | import os
import sys
import subprocess
# Make the repo root and the bayesmixpy Python package importable by Sphinx
# (autodoc imports modules by name at build time).
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('../python'))
sys.path.insert(0, os.path.abspath('../python/bayesmixpy'))
def configureDoxyfile(input_dir, output_dir):
    """Generate ./Doxyfile from ./Doxyfile.in by filling in the two
    @DOXYGEN_*_DIR@ placeholders (CMake does this in a local build; on
    Read the Docs we do it ourselves)."""
    with open('Doxyfile.in', 'r') as template:
        contents = template.read()
    contents = (contents
                .replace('@DOXYGEN_INPUT_DIR@', input_dir)
                .replace('@DOXYGEN_OUTPUT_DIR@', output_dir))
    with open('Doxyfile', 'w') as rendered:
        rendered.write(contents)
# Check if we're running on Read the Docs' servers
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
# Default Doxygen XML location from a local CMake build.
# NOTE(review): the trailing space inside the path string looks accidental —
# verify breathe resolves it correctly.
breathe_projects = { "bayesmix": "../build/docs/docs/doxygen/xml " }
breathe_default_project = "bayesmix"
if read_the_docs_build:
    # On RTD there is no CMake step: render the Doxyfile, run Doxygen here,
    # and point breathe at the freshly generated XML instead.
    input_dir = '../src'
    output_dir = 'build'
    configureDoxyfile(input_dir, output_dir)
    subprocess.call('doxygen', shell=True)
    breathe_projects['bayesmix'] = output_dir + '/xml'
# -- Project information -----------------------------------------------------
project = 'bayesmix'
copyright = '2021, Guindani, B. and Beraha, M.'
author = 'Guindani, B. and Beraha, M.'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# Sphinx extensions; 'breathe' bridges the Doxygen XML into Sphinx.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.doctest',
    'sphinx.ext.mathjax',
    'sphinx.ext.napoleon',
    'sphinx.ext.viewcode',
    'sphinx.ext.imgmath',
    'sphinx.ext.todo',
    'breathe',
]
templates_path = ['_templates']
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# HTML output and highlighting defaults for this (C++) project.
html_theme = 'haiku'
highlight_language = 'cpp'
imgmath_latex = 'latex'
| 1,563 | 23.825397 | 68 | py |
pix2pix3D | pix2pix3D-main/legacy.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Converting legacy network pickle into the new format."""
import click
import pickle
import re
import copy
import numpy as np
import torch
import dnnlib
from torch_utils import misc
#----------------------------------------------------------------------------
def load_network_pkl(f, force_fp16=False):
    """Load a network pickle, converting legacy TF pickles to the new format.

    Args:
        f: open binary file object containing the pickle.
        force_fp16: rebuild G/D/G_ema with FP16 synthesis settings
            (num_fp16_res=4, conv_clamp=256) and copy weights over.

    Returns:
        dict with keys G, D, G_ema (torch.nn.Module), training_set_kwargs
        (dict or None) and augment_pipe (torch.nn.Module or None).
    """
    data = _LegacyUnpickler(f).load()
    # Legacy TensorFlow pickle => convert.
    # Old pickles are a (G, D, Gs) tuple of TF network stubs.
    if isinstance(data, tuple) and len(data) == 3 and all(isinstance(net, _TFNetworkStub) for net in data):
        tf_G, tf_D, tf_Gs = data
        G = convert_tf_generator(tf_G)
        D = convert_tf_discriminator(tf_D)
        G_ema = convert_tf_generator(tf_Gs)
        data = dict(G=G, D=D, G_ema=G_ema)
    # Add missing fields.
    if 'training_set_kwargs' not in data:
        data['training_set_kwargs'] = None
    if 'augment_pipe' not in data:
        data['augment_pipe'] = None
    # Validate contents.
    assert isinstance(data['G'], torch.nn.Module)
    assert isinstance(data['D'], torch.nn.Module)
    assert isinstance(data['G_ema'], torch.nn.Module)
    assert isinstance(data['training_set_kwargs'], (dict, type(None)))
    assert isinstance(data['augment_pipe'], (torch.nn.Module, type(None)))
    # Force FP16.
    if force_fp16:
        for key in ['G', 'D', 'G_ema']:
            old = data[key]
            kwargs = copy.deepcopy(old.init_kwargs)
            # FP16 options live under synthesis_kwargs for G; at top level otherwise.
            fp16_kwargs = kwargs.get('synthesis_kwargs', kwargs)
            fp16_kwargs.num_fp16_res = 4
            fp16_kwargs.conv_clamp = 256
            if kwargs != old.init_kwargs:
                # Re-instantiate with the FP16 settings and copy all weights.
                new = type(old)(**kwargs).eval().requires_grad_(False)
                misc.copy_params_and_buffers(old, new, require_all=True)
                data[key] = new
    return data
#----------------------------------------------------------------------------
class _TFNetworkStub(dnnlib.EasyDict):
    """Placeholder that legacy `dnnlib.tflib.network.Network` objects
    deserialize into (see _LegacyUnpickler.find_class)."""
    pass
class _LegacyUnpickler(pickle.Unpickler):
    """Unpickler that maps the legacy TF Network class onto _TFNetworkStub,
    so old pickles load without the original tflib package installed."""
    def find_class(self, module, name):
        if module == 'dnnlib.tflib.network' and name == 'Network':
            return _TFNetworkStub
        return super().find_class(module, name)
#----------------------------------------------------------------------------
def _collect_tf_params(tf_net):
# pylint: disable=protected-access
tf_params = dict()
def recurse(prefix, tf_net):
for name, value in tf_net.variables:
tf_params[prefix + name] = value
for name, comp in tf_net.components.items():
recurse(prefix + name + '/', comp)
recurse('', tf_net)
return tf_params
#----------------------------------------------------------------------------
def _populate_module_params(module, *patterns):
    """Copy values into a module's params/buffers via regex pattern matching.

    Args:
        module: torch module whose named params/buffers are populated.
        *patterns: alternating (regex, value_fn) pairs. The first regex that
            fully matches a param name wins; its value_fn receives the regex
            groups and returns the value to copy. A value_fn of None means
            "recognized but intentionally skipped".

    Raises:
        AssertionError (re-raised after printing the offending name/shape)
        when a param name matches no pattern.
    """
    for name, tensor in misc.named_params_and_buffers(module):
        found = False
        value = None
        # patterns[0::2] are regexes, patterns[1::2] the matching value fns.
        for pattern, value_fn in zip(patterns[0::2], patterns[1::2]):
            match = re.fullmatch(pattern, name)
            if match:
                found = True
                if value_fn is not None:
                    value = value_fn(*match.groups())
                break
        try:
            assert found
            if value is not None:
                tensor.copy_(torch.from_numpy(np.array(value)))
        except:
            # Print which tensor failed (unmatched name or shape mismatch)
            # before propagating the original exception.
            print(name, list(tensor.shape))
            raise
#----------------------------------------------------------------------------
def convert_tf_generator(tf_G):
    """Convert a TensorFlow StyleGAN2 generator pickle into a PyTorch module.

    Translates the TF network's static kwargs into constructor arguments for
    training.networks_stylegan2.Generator, instantiates it, and copies every
    trainable variable over by regex-matching parameter names.

    Args:
        tf_G: Unpickled TensorFlow network object (version >= 4) exposing
            `static_kwargs` and the variable collections read by
            `_collect_tf_params`.

    Returns:
        The populated Generator in eval mode with gradients disabled.

    Raises:
        ValueError: If the pickle version is too old or an unrecognized
            TF kwarg is present.
    """
    if tf_G.version < 4:
        raise ValueError('TensorFlow pickle version too low')

    # Collect kwargs. kwarg() doubles as bookkeeping: every TF name it is
    # called with is marked "known" for the unknown-kwarg check below.
    tf_kwargs = tf_G.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None, none=None):
        known_kwargs.add(tf_name)
        val = tf_kwargs.get(tf_name, default)
        return val if val is not None else none

    # Convert kwargs.
    from training import networks_stylegan2
    network_class = networks_stylegan2.Generator
    kwargs = dnnlib.EasyDict(
        z_dim = kwarg('latent_size', 512),
        c_dim = kwarg('label_size', 0),
        w_dim = kwarg('dlatent_size', 512),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        channel_base = kwarg('fmap_base', 16384) * 2,
        channel_max = kwarg('fmap_max', 512),
        num_fp16_res = kwarg('num_fp16_res', 0),
        conv_clamp = kwarg('conv_clamp', None),
        architecture = kwarg('architecture', 'skip'),
        resample_filter = kwarg('resample_kernel', [1,3,3,1]),
        use_noise = kwarg('use_noise', True),
        activation = kwarg('nonlinearity', 'lrelu'),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 8),
            embed_features = kwarg('label_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('mapping_nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.01),
            w_avg_beta = kwarg('w_avg_beta', 0.995, none=1),
        ),
    )

    # Check for unknown kwargs. These TF-only options have no PyTorch
    # counterpart; calling kwarg() marks them as accepted-but-ignored.
    kwarg('truncation_psi')
    kwarg('truncation_cutoff')
    kwarg('style_mixing_prob')
    kwarg('structure')
    kwarg('conditioning')
    kwarg('fused_modconv')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])

    # Collect params.
    tf_params = _collect_tf_params(tf_G)
    for name, value in list(tf_params.items()):
        # Legacy progressive-growing pickles name per-resolution output layers
        # 'ToRGB_lodN'; rename them to the fixed '{r}x{r}/ToRGB' scheme.
        match = re.fullmatch(r'ToRGB_lod(\d+)/(.*)', name)
        if match:
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/ToRGB/{match.group(2)}'] = value
            # Per-resolution ToRGB layers only exist in the 'orig' architecture.
            # Fixed: this previously assigned kwargs.synthesis.kwargs.architecture,
            # but this flat kwargs dict has no 'synthesis' key (architecture is
            # declared at the top level above), so the assignment would raise
            # AttributeError. Matches convert_tf_discriminator below.
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')

    # Convert params.
    G = network_class(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    # pylint: disable=f-string-without-interpolation
    _populate_module_params(G,
        r'mapping\.w_avg', lambda: tf_params[f'dlatent_avg'],
        r'mapping\.embed\.weight', lambda: tf_params[f'mapping/LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'mapping/LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'mapping/Dense{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'mapping/Dense{i}/bias'],
        r'synthesis\.b4\.const', lambda: tf_params[f'synthesis/4x4/Const/const'][0],
        r'synthesis\.b4\.conv1\.weight', lambda: tf_params[f'synthesis/4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b4\.conv1\.bias', lambda: tf_params[f'synthesis/4x4/Conv/bias'],
        r'synthesis\.b4\.conv1\.noise_const', lambda: tf_params[f'synthesis/noise0'][0, 0],
        r'synthesis\.b4\.conv1\.noise_strength', lambda: tf_params[f'synthesis/4x4/Conv/noise_strength'],
        r'synthesis\.b4\.conv1\.affine\.weight', lambda: tf_params[f'synthesis/4x4/Conv/mod_weight'].transpose(),
        r'synthesis\.b4\.conv1\.affine\.bias', lambda: tf_params[f'synthesis/4x4/Conv/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv0\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv0\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/bias'],
        r'synthesis\.b(\d+)\.conv0\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-5}'][0, 0],
        r'synthesis\.b(\d+)\.conv0\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/noise_strength'],
        r'synthesis\.b(\d+)\.conv0\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv0\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv0_up/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.conv1\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.conv1\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/bias'],
        r'synthesis\.b(\d+)\.conv1\.noise_const', lambda r: tf_params[f'synthesis/noise{int(np.log2(int(r)))*2-4}'][0, 0],
        r'synthesis\.b(\d+)\.conv1\.noise_strength', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/noise_strength'],
        r'synthesis\.b(\d+)\.conv1\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.conv1\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/Conv1/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.torgb\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/weight'].transpose(3, 2, 0, 1),
        r'synthesis\.b(\d+)\.torgb\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/bias'],
        r'synthesis\.b(\d+)\.torgb\.affine\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_weight'].transpose(),
        r'synthesis\.b(\d+)\.torgb\.affine\.bias', lambda r: tf_params[f'synthesis/{r}x{r}/ToRGB/mod_bias'] + 1,
        r'synthesis\.b(\d+)\.skip\.weight', lambda r: tf_params[f'synthesis/{r}x{r}/Skip/weight'][::-1, ::-1].transpose(3, 2, 0, 1),
        r'.*\.resample_filter', None,
        r'.*\.act_filter', None,
    )
    return G
#----------------------------------------------------------------------------
def convert_tf_discriminator(tf_D):
    """Convert a TensorFlow StyleGAN2 discriminator pickle into a PyTorch module.

    Translates the TF static_kwargs into constructor kwargs for
    training.networks_stylegan2.Discriminator, instantiates it, and copies
    every trainable variable over by regex-matching parameter names.

    Raises:
        ValueError: If the pickle version is too old or an unrecognized
            TF kwarg is present.
    """
    if tf_D.version < 4:
        raise ValueError('TensorFlow pickle version too low')
    # Collect kwargs.
    tf_kwargs = tf_D.static_kwargs
    known_kwargs = set()
    def kwarg(tf_name, default=None):
        # Side effect: marks tf_name as recognized for the unknown-kwarg check below.
        known_kwargs.add(tf_name)
        return tf_kwargs.get(tf_name, default)
    # Convert kwargs.
    kwargs = dnnlib.EasyDict(
        c_dim = kwarg('label_size', 0),
        img_resolution = kwarg('resolution', 1024),
        img_channels = kwarg('num_channels', 3),
        architecture = kwarg('architecture', 'resnet'),
        channel_base = kwarg('fmap_base', 16384) * 2,
        channel_max = kwarg('fmap_max', 512),
        num_fp16_res = kwarg('num_fp16_res', 0),
        conv_clamp = kwarg('conv_clamp', None),
        cmap_dim = kwarg('mapping_fmaps', None),
        block_kwargs = dnnlib.EasyDict(
            activation = kwarg('nonlinearity', 'lrelu'),
            resample_filter = kwarg('resample_kernel', [1,3,3,1]),
            freeze_layers = kwarg('freeze_layers', 0),
        ),
        mapping_kwargs = dnnlib.EasyDict(
            num_layers = kwarg('mapping_layers', 0),
            embed_features = kwarg('mapping_fmaps', None),
            layer_features = kwarg('mapping_fmaps', None),
            activation = kwarg('nonlinearity', 'lrelu'),
            lr_multiplier = kwarg('mapping_lrmul', 0.1),
        ),
        epilogue_kwargs = dnnlib.EasyDict(
            mbstd_group_size = kwarg('mbstd_group_size', None),
            mbstd_num_channels = kwarg('mbstd_num_features', 1),
            activation = kwarg('nonlinearity', 'lrelu'),
        ),
    )
    # Check for unknown kwargs.
    # TF-only options with no PyTorch counterpart: accepted but ignored.
    kwarg('structure')
    kwarg('conditioning')
    unknown_kwargs = list(set(tf_kwargs.keys()) - known_kwargs)
    if len(unknown_kwargs) > 0:
        raise ValueError('Unknown TensorFlow kwarg', unknown_kwargs[0])
    # Collect params.
    tf_params = _collect_tf_params(tf_D)
    for name, value in list(tf_params.items()):
        # Legacy progressive-growing pickles name per-resolution input layers
        # 'FromRGB_lodN'; rename them to the fixed '{r}x{r}/FromRGB' scheme.
        match = re.fullmatch(r'FromRGB_lod(\d+)/(.*)', name)
        if match:
            r = kwargs.img_resolution // (2 ** int(match.group(1)))
            tf_params[f'{r}x{r}/FromRGB/{match.group(2)}'] = value
            # Per-resolution FromRGB layers only exist in the 'orig' architecture.
            kwargs.architecture = 'orig'
    #for name, value in tf_params.items(): print(f'{name:<50s}{list(value.shape)}')
    # Convert params.
    from training import networks_stylegan2
    D = networks_stylegan2.Discriminator(**kwargs).eval().requires_grad_(False)
    # pylint: disable=unnecessary-lambda
    # pylint: disable=f-string-without-interpolation
    _populate_module_params(D,
        r'b(\d+)\.fromrgb\.weight', lambda r: tf_params[f'{r}x{r}/FromRGB/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.fromrgb\.bias', lambda r: tf_params[f'{r}x{r}/FromRGB/bias'],
        r'b(\d+)\.conv(\d+)\.weight', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/weight'].transpose(3, 2, 0, 1),
        r'b(\d+)\.conv(\d+)\.bias', lambda r, i: tf_params[f'{r}x{r}/Conv{i}{["","_down"][int(i)]}/bias'],
        r'b(\d+)\.skip\.weight', lambda r: tf_params[f'{r}x{r}/Skip/weight'].transpose(3, 2, 0, 1),
        r'mapping\.embed\.weight', lambda: tf_params[f'LabelEmbed/weight'].transpose(),
        r'mapping\.embed\.bias', lambda: tf_params[f'LabelEmbed/bias'],
        r'mapping\.fc(\d+)\.weight', lambda i: tf_params[f'Mapping{i}/weight'].transpose(),
        r'mapping\.fc(\d+)\.bias', lambda i: tf_params[f'Mapping{i}/bias'],
        r'b4\.conv\.weight', lambda: tf_params[f'4x4/Conv/weight'].transpose(3, 2, 0, 1),
        r'b4\.conv\.bias', lambda: tf_params[f'4x4/Conv/bias'],
        r'b4\.fc\.weight', lambda: tf_params[f'4x4/Dense0/weight'].transpose(),
        r'b4\.fc\.bias', lambda: tf_params[f'4x4/Dense0/bias'],
        r'b4\.out\.weight', lambda: tf_params[f'Output/weight'].transpose(),
        r'b4\.out\.bias', lambda: tf_params[f'Output/bias'],
        r'.*\.resample_filter', None,
    )
    return D
#----------------------------------------------------------------------------
@click.command()
@click.option('--source', help='Input pickle', required=True, metavar='PATH')
@click.option('--dest', help='Output pickle', required=True, metavar='PATH')
@click.option('--force-fp16', help='Force the networks to use FP16', type=bool, default=False, metavar='BOOL', show_default=True)
def convert_network_pickle(source, dest, force_fp16):
    """Convert legacy network pickle into the native PyTorch format.
    The tool is able to load the main network configurations exported using the TensorFlow version of StyleGAN2 or StyleGAN2-ADA.
    It does not support e.g. StyleGAN2-ADA comparison methods, StyleGAN2 configs A-D, or StyleGAN1 networks.
    Example:
    \b
    python legacy.py \\
        --source=https://nvlabs-fi-cdn.nvidia.com/stylegan2/networks/stylegan2-cat-config-f.pkl \\
        --dest=stylegan2-cat-config-f.pkl
    """
    # Load the legacy pickle; load_network_pkl converts any TF networks inside.
    print(f'Loading "{source}"...')
    with dnnlib.util.open_url(source) as src_file:
        converted = load_network_pkl(src_file, force_fp16=force_fp16)
    # Re-serialize the converted networks as a native PyTorch pickle.
    print(f'Saving "{dest}"...')
    with open(dest, 'wb') as dst_file:
        pickle.dump(converted, dst_file)
    print('Done.')
#----------------------------------------------------------------------------
if __name__ == "__main__":
    # Entry point: click parses sys.argv and supplies the command parameters.
    convert_network_pickle() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 16,675 | 50.153374 | 154 | py |
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""
Helper functions for constructing camera parameter matrices. Primarily used in visualization and inference scripts.
"""
import math
import torch
import torch.nn as nn
from training.volumetric_rendering import math_utils
class GaussianCameraPoseSampler:
    """
    Draws camera poses whose yaw/pitch are normally distributed around the
    given means, with the camera placed on a sphere of the given radius and
    aimed at the world origin.

    With zero horizontal/vertical stddev (both in radians) the pose is
    deterministic: yaw=horizontal_mean, pitch=vertical_mean. The coordinate
    system is y-up, z-forward, x-left; horizontal_mean is the azimuth
    (rotation about y) and vertical_mean the polar angle from the y axis,
    so a point on the z-axis has azimuth 0 and polar angle pi/2.

    Example:
        For a camera pose looking at the origin with the camera at position [0, 0, 1]:
        cam2world = GaussianCameraPoseSampler.sample(math.pi/2, math.pi/2, radius=1)
    """

    @staticmethod
    def sample(horizontal_mean, vertical_mean, horizontal_stddev=0, vertical_stddev=0, radius=1, batch_size=1, device='cpu'):
        # Gaussian-perturbed azimuth (theta) and polar (v) samples.
        theta = torch.randn((batch_size, 1), device=device) * horizontal_stddev + horizontal_mean
        v = torch.randn((batch_size, 1), device=device) * vertical_stddev + vertical_mean
        v = torch.clamp(v, 1e-5, math.pi - 1e-5)

        # Remap the polar sample through arccos(1 - 2v/pi).
        phi = torch.arccos(1 - 2 * (v / math.pi))

        # Spherical -> Cartesian camera position; columns are (x, y, z), y up.
        sin_phi = torch.sin(phi)
        camera_origins = torch.cat((
            radius * sin_phi * torch.cos(math.pi - theta),
            radius * torch.cos(phi),
            radius * sin_phi * torch.sin(math.pi - theta),
        ), dim=1)

        # Aim every camera at the world origin.
        forward_vectors = math_utils.normalize_vecs(-camera_origins)
        return create_cam2world_matrix(forward_vectors, camera_origins)
class LookAtPoseSampler:
    """
    Same sampling scheme as GaussianCameraPoseSampler, except the camera is
    aimed at an arbitrary 3-vector 'lookat_position' instead of the origin.

    Example:
        For a camera pose looking at the origin with the camera at position [0, 0, 1]:
        cam2world = LookAtPoseSampler.sample(math.pi/2, math.pi/2, torch.tensor([0, 0, 0]), radius=1)
    """

    @staticmethod
    def sample(horizontal_mean, vertical_mean, lookat_position, horizontal_stddev=0, vertical_stddev=0, radius=1, batch_size=1, device='cpu'):
        # Gaussian-perturbed azimuth (theta) and polar (v) samples.
        theta = torch.randn((batch_size, 1), device=device) * horizontal_stddev + horizontal_mean
        v = torch.randn((batch_size, 1), device=device) * vertical_stddev + vertical_mean
        v = torch.clamp(v, 1e-5, math.pi - 1e-5)

        # Remap the polar sample through arccos(1 - 2v/pi).
        phi = torch.arccos(1 - 2 * (v / math.pi))

        # Spherical -> Cartesian camera position; columns are (x, y, z), y up.
        sin_phi = torch.sin(phi)
        camera_origins = torch.cat((
            radius * sin_phi * torch.cos(math.pi - theta),
            radius * torch.cos(phi),
            radius * sin_phi * torch.sin(math.pi - theta),
        ), dim=1)

        # Aim every camera at the requested look-at point.
        forward_vectors = math_utils.normalize_vecs(lookat_position - camera_origins)
        return create_cam2world_matrix(forward_vectors, camera_origins)
class UniformCameraPoseSampler:
    """
    Same as GaussianCameraPoseSampler, except the angular offsets are drawn
    uniformly from +-[horizontal/vertical]_stddev around the means.

    Example:
        For a batch of random camera poses looking at the origin with yaw sampled from [-pi/2, +pi/2] radians:
        cam2worlds = UniformCameraPoseSampler.sample(math.pi/2, math.pi/2, horizontal_stddev=math.pi/2, radius=1, batch_size=16)
    """

    @staticmethod
    def sample(horizontal_mean, vertical_mean, horizontal_stddev=0, vertical_stddev=0, radius=1, batch_size=1, device='cpu'):
        # Uniform samples in [mean - stddev, mean + stddev] for azimuth/polar.
        theta = (torch.rand((batch_size, 1), device=device) * 2 - 1) * horizontal_stddev + horizontal_mean
        v = (torch.rand((batch_size, 1), device=device) * 2 - 1) * vertical_stddev + vertical_mean
        v = torch.clamp(v, 1e-5, math.pi - 1e-5)

        # Remap the polar sample through arccos(1 - 2v/pi).
        phi = torch.arccos(1 - 2 * (v / math.pi))

        # Spherical -> Cartesian camera position; columns are (x, y, z), y up.
        sin_phi = torch.sin(phi)
        camera_origins = torch.cat((
            radius * sin_phi * torch.cos(math.pi - theta),
            radius * torch.cos(phi),
            radius * sin_phi * torch.sin(math.pi - theta),
        ), dim=1)

        # Aim every camera at the world origin.
        forward_vectors = math_utils.normalize_vecs(-camera_origins)
        return create_cam2world_matrix(forward_vectors, camera_origins)
def create_cam2world_matrix(forward_vector, origin):
    """
    Build a batch of cam2world matrices from viewing directions and origins.

    Assumes a y-up world and zero camera roll. Accepts batched
    forward_vectors and origins of shape (N, 3); returns (N, 4, 4).
    """
    # Orthonormal camera basis: right/up/forward, derived from the world up axis.
    forward_vector = math_utils.normalize_vecs(forward_vector)
    world_up = torch.tensor([0, 1, 0], dtype=torch.float, device=origin.device).expand_as(forward_vector)
    right_vector = -math_utils.normalize_vecs(torch.cross(world_up, forward_vector, dim=-1))
    up_vector = math_utils.normalize_vecs(torch.cross(forward_vector, right_vector, dim=-1))

    batch = forward_vector.shape[0]
    rotation = torch.eye(4, device=origin.device).unsqueeze(0).repeat(batch, 1, 1)
    rotation[:, :3, :3] = torch.stack((right_vector, up_vector, forward_vector), axis=-1)

    translation = torch.eye(4, device=origin.device).unsqueeze(0).repeat(batch, 1, 1)
    translation[:, :3, 3] = origin

    # Translate after rotating: world point = T * R * camera point.
    cam2world = translation @ rotation
    assert cam2world.shape[1:] == (4, 4)
    return cam2world
def FOV_to_intrinsics(fov_degrees, device='cpu'):
    """
    Build a 3x3 camera intrinsics matrix from a field of view in degrees.

    The intrinsics are normalized by image size (not pixel units), and the
    principal point is assumed to sit at the image center (0.5, 0.5).
    """
    # focal = 1 / (tan(fov/2) * sqrt(2)); constants kept as in the original.
    half_fov_radians = fov_degrees * 3.14159 / 360
    focal_length = float(1 / (math.tan(half_fov_radians) * 1.414))
    intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device)
    return intrinsics
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Train a GAN using the techniques described in the paper
"Efficient Geometry-aware 3D Generative Adversarial Networks."
Code adapted from
"Alias-Free Generative Adversarial Networks"."""
import os
import click
import re
import json
import tempfile
import torch
import dnnlib
from training import training_loop
from metrics import metric_main
from torch_utils import training_stats
from torch_utils import custom_ops
# ----------------------------------------------------------------------------
def subprocess_fn(rank, c, temp_dir):
    """Per-process entry point: set up logging and distributed state, then train.

    Args:
        rank:     Index of this process within the (possibly single-GPU) job.
        c:        Full training config (dnnlib.EasyDict) assembled by main().
        temp_dir: Scratch directory shared by all ranks, used for the
                  torch.distributed file:// rendezvous.
    """
    # All ranks append to the same log file in the run directory.
    dnnlib.util.Logger(file_name=os.path.join(
        c.run_dir, 'log.txt'), file_mode='a', should_flush=True)
    # Init torch.distributed.
    if c.num_gpus > 1:
        init_file = os.path.abspath(os.path.join(
            temp_dir, '.torch_distributed_init'))
        if os.name == 'nt':
            # Windows: NCCL is unavailable, so rendezvous via a file URL and use gloo.
            init_method = 'file:///' + init_file.replace('\\', '/')
            torch.distributed.init_process_group(
                backend='gloo', init_method=init_method, rank=rank, world_size=c.num_gpus)
        else:
            init_method = f'file://{init_file}'
            torch.distributed.init_process_group(
                backend='nccl', init_method=init_method, rank=rank, world_size=c.num_gpus)
    # Init torch_utils.
    sync_device = torch.device('cuda', rank) if c.num_gpus > 1 else None
    training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
    if rank != 0:
        # Silence custom-op build output on all ranks except rank 0.
        custom_ops.verbosity = 'none'
    # Execute training loop.
    training_loop.training_loop(rank=rank, **c)
# ----------------------------------------------------------------------------
def launch_training(c, desc, outdir, dry_run):
    """Create a fresh numbered run directory, dump the config, and spawn training.

    Args:
        c:       Training config (dnnlib.EasyDict); run_dir/exp_name are set here.
        desc:    Human-readable description appended to the numeric run id.
        outdir:  Parent directory holding all numbered run directories.
        dry_run: If True, only print the resolved options and return.
    """
    dnnlib.util.Logger(should_flush=True)
    # Pick output directory.
    prev_run_dirs = []
    if os.path.isdir(outdir):
        prev_run_dirs = [x for x in os.listdir(
            outdir) if os.path.isdir(os.path.join(outdir, x))]
    # Next run id = highest existing numeric prefix + 1 (gaps are fine).
    prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs]
    prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]
    cur_run_id = max(prev_run_ids, default=-1) + 1
    c.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{desc}')
    assert not os.path.exists(c.run_dir)
    c.exp_name = f'{cur_run_id:05d}-{desc}'
    # Print options.
    print()
    print('Training options:')
    print(json.dumps(c, indent=2))
    print()
    print(f'Output directory:    {c.run_dir}')
    print(f'Number of GPUs:      {c.num_gpus}')
    print(f'Batch size:          {c.batch_size} images')
    print(f'Training duration:   {c.total_kimg} kimg')
    print(f'Dataset path:        {c.training_set_kwargs.path}')
    print(f'Dataset size:        {c.training_set_kwargs.max_size} images')
    print(f'Dataset resolution:  {c.training_set_kwargs.resolution}')
    print(f'Dataset labels:      {c.training_set_kwargs.use_labels}')
    print(f'Dataset x-flips:     {c.training_set_kwargs.xflip}')
    print()
    # Dry run?
    if dry_run:
        print('Dry run; exiting.')
        return
    # Create output directory.
    print('Creating output directory...')
    os.makedirs(c.run_dir)
    with open(os.path.join(c.run_dir, 'training_options.json'), 'wt') as f:
        json.dump(c, f, indent=2)
    # Launch processes.
    print('Launching processes...')
    # 'spawn' is required for CUDA-safe child processes.
    torch.multiprocessing.set_start_method('spawn')
    with tempfile.TemporaryDirectory() as temp_dir:
        if c.num_gpus == 1:
            # Single GPU: run in-process, no spawning needed.
            subprocess_fn(rank=0, c=c, temp_dir=temp_dir)
        else:
            torch.multiprocessing.spawn(
                fn=subprocess_fn, args=(c, temp_dir), nprocs=c.num_gpus)
# ----------------------------------------------------------------------------
def init_dataset_kwargs(data):
    """Resolve --data into dataset kwargs plus the dataset's display name.

    Opens the dataset once to probe resolution/labels/size, and bakes the
    probed values back into the kwargs so they are explicit from then on.

    Raises:
        click.ClickException: If the dataset cannot be opened.
    """
    try:
        ds_kwargs = dnnlib.EasyDict(
            class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False)
        probe = dnnlib.util.construct_class_by_name(**ds_kwargs)  # Subclass of training.dataset.Dataset.
        ds_kwargs.resolution = probe.resolution   # Be explicit about resolution.
        ds_kwargs.use_labels = probe.has_labels   # Be explicit about labels.
        ds_kwargs.max_size = len(probe)           # Be explicit about dataset size.
        return ds_kwargs, probe.name
    except IOError as err:
        raise click.ClickException(f'--data: {err}')
# ----------------------------------------------------------------------------
def init_conditional_dataset_kwargs(data, mask_data, data_type, resolution=None):
    """Build dataset kwargs for a conditional (image + seg/edge mask) dataset.

    Args:
        data:       Path to the image dataset (zip or directory).
        mask_data:  Path to the matching segmentation/edge dataset.
        data_type:  'seg' or 'edge'; selects the dataset class.
        resolution: Optional explicit resolution (forwarded for 'seg' only,
                    matching the original per-branch behavior).

    Returns:
        (dataset_kwargs, dataset_name), where dataset_kwargs can be passed to
        dnnlib.util.construct_class_by_name to re-create the dataset.

    Raises:
        click.ClickException: For an unknown data_type or dataset I/O errors.
    """
    # The two supported data types differ only in the dataset class and in
    # whether an explicit resolution override is forwarded; everything else
    # was duplicated across the old 'seg'/'edge' branches.
    class_names = {
        'seg': 'training.dataset.ImageSegFolderDataset',
        'edge': 'training.dataset.ImageEdgeFolderDataset',
    }
    if data_type not in class_names:
        raise click.ClickException(f'Unknown data_type: {data_type}')
    try:
        dataset_kwargs = dnnlib.EasyDict(
            class_name=class_names[data_type], path=data, mask_path=mask_data,
            data_type=data_type, use_labels=True, max_size=None, xflip=False)
        if data_type == 'seg':
            dataset_kwargs.resolution = resolution
        dataset_obj = dnnlib.util.construct_class_by_name(**dataset_kwargs)  # Subclass of training.dataset.Dataset.
        # Probe the dataset once and be explicit about the inferred properties.
        dataset_kwargs.resolution = dataset_obj.resolution
        dataset_kwargs.use_labels = dataset_obj.has_labels
        dataset_kwargs.max_size = len(dataset_obj)
        return dataset_kwargs, dataset_obj.name
    except IOError as err:
        raise click.ClickException(f'--data: {err}')
# ----------------------------------------------------------------------------
def parse_comma_separated_list(s):
    """Normalize a CLI option value into a list of strings.

    Accepts an existing list (returned unchanged), None / '' / 'none'
    (case-insensitive, meaning empty), or a comma-separated string.
    """
    if isinstance(s, list):
        return s
    if not s or s.lower() == 'none':
        return []
    return s.split(',')
# ----------------------------------------------------------------------------
@click.command()
# Required.
@click.option('--outdir', help='Where to save the results', metavar='DIR', required=True)
@click.option('--cfg', help='Base configuration', type=str, required=True)
@click.option('--data', help='Training data', metavar='[ZIP|DIR]', type=str, required=True)
@click.option('--mask_data', help='Training data', metavar='[ZIP|DIR]', type=str, required=True)
@click.option('--data_type', help='Training data type', type=str, default='seg', show_default=True)
@click.option('--gpus', help='Number of GPUs to use', metavar='INT', type=click.IntRange(min=1), required=True)
@click.option('--batch', help='Total batch size', metavar='INT', type=click.IntRange(min=1), required=True)
@click.option('--gamma', help='R1 regularization weight', metavar='FLOAT', type=click.FloatRange(min=0), required=True)
# Optional features.
@click.option('--cond', help='Train conditional model', metavar='BOOL', type=bool, default=True, show_default=True)
@click.option('--mirror', help='Enable dataset x-flips', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--aug', help='Augmentation mode', type=click.Choice(['noaug', 'ada', 'fixed']), default='noaug', show_default=True)
@click.option('--resume', help='Resume from given network pickle', metavar='[PATH|URL]', type=str)
@click.option('--freezed', help='Freeze first layers of D', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True)
# Misc hyperparameters.
@click.option('--p', help='Probability for --aug=fixed', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.2, show_default=True)
@click.option('--target', help='Target value for --aug=ada', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0.6, show_default=True)
@click.option('--batch-gpu', help='Limit batch size per GPU', metavar='INT', type=click.IntRange(min=1))
@click.option('--cbase', help='Capacity multiplier', metavar='INT', type=click.IntRange(min=1), default=32768, show_default=True)
@click.option('--cmax', help='Max. feature maps', metavar='INT', type=click.IntRange(min=1), default=512, show_default=True)
@click.option('--glr', help='G learning rate [default: varies]', metavar='FLOAT', type=click.FloatRange(min=0))
@click.option('--dlr', help='D learning rate', metavar='FLOAT', type=click.FloatRange(min=0), default=0.002, show_default=True)
@click.option('--map-depth', help='Mapping network depth [default: varies]', metavar='INT', type=click.IntRange(min=1), default=2, show_default=True)
@click.option('--mbstd-group', help='Minibatch std group size', metavar='INT', type=click.IntRange(min=1), default=4, show_default=True)
# Misc settings.
@click.option('--desc', help='String to include in result dir name', metavar='STR', type=str)
@click.option('--metrics', help='Quality metrics', metavar='[NAME|A,B,C|none]', type=parse_comma_separated_list, default='fid50k_full', show_default=True)
@click.option('--kimg', help='Total training duration', metavar='KIMG', type=click.IntRange(min=1), default=25000, show_default=True)
@click.option('--tick', help='How often to print progress', metavar='KIMG', type=click.IntRange(min=1), default=4, show_default=True)
@click.option('--snap', help='How often to save snapshots', metavar='TICKS', type=click.IntRange(min=1), default=10, show_default=True)
@click.option('--seed', help='Random seed', metavar='INT', type=click.IntRange(min=0), default=0, show_default=True)
# @click.option('--fp32', help='Disable mixed-precision', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--nobench', help='Disable cuDNN benchmarking', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--workers', help='DataLoader worker processes', metavar='INT', type=click.IntRange(min=1), default=3, show_default=True)
@click.option('-n', '--dry-run', help='Print training options and exit', is_flag=True)
# @click.option('--sr_module', help='Superresolution module', metavar='STR', type=str, required=True)
@click.option('--neural_rendering_resolution_initial', help='Resolution to render at', metavar='INT', type=click.IntRange(min=1), default=64, required=False)
@click.option('--neural_rendering_resolution_final', help='Final resolution to render at, if blending', metavar='INT', type=click.IntRange(min=1), required=False, default=None)
@click.option('--neural_rendering_resolution_fade_kimg', help='Kimg to blend resolution over', metavar='INT', type=click.IntRange(min=0), required=False, default=1000, show_default=True)
@click.option('--blur_fade_kimg', help='Blur over how many', metavar='INT', type=click.IntRange(min=1), required=False, default=200)
@click.option('--gen_pose_cond', help='If true, enable generator pose conditioning.', metavar='BOOL', type=bool, required=False, default=False)
@click.option('--c-scale', help='Scale factor for generator pose conditioning.', metavar='FLOAT', type=click.FloatRange(min=0), required=False, default=1)
@click.option('--c-noise', help='Add noise for generator pose conditioning.', metavar='FLOAT', type=click.FloatRange(min=0), required=False, default=0)
@click.option('--gpc_reg_prob', help='Strength of swapping regularization. None means no generator pose conditioning, i.e. condition with zeros.', metavar='FLOAT', type=click.FloatRange(min=0), required=False, default=0.5)
@click.option('--gpc_reg_fade_kimg', help='Length of swapping prob fade', metavar='INT', type=click.IntRange(min=0), required=False, default=1000)
@click.option('--disc_c_noise', help='Strength of discriminator pose conditioning regularization, in standard deviations.', metavar='FLOAT', type=click.FloatRange(min=0), required=False, default=0)
@click.option('--sr_noise_mode', help='Type of noise for superresolution', metavar='STR', type=click.Choice(['random', 'none']), required=False, default='none')
@click.option('--resume_blur', help='Enable to blur even on resume', metavar='BOOL', type=bool, required=False, default=False)
@click.option('--sr_num_fp16_res', help='Number of fp16 layers in superresolution', metavar='INT', type=click.IntRange(min=0), default=4, required=False, show_default=True)
@click.option('--g_num_fp16_res', help='Number of fp16 layers in generator', metavar='INT', type=click.IntRange(min=0), default=0, required=False, show_default=True)
@click.option('--d_num_fp16_res', help='Number of fp16 layers in discriminator', metavar='INT', type=click.IntRange(min=0), default=4, required=False, show_default=True)
@click.option('--sr_first_cutoff', help='First cutoff for AF superresolution', metavar='INT', type=click.IntRange(min=2), default=2, required=False, show_default=True)
@click.option('--sr_first_stopband', help='First cutoff for AF superresolution', metavar='FLOAT', type=click.FloatRange(min=2), default=2**2.1, required=False, show_default=True)
@click.option('--style_mixing_prob', help='Style-mixing regularization probability for training.', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0, required=False, show_default=True)
@click.option('--sr-module', help='Superresolution module override', metavar='STR', type=str, required=False, default=None)
@click.option('--density_reg', help='Density regularization strength.', metavar='FLOAT', type=click.FloatRange(min=0), default=0.25, required=False, show_default=True)
@click.option('--density_reg_every', help='lazy density reg', metavar='int', type=click.FloatRange(min=1), default=4, required=False, show_default=True)
@click.option('--density_reg_p_dist', help='density regularization strength.', metavar='FLOAT', type=click.FloatRange(min=0), default=0.004, required=False, show_default=True)
@click.option('--reg_type', help='Type of regularization', metavar='STR', type=click.Choice(['l1', 'l1-alt', 'monotonic-detach', 'monotonic-fixed', 'total-variation']), required=False, default='l1')
@click.option('--decoder_lr_mul', help='decoder learning rate multiplier.', metavar='FLOAT', type=click.FloatRange(min=0), default=1, required=False, show_default=True)
@click.option('--wandb_log', help='whether to use wandb', metavar='BOOL', type=bool, default=True, show_default=True)
@click.option('--no_eval', help='whether to disable evaluation metrics', metavar='BOOL', type=bool, default=True, show_default=True)
@click.option('--random_c_prob', help='Probablity of random poses', metavar='FLOAT', type=click.FloatRange(min=0, max=1), default=0, required=False, show_default=True)
@click.option('--debug', help='whether to use debug mode', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--render_mask', help='whether to render masks', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--dis_mask', help='whether to use Dis for masks', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--lambda_l1', help='L1 loss weight', metavar='FLOAT', type=click.FloatRange(min=0), default=0, required=False, show_default=True)
@click.option('--lambda_lpips', help='LPIPS loss weight', metavar='FLOAT', type=click.FloatRange(min=0), default=10, required=False, show_default=True)
@click.option('--lambda_d_semantic', help='D_semantic loss weight', metavar='FLOAT', type=click.FloatRange(min=0), default=1, required=False, show_default=True)
@click.option('--seg_weight', help='Cross Entropy Class Weight', metavar='int', type=click.FloatRange(min=0, max=2), default=0, required=False, show_default=True)
@click.option('--edge_weight', help='L1 loss weight for edges', metavar='FLOAT', type=click.FloatRange(min=0), default=2, required=False, show_default=True)
@click.option('--only_raw_recons', help='whether to only use reconstruction loss for nerf renderings', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--semantic_channels', help='number of semantic channels', metavar='INT', type=click.IntRange(min=1), default=19, show_default=True)
@click.option('--use_bg', help='whether to use bg model in nerf', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--silhouette_loss', help='whether to use silhouette loss in nerf', metavar='BOOL', type=bool, default=False, show_default=True)
@click.option('--data_resolution', help='Data Resolution', metavar='INT', type=int, default=0, required=False)
@click.option('--geometry_layer', help='Geometry Layer', metavar='INT', type=int, default=7, required=False)
@click.option('--lambda_cross_view', help='Cross View loss weight', metavar='FLOAT', type=click.FloatRange(min=0), default=0, required=False, show_default=True)
def main(**kwargs):
    # NOTE(review): help text below is inherited from the StyleGAN3 train.py
    # boilerplate; this script actually trains pix2pix3D — confirm before release.
    """Train a GAN using the techniques described in the paper
    "Alias-Free Generative Adversarial Networks".
    Examples:
    \b
    # Train StyleGAN3-T for AFHQv2 using 8 GPUs.
    python train.py --outdir=~/training-runs --cfg=stylegan3-t --data=~/datasets/afhqv2-512x512.zip \\
    --gpus=8 --batch=32 --gamma=8.2 --mirror=1
    \b
    # Fine-tune StyleGAN3-R for MetFaces-U using 1 GPU, starting from the pre-trained FFHQ-U pickle.
    python train.py --outdir=~/training-runs --cfg=stylegan3-r --data=~/datasets/metfacesu-1024x1024.zip \\
    --gpus=8 --batch=32 --gamma=6.6 --mirror=1 --kimg=5000 --snap=5 \\
    --resume=https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/stylegan3-r-ffhqu-1024x1024.pkl
    \b
    # Train StyleGAN2 for FFHQ at 1024x1024 resolution using 8 GPUs.
    python train.py --outdir=~/training-runs --cfg=stylegan2 --data=~/datasets/ffhq-1024x1024.zip \\
    --gpus=8 --batch=32 --gamma=10 --mirror=1 --aug=noaug
    """
    # Initialize config.
    opts = dnnlib.EasyDict(kwargs)  # Command line arguments.
    c = dnnlib.EasyDict()  # Main config dict.
    c.G_kwargs = dnnlib.EasyDict(
        class_name=None, z_dim=512, w_dim=512, mapping_kwargs=dnnlib.EasyDict())
    c.D_kwargs = dnnlib.EasyDict(class_name='training.networks_stylegan2.Discriminator', block_kwargs=dnnlib.EasyDict(
    ), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict())
    c.G_opt_kwargs = dnnlib.EasyDict(
        class_name='torch.optim.Adam', betas=[0, 0.99], eps=1e-8)
    c.D_opt_kwargs = dnnlib.EasyDict(
        class_name='torch.optim.Adam', betas=[0, 0.99], eps=1e-8)
    c.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.Pix2Pix3DLoss')
    c.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, prefetch_factor=2)

    # Training set.
    # c.training_set_kwargs, dataset_name = init_dataset_kwargs(data=opts.data)
    if opts.data_resolution == 0:
        # CLI value 0 means "use the dataset's native resolution".
        opts.data_resolution = None
    c.training_set_kwargs, dataset_name = init_conditional_dataset_kwargs(
        data=opts.data, mask_data=opts.mask_data, data_type=opts.data_type, resolution=opts.data_resolution)
    if opts.cond and not c.training_set_kwargs.use_labels:
        raise click.ClickException(
            '--cond=True requires labels specified in dataset.json')
    c.training_set_kwargs.use_labels = opts.cond
    c.training_set_kwargs.xflip = opts.mirror

    # Hyperparameters & settings.
    c.num_gpus = opts.gpus
    c.batch_size = opts.batch
    c.batch_gpu = opts.batch_gpu or opts.batch // opts.gpus
    c.G_kwargs.channel_base = c.D_kwargs.channel_base = opts.cbase
    c.G_kwargs.channel_max = c.D_kwargs.channel_max = opts.cmax
    c.G_kwargs.mapping_kwargs.num_layers = opts.map_depth
    c.D_kwargs.block_kwargs.freeze_layers = opts.freezed
    c.D_kwargs.epilogue_kwargs.mbstd_group_size = opts.mbstd_group
    c.loss_kwargs.r1_gamma = opts.gamma
    c.G_opt_kwargs.lr = (
        0.002 if opts.cfg == 'stylegan2' else 0.0025) if opts.glr is None else opts.glr
    c.D_opt_kwargs.lr = opts.dlr
    c.metrics = opts.metrics
    c.total_kimg = opts.kimg
    c.kimg_per_tick = opts.tick
    c.image_snapshot_ticks = c.network_snapshot_ticks = opts.snap
    c.random_seed = c.training_set_kwargs.random_seed = opts.seed
    # NOTE(review): num_workers=0 forces synchronous data loading; some torch
    # versions reject prefetch_factor (set above) when num_workers == 0 — confirm.
    c.data_loader_kwargs.num_workers = 0

    # Loss parameters
    c.loss_kwargs.random_c_prob = opts.random_c_prob
    c.loss_kwargs.lambda_l1 = opts.lambda_l1
    c.loss_kwargs.lambda_lpips = opts.lambda_lpips
    c.loss_kwargs.lambda_D_semantic = opts.lambda_d_semantic
    c.loss_kwargs.seg_weight = opts.seg_weight
    c.loss_kwargs.edge_weight = opts.edge_weight
    c.loss_kwargs.only_raw_recons = opts.only_raw_recons
    c.loss_kwargs.silhouette_loss = opts.silhouette_loss
    c.loss_kwargs.lambda_cross_view = opts.lambda_cross_view

    # Mask parameters: choose the conditioning mapping network per input modality.
    if opts.data_type == 'seg':
        c.G_kwargs.mapping_kwargs.in_resolution = c.training_set_kwargs.resolution
        c.G_kwargs.mapping_kwargs.in_channels = opts.semantic_channels
        # c.G_kwargs.mapping_kwargs.class_name = 'training.triplane_cond.MaskMappingNetwork'
        c.G_kwargs.mapping_kwargs.class_name = 'training.triplane_cond.MaskMappingNetwork_disentangle'
    elif opts.data_type == 'edge':
        c.G_kwargs.mapping_kwargs.in_resolution = c.training_set_kwargs.resolution
        c.G_kwargs.mapping_kwargs.in_channels = 1
        c.G_kwargs.mapping_kwargs.geometry_layer = opts.geometry_layer
        # c.G_kwargs.mapping_kwargs.class_name = 'training.triplane_cond.EdgeMappingNetwork'
        c.G_kwargs.mapping_kwargs.class_name = 'training.triplane_cond.EdgeMappingNetwork_disentangle'
    else:
        raise click.ClickException(
            f'--data_type={opts.data_type} not supported.')

    # Sanity checks.
    if c.batch_size % c.num_gpus != 0:
        raise click.ClickException('--batch must be a multiple of --gpus')
    if c.batch_size % (c.num_gpus * c.batch_gpu) != 0:
        raise click.ClickException(
            '--batch must be a multiple of --gpus times --batch-gpu')
    if c.batch_gpu < c.D_kwargs.epilogue_kwargs.mbstd_group_size:
        raise click.ClickException(
            '--batch-gpu cannot be smaller than --mbstd')
    if any(not metric_main.is_valid_metric(metric) for metric in c.metrics):
        raise click.ClickException('\n'.join(
            ['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))

    # Base configuration.
    c.ema_kimg = c.batch_size * 10 / 32
    # c.G_kwargs.class_name = 'training.triplane.TriPlaneGenerator'
    if opts.render_mask:
        # c.G_kwargs.class_name = 'training.triplane_cond.TriPlaneSemanticGenerator'
        c.G_kwargs.class_name = 'training.triplane_cond.TriPlaneSemanticEntangleGenerator'
        if opts.use_bg:
            c.G_kwargs.class_name = 'training.triplane_cond.TriPlaneSemanticEntangleGenerator_withBG'
    else:
        c.G_kwargs.class_name = 'training.triplane_cond.TriPlaneGenerator'
    c.D_kwargs.class_name = 'training.dual_discriminator.DualDiscriminator'
    # Speed up training by using regular convolutions instead of grouped convolutions.
    c.G_kwargs.fused_modconv_default = 'inference_only'
    # Filter mode for raw images ['antialiased', 'none', float [0-1]]
    c.loss_kwargs.filter_mode = 'antialiased'
    # Regularization for discriminator pose conditioning
    c.D_kwargs.disc_c_noise = opts.disc_c_noise

    # Pick the super-resolution modules matching the dataset resolution.
    if c.training_set_kwargs.resolution == 512:
        sr_module = 'training.superresolution.SuperresolutionHybrid8XDC'
        sr_module_semantic = 'training.superresolution.SuperresolutionHybrid8XDC_semantic'
    elif c.training_set_kwargs.resolution == 256:
        sr_module = 'training.superresolution.SuperresolutionHybrid4X'
        sr_module_semantic = 'training.superresolution.SuperresolutionHybrid4X_semantic'
    elif c.training_set_kwargs.resolution == 128:
        sr_module = 'training.superresolution.SuperresolutionHybrid2X'
        sr_module_semantic = 'training.superresolution.SuperresolutionHybrid2X_semantic'
    else:
        assert False, f"Unsupported resolution {c.training_set_kwargs.resolution}; make a new superresolution module"
    if opts.sr_module != None:
        # Explicit CLI override (RGB branch only; the semantic SR module is not overridden).
        sr_module = opts.sr_module

    rendering_options = {
        'image_resolution': c.training_set_kwargs.resolution,
        'disparity_space_sampling': False,
        'clamp_mode': 'softplus',
        'superresolution_module': sr_module,
        'superresolution_module_semantic': sr_module_semantic,
        # if true, fill generator pose conditioning label with dummy zero vector
        'c_gen_conditioning_zero': not opts.gen_pose_cond,
        'gpc_reg_prob': opts.gpc_reg_prob if opts.gen_pose_cond else None,
        'c_scale': opts.c_scale,  # mutliplier for generator pose conditioning label
        # [random or none], whether to inject pixel noise into super-resolution layers
        'superresolution_noise_mode': opts.sr_noise_mode,
        'density_reg': opts.density_reg,  # strength of density regularization
        # distance at which to sample perturbed points for density regularization
        'density_reg_p_dist': opts.density_reg_p_dist,
        # for experimenting with variations on density regularization
        'reg_type': opts.reg_type,
        'decoder_lr_mul': opts.decoder_lr_mul,  # learning rate multiplier for decoder
        'sr_antialias': True,
    }

    # Per-dataset camera/ray sampling setup.
    if opts.cfg == 'ffhq' or opts.cfg == 'celeba':
        rendering_options.update({
            # number of uniform samples to take per ray.
            'depth_resolution': 48,
            # number of importance samples to take per ray.
            'depth_resolution_importance': 48,
            # near point along each ray to start taking samples.
            'ray_start': 2.25,
            'ray_end': 3.3,  # far point along each ray to stop taking samples.
            # the side-length of the bounding box spanned by the tri-planes; box_warp=1 means [-0.5, -0.5, -0.5] -> [0.5, 0.5, 0.5].
            'box_warp': 1,
            # used only in the visualizer to specify camera orbit radius.
            'avg_camera_radius': 2.7,
            # used only in the visualizer to control center of camera rotation.
            'avg_camera_pivot': [0, 0, 0.2],
        })
    elif opts.cfg == 'afhq':
        rendering_options.update({
            'depth_resolution': 48,
            'depth_resolution_importance': 48,
            'ray_start': 2.25,
            'ray_end': 3.3,
            'box_warp': 1,
            'avg_camera_radius': 2.7,
            'avg_camera_pivot': [0, 0, -0.06],
        })
    elif opts.cfg == 'shapenet':
        rendering_options.update({
            'depth_resolution': 64,
            'depth_resolution_importance': 64,
            'ray_start': 0.1,
            'ray_end': 2.6,
            'box_warp': 1.6,
            'white_back': True,
            'avg_camera_radius': 1.7,
            'avg_camera_pivot': [0, 0, 0],
        })
    else:
        assert False, "Need to specify config"

    if opts.density_reg > 0:
        c.G_reg_interval = opts.density_reg_every
    c.G_kwargs.rendering_kwargs = rendering_options
    c.G_kwargs.num_fp16_res = 0
    # Blur the images seen by the discriminator.
    c.loss_kwargs.blur_init_sigma = 10
    # Fade out the blur during the first N kimg.
    c.loss_kwargs.blur_fade_kimg = c.batch_size * opts.blur_fade_kimg / 32
    c.loss_kwargs.gpc_reg_prob = opts.gpc_reg_prob if opts.gen_pose_cond else None
    c.loss_kwargs.gpc_reg_fade_kimg = opts.gpc_reg_fade_kimg
    c.loss_kwargs.dual_discrimination = True
    c.loss_kwargs.neural_rendering_resolution_initial = opts.neural_rendering_resolution_initial
    c.loss_kwargs.neural_rendering_resolution_final = opts.neural_rendering_resolution_final
    c.loss_kwargs.neural_rendering_resolution_fade_kimg = opts.neural_rendering_resolution_fade_kimg
    c.G_kwargs.sr_num_fp16_res = opts.sr_num_fp16_res
    c.G_kwargs.sr_kwargs = dnnlib.EasyDict(
        channel_base=opts.cbase, channel_max=opts.cmax, fused_modconv_default='inference_only')
    c.loss_kwargs.style_mixing_prob = opts.style_mixing_prob

    # Augmentation.
    if opts.aug != 'noaug':
        c.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', xflip=1, rotate90=1, xint=1,
            scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1)
        if opts.aug == 'ada':
            c.ada_target = opts.target
        if opts.aug == 'fixed':
            c.augment_p = opts.p

    # Resume.
    if opts.resume is not None:
        c.resume_pkl = opts.resume
        c.ada_kimg = 100  # Make ADA react faster at the beginning.
        c.ema_rampup = None  # Disable EMA rampup.
        if not opts.resume_blur:
            c.loss_kwargs.blur_init_sigma = 0  # Disable blur rampup.
            c.loss_kwargs.gpc_reg_fade_kimg = 0  # Disable swapping rampup

    # Performance-related toggles.
    # if opts.fp32:
    #     c.G_kwargs.num_fp16_res = c.D_kwargs.num_fp16_res = 0
    #     c.G_kwargs.conv_clamp = c.D_kwargs.conv_clamp = None
    c.G_kwargs.num_fp16_res = opts.g_num_fp16_res
    c.G_kwargs.conv_clamp = 256 if opts.g_num_fp16_res > 0 else None
    c.D_kwargs.num_fp16_res = opts.d_num_fp16_res
    c.D_kwargs.conv_clamp = 256 if opts.d_num_fp16_res > 0 else None
    if opts.nobench:
        c.cudnn_benchmark = False

    # Description string.
    desc = f'{opts.cfg:s}-{dataset_name:s}-gpus{c.num_gpus:d}-batch{c.batch_size:d}-gamma{c.loss_kwargs.r1_gamma:g}'
    if opts.desc is not None:
        desc += f'-{opts.desc}'

    # Logging.
    c.wandb_log = opts.wandb_log
    c.no_eval = opts.no_eval
    c.debug = opts.debug

    # Dis for masks: optional second discriminator on the semantic renderings,
    # configured identically to the image discriminator.
    if opts.dis_mask:
        c.D_semantic_kwargs = c.D_kwargs.copy()
    else:
        c.D_semantic_kwargs = None

    # Launch.
    launch_training(c=c, desc=desc, outdir=opts.outdir, dry_run=opts.dry_run)
# ----------------------------------------------------------------------------
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
# ----------------------------------------------------------------------------
| 31,932 | 57.808471 | 223 | py |
pix2pix3D | pix2pix3D-main/training/dual_discriminator.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Discriminator architectures from the paper
"Efficient Geometry-aware 3D Generative Adversarial Networks"."""
import numpy as np
import torch
from torch_utils import persistence
from torch_utils.ops import upfirdn2d
from training.networks_stylegan2 import DiscriminatorBlock, MappingNetwork, DiscriminatorEpilogue
@persistence.persistent_class
class SingleDiscriminator(torch.nn.Module):
    """StyleGAN2-style discriminator that scores only the final image
    (no dual discrimination of the raw neural rendering)."""

    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_fp16_res        = 4,        # Use FP16 for the N highest resolutions.
        conv_clamp          = 256,      # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
        sr_upsample_factor  = 1,        # Ignored for SingleDiscriminator
        block_kwargs        = {},       # Arguments for DiscriminatorBlock.
        mapping_kwargs      = {},       # Arguments for MappingNetwork.
        epilogue_kwargs     = {},       # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        # Block resolutions from full resolution down to 8 (the 4x4 epilogue is separate).
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            # No conditioning labels -> no conditioning map.
            cmap_dim = 0

        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0
        for res in self.block_resolutions:
            # The first (highest-resolution) block takes the image directly (in_channels=0).
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
            setattr(self, f'b{res}', block)  # registered as b512, b256, ... for checkpoint compatibility
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)

    def forward(self, img, c, update_emas=False, **block_kwargs):
        # `img` is a dict; only the final super-resolved image is used here.
        img = img['image']

        _ = update_emas  # unused
        x = None
        for res in self.block_resolutions:
            block = getattr(self, f'b{res}')
            x, img = block(x, img, **block_kwargs)

        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x = self.b4(x, img, cmap)
        return x

    def extra_repr(self):
        return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
#----------------------------------------------------------------------------
def filtered_resizing(image_orig_tensor, size, f, filter_mode='antialiased'):
    """Resize a batch of images (N, C, H, W) to (N, C, size, size).

    Args:
        image_orig_tensor: input image batch.
        size: target spatial resolution (output is square).
        f: low-pass FIR filter (as produced by upfirdn2d.setup_filter); only
            used by the 'classic' mode, may be None otherwise.
        filter_mode: 'antialiased' (antialiased bilinear), 'classic'
            (FIR upsample -> bilinear -> FIR downsample), 'none' (plain
            bilinear), or a float in (0, 1) blending the antialiased and
            aliased bilinear results.

    Returns:
        The resized image tensor.

    Raises:
        ValueError: if `filter_mode` is not one of the supported values.
            (Previously an unsupported mode fell through every branch and
            crashed with an UnboundLocalError on the return.)
    """
    if filter_mode == 'antialiased':
        ada_filtered_64 = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', align_corners=False, antialias=True)
    elif filter_mode == 'classic':
        # StyleGAN2-style resampling: 2x FIR upsample, bilinear resize with a
        # 2-pixel margin, then FIR downsample trimming the margin (padding=-1).
        ada_filtered_64 = upfirdn2d.upsample2d(image_orig_tensor, f, up=2)
        ada_filtered_64 = torch.nn.functional.interpolate(ada_filtered_64, size=(size * 2 + 2, size * 2 + 2), mode='bilinear', align_corners=False)
        ada_filtered_64 = upfirdn2d.downsample2d(ada_filtered_64, f, down=2, flip_filter=True, padding=-1)
    elif filter_mode == 'none':
        ada_filtered_64 = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', align_corners=False)
    elif isinstance(filter_mode, float):  # was `type(filter_mode) == float`
        assert 0 < filter_mode < 1

        filtered = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', align_corners=False, antialias=True)
        aliased = torch.nn.functional.interpolate(image_orig_tensor, size=(size, size), mode='bilinear', align_corners=False, antialias=False)
        ada_filtered_64 = (1 - filter_mode) * aliased + (filter_mode) * filtered
    else:
        raise ValueError(f'Unknown filter_mode: {filter_mode!r}')

    return ada_filtered_64
#----------------------------------------------------------------------------
@persistence.persistent_class
class DualDiscriminator(torch.nn.Module):
    """Dual discriminator: scores the concatenation of the super-resolved
    image and the (resized) raw neural rendering, doubling img_channels."""

    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_fp16_res        = 4,        # Use FP16 for the N highest resolutions.
        conv_clamp          = 256,      # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
        disc_c_noise        = 0,        # Corrupt camera parameters with X std dev of noise before disc. pose conditioning.
        block_kwargs        = {},       # Arguments for DiscriminatorBlock.
        mapping_kwargs      = {},       # Arguments for MappingNetwork.
        epilogue_kwargs     = {},       # Arguments for DiscriminatorEpilogue.
        **unused_kwargs
    ):
        super().__init__()
        img_channels *= 2  # image + raw rendering are stacked along channels
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        # Block resolutions from full resolution down to 8 (the 4x4 epilogue is separate).
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0

        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
            setattr(self, f'b{res}', block)  # registered as b512, b256, ... for checkpoint compatibility
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))
        self.disc_c_noise = disc_c_noise

    def forward(self, img, c, update_emas=False, **block_kwargs):
        # Upsample the raw rendering to match the final image and stack them.
        image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], f=self.resample_filter)
        img = torch.cat([img['image'], image_raw], 1)

        _ = update_emas  # unused
        x = None
        for res in self.block_resolutions:
            block = getattr(self, f'b{res}')
            x, img = block(x, img, **block_kwargs)

        cmap = None
        if self.c_dim > 0:
            # Optionally corrupt the pose conditioning with Gaussian noise as a
            # regularizer. Use an out-of-place add so the caller's tensor `c`
            # is not mutated (the previous `c += ...` modified it in place,
            # leaking noise into any later use of the same tensor).
            if self.disc_c_noise > 0:
                c = c + torch.randn_like(c) * c.std(0) * self.disc_c_noise
            cmap = self.mapping(None, c)
        x = self.b4(x, img, cmap)
        return x

    def extra_repr(self):
        return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class DummyDualDiscriminator(torch.nn.Module):
    """Variant of DualDiscriminator that linearly fades the raw-rendering
    channels to zero over training, degrading into a single discriminator."""

    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_fp16_res        = 4,        # Use FP16 for the N highest resolutions.
        conv_clamp          = 256,      # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
        block_kwargs        = {},       # Arguments for DiscriminatorBlock.
        mapping_kwargs      = {},       # Arguments for MappingNetwork.
        epilogue_kwargs     = {},       # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        img_channels *= 2  # image + (faded) raw rendering are stacked along channels
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0

        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
            setattr(self, f'b{res}', block)
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))
        self.raw_fade = 1  # multiplier on the raw channels; decays to 0 during training

    def forward(self, img, c, update_emas=False, **block_kwargs):
        # NOTE(review): the decay step 1/(500000/32) assumes one forward call
        # per 32-sample batch (i.e. fade over ~500k images); calling forward
        # more often per batch decays faster — confirm against the training loop.
        self.raw_fade = max(0, self.raw_fade - 1/(500000/32))

        image_raw = filtered_resizing(img['image_raw'], size=img['image'].shape[-1], f=self.resample_filter) * self.raw_fade
        img = torch.cat([img['image'], image_raw], 1)

        _ = update_emas  # unused
        x = None
        for res in self.block_resolutions:
            block = getattr(self, f'b{res}')
            x, img = block(x, img, **block_kwargs)

        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x = self.b4(x, img, cmap)
        return x

    def extra_repr(self):
        return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
#----------------------------------------------------------------------------
| 13,110 | 51.23506 | 149 | py |
pix2pix3D | pix2pix3D-main/training/loss_utils.py | import torch
import torch.nn.functional as F
def cross_entropy2d(input, target, weight=None, size_average=True):
    """Per-pixel cross-entropy loss for semantic-segmentation logits.

    Args:
        input: (N, C, H, W) unnormalized class logits.
        target: (N, Ht, Wt) integer class labels. If the spatial sizes differ,
            the logits are bilinearly upsampled to the label resolution.
        weight: optional per-class rescaling weights (length-C tensor).
        size_average: if True, average the loss over all pixels; if False,
            return the summed loss. (The previous implementation accepted
            this flag but silently ignored it, always averaging.)

    Returns:
        Scalar loss tensor.
    """
    n, c, h, w = input.size()
    nt, ht, wt = target.size()

    if (h != ht) or (w != wt):
        # Upsample logits to the label resolution before computing the loss.
        input = F.interpolate(input, size=(ht, wt), mode='bilinear', align_corners=True)

    # Flatten to (N*H*W, C) logits against (N*H*W,) labels.
    input = input.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
    target = target.view(-1)
    loss = F.cross_entropy(input, target, weight=weight,
                           reduction='mean' if size_average else 'sum')
    return loss
pix2pix3D | pix2pix3D-main/training/superresolution.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Superresolution network architectures from the paper
"Efficient Geometry-aware 3D Generative Adversarial Networks"."""
import torch
from training.networks_stylegan2 import Conv2dLayer, SynthesisLayer, ToRGBLayer
from torch_utils.ops import upfirdn2d
from torch_utils import persistence
from torch_utils import misc
from training.networks_stylegan2 import SynthesisBlock
import numpy as np
from training.networks_stylegan3 import SynthesisLayer as AFSynthesisLayer
#----------------------------------------------------------------------------
# for 512x512 generation
@persistence.persistent_class
class SuperresolutionHybrid8X(torch.nn.Module):
    """Super-resolution head for 512x512 generation: resizes the raw neural
    rendering to 128x128 if needed, then refines it through two modulated
    synthesis blocks (-> 256 -> 512)."""

    def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias,
                 num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,  # IGNORE
                 **block_kwargs):
        super().__init__()
        assert img_resolution == 512
        use_fp16 = sr_num_fp16_res > 0
        self.input_resolution = 128
        self.sr_antialias = sr_antialias
        self.block0 = SynthesisBlock(channels, 128, w_dim=512, resolution=256,
            img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
        self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=512,
            img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))

    def forward(self, rgb, x, ws, **block_kwargs):
        # Broadcast the final w to all super-resolution layers.
        ws = ws[:, -1:, :].repeat(1, 3, 1)

        if x.shape[-1] != self.input_resolution:
            # Bring both features and RGB to the expected input resolution.
            x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
                                                mode='bilinear', align_corners=False, antialias=self.sr_antialias)
            rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
                                                  mode='bilinear', align_corners=False, antialias=self.sr_antialias)

        x, rgb = self.block0(x, rgb, ws, **block_kwargs)
        x, rgb = self.block1(x, rgb, ws, **block_kwargs)
        return rgb
#----------------------------------------------------------------------------
# for 256x256 generation
@persistence.persistent_class
class SuperresolutionHybrid4X(torch.nn.Module):
    """Super-resolution head for 256x256 generation: refines the 128x128
    neural rendering through a no-upsample block followed by a 2x block."""

    def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias,
                 num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,  # IGNORE
                 **block_kwargs):
        super().__init__()
        assert img_resolution == 256
        use_fp16 = sr_num_fp16_res > 0
        self.sr_antialias = sr_antialias
        self.input_resolution = 128
        self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=128,
            img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
        self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=256,
            img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))

    def forward(self, rgb, x, ws, **block_kwargs):
        # Broadcast the final w to all super-resolution layers.
        ws = ws[:, -1:, :].repeat(1, 3, 1)

        # NOTE(review): this variant uses '<' (upsample-only) while the 2X/8X
        # variants use '!='; inputs larger than 128 pass through unresized —
        # confirm this asymmetry is intentional.
        if x.shape[-1] < self.input_resolution:
            x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
                                                mode='bilinear', align_corners=False, antialias=self.sr_antialias)
            rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
                                                  mode='bilinear', align_corners=False, antialias=self.sr_antialias)

        x, rgb = self.block0(x, rgb, ws, **block_kwargs)
        x, rgb = self.block1(x, rgb, ws, **block_kwargs)
        return rgb
#----------------------------------------------------------------------------
# for 128 x 128 generation
@persistence.persistent_class
class SuperresolutionHybrid2X(torch.nn.Module):
    """Super-resolution head for 128x128 generation: refines the 64x64
    neural rendering through a no-upsample block followed by a 2x block."""

    def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias,
                 num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,  # IGNORE
                 **block_kwargs):
        super().__init__()
        assert img_resolution == 128
        use_fp16 = sr_num_fp16_res > 0
        self.input_resolution = 64
        self.sr_antialias = sr_antialias
        self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=64,
            img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
        self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=128,
            img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))

    def forward(self, rgb, x, ws, **block_kwargs):
        # Broadcast the final w to all super-resolution layers.
        ws = ws[:, -1:, :].repeat(1, 3, 1)

        if x.shape[-1] != self.input_resolution:
            x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
                                                mode='bilinear', align_corners=False, antialias=self.sr_antialias)
            rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
                                                  mode='bilinear', align_corners=False, antialias=self.sr_antialias)

        x, rgb = self.block0(x, rgb, ws, **block_kwargs)
        x, rgb = self.block1(x, rgb, ws, **block_kwargs)
        return rgb
#----------------------------------------------------------------------------
# for 128 x 128 generation
@persistence.persistent_class
class SuperresolutionHybrid2X_semantic(torch.nn.Module):
    """Semantic variant of SuperresolutionHybrid2X for 128x128 generation:
    identical structure, but the output image has `semantic_channels`
    channels (segmentation logits) instead of 3 RGB channels."""

    def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias, semantic_channels,
                 num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,  # IGNORE
                 **block_kwargs):
        super().__init__()
        assert img_resolution == 128
        use_fp16 = sr_num_fp16_res > 0
        self.input_resolution = 64
        self.sr_antialias = sr_antialias
        self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=64,
            img_channels=semantic_channels, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
        self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=128,
            img_channels=semantic_channels, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))

    def forward(self, rgb, x, ws, **block_kwargs):
        # Broadcast the final w to all super-resolution layers.
        # `rgb` here carries the raw semantic rendering, not actual RGB.
        ws = ws[:, -1:, :].repeat(1, 3, 1)

        if x.shape[-1] != self.input_resolution:
            x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
                                                mode='bilinear', align_corners=False, antialias=self.sr_antialias)
            rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
                                                  mode='bilinear', align_corners=False, antialias=self.sr_antialias)

        x, rgb = self.block0(x, rgb, ws, **block_kwargs)
        x, rgb = self.block1(x, rgb, ws, **block_kwargs)
        return rgb
#----------------------------------------------------------------------------
# TODO: Delete (here for backwards compatibility with old 256x256 models)
@persistence.persistent_class
class SuperresolutionHybridDeepfp32(torch.nn.Module):
    """Legacy 256x256 super-resolution head kept for backwards compatibility
    with old checkpoints; unlike SuperresolutionHybrid4X it has no
    sr_antialias option and resizes without antialiasing."""

    def __init__(self, channels, img_resolution, sr_num_fp16_res,
                 num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,  # IGNORE
                 **block_kwargs):
        super().__init__()
        assert img_resolution == 256
        use_fp16 = sr_num_fp16_res > 0
        self.input_resolution = 128
        self.block0 = SynthesisBlockNoUp(channels, 128, w_dim=512, resolution=128,
            img_channels=3, is_last=False, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
        self.block1 = SynthesisBlock(128, 64, w_dim=512, resolution=256,
            img_channels=3, is_last=True, use_fp16=use_fp16, conv_clamp=(256 if use_fp16 else None), **block_kwargs)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter([1,3,3,1]))

    def forward(self, rgb, x, ws, **block_kwargs):
        # Broadcast the final w to all super-resolution layers.
        ws = ws[:, -1:, :].repeat(1, 3, 1)

        # Upsample-only guard, matching the 4X variant.
        if x.shape[-1] < self.input_resolution:
            x = torch.nn.functional.interpolate(x, size=(self.input_resolution, self.input_resolution),
                                                mode='bilinear', align_corners=False)
            rgb = torch.nn.functional.interpolate(rgb, size=(self.input_resolution, self.input_resolution),
                                                  mode='bilinear', align_corners=False)

        x, rgb = self.block0(x, rgb, ws, **block_kwargs)
        x, rgb = self.block1(x, rgb, ws, **block_kwargs)
        return rgb
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisBlockNoUp(torch.nn.Module):
    """StyleGAN2 synthesis block variant that does NOT upsample.

    Same layer layout as the regular SynthesisBlock (conv0/conv1/torgb),
    but both convolutions run at the block's own resolution, so input and
    output feature maps have identical spatial size.
    """
    def __init__(self,
        in_channels,                            # Number of input channels, 0 = first block.
        out_channels,                           # Number of output channels.
        w_dim,                                  # Intermediate latent (W) dimensionality.
        resolution,                             # Resolution of this block.
        img_channels,                           # Number of output color channels.
        is_last,                                # Is this the last block?
        architecture            = 'skip',       # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter         = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp              = 256,          # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16                = False,        # Use FP16 for this block?
        fp16_channels_last      = False,        # Use channels-last memory format with FP16?
        fused_modconv_default   = True,         # Default value of fused_modconv. 'inference_only' = True for inference, False for training.
        **layer_kwargs,                         # Arguments for SynthesisLayer.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        # Channels-last only makes sense together with fp16.
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.fused_modconv_default = fused_modconv_default
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        # num_conv / num_torgb record how many w vectors forward() will consume.
        self.num_conv = 0
        self.num_torgb = 0

        # First block (in_channels == 0) starts from a learned constant input.
        if in_channels == 0:
            self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))

        if in_channels != 0:
            self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution,
                conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
            self.num_conv += 1

        self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
            conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
        self.num_conv += 1

        if is_last or architecture == 'skip':
            self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
                conv_clamp=conv_clamp, channels_last=self.channels_last)
            self.num_torgb += 1

        # NOTE(review): `up=2` looks inherited from the upsampling SynthesisBlock;
        # for a no-upsampling block a resnet skip that doubles resolution would
        # not match conv1's output — confirm before using architecture='resnet'.
        if in_channels != 0 and architecture == 'resnet':
            self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
                resample_filter=resample_filter, channels_last=self.channels_last)

    def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
        """Run the block.

        x:   input features [N, in_channels, res, res] (ignored if in_channels == 0).
        img: running skip image (accumulated in place when not None).
        ws:  per-layer w vectors [N, num_conv + num_torgb, w_dim], consumed in order.
        Returns (x, img) at the same resolution.
        """
        _ = update_emas # unused
        misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
        w_iter = iter(ws.unbind(dim=1))
        # fp16 path is only taken on CUDA.
        if ws.device.type != 'cuda':
            force_fp32 = True
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        if fused_modconv is None:
            fused_modconv = self.fused_modconv_default
        if fused_modconv == 'inference_only':
            fused_modconv = (not self.training)

        # Input.
        if self.in_channels == 0:
            # Broadcast the learned constant over the batch.
            x = self.const.to(dtype=dtype, memory_format=memory_format)
            x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
        else:
            misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
            x = x.to(dtype=dtype, memory_format=memory_format)

        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == 'resnet':
            # Residual branch; gains of sqrt(0.5) keep variance roughly constant.
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
            x = y.add_(x)
        else:
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)

        # ToRGB.
        # (Upsampling of the incoming skip image is intentionally disabled in
        # this no-up variant; original SynthesisBlock code kept for reference.)
        # if img is not None:
        #     misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
        #     img = upfirdn2d.upsample2d(img, self.resample_filter)
        if self.is_last or self.architecture == 'skip':
            y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
            # Skip image is always accumulated in fp32.
            y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y

        assert x.dtype == dtype
        assert img is None or img.dtype == torch.float32
        return x, img

    def extra_repr(self):
        return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
#----------------------------------------------------------------------------
# for 512x512 generation
@persistence.persistent_class
class SuperresolutionHybrid8XDC(torch.nn.Module):
    """Super-resolution head for 512x512 RGB output.

    Resizes the neural-rendering outputs to 128 pixels (if needed) and then
    runs two upsampling synthesis blocks: 128 -> 256 -> 512.
    """

    def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias,
                 num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE
                 **block_kwargs):
        super().__init__()
        assert img_resolution == 512
        fp16_on = sr_num_fp16_res > 0
        clamp = 256 if fp16_on else None
        self.input_resolution = 128
        self.sr_antialias = sr_antialias
        self.block0 = SynthesisBlock(channels, 256, w_dim=512, resolution=256,
                img_channels=3, is_last=False, use_fp16=fp16_on, conv_clamp=clamp, **block_kwargs)
        self.block1 = SynthesisBlock(256, 128, w_dim=512, resolution=512,
                img_channels=3, is_last=True, use_fp16=fp16_on, conv_clamp=clamp, **block_kwargs)

    def forward(self, rgb, x, ws, **block_kwargs):
        # Broadcast the final mapped w across the 3 SR layers.
        ws = ws[:, -1:, :].repeat(1, 3, 1)

        if x.shape[-1] != self.input_resolution:
            target = (self.input_resolution, self.input_resolution)
            x = torch.nn.functional.interpolate(x, size=target, mode='bilinear',
                    align_corners=False, antialias=self.sr_antialias)
            rgb = torch.nn.functional.interpolate(rgb, size=target, mode='bilinear',
                    align_corners=False, antialias=self.sr_antialias)

        x, rgb = self.block0(x, rgb, ws, **block_kwargs)
        x, rgb = self.block1(x, rgb, ws, **block_kwargs)
        return rgb
#----------------------------------------------------------------------------
# for 512x512 generation
@persistence.persistent_class
class SuperresolutionHybrid8XDC_semantic(torch.nn.Module):
    """Super-resolution head for 512x512 output with `semantic_channels`
    output channels instead of 3-channel RGB.

    Same 128 -> 256 -> 512 two-block structure as SuperresolutionHybrid8XDC.
    """

    def __init__(self, channels, img_resolution, sr_num_fp16_res, sr_antialias, semantic_channels,
                 num_fp16_res=4, conv_clamp=None, channel_base=None, channel_max=None,# IGNORE
                 **block_kwargs):
        super().__init__()
        assert img_resolution == 512
        fp16_on = sr_num_fp16_res > 0
        clamp = 256 if fp16_on else None
        self.input_resolution = 128
        self.sr_antialias = sr_antialias
        self.block0 = SynthesisBlock(channels, 256, w_dim=512, resolution=256,
                img_channels=semantic_channels, is_last=False, use_fp16=fp16_on, conv_clamp=clamp, **block_kwargs)
        self.block1 = SynthesisBlock(256, 128, w_dim=512, resolution=512,
                img_channels=semantic_channels, is_last=True, use_fp16=fp16_on, conv_clamp=clamp, **block_kwargs)

    def forward(self, rgb, x, ws, **block_kwargs):
        # Broadcast the final mapped w across the 3 SR layers.
        ws = ws[:, -1:, :].repeat(1, 3, 1)

        if x.shape[-1] != self.input_resolution:
            target = (self.input_resolution, self.input_resolution)
            x = torch.nn.functional.interpolate(x, size=target, mode='bilinear',
                    align_corners=False, antialias=self.sr_antialias)
            rgb = torch.nn.functional.interpolate(rgb, size=target, mode='bilinear',
                    align_corners=False, antialias=self.sr_antialias)

        x, rgb = self.block0(x, rgb, ws, **block_kwargs)
        x, rgb = self.block1(x, rgb, ws, **block_kwargs)
        return rgb
#---------------------------------------------------------------------------- | 18,911 | 52.123596 | 140 | py |
pix2pix3D | pix2pix3D-main/training/loss.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Loss functions."""
import numpy as np
import torch
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import upfirdn2d
from training.dual_discriminator import filtered_resizing
import lpips
import torch.nn.functional as F
from training.loss_utils import cross_entropy2d
# ----------------------------------------------------------------------------
class Loss:
    """Abstract base class for training losses."""

    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, gain, cur_nimg):
        """Compute the loss terms for one training phase and backpropagate them.

        Subclasses must override this; the base implementation always raises.
        """
        raise NotImplementedError()
# ----------------------------------------------------------------------------
class StyleGAN2Loss(Loss):
    """Non-saturating StyleGAN2 GAN loss with EG3D extensions.

    Adds generator pose conditioning (GPC) swapping, dual discrimination on
    {final image, raw neural-rendering image}, a progressive discriminator
    input blur, annealed neural rendering resolution, and several optional
    tri-plane density regularizers selected via
    G.rendering_kwargs['reg_type'] ('l1', 'monotonic-detach',
    'monotonic-fixed').

    Fix vs. previous revision: the style-mixing code inside the density
    regularization branches referenced undefined names `z`/`c` (the
    parameters are `gen_z`/`gen_c`), raising NameError whenever
    style_mixing_prob > 0 during density regularization.
    """

    def __init__(self, device, G, D, augment_pipe=None, r1_gamma=10, style_mixing_prob=0, pl_weight=0, pl_batch_shrink=2, pl_decay=0.01, pl_no_weight_grad=False, blur_init_sigma=0, blur_fade_kimg=0, r1_gamma_init=0, r1_gamma_fade_kimg=0, neural_rendering_resolution_initial=64, neural_rendering_resolution_final=None, neural_rendering_resolution_fade_kimg=0, gpc_reg_fade_kimg=1000, gpc_reg_prob=None, dual_discrimination=False, filter_mode='antialiased'):
        super().__init__()
        self.device = device
        self.G = G
        self.D = D
        self.augment_pipe = augment_pipe                # optional augmentation pipeline (e.g. ADA)
        self.r1_gamma = r1_gamma                        # R1 gradient penalty weight
        self.style_mixing_prob = style_mixing_prob
        # Path-length regularization settings (inactive unless pl_weight > 0).
        self.pl_weight = pl_weight
        self.pl_batch_shrink = pl_batch_shrink
        self.pl_decay = pl_decay
        self.pl_no_weight_grad = pl_no_weight_grad
        self.pl_mean = torch.zeros([], device=device)
        # Discriminator input blur, faded out over blur_fade_kimg kimg.
        self.blur_init_sigma = blur_init_sigma
        self.blur_fade_kimg = blur_fade_kimg
        self.r1_gamma_init = r1_gamma_init
        self.r1_gamma_fade_kimg = r1_gamma_fade_kimg
        # Neural rendering resolution anneals from initial to final.
        self.neural_rendering_resolution_initial = neural_rendering_resolution_initial
        self.neural_rendering_resolution_final = neural_rendering_resolution_final
        self.neural_rendering_resolution_fade_kimg = neural_rendering_resolution_fade_kimg
        # Generator pose conditioning (GPC) swapping schedule.
        self.gpc_reg_fade_kimg = gpc_reg_fade_kimg
        self.gpc_reg_prob = gpc_reg_prob
        self.dual_discrimination = dual_discrimination
        self.filter_mode = filter_mode
        self.resample_filter = upfirdn2d.setup_filter([1, 3, 3, 1], device=device)
        self.blur_raw_target = True
        assert self.gpc_reg_prob is None or (0 <= self.gpc_reg_prob <= 1)

    def run_G(self, z, c, swapping_prob, neural_rendering_resolution, update_emas=False):
        """Map z -> w (optionally swapping the pose conditioning) and synthesize."""
        if swapping_prob is not None:
            # With probability swapping_prob (per sample), condition the mapping
            # network on another sample's pose to decorrelate pose and content.
            c_swapped = torch.roll(c.clone(), 1, 0)
            c_gen_conditioning = torch.where(
                torch.rand((c.shape[0], 1), device=c.device) < swapping_prob, c_swapped, c)
        else:
            c_gen_conditioning = torch.zeros_like(c)

        ws = self.G.mapping(z, c_gen_conditioning, update_emas=update_emas)
        if self.style_mixing_prob > 0:
            with torch.autograd.profiler.record_function('style_mixing'):
                cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
                cutoff = torch.where(
                    torch.rand([], device=ws.device) < self.style_mixing_prob,
                    cutoff, torch.full_like(cutoff, ws.shape[1]))
                ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, update_emas=False)[:, cutoff:]
        gen_output = self.G.synthesis(
            ws, c, neural_rendering_resolution=neural_rendering_resolution, update_emas=update_emas)
        return gen_output, ws

    def run_D(self, img, c, blur_sigma=0, blur_sigma_raw=0, update_emas=False):
        """Optionally blur and augment the image pair, then run the discriminator.

        NOTE: mutates img['image'] / img['image_raw'] in place.
        """
        blur_size = np.floor(blur_sigma * 3)
        if blur_size > 0:
            with torch.autograd.profiler.record_function('blur'):
                f = torch.arange(-blur_size, blur_size + 1,
                                 device=img['image'].device).div(blur_sigma).square().neg().exp2()
                img['image'] = upfirdn2d.filter2d(img['image'], f / f.sum())

        if self.augment_pipe is not None:
            # Apply identical augmentations to the full-res and (upsampled) raw
            # images by stacking them channel-wise, then split them again.
            augmented_pair = self.augment_pipe(torch.cat([img['image'],
                torch.nn.functional.interpolate(img['image_raw'], size=img['image'].shape[2:], mode='bilinear', antialias=True)],
                dim=1))
            img['image'] = augmented_pair[:, :img['image'].shape[1]]
            img['image_raw'] = torch.nn.functional.interpolate(
                augmented_pair[:, img['image'].shape[1]:], size=img['image_raw'].shape[2:], mode='bilinear', antialias=True)

        logits = self.D(img, c, update_emas=update_emas)
        return logits

    def accumulate_gradients(self, phase, real_img, real_c, gen_z, gen_c, gain, cur_nimg):
        """Compute and backpropagate all loss terms for one training phase."""
        assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg', 'Dboth']
        # Collapse regularization phases that are disabled by configuration.
        if self.G.rendering_kwargs.get('density_reg', 0) == 0:
            phase = {'Greg': 'none', 'Gboth': 'Gmain'}.get(phase, phase)
        if self.r1_gamma == 0:
            phase = {'Dreg': 'none', 'Dboth': 'Dmain'}.get(phase, phase)

        blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), 0) * \
            self.blur_init_sigma if self.blur_fade_kimg > 0 else 0
        r1_gamma = self.r1_gamma

        # Fade the GPC swapping probability from 1 to gpc_reg_prob.
        alpha = min(cur_nimg / (self.gpc_reg_fade_kimg * 1e3),
                    1) if self.gpc_reg_fade_kimg > 0 else 1
        swapping_prob = (1 - alpha) * 1 + alpha * \
            self.gpc_reg_prob if self.gpc_reg_prob is not None else None

        # Anneal the neural rendering resolution from initial to final.
        if self.neural_rendering_resolution_final is not None:
            alpha = min(cur_nimg / (self.neural_rendering_resolution_fade_kimg * 1e3), 1)
            neural_rendering_resolution = int(np.rint(
                self.neural_rendering_resolution_initial * (1 - alpha)
                + self.neural_rendering_resolution_final * alpha))
        else:
            neural_rendering_resolution = self.neural_rendering_resolution_initial

        # Dual-discrimination target: full-res image + downsampled raw image.
        real_img_raw = filtered_resizing(
            real_img, size=neural_rendering_resolution, f=self.resample_filter, filter_mode=self.filter_mode)
        if self.blur_raw_target:
            blur_size = np.floor(blur_sigma * 3)
            if blur_size > 0:
                f = torch.arange(-blur_size, blur_size + 1,
                                 device=real_img_raw.device).div(blur_sigma).square().neg().exp2()
                real_img_raw = upfirdn2d.filter2d(real_img_raw, f / f.sum())
        real_img = {'image': real_img, 'image_raw': real_img_raw}

        # Gmain: Maximize logits for generated images.
        if phase in ['Gmain', 'Gboth']:
            with torch.autograd.profiler.record_function('Gmain_forward'):
                gen_img, _gen_ws = self.run_G(
                    gen_z, gen_c, swapping_prob=swapping_prob, neural_rendering_resolution=neural_rendering_resolution)
                gen_logits = self.run_D(gen_img, gen_c, blur_sigma=blur_sigma)
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Gmain = torch.nn.functional.softplus(-gen_logits)
                training_stats.report('Loss/G/loss', loss_Gmain)
            with torch.autograd.profiler.record_function('Gmain_backward'):
                loss_Gmain.mean().mul(gain).backward()

        # Density regularization: L1 between densities at nearby random points.
        if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs['reg_type'] == 'l1':
            if swapping_prob is not None:
                c_swapped = torch.roll(gen_c.clone(), 1, 0)
                c_gen_conditioning = torch.where(torch.rand(
                    [], device=gen_c.device) < swapping_prob, c_swapped, gen_c)
            else:
                c_gen_conditioning = torch.zeros_like(gen_c)

            ws = self.G.mapping(gen_z, c_gen_conditioning, update_emas=False)
            if self.style_mixing_prob > 0:
                with torch.autograd.profiler.record_function('style_mixing'):
                    cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
                    cutoff = torch.where(torch.rand(
                        [], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
                    # Fixed: previously used undefined names `z`/`c`.
                    ws[:, cutoff:] = self.G.mapping(
                        torch.randn_like(gen_z), gen_c, update_emas=False)[:, cutoff:]
            initial_coordinates = torch.rand((ws.shape[0], 1000, 3), device=ws.device) * 2 - 1
            perturbed_coordinates = initial_coordinates + \
                torch.randn_like(initial_coordinates) * \
                self.G.rendering_kwargs['density_reg_p_dist']
            all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1)
            sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(
                all_coordinates), ws, update_emas=False)['sigma']
            sigma_initial = sigma[:, :sigma.shape[1]//2]
            sigma_perturbed = sigma[:, sigma.shape[1]//2:]
            TVloss = torch.nn.functional.l1_loss(
                sigma_initial, sigma_perturbed) * self.G.rendering_kwargs['density_reg']
            TVloss.mul(gain).backward()

        # Alternative density regularization: density must not increase behind
        # a point along -z (detached target), plus the same L1 term as above.
        if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs['reg_type'] == 'monotonic-detach':
            if swapping_prob is not None:
                c_swapped = torch.roll(gen_c.clone(), 1, 0)
                c_gen_conditioning = torch.where(torch.rand(
                    [], device=gen_c.device) < swapping_prob, c_swapped, gen_c)
            else:
                c_gen_conditioning = torch.zeros_like(gen_c)
            ws = self.G.mapping(gen_z, c_gen_conditioning, update_emas=False)

            initial_coordinates = torch.rand((ws.shape[0], 2000, 3), device=ws.device) * 2 - 1  # Front
            perturbed_coordinates = initial_coordinates + \
                torch.tensor([0, 0, -1], device=ws.device) * (1/256) * \
                self.G.rendering_kwargs['box_warp']  # Behind
            all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1)
            sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(
                all_coordinates), ws, update_emas=False)['sigma']
            sigma_initial = sigma[:, :sigma.shape[1]//2]
            sigma_perturbed = sigma[:, sigma.shape[1]//2:]

            monotonic_loss = torch.relu(sigma_initial.detach() - sigma_perturbed).mean() * 10
            monotonic_loss.mul(gain).backward()

            if swapping_prob is not None:
                c_swapped = torch.roll(gen_c.clone(), 1, 0)
                c_gen_conditioning = torch.where(torch.rand(
                    [], device=gen_c.device) < swapping_prob, c_swapped, gen_c)
            else:
                c_gen_conditioning = torch.zeros_like(gen_c)
            ws = self.G.mapping(gen_z, c_gen_conditioning, update_emas=False)
            if self.style_mixing_prob > 0:
                with torch.autograd.profiler.record_function('style_mixing'):
                    cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
                    cutoff = torch.where(torch.rand(
                        [], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
                    # Fixed: previously used undefined names `z`/`c`.
                    ws[:, cutoff:] = self.G.mapping(
                        torch.randn_like(gen_z), gen_c, update_emas=False)[:, cutoff:]
            initial_coordinates = torch.rand((ws.shape[0], 1000, 3), device=ws.device) * 2 - 1
            perturbed_coordinates = initial_coordinates + \
                torch.randn_like(initial_coordinates) * (1/256) * \
                self.G.rendering_kwargs['box_warp']
            all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1)
            sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(
                all_coordinates), ws, update_emas=False)['sigma']
            sigma_initial = sigma[:, :sigma.shape[1]//2]
            sigma_perturbed = sigma[:, sigma.shape[1]//2:]
            TVloss = torch.nn.functional.l1_loss(
                sigma_initial, sigma_perturbed) * self.G.rendering_kwargs['density_reg']
            TVloss.mul(gain).backward()

        # Alternative density regularization: same as above but without
        # detaching the front density.
        if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs['reg_type'] == 'monotonic-fixed':
            if swapping_prob is not None:
                c_swapped = torch.roll(gen_c.clone(), 1, 0)
                c_gen_conditioning = torch.where(torch.rand(
                    [], device=gen_c.device) < swapping_prob, c_swapped, gen_c)
            else:
                c_gen_conditioning = torch.zeros_like(gen_c)
            ws = self.G.mapping(gen_z, c_gen_conditioning, update_emas=False)

            initial_coordinates = torch.rand((ws.shape[0], 2000, 3), device=ws.device) * 2 - 1  # Front
            perturbed_coordinates = initial_coordinates + \
                torch.tensor([0, 0, -1], device=ws.device) * (1/256) * \
                self.G.rendering_kwargs['box_warp']  # Behind
            all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1)
            sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(
                all_coordinates), ws, update_emas=False)['sigma']
            sigma_initial = sigma[:, :sigma.shape[1]//2]
            sigma_perturbed = sigma[:, sigma.shape[1]//2:]

            monotonic_loss = torch.relu(sigma_initial - sigma_perturbed).mean() * 10
            monotonic_loss.mul(gain).backward()

            if swapping_prob is not None:
                c_swapped = torch.roll(gen_c.clone(), 1, 0)
                c_gen_conditioning = torch.where(torch.rand(
                    [], device=gen_c.device) < swapping_prob, c_swapped, gen_c)
            else:
                c_gen_conditioning = torch.zeros_like(gen_c)
            ws = self.G.mapping(gen_z, c_gen_conditioning, update_emas=False)
            if self.style_mixing_prob > 0:
                with torch.autograd.profiler.record_function('style_mixing'):
                    cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
                    cutoff = torch.where(torch.rand(
                        [], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
                    # Fixed: previously used undefined names `z`/`c`.
                    ws[:, cutoff:] = self.G.mapping(
                        torch.randn_like(gen_z), gen_c, update_emas=False)[:, cutoff:]
            initial_coordinates = torch.rand((ws.shape[0], 1000, 3), device=ws.device) * 2 - 1
            perturbed_coordinates = initial_coordinates + \
                torch.randn_like(initial_coordinates) * (1/256) * \
                self.G.rendering_kwargs['box_warp']
            all_coordinates = torch.cat([initial_coordinates, perturbed_coordinates], dim=1)
            sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(
                all_coordinates), ws, update_emas=False)['sigma']
            sigma_initial = sigma[:, :sigma.shape[1]//2]
            sigma_perturbed = sigma[:, sigma.shape[1]//2:]
            TVloss = torch.nn.functional.l1_loss(
                sigma_initial, sigma_perturbed) * self.G.rendering_kwargs['density_reg']
            TVloss.mul(gain).backward()

        # Dmain: Minimize logits for generated images.
        loss_Dgen = 0
        if phase in ['Dmain', 'Dboth']:
            with torch.autograd.profiler.record_function('Dgen_forward'):
                gen_img, _gen_ws = self.run_G(gen_z, gen_c, swapping_prob=swapping_prob,
                                              neural_rendering_resolution=neural_rendering_resolution, update_emas=True)
                gen_logits = self.run_D(
                    gen_img, gen_c, blur_sigma=blur_sigma, update_emas=True)
                training_stats.report('Loss/scores/fake', gen_logits)
                training_stats.report('Loss/signs/fake', gen_logits.sign())
                loss_Dgen = torch.nn.functional.softplus(gen_logits)
            with torch.autograd.profiler.record_function('Dgen_backward'):
                loss_Dgen.mean().mul(gain).backward()

        # Dmain: Maximize logits for real images.
        # Dr1: Apply R1 regularization.
        if phase in ['Dmain', 'Dreg', 'Dboth']:
            name = 'Dreal' if phase == 'Dmain' else 'Dr1' if phase == 'Dreg' else 'Dreal_Dr1'
            with torch.autograd.profiler.record_function(name + '_forward'):
                # Gradients w.r.t. the real images are only needed for R1.
                real_img_tmp_image = real_img['image'].detach().requires_grad_(phase in ['Dreg', 'Dboth'])
                real_img_tmp_image_raw = real_img['image_raw'].detach().requires_grad_(phase in ['Dreg', 'Dboth'])
                real_img_tmp = {'image': real_img_tmp_image,
                                'image_raw': real_img_tmp_image_raw}

                real_logits = self.run_D(real_img_tmp, real_c, blur_sigma=blur_sigma)
                training_stats.report('Loss/scores/real', real_logits)
                training_stats.report('Loss/signs/real', real_logits.sign())

                loss_Dreal = 0
                if phase in ['Dmain', 'Dboth']:
                    loss_Dreal = torch.nn.functional.softplus(-real_logits)
                    training_stats.report('Loss/D/loss', loss_Dgen + loss_Dreal)

                loss_Dr1 = 0
                if phase in ['Dreg', 'Dboth']:
                    if self.dual_discrimination:
                        # Penalize gradients w.r.t. both discriminator inputs.
                        with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
                            r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[
                                real_img_tmp['image'], real_img_tmp['image_raw']], create_graph=True, only_inputs=True)
                            r1_grads_image = r1_grads[0]
                            r1_grads_image_raw = r1_grads[1]
                        r1_penalty = r1_grads_image.square().sum(
                            [1, 2, 3]) + r1_grads_image_raw.square().sum([1, 2, 3])
                    else:  # single discrimination
                        with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
                            r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[
                                real_img_tmp['image']], create_graph=True, only_inputs=True)
                            r1_grads_image = r1_grads[0]
                        r1_penalty = r1_grads_image.square().sum([1, 2, 3])
                    loss_Dr1 = r1_penalty * (r1_gamma / 2)
                    training_stats.report('Loss/r1_penalty', r1_penalty)
                    training_stats.report('Loss/D/reg', loss_Dr1)

            with torch.autograd.profiler.record_function(name + '_backward'):
                (loss_Dreal + loss_Dr1).mean().mul(gain).backward()
# ----------------------------------------------------------------------------
class Pix2Pix3DLoss(Loss):
    def __init__(self, device, G, D, D_semantic=None, augment_pipe=None, r1_gamma=10, style_mixing_prob=0, pl_weight=0, pl_batch_shrink=2, pl_decay=0.01, pl_no_weight_grad=False, blur_init_sigma=0, blur_fade_kimg=0, r1_gamma_init=0, r1_gamma_fade_kimg=0, neural_rendering_resolution_initial=64, neural_rendering_resolution_final=None, neural_rendering_resolution_fade_kimg=0, gpc_reg_fade_kimg=1000, gpc_reg_prob=None, dual_discrimination=False, filter_mode='antialiased',
                 random_c_prob=0, lambda_l1=2, lambda_lpips=10, lambda_D_semantic=1, seg_weight=0, edge_weight=2, only_raw_recons=False, silhouette_loss=False,
                 lambda_cross_view=0):
        """Initialize the pix2pix3D loss.

        Shares the GAN/R1/GPC/blur hyper-parameters with StyleGAN2Loss and
        adds: an optional semantic discriminator D_semantic, reconstruction
        weights (lambda_l1, lambda_lpips), semantic-GAN weight
        (lambda_D_semantic), per-class segmentation weights (seg_weight in
        {0, 1, 2} selects a preset table), and edge/silhouette/cross-view
        options consumed elsewhere in this class.
        """
        super().__init__()
        self.device = device
        self.G = G
        self.D = D
        # Optional second discriminator operating on semantic outputs.
        self.D_semantic = D_semantic
        self.augment_pipe = augment_pipe
        self.r1_gamma = r1_gamma
        self.style_mixing_prob = style_mixing_prob
        # Path-length regularization settings (inactive unless pl_weight > 0).
        self.pl_weight = pl_weight
        self.pl_batch_shrink = pl_batch_shrink
        self.pl_decay = pl_decay
        self.pl_no_weight_grad = pl_no_weight_grad
        self.pl_mean = torch.zeros([], device=device)
        # Discriminator input blur, faded out over blur_fade_kimg kimg.
        self.blur_init_sigma = blur_init_sigma
        self.blur_fade_kimg = blur_fade_kimg
        self.r1_gamma_init = r1_gamma_init
        self.r1_gamma_fade_kimg = r1_gamma_fade_kimg
        # Neural rendering resolution anneals from initial to final.
        self.neural_rendering_resolution_initial = neural_rendering_resolution_initial
        self.neural_rendering_resolution_final = neural_rendering_resolution_final
        self.neural_rendering_resolution_fade_kimg = neural_rendering_resolution_fade_kimg
        # Generator pose conditioning (GPC) regularization schedule.
        self.gpc_reg_fade_kimg = gpc_reg_fade_kimg
        self.gpc_reg_prob = gpc_reg_prob
        self.dual_discrimination = dual_discrimination
        self.filter_mode = filter_mode
        self.resample_filter = upfirdn2d.setup_filter(
            [1, 3, 3, 1], device=device)
        self.blur_raw_target = True
        assert self.gpc_reg_prob is None or (0 <= self.gpc_reg_prob <= 1)
        # Probability of rendering from a random camera instead of the image pose.
        self.random_c_prob = random_c_prob
        # Perceptual loss network (VGG backbone).
        self.lpips_loss = lpips.LPIPS(net='vgg').to(device=device)
        self.lambda_l1 = lambda_l1
        self.lambda_lpips = lambda_lpips
        self.lambda_D_semantic = lambda_D_semantic
        # Preset per-class weights for the segmentation loss (19 classes;
        # presumably inverse-frequency weights — confirm against the dataset).
        if int(seg_weight) == 1:
            self.seg_weight = torch.tensor([0.42768099, 0.45614868, 1.59952169, 4.38863045, 4.85695198,
                                            4.86439145, 3.53563349, 3.57896961, 3.37838867, 3.66981824,
                                            4.17743386, 3.5624441, 2.78190484, 0.40917425, 2.38560636,
                                            4.65813434, 17.17367367, 1.13303585, 1.25281865]).to(self.device)
        elif int(seg_weight) == 2:
            print('Using seg weight 2')
            self.seg_weight = torch.tensor([1.82911031e-01, 2.08071618e-01, 2.55846962e+00, 1.92600773e+01,
                                            2.35899825e+01, 2.36623042e+01, 1.25007042e+01, 1.28090235e+01,
                                            1.14135100e+01, 1.34675659e+01, 1.74509537e+01, 1.26910080e+01,
                                            7.73899453e+00, 1.67423571e-01, 5.69111768e+00, 2.16982155e+01,
                                            2.94935067e+02, 1.28377023e+00, 1.56955458e+00]).to(self.device)
        else:
            # seg_weight == 0: unweighted segmentation loss.
            self.seg_weight = None
        self.edge_weight = edge_weight
        self.only_raw_recons = only_raw_recons
        self.silhouette_loss = silhouette_loss
        self.lambda_cross_view = lambda_cross_view
def run_G(self, z, c, batch, swapping_prob, neural_rendering_resolution, update_emas=False, mode='random_z_image_c'):
# if swapping_prob is not None:
# c_swapped = torch.roll(c.clone(), 1, 0)
# c_gen_conditioning = torch.where(torch.rand((c.shape[0], 1), device=c.device) < swapping_prob, c_swapped, c)
# else:
# c_gen_conditioning = torch.zeros_like(c)
if mode == 'random_z_image_c':
ws = self.G.mapping(
z, batch['pose'], batch, update_emas=update_emas)
gen_output = self.G.synthesis(
ws, batch['pose'], neural_rendering_resolution=neural_rendering_resolution, update_emas=update_emas)
elif mode == 'random_z_random_c':
ws = self.G.mapping(
z, batch['pose'], batch, update_emas=update_emas)
gen_output = self.G.synthesis(
ws, c, neural_rendering_resolution=neural_rendering_resolution, update_emas=update_emas)
# if self.style_mixing_prob > 0:
# with torch.autograd.profiler.record_function('style_mixing'):
# cutoff = torch.empty([], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
# cutoff = torch.where(torch.rand([], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
# ws[:, cutoff:] = self.G.mapping(torch.randn_like(z), c, update_emas=False)[:, cutoff:]
return gen_output, ws
def run_D(self, img, c, blur_sigma=0, blur_sigma_raw=0, update_emas=False):
input_img = {'image': img['image'].clone(
), 'image_raw': img['image_raw'].clone()}
blur_size = np.floor(blur_sigma * 3)
if blur_size > 0:
with torch.autograd.profiler.record_function('blur'):
f = torch.arange(-blur_size, blur_size + 1, device=input_img['image'].device).div(
blur_sigma).square().neg().exp2()
input_img['image'] = upfirdn2d.filter2d(
input_img['image'], f / f.sum())
if self.augment_pipe is not None:
augmented_pair = self.augment_pipe(torch.cat([input_img['image'],
torch.nn.functional.interpolate(input_img['image_raw'], size=input_img['image'].shape[2:], mode='bilinear', antialias=True)],
dim=1))
input_img['image'] = augmented_pair[:,
:input_img['image'].shape[1]]
input_img['image_raw'] = torch.nn.functional.interpolate(
augmented_pair[:, input_img['image'].shape[1]:], size=input_img['image_raw'].shape[2:], mode='bilinear', antialias=True)
logits = self.D(input_img, c, update_emas=update_emas)
return logits
def run_D_semantic(self, img, c, blur_sigma=0, blur_sigma_raw=0, update_emas=False):
# mask = batch['mask']
# mask = torch.nn.functional.one_hot(mask, num_classes=self.G.semantic_channels).permute(0, 3, 1, 2).float()
# mask_raw = torch.nn.functional.interpolate(mask, size=img['image_raw'].shape[2:], mode='nearest')
# img['image'] = torch.cat([img['image'], mask], dim=1)
# img['image_raw'] = torch.cat([img['image_raw'], mask_raw], dim=1)
input_img = {'image': img['image'].clone(
), 'image_raw': img['image_raw'].clone()}
blur_size = np.floor(blur_sigma * 3)
if blur_size > 0:
with torch.autograd.profiler.record_function('blur'):
f = torch.arange(-blur_size, blur_size + 1, device=input_img['image'].device).div(
blur_sigma).square().neg().exp2()
input_img['image'] = upfirdn2d.filter2d(
input_img['image'], f / f.sum())
if self.augment_pipe is not None:
augmented_pair = self.augment_pipe(torch.cat([input_img['image'],
torch.nn.functional.interpolate(input_img['image_raw'], size=input_img['image'].shape[2:], mode='bilinear', antialias=True)],
dim=1))
input_img['image'] = augmented_pair[:,
:input_img['image'].shape[1]]
input_img['image_raw'] = torch.nn.functional.interpolate(
augmented_pair[:, input_img['image'].shape[1]:], size=input_img['image_raw'].shape[2:], mode='bilinear', antialias=True)
logits = self.D_semantic(input_img, c, update_emas=update_emas)
return logits
def accumulate_gradients(self, phase, batch, gen_z, gen_c, gain, cur_nimg, debug=False):
assert phase in ['Gmain', 'Greg', 'Gboth', 'Dmain', 'Dreg',
'Dboth', 'D_semanticmain', 'D_semanticreg', 'D_semanticboth']
if self.G.rendering_kwargs.get('density_reg', 0) == 0:
phase = {'Greg': 'none', 'Gboth': 'Gmain'}.get(phase, phase)
if self.r1_gamma == 0:
phase = {'Dreg': 'none', 'Dboth': 'Dmain'}.get(phase, phase)
blur_sigma = max(1 - cur_nimg / (self.blur_fade_kimg * 1e3), 0) * \
self.blur_init_sigma if self.blur_fade_kimg > 0 else 0
r1_gamma = self.r1_gamma
alpha = min(cur_nimg / (self.gpc_reg_fade_kimg * 1e3),
1) if self.gpc_reg_fade_kimg > 0 else 1
swapping_prob = (1 - alpha) * 1 + alpha * \
self.gpc_reg_prob if self.gpc_reg_prob is not None else None
if torch.rand(1) < self.random_c_prob:
generator_mode = 'random_z_random_c'
c_render = gen_c
else:
generator_mode = 'random_z_image_c'
c_render = batch['pose']
if self.neural_rendering_resolution_final is not None:
alpha = min(
cur_nimg / (self.neural_rendering_resolution_fade_kimg * 1e3), 1)
neural_rendering_resolution = int(np.rint(self.neural_rendering_resolution_initial * (
1 - alpha) + self.neural_rendering_resolution_final * alpha))
else:
neural_rendering_resolution = self.neural_rendering_resolution_initial
real_img, real_c = batch['image'], batch['pose']
real_img_raw = filtered_resizing(
real_img, size=neural_rendering_resolution, f=self.resample_filter, filter_mode=self.filter_mode)
if self.blur_raw_target:
blur_size = np.floor(blur_sigma * 3)
if blur_size > 0:
f = torch.arange(-blur_size, blur_size + 1,
device=real_img_raw.device).div(blur_sigma).square().neg().exp2()
real_img_raw = upfirdn2d.filter2d(real_img_raw, f / f.sum())
real_img = {'image': real_img, 'image_raw': real_img_raw}
real_mask = batch['mask']
# real_mask_raw = F.interpolate(real_mask, size=neural_rendering_resolution, mode='nearest')
# real_mask = {'mask': real_mask, 'mask_raw': real_mask_raw}
# Gmain: Maximize logits for generated images.
if phase in ['Gmain', 'Gboth']:
with torch.autograd.profiler.record_function('Gmain_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, batch, swapping_prob=swapping_prob,
neural_rendering_resolution=neural_rendering_resolution, mode=generator_mode)
gen_logits = self.run_D(
gen_img, c_render, blur_sigma=blur_sigma)
training_stats.report('Loss/scores/fake', gen_logits)
training_stats.report('Loss/signs/fake', gen_logits.sign())
loss_Gmain = torch.nn.functional.softplus(-gen_logits)
if self.D_semantic is not None:
input_img = {}
if self.G.data_type == 'seg':
mask_softmax = torch.nn.functional.softmax(
gen_img['semantic'], dim=1)
mask_softmax_raw = torch.nn.functional.softmax(
gen_img['semantic_raw'], dim=1)
# Detach to avoid backpropagating through the rgb branch.
input_img['image'] = torch.cat(
[gen_img['image'].detach(), mask_softmax], dim=1)
input_img['image_raw'] = torch.cat(
[gen_img['image_raw'].detach(), mask_softmax_raw], dim=1)
else:
# Detach to avoid backpropagating through the rgb branch.
input_img['image'] = torch.cat(
[gen_img['image'].detach(), gen_img['semantic']], dim=1)
input_img['image_raw'] = torch.cat(
[gen_img['image_raw'].detach(), gen_img['semantic_raw']], dim=1)
gen_logits_semantic = self.run_D_semantic(
input_img, c_render, blur_sigma=blur_sigma)
training_stats.report(
'Loss/scores/fake_semantic', gen_logits_semantic)
training_stats.report(
'Loss/signs/fake_semantic', gen_logits_semantic.sign())
loss_Gmain += torch.nn.functional.softplus(
-gen_logits_semantic) * self.lambda_D_semantic
if generator_mode == 'random_z_image_c':
loss_G_img_reconstruction = F.smooth_l1_loss(gen_img['image'], real_img['image']) * self.lambda_l1 \
+ self.lpips_loss(gen_img['image'], real_img['image']) * self.lambda_lpips
loss_G_img_reconstruction_raw = F.smooth_l1_loss(gen_img['image_raw'], real_img['image_raw']) * self.lambda_l1 \
+ self.lpips_loss(gen_img['image_raw'], real_img['image_raw']) * self.lambda_lpips
loss_G_img_reconstruction = loss_G_img_reconstruction * \
(1 - float(self.only_raw_recons)) + \
loss_G_img_reconstruction_raw
training_stats.report(
'Loss/G/loss_img_reconstruction', loss_G_img_reconstruction)
# print(loss_G_img_reconstruction.shape, loss_Gmain.shape)
loss_Gmain = loss_Gmain + \
loss_G_img_reconstruction.squeeze(-1).squeeze(-1)
if 'semantic' in gen_img:
if self.G.data_type == 'seg':
loss_G_semantic_reconstruction = cross_entropy2d(gen_img['semantic'], real_mask.squeeze(
1).long(), weight=self.seg_weight) * (1 - float(self.only_raw_recons))
real_mask_raw = F.interpolate(
real_mask, size=neural_rendering_resolution, mode='nearest')
loss_G_semantic_reconstruction_raw = cross_entropy2d(
gen_img['semantic_raw'], real_mask_raw.squeeze(1).long(), weight=self.seg_weight)
loss_G_semantic_reconstruction = loss_G_semantic_reconstruction + \
loss_G_semantic_reconstruction_raw
else:
real_mask = batch['mask']
# real_mask_raw = filtered_resizing(real_mask, size=neural_rendering_resolution, f=self.resample_filter, filter_mode=self.filter_mode)
real_mask_raw = F.interpolate(
real_mask, size=neural_rendering_resolution, mode='nearest')
loss_G_semantic_reconstruction = F.smooth_l1_loss(gen_img['semantic'], real_mask) * self.edge_weight * (
1 - float(self.only_raw_recons)) + F.smooth_l1_loss(gen_img['semantic_raw'], real_mask_raw) * self.edge_weight
training_stats.report(
'Loss/G/loss_semantic_reconstruction', loss_G_semantic_reconstruction)
loss_Gmain = loss_Gmain + \
loss_G_semantic_reconstruction.squeeze(
-1).squeeze(-1)
loss_G_silhouette = 0
if self.silhouette_loss and self.G.data_type == 'seg':
real_mask_raw = F.interpolate(
real_mask, size=neural_rendering_resolution, mode='nearest')
loss_G_silhouette = self.calculate_silhouette_loss(
gen_img['weight'], real_mask_raw.long())
loss_Gmain = loss_Gmain + loss_G_silhouette
training_stats.report(
'Loss/G/loss_silhouette', loss_G_silhouette)
else:
training_stats.report(
'Loss/G/loss_semantic_reconstruction', 0)
training_stats.report('Loss/G/loss_silhouette', 0)
else:
training_stats.report('Loss/G/loss_img_reconstruction', 0)
training_stats.report(
'Loss/G/loss_semantic_reconstruction', 0)
training_stats.report('Loss/G/loss_silhouette', 0)
training_stats.report('Loss/G/loss', loss_Gmain)
with torch.autograd.profiler.record_function('Gmain_backward'):
loss_Gmain.mean().mul(gain).backward()
# Cross-view consistency loss
with torch.no_grad():
gen_img, _gen_ws = self.run_G(gen_z, gen_c, batch, swapping_prob=swapping_prob,
neural_rendering_resolution=neural_rendering_resolution, mode='random_z_random_c')
batch_proj = batch.copy()
if self.G.data_type == 'seg':
batch_proj['mask'] = torch.argmax(
gen_img['semantic'].detach(), dim=1, keepdim=True)
else:
batch_proj['mask'] = gen_img['semantic'].detach()
gen_img_proj, gen_ws_proj = self.run_G(gen_z, gen_c, batch_proj, swapping_prob=swapping_prob,
neural_rendering_resolution=neural_rendering_resolution, mode='random_z_image_c')
with torch.no_grad():
gen_img_recon, gen_ws_recon = self.run_G(
gen_z, gen_c, batch, swapping_prob=swapping_prob, neural_rendering_resolution=neural_rendering_resolution, mode='random_z_image_c')
loss_cross_view = F.smooth_l1_loss(
gen_img_proj['semantic_raw'], gen_img_recon['semantic_raw']) * self.lambda_cross_view
training_stats.report('Loss/G/loss_cross_view', loss_cross_view)
loss_cross_view.mean().mul(gain).backward()
# Density Regularization
if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs['reg_type'] == 'l1':
if swapping_prob is not None:
c_swapped = torch.roll(gen_c.clone(), 1, 0)
c_gen_conditioning = torch.where(torch.rand(
[], device=gen_c.device) < swapping_prob, c_swapped, gen_c)
else:
c_gen_conditioning = torch.zeros_like(gen_c)
ws = self.G.mapping(gen_z, batch['pose'], batch, update_emas=False)
initial_coordinates = torch.rand(
(ws.shape[0], 1000, 3), device=ws.device) * 2 - 1
perturbed_coordinates = initial_coordinates + \
torch.randn_like(initial_coordinates) * \
self.G.rendering_kwargs['density_reg_p_dist']
all_coordinates = torch.cat(
[initial_coordinates, perturbed_coordinates], dim=1)
sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(
all_coordinates), ws, update_emas=False)['sigma']
sigma_initial = sigma[:, :sigma.shape[1]//2]
sigma_perturbed = sigma[:, sigma.shape[1]//2:]
TVloss = torch.nn.functional.l1_loss(
sigma_initial, sigma_perturbed) * self.G.rendering_kwargs['density_reg']
TVloss.mul(gain).backward()
# Alternative density regularization
if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs['reg_type'] == 'monotonic-detach':
if swapping_prob is not None:
c_swapped = torch.roll(gen_c.clone(), 1, 0)
c_gen_conditioning = torch.where(torch.rand(
[], device=gen_c.device) < swapping_prob, c_swapped, gen_c)
else:
c_gen_conditioning = torch.zeros_like(gen_c)
ws = self.G.mapping(gen_z, batch['pose'], batch, update_emas=False)
initial_coordinates = torch.rand(
(ws.shape[0], 2000, 3), device=ws.device) * 2 - 1 # Front
perturbed_coordinates = initial_coordinates + \
torch.tensor([0, 0, -1], device=ws.device) * (1/256) * \
self.G.rendering_kwargs['box_warp'] # Behind
all_coordinates = torch.cat(
[initial_coordinates, perturbed_coordinates], dim=1)
sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(
all_coordinates), ws, update_emas=False)['sigma']
sigma_initial = sigma[:, :sigma.shape[1]//2]
sigma_perturbed = sigma[:, sigma.shape[1]//2:]
monotonic_loss = torch.relu(
sigma_initial.detach() - sigma_perturbed).mean() * 10
monotonic_loss.mul(gain).backward()
if swapping_prob is not None:
c_swapped = torch.roll(gen_c.clone(), 1, 0)
c_gen_conditioning = torch.where(torch.rand(
[], device=gen_c.device) < swapping_prob, c_swapped, gen_c)
else:
c_gen_conditioning = torch.zeros_like(gen_c)
ws = self.G.mapping(gen_z, batch['pose'], batch, update_emas=False)
if self.style_mixing_prob > 0:
with torch.autograd.profiler.record_function('style_mixing'):
cutoff = torch.empty(
[], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
cutoff = torch.where(torch.rand(
[], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
ws[:, cutoff:] = self.G.mapping(
torch.randn_like(z), c, update_emas=False)[:, cutoff:]
initial_coordinates = torch.rand(
(ws.shape[0], 1000, 3), device=ws.device) * 2 - 1
perturbed_coordinates = initial_coordinates + \
torch.randn_like(initial_coordinates) * (1/256) * \
self.G.rendering_kwargs['box_warp']
all_coordinates = torch.cat(
[initial_coordinates, perturbed_coordinates], dim=1)
sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(
all_coordinates), ws, update_emas=False)['sigma']
sigma_initial = sigma[:, :sigma.shape[1]//2]
sigma_perturbed = sigma[:, sigma.shape[1]//2:]
TVloss = torch.nn.functional.l1_loss(
sigma_initial, sigma_perturbed) * self.G.rendering_kwargs['density_reg']
TVloss.mul(gain).backward()
# Alternative density regularization
if phase in ['Greg', 'Gboth'] and self.G.rendering_kwargs.get('density_reg', 0) > 0 and self.G.rendering_kwargs['reg_type'] == 'monotonic-fixed':
if swapping_prob is not None:
c_swapped = torch.roll(gen_c.clone(), 1, 0)
c_gen_conditioning = torch.where(torch.rand(
[], device=gen_c.device) < swapping_prob, c_swapped, gen_c)
else:
c_gen_conditioning = torch.zeros_like(gen_c)
ws = self.G.mapping(gen_z, batch['pose'], batch, update_emas=False)
initial_coordinates = torch.rand(
(ws.shape[0], 2000, 3), device=ws.device) * 2 - 1 # Front
perturbed_coordinates = initial_coordinates + \
torch.tensor([0, 0, -1], device=ws.device) * (1/256) * \
self.G.rendering_kwargs['box_warp'] # Behind
all_coordinates = torch.cat(
[initial_coordinates, perturbed_coordinates], dim=1)
sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(
all_coordinates), ws, update_emas=False)['sigma']
sigma_initial = sigma[:, :sigma.shape[1]//2]
sigma_perturbed = sigma[:, sigma.shape[1]//2:]
monotonic_loss = torch.relu(
sigma_initial - sigma_perturbed).mean() * 10
monotonic_loss.mul(gain).backward()
if swapping_prob is not None:
c_swapped = torch.roll(gen_c.clone(), 1, 0)
c_gen_conditioning = torch.where(torch.rand(
[], device=gen_c.device) < swapping_prob, c_swapped, gen_c)
else:
c_gen_conditioning = torch.zeros_like(gen_c)
ws = self.G.mapping(gen_z, batch['pose'], batch, update_emas=False)
if self.style_mixing_prob > 0:
with torch.autograd.profiler.record_function('style_mixing'):
cutoff = torch.empty(
[], dtype=torch.int64, device=ws.device).random_(1, ws.shape[1])
cutoff = torch.where(torch.rand(
[], device=ws.device) < self.style_mixing_prob, cutoff, torch.full_like(cutoff, ws.shape[1]))
ws[:, cutoff:] = self.G.mapping(
torch.randn_like(z), c, update_emas=False)[:, cutoff:]
initial_coordinates = torch.rand(
(ws.shape[0], 1000, 3), device=ws.device) * 2 - 1
perturbed_coordinates = initial_coordinates + \
torch.randn_like(initial_coordinates) * (1/256) * \
self.G.rendering_kwargs['box_warp']
all_coordinates = torch.cat(
[initial_coordinates, perturbed_coordinates], dim=1)
sigma = self.G.sample_mixed(all_coordinates, torch.randn_like(
all_coordinates), ws, update_emas=False)['sigma']
sigma_initial = sigma[:, :sigma.shape[1]//2]
sigma_perturbed = sigma[:, sigma.shape[1]//2:]
TVloss = torch.nn.functional.l1_loss(
sigma_initial, sigma_perturbed) * self.G.rendering_kwargs['density_reg']
TVloss.mul(gain).backward()
# Dmain: Minimize logits for generated images.
loss_Dgen = 0
if phase in ['Dmain', 'Dboth']:
if torch.rand(1) < self.random_c_prob:
generator_mode = 'random_z_random_c'
c_render = gen_c
else:
generator_mode = 'random_z_image_c'
c_render = batch['pose']
with torch.autograd.profiler.record_function('Dgen_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, batch, swapping_prob=swapping_prob,
neural_rendering_resolution=neural_rendering_resolution, update_emas=True, mode=generator_mode)
gen_logits = self.run_D(
gen_img, c_render, blur_sigma=blur_sigma, update_emas=True)
training_stats.report('Loss/scores/fake', gen_logits)
training_stats.report('Loss/signs/fake', gen_logits.sign())
loss_Dgen = torch.nn.functional.softplus(gen_logits)
with torch.autograd.profiler.record_function('Dgen_backward'):
loss_Dgen.mean().mul(gain).backward()
# Dmain: Maximize logits for real images.
# Dr1: Apply R1 regularization.
if phase in ['Dmain', 'Dreg', 'Dboth']:
name = 'Dreal' if phase == 'Dmain' else 'Dr1' if phase == 'Dreg' else 'Dreal_Dr1'
with torch.autograd.profiler.record_function(name + '_forward'):
real_img_tmp_image = real_img['image'].detach(
).requires_grad_(phase in ['Dreg', 'Dboth'])
real_img_tmp_image_raw = real_img['image_raw'].detach(
).requires_grad_(phase in ['Dreg', 'Dboth'])
real_img_tmp = {'image': real_img_tmp_image,
'image_raw': real_img_tmp_image_raw}
real_logits = self.run_D(
real_img_tmp, real_c, blur_sigma=blur_sigma)
training_stats.report('Loss/scores/real', real_logits)
training_stats.report('Loss/signs/real', real_logits.sign())
loss_Dreal = 0
if phase in ['Dmain', 'Dboth']:
loss_Dreal = torch.nn.functional.softplus(-real_logits)
training_stats.report(
'Loss/D/loss', loss_Dgen + loss_Dreal)
loss_Dr1 = 0
if phase in ['Dreg', 'Dboth']:
if self.dual_discrimination:
with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[
real_img_tmp['image'], real_img_tmp['image_raw']], create_graph=True, only_inputs=True)
r1_grads_image = r1_grads[0]
r1_grads_image_raw = r1_grads[1]
r1_penalty = r1_grads_image.square().sum(
[1, 2, 3]) + r1_grads_image_raw.square().sum([1, 2, 3])
else: # single discrimination
with torch.autograd.profiler.record_function('r1_grads'), conv2d_gradfix.no_weight_gradients():
r1_grads = torch.autograd.grad(outputs=[real_logits.sum()], inputs=[
real_img_tmp['image']], create_graph=True, only_inputs=True)
r1_grads_image = r1_grads[0]
r1_penalty = r1_grads_image.square().sum([1, 2, 3])
loss_Dr1 = r1_penalty * (r1_gamma / 2)
training_stats.report('Loss/r1_penalty', r1_penalty)
training_stats.report('Loss/D/reg', loss_Dr1)
with torch.autograd.profiler.record_function(name + '_backward'):
(loss_Dreal + loss_Dr1).mean().mul(gain).backward()
# D_semanticmain: Minimize logits for generated images and masks.
loss_Dgen_semantic = 0
if phase in ['D_semanticmain', 'D_semanticboth']:
if torch.rand(1) < self.random_c_prob:
generator_mode = 'random_z_random_c'
c_render = gen_c
else:
generator_mode = 'random_z_image_c'
c_render = batch['pose']
with torch.autograd.profiler.record_function('Dgen_semantic_forward'):
gen_img, _gen_ws = self.run_G(gen_z, gen_c, batch, swapping_prob=swapping_prob,
neural_rendering_resolution=neural_rendering_resolution, update_emas=True, mode=generator_mode)
input_img = {}
if self.G.data_type == 'seg':
mask_softmax = torch.nn.functional.softmax(
gen_img['semantic'], dim=1)
mask_softmax_raw = torch.nn.functional.softmax(
gen_img['semantic_raw'], dim=1)
input_img['image'] = torch.cat(
[gen_img['image'], mask_softmax], dim=1)
input_img['image_raw'] = torch.cat(
[gen_img['image_raw'], mask_softmax_raw], dim=1)
else:
input_img['image'] = torch.cat(
[gen_img['image'], gen_img['semantic']], dim=1)
input_img['image_raw'] = torch.cat(
[gen_img['image_raw'], gen_img['semantic_raw']], dim=1)
gen_logits_semantic = self.run_D_semantic(
input_img, c_render, blur_sigma=blur_sigma)
training_stats.report(
'Loss/scores/fake_semantic', gen_logits_semantic)
training_stats.report(
'Loss/signs/fake_semantic', gen_logits_semantic.sign())
loss_Dgen_semantic = torch.nn.functional.softplus(
gen_logits_semantic)
with torch.autograd.profiler.record_function('Dgen_semantic_backward'):
loss_Dgen_semantic.mean().mul(gain).backward()
# D_semanticmain: Maximize logits for real images and masks.
# Dr1: Apply R1 regularization.
if phase in ['D_semanticmain', 'D_semanticreg', 'D_semanticboth']:
name = 'Dreal_semantic' if phase == 'D_semanticmain' else 'Dr1_semantic' if phase == 'D_semanticreg' else 'Dreal_Dr1_semantic'
with torch.autograd.profiler.record_function(name + '_forward'):
real_img_tmp_image = real_img['image'].detach().requires_grad_(
phase in ['D_semanticreg', 'D_semanticboth'])
real_img_tmp_image_raw = real_img['image_raw'].detach().requires_grad_(
phase in ['D_semanticreg', 'D_semanticboth'])
if self.G.data_type == 'seg':
real_mask = torch.nn.functional.one_hot(batch['mask'].squeeze(
1).long(), num_classes=self.G.semantic_channels).permute(0, 3, 1, 2).float()
real_mask_raw = filtered_resizing(
real_mask, size=neural_rendering_resolution, f=self.resample_filter, filter_mode=self.filter_mode)
real_mask_tmp_image = real_mask.detach().requires_grad_(
phase in ['D_semanticreg', 'D_semanticboth'])
real_mask_tmp_image_raw = real_mask_raw.detach().requires_grad_(
phase in ['D_semanticreg', 'D_semanticboth'])
else:
real_mask = batch['mask']
real_mask_raw = filtered_resizing(
real_mask, size=neural_rendering_resolution, f=self.resample_filter, filter_mode=self.filter_mode)
real_mask_tmp_image = real_mask.detach().requires_grad_(
phase in ['D_semanticreg', 'D_semanticboth'])
real_mask_tmp_image_raw = real_mask_raw.detach().requires_grad_(
phase in ['D_semanticreg', 'D_semanticboth'])
real_img_tmp = {'image': torch.cat([real_img_tmp_image, real_mask_tmp_image], dim=1), 'image_raw': torch.cat([
real_img_tmp_image_raw, real_mask_tmp_image_raw], dim=1)}
real_logits_semantic = self.run_D_semantic(
real_img_tmp, real_c, blur_sigma=blur_sigma)
training_stats.report(
'Loss/scores/real_semantic', real_logits_semantic)
training_stats.report(
'Loss/signs/real_semantic', real_logits_semantic.sign())
loss_Dreal_semantic = 0
if phase in ['D_semanticmain', 'D_semanticboth']:
loss_Dreal_semantic = torch.nn.functional.softplus(
-real_logits_semantic)
training_stats.report(
'Loss/D/loss_semantic', loss_Dgen_semantic + loss_Dreal_semantic)
loss_Dr1_semantic = 0
if phase in ['D_semanticreg', 'D_semanticboth']:
if self.dual_discrimination:
with torch.autograd.profiler.record_function('r1_grads_semantic'), conv2d_gradfix.no_weight_gradients():
r1_grads_semantic = torch.autograd.grad(outputs=[real_logits_semantic.sum()], inputs=[
real_img_tmp['image'], real_img_tmp['image_raw']], create_graph=True, only_inputs=True)
r1_grads_image_semantic = r1_grads_semantic[0]
r1_grads_image_raw_semantic = r1_grads_semantic[1]
r1_penalty_semantic = r1_grads_image_semantic.square().sum(
[1, 2, 3]) + r1_grads_image_raw_semantic.square().sum([1, 2, 3])
else:
with torch.autograd.profiler.record_function('r1_grads_semantic'):
r1_grads_semantic = torch.autograd.grad(outputs=[real_logits_semantic.sum(
)], inputs=[real_img_tmp['image']], create_graph=True, only_inputs=True)
r1_grads_image_semantic = r1_grads_semantic[0]
r1_penalty_semantic = r1_grads_image_semantic.square().sum([
1, 2, 3])
loss_Dr1_semantic = r1_penalty_semantic * r1_gamma * 0.5
training_stats.report(
'Loss/r1_penalty_semantic', r1_penalty_semantic)
training_stats.report(
'Loss/D/reg_semantic', loss_Dr1_semantic)
with torch.autograd.profiler.record_function(name + '_backward'):
(loss_Dreal_semantic + loss_Dr1_semantic).mean().mul(gain).backward()
def calculate_silhouette_loss(self, weight_image, mask):
# weight_image: [B, 1, H, W]
# mask: [B, 1, H, W]
# assert weight_image.shape == mask.shape
# bg_mask = (mask == 0)
# fg_mask = (mask > 0)
# bg_weight = weight_image[bg_mask]
# fg_weight = weight_image[fg_mask]
# bg_loss = bg_weight.mean()
# fg_loss = 1 - fg_weight.mean()
# return bg_loss + fg_loss
assert weight_image.shape == mask.shape
ref_silhouette = (mask > 0).float()
loss = (weight_image - ref_silhouette).pow(2).mean() * 10
return loss
# ----------------------------------------------------------------------------
| 58,286 | 55.865366 | 472 | py |
pix2pix3D | pix2pix3D-main/training/augment.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Augmentation pipeline from the paper
"Training Generative Adversarial Networks with Limited Data".
Matches the original implementation by Karras et al. at
https://github.com/NVlabs/stylegan2-ada/blob/main/training/augment.py"""
import numpy as np
import scipy.signal
import torch
from torch_utils import persistence
from torch_utils import misc
from torch_utils.ops import upfirdn2d
from torch_utils.ops import grid_sample_gradfix
from torch_utils.ops import conv2d_gradfix
#----------------------------------------------------------------------------
# Coefficients of various wavelet decomposition low-pass filters.
# Each entry maps a wavelet name to the taps of its orthogonal low-pass
# decomposition filter; the matching high-pass filter is derived from it in
# AugmentPipe.__init__ by alternating the signs of the taps.
wavelets = {
    'haar': [0.7071067811865476, 0.7071067811865476],
    'db1': [0.7071067811865476, 0.7071067811865476],
    'db2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'db3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'db4': [-0.010597401784997278, 0.032883011666982945, 0.030841381835986965, -0.18703481171888114, -0.02798376941698385, 0.6308807679295904, 0.7148465705525415, 0.23037781330885523],
    'db5': [0.003335725285001549, -0.012580751999015526, -0.006241490213011705, 0.07757149384006515, -0.03224486958502952, -0.24229488706619015, 0.13842814590110342, 0.7243085284385744, 0.6038292697974729, 0.160102397974125],
    'db6': [-0.00107730108499558, 0.004777257511010651, 0.0005538422009938016, -0.031582039318031156, 0.02752286553001629, 0.09750160558707936, -0.12976686756709563, -0.22626469396516913, 0.3152503517092432, 0.7511339080215775, 0.4946238903983854, 0.11154074335008017],
    'db7': [0.0003537138000010399, -0.0018016407039998328, 0.00042957797300470274, 0.012550998556013784, -0.01657454163101562, -0.03802993693503463, 0.0806126091510659, 0.07130921926705004, -0.22403618499416572, -0.14390600392910627, 0.4697822874053586, 0.7291320908465551, 0.39653931948230575, 0.07785205408506236],
    'db8': [-0.00011747678400228192, 0.0006754494059985568, -0.0003917403729959771, -0.00487035299301066, 0.008746094047015655, 0.013981027917015516, -0.04408825393106472, -0.01736930100202211, 0.128747426620186, 0.00047248457399797254, -0.2840155429624281, -0.015829105256023893, 0.5853546836548691, 0.6756307362980128, 0.3128715909144659, 0.05441584224308161],
    'sym2': [-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025],
    'sym3': [0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569],
    'sym4': [-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427],
    'sym5': [0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728],
    'sym6': [0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148],
    'sym7': [0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255],
    'sym8': [-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609],
}
#----------------------------------------------------------------------------
# Helpers for constructing transformation matrices.
def matrix(*rows, device=None):
    """Assemble a transformation matrix from rows whose entries may be Python
    scalars and/or torch tensors; tensor entries contribute leading batch dims.
    Returns a tensor of shape batch_dims + (len(rows), len(rows[0]))."""
    assert all(len(row) == len(rows[0]) for row in rows)
    flat = [entry for row in rows for entry in row]
    tensors = [entry for entry in flat if isinstance(entry, torch.Tensor)]
    if not tensors:
        # Purely constant matrix -> cached constant tensor.
        return misc.constant(np.asarray(rows), device=device)
    assert device is None or device == tensors[0].device
    flat = [entry if isinstance(entry, torch.Tensor)
            else misc.constant(entry, shape=tensors[0].shape, device=tensors[0].device)
            for entry in flat]
    return torch.stack(flat, dim=-1).reshape(tensors[0].shape + (len(rows), -1))
def translate2d(tx, ty, **kwargs):
    """3x3 homogeneous matrix translating 2D points by (tx, ty)."""
    rows = ([1, 0, tx],
            [0, 1, ty],
            [0, 0, 1])
    return matrix(*rows, **kwargs)
def translate3d(tx, ty, tz, **kwargs):
    """4x4 homogeneous matrix translating 3D points by (tx, ty, tz)."""
    rows = ([1, 0, 0, tx],
            [0, 1, 0, ty],
            [0, 0, 1, tz],
            [0, 0, 0, 1])
    return matrix(*rows, **kwargs)
def scale2d(sx, sy, **kwargs):
    """3x3 homogeneous matrix scaling 2D points by (sx, sy)."""
    rows = ([sx, 0, 0],
            [0, sy, 0],
            [0, 0, 1])
    return matrix(*rows, **kwargs)
def scale3d(sx, sy, sz, **kwargs):
    """4x4 homogeneous matrix scaling 3D points by (sx, sy, sz)."""
    rows = ([sx, 0, 0, 0],
            [0, sy, 0, 0],
            [0, 0, sz, 0],
            [0, 0, 0, 1])
    return matrix(*rows, **kwargs)
def rotate2d(theta, **kwargs):
    """3x3 homogeneous matrix rotating 2D points by angle theta (radians)."""
    cos_t = torch.cos(theta)
    sin_t = torch.sin(theta)
    neg_sin_t = torch.sin(-theta)
    return matrix(
        [cos_t, neg_sin_t, 0],
        [sin_t, cos_t, 0],
        [0, 0, 1],
        **kwargs)
def rotate3d(v, theta, **kwargs):
    """4x4 homogeneous matrix rotating 3D points by angle theta (radians)
    about the axis v (Rodrigues' rotation formula; v is assumed unit-length —
    TODO confirm with callers)."""
    vx = v[..., 0]
    vy = v[..., 1]
    vz = v[..., 2]
    s = torch.sin(theta)
    c = torch.cos(theta)
    cc = 1 - c
    return matrix(
        [vx*vx*cc+c,    vx*vy*cc-vz*s, vx*vz*cc+vy*s, 0],
        [vy*vx*cc+vz*s, vy*vy*cc+c,    vy*vz*cc-vx*s, 0],
        [vz*vx*cc-vy*s, vz*vy*cc+vx*s, vz*vz*cc+c,    0],
        [0, 0, 0, 1],
        **kwargs)
def translate2d_inv(tx, ty, **kwargs):
    """Inverse of translate2d(): translate 2D points by (-tx, -ty)."""
    return translate2d(-tx, -ty, **kwargs)
def scale2d_inv(sx, sy, **kwargs):
    """Inverse of scale2d(): scale 2D points by the reciprocals (1/sx, 1/sy)."""
    return scale2d(1 / sx, 1 / sy, **kwargs)
def rotate2d_inv(theta, **kwargs):
    """Inverse of rotate2d(): rotate 2D points by -theta."""
    return rotate2d(-theta, **kwargs)
#----------------------------------------------------------------------------
# Versatile image augmentation pipeline from the paper
# "Training Generative Adversarial Networks with Limited Data".
#
# All augmentations are disabled by default; individual augmentations can
# be enabled by setting their probability multipliers to 1.
@persistence.persistent_class
class AugmentPipe(torch.nn.Module):
def __init__(self,
xflip=0, rotate90=0, xint=0, xint_max=0.125,
scale=0, rotate=0, aniso=0, xfrac=0, scale_std=0.2, rotate_max=1, aniso_std=0.2, xfrac_std=0.125,
brightness=0, contrast=0, lumaflip=0, hue=0, saturation=0, brightness_std=0.2, contrast_std=0.5, hue_max=1, saturation_std=1,
imgfilter=0, imgfilter_bands=[1,1,1,1], imgfilter_std=1,
noise=0, cutout=0, noise_std=0.1, cutout_size=0.5,
):
super().__init__()
self.register_buffer('p', torch.ones([])) # Overall multiplier for augmentation probability.
# Pixel blitting.
self.xflip = float(xflip) # Probability multiplier for x-flip.
self.rotate90 = float(rotate90) # Probability multiplier for 90 degree rotations.
self.xint = float(xint) # Probability multiplier for integer translation.
self.xint_max = float(xint_max) # Range of integer translation, relative to image dimensions.
# General geometric transformations.
self.scale = float(scale) # Probability multiplier for isotropic scaling.
self.rotate = float(rotate) # Probability multiplier for arbitrary rotation.
self.aniso = float(aniso) # Probability multiplier for anisotropic scaling.
self.xfrac = float(xfrac) # Probability multiplier for fractional translation.
self.scale_std = float(scale_std) # Log2 standard deviation of isotropic scaling.
self.rotate_max = float(rotate_max) # Range of arbitrary rotation, 1 = full circle.
self.aniso_std = float(aniso_std) # Log2 standard deviation of anisotropic scaling.
self.xfrac_std = float(xfrac_std) # Standard deviation of frational translation, relative to image dimensions.
# Color transformations.
self.brightness = float(brightness) # Probability multiplier for brightness.
self.contrast = float(contrast) # Probability multiplier for contrast.
self.lumaflip = float(lumaflip) # Probability multiplier for luma flip.
self.hue = float(hue) # Probability multiplier for hue rotation.
self.saturation = float(saturation) # Probability multiplier for saturation.
self.brightness_std = float(brightness_std) # Standard deviation of brightness.
self.contrast_std = float(contrast_std) # Log2 standard deviation of contrast.
self.hue_max = float(hue_max) # Range of hue rotation, 1 = full circle.
self.saturation_std = float(saturation_std) # Log2 standard deviation of saturation.
# Image-space filtering.
self.imgfilter = float(imgfilter) # Probability multiplier for image-space filtering.
self.imgfilter_bands = list(imgfilter_bands) # Probability multipliers for individual frequency bands.
self.imgfilter_std = float(imgfilter_std) # Log2 standard deviation of image-space filter amplification.
# Image-space corruptions.
self.noise = float(noise) # Probability multiplier for additive RGB noise.
self.cutout = float(cutout) # Probability multiplier for cutout.
self.noise_std = float(noise_std) # Standard deviation of additive RGB noise.
self.cutout_size = float(cutout_size) # Size of the cutout rectangle, relative to image dimensions.
# Setup orthogonal lowpass filter for geometric augmentations.
self.register_buffer('Hz_geom', upfirdn2d.setup_filter(wavelets['sym6']))
# Construct filter bank for image-space filtering.
Hz_lo = np.asarray(wavelets['sym2']) # H(z)
Hz_hi = Hz_lo * ((-1) ** np.arange(Hz_lo.size)) # H(-z)
Hz_lo2 = np.convolve(Hz_lo, Hz_lo[::-1]) / 2 # H(z) * H(z^-1) / 2
Hz_hi2 = np.convolve(Hz_hi, Hz_hi[::-1]) / 2 # H(-z) * H(-z^-1) / 2
Hz_fbank = np.eye(4, 1) # Bandpass(H(z), b_i)
for i in range(1, Hz_fbank.shape[0]):
Hz_fbank = np.dstack([Hz_fbank, np.zeros_like(Hz_fbank)]).reshape(Hz_fbank.shape[0], -1)[:, :-1]
Hz_fbank = scipy.signal.convolve(Hz_fbank, [Hz_lo2])
Hz_fbank[i, (Hz_fbank.shape[1] - Hz_hi2.size) // 2 : (Hz_fbank.shape[1] + Hz_hi2.size) // 2] += Hz_hi2
self.register_buffer('Hz_fbank', torch.as_tensor(Hz_fbank, dtype=torch.float32))
def forward(self, images, debug_percentile=None):
    """Augment a batch of images with every enabled transformation.

    Applies, in order: pixel blitting (x-flip, 90-degree rotations, integer
    translation), general geometric transforms (isotropic/anisotropic scaling,
    pre/post rotation, fractional translation), color transforms (brightness,
    contrast, luma flip, hue rotation, saturation), image-space frequency
    filtering, and image-space corruptions (additive noise, cutout). Each
    augmentation is applied independently per sample with probability
    (per-augmentation multiplier * self.p).

    Args:
        images: float tensor of shape [batch, channels, height, width].
        debug_percentile: optional scalar in [0, 1); when given, every random
            draw is replaced by this fixed percentile of its distribution
            (deterministic output, used for debugging/visualization).

    Returns:
        Augmented images with the same shape as the input.
    """
    assert isinstance(images, torch.Tensor) and images.ndim == 4
    batch_size, num_channels, height, width = images.shape
    device = images.device
    if debug_percentile is not None:
        debug_percentile = torch.as_tensor(debug_percentile, dtype=torch.float32, device=device)

    # -------------------------------------
    # Select parameters for pixel blitting.
    # -------------------------------------

    # Initialize inverse homogeneous 2D transform: G_inv @ pixel_out ==> pixel_in
    I_3 = torch.eye(3, device=device)
    G_inv = I_3

    # Apply x-flip with probability (xflip * strength).
    if self.xflip > 0:
        i = torch.floor(torch.rand([batch_size], device=device) * 2)
        i = torch.where(torch.rand([batch_size], device=device) < self.xflip * self.p, i, torch.zeros_like(i))
        if debug_percentile is not None:
            i = torch.full_like(i, torch.floor(debug_percentile * 2))
        G_inv = G_inv @ scale2d_inv(1 - 2 * i, 1)

    # Apply 90 degree rotations with probability (rotate90 * strength).
    if self.rotate90 > 0:
        i = torch.floor(torch.rand([batch_size], device=device) * 4)
        i = torch.where(torch.rand([batch_size], device=device) < self.rotate90 * self.p, i, torch.zeros_like(i))
        if debug_percentile is not None:
            i = torch.full_like(i, torch.floor(debug_percentile * 4))
        G_inv = G_inv @ rotate2d_inv(-np.pi / 2 * i)

    # Apply integer translation with probability (xint * strength).
    if self.xint > 0:
        t = (torch.rand([batch_size, 2], device=device) * 2 - 1) * self.xint_max
        t = torch.where(torch.rand([batch_size, 1], device=device) < self.xint * self.p, t, torch.zeros_like(t))
        if debug_percentile is not None:
            t = torch.full_like(t, (debug_percentile * 2 - 1) * self.xint_max)
        G_inv = G_inv @ translate2d_inv(torch.round(t[:,0] * width), torch.round(t[:,1] * height))

    # --------------------------------------------------------
    # Select parameters for general geometric transformations.
    # --------------------------------------------------------

    # Apply isotropic scaling with probability (scale * strength).
    if self.scale > 0:
        s = torch.exp2(torch.randn([batch_size], device=device) * self.scale_std)
        s = torch.where(torch.rand([batch_size], device=device) < self.scale * self.p, s, torch.ones_like(s))
        if debug_percentile is not None:
            s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.scale_std))
        G_inv = G_inv @ scale2d_inv(s, s)

    # Apply pre-rotation with probability p_rot.
    # Rotation is split into a pre- and post-rotation around the anisotropic
    # scaling so that applying either one with p_rot yields total probability p.
    p_rot = 1 - torch.sqrt((1 - self.rotate * self.p).clamp(0, 1)) # P(pre OR post) = p
    if self.rotate > 0:
        theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
        theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
        if debug_percentile is not None:
            theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.rotate_max)
        G_inv = G_inv @ rotate2d_inv(-theta) # Before anisotropic scaling.

    # Apply anisotropic scaling with probability (aniso * strength).
    if self.aniso > 0:
        s = torch.exp2(torch.randn([batch_size], device=device) * self.aniso_std)
        s = torch.where(torch.rand([batch_size], device=device) < self.aniso * self.p, s, torch.ones_like(s))
        if debug_percentile is not None:
            s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.aniso_std))
        G_inv = G_inv @ scale2d_inv(s, 1 / s)

    # Apply post-rotation with probability p_rot.
    if self.rotate > 0:
        theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.rotate_max
        theta = torch.where(torch.rand([batch_size], device=device) < p_rot, theta, torch.zeros_like(theta))
        if debug_percentile is not None:
            theta = torch.zeros_like(theta)
        G_inv = G_inv @ rotate2d_inv(-theta) # After anisotropic scaling.

    # Apply fractional translation with probability (xfrac * strength).
    if self.xfrac > 0:
        t = torch.randn([batch_size, 2], device=device) * self.xfrac_std
        t = torch.where(torch.rand([batch_size, 1], device=device) < self.xfrac * self.p, t, torch.zeros_like(t))
        if debug_percentile is not None:
            t = torch.full_like(t, torch.erfinv(debug_percentile * 2 - 1) * self.xfrac_std)
        G_inv = G_inv @ translate2d_inv(t[:,0] * width, t[:,1] * height)

    # ----------------------------------
    # Execute geometric transformations.
    # ----------------------------------

    # Execute if the transform is not identity.
    if G_inv is not I_3:
        # Calculate padding: transform the image corners and take the maximal
        # extent, plus filter support, so nothing is lost during warping.
        cx = (width - 1) / 2
        cy = (height - 1) / 2
        cp = matrix([-cx, -cy, 1], [cx, -cy, 1], [cx, cy, 1], [-cx, cy, 1], device=device) # [idx, xyz]
        cp = G_inv @ cp.t() # [batch, xyz, idx]
        Hz_pad = self.Hz_geom.shape[0] // 4
        margin = cp[:, :2, :].permute(1, 0, 2).flatten(1) # [xy, batch * idx]
        margin = torch.cat([-margin, margin]).max(dim=1).values # [x0, y0, x1, y1]
        margin = margin + misc.constant([Hz_pad * 2 - cx, Hz_pad * 2 - cy] * 2, device=device)
        margin = margin.max(misc.constant([0, 0] * 2, device=device))
        margin = margin.min(misc.constant([width-1, height-1] * 2, device=device))
        mx0, my0, mx1, my1 = margin.ceil().to(torch.int32)

        # Pad image and adjust origin.
        images = torch.nn.functional.pad(input=images, pad=[mx0,mx1,my0,my1], mode='reflect')
        G_inv = translate2d((mx0 - mx1) / 2, (my0 - my1) / 2) @ G_inv

        # Upsample: warp at 2x resolution to reduce aliasing.
        images = upfirdn2d.upsample2d(x=images, f=self.Hz_geom, up=2)
        G_inv = scale2d(2, 2, device=device) @ G_inv @ scale2d_inv(2, 2, device=device)
        G_inv = translate2d(-0.5, -0.5, device=device) @ G_inv @ translate2d_inv(-0.5, -0.5, device=device)

        # Execute transformation.
        shape = [batch_size, num_channels, (height + Hz_pad * 2) * 2, (width + Hz_pad * 2) * 2]
        G_inv = scale2d(2 / images.shape[3], 2 / images.shape[2], device=device) @ G_inv @ scale2d_inv(2 / shape[3], 2 / shape[2], device=device)
        grid = torch.nn.functional.affine_grid(theta=G_inv[:,:2,:], size=shape, align_corners=False)
        images = grid_sample_gradfix.grid_sample(images, grid)

        # Downsample and crop.
        images = upfirdn2d.downsample2d(x=images, f=self.Hz_geom, down=2, padding=-Hz_pad*2, flip_filter=True)

    # --------------------------------------------
    # Select parameters for color transformations.
    # --------------------------------------------

    # Initialize homogeneous 3D transformation matrix: C @ color_in ==> color_out
    I_4 = torch.eye(4, device=device)
    C = I_4

    # Apply brightness with probability (brightness * strength).
    if self.brightness > 0:
        b = torch.randn([batch_size], device=device) * self.brightness_std
        b = torch.where(torch.rand([batch_size], device=device) < self.brightness * self.p, b, torch.zeros_like(b))
        if debug_percentile is not None:
            b = torch.full_like(b, torch.erfinv(debug_percentile * 2 - 1) * self.brightness_std)
        C = translate3d(b, b, b) @ C

    # Apply contrast with probability (contrast * strength).
    if self.contrast > 0:
        c = torch.exp2(torch.randn([batch_size], device=device) * self.contrast_std)
        c = torch.where(torch.rand([batch_size], device=device) < self.contrast * self.p, c, torch.ones_like(c))
        if debug_percentile is not None:
            c = torch.full_like(c, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.contrast_std))
        C = scale3d(c, c, c) @ C

    # Apply luma flip with probability (lumaflip * strength).
    v = misc.constant(np.asarray([1, 1, 1, 0]) / np.sqrt(3), device=device) # Luma axis.
    if self.lumaflip > 0:
        i = torch.floor(torch.rand([batch_size, 1, 1], device=device) * 2)
        i = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.lumaflip * self.p, i, torch.zeros_like(i))
        if debug_percentile is not None:
            i = torch.full_like(i, torch.floor(debug_percentile * 2))
        C = (I_4 - 2 * v.ger(v) * i) @ C # Householder reflection.

    # Apply hue rotation with probability (hue * strength).
    if self.hue > 0 and num_channels > 1:
        theta = (torch.rand([batch_size], device=device) * 2 - 1) * np.pi * self.hue_max
        theta = torch.where(torch.rand([batch_size], device=device) < self.hue * self.p, theta, torch.zeros_like(theta))
        if debug_percentile is not None:
            theta = torch.full_like(theta, (debug_percentile * 2 - 1) * np.pi * self.hue_max)
        C = rotate3d(v, theta) @ C # Rotate around v.

    # Apply saturation with probability (saturation * strength).
    if self.saturation > 0 and num_channels > 1:
        s = torch.exp2(torch.randn([batch_size, 1, 1], device=device) * self.saturation_std)
        s = torch.where(torch.rand([batch_size, 1, 1], device=device) < self.saturation * self.p, s, torch.ones_like(s))
        if debug_percentile is not None:
            s = torch.full_like(s, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.saturation_std))
        C = (v.ger(v) + (I_4 - v.ger(v)) * s) @ C

    # ------------------------------
    # Execute color transformations.
    # ------------------------------

    # Execute if the transform is not identity.
    if C is not I_4:
        images = images.reshape([batch_size, num_channels, height * width])
        if num_channels == 3:
            images = C[:, :3, :3] @ images + C[:, :3, 3:]
        elif num_channels == 1:
            # Grayscale: collapse the color transform to a single luma channel.
            C = C[:, :3, :].mean(dim=1, keepdims=True)
            images = images * C[:, :, :3].sum(dim=2, keepdims=True) + C[:, :, 3:]
        elif num_channels == 6:
            # Two stacked RGB images (e.g. image + auxiliary render): apply the
            # same color transform to both halves.
            images[:, :3] = C[:, :3, :3] @ images[:, :3] + C[:, :3, 3:]
            images[:, 3:] = C[:, :3, :3] @ images[:, 3:] + C[:, :3, 3:]
        else:
            raise ValueError('Image must be RGB (3 channels) or L (1 channel)')
        images = images.reshape([batch_size, num_channels, height, width])

    # ----------------------
    # Image-space filtering.
    # ----------------------

    if self.imgfilter > 0:
        num_bands = self.Hz_fbank.shape[0]
        assert len(self.imgfilter_bands) == num_bands
        expected_power = misc.constant(np.array([10, 1, 1, 1]) / 13, device=device) # Expected power spectrum (1/f).

        # Apply amplification for each band with probability (imgfilter * strength * band_strength).
        g = torch.ones([batch_size, num_bands], device=device) # Global gain vector (identity).
        for i, band_strength in enumerate(self.imgfilter_bands):
            t_i = torch.exp2(torch.randn([batch_size], device=device) * self.imgfilter_std)
            t_i = torch.where(torch.rand([batch_size], device=device) < self.imgfilter * self.p * band_strength, t_i, torch.ones_like(t_i))
            if debug_percentile is not None:
                t_i = torch.full_like(t_i, torch.exp2(torch.erfinv(debug_percentile * 2 - 1) * self.imgfilter_std)) if band_strength > 0 else torch.ones_like(t_i)
            t = torch.ones([batch_size, num_bands], device=device) # Temporary gain vector.
            t[:, i] = t_i # Replace i'th element.
            t = t / (expected_power * t.square()).sum(dim=-1, keepdims=True).sqrt() # Normalize power.
            g = g * t # Accumulate into global gain.

        # Construct combined amplification filter.
        Hz_prime = g @ self.Hz_fbank # [batch, tap]
        Hz_prime = Hz_prime.unsqueeze(1).repeat([1, num_channels, 1]) # [batch, channels, tap]
        Hz_prime = Hz_prime.reshape([batch_size * num_channels, 1, -1]) # [batch * channels, 1, tap]

        # Apply filter as two separable 1D convolutions (rows, then columns).
        p = self.Hz_fbank.shape[1] // 2
        images = images.reshape([1, batch_size * num_channels, height, width])
        images = torch.nn.functional.pad(input=images, pad=[p,p,p,p], mode='reflect')
        images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(2), groups=batch_size*num_channels)
        images = conv2d_gradfix.conv2d(input=images, weight=Hz_prime.unsqueeze(3), groups=batch_size*num_channels)
        images = images.reshape([batch_size, num_channels, height, width])

    # ------------------------
    # Image-space corruptions.
    # ------------------------

    # Apply additive RGB noise with probability (noise * strength).
    if self.noise > 0:
        sigma = torch.randn([batch_size, 1, 1, 1], device=device).abs() * self.noise_std
        sigma = torch.where(torch.rand([batch_size, 1, 1, 1], device=device) < self.noise * self.p, sigma, torch.zeros_like(sigma))
        if debug_percentile is not None:
            sigma = torch.full_like(sigma, torch.erfinv(debug_percentile) * self.noise_std)
        images = images + torch.randn([batch_size, num_channels, height, width], device=device) * sigma

    # Apply cutout with probability (cutout * strength).
    if self.cutout > 0:
        size = torch.full([batch_size, 2, 1, 1, 1], self.cutout_size, device=device)
        size = torch.where(torch.rand([batch_size, 1, 1, 1, 1], device=device) < self.cutout * self.p, size, torch.zeros_like(size))
        center = torch.rand([batch_size, 2, 1, 1, 1], device=device)
        if debug_percentile is not None:
            size = torch.full_like(size, self.cutout_size)
            center = torch.full_like(center, debug_percentile)
        coord_x = torch.arange(width, device=device).reshape([1, 1, 1, -1])
        coord_y = torch.arange(height, device=device).reshape([1, 1, -1, 1])
        mask_x = (((coord_x + 0.5) / width - center[:, 0]).abs() >= size[:, 0] / 2)
        mask_y = (((coord_y + 0.5) / height - center[:, 1]).abs() >= size[:, 1] / 2)
        mask = torch.logical_or(mask_x, mask_y).to(torch.float32)
        images = images * mask
    return images
#----------------------------------------------------------------------------
| 26,919 | 59.904977 | 366 | py |
pix2pix3D | pix2pix3D-main/training/dataset.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Streaming images and labels from datasets created with dataset_tool.py."""
import os
import numpy as np
import zipfile
import PIL.Image
import json
import torch
import dnnlib
import cv2
try:
import pyspng
except ImportError:
pyspng = None
#----------------------------------------------------------------------------
class Dataset(torch.utils.data.Dataset):
    """Abstract base class for labelled image datasets.

    Handles index bookkeeping (max_size subsetting, x-flip doubling), lazy
    label loading, and one-hot encoding of integer labels. Subclasses must
    implement _load_raw_image() and _load_raw_labels().
    """

    def __init__(self,
        name,                   # Name of the dataset.
        raw_shape,              # Shape of the raw image data (NCHW).
        max_size    = None,     # Artificially limit the size of the dataset. None = no limit. Applied before xflip.
        use_labels  = False,    # Enable conditioning labels? False = label dimension is zero.
        xflip       = False,    # Artificially double the size of the dataset via x-flips. Applied after max_size.
        random_seed = 0,        # Random seed to use when applying max_size.
    ):
        self._name = name
        self._raw_shape = list(raw_shape)
        self._use_labels = use_labels
        self._raw_labels = None         # Lazily populated by _get_raw_labels().
        self._label_shape = None        # Lazily populated by the label_shape property.

        # Apply max_size: pick a reproducible random subset, kept in sorted order.
        self._raw_idx = np.arange(self._raw_shape[0], dtype=np.int64)
        if (max_size is not None) and (self._raw_idx.size > max_size):
            np.random.RandomState(random_seed).shuffle(self._raw_idx)
            self._raw_idx = np.sort(self._raw_idx[:max_size])

        # Apply xflip: duplicate every index and mark the second copy as mirrored.
        self._xflip = np.zeros(self._raw_idx.size, dtype=np.uint8)
        if xflip:
            self._raw_idx = np.tile(self._raw_idx, 2)
            self._xflip = np.concatenate([self._xflip, np.ones_like(self._xflip)])

    def _get_raw_labels(self):
        """Load and validate labels on first use; cache them and their std."""
        if self._raw_labels is None:
            self._raw_labels = self._load_raw_labels() if self._use_labels else None
            if self._raw_labels is None:
                # No labels: zero-width float array so label_dim == 0.
                self._raw_labels = np.zeros([self._raw_shape[0], 0], dtype=np.float32)
            assert isinstance(self._raw_labels, np.ndarray)
            assert self._raw_labels.shape[0] == self._raw_shape[0]
            assert self._raw_labels.dtype in [np.float32, np.int64]
            if self._raw_labels.dtype == np.int64:
                assert self._raw_labels.ndim == 1
                assert np.all(self._raw_labels >= 0)
            self._raw_labels_std = self._raw_labels.std(0)
        return self._raw_labels

    def close(self): # to be overridden by subclass
        pass

    def _load_raw_image(self, raw_idx): # to be overridden by subclass
        raise NotImplementedError

    def _load_raw_labels(self): # to be overridden by subclass
        raise NotImplementedError

    def __getstate__(self):
        # Drop the label cache so pickles stay small; labels reload lazily.
        return dict(self.__dict__, _raw_labels=None)

    def __del__(self):
        # Bare except is deliberate: during interpreter shutdown even builtins
        # may already be gone, and __del__ must never raise.
        try:
            self.close()
        except:
            pass

    def __len__(self):
        return self._raw_idx.size

    def __getitem__(self, idx):
        """Return (image, label) for the given (possibly flipped) index."""
        image = self._load_raw_image(self._raw_idx[idx])
        assert isinstance(image, np.ndarray)
        assert list(image.shape) == self.image_shape
        assert image.dtype == np.uint8
        if self._xflip[idx]:
            assert image.ndim == 3 # CHW
            image = image[:, :, ::-1]
        return image.copy(), self.get_label(idx)

    def get_label(self, idx):
        """Return the label for idx; int64 labels are one-hot encoded."""
        label = self._get_raw_labels()[self._raw_idx[idx]]
        if label.dtype == np.int64:
            onehot = np.zeros(self.label_shape, dtype=np.float32)
            onehot[label] = 1
            label = onehot
        return label.copy()

    def get_details(self, idx):
        """Return raw index, flip flag, and raw label for debugging."""
        d = dnnlib.EasyDict()
        d.raw_idx = int(self._raw_idx[idx])
        d.xflip = (int(self._xflip[idx]) != 0)
        d.raw_label = self._get_raw_labels()[d.raw_idx].copy()
        return d

    def get_label_std(self):
        # Fix: ensure labels (and hence _raw_labels_std) have been computed.
        # Previously, calling this before any label access raised AttributeError.
        self._get_raw_labels()
        return self._raw_labels_std

    @property
    def name(self):
        return self._name

    @property
    def image_shape(self):
        # [channels, height, width]
        return list(self._raw_shape[1:])

    @property
    def num_channels(self):
        assert len(self.image_shape) == 3 # CHW
        return self.image_shape[0]

    @property
    def resolution(self):
        assert len(self.image_shape) == 3 # CHW
        assert self.image_shape[1] == self.image_shape[2]
        return self.image_shape[1]

    @property
    def label_shape(self):
        if self._label_shape is None:
            raw_labels = self._get_raw_labels()
            if raw_labels.dtype == np.int64:
                # Integer class labels: shape of the one-hot encoding.
                self._label_shape = [int(np.max(raw_labels)) + 1]
            else:
                self._label_shape = raw_labels.shape[1:]
        return list(self._label_shape)

    @property
    def label_dim(self):
        assert len(self.label_shape) == 1
        return self.label_shape[0]

    @property
    def has_labels(self):
        return any(x != 0 for x in self.label_shape)

    @property
    def has_onehot_labels(self):
        return self._get_raw_labels().dtype == np.int64
#----------------------------------------------------------------------------
class ImageFolderDataset(Dataset):
    """Streams images (plus optional labels from dataset.json) from either a
    plain directory tree or a zip archive."""

    def __init__(self,
        path,                   # Path to directory or zip.
        resolution = None,      # Ensure specific resolution, None = highest available.
        **super_kwargs,         # Additional arguments for the Dataset base class.
    ):
        self._path = path
        self._zipfile = None

        if os.path.isdir(self._path):
            self._type = 'dir'
            found = set()
            for root, _dirs, files in os.walk(self._path):
                for fname in files:
                    found.add(os.path.relpath(os.path.join(root, fname), start=self._path))
            self._all_fnames = found
        elif self._file_ext(self._path) == '.zip':
            self._type = 'zip'
            self._all_fnames = set(self._get_zipfile().namelist())
        else:
            raise IOError('Path must point to a directory or zip')

        PIL.Image.init()
        self._image_fnames = sorted(f for f in self._all_fnames if self._file_ext(f) in PIL.Image.EXTENSION)
        if not self._image_fnames:
            raise IOError('No image files found in the specified path')

        name = os.path.splitext(os.path.basename(self._path))[0]
        first_image = self._load_raw_image(0)
        raw_shape = [len(self._image_fnames)] + list(first_image.shape)
        if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution):
            raise IOError('Image files do not match the specified resolution')
        super().__init__(name=name, raw_shape=raw_shape, **super_kwargs)

    @staticmethod
    def _file_ext(fname):
        # Lower-cased extension, including the leading dot.
        return os.path.splitext(fname)[1].lower()

    def _get_zipfile(self):
        # Lazily open the archive so pickled copies can reopen it themselves.
        assert self._type == 'zip'
        if self._zipfile is None:
            self._zipfile = zipfile.ZipFile(self._path)
        return self._zipfile

    def _open_file(self, fname):
        if self._type == 'dir':
            return open(os.path.join(self._path, fname), 'rb')
        if self._type == 'zip':
            return self._get_zipfile().open(fname, 'r')
        return None

    def close(self):
        try:
            if self._zipfile is not None:
                self._zipfile.close()
        finally:
            self._zipfile = None

    def __getstate__(self):
        # Zip handles are not picklable; drop the cached handle.
        return dict(super().__getstate__(), _zipfile=None)

    def _load_raw_image(self, raw_idx):
        fname = self._image_fnames[raw_idx]
        with self._open_file(fname) as f:
            if pyspng is not None and self._file_ext(fname) == '.png':
                image = pyspng.load(f.read())
            else:
                image = np.array(PIL.Image.open(f))
        if image.ndim == 2:
            image = image[:, :, np.newaxis] # HW => HWC
        return image.transpose(2, 0, 1)     # HWC => CHW

    def _load_raw_labels(self):
        fname = 'dataset.json'
        if fname not in self._all_fnames:
            return None
        with self._open_file(fname) as f:
            labels = json.load(f)['labels']
        if labels is None:
            return None
        by_name = dict(labels)
        ordered = np.array([by_name[key.replace('\\', '/')] for key in self._image_fnames])
        # int64 for 1-D class labels, float32 for 2-D label vectors.
        return ordered.astype({1: np.int64, 2: np.float32}[ordered.ndim])
#----------------------------------------------------------------------------
class ImageSegFolderDataset(Dataset):
    """Paired dataset of images and per-pixel segmentation masks.

    Images come from `path` and masks from `mask_path`; both must be the same
    kind of container (directory tree or zip archive). The sorted image and
    mask file lists are assumed to correspond index-by-index.
    __getitem__ returns a dict with keys 'image', 'pose', 'mask', and 'idx'.
    """

    def __init__(self,
        path,                   # Path to directory or zip with the images.
        mask_path,              # Path to directory or zip with the masks.
        resolution = None,      # Ensure specific resolution, None = highest available.
        data_type = 'seg',      # Tag describing the conditioning modality.
        **super_kwargs,         # Additional arguments for the Dataset base class.
    ):
        self._path = path
        self._mask_path = mask_path
        self._zipfile = None
        self._seg_zipfile = None
        self.data_type = data_type
        if os.path.isdir(self._path):
            self._type = 'dir'
            self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files}
            # Fix: the directory branch previously never populated
            # _all_seg_fnames, so directory-based datasets crashed with
            # AttributeError when building _seg_fnames below.
            self._all_seg_fnames = {os.path.relpath(os.path.join(root, fname), start=self._mask_path) for root, _dirs, files in os.walk(self._mask_path) for fname in files}
        elif self._file_ext(self._path) == '.zip':
            self._type = 'zip'
            self._all_fnames = set(self._get_zipfile().namelist())
            self._all_seg_fnames = set(self._get_seg_zipfile().namelist())
        else:
            raise IOError('Path must point to a directory or zip')

        PIL.Image.init()
        self._image_fnames = sorted(fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION)
        self._seg_fnames = sorted(fname for fname in self._all_seg_fnames if self._file_ext(fname) in PIL.Image.EXTENSION)
        if len(self._image_fnames) == 0:
            raise IOError('No image files found in the specified path')

        name = os.path.splitext(os.path.basename(self._path))[0]
        raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape)
        if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution):
            raise IOError('Image files do not match the specified resolution')
        super().__init__(name=name, raw_shape=raw_shape, **super_kwargs)

    @staticmethod
    def _file_ext(fname):
        # Lower-cased extension, including the leading dot.
        return os.path.splitext(fname)[1].lower()

    def _get_zipfile(self):
        assert self._type == 'zip'
        if self._zipfile is None:
            self._zipfile = zipfile.ZipFile(self._path)
        return self._zipfile

    def _get_seg_zipfile(self):
        assert self._type == 'zip'
        if self._seg_zipfile is None:
            self._seg_zipfile = zipfile.ZipFile(self._mask_path)
        return self._seg_zipfile

    def _open_file(self, fname):
        if self._type == 'dir':
            return open(os.path.join(self._path, fname), 'rb')
        if self._type == 'zip':
            return self._get_zipfile().open(fname, 'r')
        return None

    def _open_seg_file(self, fname):
        if self._type == 'dir':
            return open(os.path.join(self._mask_path, fname), 'rb')
        if self._type == 'zip':
            return self._get_seg_zipfile().open(fname, 'r')
        return None

    def close(self):
        try:
            if self._zipfile is not None:
                self._zipfile.close()
            if self._seg_zipfile is not None:
                self._seg_zipfile.close()
        finally:
            self._zipfile = None
            self._seg_zipfile = None

    def __getstate__(self):
        # Fix: also drop _seg_zipfile — ZipFile handles are not picklable, and
        # the previous version only nulled _zipfile.
        return dict(super().__getstate__(), _zipfile=None, _seg_zipfile=None)

    def _load_raw_image(self, raw_idx):
        """Load image raw_idx as a uint8 CHW array."""
        fname = self._image_fnames[raw_idx]
        with self._open_file(fname) as f:
            if pyspng is not None and self._file_ext(fname) == '.png':
                image = pyspng.load(f.read())
            else:
                image = np.array(PIL.Image.open(f))
        if image.ndim == 2:
            image = image[:, :, np.newaxis] # HW => HWC
        image = image.transpose(2, 0, 1) # HWC => CHW
        return image

    def _load_raw_labels(self):
        fname = 'dataset.json'
        if fname not in self._all_fnames:
            return None
        with self._open_file(fname) as f:
            labels = json.load(f)['labels']
        if labels is None:
            return None
        labels = dict(labels)
        labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames]
        labels = np.array(labels)
        # int64 for 1-D class labels, float32 for 2-D label vectors.
        labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim])
        return labels

    def _load_raw_mask(self, raw_idx):
        """Load segmentation mask raw_idx as a CHW array."""
        fname = self._seg_fnames[raw_idx]
        with self._open_seg_file(fname) as f:
            if pyspng is not None and self._file_ext(fname) == '.png':
                mask = pyspng.load(f.read())
            else:
                mask = np.array(PIL.Image.open(f))
        if mask.ndim == 2:
            mask = mask[:, :, np.newaxis]
        mask = mask.transpose(2, 0, 1) # HWC => CHW
        return mask

    def __getitem__(self, idx):
        """Return {'image', 'pose', 'mask', 'idx'}; image and mask are flipped together."""
        mask = self._load_raw_mask(self._raw_idx[idx])
        image = self._load_raw_image(self._raw_idx[idx])
        assert isinstance(image, np.ndarray)
        assert list(image.shape) == self.image_shape
        assert image.dtype == np.uint8
        if self._xflip[idx]:
            assert image.ndim == 3 # CHW
            image = image[:, :, ::-1]
            mask = mask[:, :, ::-1]
        ret = {'image': image.copy(), 'pose': self.get_label(idx), 'mask': mask.copy(), 'idx': idx}
        return ret
#----------------------------------------------------------------------------
class ImageEdgeFolderDataset(Dataset):
    """Paired dataset of images and edge maps.

    Images come from `path` and edge maps from `mask_path`; both must be the
    same kind of container (directory tree or zip archive). The sorted image
    and edge-map file lists are assumed to correspond index-by-index. Edge maps
    are inverted, blurred, and resized to the dataset resolution on load.
    __getitem__ returns a dict with keys 'image', 'pose', 'mask', and 'idx'.
    """

    def __init__(self,
        path,                   # Path to directory or zip with the images.
        mask_path,              # Path to directory or zip with the edge maps.
        resolution = None,      # Ensure specific resolution, None = highest available.
        data_type = 'edge',     # Tag describing the conditioning modality.
        **super_kwargs,         # Additional arguments for the Dataset base class.
    ):
        self._path = path
        self._mask_path = mask_path
        self._zipfile = None
        self._seg_zipfile = None
        self.data_type = data_type
        if os.path.isdir(self._path):
            self._type = 'dir'
            self._all_fnames = {os.path.relpath(os.path.join(root, fname), start=self._path) for root, _dirs, files in os.walk(self._path) for fname in files}
            # Fix: the directory branch previously never populated
            # _all_seg_fnames, so directory-based datasets crashed with
            # AttributeError when building _seg_fnames below.
            self._all_seg_fnames = {os.path.relpath(os.path.join(root, fname), start=self._mask_path) for root, _dirs, files in os.walk(self._mask_path) for fname in files}
        elif self._file_ext(self._path) == '.zip':
            self._type = 'zip'
            self._all_fnames = set(self._get_zipfile().namelist())
            self._all_seg_fnames = set(self._get_seg_zipfile().namelist())
        else:
            raise IOError('Path must point to a directory or zip')

        PIL.Image.init()
        self._image_fnames = sorted(fname for fname in self._all_fnames if self._file_ext(fname) in PIL.Image.EXTENSION)
        self._seg_fnames = sorted(fname for fname in self._all_seg_fnames if self._file_ext(fname) in PIL.Image.EXTENSION)
        if len(self._image_fnames) == 0:
            raise IOError('No image files found in the specified path')

        name = os.path.splitext(os.path.basename(self._path))[0]
        raw_shape = [len(self._image_fnames)] + list(self._load_raw_image(0).shape)
        if resolution is not None and (raw_shape[2] != resolution or raw_shape[3] != resolution):
            raise IOError('Image files do not match the specified resolution')
        super().__init__(name=name, raw_shape=raw_shape, **super_kwargs)

    @staticmethod
    def _file_ext(fname):
        # Lower-cased extension, including the leading dot.
        return os.path.splitext(fname)[1].lower()

    def _get_zipfile(self):
        assert self._type == 'zip'
        if self._zipfile is None:
            self._zipfile = zipfile.ZipFile(self._path)
        return self._zipfile

    def _get_seg_zipfile(self):
        assert self._type == 'zip'
        if self._seg_zipfile is None:
            self._seg_zipfile = zipfile.ZipFile(self._mask_path)
        return self._seg_zipfile

    def _open_file(self, fname):
        if self._type == 'dir':
            return open(os.path.join(self._path, fname), 'rb')
        if self._type == 'zip':
            return self._get_zipfile().open(fname, 'r')
        return None

    def _open_seg_file(self, fname):
        if self._type == 'dir':
            return open(os.path.join(self._mask_path, fname), 'rb')
        if self._type == 'zip':
            return self._get_seg_zipfile().open(fname, 'r')
        return None

    def close(self):
        try:
            if self._zipfile is not None:
                self._zipfile.close()
            if self._seg_zipfile is not None:
                self._seg_zipfile.close()
        finally:
            self._zipfile = None
            self._seg_zipfile = None

    def __getstate__(self):
        # Fix: also drop _seg_zipfile — ZipFile handles are not picklable, and
        # the previous version only nulled _zipfile.
        return dict(super().__getstate__(), _zipfile=None, _seg_zipfile=None)

    def _load_raw_image(self, raw_idx):
        """Load image raw_idx as a uint8 CHW array."""
        fname = self._image_fnames[raw_idx]
        with self._open_file(fname) as f:
            if pyspng is not None and self._file_ext(fname) == '.png':
                image = pyspng.load(f.read())
            else:
                image = np.array(PIL.Image.open(f))
        if image.ndim == 2:
            image = image[:, :, np.newaxis] # HW => HWC
        image = image.transpose(2, 0, 1) # HWC => CHW
        return image

    def _load_raw_labels(self):
        fname = 'dataset.json'
        if fname not in self._all_fnames:
            return None
        with self._open_file(fname) as f:
            labels = json.load(f)['labels']
        if labels is None:
            return None
        labels = dict(labels)
        labels = [labels[fname.replace('\\', '/')] for fname in self._image_fnames]
        labels = np.array(labels)
        # int64 for 1-D class labels, float32 for 2-D label vectors.
        labels = labels.astype({1: np.int64, 2: np.float32}[labels.ndim])
        return labels

    def _load_raw_mask(self, raw_idx):
        """Load edge map raw_idx as a CHW array: grayscale, inverted, blurred,
        and resized to the dataset resolution with nearest-neighbour sampling."""
        fname = self._seg_fnames[raw_idx]
        with self._open_seg_file(fname) as f:
            mask = np.array(PIL.Image.open(f).convert('L'))
        mask = cv2.blur(255 - mask, ksize=(3,3)) # Invert so edges are bright, then soften.
        if mask.ndim == 2:
            mask = mask[:, :, np.newaxis] # HW => HWC
        if hasattr(self, '_raw_shape') and mask.shape[0] != self.resolution: # resize input image
            mask = cv2.resize(mask, (self.resolution, self.resolution), interpolation=cv2.INTER_NEAREST)[...,np.newaxis] # use nearest neighbour to interpolate
        mask = mask.transpose(2, 0, 1) # HWC => CHW
        return mask

    def __getitem__(self, idx):
        """Return {'image', 'pose', 'mask', 'idx'}; image and mask are flipped together."""
        mask = self._load_raw_mask(self._raw_idx[idx])
        image = self._load_raw_image(self._raw_idx[idx])
        assert isinstance(image, np.ndarray)
        assert list(image.shape) == self.image_shape
        assert image.dtype == np.uint8
        if self._xflip[idx]:
            assert image.ndim == 3 # CHW
            image = image[:, :, ::-1]
            mask = mask[:, :, ::-1]
        ret = {'image': image.copy(), 'pose': self.get_label(idx), 'mask': mask.copy(), 'idx': idx}
        return ret
#---------------------------------------------------------------------------- | 20,459 | 37.676749 | 159 | py |
pix2pix3D | pix2pix3D-main/training/crosssection_utils.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
import torch
def sample_cross_section(G, ws, resolution=256, w=1.2, axis=0):
    """Sample a planar cross-section of the generator's density field.

    Builds a (resolution x resolution) grid of 3D points spanning [-w/2, w/2]
    on the plane where the coordinate along `axis` is zero, and queries the
    generator's density at those points.

    Args:
        G: generator exposing sample_mixed(coordinates, directions, ws) -> {'sigma': ...}.
        ws: intermediate latents; ws.shape[0] sets the number of slices.
        resolution: number of samples per side of the grid.
        w: side length of the sampled square, in scene units.
        axis: coordinate axis held at zero (0, 1, or 2). Default 0 preserves
            the previously hard-coded behavior.

    Returns:
        Densities with shape [batch, 1, resolution, resolution].
    """
    if axis not in (0, 1, 2):
        raise ValueError('axis must be 0, 1, or 2')
    A, B = torch.meshgrid(torch.linspace(w/2, -w/2, resolution, device=ws.device),
                          torch.linspace(-w/2, w/2, resolution, device=ws.device), indexing='ij')
    A, B = A.reshape(-1, 1), B.reshape(-1, 1)
    C = torch.zeros_like(A) # The fixed (zero) coordinate along `axis`.
    coordinates = [A, B]
    coordinates.insert(axis, C)
    coordinates = torch.cat(coordinates, dim=-1).expand(ws.shape[0], -1, -1)
    # Directions are random because density should not depend on view direction.
    sigma = G.sample_mixed(coordinates, torch.randn_like(coordinates), ws)['sigma']
    return sigma.reshape(-1, 1, resolution, resolution)
# if __name__ == '__main__':
# sample_crossection(None) | 1,199 | 45.153846 | 154 | py |
pix2pix3D | pix2pix3D-main/training/triplane.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
import torch
from torch_utils import persistence
from training.networks_stylegan2 import Generator as StyleGAN2Backbone
from training.volumetric_rendering.renderer import ImportanceRenderer
from training.volumetric_rendering.ray_sampler import RaySampler
import dnnlib
@persistence.persistent_class
class TriPlaneGenerator(torch.nn.Module):
def __init__(self,
    z_dim,                      # Input latent (Z) dimensionality.
    c_dim,                      # Conditioning label (C) dimensionality.
    w_dim,                      # Intermediate latent (W) dimensionality.
    img_resolution,             # Output resolution.
    img_channels,               # Number of output color channels.
    sr_num_fp16_res     = 0,    # Number of fp16 layers in the superresolution module.
    mapping_kwargs      = {},   # Arguments for MappingNetwork.
    rendering_kwargs    = {},   # Volume-rendering options (also selects the superresolution class).
    sr_kwargs           = {},   # Extra arguments for the superresolution module.
    **synthesis_kwargs,         # Arguments for SynthesisNetwork.
):
    """Tri-plane generator: StyleGAN2 backbone produces three feature planes
    that are volume-rendered and then upsampled to the output resolution."""
    super().__init__()
    self.z_dim=z_dim
    self.c_dim=c_dim
    self.w_dim=w_dim
    self.img_resolution=img_resolution
    self.img_channels=img_channels
    self.renderer = ImportanceRenderer()
    self.ray_sampler = RaySampler()
    # Backbone renders a 256x256, 96-channel image that is split into three
    # 32-channel planes during synthesis.
    self.backbone = StyleGAN2Backbone(z_dim, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs)
    # Superresolution class is chosen by name via rendering_kwargs.
    self.superresolution = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module'], channels=32, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], **sr_kwargs)
    # Decodes sampled 32-channel tri-plane features into color + density.
    self.decoder = OSGDecoder(32, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32})
    # Resolution of the raw neural render before superresolution;
    # may be overridden per-call in synthesis().
    self.neural_rendering_resolution = 64
    self.rendering_kwargs = rendering_kwargs
    # Cached planes for repeated synthesis with the same ws (see cache_backbone).
    self._last_planes = None
def mapping(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
if self.rendering_kwargs['c_gen_conditioning_zero']:
c = torch.zeros_like(c)
return self.backbone.mapping(z, c * self.rendering_kwargs.get('c_scale', 0), truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
def synthesis(self, ws, c, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
cam2world_matrix = c[:, :16].view(-1, 4, 4)
intrinsics = c[:, 16:25].view(-1, 3, 3)
if neural_rendering_resolution is None:
neural_rendering_resolution = self.neural_rendering_resolution
else:
self.neural_rendering_resolution = neural_rendering_resolution
# Create a batch of rays for volume rendering
ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution)
# Create triplanes by running StyleGAN backbone
N, M, _ = ray_origins.shape
if use_cached_backbone and self._last_planes is not None:
planes = self._last_planes
else:
planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
if cache_backbone:
self._last_planes = planes
# Reshape output into three 32-channel planes
planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
# Perform volume rendering
feature_samples, depth_samples, weights_samples = self.renderer(planes, self.decoder, ray_origins, ray_directions, self.rendering_kwargs) # channels last
# Reshape into 'raw' neural-rendered image
H = W = self.neural_rendering_resolution
feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous()
depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W)
# Run superresolution to get final image
rgb_image = feature_image[:, :3]
sr_image = self.superresolution(rgb_image, feature_image, ws, noise_mode=self.rendering_kwargs['superresolution_noise_mode'], **{k:synthesis_kwargs[k] for k in synthesis_kwargs.keys() if k != 'noise_mode'})
return {'image': sr_image, 'image_raw': rgb_image, 'image_depth': depth_image}
def sample(self, coordinates, directions, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
# Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes.
ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs)
def sample_mixed(self, coordinates, directions, ws, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
# Same as sample, but expects latent vectors 'ws' instead of Gaussian noise 'z'
planes = self.backbone.synthesis(ws, update_emas = update_emas, **synthesis_kwargs)
planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs)
def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
# Render a batch of generated images.
ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
return self.synthesis(ws, c, update_emas=update_emas, neural_rendering_resolution=neural_rendering_resolution, cache_backbone=cache_backbone, use_cached_backbone=use_cached_backbone, **synthesis_kwargs)
from training.networks_stylegan2 import FullyConnectedLayer
class OSGDecoder(torch.nn.Module):
    """Point-wise MLP decoder: averaged tri-plane features -> color features + density.

    Output dict: 'rgb' (sigmoid-clamped feature channels) and 'sigma' (raw density).
    """
    def __init__(self, n_features, options):
        super().__init__()
        self.hidden_dim = 64
        lr_mul = options['decoder_lr_mul']
        # Two-layer MLP with Softplus; the head emits 1 density channel
        # followed by 'decoder_output_dim' feature channels.
        self.net = torch.nn.Sequential(
            FullyConnectedLayer(n_features, self.hidden_dim, lr_multiplier=lr_mul),
            torch.nn.Softplus(),
            FullyConnectedLayer(self.hidden_dim, 1 + options['decoder_output_dim'], lr_multiplier=lr_mul)
        )
    def forward(self, sampled_features, ray_directions):
        # Aggregate across the plane axis (dim 1), then decode each point independently.
        feats = sampled_features.mean(1)
        batch, n_pts, n_ch = feats.shape
        out = self.net(feats.reshape(batch * n_pts, n_ch))
        out = out.view(batch, n_pts, -1)
        sigma = out[..., 0:1]
        rgb = torch.sigmoid(out[..., 1:])*(1 + 2*0.001) - 0.001 # Uses sigmoid clamping from MipNeRF
        return {'rgb': rgb, 'sigma': sigma}
| 7,562 | 54.610294 | 258 | py |
pix2pix3D | pix2pix3D-main/training/training_loop.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Main training loop."""
import os
import time
import copy
import json
import pickle
import psutil
import PIL.Image
import numpy as np
import torch
import dnnlib
from torch_utils import misc
from torch_utils import training_stats
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import grid_sample_gradfix
import legacy
from metrics import metric_main
from camera_utils import LookAtPoseSampler
from training.crosssection_utils import sample_cross_section
from training.utils import color_mask
# #----------------------------------------------------------------------------
# def setup_snapshot_image_grid(training_set, random_seed=0):
# rnd = np.random.RandomState(random_seed)
# gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
# gh = np.clip(4320 // training_set.image_shape[1], 4, 32)
# # No labels => show random subset of training samples.
# if not training_set.has_labels:
# all_indices = list(range(len(training_set)))
# rnd.shuffle(all_indices)
# grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]
# else:
# # Group training samples by label.
# label_groups = dict() # label => [idx, ...]
# for idx in range(len(training_set)):
# label = tuple(training_set.get_details(idx).raw_label.flat[::-1])
# if label not in label_groups:
# label_groups[label] = []
# label_groups[label].append(idx)
# # Reorder.
# label_order = list(label_groups.keys())
# rnd.shuffle(label_order)
# for label in label_order:
# rnd.shuffle(label_groups[label])
# # Organize into grid.
# grid_indices = []
# for y in range(gh):
# label = label_order[y % len(label_order)]
# indices = label_groups[label]
# grid_indices += [indices[x % len(indices)] for x in range(gw)]
# label_groups[label] = [indices[(i + gw) % len(indices)] for i in range(len(indices))]
# # Load data.
# images, labels = zip(*[training_set[i] for i in grid_indices])
# return (gw, gh), np.stack(images), np.stack(labels)
#----------------------------------------------------------------------------
def setup_snapshot_image_grid(training_set, test_set=None, random_seed=0):
    """Pick a random grid of dataset samples for snapshot visualization.

    Args:
        training_set: dataset with `image_shape` (C, H, W) whose items are
                      dicts holding 'image', 'mask' and 'pose' arrays.
        test_set:     optional second dataset; when given, its samples are
                      appended below the training samples (grid height doubles).
        random_seed:  seed for the sampling RNG.

    Returns:
        ((gw, gh), images, masks, poses) — grid dimensions and stacked
        tensors of the selected samples.
    """
    rnd = np.random.RandomState(random_seed)
    # Grid dimensions sized so the tiled grid roughly fits a 7680x4320 canvas.
    gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
    gh = np.clip(4320 // training_set.image_shape[1], 4, 32)

    def _sample(dataset):
        # Random subset, wrapping around when the dataset is smaller than the grid.
        all_indices = list(range(len(dataset)))
        rnd.shuffle(all_indices)
        grid_indices = [all_indices[i % len(all_indices)] for i in range(gw * gh)]
        images = torch.stack([torch.Tensor(dataset[i]['image']) for i in grid_indices])
        masks = torch.stack([torch.Tensor(dataset[i]['mask']) for i in grid_indices])
        # Fix: wrap poses in torch.Tensor like the other fields — torch.stack
        # raises TypeError on raw numpy arrays.
        poses = torch.stack([torch.Tensor(dataset[i]['pose']) for i in grid_indices])
        return images, masks, poses

    images, masks, poses = _sample(training_set)
    if test_set is None:
        return (gw, gh), images, masks, poses

    images_test, masks_test, poses_test = _sample(test_set)
    images = torch.cat([images, images_test], dim=0)
    masks = torch.cat([masks, masks_test], dim=0)
    poses = torch.cat([poses, poses_test], dim=0)
    return (gw, gh*2), images, masks, poses
#----------------------------------------------------------------------------
def save_image_grid(img, fname, drange, grid_size):
    """Tile a batch of CHW images into one gw x gh grid and save it to *fname*.

    `drange` = (lo, hi) is linearly mapped to [0, 255]; grayscale (C=1) and
    RGB (C=3) images are supported.
    """
    lo, hi = drange
    # Rescale [lo, hi] -> [0, 255] and quantize to uint8.
    arr = np.asarray(img, dtype=np.float32)
    arr = np.rint((arr - lo) * (255 / (hi - lo))).clip(0, 255).astype(np.uint8)

    gw, gh = grid_size
    _n, channels, height, width = arr.shape
    # (N, C, H, W) -> (gh*H, gw*W, C): row-major tiling of the batch.
    arr = arr.reshape([gh, gw, channels, height, width])
    arr = arr.transpose(0, 3, 1, 4, 2)
    arr = arr.reshape([gh * height, gw * width, channels])

    assert channels in [1, 3]
    if channels == 1:
        PIL.Image.fromarray(arr[:, :, 0], 'L').save(fname)
    if channels == 3:
        PIL.Image.fromarray(arr, 'RGB').save(fname)
#----------------------------------------------------------------------------
def get_image_grid(img, drange, grid_size):
    """Tile a batch of CHW images into one gw x gh grid and return a PIL image.

    Same normalization and layout as save_image_grid, but returns the image
    instead of writing it to disk.
    """
    lo, hi = drange
    data = np.asarray(img, dtype=np.float32)
    # Map [lo, hi] -> [0, 255] and quantize to uint8.
    data = np.rint((data - lo) * (255 / (hi - lo))).clip(0, 255).astype(np.uint8)

    gw, gh = grid_size
    _n, nch, h, w = data.shape
    # (N, C, H, W) -> (gh*h, gw*w, C): row-major tiling of the batch.
    grid = data.reshape([gh, gw, nch, h, w]).transpose(0, 3, 1, 4, 2).reshape([gh * h, gw * w, nch])

    assert nch in [1, 3]
    if nch == 1:
        return PIL.Image.fromarray(grid[:, :, 0], 'L')
    if nch == 3:
        return PIL.Image.fromarray(grid, 'RGB')
#----------------------------------------------------------------------------
def log_table(G_ema, grid_z, grid_i, grid_c, grid_m, grid_p, mask_type, global_step, device, wandb):
    """Log a qualitative comparison table to Weights & Biases.

    Runs the EMA generator on the first `max_rounds` snapshot batches: once
    with the dataset camera labels (`grid_c`) and once per fixed yaw offset in
    [-1, -0.5, 0, 0.5, 1] (scaled by `yaw_range`), i.e. 6 renderings per
    sample. Each table row holds the real image, real mask, the stacked
    generated views and, when the generator emits 'semantic', the generated
    masks.

    Args:
        G_ema:       EMA generator, called as G_ema(z, c, cond_dict, noise_mode='const').
        grid_z, grid_i, grid_c, grid_m, grid_p: per-batch splits of latents,
                     real images, camera labels, masks and poses.
        mask_type:   'seg' -> semantic logits are argmax'd and colorized;
                     otherwise masks are logged as-is (e.g. edge maps).
        global_step: wandb step to log under.
        device:      device the generator runs on.
        wandb:       the wandb module, passed in by the caller.
    """
    # NOTE(review): assumes len(grid_z) >= max_rounds; fewer batches would
    # raise IndexError — confirm against the caller's grid setup.
    max_rounds = 16
    images_all = []
    images = []
    segs_all = []
    segs = []
    # Pass 1: render with the per-sample dataset cameras.
    for round_idx in range(max_rounds):
        out = G_ema(grid_z[round_idx].to(device), grid_c[round_idx].to(device),
            {'image':grid_i[round_idx].to(device), 'pose':grid_p[round_idx].to(device), 'mask':grid_m[round_idx].to(device)}, noise_mode='const')
        images.append(out['image'])
        if 'semantic' in out:
            if mask_type == 'seg':
                segs.append(torch.argmax(out['semantic'], dim=1)) # B x 32 x 32
            else:
                segs.append(out['semantic'])
    images_all.append(torch.cat(images))
    if len(segs) > 0:
        segs_all.append(torch.cat(segs))
    # Pass 2: render the same samples from five fixed yaw offsets, using a
    # shared look-at camera and fixed intrinsics.
    yaw_range = 0.35
    for yaw in [-1, -0.5, 0, 0.5, 1]:
        cam2world_pose = LookAtPoseSampler.sample(3.14/2 + yaw * yaw_range, 3.14/2, torch.tensor([0, 0, 0.2], device=device), radius=2.7, device=device)
        intrinsics = torch.tensor([[4.2647, 0, 0.5], [0, 4.2647, 0.5], [0, 0, 1]], device=device)
        # Pose layout matches the generator: 16 cam2world values + 9 intrinsics.
        pose = torch.cat([cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1)
        images = []
        segs = []
        for round_idx in range(max_rounds):
            out = G_ema(grid_z[round_idx].to(device), pose.expand(grid_z[round_idx].shape[0], -1),
                {'image':grid_i[round_idx].to(device), 'pose':grid_p[round_idx].to(device), 'mask':grid_m[round_idx].to(device)}, noise_mode='const')
            images.append(out['image'])
            if 'semantic' in out:
                if mask_type == 'seg':
                    segs.append(torch.argmax(out['semantic'], dim=1)) # B x 32 x 32
                else:
                    segs.append(out['semantic'])
        images_all.append(torch.cat(images))
        if len(segs) > 0:
            segs_all.append(torch.cat(segs))
    images_all = torch.stack(images_all) # 6 x N x 3 x 512 x 512
    grid_i_all = torch.cat(grid_i)
    grid_m_all = torch.cat(grid_m)
    if len(segs_all) > 0:
        segs_all = torch.stack(segs_all) # 6 x N x 512 x 512
    if len(segs_all) > 0:
        columns = ["Real Image", "Mask", "Generated_ema", "Generated Mask"]
    else:
        columns = ["Real Image", "Mask", "Generated_ema"]
    table = wandb.Table(columns=columns)
    # One table row per sample; generated columns stack all 6 viewpoints.
    for row in range(images_all.shape[1]):
        g_img_ema = wandb.Image(images_all[:,row].clamp(-1,1))
        r_img = wandb.Image(grid_i_all[row])
        if mask_type == 'seg':
            r_mask = wandb.Image(color_mask(grid_m_all[row].squeeze(0).cpu()))
        else:
            r_mask = wandb.Image(grid_m_all[row].squeeze(0).cpu().numpy())
        if len(segs_all) > 0:
            g_mask = segs_all[:,row] # 6 x 512 x 512
            if mask_type == 'seg':
                g_mask = torch.tensor(color_mask(g_mask.cpu()).transpose(0,3,1,2))
            else:
                g_mask = torch.tensor(g_mask.cpu().numpy().clip(-1, 1))
            g_mask = wandb.Image(g_mask)
            table.add_data(r_img, r_mask, g_img_ema, g_mask)
        else:
            table.add_data(r_img, r_mask, g_img_ema)
    wandb.log({"Visualize": table}, step=global_step)
    # Free the large visualization tensors promptly.
    del images_all, grid_i_all, grid_m_all, segs_all
#----------------------------------------------------------------------------
def training_loop(
run_dir = '.', # Output directory.
training_set_kwargs = {}, # Options for training set.
data_loader_kwargs = {}, # Options for torch.utils.data.DataLoader.
G_kwargs = {}, # Options for generator network.
D_kwargs = {}, # Options for discriminator network.
G_opt_kwargs = {}, # Options for generator optimizer.
D_opt_kwargs = {}, # Options for discriminator optimizer.
augment_kwargs = None, # Options for augmentation pipeline. None = disable.
loss_kwargs = {}, # Options for loss function.
metrics = [], # Metrics to evaluate during training.
random_seed = 0, # Global random seed.
num_gpus = 1, # Number of GPUs participating in the training.
rank = 0, # Rank of the current process in [0, num_gpus[.
batch_size = 4, # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
batch_gpu = 4, # Number of samples processed at a time by one GPU.
ema_kimg = 10, # Half-life of the exponential moving average (EMA) of generator weights.
ema_rampup = 0.05, # EMA ramp-up coefficient. None = no rampup.
G_reg_interval = None, # How often to perform regularization for G? None = disable lazy regularization.
D_reg_interval = 16, # How often to perform regularization for D? None = disable lazy regularization.
augment_p = 0, # Initial value of augmentation probability.
ada_target = None, # ADA target value. None = fixed p.
ada_interval = 4, # How often to perform ADA adjustment?
ada_kimg = 500, # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
total_kimg = 25000, # Total length of the training, measured in thousands of real images.
kimg_per_tick = 4, # Progress snapshot interval.
image_snapshot_ticks = 50, # How often to save image snapshots? None = disable.
network_snapshot_ticks = 50, # How often to save network snapshots? None = disable.
resume_pkl = None, # Network pickle to resume training from.
resume_kimg = 0, # First kimg to report when resuming training.
cudnn_benchmark = True, # Enable torch.backends.cudnn.benchmark?
abort_fn = None, # Callback function for determining whether to abort training. Must return consistent results across ranks.
progress_fn = None, # Callback function for updating training progress. Called for all ranks.
wandb_log = False, # Log to Weights & Biases.
exp_name = 'default',# Experiment name.
no_eval = False, # Disable evaluation of metrics.
debug = False, # Enable debug mode.
D_semantic_kwargs = None, # Options for discriminator mask network.
):
kwargs = locals()
# Initialize.
start_time = time.time()
device = torch.device('cuda', rank)
np.random.seed(random_seed * num_gpus + rank)
torch.cuda.set_device(device)
torch.cuda.empty_cache()
torch.manual_seed(random_seed * num_gpus + rank)
torch.backends.cudnn.benchmark = cudnn_benchmark # Improves training speed.
torch.backends.cuda.matmul.allow_tf32 = False # Improves numerical accuracy.
torch.backends.cudnn.allow_tf32 = False # Improves numerical accuracy.
torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = False # Improves numerical accuracy.
conv2d_gradfix.enabled = True # Improves training speed. # TODO: ENABLE
grid_sample_gradfix.enabled = False # Avoids errors with the augmentation pipe.
# Load training set.
if rank == 0:
print('Loading training set...')
training_set = dnnlib.util.construct_class_by_name(**training_set_kwargs) # subclass of training.dataset.Dataset
training_set_sampler = misc.InfiniteSampler(dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
training_set_iterator = iter(torch.utils.data.DataLoader(dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
if rank == 0:
print()
print('Num images: ', len(training_set))
print('Image shape:', training_set.image_shape)
print('Label shape:', training_set.label_shape)
print()
# Construct networks.
if rank == 0:
print('Constructing networks...')
common_kwargs = dict(c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels, semantic_channels=G_kwargs.mapping_kwargs.in_channels, data_type=training_set.data_type)
G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
G.register_buffer('dataset_label_std', torch.tensor(training_set.get_label_std()).to(device))
D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
G_ema = copy.deepcopy(G).eval()
D_semantic = None
if D_semantic_kwargs is not None:
D_semantic = dnnlib.util.construct_class_by_name(**D_semantic_kwargs, c_dim=training_set.label_dim, img_resolution=training_set.resolution, img_channels=training_set.num_channels + G_kwargs.mapping_kwargs.in_channels).train().requires_grad_(False).to(device)
# Resume from existing pickle.
if (resume_pkl is not None) and (rank == 0):
print(f'Resuming from "{resume_pkl}"')
with dnnlib.util.open_url(resume_pkl) as f:
resume_data = legacy.load_network_pkl(f)
for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
misc.copy_params_and_buffers(resume_data[name], module, require_all=False, allow_mismatch=True)
if D_semantic is not None:
if 'D_semantic' in resume_data:
misc.copy_params_and_buffers(resume_data['D_semantic'], D_semantic, require_all=False, allow_mismatch=True)
else:
misc.copy_params_and_buffers(resume_data['D'], D_semantic, require_all=False, allow_mismatch=True)
# Print network summary tables.
if rank == 0:
z = torch.empty([batch_gpu, G.z_dim], device=device)
c = torch.empty([batch_gpu, G.c_dim], device=device)
batch = {'pose':torch.empty([batch_gpu, G.c_dim], device=device), 'mask':torch.empty([batch_gpu, 1, training_set.resolution, training_set.resolution], device=device)}
img = misc.print_module_summary(G, [z, c, batch])
misc.print_module_summary(D, [img, c])
del z, c, batch, img
# Setup augmentation.
if rank == 0:
print('Setting up augmentation...')
augment_pipe = None
ada_stats = None
if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
augment_pipe = dnnlib.util.construct_class_by_name(**augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
augment_pipe.p.copy_(torch.as_tensor(augment_p))
if ada_target is not None:
ada_stats = training_stats.Collector(regex='Loss/signs/real')
# Distribute across GPUs.
if rank == 0:
print(f'Distributing across {num_gpus} GPUs...')
for module in [G, D, G_ema, augment_pipe, D_semantic]:
if module is not None:
for param in misc.params_and_buffers(module):
if param.numel() > 0 and num_gpus > 1:
torch.distributed.broadcast(param, src=0)
# Setup training phases.
if rank == 0:
print('Setting up training phases...')
loss = dnnlib.util.construct_class_by_name(device=device, G=G, D=D, augment_pipe=augment_pipe, D_semantic=D_semantic, **loss_kwargs) # subclass of training.loss.Loss
phases = []
for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval), ('D_semantic', D_semantic, D_opt_kwargs, D_reg_interval)]:
if module is None:
continue
if reg_interval is None:
opt = dnnlib.util.construct_class_by_name(params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
phases += [dnnlib.EasyDict(name=name+'both', module=module, opt=opt, interval=1)]
else: # Lazy regularization.
mb_ratio = reg_interval / (reg_interval + 1)
opt_kwargs = dnnlib.EasyDict(opt_kwargs)
opt_kwargs.lr = opt_kwargs.lr * mb_ratio
opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
opt = dnnlib.util.construct_class_by_name(module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
phases += [dnnlib.EasyDict(name=name+'main', module=module, opt=opt, interval=1)]
phases += [dnnlib.EasyDict(name=name+'reg', module=module, opt=opt, interval=reg_interval)]
for phase in phases:
phase.start_event = None
phase.end_event = None
if rank == 0:
phase.start_event = torch.cuda.Event(enable_timing=True)
phase.end_event = torch.cuda.Event(enable_timing=True)
# Initialize logs.
if rank == 0:
print('Initializing logs...')
stats_collector = training_stats.Collector(regex='.*')
stats_metrics = dict()
stats_jsonl = None
stats_tfevents = None
if rank == 0:
stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
try:
import torch.utils.tensorboard as tensorboard
stats_tfevents = tensorboard.SummaryWriter(run_dir)
except ImportError as err:
print('Skipping tfevents export:', err)
if wandb_log:
import wandb # Setup wandb for logging
wandb.init(project="EG3D-Conditional", name=exp_name)
wandb.config.update(kwargs)
# Export sample images.
grid_size = None
grid_z = None
grid_c = None
if rank == 0:
print('Exporting sample images...')
grid_size, images, masks, poses = setup_snapshot_image_grid(training_set=training_set)
save_image_grid(images, os.path.join(run_dir, 'reals.png'), drange=[0,255], grid_size=grid_size)
if training_set_kwargs.data_type == 'seg':
masks_color = color_mask(masks.squeeze(1)).transpose(0,3,1,2)
elif training_set_kwargs.data_type == 'edge':
masks_color = 255 - masks
masks = -(masks.to(torch.float32) / 127.5 - 1)
save_image_grid(masks_color, os.path.join(run_dir, 'mask.png'), drange=[0, 255], grid_size=grid_size)
grid_z = torch.randn([images.shape[0], G.z_dim]).split(batch_gpu)
grid_i = (images.float() / 127.5 - 1).split(batch_gpu)
grid_m = masks.split(batch_gpu)
grid_p = poses.split(batch_gpu)
grid_c = poses.split(batch_gpu)
if wandb_log:
wandb.log({"Real Images": [wandb.Image(get_image_grid(images, drange=[0,255], grid_size=grid_size))]}, step=0)
wandb.log({"Real Masks": [wandb.Image(get_image_grid(masks_color, drange=[0,255], grid_size=grid_size))]}, step=0)
# # Fake init
# out = []
# for round_idx in range(len(grid_z)):
# o = G_ema(grid_z[round_idx].to(device), grid_c[round_idx].to(device),
# {'image':grid_i[round_idx].to(device), 'pose':grid_p[round_idx].to(device), 'mask':grid_m[round_idx].to(device)}, noise_mode='const')
# out.append(o)
# # out = [G_ema(z=z, c=c, noise_mode='const') for z, c in zip(grid_z, grid_c)]
# images = torch.cat([o['image'].cpu() for o in out]).numpy()
# images_raw = torch.cat([o['image_raw'].cpu() for o in out]).numpy()
# images_depth = -torch.cat([o['image_depth'].cpu() for o in out]).numpy()
# save_image_grid(images, os.path.join(run_dir, f'fakes_init.png'), drange=[-1,1], grid_size=grid_size)
# save_image_grid(images_raw, os.path.join(run_dir, f'fakes_init_raw.png'), drange=[-1,1], grid_size=grid_size)
# save_image_grid(images_depth, os.path.join(run_dir, f'fakes_init_depth.png'), drange=[images_depth.min(), images_depth.max()], grid_size=grid_size)
# if 'semantic' in out[0]:
# if training_set_kwargs.data_type == 'seg':
# images_semantic = torch.cat([torch.argmax(o['semantic'], dim=1).cpu() for o in out]).numpy()
# images_semantic_raw = torch.cat([torch.argmax(o['semantic_raw'], dim=1).cpu() for o in out]).numpy()
# images_semantic_color = color_mask(images_semantic).transpose(0, 3, 1, 2)
# images_semantic_raw_color = color_mask(images_semantic_raw).transpose(0, 3, 1, 2)
# elif training_set_kwargs.data_type == 'edge':
# images_semantic = torch.cat([o['semantic'].cpu() for o in out]).numpy()
# images_semantic_raw = torch.cat([o['semantic_raw'].cpu() for o in out]).numpy()
# images_semantic_color = 255 - (images_semantic + 1) * 127.5
# images_semantic_raw_color = 255- (images_semantic_raw + 1) * 127.5
# save_image_grid(images_semantic_color, os.path.join(run_dir, f'fakes_init_semantic.png'), drange=[0, 255], grid_size=grid_size)
# save_image_grid(images_semantic_raw_color, os.path.join(run_dir, f'fakes_init_semantic_raw.png'), drange=[0, 255], grid_size=grid_size)
# if wandb_log:
# wandb.log({'fakes_init': wandb.Image(get_image_grid(images, drange=[-1,1], grid_size=grid_size))}, step=0)
# wandb.log({'fakes_init_raw': wandb.Image(get_image_grid(images_raw, drange=[-1,1], grid_size=grid_size))}, step=0)
# wandb.log({'fakes_init_depth': wandb.Image(get_image_grid(images_depth, drange=[images_depth.min(), images_depth.max()], grid_size=grid_size))}, step=0)
# if 'semantic' in out[0]:
# wandb.log({'fakes_init_semantic': wandb.Image(get_image_grid(images_semantic_color, drange=[0, 255], grid_size=grid_size))}, step=0)
# wandb.log({'fakes_init_semantic_raw': wandb.Image(get_image_grid(images_semantic_raw_color, drange=[0, 255], grid_size=grid_size))}, step=0)
# Train.
if rank == 0:
print(f'Training for {total_kimg} kimg...')
print()
cur_nimg = resume_kimg * 1000
cur_tick = 0
tick_start_nimg = cur_nimg
tick_start_time = time.time()
maintenance_time = tick_start_time - start_time
batch_idx = 0
if progress_fn is not None:
progress_fn(0, total_kimg)
while True:
torch.cuda.empty_cache()
# Fetch training data.
if debug:
print(f"rank: {rank}, cur_tick: {cur_tick}, Fetch training data.")
with torch.autograd.profiler.record_function('data_fetch'):
def load_data(iterator):
batch = next(iterator)
# batch['image_raw'] = batch['image'].clone()
# batch['mask'] = resize_mask(batch['mask'], curr_res)
# img = [{'img': img} for img in (img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)]
# c = c.to(device).split(batch_gpu)
batch['image'] = (batch['image'].to(torch.float32) / 127.5 - 1) # [-1, 1]
# if mask_type == 'kp':
# batch['mask'] = (batch['mask'].to(torch.float32) * 2 - 1) # [-1, 1]
if training_set_kwargs.data_type == 'edge':
batch['mask'] = -(batch['mask'].to(torch.float32) / 127.5 - 1) # [-1, 1]
for key in batch:
batch[key] = batch[key].to(device).split(batch_gpu)
return batch
phase_batch = load_data(training_set_iterator)
# phase_real_img, phase_real_c = next(training_set_iterator)
# phase_real_img = (phase_real_img.to(device).to(torch.float32) / 127.5 - 1).split(batch_gpu)
# phase_real_c = phase_real_c.to(device).split(batch_gpu)
all_gen_z = torch.randn([len(phases) * batch_size, G.z_dim], device=device)
all_gen_z = [phase_gen_z.split(batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
all_gen_c = [training_set.get_label(np.random.randint(len(training_set))) for _ in range(len(phases) * batch_size)]
all_gen_c = torch.from_numpy(np.stack(all_gen_c)).pin_memory().to(device)
all_gen_c = [phase_gen_c.split(batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
# Execute training phases.
for phase, phase_gen_z, phase_gen_c in zip(phases, all_gen_z, all_gen_c):
if batch_idx % phase.interval != 0:
continue
if phase.start_event is not None:
phase.start_event.record(torch.cuda.current_stream(device))
# Accumulate gradients.
phase.opt.zero_grad(set_to_none=True)
phase.module.requires_grad_(True)
# for real_img, real_c, gen_z, gen_c in zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c):
# loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c, gen_z=gen_z, gen_c=gen_c, gain=phase.interval, cur_nimg=cur_nimg)
for round_idx in range(len(phase_batch['image'])):
if debug:
print(f"rank: {rank}, cur_tick: {cur_tick}, cur_nimg: {cur_nimg}, phase: {phase.name}, round_idx: {round_idx}")
round_batch = {key:phase_batch[key][round_idx] for key in phase_batch}
loss.accumulate_gradients(phase=phase.name, batch=round_batch, gen_z=phase_gen_z[round_idx], gen_c=phase_gen_c[round_idx], gain=phase.interval, cur_nimg=cur_nimg)
# if debug:
# print(f"rank: {rank}, cur_tick: {cur_tick}, phase: {phase.name}, round_idx: {round_idx} Done")
phase.module.requires_grad_(False)
# Update weights.
with torch.autograd.profiler.record_function(phase.name + '_opt'):
params = [param for param in phase.module.parameters() if param.numel() > 0 and param.grad is not None]
if len(params) > 0:
flat = torch.cat([param.grad.flatten() for param in params])
if num_gpus > 1:
torch.distributed.all_reduce(flat)
flat /= num_gpus
misc.nan_to_num(flat, nan=0, posinf=1e5, neginf=-1e5, out=flat)
grads = flat.split([param.numel() for param in params])
for param, grad in zip(params, grads):
param.grad = grad.reshape(param.shape)
phase.opt.step()
# Phase done.
if phase.end_event is not None:
phase.end_event.record(torch.cuda.current_stream(device))
# Update G_ema.
with torch.autograd.profiler.record_function('Gema'):
ema_nimg = ema_kimg * 1000
if ema_rampup is not None:
ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
for p_ema, p in zip(G_ema.parameters(), G.parameters()):
p_ema.copy_(p.lerp(p_ema, ema_beta))
for b_ema, b in zip(G_ema.buffers(), G.buffers()):
b_ema.copy_(b)
G_ema.neural_rendering_resolution = G.neural_rendering_resolution
G_ema.rendering_kwargs = G.rendering_kwargs.copy()
# Update state.
cur_nimg += batch_size
batch_idx += 1
# Execute ADA heuristic.
if (ada_stats is not None) and (batch_idx % ada_interval == 0):
ada_stats.update()
adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * (batch_size * ada_interval) / (ada_kimg * 1000)
augment_pipe.p.copy_((augment_pipe.p + adjust).max(misc.constant(0, device=device)))
# Perform maintenance tasks once per tick.
done = (cur_nimg >= total_kimg * 1000)
if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
continue
# Print status line, accumulating the same information in training_stats.
tick_end_time = time.time()
fields = []
fields += [f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
fields += [f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
fields += [f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
fields += [f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
fields += [f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
fields += [f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
fields += [f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
fields += [f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
fields += [f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"]
torch.cuda.reset_peak_memory_stats()
fields += [f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
training_stats.report0('Timing/total_hours', (tick_end_time - start_time) / (60 * 60))
training_stats.report0('Timing/total_days', (tick_end_time - start_time) / (24 * 60 * 60))
if rank == 0:
print(' '.join(fields))
# Check for abort.
if (not done) and (abort_fn is not None) and abort_fn():
done = True
if rank == 0:
print()
print('Aborting...')
# Save image snapshot.
if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
out = []
for round_idx in range(len(grid_z)):
o = G_ema(grid_z[round_idx].to(device), grid_c[round_idx].to(device),
{'image':grid_i[round_idx].to(device), 'pose':grid_p[round_idx].to(device), 'mask':grid_m[round_idx].to(device)}, noise_mode='const')
out.append(o)
# out = [G_ema(z=z, c=c, noise_mode='const') for z, c in zip(grid_z, grid_c)]
images = torch.cat([o['image'].cpu() for o in out]).numpy()
images_raw = torch.cat([o['image_raw'].cpu() for o in out]).numpy()
images_depth = -torch.cat([o['image_depth'].cpu() for o in out]).numpy()
save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=grid_size)
save_image_grid(images_raw, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_raw.png'), drange=[-1,1], grid_size=grid_size)
save_image_grid(images_depth, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_depth.png'), drange=[images_depth.min(), images_depth.max()], grid_size=grid_size)
if 'semantic' in out[0]:
if training_set_kwargs.data_type == 'seg':
images_semantic = torch.cat([torch.argmax(o['semantic'], dim=1).cpu() for o in out]).numpy()
images_semantic_raw = torch.cat([torch.argmax(o['semantic_raw'], dim=1).cpu() for o in out]).numpy()
images_semantic_color = color_mask(images_semantic).transpose(0, 3, 1, 2)
images_semantic_raw_color = color_mask(images_semantic_raw).transpose(0, 3, 1, 2)
elif training_set_kwargs.data_type == 'edge':
images_semantic = torch.cat([o['semantic'].cpu() for o in out]).numpy()
images_semantic_raw = torch.cat([o['semantic_raw'].cpu() for o in out]).numpy()
images_semantic_color = (images_semantic + 1) * 127.5
images_semantic_raw_color = (images_semantic_raw + 1) * 127.5
save_image_grid(images_semantic_color, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_semantic.png'), drange=[0, 255], grid_size=grid_size)
save_image_grid(images_semantic_raw_color, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_semantic_raw.png'), drange=[0, 255], grid_size=grid_size)
if 'weight' in out[0]:
images_weight = torch.cat([o['weight'].cpu() for o in out]).numpy()
save_image_grid(images_weight, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_weight.png'), drange=[0, 1], grid_size=grid_size)
if wandb_log:
wandb.log({'fakes': wandb.Image(get_image_grid(images, drange=[-1,1], grid_size=grid_size))}, step=cur_nimg//1000)
wandb.log({'fakes_raw': wandb.Image(get_image_grid(images_raw, drange=[-1,1], grid_size=grid_size))}, step=cur_nimg//1000)
wandb.log({'fakes_depth': wandb.Image(get_image_grid(images_depth, drange=[images_depth.min(), images_depth.max()], grid_size=grid_size))}, step=cur_nimg//1000)
if 'semantic' in out[0]:
wandb.log({'fakes_semantic': wandb.Image(get_image_grid(images_semantic_color, drange=[0, 255], grid_size=grid_size))}, step=cur_nimg//1000)
wandb.log({'fakes_semantic_raw': wandb.Image(get_image_grid(images_semantic_raw_color, drange=[0, 255], grid_size=grid_size))}, step=cur_nimg//1000)
if 'weight' in out[0]:
wandb.log({'fakes_weight': wandb.Image(get_image_grid(images_weight, drange=[0, 1], grid_size=grid_size))}, step=cur_nimg//1000)
#--------------------
# Log front-view images.
del out
forward_cam2world_pose = LookAtPoseSampler.sample(3.14/2, 3.14/2, torch.tensor([0, 0, 0.2], device=device), radius=2.7, device=device)
intrinsics = torch.tensor([[4.2647, 0, 0.5], [0, 4.2647, 0.5], [0, 0, 1]], device=device)
forward_pose = torch.cat([forward_cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1)
out = []
for round_idx in range(len(grid_z)):
o = G_ema(grid_z[round_idx].to(device), forward_pose.expand(grid_z[round_idx].shape[0], -1),
{'image':grid_i[round_idx].to(device), 'pose':grid_p[round_idx].to(device), 'mask':grid_m[round_idx].to(device)}, noise_mode='const')
out.append(o)
# out = [G_ema(z=z, c=c, noise_mode='const') for z, c in zip(grid_z, grid_c)]
images = torch.cat([o['image'].cpu() for o in out]).numpy()
images_raw = torch.cat([o['image_raw'].cpu() for o in out]).numpy()
images_depth = -torch.cat([o['image_depth'].cpu() for o in out]).numpy()
save_image_grid(images, os.path.join(run_dir, f'fakes_front{cur_nimg//1000:06d}.png'), drange=[-1,1], grid_size=grid_size)
save_image_grid(images_raw, os.path.join(run_dir, f'fakes_front{cur_nimg//1000:06d}_raw.png'), drange=[-1,1], grid_size=grid_size)
save_image_grid(images_depth, os.path.join(run_dir, f'fakes_front{cur_nimg//1000:06d}_depth.png'), drange=[images_depth.min(), images_depth.max()], grid_size=grid_size)
if 'semantic' in out[0]:
if training_set_kwargs.data_type == 'seg':
images_semantic = torch.cat([torch.argmax(o['semantic'], dim=1).cpu() for o in out]).numpy()
images_semantic_raw = torch.cat([torch.argmax(o['semantic_raw'], dim=1).cpu() for o in out]).numpy()
images_semantic_color = color_mask(images_semantic).transpose(0, 3, 1, 2)
images_semantic_raw_color = color_mask(images_semantic_raw).transpose(0, 3, 1, 2)
elif training_set_kwargs.data_type == 'edge':
images_semantic = torch.cat([o['semantic'].cpu() for o in out]).numpy()
images_semantic_raw = torch.cat([o['semantic_raw'].cpu() for o in out]).numpy()
images_semantic_color = (images_semantic + 1) * 127.5
images_semantic_raw_color = (images_semantic_raw + 1) * 127.5
save_image_grid(images_semantic_color, os.path.join(run_dir, f'fakes_front{cur_nimg//1000:06d}_semantic.png'), drange=[0, 255], grid_size=grid_size)
save_image_grid(images_semantic_raw_color, os.path.join(run_dir, f'fakes_front{cur_nimg//1000:06d}_semantic_raw.png'), drange=[0, 255], grid_size=grid_size)
if wandb_log:
wandb.log({'fakes_front': wandb.Image(get_image_grid(images, drange=[-1,1], grid_size=grid_size))}, step=cur_nimg//1000)
wandb.log({'fakes_front_raw': wandb.Image(get_image_grid(images_raw, drange=[-1,1], grid_size=grid_size))}, step=cur_nimg//1000)
wandb.log({'fakes_front_depth': wandb.Image(get_image_grid(images_depth, drange=[images_depth.min(), images_depth.max()], grid_size=grid_size))}, step=cur_nimg//1000)
if 'semantic' in out[0]:
wandb.log({'fakes_front_semantic': wandb.Image(get_image_grid(images_semantic_color, drange=[0, 255], grid_size=grid_size))}, step=cur_nimg//1000)
wandb.log({'fakes_front_semantic_raw': wandb.Image(get_image_grid(images_semantic_raw_color, drange=[0, 255], grid_size=grid_size))}, step=cur_nimg//1000)
#--------------------
# Log Multi-view images.
log_table(G_ema, grid_z, grid_i, grid_c, grid_m, grid_p, mask_type=training_set_kwargs.data_type, global_step=cur_nimg//1000, device=device, wandb=wandb)
#--------------------
# # Log forward-conditioned images
# forward_cam2world_pose = LookAtPoseSampler.sample(3.14/2, 3.14/2, torch.tensor([0, 0, 0.2], device=device), radius=2.7, device=device)
# intrinsics = torch.tensor([[4.2647, 0, 0.5], [0, 4.2647, 0.5], [0, 0, 1]], device=device)
# forward_label = torch.cat([forward_cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1)
# grid_ws = [G_ema.mapping(z, forward_label.expand(z.shape[0], -1)) for z, c in zip(grid_z, grid_c)]
# out = [G_ema.synthesis(ws, c=c, noise_mode='const') for ws, c in zip(grid_ws, grid_c)]
# images = torch.cat([o['image'].cpu() for o in out]).numpy()
# images_raw = torch.cat([o['image_raw'].cpu() for o in out]).numpy()
# images_depth = -torch.cat([o['image_depth'].cpu() for o in out]).numpy()
# save_image_grid(images, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_f.png'), drange=[-1,1], grid_size=grid_size)
# save_image_grid(images_raw, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_raw_f.png'), drange=[-1,1], grid_size=grid_size)
# save_image_grid(images_depth, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_depth_f.png'), drange=[images_depth.min(), images_depth.max()], grid_size=grid_size)
#--------------------
# # Log Cross sections
# grid_ws = [G_ema.mapping(z, c.expand(z.shape[0], -1)) for z, c in zip(grid_z, grid_c)]
# out = [sample_cross_section(G_ema, ws, w=G.rendering_kwargs['box_warp']) for ws, c in zip(grid_ws, grid_c)]
# crossections = torch.cat([o.cpu() for o in out]).numpy()
# save_image_grid(crossections, os.path.join(run_dir, f'fakes{cur_nimg//1000:06d}_crossection.png'), drange=[-50,100], grid_size=grid_size)
# Save network snapshot.
snapshot_pkl = None
snapshot_data = None
if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
snapshot_data = dict(training_set_kwargs=dict(training_set_kwargs))
for name, module in [('G', G), ('D', D), ('G_ema', G_ema), ('augment_pipe', augment_pipe), ('D_semantic', D_semantic)]:
if module is not None:
if num_gpus > 1:
misc.check_ddp_consistency(module, ignore_regex=r'.*\.[^.]+_(avg|ema)')
module = copy.deepcopy(module).eval().requires_grad_(False).cpu()
else:
continue
snapshot_data[name] = module
del module # conserve memory
snapshot_pkl = os.path.join(run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
if rank == 0:
with open(snapshot_pkl, 'wb') as f:
pickle.dump(snapshot_data, f)
# Evaluate metrics.
if (snapshot_data is not None) and (len(metrics) > 0) and not no_eval:
if rank == 0:
print(run_dir)
print('Evaluating metrics...')
for metric in metrics:
result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
if rank == 0:
metric_main.report_metric(result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
stats_metrics.update(result_dict.results)
del snapshot_data # conserve memory
# Collect statistics.
if debug:
print(f"rank: {rank}, cur_tick: {cur_tick}, Collecting statistics.")
for phase in phases:
value = []
if (phase.start_event is not None) and (phase.end_event is not None):
phase.end_event.synchronize()
value = phase.start_event.elapsed_time(phase.end_event)
training_stats.report0('Timing/' + phase.name, value)
stats_collector.update()
stats_dict = stats_collector.as_dict()
# Update logs.
if debug:
print(f"rank: {rank}, cur_tick: {cur_tick}, Updating logs.")
timestamp = time.time()
if stats_jsonl is not None:
fields = dict(stats_dict, timestamp=timestamp)
stats_jsonl.write(json.dumps(fields) + '\n')
stats_jsonl.flush()
if stats_tfevents is not None:
global_step = int(cur_nimg / 1e3)
walltime = timestamp - start_time
for name, value in stats_dict.items():
stats_tfevents.add_scalar(name, value.mean, global_step=global_step, walltime=walltime)
for name, value in stats_metrics.items():
stats_tfevents.add_scalar(f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
stats_tfevents.flush()
if progress_fn is not None:
progress_fn(cur_nimg // 1000, total_kimg)
if wandb_log and rank == 0:
wandb.log({name: value.mean for name, value in stats_dict.items()} , step=int(cur_nimg / 1e3))
wandb.log(stats_metrics, step=int(cur_nimg / 1e3))
# Update state.
if debug:
print(f"rank: {rank}, cur_tick: {cur_tick}, Updating state.")
cur_tick += 1
tick_start_nimg = cur_nimg
tick_start_time = time.time()
maintenance_time = tick_start_time - tick_end_time
if done:
break
# Done.
if rank == 0:
print()
print('Exiting...')
#----------------------------------------------------------------------------
| 45,240 | 55.410224 | 266 | py |
pix2pix3D | pix2pix3D-main/training/networks_stylegan2.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Network architectures from the paper
"Analyzing and Improving the Image Quality of StyleGAN".
Matches the original implementation of configs E-F by Karras et al. at
https://github.com/NVlabs/stylegan2/blob/master/training/networks_stylegan2.py"""
import numpy as np
import torch
from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import conv2d_resample
from torch_utils.ops import upfirdn2d
from torch_utils.ops import bias_act
from torch_utils.ops import fma
#----------------------------------------------------------------------------
@misc.profiled_function
def normalize_2nd_moment(x, dim=1, eps=1e-8):
    """Scale `x` so its second moment along `dim` is 1 (pixel-norm style).

    `eps` guards against division by zero for all-zero slices.
    """
    second_moment = x.square().mean(dim=dim, keepdim=True)
    return x * (second_moment + eps).rsqrt()
#----------------------------------------------------------------------------
@misc.profiled_function
def modulated_conv2d(
    x,                          # Input tensor of shape [batch_size, in_channels, in_height, in_width].
    weight,                     # Weight tensor of shape [out_channels, in_channels, kernel_height, kernel_width].
    styles,                     # Modulation coefficients of shape [batch_size, in_channels].
    noise           = None,     # Optional noise tensor to add to the output activations.
    up              = 1,        # Integer upsampling factor.
    down            = 1,        # Integer downsampling factor.
    padding         = 0,        # Padding with respect to the upsampled image.
    resample_filter = None,     # Low-pass filter to apply when resampling activations. Must be prepared beforehand by calling upfirdn2d.setup_filter().
    demodulate      = True,     # Apply weight demodulation?
    flip_weight     = True,     # False = convolution, True = correlation (matches torch.nn.functional.conv2d).
    fused_modconv   = True,     # Perform modulation, convolution, and demodulation as a single fused operation?
    ):
    """Style-modulated 2D convolution (StyleGAN2).

    Scales the convolution weights per sample by `styles`, optionally
    demodulates (normalizes each output channel's effective weight norm),
    convolves via conv2d_resample, and adds `noise`. Two equivalent execution
    paths: a fused grouped convolution (one conv call with per-sample
    weights), or modulation/demodulation applied as activation scalings
    around a single shared-weight convolution.
    """
    batch_size = x.shape[0]
    out_channels, in_channels, kh, kw = weight.shape
    misc.assert_shape(weight, [out_channels, in_channels, kh, kw]) # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
    misc.assert_shape(styles, [batch_size, in_channels]) # [NI]

    # Pre-normalize inputs to avoid FP16 overflow.
    # Rescaling weight by its inf-norm and styles likewise keeps intermediate
    # products bounded; demodulation later cancels the rescaling.
    if x.dtype == torch.float16 and demodulate:
        weight = weight * (1 / np.sqrt(in_channels * kh * kw) / weight.norm(float('inf'), dim=[1,2,3], keepdim=True)) # max_Ikk
        styles = styles / styles.norm(float('inf'), dim=1, keepdim=True) # max_I

    # Calculate per-sample weights and demodulation coefficients.
    w = None
    dcoefs = None
    if demodulate or fused_modconv:
        w = weight.unsqueeze(0) # [NOIkk]
        w = w * styles.reshape(batch_size, 1, -1, 1, 1) # [NOIkk]
    if demodulate:
        # Per-sample, per-output-channel inverse weight norm; 1e-8 avoids div-by-zero.
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
    if demodulate and fused_modconv:
        w = w * dcoefs.reshape(batch_size, -1, 1, 1, 1) # [NOIkk]

    # Execute by scaling the activations before and after the convolution.
    if not fused_modconv:
        x = x * styles.to(x.dtype).reshape(batch_size, -1, 1, 1)
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, flip_weight=flip_weight)
        if demodulate and noise is not None:
            # Fused multiply-add: x * dcoefs + noise in one op.
            x = fma.fma(x, dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1), noise.to(x.dtype))
        elif demodulate:
            x = x * dcoefs.to(x.dtype).reshape(batch_size, -1, 1, 1)
        elif noise is not None:
            x = x.add_(noise.to(x.dtype))
        return x

    # Execute as one fused op using grouped convolution.
    # Batch dim is folded into channels; groups=batch_size gives each sample
    # its own modulated kernel in a single conv call.
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        batch_size = int(batch_size)
    misc.assert_shape(x, [batch_size, in_channels, None, None])
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_resample.conv2d_resample(x=x, w=w.to(x.dtype), f=resample_filter, up=up, down=down, padding=padding, groups=batch_size, flip_weight=flip_weight)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    if noise is not None:
        x = x.add_(noise)
    return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    """Fully connected layer with equalized learning rate and optional bias/activation."""
    def __init__(self,
        in_features,                # Number of input features.
        out_features,               # Number of output features.
        bias            = True,     # Apply additive bias before the activation function?
        activation      = 'linear', # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 1,        # Learning rate multiplier.
        bias_init       = 0,        # Initial value for the additive bias.
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.activation = activation
        # Equalized LR: weights are stored divided by lr_multiplier and the
        # compensating gain is applied at runtime in forward().
        self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) / lr_multiplier)
        if bias:
            self.bias = torch.nn.Parameter(torch.full([out_features], np.float32(bias_init)))
        else:
            self.bias = None
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        weight = self.weight.to(x.dtype) * self.weight_gain
        bias = self.bias
        if bias is not None:
            bias = bias.to(x.dtype)
            if self.bias_gain != 1:
                bias = bias * self.bias_gain

        if self.activation == 'linear' and bias is not None:
            # Fast path: fused bias + matmul.
            return torch.addmm(bias.unsqueeze(0), x, weight.t())
        out = x.matmul(weight.t())
        return bias_act.bias_act(out, bias, act=self.activation)

    def extra_repr(self):
        return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class Conv2dLayer(torch.nn.Module):
    """Plain (unmodulated) conv layer with optional resampling, bias, and activation."""
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        kernel_size,                    # Width and height of the convolution kernel.
        bias            = True,         # Apply additive bias before the activation function?
        activation      = 'linear',     # Activation function: 'relu', 'lrelu', etc.
        up              = 1,            # Integer upsampling factor.
        down            = 1,            # Integer downsampling factor.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output to +-X, None = disable clamping.
        channels_last   = False,        # Expect the input to have memory_format=channels_last?
        trainable       = True,         # Update the weights of this layer during training?
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.activation = activation
        self.up = up
        self.down = down
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        # Equalized-LR gain applied to the stored weights at runtime.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        fmt = torch.channels_last if channels_last else torch.contiguous_format
        init_weight = torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=fmt)
        init_bias = torch.zeros([out_channels]) if bias else None
        if trainable:
            self.weight = torch.nn.Parameter(init_weight)
            self.bias = torch.nn.Parameter(init_bias) if init_bias is not None else None
        else:
            # Frozen layer: keep tensors as buffers so they still move with the module.
            self.register_buffer('weight', init_weight)
            if init_bias is not None:
                self.register_buffer('bias', init_bias)
            else:
                self.bias = None

    def forward(self, x, gain=1):
        weight = self.weight * self.weight_gain
        bias = self.bias.to(x.dtype) if self.bias is not None else None
        flip = (self.up == 1) # correlation is slightly faster than convolution when not upsampling
        x = conv2d_resample.conv2d_resample(x=x, w=weight.to(x.dtype), f=self.resample_filter,
            up=self.up, down=self.down, padding=self.padding, flip_weight=flip)

        clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        return bias_act.bias_act(x, bias, act=self.activation, gain=self.act_gain * gain, clamp=clamp)

    def extra_repr(self):
        return ' '.join([
            f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, activation={self.activation:s},',
            f'up={self.up}, down={self.down}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    """MLP mapping latent z (and optional condition c) to intermediate latents w.

    Optionally tracks a moving average of w (`w_avg`) during training for
    truncation, and broadcasts the output to `num_ws` per-layer copies.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no label.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers      = 8,        # Number of mapping layers.
        embed_features  = None,     # Label embedding dimensionality, None = same as w_dim.
        layer_features  = None,     # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation      = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.998,    # Decay for tracking the moving average of W during training, None = do not track.
        **unused_kwargs             # Ignore unrecognized keyword args.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta

        if embed_features is None:
            embed_features = w_dim
        if c_dim == 0:
            embed_features = 0
        if layer_features is None:
            layer_features = w_dim
        # Feature widths: [z + embedded c] -> hidden layers -> w_dim.
        features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]

        if c_dim > 0:
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)
        if num_ws is not None and w_avg_beta is not None:
            self.register_buffer('w_avg', torch.zeros([w_dim]))

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32))
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                y = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, y], dim=1) if x is not None else y

        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)

        # Update moving average of W.
        if update_emas and self.w_avg_beta is not None:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))

        # Broadcast to one w per synthesis layer.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])

        # Apply truncation: interpolate towards w_avg (optionally only for the
        # first truncation_cutoff layers).
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x

    def extra_repr(self):
        # Bug fix: num_ws may legitimately be None ("do not broadcast"), which
        # the former ':d' format specifier raised TypeError on; plain formatting
        # prints integers identically and handles None.
        return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    """Single style-modulated convolution with optional upsampling and noise."""
    def __init__(self,
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        w_dim,                          # Intermediate latent (W) dimensionality.
        resolution,                     # Resolution of this layer.
        kernel_size     = 3,            # Convolution kernel size.
        up              = 1,            # Integer upsampling factor.
        use_noise       = True,         # Enable noise input?
        activation      = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp      = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        channels_last   = False,        # Use channels_last format for the weights?
        **unused_kwargs                 # Ignore unrecognized keyword args.
    ):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.up = up
        self.use_noise = use_noise
        self.activation = activation
        self.conv_clamp = conv_clamp
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.padding = kernel_size // 2
        self.act_gain = bias_act.activation_funcs[activation].def_gain

        # Affine map from w to per-input-channel styles; bias_init=1 so an
        # untrained layer starts near identity modulation.
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        fmt = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=fmt))
        if use_noise:
            self.register_buffer('noise_const', torch.randn([resolution, resolution]))
            self.noise_strength = torch.nn.Parameter(torch.zeros([]))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))

    def forward(self, x, w, noise_mode='random', fused_modconv=True, gain=1):
        assert noise_mode in ['random', 'const', 'none']
        in_resolution = self.resolution // self.up
        misc.assert_shape(x, [None, self.in_channels, in_resolution, in_resolution])
        styles = self.affine(w)

        # Select the additive noise source (scaled by the learned strength).
        noise = None
        if self.use_noise:
            if noise_mode == 'random':
                noise = torch.randn([x.shape[0], 1, self.resolution, self.resolution], device=x.device) * self.noise_strength
            elif noise_mode == 'const':
                noise = self.noise_const * self.noise_strength

        x = modulated_conv2d(x=x, weight=self.weight, styles=styles, noise=noise, up=self.up,
            padding=self.padding, resample_filter=self.resample_filter,
            flip_weight=(self.up == 1), fused_modconv=fused_modconv)

        clamp = self.conv_clamp * gain if self.conv_clamp is not None else None
        return bias_act.bias_act(x, self.bias.to(x.dtype), act=self.activation, gain=self.act_gain * gain, clamp=clamp)

    def extra_repr(self):
        return ' '.join([
            f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d},',
            f'resolution={self.resolution:d}, up={self.up}, activation={self.activation:s}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class ToRGBLayer(torch.nn.Module):
    """Modulated 1x1 convolution (no demodulation) projecting features to image channels."""
    def __init__(self, in_channels, out_channels, w_dim, kernel_size=1, conv_clamp=None, channels_last=False):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.w_dim = w_dim
        self.conv_clamp = conv_clamp
        self.affine = FullyConnectedLayer(w_dim, in_channels, bias_init=1)
        fmt = torch.channels_last if channels_last else torch.contiguous_format
        self.weight = torch.nn.Parameter(torch.randn([out_channels, in_channels, kernel_size, kernel_size]).to(memory_format=fmt))
        self.bias = torch.nn.Parameter(torch.zeros([out_channels]))
        # Equalized-LR gain; folded into the styles at runtime since there is
        # no demodulation to cancel it.
        self.weight_gain = 1 / np.sqrt(in_channels * (kernel_size ** 2))

    def forward(self, x, w, fused_modconv=True):
        styles = self.affine(w) * self.weight_gain
        out = modulated_conv2d(x=x, weight=self.weight, styles=styles, demodulate=False, fused_modconv=fused_modconv)
        return bias_act.bias_act(out, self.bias.to(out.dtype), clamp=self.conv_clamp)

    def extra_repr(self):
        return f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}, w_dim={self.w_dim:d}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisBlock(torch.nn.Module):
    """One resolution level of the synthesis network.

    First block (in_channels == 0) starts from a learned constant; later
    blocks upsample 2x via conv0 then apply conv1. Depending on
    `architecture`, the block also accumulates a skip-connection RGB image
    ('skip') or adds a residual path ('resnet').
    """
    def __init__(self,
        in_channels,                            # Number of input channels, 0 = first block.
        out_channels,                           # Number of output channels.
        w_dim,                                  # Intermediate latent (W) dimensionality.
        resolution,                             # Resolution of this block.
        img_channels,                           # Number of output color channels.
        is_last,                                # Is this the last block?
        architecture        = 'skip',           # Architecture: 'orig', 'skip', 'resnet'.
        resample_filter     = [1,3,3,1],        # Low-pass filter to apply when resampling activations.
        conv_clamp          = 256,              # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16            = False,            # Use FP16 for this block?
        fp16_channels_last  = False,            # Use channels-last memory format with FP16?
        fused_modconv_default = True,           # Default value of fused_modconv. 'inference_only' = True for inference, False for training.
        **layer_kwargs,                         # Arguments for SynthesisLayer.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.is_last = is_last
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.fused_modconv_default = fused_modconv_default
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        # num_conv / num_torgb tell the caller how many w vectors this block consumes.
        self.num_conv = 0
        self.num_torgb = 0

        if in_channels == 0:
            # First block: learned constant input instead of upsampled features.
            self.const = torch.nn.Parameter(torch.randn([out_channels, resolution, resolution]))

        if in_channels != 0:
            self.conv0 = SynthesisLayer(in_channels, out_channels, w_dim=w_dim, resolution=resolution, up=2,
                resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
            self.num_conv += 1

        self.conv1 = SynthesisLayer(out_channels, out_channels, w_dim=w_dim, resolution=resolution,
            conv_clamp=conv_clamp, channels_last=self.channels_last, **layer_kwargs)
        self.num_conv += 1

        if is_last or architecture == 'skip':
            self.torgb = ToRGBLayer(out_channels, img_channels, w_dim=w_dim,
                conv_clamp=conv_clamp, channels_last=self.channels_last)
            self.num_torgb += 1

        if in_channels != 0 and architecture == 'resnet':
            # 1x1 unparameterized-bias skip path, upsampling to match conv0's output.
            self.skip = Conv2dLayer(in_channels, out_channels, kernel_size=1, bias=False, up=2,
                resample_filter=resample_filter, channels_last=self.channels_last)

    def forward(self, x, img, ws, force_fp32=False, fused_modconv=None, update_emas=False, **layer_kwargs):
        _ = update_emas # unused
        misc.assert_shape(ws, [None, self.num_conv + self.num_torgb, self.w_dim])
        # Consume one w per conv layer (and one for torgb) in order.
        w_iter = iter(ws.unbind(dim=1))
        if ws.device.type != 'cuda':
            # FP16 path is only exercised on CUDA.
            force_fp32 = True
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        if fused_modconv is None:
            fused_modconv = self.fused_modconv_default
        if fused_modconv == 'inference_only':
            fused_modconv = (not self.training)

        # Input.
        if self.in_channels == 0:
            x = self.const.to(dtype=dtype, memory_format=memory_format)
            x = x.unsqueeze(0).repeat([ws.shape[0], 1, 1, 1])
        else:
            misc.assert_shape(x, [None, self.in_channels, self.resolution // 2, self.resolution // 2])
            x = x.to(dtype=dtype, memory_format=memory_format)

        # Main layers.
        if self.in_channels == 0:
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
        elif self.architecture == 'resnet':
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            # gain=sqrt(0.5) on both paths keeps the residual sum's variance stable.
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, gain=np.sqrt(0.5), **layer_kwargs)
            x = y.add_(x)
        else:
            x = self.conv0(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)
            x = self.conv1(x, next(w_iter), fused_modconv=fused_modconv, **layer_kwargs)

        # ToRGB: upsample the running skip image and add this block's contribution.
        if img is not None:
            misc.assert_shape(img, [None, self.img_channels, self.resolution // 2, self.resolution // 2])
            img = upfirdn2d.upsample2d(img, self.resample_filter)
        if self.is_last or self.architecture == 'skip':
            y = self.torgb(x, next(w_iter), fused_modconv=fused_modconv)
            # Skip image is always accumulated in FP32 regardless of block dtype.
            y = y.to(dtype=torch.float32, memory_format=torch.contiguous_format)
            img = img.add_(y) if img is not None else y

        assert x.dtype == dtype
        assert img is None or img.dtype == torch.float32
        return x, img

    def extra_repr(self):
        return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    """Stack of SynthesisBlocks from 4x4 up to img_resolution, driven by per-layer w vectors."""
    def __init__(self,
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output image resolution.
        img_channels,               # Number of color channels.
        channel_base    = 32768,    # Overall multiplier for the number of channels.
        channel_max     = 512,      # Maximum number of channels in any layer.
        num_fp16_res    = 4,        # Use FP16 for the N highest resolutions.
        **block_kwargs,             # Arguments for SynthesisBlock.
    ):
        # Resolution must be a power of two >= 4.
        assert img_resolution >= 4 and img_resolution & (img_resolution - 1) == 0
        super().__init__()
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        self.num_fp16_res = num_fp16_res
        self.block_resolutions = [2 ** i for i in range(2, self.img_resolution_log2 + 1)]
        # Channel count halves as resolution doubles, capped at channel_max.
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)

        self.num_ws = 0
        for res in self.block_resolutions:
            is_first = (res == 4)
            is_last = (res == self.img_resolution)
            block = SynthesisBlock(
                in_channels=0 if is_first else channels_dict[res // 2],
                out_channels=channels_dict[res],
                w_dim=w_dim, resolution=res, img_channels=img_channels,
                is_last=is_last, use_fp16=(res >= fp16_resolution), **block_kwargs)
            self.num_ws += block.num_conv
            if is_last:
                self.num_ws += block.num_torgb
            setattr(self, f'b{res}', block)

    def forward(self, ws, **block_kwargs):
        # Slice the flat [N, num_ws, w_dim] tensor into per-block views.
        # Consecutive blocks overlap by num_torgb so each block also gets its
        # torgb latent.
        per_block_ws = []
        with torch.autograd.profiler.record_function('split_ws'):
            misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
            ws = ws.to(torch.float32)
            offset = 0
            for res in self.block_resolutions:
                block = getattr(self, f'b{res}')
                per_block_ws.append(ws.narrow(1, offset, block.num_conv + block.num_torgb))
                offset += block.num_conv

        x = img = None
        for res, ws_slice in zip(self.block_resolutions, per_block_ws):
            block = getattr(self, f'b{res}')
            x, img = block(x, img, ws_slice, **block_kwargs)
        return img

    def extra_repr(self):
        return ' '.join([
            f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
            f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
            f'num_fp16_res={self.num_fp16_res:d}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class Generator(torch.nn.Module):
    """StyleGAN2 generator: the mapping network turns (z, c) into broadcast
    W latents, which the synthesis network renders into an image.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        mapping_kwargs      = {},   # Arguments for MappingNetwork.
        **synthesis_kwargs,         # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        # Record hyperparameters for introspection by callers.
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        # Build synthesis first: its layer count dictates how many W vectors
        # the mapping network has to broadcast.
        self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        """Map latents to W (with optional truncation), then synthesize the image."""
        latents = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        return self.synthesis(latents, update_emas=update_emas, **synthesis_kwargs)
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorBlock(torch.nn.Module):
    """StyleGAN2 discriminator block: FromRGB (when applicable), two convs,
    and a 2x downsample, with an optional residual skip path.
    """
    def __init__(self,
        in_channels,                        # Number of input channels, 0 = first block.
        tmp_channels,                       # Number of intermediate channels.
        out_channels,                       # Number of output channels.
        resolution,                         # Resolution of this block.
        img_channels,                       # Number of input color channels.
        first_layer_idx,                    # Index of the first layer.
        architecture        = 'resnet',     # Architecture: 'orig', 'skip', 'resnet'.
        activation          = 'lrelu',      # Activation function: 'relu', 'lrelu', etc.
        resample_filter     = [1,3,3,1],    # Low-pass filter to apply when resampling activations.
        conv_clamp          = None,         # Clamp the output of convolution layers to +-X, None = disable clamping.
        use_fp16            = False,        # Use FP16 for this block?
        fp16_channels_last  = False,        # Use channels-last memory format with FP16?
        freeze_layers       = 0,            # Freeze-D: Number of layers to freeze.
    ):
        assert in_channels in [0, tmp_channels]
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.resolution = resolution
        self.img_channels = img_channels
        self.first_layer_idx = first_layer_idx
        self.architecture = architecture
        self.use_fp16 = use_fp16
        self.channels_last = (use_fp16 and fp16_channels_last)
        self.register_buffer('resample_filter', upfirdn2d.setup_filter(resample_filter))
        self.num_layers = 0
        # Freeze-D: yields one trainable flag per constructed layer, freezing
        # the first `freeze_layers` layers globally (counted across blocks via
        # first_layer_idx). Side effect: counts layers into self.num_layers.
        def trainable_gen():
            while True:
                layer_idx = self.first_layer_idx + self.num_layers
                trainable = (layer_idx >= freeze_layers)
                self.num_layers += 1
                yield trainable
        trainable_iter = trainable_gen()
        # FromRGB is needed by the first block (no feature input) and by every
        # block in the 'skip' architecture.
        if in_channels == 0 or architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, tmp_channels, kernel_size=1, activation=activation,
                trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
        self.conv0 = Conv2dLayer(tmp_channels, tmp_channels, kernel_size=3, activation=activation,
            trainable=next(trainable_iter), conv_clamp=conv_clamp, channels_last=self.channels_last)
        self.conv1 = Conv2dLayer(tmp_channels, out_channels, kernel_size=3, activation=activation, down=2,
            trainable=next(trainable_iter), resample_filter=resample_filter, conv_clamp=conv_clamp, channels_last=self.channels_last)
        if architecture == 'resnet':
            self.skip = Conv2dLayer(tmp_channels, out_channels, kernel_size=1, bias=False, down=2,
                trainable=next(trainable_iter), resample_filter=resample_filter, channels_last=self.channels_last)
    def forward(self, x, img, force_fp32=False):
        """Process features x (may be None for the first block) and, for
        'skip'/first block, the RGB image; returns updated (x, img)."""
        if (x if x is not None else img).device.type != 'cuda':
            force_fp32 = True  # FP16 path is only exercised on CUDA devices.
        dtype = torch.float16 if self.use_fp16 and not force_fp32 else torch.float32
        memory_format = torch.channels_last if self.channels_last and not force_fp32 else torch.contiguous_format
        # Input.
        if x is not None:
            misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution])
            x = x.to(dtype=dtype, memory_format=memory_format)
        # FromRGB.
        if self.in_channels == 0 or self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            img = img.to(dtype=dtype, memory_format=memory_format)
            y = self.fromrgb(img)
            x = x + y if x is not None else y
            # 'skip' keeps passing a downsampled image to the next block.
            img = upfirdn2d.downsample2d(img, self.resample_filter) if self.architecture == 'skip' else None
        # Main layers.
        if self.architecture == 'resnet':
            # sqrt(0.5) gains keep the residual sum at unit variance.
            y = self.skip(x, gain=np.sqrt(0.5))
            x = self.conv0(x)
            x = self.conv1(x, gain=np.sqrt(0.5))
            x = y.add_(x)
        else:
            x = self.conv0(x)
            x = self.conv1(x)
        assert x.dtype == dtype
        return x, img
    def extra_repr(self):
        return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class MinibatchStdLayer(torch.nn.Module):
    """Appends per-group minibatch standard-deviation statistics as extra
    feature channels (classic StyleGAN discriminator trick to encourage
    sample diversity).
    """
    def __init__(self, group_size, num_channels=1):
        super().__init__()
        self.group_size = group_size
        self.num_channels = num_channels

    def forward(self, x):
        batch, chans, height, width = x.shape
        with misc.suppress_tracer_warnings(): # as_tensor results are registered as constants
            group = torch.min(torch.as_tensor(self.group_size), torch.as_tensor(batch)) if self.group_size is not None else batch
        feat = self.num_channels
        sub = chans // feat
        stats = x.reshape(group, -1, feat, sub, height, width) # Split batch into groups of `group`, channels into `feat` groups of `sub`.
        stats = stats - stats.mean(dim=0)                      # Center each group.
        stats = stats.square().mean(dim=0)                     # Variance over the group dimension.
        stats = (stats + 1e-8).sqrt()                          # Standard deviation (epsilon for numerical safety).
        stats = stats.mean(dim=[2,3,4])                        # Average over channels and pixels -> one scalar per (group, feature).
        stats = stats.reshape(-1, feat, 1, 1)                  # Restore channel/spatial dims.
        stats = stats.repeat(group, 1, height, width)          # Broadcast back over group members and pixels.
        return torch.cat([x, stats], dim=1)                    # Append as new channels.

    def extra_repr(self):
        return f'group_size={self.group_size}, num_channels={self.num_channels:d}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class DiscriminatorEpilogue(torch.nn.Module):
    """Final 4x4 stage of the StyleGAN2 discriminator: minibatch-stddev,
    conv, fully-connected head, and optional projection onto the mapped
    conditioning label.
    """
    def __init__(self,
        in_channels,                    # Number of input channels.
        cmap_dim,                       # Dimensionality of mapped conditioning label, 0 = no label.
        resolution,                     # Resolution of this block.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        mbstd_group_size    = 4,        # Group size for the minibatch standard deviation layer, None = entire minibatch.
        mbstd_num_channels  = 1,        # Number of features for the minibatch standard deviation layer, 0 = disable.
        activation          = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        conv_clamp          = None,     # Clamp the output of convolution layers to +-X, None = disable clamping.
    ):
        assert architecture in ['orig', 'skip', 'resnet']
        super().__init__()
        self.in_channels = in_channels
        self.cmap_dim = cmap_dim
        self.resolution = resolution
        self.img_channels = img_channels
        self.architecture = architecture
        if architecture == 'skip':
            self.fromrgb = Conv2dLayer(img_channels, in_channels, kernel_size=1, activation=activation)
        self.mbstd = MinibatchStdLayer(group_size=mbstd_group_size, num_channels=mbstd_num_channels) if mbstd_num_channels > 0 else None
        # conv input grows by the channels MinibatchStdLayer appends.
        self.conv = Conv2dLayer(in_channels + mbstd_num_channels, in_channels, kernel_size=3, activation=activation, conv_clamp=conv_clamp)
        self.fc = FullyConnectedLayer(in_channels * (resolution ** 2), in_channels, activation=activation)
        # Unconditional: one logit. Conditional: cmap_dim features to project onto cmap.
        self.out = FullyConnectedLayer(in_channels, 1 if cmap_dim == 0 else cmap_dim)
    def forward(self, x, img, cmap, force_fp32=False):
        """Reduce 4x4 features (plus optional skip image and label map) to a
        per-sample score of shape [N, 1]."""
        misc.assert_shape(x, [None, self.in_channels, self.resolution, self.resolution]) # [NCHW]
        _ = force_fp32 # unused
        # The epilogue always runs in FP32 regardless of earlier blocks.
        dtype = torch.float32
        memory_format = torch.contiguous_format
        # FromRGB.
        x = x.to(dtype=dtype, memory_format=memory_format)
        if self.architecture == 'skip':
            misc.assert_shape(img, [None, self.img_channels, self.resolution, self.resolution])
            img = img.to(dtype=dtype, memory_format=memory_format)
            x = x + self.fromrgb(img)
        # Main layers.
        if self.mbstd is not None:
            x = self.mbstd(x)
        x = self.conv(x)
        x = self.fc(x.flatten(1))
        x = self.out(x)
        # Conditioning: projection-discriminator style dot product with cmap.
        if self.cmap_dim > 0:
            misc.assert_shape(cmap, [None, self.cmap_dim])
            x = (x * cmap).sum(dim=1, keepdim=True) * (1 / np.sqrt(self.cmap_dim))
        assert x.dtype == dtype
        return x
    def extra_repr(self):
        return f'resolution={self.resolution:d}, architecture={self.architecture:s}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class Discriminator(torch.nn.Module):
    """StyleGAN2 discriminator: a pyramid of DiscriminatorBlocks from
    img_resolution down to 8x8, followed by DiscriminatorEpilogue at 4x4.
    Optionally conditions on labels via a MappingNetwork-produced cmap.
    """
    def __init__(self,
        c_dim,                          # Conditioning label (C) dimensionality.
        img_resolution,                 # Input resolution.
        img_channels,                   # Number of input color channels.
        architecture        = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_fp16_res        = 4,        # Use FP16 for the N highest resolutions.
        conv_clamp          = 256,      # Clamp the output of convolution layers to +-X, None = disable clamping.
        cmap_dim            = None,     # Dimensionality of mapped conditioning label, None = default.
        block_kwargs        = {},       # Arguments for DiscriminatorBlock.
        mapping_kwargs      = {},       # Arguments for MappingNetwork.
        epilogue_kwargs     = {},       # Arguments for DiscriminatorEpilogue.
    ):
        super().__init__()
        self.c_dim = c_dim
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        # Blocks run coarse-ward: img_resolution, img_resolution/2, ..., 8.
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, 2, -1)]
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        # Blocks at or above this resolution run in FP16 (the num_fp16_res highest ones).
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        if cmap_dim is None:
            cmap_dim = channels_dict[4]
        if c_dim == 0:
            cmap_dim = 0  # Unconditional: disable label projection entirely.
        common_kwargs = dict(img_channels=img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0  # Global layer index used by Freeze-D inside the blocks.
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
            setattr(self, f'b{res}', block)
            cur_layer_idx += block.num_layers
        if c_dim > 0:
            # z_dim=0: the mapping network only embeds the label into cmap.
            self.mapping = MappingNetwork(z_dim=0, c_dim=c_dim, w_dim=cmap_dim, num_ws=None, w_avg_beta=None, **mapping_kwargs)
        self.b4 = DiscriminatorEpilogue(channels_dict[4], cmap_dim=cmap_dim, resolution=4, **epilogue_kwargs, **common_kwargs)
    def forward(self, img, c, update_emas=False, **block_kwargs):
        """Score a batch of images (optionally conditioned on labels c);
        returns [N, 1] realness logits."""
        _ = update_emas # unused
        x = None
        for res in self.block_resolutions:
            block = getattr(self, f'b{res}')
            x, img = block(x, img, **block_kwargs)
        cmap = None
        if self.c_dim > 0:
            cmap = self.mapping(None, c)
        x = self.b4(x, img, cmap)
        return x
    def extra_repr(self):
        return f'c_dim={self.c_dim:d}, img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d}'
#----------------------------------------------------------------------------
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Generator architecture from the paper
"Alias-Free Generative Adversarial Networks"."""
import numpy as np
import scipy.optimize
import scipy.signal
import scipy.special  # Used by SynthesisLayer.design_lowpass_filter (jinc filter); previously reached only via scipy.signal's transitive imports.
import torch

from torch_utils import misc
from torch_utils import persistence
from torch_utils.ops import bias_act
from torch_utils.ops import conv2d_gradfix
from torch_utils.ops import filtered_lrelu
#----------------------------------------------------------------------------
@misc.profiled_function
def modulated_conv2d(
    x,                  # Input tensor: [batch_size, in_channels, in_height, in_width]
    w,                  # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width]
    s,                  # Style tensor: [batch_size, in_channels]
    demodulate  = True, # Apply weight demodulation?
    padding     = 0,    # Padding: int or [padH, padW]
    input_gain  = None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels]
):
    """Per-sample modulated 2D convolution (StyleGAN2/3 style).

    Scales the shared weights by each sample's style vector, optionally
    demodulates them back to unit expected magnitude, and runs the whole
    batch as one grouped convolution.
    """
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        batch_size = int(x.shape[0])
    out_channels, in_channels, kh, kw = w.shape
    misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk]
    misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW]
    misc.assert_shape(s, [batch_size, in_channels]) # [NI]
    # Pre-normalize inputs to improve numerical stability of the demodulation.
    if demodulate:
        w = w * w.square().mean([1,2,3], keepdim=True).rsqrt()
        s = s * s.square().mean().rsqrt()
    # Modulate weights: per-sample scaling of each input channel.
    w = w.unsqueeze(0) # [NOIkk]
    w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
    # Demodulate weights: rescale each output filter to unit L2 norm.
    if demodulate:
        dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO]
        w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk]
    # Apply input scaling (folded into the weights rather than touching x).
    if input_gain is not None:
        input_gain = input_gain.expand(batch_size, in_channels) # [NI]
        w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk]
    # Execute as one fused op using grouped convolution: fold the batch into
    # the channel axis with groups=batch_size so each sample gets its own
    # weight set without a Python-level loop.
    x = x.reshape(1, -1, *x.shape[2:])
    w = w.reshape(-1, in_channels, kh, kw)
    x = conv2d_gradfix.conv2d(input=x, weight=w.to(x.dtype), padding=padding, groups=batch_size)
    x = x.reshape(batch_size, -1, *x.shape[2:])
    return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class FullyConnectedLayer(torch.nn.Module):
    """Fully-connected layer with equalized learning rate.

    Weights are stored at unit scale and the per-layer gain
    (lr_multiplier / sqrt(in_features)) is folded into the forward pass,
    following the StyleGAN training recipe.
    """
    def __init__(self,
        in_features,                # Number of input features.
        out_features,               # Number of output features.
        activation      = 'linear', # Activation function: 'relu', 'lrelu', etc.
        bias            = True,     # Apply additive bias before the activation function?
        lr_multiplier   = 1,        # Learning rate multiplier.
        weight_init     = 1,        # Initial standard deviation of the weight tensor.
        bias_init       = 0,        # Initial value of the additive bias.
    ):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.activation = activation
        self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) * (weight_init / lr_multiplier))
        if bias:
            # bias_init may be a scalar or a per-feature sequence; broadcast either way.
            init = np.broadcast_to(np.asarray(bias_init, dtype=np.float32), [out_features])
            self.bias = torch.nn.Parameter(torch.from_numpy(init / lr_multiplier))
        else:
            self.bias = None
        self.weight_gain = lr_multiplier / np.sqrt(in_features)
        self.bias_gain = lr_multiplier

    def forward(self, x):
        weight = self.weight.to(x.dtype) * self.weight_gain
        bias = self.bias
        if bias is not None:
            bias = bias.to(x.dtype)
            if self.bias_gain != 1:
                bias = bias * self.bias_gain
        if self.activation == 'linear' and bias is not None:
            # Common case: fused multiply-add.
            return torch.addmm(bias.unsqueeze(0), x, weight.t())
        out = x.matmul(weight.t())
        return bias_act.bias_act(out, bias, act=self.activation)

    def extra_repr(self):
        return 'in_features={:d}, out_features={:d}, activation={:s}'.format(
            self.in_features, self.out_features, self.activation)
#----------------------------------------------------------------------------
@persistence.persistent_class
class MappingNetwork(torch.nn.Module):
    """StyleGAN3 mapping network: embeds (z, c) and maps them through a small
    MLP to the intermediate latent W, broadcast num_ws times, with optional
    truncation towards the tracked W average.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no labels.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output.
        num_layers      = 2,        # Number of mapping layers.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.998,    # Decay for tracking the moving average of W during training.
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta
        # Construct layers. With labels, the embedded label is concatenated to z.
        self.embed = FullyConnectedLayer(self.c_dim, self.w_dim) if self.c_dim > 0 else None
        features = [self.z_dim + (self.w_dim if self.c_dim > 0 else 0)] + [self.w_dim] * self.num_layers
        for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]):
            layer = FullyConnectedLayer(in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)
        # Running mean of W, used as the truncation target.
        self.register_buffer('w_avg', torch.zeros([w_dim]))
    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False):
        """Map (z, c) to broadcast latents of shape [N, num_ws, w_dim]."""
        misc.assert_shape(z, [None, self.z_dim])
        if truncation_cutoff is None:
            truncation_cutoff = self.num_ws
        # Embed, normalize, and concatenate inputs (RMS-normalize each part).
        x = z.to(torch.float32)
        x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt()
        if self.c_dim > 0:
            misc.assert_shape(c, [None, self.c_dim])
            y = self.embed(c.to(torch.float32))
            y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt()
            x = torch.cat([x, y], dim=1) if x is not None else y
        # Execute layers.
        for idx in range(self.num_layers):
            x = getattr(self, f'fc{idx}')(x)
        # Update moving average of W (training only).
        if update_emas:
            self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))
        # Broadcast and apply truncation: blend the first truncation_cutoff
        # latents towards w_avg by factor truncation_psi.
        x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
        if truncation_psi != 1:
            x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x
    def extra_repr(self):
        return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}'
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisInput(torch.nn.Module):
    """StyleGAN3 input layer: generates the initial feature map as a bank of
    Fourier features whose rotation/translation is predicted from w, so the
    whole generator becomes equivariant to those transforms.
    """
    def __init__(self,
        w_dim,          # Intermediate latent (W) dimensionality.
        channels,       # Number of output channels.
        size,           # Output spatial size: int or [width, height].
        sampling_rate,  # Output sampling rate.
        bandwidth,      # Output bandwidth.
    ):
        super().__init__()
        self.w_dim = w_dim
        self.channels = channels
        self.size = np.broadcast_to(np.asarray(size), [2])
        self.sampling_rate = sampling_rate
        self.bandwidth = bandwidth
        # Draw random frequencies from uniform 2D disc.
        freqs = torch.randn([self.channels, 2])
        radii = freqs.square().sum(dim=1, keepdim=True).sqrt()
        freqs /= radii * radii.square().exp().pow(0.25)
        freqs *= bandwidth
        phases = torch.rand([self.channels]) - 0.5
        # Setup parameters and buffers.
        self.weight = torch.nn.Parameter(torch.randn([self.channels, self.channels]))
        # affine predicts (r_c, r_s, t_x, t_y); bias_init makes it the identity transform initially.
        self.affine = FullyConnectedLayer(w_dim, 4, weight_init=0, bias_init=[1,0,0,0])
        self.register_buffer('transform', torch.eye(3, 3)) # User-specified inverse transform wrt. resulting image.
        self.register_buffer('freqs', freqs)
        self.register_buffer('phases', phases)
    def forward(self, w):
        """Produce the initial feature map [N, channels, size[1], size[0]] from w."""
        # Introduce batch dimension.
        transforms = self.transform.unsqueeze(0) # [batch, row, col]
        freqs = self.freqs.unsqueeze(0) # [batch, channel, xy]
        phases = self.phases.unsqueeze(0) # [batch, channel]
        # Apply learned transformation.
        t = self.affine(w) # t = (r_c, r_s, t_x, t_y)
        t = t / t[:, :2].norm(dim=1, keepdim=True) # t' = (r'_c, r'_s, t'_x, t'_y)
        m_r = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse rotation wrt. resulting image.
        m_r[:, 0, 0] = t[:, 0]  # r'_c
        m_r[:, 0, 1] = -t[:, 1] # r'_s
        m_r[:, 1, 0] = t[:, 1]  # r'_s
        m_r[:, 1, 1] = t[:, 0]  # r'_c
        m_t = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse translation wrt. resulting image.
        m_t[:, 0, 2] = -t[:, 2] # t'_x
        m_t[:, 1, 2] = -t[:, 3] # t'_y
        transforms = m_r @ m_t @ transforms # First rotate resulting image, then translate, and finally apply user-specified transform.
        # Transform frequencies.
        phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2)
        freqs = freqs @ transforms[:, :2, :2]
        # Dampen out-of-band frequencies that may occur due to the user-specified transform.
        amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) / (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1)
        # Construct sampling grid.
        theta = torch.eye(2, 3, device=w.device)
        theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate
        theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate
        grids = torch.nn.functional.affine_grid(theta.unsqueeze(0), [1, 1, self.size[1], self.size[0]], align_corners=False)
        # Compute Fourier features.
        x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)).squeeze(3) # [batch, height, width, channel]
        x = x + phases.unsqueeze(1).unsqueeze(2)
        x = torch.sin(x * (np.pi * 2))
        x = x * amplitudes.unsqueeze(1).unsqueeze(2)
        # Apply trainable mapping (a 1x1 linear mix of the Fourier channels).
        weight = self.weight / np.sqrt(self.channels)
        x = x @ weight.t()
        # Ensure correct shape.
        x = x.permute(0, 3, 1, 2) # [batch, channel, height, width]
        misc.assert_shape(x, [w.shape[0], self.channels, int(self.size[1]), int(self.size[0])])
        return x
    def extra_repr(self):
        return '\n'.join([
            f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},',
            f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisLayer(torch.nn.Module):
    """Single StyleGAN3 synthesis layer: modulated conv followed by a
    filtered (anti-aliased) leaky ReLU, with per-layer up/downsampling
    filters designed from the layer's cutoff/stopband specification.
    """
    def __init__(self,
        w_dim,                          # Intermediate latent (W) dimensionality.
        is_torgb,                       # Is this the final ToRGB layer?
        is_critically_sampled,          # Does this layer use critical sampling?
        use_fp16,                       # Does this layer use FP16?
        # Input & output specifications.
        in_channels,                    # Number of input channels.
        out_channels,                   # Number of output channels.
        in_size,                        # Input spatial size: int or [width, height].
        out_size,                       # Output spatial size: int or [width, height].
        in_sampling_rate,               # Input sampling rate (s).
        out_sampling_rate,              # Output sampling rate (s).
        in_cutoff,                      # Input cutoff frequency (f_c).
        out_cutoff,                     # Output cutoff frequency (f_c).
        in_half_width,                  # Input transition band half-width (f_h).
        out_half_width,                 # Output Transition band half-width (f_h).
        # Hyperparameters.
        conv_kernel         = 3,        # Convolution kernel size. Ignored for final the ToRGB layer.
        filter_size         = 6,        # Low-pass filter size relative to the lower resolution when up/downsampling.
        lrelu_upsampling    = 2,        # Relative sampling rate for leaky ReLU. Ignored for final the ToRGB layer.
        use_radial_filters  = False,    # Use radially symmetric downsampling filter? Ignored for critically sampled layers.
        conv_clamp          = 256,      # Clamp the output to [-X, +X], None = disable clamping.
        magnitude_ema_beta  = 0.999,    # Decay rate for the moving average of input magnitudes.
    ):
        super().__init__()
        self.w_dim = w_dim
        self.is_torgb = is_torgb
        self.is_critically_sampled = is_critically_sampled
        self.use_fp16 = use_fp16
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.in_size = np.broadcast_to(np.asarray(in_size), [2])
        self.out_size = np.broadcast_to(np.asarray(out_size), [2])
        self.in_sampling_rate = in_sampling_rate
        self.out_sampling_rate = out_sampling_rate
        # The nonlinearity runs at a temporarily higher rate to avoid aliasing.
        self.tmp_sampling_rate = max(in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling)
        self.in_cutoff = in_cutoff
        self.out_cutoff = out_cutoff
        self.in_half_width = in_half_width
        self.out_half_width = out_half_width
        self.conv_kernel = 1 if is_torgb else conv_kernel
        self.conv_clamp = conv_clamp
        self.magnitude_ema_beta = magnitude_ema_beta
        # Setup parameters and buffers.
        self.affine = FullyConnectedLayer(self.w_dim, self.in_channels, bias_init=1)
        self.weight = torch.nn.Parameter(torch.randn([self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel]))
        self.bias = torch.nn.Parameter(torch.zeros([self.out_channels]))
        self.register_buffer('magnitude_ema', torch.ones([]))
        # Design upsampling filter.
        self.up_factor = int(np.rint(self.tmp_sampling_rate / self.in_sampling_rate))
        assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate
        self.up_taps = filter_size * self.up_factor if self.up_factor > 1 and not self.is_torgb else 1
        self.register_buffer('up_filter', self.design_lowpass_filter(
            numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate))
        # Design downsampling filter.
        self.down_factor = int(np.rint(self.tmp_sampling_rate / self.out_sampling_rate))
        assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate
        self.down_taps = filter_size * self.down_factor if self.down_factor > 1 and not self.is_torgb else 1
        self.down_radial = use_radial_filters and not self.is_critically_sampled
        self.register_buffer('down_filter', self.design_lowpass_filter(
            numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial))
        # Compute padding.
        pad_total = (self.out_size - 1) * self.down_factor + 1 # Desired output size before downsampling.
        pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor # Input size after upsampling.
        pad_total += self.up_taps + self.down_taps - 2 # Size reduction caused by the filters.
        pad_lo = (pad_total + self.up_factor) // 2 # Shift sample locations according to the symmetric interpretation (Appendix C.3).
        pad_hi = pad_total - pad_lo
        self.padding = [int(pad_lo[0]), int(pad_hi[0]), int(pad_lo[1]), int(pad_hi[1])]
    def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False):
        """Apply the layer: modulated conv + filtered leaky ReLU.

        noise_mode is accepted for API compatibility but unused here.
        """
        assert noise_mode in ['random', 'const', 'none'] # unused
        misc.assert_shape(x, [None, self.in_channels, int(self.in_size[1]), int(self.in_size[0])])
        misc.assert_shape(w, [x.shape[0], self.w_dim])
        # Track input magnitude (training only) and normalize by its EMA.
        if update_emas:
            with torch.autograd.profiler.record_function('update_magnitude_ema'):
                magnitude_cur = x.detach().to(torch.float32).square().mean()
                self.magnitude_ema.copy_(magnitude_cur.lerp(self.magnitude_ema, self.magnitude_ema_beta))
        input_gain = self.magnitude_ema.rsqrt()
        # Execute affine layer to obtain per-channel styles.
        styles = self.affine(w)
        if self.is_torgb:
            weight_gain = 1 / np.sqrt(self.in_channels * (self.conv_kernel ** 2))
            styles = styles * weight_gain
        # Execute modulated conv2d (FP16 only on CUDA when enabled).
        dtype = torch.float16 if (self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32
        x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles,
            padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain)
        # Execute bias, filtered leaky ReLU, and clamping.
        gain = 1 if self.is_torgb else np.sqrt(2)
        slope = 1 if self.is_torgb else 0.2
        x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype),
            up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp)
        # Ensure correct shape and dtype.
        misc.assert_shape(x, [None, self.out_channels, int(self.out_size[1]), int(self.out_size[0])])
        assert x.dtype == dtype
        return x
    @staticmethod
    def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False):
        """Design a low-pass FIR filter; returns a float32 tensor or None for
        the identity (single-tap) case."""
        assert numtaps >= 1
        # Identity filter.
        if numtaps == 1:
            return None
        # Separable Kaiser low-pass filter.
        if not radial:
            f = scipy.signal.firwin(numtaps=numtaps, cutoff=cutoff, width=width, fs=fs)
            return torch.as_tensor(f, dtype=torch.float32)
        # Radially symmetric jinc-based filter.
        # NOTE(review): scipy.special is not explicitly imported at the top of
        # this file; it is only reachable via scipy.signal's own imports — add
        # an explicit `import scipy.special`.
        x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs
        r = np.hypot(*np.meshgrid(x, x))
        f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r)
        beta = scipy.signal.kaiser_beta(scipy.signal.kaiser_atten(numtaps, width / (fs / 2)))
        # NOTE(review): np.kaiser is deprecated and removed in NumPy 2.0 —
        # consider scipy.signal.windows.kaiser(numtaps, beta) instead.
        w = np.kaiser(numtaps, beta)
        f *= np.outer(w, w)
        f /= np.sum(f)
        return torch.as_tensor(f, dtype=torch.float32)
    def extra_repr(self):
        return '\n'.join([
            f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},',
            f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},',
            f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},',
            f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},',
            f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},',
            f'in_size={list(self.in_size)}, out_size={list(self.out_size)},',
            f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class SynthesisNetwork(torch.nn.Module):
    """StyleGAN3 synthesis network.

    Builds a fixed number of SynthesisLayers whose cutoff frequencies,
    stopbands, sampling rates, and channel counts follow a geometric
    progression from first_cutoff up to img_resolution/2, starting from a
    Fourier-feature SynthesisInput and ending in a ToRGB layer.
    """
    def __init__(self,
        w_dim,                          # Intermediate latent (W) dimensionality.
        img_resolution,                 # Output image resolution.
        img_channels,                   # Number of color channels.
        channel_base        = 32768,    # Overall multiplier for the number of channels.
        channel_max         = 512,      # Maximum number of channels in any layer.
        num_layers          = 14,       # Total number of layers, excluding Fourier features and ToRGB.
        num_critical        = 2,        # Number of critically sampled layers at the end.
        first_cutoff        = 2,        # Cutoff frequency of the first layer (f_{c,0}).
        first_stopband      = 2**2.1,   # Minimum stopband of the first layer (f_{t,0}).
        last_stopband_rel   = 2**0.3,   # Minimum stopband of the last layer, expressed relative to the cutoff.
        margin_size         = 10,       # Number of additional pixels outside the image.
        output_scale        = 0.25,     # Scale factor for the output image.
        num_fp16_res        = 4,        # Use FP16 for the N highest resolutions.
        **layer_kwargs,                 # Arguments for SynthesisLayer.
    ):
        super().__init__()
        self.w_dim = w_dim
        # One w for the input layer, one per layer, one for ToRGB.
        self.num_ws = num_layers + 2
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        self.num_layers = num_layers
        self.num_critical = num_critical
        self.margin_size = margin_size
        self.output_scale = output_scale
        self.num_fp16_res = num_fp16_res
        # Geometric progression of layer cutoffs and min. stopbands.
        last_cutoff = self.img_resolution / 2 # f_{c,N}
        last_stopband = last_cutoff * last_stopband_rel # f_{t,N}
        exponents = np.minimum(np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1)
        cutoffs = first_cutoff * (last_cutoff / first_cutoff) ** exponents # f_c[i]
        stopbands = first_stopband * (last_stopband / first_stopband) ** exponents # f_t[i]
        # Compute remaining layer parameters.
        sampling_rates = np.exp2(np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i]
        half_widths = np.maximum(stopbands, sampling_rates / 2) - cutoffs # f_h[i]
        sizes = sampling_rates + self.margin_size * 2
        sizes[-2:] = self.img_resolution
        channels = np.rint(np.minimum((channel_base / 2) / cutoffs, channel_max))
        channels[-1] = self.img_channels
        # Construct layers.
        self.input = SynthesisInput(
            w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]),
            sampling_rate=sampling_rates[0], bandwidth=cutoffs[0])
        self.layer_names = []
        for idx in range(self.num_layers + 1):
            prev = max(idx - 1, 0)
            is_torgb = (idx == self.num_layers)
            is_critically_sampled = (idx >= self.num_layers - self.num_critical)
            use_fp16 = (sampling_rates[idx] * (2 ** self.num_fp16_res) > self.img_resolution)
            layer = SynthesisLayer(
                w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16,
                in_channels=int(channels[prev]), out_channels= int(channels[idx]),
                in_size=int(sizes[prev]), out_size=int(sizes[idx]),
                in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]),
                in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx],
                in_half_width=half_widths[prev], out_half_width=half_widths[idx],
                **layer_kwargs)
            name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}'
            setattr(self, name, layer)
            self.layer_names.append(name)
    def forward(self, ws, **layer_kwargs):
        """Render an image from per-layer latents ws of shape [N, num_ws, w_dim]."""
        misc.assert_shape(ws, [None, self.num_ws, self.w_dim])
        ws = ws.to(torch.float32).unbind(dim=1)
        # Execute layers: first the Fourier-feature input, then one w per layer.
        x = self.input(ws[0])
        for name, w in zip(self.layer_names, ws[1:]):
            x = getattr(self, name)(x, w, **layer_kwargs)
        if self.output_scale != 1:
            x = x * self.output_scale
        # Ensure correct shape and dtype.
        misc.assert_shape(x, [None, self.img_channels, self.img_resolution, self.img_resolution])
        x = x.to(torch.float32)
        return x
    def extra_repr(self):
        return '\n'.join([
            f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},',
            f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},',
            f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},',
            f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}'])
#----------------------------------------------------------------------------
@persistence.persistent_class
class Generator(torch.nn.Module):
    """Full generator: a mapping network (Z, C -> W+) feeding a synthesis network."""

    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        mapping_kwargs      = {},   # Arguments for MappingNetwork.
        **synthesis_kwargs,         # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        # Record configuration.
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        # Build synthesis first so the mapping network knows how many ws to emit.
        self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        self.num_ws = self.synthesis.num_ws
        self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs)

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        """Map latents z (conditioned on c) to W-space, then synthesize an image."""
        ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        return self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
#----------------------------------------------------------------------------
| 26,322 | 49.816602 | 141 | py |
pix2pix3D | pix2pix3D-main/training/triplane_cond.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
import torch
from torch_utils import persistence
from training.networks_stylegan2 import SynthesisNetwork, FullyConnectedLayer, normalize_2nd_moment, DiscriminatorBlock
from training.triplane import OSGDecoder
from training.volumetric_rendering.renderer import ImportanceRenderer, ImportanceSemanticRenderer
from training.volumetric_rendering.ray_sampler import RaySampler
from training.networks_stylegan2 import Generator as StyleGAN2Backbone
import dnnlib
from torch_utils import misc
import numpy as np
from einops import repeat, rearrange
import math
import torch.nn.functional as F
# ------------------------------------------------------------------------------------------- #
@persistence.persistent_class
class EqualConv2d(torch.nn.Module):
    """Conv2d with an equalized learning rate: weights are stored at unit
    scale and multiplied by 1/sqrt(fan_in) at run time (StyleGAN-style)."""

    def __init__(
        self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True
    ):
        super().__init__()
        init_gain = 1.0
        self.weight = torch.nn.Parameter(
            torch.randn(out_channel, in_channel, kernel_size, kernel_size) * init_gain
        )
        # Runtime multiplier implementing the equalized-LR trick.
        self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2)
        self.stride = stride
        self.padding = padding
        # Bias is a learnable parameter when requested, otherwise plain None.
        self.bias = torch.nn.Parameter(torch.zeros(out_channel)) if bias else None

    def forward(self, input):
        """Apply the runtime-scaled convolution to `input`."""
        return F.conv2d(
            input,
            self.weight * self.scale,
            bias=self.bias,
            stride=self.stride,
            padding=self.padding,
        )

    def __repr__(self):
        shape = self.weight.shape
        return (
            f'{self.__class__.__name__}({shape[1]}, {shape[0]},'
            f' {shape[2]}, stride={self.stride}, padding={self.padding})'
        )
# ------------------------------------------------------------------------------------------- #
@persistence.persistent_class
class Encoder(torch.nn.Module):
    """Image encoder built from StyleGAN2 DiscriminatorBlocks.

    Downsamples an input image (e.g. a one-hot segmentation mask or an edge
    map) to a 4x4 feature map and projects it to latent code(s): one W vector
    ('W'), `num_ws` stacked W+ vectors ('W+'), or no latent ('None' mode),
    optionally prepending a 9-d camera prediction to the projection output.
    Supports progressive growing via `alpha`-blending between resolutions.
    """
    def __init__(self,
                 img_resolution,            # Input resolution.
                 img_channels,              # Number of input color channels.
                 bottleneck_factor  = 2,    # By default, the same as discriminator we use 4x4 features
                 architecture       = 'resnet', # Architecture: 'orig', 'skip', 'resnet'.
                 channel_base       = 1,    # Overall multiplier for the number of channels.
                 channel_max        = 512,  # Maximum number of channels in any layer.
                 num_fp16_res       = 0,    # Use FP16 for the N highest resolutions.
                 conv_clamp         = None, # Clamp the output of convolution layers to +-X, None = disable clamping
                 lowres_head        = None, # add a low-resolution discriminator head
                 block_kwargs       = {},   # Arguments for DiscriminatorBlock.
                 model_kwargs       = {},
                 upsample_type      = 'default',
                 progressive        = False,
                 **unused
                 ):
        super().__init__()
        self.img_resolution = img_resolution
        self.img_resolution_log2 = int(np.log2(img_resolution))
        self.img_channels = img_channels
        # Resolutions handled by downsampling blocks, highest first, down to 2**(bottleneck_factor+1).
        self.block_resolutions = [2 ** i for i in range(self.img_resolution_log2, bottleneck_factor, -1)]
        self.architecture = architecture
        self.lowres_head = lowres_head
        self.upsample_type = upsample_type
        self.progressive = progressive
        self.model_kwargs = model_kwargs
        self.output_mode = model_kwargs.get('output_mode', 'styles')
        if self.progressive:
            assert self.architecture == 'skip', "not supporting other types for now."
        self.predict_camera = model_kwargs.get('predict_camera', False)
        # Per-resolution channel budget (StyleGAN2 convention: channel_base expressed in units of 32768).
        channel_base = int(channel_base * 32768)
        channels_dict = {res: min(channel_base // res, channel_max) for res in self.block_resolutions + [4]}
        fp16_resolution = max(2 ** (self.img_resolution_log2 + 1 - num_fp16_res), 8)
        common_kwargs = dict(img_channels=self.img_channels, architecture=architecture, conv_clamp=conv_clamp)
        cur_layer_idx = 0
        # Build one DiscriminatorBlock per resolution; each halves the spatial size.
        for res in self.block_resolutions:
            in_channels = channels_dict[res] if res < img_resolution else 0
            tmp_channels = channels_dict[res]
            out_channels = channels_dict[res // 2]
            use_fp16 = (res >= fp16_resolution)
            block = DiscriminatorBlock(in_channels, tmp_channels, out_channels, resolution=res,
                                       first_layer_idx=cur_layer_idx, use_fp16=use_fp16, **block_kwargs, **common_kwargs)
            setattr(self, f'b{res}', block)
            cur_layer_idx += block.num_layers
        # this is an encoder
        if self.output_mode in ['W', 'W+', 'None']:
            self.num_ws = self.model_kwargs.get('num_ws', 0)
            # 'W+' projects to num_ws separate vectors, 'W' to one, 'None' to zero.
            self.n_latents = self.num_ws if self.output_mode == 'W+' else (0 if self.output_mode == 'None' else 1)
            self.w_dim = self.model_kwargs.get('w_dim', 512)
            # When predicting the camera, 9 extra output dims carry the 9-d camera code.
            self.add_dim = self.model_kwargs.get('add_dim', 0) if not self.predict_camera else 9
            self.out_dim = self.w_dim * self.n_latents + self.add_dim
            assert self.out_dim > 0, 'output dimenstion has to be larger than 0'
            assert self.block_resolutions[-1] // 2 == 4, "make sure the last resolution is 4x4"
            # 4x4 conv with no padding collapses the 4x4 feature map to 1x1.
            self.projector = EqualConv2d(channels_dict[4], self.out_dim, 4, padding=0, bias=False)
        else:
            raise NotImplementedError
        # Progressive-growing blend factor; -1 means "disabled".
        self.register_buffer("alpha", torch.scalar_tensor(-1))
    def set_alpha(self, alpha):
        """Update the progressive-growing blend factor in place (None = leave unchanged)."""
        if alpha is not None:
            self.alpha.fill_(alpha)
    def set_resolution(self, res):
        """Record the current progressive-growing status (used by get_block_resolutions)."""
        self.curr_status = res
    def get_block_resolutions(self, input_img):
        """Select which blocks to run (and the blend alpha) for the current growth stage."""
        block_resolutions = self.block_resolutions
        lowres_head = self.lowres_head
        alpha = self.alpha
        img_res = input_img.size(-1)
        if self.progressive and (self.lowres_head is not None) and (self.alpha > -1):
            if (self.alpha < 1) and (self.alpha > 0):
                try:
                    n_levels, _, before_res, target_res = self.curr_status
                    alpha, index = math.modf(self.alpha * n_levels)
                    index = int(index)
                except Exception as e:  # TODO: this is a hack, better to save status as buffers.
                    before_res = target_res = img_res
                if before_res == target_res:
                    # no upsampling was used in generator, do not increase the discriminator
                    alpha = 0
                block_resolutions = [res for res in self.block_resolutions if res <= target_res]
                lowres_head = before_res
            elif self.alpha == 0:
                block_resolutions = [res for res in self.block_resolutions if res <= lowres_head]
        return block_resolutions, alpha, lowres_head
    def forward(self, inputs, **block_kwargs):
        """Encode an image (or dict with key 'img') into {'ws': ..., 'camera': ...}.

        'ws' is None in 'None' mode, [B, w_dim(*)] reshaped per output_mode
        otherwise; 'camera' is present only when predict_camera is enabled.
        """
        if isinstance(inputs, dict):
            img = inputs['img']
        else:
            img = inputs
        block_resolutions, alpha, lowres_head = self.get_block_resolutions(img)
        # NOTE(review): downsample() is not defined in this module's visible
        # imports — presumably provided elsewhere in the project; confirm.
        if img.size(-1) > block_resolutions[0]:
            img = downsample(img, block_resolutions[0])
        if self.progressive and (self.lowres_head is not None) and (self.alpha > -1) and (self.alpha < 1) and (alpha > 0):
            img0 = downsample(img, img.size(-1) // 2)
        x = None if (not self.progressive) or (block_resolutions[0] == self.img_resolution) \
            else getattr(self, f'b{block_resolutions[0]}').fromrgb(img)
        for res in block_resolutions:
            block = getattr(self, f'b{res}')
            if (lowres_head == res) and (self.alpha > -1) and (self.alpha < 1) and (alpha > 0):
                # Blend the high-res path with the lower-res path during fade-in.
                if self.architecture == 'skip':
                    img = img * alpha + img0 * (1 - alpha)
                if self.progressive:
                    x = x * alpha + block.fromrgb(img0) * (1 - alpha)  # combine from img0
            x, img = block(x, img, **block_kwargs)
        outputs = {}
        if self.output_mode in ['W', 'W+', 'None']:
            # Collapse the 1x1 spatial dims of the projector output.
            out = self.projector(x)[:,:,0,0]
            if self.predict_camera:
                # First 9 dims are the camera code; the rest are the latent(s).
                # NOTE(review): camera_9d_to_16d() is also not defined in the
                # visible imports — presumably a project helper; confirm.
                out, out_cam_9d = out[:, 9:], out[:, :9]
                outputs['camera'] = camera_9d_to_16d(out_cam_9d)
            if self.output_mode == 'W+':
                out = rearrange(out, 'b (n s) -> b n s', n=self.num_ws, s=self.w_dim)
            elif self.output_mode == 'W':
                out = repeat(out, 'b s -> b n s', n=self.num_ws)
            else:
                out = None
            outputs['ws'] = out
        return outputs
#----------------------------------------------------------------------------
@persistence.persistent_class
class MaskMappingNetwork(torch.nn.Module):
    """Mapping network conditioned on a segmentation mask.

    The mask is encoded to a single embedding vector, concatenated with the
    (normalized) z latent and optional label embedding, and pushed through an
    MLP to produce w; w is then broadcast to all `num_ws` layers.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no labels.
        in_resolution,              # Input resolution.
        in_channels,                # Number of input channels.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers      = 8,        # Number of mapping layers.
        embed_features  = None,     # Label embedding dimensionality, None = same as w_dim.
        layer_features  = None,     # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation      = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.995,    # Decay for tracking the moving average of W during training, None = do not track.
        one_hot         = True,     # Whether batch['mask'] holds class indices to one-hot encode.
        **unused,
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.in_resolution = in_resolution
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta
        self.one_hot = one_hot
        if embed_features is None:
            embed_features = w_dim
        if layer_features is None:
            layer_features = w_dim
        # First FC layer consumes z + mask embedding (+ label embedding if c_dim > 0).
        if c_dim == 0:
            features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
        else:
            features_list = [z_dim + embed_features * 2] + [layer_features] * (num_layers - 1) + [w_dim]
        if c_dim > 0: # project label condition
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        # Mask encoder producing a single 'W'-mode embedding vector.
        self.embed_mask = Encoder(img_resolution=in_resolution, img_channels=in_channels, model_kwargs={'num_ws': 1, 'w_dim': embed_features, 'output_mode': 'W'})
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)
        if num_ws is not None and w_avg_beta is not None:
            # Running average of w, used for truncation at inference time.
            self.register_buffer('w_avg', torch.zeros([w_dim]))
    def forward(self, z=None, c=None, batch=None, truncation_psi=1, truncation_cutoff=None, update_emas=False, **unused_kwargs):
        """Return broadcast latents of shape [B, num_ws, w_dim] (or [B, w_dim] if num_ws is None)."""
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32)) # normalize z to sphere
            # assert (batch['mask'].squeeze(1).long() >= 0).all()
            if self.one_hot:
                mask_one_hot = torch.nn.functional.one_hot(batch['mask'].squeeze(1).long(), self.in_channels).permute(0,3,1,2)
            else:
                mask_one_hot = batch['mask']
            misc.assert_shape(mask_one_hot, [None, self.in_channels, self.in_resolution, self.in_resolution])
            # Encode the mask and normalize its embedding like z.
            y = normalize_2nd_moment(self.embed_mask(mask_one_hot.to(torch.float32))['ws'].squeeze(1))
            misc.assert_shape(y, [None, self.w_dim])
            x = torch.cat([x.contiguous(), y.contiguous()], dim=1) if x is not None else y
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                c_embed = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, c_embed], dim=1) if x is not None else c_embed
        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)
        # Update moving average of W.
        if self.w_avg_beta is not None and update_emas:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))
        # Broadcast.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
        # Apply truncation.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class MaskMappingNetwork_disentangle(torch.nn.Module):
    """Mask-conditioned mapping network with geometry/appearance disentanglement.

    The mask is encoded into the first `geometry_layer` (7) W+ vectors
    directly; z (plus optional label) goes through the MLP and fills the
    remaining `num_ws - 7` layers. Unlike MaskMappingNetwork, mask and z are
    never mixed in the same vector.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no labels.
        in_resolution,              # Input resolution.
        in_channels,                # Number of input channels.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers      = 8,        # Number of mapping layers.
        embed_features  = None,     # Label embedding dimensionality, None = same as w_dim.
        layer_features  = None,     # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation      = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.995,    # Decay for tracking the moving average of W during training, None = do not track.
        one_hot         = True,     # Whether batch['mask'] holds class indices to one-hot encode.
        **unused,
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.in_resolution = in_resolution
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta
        self.one_hot = one_hot
        # Number of leading W+ layers taken verbatim from the mask encoder.
        self.geometry_layer = 7
        if embed_features is None:
            embed_features = w_dim
        if layer_features is None:
            layer_features = w_dim
        # MLP input is z only (mask goes through its own W+ encoder below).
        if c_dim == 0:
            features_list = [z_dim] + [layer_features] * (num_layers - 1) + [w_dim]
        else:
            features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
        if c_dim > 0: # project label condition
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        # Mask encoder emitting geometry_layer W+ vectors ('W+' mode).
        self.embed_mask = Encoder(img_resolution=in_resolution, img_channels=in_channels, model_kwargs={'num_ws': self.geometry_layer, 'w_dim': w_dim, 'output_mode': 'W+'})
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)
        if num_ws is not None and w_avg_beta is not None:
            # Per-layer running average (note: shape [num_ws, w_dim], unlike the non-disentangled variant).
            self.register_buffer('w_avg', torch.zeros([num_ws, w_dim]))
    def forward(self, z=None, c=None, batch=None, truncation_psi=1, truncation_cutoff=None, update_emas=False, **unused_kwargs):
        """Return [B, num_ws, w_dim]: mask-derived geometry layers followed by z-derived layers.

        NOTE(review): the shape asserts below read z.shape[0], so callers must
        pass a tensor for z even though z_dim = 0 is nominally allowed — confirm.
        """
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32)) # normalize z to sphere
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                c_embed = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, c_embed], dim=1) if x is not None else c_embed
        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)
        # Geometry Code from Mask
        misc.assert_shape(batch['mask'], [z.shape[0], 1, None, None])
        if self.one_hot:
            mask_one_hot = torch.nn.functional.one_hot(batch['mask'].squeeze(1).long(), self.in_channels).permute(0,3,1,2)
        else:
            mask_one_hot = batch['mask']
        misc.assert_shape(mask_one_hot, [z.shape[0], self.in_channels, self.in_resolution, self.in_resolution])
        y = self.embed_mask(mask_one_hot.to(torch.float32))['ws'] # B x 7 x w_dim
        misc.assert_shape(y, [None, self.geometry_layer, self.w_dim])
        # Broadcast.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                # z-derived w fills the non-geometry layers; geometry layers go first.
                x = x.unsqueeze(1).repeat([1, self.num_ws - self.geometry_layer, 1])
                x = torch.cat([y, x], dim=1)
        # Update moving average of W.
        if self.w_avg_beta is not None and update_emas:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))
        # Apply truncation.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class EdgeMappingNetwork(torch.nn.Module):
    """Mapping network conditioned on an edge map.

    Same structure as MaskMappingNetwork, but batch['mask'] is treated as a
    continuous edge image (no one-hot encoding).
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no labels.
        in_resolution,              # Input resolution.
        in_channels,                # Number of input channels.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers      = 8,        # Number of mapping layers.
        embed_features  = None,     # Label embedding dimensionality, None = same as w_dim.
        layer_features  = None,     # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation      = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.995,    # Decay for tracking the moving average of W during training, None = do not track.
        **unused,
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.in_resolution = in_resolution
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta
        if embed_features is None:
            embed_features = w_dim
        if layer_features is None:
            layer_features = w_dim
        # First FC layer consumes z + edge embedding (+ label embedding if c_dim > 0).
        if c_dim == 0:
            features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
        else:
            features_list = [z_dim + embed_features * 2] + [layer_features] * (num_layers - 1) + [w_dim]
        if c_dim > 0: # project label condition
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        # Edge-map encoder producing a single 'W'-mode embedding vector.
        self.embed_edge = Encoder(img_resolution=in_resolution, img_channels=in_channels, model_kwargs={'num_ws': 1, 'w_dim': embed_features, 'output_mode': 'W'})
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)
        if num_ws is not None and w_avg_beta is not None:
            # Running average of w, used for truncation at inference time.
            self.register_buffer('w_avg', torch.zeros([w_dim]))
    def forward(self, z=None, c=None, batch=None, truncation_psi=1, truncation_cutoff=None, update_emas=False, **unused_kwargs):
        """Return broadcast latents of shape [B, num_ws, w_dim] (or [B, w_dim] if num_ws is None)."""
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32)) # normalize z to sphere
            # mask_one_hot = torch.nn.functional.one_hot(batch['mask'].squeeze(1).long(), self.in_channels).permute(0,3,1,2)
            edge = batch['mask'].to(torch.float32)
            misc.assert_shape(edge, [None, self.in_channels, self.in_resolution, self.in_resolution])
            # Encode the edge map and normalize its embedding like z.
            y = normalize_2nd_moment(self.embed_edge(edge)['ws'].squeeze(1))
            misc.assert_shape(y, [None, self.w_dim])
            x = torch.cat([x.contiguous(), y.contiguous()], dim=1) if x is not None else y
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                c_embed = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, c_embed], dim=1) if x is not None else c_embed
        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)
        # Update moving average of W.
        if self.w_avg_beta is not None and update_emas:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))
        # Broadcast.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                x = x.unsqueeze(1).repeat([1, self.num_ws, 1])
        # Apply truncation.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x
#----------------------------------------------------------------------------
@persistence.persistent_class
class EdgeMappingNetwork_disentangle(torch.nn.Module):
    """Edge-conditioned mapping network with geometry/appearance disentanglement.

    The edge map is encoded into the first `geometry_layer` (7) W+ vectors;
    z (plus optional label) goes through the MLP and fills the remaining
    `num_ws - 7` layers. Analogous to MaskMappingNetwork_disentangle, but the
    conditioning image is used as-is (no one-hot encoding).
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality, 0 = no latent.
        c_dim,                      # Conditioning label (C) dimensionality, 0 = no labels.
        in_resolution,              # Input resolution.
        in_channels,                # Number of input channels.
        w_dim,                      # Intermediate latent (W) dimensionality.
        num_ws,                     # Number of intermediate latents to output, None = do not broadcast.
        num_layers      = 8,        # Number of mapping layers.
        embed_features  = None,     # Label embedding dimensionality, None = same as w_dim.
        layer_features  = None,     # Number of intermediate features in the mapping layers, None = same as w_dim.
        activation      = 'lrelu',  # Activation function: 'relu', 'lrelu', etc.
        lr_multiplier   = 0.01,     # Learning rate multiplier for the mapping layers.
        w_avg_beta      = 0.995,    # Decay for tracking the moving average of W during training, None = do not track.
        **unused,
    ):
        super().__init__()
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.in_resolution = in_resolution
        self.in_channels = in_channels
        self.w_dim = w_dim
        self.num_ws = num_ws
        self.num_layers = num_layers
        self.w_avg_beta = w_avg_beta
        # Number of leading W+ layers taken verbatim from the edge encoder.
        self.geometry_layer = 7
        if embed_features is None:
            embed_features = w_dim
        if layer_features is None:
            layer_features = w_dim
        # MLP input is z only (edge map goes through its own W+ encoder below).
        if c_dim == 0:
            features_list = [z_dim] + [layer_features] * (num_layers - 1) + [w_dim]
        else:
            features_list = [z_dim + embed_features] + [layer_features] * (num_layers - 1) + [w_dim]
        if c_dim > 0: # project label condition
            self.embed = FullyConnectedLayer(c_dim, embed_features)
        # Edge encoder emitting geometry_layer W+ vectors ('W+' mode).
        self.embed_mask = Encoder(img_resolution=in_resolution, img_channels=in_channels, model_kwargs={'num_ws': self.geometry_layer, 'w_dim': w_dim, 'output_mode': 'W+'})
        for idx in range(num_layers):
            in_features = features_list[idx]
            out_features = features_list[idx + 1]
            layer = FullyConnectedLayer(in_features, out_features, activation=activation, lr_multiplier=lr_multiplier)
            setattr(self, f'fc{idx}', layer)
        if num_ws is not None and w_avg_beta is not None:
            # Per-layer running average (shape [num_ws, w_dim]).
            self.register_buffer('w_avg', torch.zeros([num_ws, w_dim]))
    def forward(self, z=None, c=None, batch=None, truncation_psi=1, truncation_cutoff=None, update_emas=False, **unused_kwargs):
        """Return [B, num_ws, w_dim]: edge-derived geometry layers followed by z-derived layers.

        NOTE(review): the shape asserts below read z.shape[0], so callers must
        pass a tensor for z even though z_dim = 0 is nominally allowed — confirm.
        """
        # Embed, normalize, and concat inputs.
        x = None
        with torch.autograd.profiler.record_function('input'):
            if self.z_dim > 0:
                misc.assert_shape(z, [None, self.z_dim])
                x = normalize_2nd_moment(z.to(torch.float32)) # normalize z to sphere
            if self.c_dim > 0:
                misc.assert_shape(c, [None, self.c_dim])
                c_embed = normalize_2nd_moment(self.embed(c.to(torch.float32)))
                x = torch.cat([x, c_embed], dim=1) if x is not None else c_embed
        # Main layers.
        for idx in range(self.num_layers):
            layer = getattr(self, f'fc{idx}')
            x = layer(x)
        # Geometry Code from Mask
        misc.assert_shape(batch['mask'], [z.shape[0], 1, None, None])
        edge = batch['mask'].to(torch.float32)
        misc.assert_shape(edge, [z.shape[0], self.in_channels, self.in_resolution, self.in_resolution])
        y = self.embed_mask(edge.to(torch.float32))['ws'] # B x 7 x w_dim
        misc.assert_shape(y, [None, self.geometry_layer, self.w_dim])
        # Broadcast.
        if self.num_ws is not None:
            with torch.autograd.profiler.record_function('broadcast'):
                # z-derived w fills the non-geometry layers; geometry layers go first.
                x = x.unsqueeze(1).repeat([1, self.num_ws - self.geometry_layer, 1])
                x = torch.cat([y, x], dim=1)
        # Update moving average of W.
        if self.w_avg_beta is not None and update_emas:
            with torch.autograd.profiler.record_function('update_w_avg'):
                self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta))
        # Apply truncation.
        if truncation_psi != 1:
            with torch.autograd.profiler.record_function('truncate'):
                assert self.w_avg_beta is not None
                if self.num_ws is None or truncation_cutoff is None:
                    x = self.w_avg.lerp(x, truncation_psi)
                else:
                    x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi)
        return x
# ----------------------------------------------------------------------------
@persistence.persistent_class
class Generator_cond(torch.nn.Module):
    """Generator whose mapping network class is chosen at construction time
    via mapping_kwargs (dnnlib-style), e.g. a mask- or edge-conditioned mapper."""

    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        mapping_kwargs      = {},   # Arguments for MappingNetwork.
        **synthesis_kwargs,         # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        # Record configuration.
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.w_dim = w_dim
        self.img_resolution = img_resolution
        self.img_channels = img_channels
        # Build synthesis first so the mapping network knows how many ws to emit.
        self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs)
        self.num_ws = self.synthesis.num_ws
        # The concrete mapping-network class is specified inside mapping_kwargs.
        self.mapping = dnnlib.util.construct_class_by_name(**mapping_kwargs, z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws)

    def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        """Map latents z (conditioned on c) to W-space, then synthesize an image."""
        ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        return self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
#----------------------------------------------------------------------------
@persistence.persistent_class
class TriPlaneGenerator(torch.nn.Module):
    """EG3D-style tri-plane 3D generator with a conditional (mask/edge) backbone.

    A Generator_cond backbone synthesizes a 256x256, 96-channel feature image
    that is split into three 32-channel planes; an importance-sampling volume
    renderer queries them along camera rays, and a super-resolution module
    upsamples the rendered features to the final image resolution.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        sr_num_fp16_res     = 0,
        mapping_kwargs      = {},   # Arguments for MappingNetwork.
        rendering_kwargs    = {},
        sr_kwargs           = {},
        **synthesis_kwargs,         # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim=z_dim
        self.c_dim=c_dim
        self.w_dim=w_dim
        self.img_resolution=img_resolution
        self.img_channels=img_channels
        self.renderer = ImportanceRenderer()
        self.ray_sampler = RaySampler()
        # Backbone emits 3 planes x 32 channels at a fixed 256x256 resolution.
        self.backbone = Generator_cond(z_dim, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs)
        self.superresolution = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module'], channels=32, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], **sr_kwargs)
        self.decoder = OSGDecoder(32, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32})
        # Resolution of the raw neural render before super-resolution.
        self.neural_rendering_resolution = 64
        self.rendering_kwargs = rendering_kwargs
        # Cache for the last generated planes (see cache_backbone/use_cached_backbone).
        self._last_planes = None
    def mapping(self, z, c, batch, truncation_psi=1, truncation_cutoff=None, update_emas=False):
        """Map z (with conditioning batch and optional camera label c) to W+ latents."""
        if self.rendering_kwargs['c_gen_conditioning_zero']:
            c = torch.zeros_like(c)
        return self.backbone.mapping(z, c * self.rendering_kwargs.get('c_scale', 0), batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
    def synthesis(self, ws, c, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
        """Render an image from latents ws and a 25-d camera vector c.

        c packs a 4x4 cam2world matrix (first 16 values) and a 3x3 intrinsics
        matrix (next 9). Returns a dict with 'image' (super-resolved),
        'image_raw' (RGB at neural_rendering_resolution), and 'image_depth'.
        """
        cam2world_matrix = c[:, :16].view(-1, 4, 4)
        intrinsics = c[:, 16:25].view(-1, 3, 3)
        if neural_rendering_resolution is None:
            neural_rendering_resolution = self.neural_rendering_resolution
        else:
            self.neural_rendering_resolution = neural_rendering_resolution
        # Create a batch of rays for volume rendering
        ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution)
        # Create triplanes by running StyleGAN backbone
        N, M, _ = ray_origins.shape
        if use_cached_backbone and self._last_planes is not None:
            planes = self._last_planes
        else:
            planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
        if cache_backbone:
            self._last_planes = planes
        # Reshape output into three 32-channel planes
        planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
        # Perform volume rendering
        feature_samples, depth_samples, weights_samples = self.renderer(planes, self.decoder, ray_origins, ray_directions, self.rendering_kwargs) # channels last
        # Reshape into 'raw' neural-rendered image
        H = W = self.neural_rendering_resolution
        feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous()
        depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W)
        # Run superresolution to get final image
        rgb_image = feature_image[:, :3]
        sr_image = self.superresolution(rgb_image, feature_image, ws, noise_mode=self.rendering_kwargs['superresolution_noise_mode'], **{k:synthesis_kwargs[k] for k in synthesis_kwargs.keys() if k != 'noise_mode'})
        return {'image': sr_image, 'image_raw': rgb_image, 'image_depth': depth_image}
    def sample(self, coordinates, directions, z, c, batch, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        # Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes.
        # Note: conditions the mapping on batch['pose'] rather than c.
        ws = self.mapping(z, batch['pose'], batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
        planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
        return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs)
    def sample_mixed(self, coordinates, directions, ws, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        # Same as sample, but expects latent vectors 'ws' instead of Gaussian noise 'z'
        planes = self.backbone.synthesis(ws, update_emas = update_emas, **synthesis_kwargs)
        planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
        return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs)
    def forward(self, z, c, batch, truncation_psi=1, truncation_cutoff=None, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
        # Render a batch of generated images.
        # Mapping conditions on batch['pose']; synthesis renders from camera c.
        ws = self.mapping(z, batch['pose'], batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        return self.synthesis(ws, c, update_emas=update_emas, neural_rendering_resolution=neural_rendering_resolution, cache_backbone=cache_backbone, use_cached_backbone=use_cached_backbone, **synthesis_kwargs)
#----------------------------------------------------------------------------
@persistence.persistent_class
class TriPlaneSemanticGenerator(torch.nn.Module):
    """Tri-plane generator that renders an RGB image and a semantic map jointly.

    Two separate backbones each produce their own tri-plane: `backbone`
    (StyleGAN2) for texture/appearance and `backbone_semantic`
    (Generator_cond with z_dim=0) for semantics. `mapping` concatenates the
    two W-latents along the last dimension (texture first, semantic second,
    so the result is 2*w_dim wide), and `synthesis` splits them again before
    volume rendering with ImportanceSemanticRenderer and two OSG decoders.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        semantic_channels,          # Number of semantic channels.
        sr_num_fp16_res     = 0,    # Number of fp16 layers in the superresolution networks.
        mapping_kwargs      = {},   # Arguments for MappingNetwork.
        rendering_kwargs    = {},   # Renderer options (ray bounds, sampling counts, SR module names, ...).
        sr_kwargs           = {},   # Arguments for the superresolution networks.
        data_type           = None, # Dataset type tag; stored but not used for computation here.
        **synthesis_kwargs,         # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim=z_dim
        self.c_dim=c_dim
        self.w_dim=w_dim
        self.img_resolution=img_resolution
        self.img_channels=img_channels
        self.semantic_channels=semantic_channels
        self.data_type = data_type
        self.renderer = ImportanceSemanticRenderer()
        self.ray_sampler = RaySampler()
        # Texture tri-plane backbone: produces 3 planes x 32 channels at 256x256.
        self.backbone = StyleGAN2Backbone(z_dim, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs)
        # mapping_semantic_kwargs = mapping_kwargs.copy()
        # mapping_semantic_kwargs['z_dim'] = 0
        # Semantic tri-plane backbone: z_dim=0, conditioned through `batch` in mapping().
        self.backbone_semantic = Generator_cond(0, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs)
        # Superresolution modules are resolved by class name from rendering_kwargs.
        self.superresolution = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module'], channels=32, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], **sr_kwargs)
        self.superresolution_semantic = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module_semantic'], channels=32, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], semantic_channels=semantic_channels, **sr_kwargs)
        self.decoder = OSGDecoder(64, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32, 'sigmoid': True})
        # Semantic decoder only squashes its output when there is a single channel.
        self.decoder_semantic = OSGDecoder_semantic(32, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32, 'sigmoid': True if semantic_channels == 1 else False})
        self.neural_rendering_resolution = 64
        self.rendering_kwargs = rendering_kwargs
        self._last_planes = None
    def mapping(self, z, c, batch, truncation_psi=1, truncation_cutoff=None, update_emas=False):
        """Map `z` to concatenated texture+semantic W-latents of width 2*w_dim."""
        if self.rendering_kwargs['c_gen_conditioning_zero']:
            c = torch.zeros_like(c)
        ws_texture = self.backbone.mapping(z, c * self.rendering_kwargs.get('c_scale', 0), truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        # Semantic mapping takes no z; conditioning comes from `batch`.
        ws_semantic = self.backbone_semantic.mapping(None, c * self.rendering_kwargs.get('c_scale', 0), batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        return torch.cat([ws_texture, ws_semantic], dim=-1)
    def synthesis(self, ws, c, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
        """Volume-render RGB + semantics from latents `ws` under camera `c`.

        `c` packs the camera as 16 cam2world values followed by 9 intrinsics.
        Returns a dict with super-resolved and raw RGB, depth, and semantics.
        NOTE: `cache_backbone`/`use_cached_backbone` are accepted but unused
        here (the caching block below is commented out).
        """
        cam2world_matrix = c[:, :16].view(-1, 4, 4)
        intrinsics = c[:, 16:25].view(-1, 3, 3)
        if neural_rendering_resolution is None:
            neural_rendering_resolution = self.neural_rendering_resolution
        else:
            # Remember the last explicitly-requested resolution.
            self.neural_rendering_resolution = neural_rendering_resolution
        # Create a batch of rays for volume rendering
        ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution)
        # Create triplanes by running StyleGAN backbone
        # N, M, _ = ray_origins.shape
        # if use_cached_backbone and self._last_planes is not None:
        #     planes = self._last_planes
        # else:
        #     planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
        # if cache_backbone:
        #     self._last_planes = planes
        N, M, _ = ray_origins.shape
        # `ws` is texture latents followed by semantic latents (see mapping()).
        assert ws.shape[-1] == self.w_dim * 2
        ws_texture, ws_semantic = ws[..., :self.w_dim], ws[..., self.w_dim:]
        planes_texture = self.backbone.synthesis(ws_texture, update_emas=update_emas, **synthesis_kwargs)
        planes_semantic = self.backbone_semantic.synthesis(ws_semantic, update_emas=update_emas, **synthesis_kwargs)
        # Reshape output into three 32-channel planes
        planes_texture = planes_texture.view(len(planes_texture), 3, 32, planes_texture.shape[-2], planes_texture.shape[-1])
        planes_semantic = planes_semantic.view(len(planes_semantic), 3, 32, planes_semantic.shape[-2], planes_semantic.shape[-1])
        # Perform volume rendering
        feature_samples, depth_samples, weights_samples = self.renderer(planes_texture, planes_semantic, self.decoder, self.decoder_semantic, ray_origins, ray_directions, self.rendering_kwargs) # channels last
        # Reshape into 'raw' neural-rendered image
        H = W = self.neural_rendering_resolution
        feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous()
        depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W)
        # First half of the feature channels carries texture, second half semantics.
        rgb_feature_image, semantics_feature_image = feature_image[:, :feature_image.shape[1] // 2], feature_image[:, feature_image.shape[1] // 2:]
        # Run superresolution to get final image
        rgb_image = rgb_feature_image[:, :3]
        sr_image = self.superresolution(rgb_image, rgb_feature_image, ws_texture, noise_mode=self.rendering_kwargs['superresolution_noise_mode'], **{k:synthesis_kwargs[k] for k in synthesis_kwargs.keys() if k != 'noise_mode'})
        semantic_image = semantics_feature_image[:, :self.semantic_channels]
        sr_semantic_image = self.superresolution_semantic(semantic_image, semantics_feature_image, ws_semantic, noise_mode=self.rendering_kwargs['superresolution_noise_mode'], **{k:synthesis_kwargs[k] for k in synthesis_kwargs.keys() if k != 'noise_mode'})
        return {'image': sr_image, 'image_raw': rgb_image, 'image_depth': depth_image, 'semantic': sr_semantic_image, 'semantic_raw': semantic_image}
    def sample(self, coordinates, directions, z, c, batch, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        # Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes.
        ws = self.mapping(z, batch['pose'], batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        # Split the joint latent back into texture / semantic halves.
        assert ws.shape[-1] == self.w_dim * 2
        ws_texture, ws_semantic = ws[..., :self.w_dim], ws[..., self.w_dim:]
        planes_texture = self.backbone.synthesis(ws_texture, update_emas=update_emas, **synthesis_kwargs)
        planes_semantic = self.backbone_semantic.synthesis(ws_semantic, update_emas=update_emas, **synthesis_kwargs)
        # Reshape output into three 32-channel planes
        planes_texture = planes_texture.view(len(planes_texture), 3, 32, planes_texture.shape[-2], planes_texture.shape[-1])
        planes_semantic = planes_semantic.view(len(planes_semantic), 3, 32, planes_semantic.shape[-2], planes_semantic.shape[-1])
        return self.renderer.run_model(planes_texture, planes_semantic, self.decoder, self.decoder_semantic, coordinates, directions, self.rendering_kwargs)
    def sample_mixed(self, coordinates, directions, ws, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        # Same as sample, but expects latent vectors 'ws' instead of Gaussian noise 'z'
        assert ws.shape[-1] == self.w_dim * 2
        ws_texture, ws_semantic = ws[..., :self.w_dim], ws[..., self.w_dim:]
        planes_texture = self.backbone.synthesis(ws_texture, update_emas=update_emas, **synthesis_kwargs)
        planes_semantic = self.backbone_semantic.synthesis(ws_semantic, update_emas=update_emas, **synthesis_kwargs)
        # Reshape output into three 32-channel planes
        planes_texture = planes_texture.view(len(planes_texture), 3, 32, planes_texture.shape[-2], planes_texture.shape[-1])
        planes_semantic = planes_semantic.view(len(planes_semantic), 3, 32, planes_semantic.shape[-2], planes_semantic.shape[-1])
        return self.renderer.run_model(planes_texture, planes_semantic, self.decoder, self.decoder_semantic, coordinates, directions, self.rendering_kwargs)
    def forward(self, z, c, batch, truncation_psi=1, truncation_cutoff=None, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
        # Render a batch of generated images.
        ws = self.mapping(z, batch['pose'], batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        return self.synthesis(ws, c, update_emas=update_emas, neural_rendering_resolution=neural_rendering_resolution, cache_backbone=cache_backbone, use_cached_backbone=use_cached_backbone, **synthesis_kwargs)
# ----------------------------------------------------------------------------
class OSGDecoder_semantic(torch.nn.Module):
    """Small MLP decoder mapping aggregated tri-plane features to (rgb, sigma).

    The MLP output channel 0 is density; the remaining
    `options['decoder_output_dim']` channels are the feature/"rgb" vector.
    When `options['sigmoid']` is true the features are squashed with the
    MipNeRF sigmoid clamp into (-0.001, 1.001).
    """

    def __init__(self, n_features, options):
        super().__init__()
        self.hidden_dim = 64
        self.net = torch.nn.Sequential(
            FullyConnectedLayer(n_features, self.hidden_dim, lr_multiplier=options['decoder_lr_mul']),
            torch.nn.Softplus(),
            FullyConnectedLayer(self.hidden_dim, 1 + options['decoder_output_dim'], lr_multiplier=options['decoder_lr_mul']),
        )
        self.final_sigmoid = options['sigmoid']

    def forward(self, sampled_features, ray_directions):
        # Average over the plane axis, then push every sample through the MLP.
        pooled = sampled_features.mean(1)
        batch, n_pts, n_ch = pooled.shape
        out = self.net(pooled.view(batch * n_pts, n_ch)).view(batch, n_pts, -1)
        sigma = out[..., 0:1]
        features = out[..., 1:]
        if self.final_sigmoid:
            # Sigmoid clamping from MipNeRF: maps into (-0.001, 1.001).
            features = torch.sigmoid(features) * (1 + 2 * 0.001) - 0.001
        return {'rgb': features, 'sigma': sigma}
#----------------------------------------------------------------------------
class OSGDecoder_semantic_entangle(torch.nn.Module):
    """MLP decoder for a single entangled tri-plane carrying RGB + semantics.

    MLP output layout: [sigma | rgb(3) | semantic(S) | remaining features].
    With `options['sigmoid']` the whole non-sigma part is clamp-sigmoided in
    one block; otherwise only the rgb and trailing feature segments are
    clamped while the semantic logits pass through unchanged.
    """

    def __init__(self, n_features, options):
        super().__init__()
        self.hidden_dim = 64
        self.net = torch.nn.Sequential(
            FullyConnectedLayer(n_features, self.hidden_dim, lr_multiplier=options['decoder_lr_mul']),
            torch.nn.Softplus(),
            FullyConnectedLayer(self.hidden_dim, 1 + options['decoder_output_dim'], lr_multiplier=options['decoder_lr_mul']),
        )
        self.feature_sigmoid = options['sigmoid']
        self.semantic_channels = options['semantic_channels']

    def forward(self, sampled_features, ray_directions):
        def _clamp(t):
            # Sigmoid clamping from MipNeRF: maps into (-0.001, 1.001).
            return torch.sigmoid(t) * (1 + 2 * 0.001) - 0.001

        pooled = sampled_features.mean(1)  # average over the plane axis
        batch, n_pts, n_ch = pooled.shape
        out = self.net(pooled.view(batch * n_pts, n_ch)).view(batch, n_pts, -1)
        sigma = out[..., 0:1]
        if self.feature_sigmoid:
            feature = _clamp(out[..., 1:])
        else:
            sem_end = 4 + self.semantic_channels
            feature = torch.cat((_clamp(out[..., 1:4]),      # rgb
                                 out[..., 4:sem_end],        # raw semantic logits
                                 _clamp(out[..., sem_end:])  # remaining features
                                 ), dim=-1)
        return {'rgb': feature, 'sigma': sigma}
class OSGDecoder_semantic_lateSeparate(torch.nn.Module):
    """Two-headed MLP decoder: one head for RGB features, one for semantics.

    Both heads consume the same plane-averaged features. Density (sigma) is
    channel 0 of the semantic head. The returned 'rgb' tensor is the
    concatenation [clamped rgb features | semantic features], where the
    semantic part is clamp-sigmoided only when `options['sigmoid']` is true.
    """

    def __init__(self, n_features, options):
        super().__init__()
        self.hidden_dim = 64
        self.net = torch.nn.Sequential(
            FullyConnectedLayer(n_features, self.hidden_dim, lr_multiplier=options['decoder_lr_mul']),
            torch.nn.Softplus(),
            FullyConnectedLayer(self.hidden_dim, 1 + options['decoder_output_dim'], lr_multiplier=options['decoder_lr_mul']),
        )
        self.net_semantic = torch.nn.Sequential(
            FullyConnectedLayer(n_features, self.hidden_dim, lr_multiplier=options['decoder_lr_mul']),
            torch.nn.Softplus(),
            FullyConnectedLayer(self.hidden_dim, 1 + options['decoder_output_dim'], lr_multiplier=options['decoder_lr_mul']),
        )
        self.semantic_sigmoid = options['sigmoid']

    def forward(self, sampled_features, ray_directions):
        pooled = sampled_features.mean(1)  # average over the plane axis
        batch, n_pts, n_ch = pooled.shape
        flat = pooled.view(batch * n_pts, n_ch)
        rgb_out = self.net(flat).view(batch, n_pts, -1)
        sem_out = self.net_semantic(flat).view(batch, n_pts, -1)
        sigma = sem_out[..., 0:1]  # density comes from the semantic head
        # Sigmoid clamping from MipNeRF: maps into (-0.001, 1.001).
        rgb = torch.sigmoid(rgb_out[..., 1:]) * (1 + 2 * 0.001) - 0.001
        semantic = sem_out[..., 1:]
        if self.semantic_sigmoid:
            semantic = torch.sigmoid(semantic) * (1 + 2 * 0.001) - 0.001
        return {'rgb': torch.cat((rgb, semantic), dim=-1), 'sigma': sigma}
#----------------------------------------------------------------------------
@persistence.persistent_class
class TriPlaneSemanticEntangleGenerator(torch.nn.Module):
    """Tri-plane generator with one backbone for entangled texture+semantics.

    Unlike TriPlaneSemanticGenerator, a single Generator_cond backbone
    produces one tri-plane whose decoded features carry both appearance and
    semantic channels; OSGDecoder_semantic_lateSeparate splits them late.
    Rendering uses the plain ImportanceRenderer.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        semantic_channels,          # Number of semantic channels.
        sr_num_fp16_res     = 0,    # Number of fp16 layers in the superresolution networks.
        mapping_kwargs      = {},   # Arguments for MappingNetwork.
        rendering_kwargs    = {},   # Renderer options (ray bounds, sampling counts, SR module names, ...).
        sr_kwargs           = {},   # Arguments for the superresolution networks.
        data_type           = None, # Dataset type tag; stored but not used for computation here.
        **synthesis_kwargs,         # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim=z_dim
        self.c_dim=c_dim
        self.w_dim=w_dim
        self.img_resolution=img_resolution
        self.img_channels=img_channels
        self.semantic_channels=semantic_channels
        self.data_type = data_type
        self.renderer = ImportanceRenderer()
        self.ray_sampler = RaySampler()
        # Single backbone produces the entangled tri-plane (3 planes x 32 channels).
        self.backbone = Generator_cond(z_dim, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs)
        self.superresolution = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module'], channels=32, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], **sr_kwargs)
        self.superresolution_semantic = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module_semantic'], channels=32, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], semantic_channels=semantic_channels, **sr_kwargs)
        # Late-separate decoder: one head for rgb features, one for semantics + density.
        self.decoder = OSGDecoder_semantic_lateSeparate(32, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32, 'sigmoid': True if semantic_channels == 1 else False, 'semantic_channels': semantic_channels})
        # self.decoder_semantic = OSGDecoder_semantic(32, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32, 'sigmoid': True if semantic_channels == 1 else False})
        self.neural_rendering_resolution = 64
        self.rendering_kwargs = rendering_kwargs
        self._last_planes = None
    def mapping(self, z, c, batch, truncation_psi=1, truncation_cutoff=None, update_emas=False):
        """Map `z` (and batch conditioning) to backbone W-latents."""
        if self.rendering_kwargs['c_gen_conditioning_zero']:
            c = torch.zeros_like(c)
        return self.backbone.mapping(z, c * self.rendering_kwargs.get('c_scale', 0), batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
    def synthesis(self, ws, c, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
        """Volume-render RGB + semantics from latents `ws` under camera `c`.

        `c` packs the camera as 16 cam2world values followed by 9 intrinsics.
        Supports reusing the last tri-plane via `use_cached_backbone` and
        storing it via `cache_backbone`.
        """
        cam2world_matrix = c[:, :16].view(-1, 4, 4)
        intrinsics = c[:, 16:25].view(-1, 3, 3)
        if neural_rendering_resolution is None:
            neural_rendering_resolution = self.neural_rendering_resolution
        else:
            # Remember the last explicitly-requested resolution.
            self.neural_rendering_resolution = neural_rendering_resolution
        # Create a batch of rays for volume rendering
        ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution)
        # Create triplanes by running StyleGAN backbone
        N, M, _ = ray_origins.shape
        if use_cached_backbone and self._last_planes is not None:
            planes = self._last_planes
        else:
            planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
        if cache_backbone:
            self._last_planes = planes
        # Reshape output into three 32-channel planes
        planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
        # Perform volume rendering
        feature_samples, depth_samples, weights_samples = self.renderer(planes, self.decoder, ray_origins, ray_directions, self.rendering_kwargs) # channels last
        # Reshape into 'raw' neural-rendered image
        H = W = self.neural_rendering_resolution
        feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous()
        depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W)
        # First half of the feature channels carries texture, second half semantics
        # (matching the decoder's [rgb | semantic] concatenation).
        rgb_feature_image, semantics_feature_image = feature_image[:, :feature_image.shape[1] // 2], feature_image[:, feature_image.shape[1] // 2:]
        # Run superresolution to get final image
        rgb_image = rgb_feature_image[:, :3]
        sr_image = self.superresolution(rgb_image, rgb_feature_image, ws, noise_mode=self.rendering_kwargs['superresolution_noise_mode'], **{k:synthesis_kwargs[k] for k in synthesis_kwargs.keys() if k != 'noise_mode'})
        semantic_image = semantics_feature_image[:, :self.semantic_channels]
        sr_semantic_image = self.superresolution_semantic(semantic_image, semantics_feature_image, ws, noise_mode=self.rendering_kwargs['superresolution_noise_mode'], **{k:synthesis_kwargs[k] for k in synthesis_kwargs.keys() if k != 'noise_mode'})
        return {'image': sr_image, 'image_raw': rgb_image, 'image_depth': depth_image, 'semantic': sr_semantic_image, 'semantic_raw': semantic_image}
    def sample(self, coordinates, directions, z, c, batch, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        # Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes.
        ws = self.mapping(z, batch['pose'], batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
        planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
        return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs)
    def sample_mixed(self, coordinates, directions, ws, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        # Same as sample, but expects latent vectors 'ws' instead of Gaussian noise 'z'
        planes = self.backbone.synthesis(ws, update_emas = update_emas, **synthesis_kwargs)
        planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
        return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs)
    def forward(self, z, c, batch, truncation_psi=1, truncation_cutoff=None, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
        # Render a batch of generated images.
        ws = self.mapping(z, batch['pose'], batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        return self.synthesis(ws, c, update_emas=update_emas, neural_rendering_resolution=neural_rendering_resolution, cache_backbone=cache_backbone, use_cached_backbone=use_cached_backbone, **synthesis_kwargs)
# ----------------------------------------------------------------------------
@persistence.persistent_class
class TriPlaneSemanticEntangleGenerator_withBG(torch.nn.Module):
    """Entangled tri-plane generator with a separate 2D background model.

    Same entangled foreground pipeline as TriPlaneSemanticEntangleGenerator,
    plus an unconditional StyleGAN2 backbone (`backbone_bg`) that produces a
    64-channel background plane sampled by ray direction and composited with
    the foreground in `combine_fg_bg`.
    """
    def __init__(self,
        z_dim,                      # Input latent (Z) dimensionality.
        c_dim,                      # Conditioning label (C) dimensionality.
        w_dim,                      # Intermediate latent (W) dimensionality.
        img_resolution,             # Output resolution.
        img_channels,               # Number of output color channels.
        semantic_channels,          # Number of semantic channels.
        sr_num_fp16_res     = 0,    # Number of fp16 layers in the superresolution networks.
        mapping_kwargs      = {},   # Arguments for MappingNetwork.
        rendering_kwargs    = {},   # Renderer options (ray bounds, sampling counts, SR module names, ...).
        sr_kwargs           = {},   # Arguments for the superresolution networks.
        data_type           = None, # Dataset type tag; stored but not used for computation here.
        **synthesis_kwargs,         # Arguments for SynthesisNetwork.
    ):
        super().__init__()
        self.z_dim=z_dim
        self.c_dim=c_dim
        self.w_dim=w_dim
        self.img_resolution=img_resolution
        self.img_channels=img_channels
        self.semantic_channels=semantic_channels
        self.data_type = data_type
        self.renderer = ImportanceRenderer()
        self.ray_sampler = RaySampler()
        # Foreground backbone: entangled tri-plane (3 planes x 32 channels).
        self.backbone = Generator_cond(z_dim, c_dim, w_dim, img_resolution=256, img_channels=32*3, mapping_kwargs=mapping_kwargs, **synthesis_kwargs)
        # Background backbone is unconditional (c_dim=0) and uses the default mapping class.
        mapping_bg_kwargs = mapping_kwargs.copy()
        mapping_bg_kwargs['class_name'] = None
        self.backbone_bg = StyleGAN2Backbone(z_dim, 0, w_dim, img_resolution=256, img_channels=32*2, mapping_kwargs=mapping_bg_kwargs, **synthesis_kwargs)
        self.superresolution = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module'], channels=32, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], **sr_kwargs)
        self.superresolution_semantic = dnnlib.util.construct_class_by_name(class_name=rendering_kwargs['superresolution_module_semantic'], channels=32, img_resolution=img_resolution, sr_num_fp16_res=sr_num_fp16_res, sr_antialias=rendering_kwargs['sr_antialias'], semantic_channels=semantic_channels, **sr_kwargs)
        self.decoder = OSGDecoder_semantic_lateSeparate(32, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32, 'sigmoid': True if semantic_channels == 1 else False, 'semantic_channels': semantic_channels})
        # self.decoder_semantic = OSGDecoder_semantic(32, {'decoder_lr_mul': rendering_kwargs.get('decoder_lr_mul', 1), 'decoder_output_dim': 32, 'sigmoid': True if semantic_channels == 1 else False})
        self.neural_rendering_resolution = 64
        self.rendering_kwargs = rendering_kwargs
        self._last_planes = None
    def mapping(self, z, c, batch, truncation_psi=1, truncation_cutoff=None, update_emas=False):
        """Map `z` (and batch conditioning) to backbone W-latents."""
        if self.rendering_kwargs['c_gen_conditioning_zero']:
            c = torch.zeros_like(c)
        return self.backbone.mapping(z, c * self.rendering_kwargs.get('c_scale', 0), batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
    def synthesis(self, ws, c, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
        """Volume-render RGB + semantics, composite with the background plane.

        `c` packs the camera as 16 cam2world values followed by 9 intrinsics.
        Additionally returns the accumulated foreground weights as 'weight'.
        """
        cam2world_matrix = c[:, :16].view(-1, 4, 4)
        intrinsics = c[:, 16:25].view(-1, 3, 3)
        if neural_rendering_resolution is None:
            neural_rendering_resolution = self.neural_rendering_resolution
        else:
            # Remember the last explicitly-requested resolution.
            self.neural_rendering_resolution = neural_rendering_resolution
        # Create a batch of rays for volume rendering
        ray_origins, ray_directions = self.ray_sampler(cam2world_matrix, intrinsics, neural_rendering_resolution)
        # Create triplanes by running StyleGAN backbone
        N, M, _ = ray_origins.shape
        if use_cached_backbone and self._last_planes is not None:
            planes = self._last_planes
        else:
            planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
        if cache_backbone:
            self._last_planes = planes
        # Reshape output into three 32-channel planes
        planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
        # Perform volume rendering
        feature_samples, depth_samples, weights_samples = self.renderer(planes, self.decoder, ray_origins, ray_directions, self.rendering_kwargs) # channels last
        # Create background plane from the broadcast of the last w vector.
        ws_bg = ws[:,-1,:].unsqueeze(1).repeat([1, ws.shape[1], 1])
        planes_bg = self.backbone_bg.synthesis(ws_bg, update_emas=update_emas, **synthesis_kwargs)
        planes_bg = planes_bg.view(len(planes_bg), 64, planes_bg.shape[-2], planes_bg.shape[-1])
        # Combine foreground and background
        feature_samples, depth_samples = self.combine_fg_bg(feature_samples, depth_samples, weights_samples, planes_bg, ray_origins, ray_directions, self.rendering_kwargs)
        # Reshape into 'raw' neural-rendered image
        H = W = self.neural_rendering_resolution
        feature_image = feature_samples.permute(0, 2, 1).reshape(N, feature_samples.shape[-1], H, W).contiguous()
        depth_image = depth_samples.permute(0, 2, 1).reshape(N, 1, H, W)
        weight_image = weights_samples.permute(0, 2, 1).reshape(N, 1, H, W)
        # First half of the feature channels carries texture, second half semantics.
        rgb_feature_image, semantics_feature_image = feature_image[:, :feature_image.shape[1] // 2], feature_image[:, feature_image.shape[1] // 2:]
        # Run superresolution to get final image
        rgb_image = rgb_feature_image[:, :3]
        sr_image = self.superresolution(rgb_image, rgb_feature_image, ws, noise_mode=self.rendering_kwargs['superresolution_noise_mode'], **{k:synthesis_kwargs[k] for k in synthesis_kwargs.keys() if k != 'noise_mode'})
        semantic_image = semantics_feature_image[:, :self.semantic_channels]
        sr_semantic_image = self.superresolution_semantic(semantic_image, semantics_feature_image, ws, noise_mode=self.rendering_kwargs['superresolution_noise_mode'], **{k:synthesis_kwargs[k] for k in synthesis_kwargs.keys() if k != 'noise_mode'})
        return {'image': sr_image, 'image_raw': rgb_image, 'image_depth': depth_image, 'semantic': sr_semantic_image, 'semantic_raw': semantic_image, 'weight': weight_image}
    def sample(self, coordinates, directions, z, c, batch, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        # Compute RGB features, density for arbitrary 3D coordinates. Mostly used for extracting shapes.
        # Foreground only: the background plane is not involved here.
        ws = self.mapping(z, batch['pose'], batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        planes = self.backbone.synthesis(ws, update_emas=update_emas, **synthesis_kwargs)
        planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
        return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs)
    def sample_mixed(self, coordinates, directions, ws, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs):
        # Same as sample, but expects latent vectors 'ws' instead of Gaussian noise 'z'
        planes = self.backbone.synthesis(ws, update_emas = update_emas, **synthesis_kwargs)
        planes = planes.view(len(planes), 3, 32, planes.shape[-2], planes.shape[-1])
        return self.renderer.run_model(planes, self.decoder, coordinates, directions, self.rendering_kwargs)
    def forward(self, z, c, batch, truncation_psi=1, truncation_cutoff=None, neural_rendering_resolution=None, update_emas=False, cache_backbone=False, use_cached_backbone=False, **synthesis_kwargs):
        # Render a batch of generated images.
        ws = self.mapping(z, batch['pose'], batch, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas)
        return self.synthesis(ws, c, update_emas=update_emas, neural_rendering_resolution=neural_rendering_resolution, cache_backbone=cache_backbone, use_cached_backbone=use_cached_backbone, **synthesis_kwargs)
    def combine_fg_bg(self, feature_samples, depth_samples, weights_samples, planes_bg, ray_origins, ray_directions, rendering_kwargs):
        """Composite the rendered foreground with the direction-indexed background plane.

        Shapes:
            feature_samples: [N, M, 64]
            depth_samples: [N, M, 1]
            weights_samples: [N, M, 1]
            planes_bg: [N, 64, H, W]
            ray_origins: [N, M, 3]
            ray_directions: [N, M, 3]
            rendering_kwargs: dict
        """
        # Convert ray directions to spherical coordinates
        ray_directions = ray_directions / torch.norm(ray_directions, dim=-1, keepdim=True)
        theta = torch.atan2(ray_directions[:, :, 1], ray_directions[:, :, 0])
        phi = torch.acos(ray_directions[:, :, 2])
        # Convert spherical coordinates to pixel coordinates (-1 to 1)
        # NOTE(review): theta spans [-pi, pi], so x spans [-2, 2] — outside
        # grid_sample's [-1, 1] range; padding_mode='border' clamps instead of
        # wrapping. Confirm this horizontal clamping is intended.
        x = theta * 2 / np.pi
        y = phi * 2 / np.pi - 1
        # Sample background planes
        feature_samples_bg = F.grid_sample(planes_bg, torch.stack([x, y], dim=-1).unsqueeze(1), mode='bilinear', padding_mode='border') # [N, 64, 1, M]
        feature_samples_bg = feature_samples_bg.squeeze(2).permute(0, 2, 1) # [N, M, 64]
        assert feature_samples_bg.shape == feature_samples.shape
        # Sigmoid the rgb features and bound the semantic features
        feature_samples_bg = torch.sigmoid(feature_samples_bg)*(1 + 2*0.001) - 0.001
        feature_samples_bg = feature_samples_bg * 2 - 1 # [-1, 1]
        feature_samples_bg[:,:,32:] = feature_samples_bg[:,:,32:] * 10 # [-10, 10]
        if self.semantic_channels > 1:
            # Hardcode the background semantic class to 0: zero every other
            # class logit and give class 0 a large constant logit (20).
            feature_samples_bg[:, :, 32+1:32+self.semantic_channels] = 0
            feature_samples_bg[:, :, 32] = 20
            # feature_samples[:, :, 32] = 0
        # Combine foreground and background.
        # NOTE(review): assumes `feature_samples` is already weighted by the
        # accumulated foreground alpha so adding (1 - w) * bg completes the
        # over-composite — confirm against the renderer's output convention.
        feature_samples = feature_samples + feature_samples_bg * (1 - weights_samples)
        # Background depth is fixed at the far ray bound.
        depth_samples_bg = torch.ones_like(depth_samples) * rendering_kwargs['ray_end']
        depth_samples = depth_samples + depth_samples_bg * (1 - weights_samples)
        return feature_samples, depth_samples
# ---------------------------------------------------------------------------- | 68,256 | 53.6056 | 313 | py |
pix2pix3D | pix2pix3D-main/training/volumetric_rendering/renderer.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""
The renderer is a module that takes in rays, decides where to sample along each
ray, and computes pixel colors using the volume rendering equation.
"""
import math
import torch
import torch.nn as nn
from training.volumetric_rendering.ray_marcher import MipRayMarcher2
from training.volumetric_rendering import math_utils
def generate_planes():
    """Return the axis bases of the three canonical tri-planes, shape (3, 3, 3).

    Defines planes by the three vectors that form the "axes" of the plane.
    Should work with arbitrary number of planes and planes of arbitrary
    orientation.

    Fix: the original EG3D release listed bases whose projections were
    (x, y), (x, z) and (z, x) — the third plane duplicated the second's
    coordinate pair (merely swapped), so the (y, z) pair was never used and
    one plane carried redundant information. The bases below yield the three
    distinct projections (x, y), (z, x) and (y, z), matching the upstream
    eg3d bug fix.
    NOTE(review): checkpoints trained with the old plane layout are NOT
    compatible with this corrected layout.
    """
    return torch.tensor([[[1, 0, 0],
                          [0, 1, 0],
                          [0, 0, 1]],
                         [[0, 0, 1],
                          [1, 0, 0],
                          [0, 1, 0]],
                         [[0, 1, 0],
                          [0, 0, 1],
                          [1, 0, 0]]], dtype=torch.float32)
def project_onto_planes(planes, coordinates):
    """Project 3D points onto a batch of 2D planes.

    Args:
        planes: plane axis matrices of shape (n_planes, 3, 3).
        coordinates: points of shape (N, M, 3).

    Returns:
        Plane-local 2D coordinates of shape (N * n_planes, M, 2).
    """
    n_batch, n_pts, _ = coordinates.shape
    n_planes = planes.shape[0]
    # Repeat every point once per plane -> (N * n_planes, M, 3).
    pts = coordinates.unsqueeze(1).expand(-1, n_planes, -1, -1).reshape(n_batch * n_planes, n_pts, 3)
    # Express points in each plane's basis, then keep only the in-plane axes.
    bases = torch.linalg.inv(planes).unsqueeze(0).expand(n_batch, -1, -1, -1).reshape(n_batch * n_planes, 3, 3)
    return torch.bmm(pts, bases)[..., :2]
def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear', padding_mode='zeros', box_warp=None):
    """Bilinearly sample tri-plane features at 3D world coordinates.

    Args:
        plane_axes: plane basis matrices of shape (n_planes, 3, 3).
        plane_features: features of shape (N, n_planes, C, H, W).
        coordinates: points of shape (N, M, 3) in world units; rescaled by
            `box_warp` into the normalized [-1, 1] sampling cube.

    Returns:
        Sampled features of shape (N, n_planes, M, C).
    """
    assert padding_mode == 'zeros'
    N, n_planes, C, H, W = plane_features.shape
    M = coordinates.shape[1]
    flat_planes = plane_features.view(N * n_planes, C, H, W)
    # Map world coordinates into [-1, 1] and project onto each plane.
    grid_coords = project_onto_planes(plane_axes, (2 / box_warp) * coordinates).unsqueeze(1)
    sampled = torch.nn.functional.grid_sample(flat_planes, grid_coords.float(), mode=mode, padding_mode=padding_mode, align_corners=False)
    # grid_sample yields (N*n_planes, C, 1, M); move channels last and restore the plane axis.
    return sampled.permute(0, 3, 2, 1).reshape(N, n_planes, M, C)
def sample_from_3dgrid(grid, coordinates):
    """Trilinearly sample a dense 3D feature grid at the given points.

    Args:
        grid: feature volume of shape (1, channels, H, W, D); a batched grid
            also works since it is expanded to the coordinate batch size.
        coordinates: points of shape (batch_size, num_points_per_batch, 3) in
            normalized [-1, 1] grid space.

    Returns:
        Sampled features of shape (batch_size, num_points_per_batch, channels).
    """
    batch_size, n_coords, n_dims = coordinates.shape
    sample_grid = coordinates.reshape(batch_size, 1, 1, -1, n_dims)
    sampled = torch.nn.functional.grid_sample(
        grid.expand(batch_size, -1, -1, -1, -1),
        sample_grid,
        mode='bilinear', padding_mode='zeros', align_corners=False)
    N, C, H, W, D = sampled.shape
    # (N, C, 1, 1, M) -> (N, M, C), flattening the singleton spatial dims.
    return sampled.permute(0, 4, 3, 2, 1).reshape(N, H * W * D, C)
class ImportanceRenderer(torch.nn.Module):
    """Volumetric renderer with hierarchical (coarse + importance) sampling.

    Points are sampled along rays, decoded into color/density by `decoder`
    from tri-plane features, and integrated with MipRayMarcher2. Follows the
    NeRF two-pass scheme: a stratified coarse pass, then importance
    resampling of depths according to the coarse-pass weights.
    """
    def __init__(self):
        super().__init__()
        self.ray_marcher = MipRayMarcher2()  # volume-rendering integrator
        self.plane_axes = generate_planes()  # plane bases used by sample_from_planes

    def forward(self, planes, decoder, ray_origins, ray_directions, rendering_options):
        """Render a batch of rays; returns (rgb_final, depth_final, weights.sum(2)).

        planes: tri-plane features, passed through to run_model().
        decoder: module mapping (sampled_features, directions) -> {'rgb', 'sigma'}.
        ray_origins, ray_directions: (batch, num_rays, 3).
        rendering_options: dict; keys used here include 'ray_start'/'ray_end'
            (floats or 'auto'), 'box_warp', 'depth_resolution',
            'depth_resolution_importance', 'disparity_space_sampling'.
        """
        self.plane_axes = self.plane_axes.to(ray_origins.device)

        if rendering_options['ray_start'] == rendering_options['ray_end'] == 'auto':
            # Derive per-ray near/far bounds from the ray/box intersection.
            ray_start, ray_end = math_utils.get_ray_limits_box(ray_origins, ray_directions, box_side_length=rendering_options['box_warp'])
            is_ray_valid = ray_end > ray_start
            if torch.any(is_ray_valid).item():
                # Give rays that miss the box placeholder bounds taken from valid rays.
                # NOTE(review): ray_end is filled from ray_start's max (not ray_end's);
                # this matches the code as written -- confirm it is intended.
                ray_start[~is_ray_valid] = ray_start[is_ray_valid].min()
                ray_end[~is_ray_valid] = ray_start[is_ray_valid].max()
            depths_coarse = self.sample_stratified(ray_origins, ray_start, ray_end, rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])
        else:
            # Create stratified depth samples
            depths_coarse = self.sample_stratified(ray_origins, rendering_options['ray_start'], rendering_options['ray_end'], rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])

        batch_size, num_rays, samples_per_ray, _ = depths_coarse.shape

        # Coarse Pass
        sample_coordinates = (ray_origins.unsqueeze(-2) + depths_coarse * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3)
        sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, samples_per_ray, -1).reshape(batch_size, -1, 3)

        out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options)
        colors_coarse = out['rgb']
        densities_coarse = out['sigma']
        colors_coarse = colors_coarse.reshape(batch_size, num_rays, samples_per_ray, colors_coarse.shape[-1])
        densities_coarse = densities_coarse.reshape(batch_size, num_rays, samples_per_ray, 1)

        # Fine Pass
        N_importance = rendering_options['depth_resolution_importance']
        if N_importance > 0:
            # Use coarse weights as a proposal distribution for depth resampling.
            _, _, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)
            depths_fine = self.sample_importance(depths_coarse, weights, N_importance)

            sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, N_importance, -1).reshape(batch_size, -1, 3)
            sample_coordinates = (ray_origins.unsqueeze(-2) + depths_fine * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3)

            out = self.run_model(planes, decoder, sample_coordinates, sample_directions, rendering_options)
            colors_fine = out['rgb']
            densities_fine = out['sigma']
            colors_fine = colors_fine.reshape(batch_size, num_rays, N_importance, colors_fine.shape[-1])
            densities_fine = densities_fine.reshape(batch_size, num_rays, N_importance, 1)

            # Merge coarse and fine samples, sorted by depth along each ray.
            all_depths, all_colors, all_densities = self.unify_samples(depths_coarse, colors_coarse, densities_coarse,
                                                                       depths_fine, colors_fine, densities_fine)

            # Aggregate
            rgb_final, depth_final, weights = self.ray_marcher(all_colors, all_densities, all_depths, rendering_options)
        else:
            rgb_final, depth_final, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)

        return rgb_final, depth_final, weights.sum(2)

    def run_model(self, planes, decoder, sample_coordinates, sample_directions, options):
        """Sample tri-plane features at 3D points and decode them to {'rgb', 'sigma'}."""
        sampled_features = sample_from_planes(self.plane_axes, planes, sample_coordinates, padding_mode='zeros', box_warp=options['box_warp'])

        out = decoder(sampled_features, sample_directions)
        if options.get('density_noise', 0) > 0:
            # Optional regularization: perturb the predicted densities.
            out['sigma'] += torch.randn_like(out['sigma']) * options['density_noise']
        return out

    def sort_samples(self, all_depths, all_colors, all_densities):
        """Sort samples along each ray by depth (ascending), keeping tensors aligned."""
        _, indices = torch.sort(all_depths, dim=-2)
        all_depths = torch.gather(all_depths, -2, indices)
        all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))
        all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))
        return all_depths, all_colors, all_densities

    def unify_samples(self, depths1, colors1, densities1, depths2, colors2, densities2):
        """Concatenate two sample sets along the sample axis and sort by depth."""
        all_depths = torch.cat([depths1, depths2], dim = -2)
        all_colors = torch.cat([colors1, colors2], dim = -2)
        all_densities = torch.cat([densities1, densities2], dim = -2)

        _, indices = torch.sort(all_depths, dim=-2)
        all_depths = torch.gather(all_depths, -2, indices)
        all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))
        all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))

        return all_depths, all_colors, all_densities

    def sample_stratified(self, ray_origins, ray_start, ray_end, depth_resolution, disparity_space_sampling=False):
        """
        Return depths of approximately uniformly spaced samples along rays.
        Shape of the result: (N, M, depth_resolution, 1), jittered within each bin.
        """
        N, M, _ = ray_origins.shape
        if disparity_space_sampling:
            # Sample uniformly in disparity (1/depth) space.
            depths_coarse = torch.linspace(0,
                                           1,
                                           depth_resolution,
                                           device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)
            depth_delta = 1/(depth_resolution - 1)
            depths_coarse += torch.rand_like(depths_coarse) * depth_delta  # jitter within each bin
            depths_coarse = 1./(1./ray_start * (1. - depths_coarse) + 1./ray_end * depths_coarse)
        else:
            if type(ray_start) == torch.Tensor:
                # Per-ray tensor bounds (e.g. from the 'auto' box intersection).
                depths_coarse = math_utils.linspace(ray_start, ray_end, depth_resolution).permute(1,2,0,3)
                depth_delta = (ray_end - ray_start) / (depth_resolution - 1)
                depths_coarse += torch.rand_like(depths_coarse) * depth_delta[..., None]
            else:
                # Scalar bounds shared by every ray.
                depths_coarse = torch.linspace(ray_start, ray_end, depth_resolution, device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)
                depth_delta = (ray_end - ray_start)/(depth_resolution - 1)
                depths_coarse += torch.rand_like(depths_coarse) * depth_delta

        return depths_coarse

    def sample_importance(self, z_vals, weights, N_importance):
        """
        Return depths of importance sampled points along rays. See NeRF importance sampling for more.
        """
        with torch.no_grad():
            batch_size, num_rays, samples_per_ray, _ = z_vals.shape

            z_vals = z_vals.reshape(batch_size * num_rays, samples_per_ray)
            weights = weights.reshape(batch_size * num_rays, -1) # -1 to account for loss of 1 sample in MipRayMarcher

            # smooth weights (max pool then average pool over adjacent samples)
            weights = torch.nn.functional.max_pool1d(weights.unsqueeze(1).float(), 2, 1, padding=1)
            weights = torch.nn.functional.avg_pool1d(weights, 2, 1).squeeze()
            weights = weights + 0.01  # floor so no bin has zero probability

            z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:])
            importance_z_vals = self.sample_pdf(z_vals_mid, weights[:, 1:-1],
                                                N_importance).detach().reshape(batch_size, num_rays, N_importance, 1)
        return importance_z_vals

    def sample_pdf(self, bins, weights, N_importance, det=False, eps=1e-5):
        """
        Sample @N_importance samples from @bins with distribution defined by @weights.
        Inputs:
            bins: (N_rays, N_samples_+1) where N_samples_ is "the number of coarse samples per ray - 2"
            weights: (N_rays, N_samples_)
            N_importance: the number of samples to draw from the distribution
            det: deterministic or not
            eps: a small number to prevent division by zero
        Outputs:
            samples: the sampled samples
        """
        N_rays, N_samples_ = weights.shape
        weights = weights + eps # prevent division by zero (don't do inplace op!)
        pdf = weights / torch.sum(weights, -1, keepdim=True) # (N_rays, N_samples_)
        cdf = torch.cumsum(pdf, -1) # (N_rays, N_samples), cumulative distribution function
        cdf = torch.cat([torch.zeros_like(cdf[: ,:1]), cdf], -1)  # (N_rays, N_samples_+1)
        # padded to 0~1 inclusive

        if det:
            u = torch.linspace(0, 1, N_importance, device=bins.device)
            u = u.expand(N_rays, N_importance)
        else:
            u = torch.rand(N_rays, N_importance, device=bins.device)
        u = u.contiguous()

        # Invert the CDF: locate the bin each u falls into.
        inds = torch.searchsorted(cdf, u, right=True)
        below = torch.clamp_min(inds-1, 0)
        above = torch.clamp_max(inds, N_samples_)

        inds_sampled = torch.stack([below, above], -1).view(N_rays, 2*N_importance)
        cdf_g = torch.gather(cdf, 1, inds_sampled).view(N_rays, N_importance, 2)
        bins_g = torch.gather(bins, 1, inds_sampled).view(N_rays, N_importance, 2)

        denom = cdf_g[...,1]-cdf_g[...,0]
        denom[denom<eps] = 1 # denom equals 0 means a bin has weight 0, in which case it will not be sampled
                             # anyway, therefore any value for it is fine (set to 1 here)
        # Linear interpolation within the selected bin.
        samples = bins_g[...,0] + (u-cdf_g[...,0])/denom * (bins_g[...,1]-bins_g[...,0])
        return samples
class ImportanceSemanticRenderer(torch.nn.Module):
    """Variant of ImportanceRenderer that renders texture and semantics jointly.

    Uses two tri-plane sets and two decoders: the semantic decoder supplies
    density ('sigma') and the semantic channels; the texture decoder supplies
    RGB from the concatenation of texture and semantic features. RGB and
    semantic channels are concatenated and composited together.
    """
    def __init__(self):
        super().__init__()
        self.ray_marcher = MipRayMarcher2()  # volume-rendering integrator
        self.plane_axes = generate_planes()  # plane bases used by sample_from_planes

    def forward(self, planes_texture, planes_semantic, decoder_texture, decoder_semantic, ray_origins, ray_directions, rendering_options):
        """Render a batch of rays; returns (feature_final, depth_final, weights.sum(2)).

        feature_final stacks composited RGB and semantic channels along the
        last dimension (RGB first -- see the torch.cat calls below).
        """
        self.plane_axes = self.plane_axes.to(ray_origins.device)

        if rendering_options['ray_start'] == rendering_options['ray_end'] == 'auto':
            # Derive per-ray near/far bounds from the ray/box intersection.
            ray_start, ray_end = math_utils.get_ray_limits_box(ray_origins, ray_directions, box_side_length=rendering_options['box_warp'])
            is_ray_valid = ray_end > ray_start
            if torch.any(is_ray_valid).item():
                # Give rays that miss the box placeholder bounds taken from valid rays.
                # NOTE(review): ray_end is filled from ray_start's max (not ray_end's);
                # this matches the code as written -- confirm it is intended.
                ray_start[~is_ray_valid] = ray_start[is_ray_valid].min()
                ray_end[~is_ray_valid] = ray_start[is_ray_valid].max()
            depths_coarse = self.sample_stratified(ray_origins, ray_start, ray_end, rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])
        else:
            # Create stratified depth samples
            depths_coarse = self.sample_stratified(ray_origins, rendering_options['ray_start'], rendering_options['ray_end'], rendering_options['depth_resolution'], rendering_options['disparity_space_sampling'])

        batch_size, num_rays, samples_per_ray, _ = depths_coarse.shape

        # Coarse Pass
        sample_coordinates = (ray_origins.unsqueeze(-2) + depths_coarse * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3)
        sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, samples_per_ray, -1).reshape(batch_size, -1, 3)

        out = self.run_model(planes_texture, planes_semantic, decoder_texture, decoder_semantic, sample_coordinates, sample_directions, rendering_options)
        colors_coarse = out['rgb']
        densities_coarse = out['sigma']
        semantics_coarse = out['semantic']
        colors_coarse = colors_coarse.reshape(batch_size, num_rays, samples_per_ray, colors_coarse.shape[-1])
        densities_coarse = densities_coarse.reshape(batch_size, num_rays, samples_per_ray, 1)
        semantics_coarse = semantics_coarse.reshape(batch_size, num_rays, samples_per_ray, semantics_coarse.shape[-1])
        # RGB and semantic channels are composited together as one feature vector.
        features_coarse = torch.cat([colors_coarse, semantics_coarse], -1)

        # Fine Pass
        N_importance = rendering_options['depth_resolution_importance']
        if N_importance > 0:
            # Importance weights are computed from RGB + density only.
            _, _, weights = self.ray_marcher(colors_coarse, densities_coarse, depths_coarse, rendering_options)
            depths_fine = self.sample_importance(depths_coarse, weights, N_importance)

            sample_directions = ray_directions.unsqueeze(-2).expand(-1, -1, N_importance, -1).reshape(batch_size, -1, 3)
            sample_coordinates = (ray_origins.unsqueeze(-2) + depths_fine * ray_directions.unsqueeze(-2)).reshape(batch_size, -1, 3)

            out = self.run_model(planes_texture, planes_semantic, decoder_texture, decoder_semantic, sample_coordinates, sample_directions, rendering_options)
            colors_fine = out['rgb']
            densities_fine = out['sigma']
            semantics_fine = out['semantic']
            colors_fine = colors_fine.reshape(batch_size, num_rays, N_importance, colors_fine.shape[-1])
            densities_fine = densities_fine.reshape(batch_size, num_rays, N_importance, 1)
            semantics_fine = semantics_fine.reshape(batch_size, num_rays, N_importance, semantics_fine.shape[-1])
            features_fine = torch.cat([colors_fine, semantics_fine], -1)

            # Merge coarse and fine samples, sorted by depth along each ray.
            all_depths, all_features, all_densities = self.unify_samples(depths_coarse, features_coarse, densities_coarse,
                                                                         depths_fine, features_fine, densities_fine)

            # Aggregate
            feature_final, depth_final, weights = self.ray_marcher(all_features, all_densities, all_depths, rendering_options)
        else:
            feature_final, depth_final, weights = self.ray_marcher(features_coarse, densities_coarse, depths_coarse, rendering_options)

        return feature_final, depth_final, weights.sum(2)

    def run_model(self, planes_texture, planes_semantic, decoder_texture, decoder_semantic, sample_coordinates, sample_directions, options):
        """Sample both tri-plane sets and decode to {'sigma', 'rgb', 'semantic'}.

        Density comes from the semantic decoder; the texture decoder sees the
        concatenated texture + semantic features. The semantic output reuses
        the semantic decoder's 'rgb' head.
        """
        sampled_features_texture = sample_from_planes(self.plane_axes, planes_texture, sample_coordinates, padding_mode='zeros', box_warp=options['box_warp'])
        sampled_features_semantic = sample_from_planes(self.plane_axes, planes_semantic, sample_coordinates, padding_mode='zeros', box_warp=options['box_warp'])

        out_semantic = decoder_semantic(sampled_features_semantic, sample_directions)
        out_texture = decoder_texture(torch.cat([sampled_features_texture, sampled_features_semantic], dim=-1), sample_directions)
        out = {'sigma': out_semantic['sigma'], 'rgb': out_texture['rgb'], 'semantic': out_semantic['rgb']}
        if options.get('density_noise', 0) > 0:
            # Optional regularization: perturb the predicted densities.
            out['sigma'] += torch.randn_like(out['sigma']) * options['density_noise']
        return out

    def sort_samples(self, all_depths, all_colors, all_densities):
        """Sort samples along each ray by depth (ascending), keeping tensors aligned."""
        _, indices = torch.sort(all_depths, dim=-2)
        all_depths = torch.gather(all_depths, -2, indices)
        all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))
        all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))
        return all_depths, all_colors, all_densities

    def unify_samples(self, depths1, colors1, densities1, depths2, colors2, densities2):
        """Concatenate two sample sets along the sample axis and sort by depth."""
        all_depths = torch.cat([depths1, depths2], dim = -2)
        all_colors = torch.cat([colors1, colors2], dim = -2)
        all_densities = torch.cat([densities1, densities2], dim = -2)

        _, indices = torch.sort(all_depths, dim=-2)
        all_depths = torch.gather(all_depths, -2, indices)
        all_colors = torch.gather(all_colors, -2, indices.expand(-1, -1, -1, all_colors.shape[-1]))
        all_densities = torch.gather(all_densities, -2, indices.expand(-1, -1, -1, 1))

        return all_depths, all_colors, all_densities

    def sample_stratified(self, ray_origins, ray_start, ray_end, depth_resolution, disparity_space_sampling=False):
        """
        Return depths of approximately uniformly spaced samples along rays.
        Shape of the result: (N, M, depth_resolution, 1), jittered within each bin.
        """
        N, M, _ = ray_origins.shape
        if disparity_space_sampling:
            # Sample uniformly in disparity (1/depth) space.
            depths_coarse = torch.linspace(0,
                                           1,
                                           depth_resolution,
                                           device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)
            depth_delta = 1/(depth_resolution - 1)
            depths_coarse += torch.rand_like(depths_coarse) * depth_delta  # jitter within each bin
            depths_coarse = 1./(1./ray_start * (1. - depths_coarse) + 1./ray_end * depths_coarse)
        else:
            if type(ray_start) == torch.Tensor:
                # Per-ray tensor bounds (e.g. from the 'auto' box intersection).
                depths_coarse = math_utils.linspace(ray_start, ray_end, depth_resolution).permute(1,2,0,3)
                depth_delta = (ray_end - ray_start) / (depth_resolution - 1)
                depths_coarse += torch.rand_like(depths_coarse) * depth_delta[..., None]
            else:
                # Scalar bounds shared by every ray.
                depths_coarse = torch.linspace(ray_start, ray_end, depth_resolution, device=ray_origins.device).reshape(1, 1, depth_resolution, 1).repeat(N, M, 1, 1)
                depth_delta = (ray_end - ray_start)/(depth_resolution - 1)
                depths_coarse += torch.rand_like(depths_coarse) * depth_delta

        return depths_coarse

    def sample_importance(self, z_vals, weights, N_importance):
        """
        Return depths of importance sampled points along rays. See NeRF importance sampling for more.
        """
        with torch.no_grad():
            batch_size, num_rays, samples_per_ray, _ = z_vals.shape

            z_vals = z_vals.reshape(batch_size * num_rays, samples_per_ray)
            weights = weights.reshape(batch_size * num_rays, -1) # -1 to account for loss of 1 sample in MipRayMarcher

            # smooth weights (max pool then average pool over adjacent samples)
            weights = torch.nn.functional.max_pool1d(weights.unsqueeze(1).float(), 2, 1, padding=1)
            weights = torch.nn.functional.avg_pool1d(weights, 2, 1).squeeze()
            weights = weights + 0.01  # floor so no bin has zero probability

            z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:])
            importance_z_vals = self.sample_pdf(z_vals_mid, weights[:, 1:-1],
                                                N_importance).detach().reshape(batch_size, num_rays, N_importance, 1)
        return importance_z_vals

    def sample_pdf(self, bins, weights, N_importance, det=False, eps=1e-5):
        """
        Sample @N_importance samples from @bins with distribution defined by @weights.
        Inputs:
            bins: (N_rays, N_samples_+1) where N_samples_ is "the number of coarse samples per ray - 2"
            weights: (N_rays, N_samples_)
            N_importance: the number of samples to draw from the distribution
            det: deterministic or not
            eps: a small number to prevent division by zero
        Outputs:
            samples: the sampled samples
        """
        N_rays, N_samples_ = weights.shape
        weights = weights + eps # prevent division by zero (don't do inplace op!)
        pdf = weights / torch.sum(weights, -1, keepdim=True) # (N_rays, N_samples_)
        cdf = torch.cumsum(pdf, -1) # (N_rays, N_samples), cumulative distribution function
        cdf = torch.cat([torch.zeros_like(cdf[: ,:1]), cdf], -1)  # (N_rays, N_samples_+1)
        # padded to 0~1 inclusive

        if det:
            u = torch.linspace(0, 1, N_importance, device=bins.device)
            u = u.expand(N_rays, N_importance)
        else:
            u = torch.rand(N_rays, N_importance, device=bins.device)
        u = u.contiguous()

        # Invert the CDF: locate the bin each u falls into.
        inds = torch.searchsorted(cdf, u, right=True)
        below = torch.clamp_min(inds-1, 0)
        above = torch.clamp_max(inds, N_samples_)

        inds_sampled = torch.stack([below, above], -1).view(N_rays, 2*N_importance)
        cdf_g = torch.gather(cdf, 1, inds_sampled).view(N_rays, N_importance, 2)
        bins_g = torch.gather(bins, 1, inds_sampled).view(N_rays, N_importance, 2)

        denom = cdf_g[...,1]-cdf_g[...,0]
        denom[denom<eps] = 1 # denom equals 0 means a bin has weight 0, in which case it will not be sampled
                             # anyway, therefore any value for it is fine (set to 1 here)
        # Linear interpolation within the selected bin.
        samples = bins_g[...,0] + (u-cdf_g[...,0])/denom * (bins_g[...,1]-bins_g[...,0])
        return samples
| 23,948 | 53.553531 | 211 | py |
pix2pix3D | pix2pix3D-main/training/volumetric_rendering/ray_sampler.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""
The ray sampler is a module that takes in camera matrices and resolution and batches of rays.
Expects cam2world matrices that use the OpenCV camera coordinate system conventions.
"""
import torch
class RaySampler(torch.nn.Module):
    """Generates per-pixel camera rays from cam2world matrices and intrinsics.

    Expects OpenCV camera conventions (see the module docstring).
    """
    def __init__(self):
        super().__init__()
        # Cached buffers; populated elsewhere if at all (unused in forward()).
        self.ray_origins_h, self.ray_directions, self.depths, self.image_coords, self.rendering_options = None, None, None, None, None

    def forward(self, cam2world_matrix, intrinsics, resolution):
        """
        Create batches of rays and return origins and directions.

        cam2world_matrix: (N, 4, 4)
        intrinsics: (N, 3, 3)
        resolution: int

        ray_origins: (N, M, 3)
        ray_dirs: (N, M, 3)  -- unit direction vectors (normalized below);
                                the original docstring said (N, M, 2), which
                                does not match the returned tensor.
        """
        N, M = cam2world_matrix.shape[0], resolution**2
        cam_locs_world = cam2world_matrix[:, :3, 3]
        # Intrinsics: focal lengths, principal point, and skew.
        fx = intrinsics[:, 0, 0]
        fy = intrinsics[:, 1, 1]
        cx = intrinsics[:, 0, 2]
        cy = intrinsics[:, 1, 2]
        sk = intrinsics[:, 0, 1]

        # Pixel-center UV coordinates in [0, 1): offset by half a pixel.
        uv = torch.stack(torch.meshgrid(torch.arange(resolution, dtype=torch.float32, device=cam2world_matrix.device), torch.arange(resolution, dtype=torch.float32, device=cam2world_matrix.device), indexing='ij')) * (1./resolution) + (0.5/resolution)
        uv = uv.flip(0).reshape(2, -1).transpose(1, 0)
        uv = uv.unsqueeze(0).repeat(cam2world_matrix.shape[0], 1, 1)

        x_cam = uv[:, :, 0].view(N, -1)
        y_cam = uv[:, :, 1].view(N, -1)
        z_cam = torch.ones((N, M), device=cam2world_matrix.device)

        # Unproject pixels to camera-space points at depth 1 (inverse intrinsics
        # with skew handled explicitly).
        x_lift = (x_cam - cx.unsqueeze(-1) + cy.unsqueeze(-1)*sk.unsqueeze(-1)/fy.unsqueeze(-1) - sk.unsqueeze(-1)*y_cam/fy.unsqueeze(-1)) / fx.unsqueeze(-1) * z_cam
        y_lift = (y_cam - cy.unsqueeze(-1)) / fy.unsqueeze(-1) * z_cam

        # Homogeneous camera-space points -> world space.
        cam_rel_points = torch.stack((x_lift, y_lift, z_cam, torch.ones_like(z_cam)), dim=-1)

        world_rel_points = torch.bmm(cam2world_matrix, cam_rel_points.permute(0, 2, 1)).permute(0, 2, 1)[:, :, :3]

        ray_dirs = world_rel_points - cam_locs_world[:, None, :]
        ray_dirs = torch.nn.functional.normalize(ray_dirs, dim=2)

        ray_origins = cam_locs_world.unsqueeze(1).repeat(1, ray_dirs.shape[1], 1)

        return ray_origins, ray_dirs
pix2pix3D | pix2pix3D-main/training/volumetric_rendering/ray_marcher.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""
The ray marcher takes the raw output of the implicit representation and uses the volume rendering equation to produce composited colors and depths.
Based off of the implementation in MipNeRF (this one doesn't do any cone tracing though!)
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class MipRayMarcher2(nn.Module):
    """Composites per-sample colors and densities along rays into RGB + depth.

    Implements the standard volume-rendering quadrature evaluated at segment
    midpoints (as in MipNeRF, without cone tracing). Returns RGB scaled to
    (-1, 1), composited depth clamped to the input depth range, and the
    per-sample compositing weights.
    """
    def __init__(self):
        super().__init__()

    def run_forward(self, colors, densities, depths, rendering_options):
        # Spacing between consecutive depth samples along each ray.
        deltas = depths[:, :, 1:] - depths[:, :, :-1]

        # Evaluate all quantities at segment midpoints.
        mid_colors = (colors[:, :, :-1] + colors[:, :, 1:]) / 2
        mid_densities = (densities[:, :, :-1] + densities[:, :, 1:]) / 2
        mid_depths = (depths[:, :, :-1] + depths[:, :, 1:]) / 2

        if rendering_options['clamp_mode'] == 'softplus':
            # activation bias of -1 makes things initialize better
            mid_densities = F.softplus(mid_densities - 1)
        else:
            assert False, "MipRayMarcher only supports `clamp_mode`=`softplus`!"

        # Per-segment opacity from optical depth.
        opacity = 1 - torch.exp(-(mid_densities * deltas))

        # Transmittance via cumulative product of (1 - opacity), shifted by one
        # so each sample sees the transparency of everything in front of it.
        shifted = torch.cat([torch.ones_like(opacity[:, :, :1]), 1 - opacity + 1e-10], -2)
        weights = opacity * torch.cumprod(shifted, -2)[:, :, :-1]

        composite_rgb = torch.sum(weights * mid_colors, -2)
        weight_total = weights.sum(2)
        # Kangle: I don't think this is correct (normalized weights)
        composite_depth = torch.sum(weights * mid_depths, -2) / weight_total

        # clip the composite to min/max range of depths
        composite_depth = torch.nan_to_num(composite_depth, float('inf'))
        composite_depth = torch.clamp(composite_depth, torch.min(depths), torch.max(depths))

        if rendering_options.get('white_back', False):
            composite_rgb = composite_rgb + 1 - weight_total

        composite_rgb = composite_rgb * 2 - 1 # Scale to (-1, 1)

        return composite_rgb, composite_depth, weights

    def forward(self, colors, densities, depths, rendering_options):
        """Volume-render one batch; returns (composite_rgb, composite_depth, weights)."""
        return self.run_forward(colors, densities, depths, rendering_options)
pix2pix3D | pix2pix3D-main/training/volumetric_rendering/math_utils.py | # MIT License
# Copyright (c) 2022 Petr Kellnhofer
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
def transform_vectors(matrix: torch.Tensor, vectors4: torch.Tensor) -> torch.Tensor:
    """
    Left-multiplies MxM @ NxM. Returns NxM.
    (Each row of `vectors4` is transformed by `matrix`.)
    """
    return vectors4 @ matrix.T
def normalize_vecs(vectors: torch.Tensor) -> torch.Tensor:
    """
    Normalize vector lengths (divide each vector by its L2 norm along the last dim).
    """
    lengths = torch.norm(vectors, dim=-1, keepdim=True)
    return vectors / lengths
def torch_dot(x: torch.Tensor, y: torch.Tensor):
    """
    Dot product of two tensors (elementwise product summed over the last dim).
    """
    return torch.sum(x * y, dim=-1)
def get_ray_limits_box(rays_o: torch.Tensor, rays_d: torch.Tensor, box_side_length):
    """
    Author: Petr Kellnhofer
    Intersects rays with the [-1, 1] NDC volume (scaled by box_side_length/2).
    Returns min and max distance of entry.
    Returns sentinel values (tmin=-1, tmax=-2) for rays with no intersection.
    https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
    """
    o_shape = rays_o.shape
    flat_o = rays_o.detach().reshape(-1, 3)
    flat_d = rays_d.detach().reshape(-1, 3)

    half = box_side_length / 2
    # Box corners: row 0 = min corner, row 1 = max corner.
    bounds = torch.tensor([[-half, -half, -half], [half, half, half]],
                          dtype=flat_o.dtype, device=flat_o.device)
    is_valid = torch.ones(flat_o.shape[:-1], dtype=bool, device=flat_o.device)

    # Precompute inverse direction for stability.
    invdir = 1 / flat_d
    sign = (invdir < 0).long()

    # Slab test, one axis at a time.
    tmin = tmax = None
    for axis in range(3):
        near = (bounds.index_select(0, sign[..., axis])[..., axis] - flat_o[..., axis]) * invdir[..., axis]
        far = (bounds.index_select(0, 1 - sign[..., axis])[..., axis] - flat_o[..., axis]) * invdir[..., axis]
        if tmin is None:
            tmin, tmax = near, far
        else:
            # Non-overlapping slab intervals mean the ray misses the box.
            is_valid[torch.logical_or(tmin > far, near > tmax)] = False
            # Keep the tightest (shortest) intersection interval.
            tmin = torch.max(tmin, near)
            tmax = torch.min(tmax, far)

    # Mark misses with sentinel values.
    tmin[torch.logical_not(is_valid)] = -1
    tmax[torch.logical_not(is_valid)] = -2

    return tmin.reshape(*o_shape[:-1], 1), tmax.reshape(*o_shape[:-1], 1)
def linspace(start: torch.Tensor, stop: torch.Tensor, num: int):
    """
    Creates a tensor of shape [num, *start.shape] whose values are evenly spaced from start to end, inclusive.
    Replicates the multi-dimensional behaviour of numpy.linspace in PyTorch.
    """
    # Interpolation fractions 0, 1/(num-1), ..., 1 along a new leading axis.
    steps = torch.arange(num, dtype=torch.float32, device=start.device) / (num - 1)

    # Append one singleton dim per dimension of `start` so `steps` broadcasts
    # against start/stop. A reshape with a computed list would be nicer, but
    # torchscript "cannot statically infer the expected size of a list in this
    # context", hence the explicit loop.
    for _ in range(start.ndim):
        steps = steps[..., None]

    # Interpolate from `start` to `stop` in every dimension.
    return start[None] + steps * (stop - start)[None]
| 4,708 | 38.571429 | 124 | py |
pix2pix3D | pix2pix3D-main/torch_utils/custom_ops.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
import glob
import hashlib
import importlib
import os
import re
import shutil
import uuid
import torch
import torch.utils.cpp_extension
from torch.utils.file_baton import FileBaton
#----------------------------------------------------------------------------
# Global options.
verbosity = 'brief' # Verbosity level: 'none', 'brief', 'full'. Controls status printing in get_plugin() below.
#----------------------------------------------------------------------------
# Internal helper funcs.
def _find_compiler_bindir():
patterns = [
'C:/Program Files (x86)/Microsoft Visual Studio/*/Professional/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/BuildTools/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio/*/Community/VC/Tools/MSVC/*/bin/Hostx64/x64',
'C:/Program Files (x86)/Microsoft Visual Studio */vc/bin',
]
for pattern in patterns:
matches = sorted(glob.glob(pattern))
if len(matches):
return matches[-1]
return None
#----------------------------------------------------------------------------
def _get_mangled_gpu_name():
    """Return the current CUDA device name, lowercased, with every character
    outside [a-z0-9_-] replaced by '-' (safe for use in directory names)."""
    raw = torch.cuda.get_device_name().lower()
    return ''.join(c if re.match('[a-z0-9_-]+', c) else '-' for c in raw)
#----------------------------------------------------------------------------
# Main entry point for compiling and loading C++/CUDA plugins.
_cached_plugins = dict() # module_name -> loaded extension module; populated by get_plugin() below.
def get_plugin(module_name, sources, headers=None, source_dir=None, **build_kwargs):
    """Compile and load a C++/CUDA PyTorch extension, with per-name caching.

    module_name:  import name of the compiled extension (cached after first call).
    sources:      source file names; headers: additional header files.
    source_dir:   if given, sources/headers are resolved relative to it.
    build_kwargs: forwarded to torch.utils.cpp_extension.load().
    Returns the imported extension module. Raises on compilation failure.
    """
    assert verbosity in ['none', 'brief', 'full']
    if headers is None:
        headers = []
    if source_dir is not None:
        sources = [os.path.join(source_dir, fname) for fname in sources]
        headers = [os.path.join(source_dir, fname) for fname in headers]

    # Already cached?
    if module_name in _cached_plugins:
        return _cached_plugins[module_name]

    # Print status.
    if verbosity == 'full':
        print(f'Setting up PyTorch plugin "{module_name}"...')
    elif verbosity == 'brief':
        print(f'Setting up PyTorch plugin "{module_name}"... ', end='', flush=True)
    verbose_build = (verbosity == 'full')

    # Compile and load.
    try: # pylint: disable=too-many-nested-blocks
        # Make sure we can find the necessary compiler binaries.
        if os.name == 'nt' and os.system("where cl.exe >nul 2>nul") != 0:
            compiler_bindir = _find_compiler_bindir()
            if compiler_bindir is None:
                raise RuntimeError(f'Could not find MSVC/GCC/CLANG installation on this computer. Check _find_compiler_bindir() in "{__file__}".')
            os.environ['PATH'] += ';' + compiler_bindir

        # Some containers set TORCH_CUDA_ARCH_LIST to a list that can either
        # break the build or unnecessarily restrict what's available to nvcc.
        # Unset it to let nvcc decide based on what's available on the
        # machine.
        os.environ['TORCH_CUDA_ARCH_LIST'] = ''

        # Incremental build md5sum trickery. Copies all the input source files
        # into a cached build directory under a combined md5 digest of the input
        # source files. Copying is done only if the combined digest has changed.
        # This keeps input file timestamps and filenames the same as in previous
        # extension builds, allowing for fast incremental rebuilds.
        #
        # This optimization is done only in case all the source files reside in
        # a single directory (just for simplicity) and if the TORCH_EXTENSIONS_DIR
        # environment variable is set (we take this as a signal that the user
        # actually cares about this.)
        #
        # EDIT: We now do it regardless of TORCH_EXTENSIOS_DIR, in order to work
        # around the *.cu dependency bug in ninja config.
        #
        all_source_files = sorted(sources + headers)
        all_source_dirs = set(os.path.dirname(fname) for fname in all_source_files)
        if len(all_source_dirs) == 1: # and ('TORCH_EXTENSIONS_DIR' in os.environ):

            # Compute combined hash digest for all source files.
            hash_md5 = hashlib.md5()
            for src in all_source_files:
                with open(src, 'rb') as f:
                    hash_md5.update(f.read())

            # Select cached build directory name.
            source_digest = hash_md5.hexdigest()
            build_top_dir = torch.utils.cpp_extension._get_build_directory(module_name, verbose=verbose_build) # pylint: disable=protected-access
            cached_build_dir = os.path.join(build_top_dir, f'{source_digest}-{_get_mangled_gpu_name()}')

            if not os.path.isdir(cached_build_dir):
                # Stage sources in a temp dir, then atomically rename into place
                # so concurrent builders cannot observe a half-copied directory.
                tmpdir = f'{build_top_dir}/srctmp-{uuid.uuid4().hex}'
                os.makedirs(tmpdir)
                for src in all_source_files:
                    shutil.copyfile(src, os.path.join(tmpdir, os.path.basename(src)))
                try:
                    os.replace(tmpdir, cached_build_dir) # atomic
                except OSError:
                    # source directory already exists, delete tmpdir and its contents.
                    shutil.rmtree(tmpdir)
                    if not os.path.isdir(cached_build_dir): raise

            # Compile.
            cached_sources = [os.path.join(cached_build_dir, os.path.basename(fname)) for fname in sources]
            torch.utils.cpp_extension.load(name=module_name, build_directory=cached_build_dir,
                                           verbose=verbose_build, sources=cached_sources, **build_kwargs)
        else:
            torch.utils.cpp_extension.load(name=module_name, verbose=verbose_build, sources=sources, **build_kwargs)

        # Load.
        module = importlib.import_module(module_name)

    except:
        if verbosity == 'brief':
            print('Failed!')
        raise

    # Print status and add to cache dict.
    if verbosity == 'full':
        print(f'Done setting up PyTorch plugin "{module_name}".')
    elif verbosity == 'brief':
        print('Done.')
    _cached_plugins[module_name] = module
    return module
#----------------------------------------------------------------------------
| 6,780 | 41.38125 | 146 | py |
pix2pix3D | pix2pix3D-main/torch_utils/training_stats.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Facilities for reporting and collecting training statistics across
multiple processes and devices. The interface is designed to minimize
synchronization overhead as well as the amount of boilerplate in user
code."""
import re
import numpy as np
import torch
import dnnlib
from . import misc
#----------------------------------------------------------------------------
_num_moments = 3 # [num_scalars, sum_of_scalars, sum_of_squares]
_reduce_dtype = torch.float32 # Data type to use for initial per-tensor reduction.
_counter_dtype = torch.float64 # Data type to use for the internal counters.
_rank = 0 # Rank of the current process.
_sync_device = None # Device to use for multiprocess communication. None = single-process.
_sync_called = False # Has _sync() been called yet?
_counters = dict() # Running counters on each device, updated by report(): name => device => torch.Tensor
_cumulative = dict() # Cumulative counters on the CPU, updated by _sync(): name => torch.Tensor
#----------------------------------------------------------------------------
def init_multiprocessing(rank, sync_device):
    r"""Configure `torch_utils.training_stats` for multi-process collection.

    Must be called after `torch.distributed.init_process_group()` and
    before the first `Collector.update()`. The call can be skipped
    entirely when multi-process collection is not needed.

    Args:
        rank:        Rank of the calling process.
        sync_device: PyTorch device used for inter-process communication
                     (typically `torch.device('cuda', rank)`), or None to
                     disable multi-process collection.
    """
    global _rank, _sync_device
    assert not _sync_called  # must be configured before any synchronization
    _sync_device = sync_device
    _rank = rank
#----------------------------------------------------------------------------
@misc.profiled_function
def report(name, value):
    r"""Accumulate the given set of scalars under `name`, to be picked up
    later by any interested `Collector`, across devices and processes.

    Designed to be extremely cheap, so it can be called from anywhere in
    the training loop, a loss function, or inside a `torch.nn.Module`.

    Warning: the set of unique names must be consistent across processes.
    Each process should call `report()` at least once per unique name, in
    the same order; a process with nothing to contribute can pass an
    empty list.

    Args:
        name:   Arbitrary string naming the statistic. Averages are
                accumulated separately per unique name.
        value:  Arbitrary set of scalars: list, tuple, NumPy array,
                PyTorch tensor, or Python scalar.

    Returns:
        The same `value` that was passed in.
    """
    _counters.setdefault(name, dict())
    elems = torch.as_tensor(value)
    if elems.numel() == 0:
        return value

    # Reduce to the moment vector [count, sum, sum-of-squares] on the
    # value's own device, then widen to the counter dtype.
    flat = elems.detach().flatten().to(_reduce_dtype)
    moments = torch.stack([
        torch.ones_like(flat).sum(),
        flat.sum(),
        flat.square().sum(),
    ])
    assert moments.ndim == 1 and moments.shape[0] == _num_moments
    moments = moments.to(_counter_dtype)

    per_device = _counters[name]
    dev = moments.device
    if dev not in per_device:
        per_device[dev] = torch.zeros_like(moments)
    per_device[dev].add_(moments)
    return value
#----------------------------------------------------------------------------
def report0(name, value):
    r"""Like `report()`, but only the first process (`rank = 0`)
    contributes its scalars; all other ranks report an empty set.
    See `report()` for further details.
    """
    scalars = value if _rank == 0 else []
    report(name, scalars)
    return value
#----------------------------------------------------------------------------
class Collector:
    r"""Aggregates the scalars broadcast via `report()` / `report0()` and
    exposes their long-term averages (mean and standard deviation) over
    user-defined periods of time.

    Scalars first accumulate in internal counters that are not directly
    visible to the user. `update()` copies the deltas accumulated since
    the previous `update()` into the user-visible state — queried via
    `mean()`, `std()`, `as_dict()`, etc. — and resets the counters for
    the next round. The visible state therefore reflects the averages
    collected between the last two calls to `update()`.

    Args:
        regex:         Regular expression selecting which statistics to
                       collect. Default: collect everything.
        keep_previous: Whether to retain previous averages for statistics
                       that received no scalars in a given round
                       (default: True).
    """
    def __init__(self, regex='.*', keep_previous=True):
        self._regex = re.compile(regex)
        self._keep_previous = keep_previous
        self._cumulative = dict()
        self._moments = dict()
        self.update()
        self._moments.clear()

    def names(self):
        r"""Names of all statistics broadcast so far that match the
        regular expression given at construction time.
        """
        return [n for n in _counters if self._regex.fullmatch(n)]

    def update(self):
        r"""Copy the internal counters to the user-visible state and
        reset them for the next round.

        With `keep_previous=True`, statistics that received no scalars
        since the last update retain their previous averages.

        Performs a number of GPU-to-CPU transfers and one
        `torch.distributed.all_reduce()`; intended to be called
        periodically in the main training loop, typically once every
        N training steps.
        """
        if not self._keep_previous:
            self._moments.clear()
        for name, cumulative in _sync(self.names()):
            prev = self._cumulative.setdefault(name, torch.zeros([_num_moments], dtype=_counter_dtype))
            delta = cumulative - prev
            prev.copy_(cumulative)
            if float(delta[0]) != 0:
                self._moments[name] = delta

    def _get_delta(self, name):
        r"""Raw moments accumulated for `name` between the last two
        calls to `update()`, or zeros if nothing was collected.
        """
        assert self._regex.fullmatch(name)
        if name not in self._moments:
            self._moments[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        return self._moments[name]

    def num(self, name):
        r"""Number of scalars accumulated for `name` between the last
        two calls to `update()`, or zero if nothing was collected.
        """
        return int(self._get_delta(name)[0])

    def mean(self, name):
        r"""Mean of the scalars accumulated for `name` between the last
        two calls to `update()`, or NaN if nothing was collected.
        """
        delta = self._get_delta(name)
        if int(delta[0]) == 0:
            return float('nan')
        return float(delta[1] / delta[0])

    def std(self, name):
        r"""Standard deviation of the scalars accumulated for `name`
        between the last two calls to `update()`, or NaN if nothing was
        collected.
        """
        delta = self._get_delta(name)
        count = int(delta[0])
        if count == 0 or not np.isfinite(float(delta[1])):
            return float('nan')
        if count == 1:
            return float(0)
        mean = float(delta[1] / delta[0])
        raw_var = float(delta[2] / delta[0])
        return np.sqrt(max(raw_var - np.square(mean), 0))

    def as_dict(self):
        r"""Averages accumulated between the last two calls to
        `update()`, packaged as:

            dnnlib.EasyDict(
                NAME = dnnlib.EasyDict(num=FLOAT, mean=FLOAT, std=FLOAT),
                ...
            )
        """
        stats = dnnlib.EasyDict()
        for name in self.names():
            stats[name] = dnnlib.EasyDict(num=self.num(name), mean=self.mean(name), std=self.std(name))
        return stats

    def __getitem__(self, name):
        r"""`collector[name]` is shorthand for `collector.mean(name)`."""
        return self.mean(name)
#----------------------------------------------------------------------------
def _sync(names):
    r"""Synchronize the global cumulative counters across devices and
    processes. Called internally by `Collector.update()`.
    """
    if not names:
        return []
    global _sync_called
    _sync_called = True
    device = torch.device('cpu') if _sync_device is None else _sync_device

    # Fold all per-device counters of the current rank into one delta
    # per name, zeroing the counters as we go.
    deltas = []
    for name in names:
        total = torch.zeros([_num_moments], dtype=_counter_dtype, device=device)
        for counter in _counters[name].values():
            total.add_(counter.to(device))
            counter.copy_(torch.zeros_like(counter))
        deltas.append(total)
    deltas = torch.stack(deltas)

    # Sum the deltas across ranks.
    if _sync_device is not None:
        torch.distributed.all_reduce(deltas)

    # Fold into the CPU-side cumulative counters.
    deltas = deltas.cpu()
    for idx, name in enumerate(names):
        if name not in _cumulative:
            _cumulative[name] = torch.zeros([_num_moments], dtype=_counter_dtype)
        _cumulative[name].add_(deltas[idx])

    # Return name-value pairs.
    return [(name, _cumulative[name]) for name in names]
#----------------------------------------------------------------------------
| 10,834 | 38.98155 | 118 | py |
pix2pix3D | pix2pix3D-main/torch_utils/persistence.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Facilities for pickling Python code alongside other data.
The pickled code is automatically imported into a separate Python module
during unpickling. This way, any previously exported pickles will remain
usable even if the original code is no longer available, or if the current
version of the code is not consistent with what was originally pickled."""
import sys
import pickle
import io
import inspect
import copy
import uuid
import types
import dnnlib
#----------------------------------------------------------------------------
_version = 6 # internal version number
_decorators = set() # {decorator_class, ...}
_import_hooks = [] # [hook_function, ...]
_module_to_src_dict = dict() # {module: src, ...}
_src_to_module_dict = dict() # {src: module, ...}
#----------------------------------------------------------------------------
def persistent_class(orig_class):
    r"""Class decorator that extends a given class to save its source code
    when pickled.
    Example:
        from torch_utils import persistence
        @persistence.persistent_class
        class MyNetwork(torch.nn.Module):
            def __init__(self, num_inputs, num_outputs):
                super().__init__()
                self.fc = MyLayer(num_inputs, num_outputs)
                ...
        @persistence.persistent_class
        class MyLayer(torch.nn.Module):
            ...
    When pickled, any instance of `MyNetwork` and `MyLayer` will save its
    source code alongside other internal state (e.g., parameters, buffers,
    and submodules). This way, any previously exported pickle will remain
    usable even if the class definitions have been modified or are no
    longer available.
    The decorator saves the source code of the entire Python module
    containing the decorated class. It does *not* save the source code of
    any imported modules. Thus, the imported modules must be available
    during unpickling, also including `torch_utils.persistence` itself.
    It is ok to call functions defined in the same module from the
    decorated class. However, if the decorated class depends on other
    classes defined in the same module, they must be decorated as well.
    This is illustrated in the above example in the case of `MyLayer`.
    It is also possible to employ the decorator just-in-time before
    calling the constructor. For example:
        cls = MyLayer
        if want_to_make_it_persistent:
            cls = persistence.persistent_class(cls)
        layer = cls(num_inputs, num_outputs)
    As an additional feature, the decorator also keeps track of the
    arguments that were used to construct each instance of the decorated
    class. The arguments can be queried via `obj.init_args` and
    `obj.init_kwargs`, and they are automatically pickled alongside other
    object state. A typical use case is to first unpickle a previous
    instance of a persistent class, and then upgrade it to use the latest
    version of the source code:
        with open('old_pickle.pkl', 'rb') as f:
            old_net = pickle.load(f)
        new_net = MyNetwork(*old_obj.init_args, **old_obj.init_kwargs)
        misc.copy_params_and_buffers(old_net, new_net, require_all=True)
    """
    assert isinstance(orig_class, type)
    # Idempotent: decorating an already-persistent class is a no-op.
    if is_persistent(orig_class):
        return orig_class
    assert orig_class.__module__ in sys.modules
    # Snapshot the full source code of the module that defines the class.
    orig_module = sys.modules[orig_class.__module__]
    orig_module_src = _module_to_src(orig_module)
    class Decorator(orig_class):
        # Captured at decoration time; embedded into pickles by __reduce__().
        _orig_module_src = orig_module_src
        _orig_class_name = orig_class.__name__
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            # Record constructor arguments so the instance can be re-created
            # later via `cls(*obj.init_args, **obj.init_kwargs)`.
            self._init_args = copy.deepcopy(args)
            self._init_kwargs = copy.deepcopy(kwargs)
            assert orig_class.__name__ in orig_module.__dict__
            # Fail fast at construction if anything reachable from
            # __reduce__() cannot be pickled.
            _check_pickleable(self.__reduce__())
        @property
        def init_args(self):
            # Deep-copied so callers cannot mutate the recorded arguments.
            return copy.deepcopy(self._init_args)
        @property
        def init_kwargs(self):
            return dnnlib.EasyDict(copy.deepcopy(self._init_kwargs))
        def __reduce__(self):
            # Re-route unpickling through _reconstruct_persistent_obj(),
            # embedding the module source and class name in the metadata.
            fields = list(super().__reduce__())
            fields += [None] * max(3 - len(fields), 0)
            if fields[0] is not _reconstruct_persistent_obj:
                meta = dict(type='class', version=_version, module_src=self._orig_module_src, class_name=self._orig_class_name, state=fields[2])
                fields[0] = _reconstruct_persistent_obj # reconstruct func
                fields[1] = (meta,) # reconstruct args
                fields[2] = None # state dict
            return tuple(fields)
    Decorator.__name__ = orig_class.__name__
    _decorators.add(Decorator)
    return Decorator
#----------------------------------------------------------------------------
def is_persistent(obj):
    r"""Return True if the given object or class is persistent, i.e.
    will embed its source code when pickled.
    """
    is_decorator_class = False
    try:
        is_decorator_class = obj in _decorators
    except TypeError:
        pass  # unhashable objects cannot be decorator classes; check their type
    return is_decorator_class or type(obj) in _decorators # pylint: disable=unidiomatic-typecheck
#----------------------------------------------------------------------------
def import_hook(hook):
    r"""Register a callable invoked whenever a persistent object is being
    unpickled. A typical use case is to patch the pickled source code so
    that it keeps working after the API of an imported module has changed.

    Expected signature:
        hook(meta) -> modified meta

    `meta` is a `dnnlib.EasyDict` with the following fields:
        type:       Type of the persistent object, e.g. `'class'`.
        version:    Internal version number of `torch_utils.persistence`.
        module_src  Original source code of the Python module.
        class_name: Class name in the original Python module.
        state:      Internal state of the object.

    Example:
        @persistence.import_hook
        def wreck_my_network(meta):
            if meta.class_name == 'MyNetwork':
                print('MyNetwork is being imported. I will wreck it!')
                meta.module_src = meta.module_src.replace("True", "False")
            return meta
    """
    assert callable(hook)
    _import_hooks.append(hook)
#----------------------------------------------------------------------------
def _reconstruct_persistent_obj(meta):
    r"""Hook called internally by the `pickle` module to rebuild a
    persistent object from its pickled metadata.
    """
    meta = dnnlib.EasyDict(meta)
    meta.state = dnnlib.EasyDict(meta.state)
    # Give registered import hooks a chance to patch the metadata.
    for hook in _import_hooks:
        meta = hook(meta)
        assert meta is not None
    assert meta.version == _version

    # Re-materialize the class from the embedded module source.
    module = _src_to_module(meta.module_src)
    assert meta.type == 'class'
    decorator_class = persistent_class(module.__dict__[meta.class_name])
    obj = decorator_class.__new__(decorator_class)

    # Restore state, preferring the object's own __setstate__ as pickle does.
    setstate = getattr(obj, '__setstate__', None)
    if callable(setstate):
        setstate(meta.state) # pylint: disable=not-callable
    else:
        obj.__dict__.update(meta.state)
    return obj
#----------------------------------------------------------------------------
def _module_to_src(module):
    r"""Return the source code of the given Python module, caching the
    result (and the reverse mapping) for reuse.
    """
    if module in _module_to_src_dict:
        return _module_to_src_dict[module]
    src = inspect.getsource(module)
    _module_to_src_dict[module] = src
    _src_to_module_dict[src] = module
    return src
def _src_to_module(src):
    r"""Return (creating on first use) a Python module whose contents are
    the result of executing the given source code.
    """
    cached = _src_to_module_dict.get(src, None)
    if cached is not None:
        return cached
    # Fresh uniquely-named module, registered in sys.modules and both caches.
    module_name = "_imported_module_" + uuid.uuid4().hex
    module = types.ModuleType(module_name)
    sys.modules[module_name] = module
    _module_to_src_dict[module] = src
    _src_to_module_dict[src] = module
    exec(src, module.__dict__) # pylint: disable=exec-used
    return module
#----------------------------------------------------------------------------
def _check_pickleable(obj):
r"""Check that the given object is pickleable, raising an exception if
it is not. This function is expected to be considerably more efficient
than actually pickling the object.
"""
def recurse(obj):
if isinstance(obj, (list, tuple, set)):
return [recurse(x) for x in obj]
if isinstance(obj, dict):
return [[recurse(x), recurse(y)] for x, y in obj.items()]
if isinstance(obj, (str, int, float, bool, bytes, bytearray)):
return None # Python primitive types are pickleable.
if f'{type(obj).__module__}.{type(obj).__name__}' in ['numpy.ndarray', 'torch.Tensor', 'torch.nn.parameter.Parameter']:
return None # NumPy arrays and PyTorch tensors are pickleable.
if is_persistent(obj):
return None # Persistent objects are pickleable, by virtue of the constructor check.
return obj
with io.BytesIO() as f:
pickle.dump(recurse(obj), f)
#----------------------------------------------------------------------------
| 9,866 | 37.846457 | 144 | py |
pix2pix3D | pix2pix3D-main/torch_utils/misc.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
import re
import contextlib
import numpy as np
import torch
import warnings
import dnnlib
#----------------------------------------------------------------------------
# Cached construction of constant tensors. Avoids CPU=>GPU copy when the
# same constant is used multiple times.
_constant_cache = dict()
def constant(value, shape=None, dtype=None, device=None, memory_format=None):
    """Return a cached constant tensor for `value`, avoiding repeated
    CPU=>GPU copies when the same constant is requested multiple times.
    """
    value = np.asarray(value)
    shape = tuple(shape) if shape is not None else None
    dtype = dtype if dtype is not None else torch.get_default_dtype()
    device = device if device is not None else torch.device('cpu')
    memory_format = memory_format if memory_format is not None else torch.contiguous_format

    # Key captures both the value bytes and the requested layout/placement.
    key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format)
    tensor = _constant_cache.get(key, None)
    if tensor is None:
        tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device)
        if shape is not None:
            # Broadcast against an empty tensor to expand to the target shape.
            tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape))
        tensor = tensor.contiguous(memory_format=memory_format)
        _constant_cache[key] = tensor
    return tensor
#----------------------------------------------------------------------------
# Replace NaN/Inf with specified numerical values.
# Uses the native torch.nan_to_num when available (PyTorch >= 1.8.0a0);
# otherwise falls back to a tensor-op emulation with the same signature.
try:
    nan_to_num = torch.nan_to_num # 1.8.0a0
except AttributeError:
    def nan_to_num(input, nan=0.0, posinf=None, neginf=None, *, out=None): # pylint: disable=redefined-builtin
        assert isinstance(input, torch.Tensor)
        # Default the clamping bounds to the dtype's representable range.
        if posinf is None:
            posinf = torch.finfo(input.dtype).max
        if neginf is None:
            neginf = torch.finfo(input.dtype).min
        # The fallback only supports nan=0 (nansum() maps NaN to 0).
        assert nan == 0
        # nansum over a singleton dim replaces NaN with 0; clamp handles +/-Inf.
        return torch.clamp(input.unsqueeze(0).nansum(0), min=neginf, max=posinf, out=out)
#----------------------------------------------------------------------------
# Symbolic assert.
# Unlike the Python `assert` statement, this assert is recorded when run
# under torch.jit.trace(); the attribute name differs across PyTorch versions.
try:
    symbolic_assert = torch._assert # 1.8.0a0 # pylint: disable=protected-access
except AttributeError:
    symbolic_assert = torch.Assert # 1.7.0
#----------------------------------------------------------------------------
# Context manager to temporarily suppress known warnings in torch.jit.trace().
# Note: Cannot use catch_warnings because of https://bugs.python.org/issue29672
@contextlib.contextmanager
def suppress_tracer_warnings():
    """Context manager that temporarily suppresses known torch.jit.trace()
    warnings by prepending an 'ignore' entry for TracerWarning to the
    warnings filter list.

    Note: warnings.catch_warnings() cannot be used here because of
    https://bugs.python.org/issue29672, so the filter list is patched
    directly. Bug fix vs. the original: the filter is now removed even if
    the managed body raises, instead of leaking into subsequent code.
    """
    flt = ('ignore', None, torch.jit.TracerWarning, None, 0)
    warnings.filters.insert(0, flt)
    try:
        yield
    finally:
        warnings.filters.remove(flt)
#----------------------------------------------------------------------------
# Assert that the shape of a tensor matches the given list of integers.
# None indicates that the size of a dimension is allowed to vary.
# Performs symbolic assertion when used in torch.jit.trace().
def assert_shape(tensor, ref_shape):
    """Assert that `tensor` has the shape given by `ref_shape`. A None
    entry allows the corresponding dimension to vary freely. Emits
    symbolic assertions for dimensions that are traced tensors under
    torch.jit.trace().
    """
    if tensor.ndim != len(ref_shape):
        raise AssertionError(f'Wrong number of dimensions: got {tensor.ndim}, expected {len(ref_shape)}')
    for idx, (size, ref_size) in enumerate(zip(tensor.shape, ref_shape)):
        if ref_size is None:
            continue
        if isinstance(ref_size, torch.Tensor):
            with suppress_tracer_warnings(): # as_tensor results are registered as constants
                symbolic_assert(torch.equal(torch.as_tensor(size), ref_size), f'Wrong size for dimension {idx}')
        elif isinstance(size, torch.Tensor):
            with suppress_tracer_warnings(): # as_tensor results are registered as constants
                symbolic_assert(torch.equal(size, torch.as_tensor(ref_size)), f'Wrong size for dimension {idx}: expected {ref_size}')
        elif size != ref_size:
            raise AssertionError(f'Wrong size for dimension {idx}: got {size}, expected {ref_size}')
#----------------------------------------------------------------------------
# Function decorator that calls torch.autograd.profiler.record_function().
def profiled_function(fn):
    """Decorator that runs `fn` inside a torch.autograd.profiler.record_function()
    range named after the function, so calls show up as labelled blocks in
    autograd profiler traces.

    Improvement over the original: uses functools.wraps, which preserves
    __doc__, __module__, __qualname__, and __wrapped__ in addition to the
    __name__ that was copied manually before (backward-compatible).
    """
    import functools  # local import: not in this module's top-level imports

    @functools.wraps(fn)
    def decorator(*args, **kwargs):
        with torch.autograd.profiler.record_function(fn.__name__):
            return fn(*args, **kwargs)
    return decorator
#----------------------------------------------------------------------------
# Sampler for torch.utils.data.DataLoader that loops over the dataset
# indefinitely, shuffling items as it goes.
class InfiniteSampler(torch.utils.data.Sampler):
    """Sampler for torch.utils.data.DataLoader that loops over the dataset
    indefinitely, optionally shuffling items within a sliding window as it
    goes. Each of `num_replicas` ranks receives a disjoint interleaved
    sub-stream of the indices.
    """
    def __init__(self, dataset, rank=0, num_replicas=1, shuffle=True, seed=0, window_size=0.5):
        assert len(dataset) > 0
        assert num_replicas > 0
        assert 0 <= rank < num_replicas
        assert 0 <= window_size <= 1
        super().__init__(dataset)
        self.dataset = dataset
        self.rank = rank
        self.num_replicas = num_replicas
        self.shuffle = shuffle
        self.seed = seed
        self.window_size = window_size

    def __iter__(self):
        order = np.arange(len(self.dataset))
        rnd = None
        window = 0
        if self.shuffle:
            # Seeded RNG => identical stream for identical constructor args.
            rnd = np.random.RandomState(self.seed)
            rnd.shuffle(order)
            window = int(np.rint(order.size * self.window_size))

        idx = 0
        while True:
            pos = idx % order.size
            # Hand out every num_replicas-th item to this rank.
            if idx % self.num_replicas == self.rank:
                yield order[pos]
            # Keep reshuffling within a trailing window as we iterate.
            if window >= 2:
                other = (pos - rnd.randint(window)) % order.size
                order[pos], order[other] = order[other], order[pos]
            idx += 1
#----------------------------------------------------------------------------
# Utilities for operating with torch.nn.Module parameters and buffers.
def params_and_buffers(module):
    """All parameters followed by all buffers of `module`, as one list."""
    assert isinstance(module, torch.nn.Module)
    return [*module.parameters(), *module.buffers()]
def named_params_and_buffers(module):
    """(name, tensor) pairs for all parameters followed by all buffers."""
    assert isinstance(module, torch.nn.Module)
    return [*module.named_parameters(), *module.named_buffers()]
def copy_params_and_buffers(src_module, dst_module, require_all=False, allow_mismatch=False):
    """Copy parameters and buffers from `src_module` into the same-named
    tensors of `dst_module`, in place.

    A destination name that is absent from the source is retried with the
    '_semantic' infix stripped, so '<name>_semantic...' tensors can be
    seeded from their plain '<name>...' counterparts. Each destination
    tensor keeps its own requires_grad flag after the copy.

    Args:
        src_module:     Module to read tensors from.
        dst_module:     Module whose tensors are overwritten in place.
        require_all:    Assert that every destination name exists verbatim
                        in the source.
        allow_mismatch: On shape mismatch, print a warning and skip the
                        tensor instead of letting copy_() raise.
    """
    assert isinstance(src_module, torch.nn.Module)
    assert isinstance(dst_module, torch.nn.Module)
    src_tensors = dict(named_params_and_buffers(src_module))
    for name, tensor in named_params_and_buffers(dst_module):
        assert (name in src_tensors) or (not require_all)
        # Neither the exact name nor its '_semantic'-stripped variant exists:
        # warn and move on.
        if name not in src_tensors and name.replace('_semantic', '') not in src_tensors:
            print(f'Warning: {name} not found in source module')
            continue
        if name not in src_tensors:
            # Fall back to the '_semantic'-stripped source tensor.
            # print(f'Warning: {name} not found in source module, using {name.replace("_semantic", "")}')
            name_src = name.replace('_semantic', '')
        else:
            name_src = name
        if src_tensors[name_src].shape != tensor.shape and allow_mismatch:
            print(f'Warning: {name_src} shape mismatch: {src_tensors[name_src].shape} vs {tensor.shape}')
            continue
        # (Always true at this point given the `continue` above; kept as written.)
        if name_src in src_tensors:
            tensor.copy_(src_tensors[name_src].detach()).requires_grad_(tensor.requires_grad)
            # print(f'Copied {name}')
#----------------------------------------------------------------------------
# Context manager for easily enabling/disabling DistributedDataParallel
# synchronization.
@contextlib.contextmanager
def ddp_sync(module, sync):
    """Context manager that enables (`sync=True`) or suppresses
    DistributedDataParallel gradient synchronization for its body.
    For a module not wrapped in DDP this is a plain passthrough.
    """
    assert isinstance(module, torch.nn.Module)
    if not sync and isinstance(module, torch.nn.parallel.DistributedDataParallel):
        with module.no_sync():
            yield
    else:
        yield
#----------------------------------------------------------------------------
# Check DistributedDataParallel consistency across processes.
def check_ddp_consistency(module, ignore_regex=None):
    """Verify that every parameter/buffer of `module` is identical across
    ranks by broadcasting from rank 0 and comparing, skipping any fully
    qualified names that match `ignore_regex`.
    """
    assert isinstance(module, torch.nn.Module)
    for name, tensor in named_params_and_buffers(module):
        fullname = type(module).__name__ + '.' + name
        if ignore_regex is not None and re.fullmatch(ignore_regex, fullname):
            continue
        tensor = tensor.detach()
        if tensor.is_floating_point():
            # NaN != NaN would trip the equality check below.
            tensor = nan_to_num(tensor)
        reference = tensor.clone()
        torch.distributed.broadcast(tensor=reference, src=0)
        assert (tensor == reference).all(), fullname
#----------------------------------------------------------------------------
# Print summary table of module hierarchy.
def print_module_summary(module, inputs, max_nesting=3, skip_redundant=True):
    """Run `module(*inputs)` once and print a table of its submodule
    hierarchy: parameter counts, buffer counts, and output shapes/dtypes
    per submodule, plus totals.

    Args:
        module:         Module to summarize (must not be scripted).
        inputs:         Tuple/list of positional arguments for the forward pass.
        max_nesting:    Maximum submodule nesting depth included in the table.
        skip_redundant: Omit rows that contribute no new parameters,
                        buffers, or outputs.

    Returns:
        The outputs of `module(*inputs)`.
    """
    assert isinstance(module, torch.nn.Module)
    assert not isinstance(module, torch.jit.ScriptModule)
    assert isinstance(inputs, (tuple, list))
    # Register hooks.
    # `nesting` tracks the current call depth via the pre/post hook pair;
    # only calls at depth <= max_nesting are recorded.
    entries = []
    nesting = [0]
    def pre_hook(_mod, _inputs):
        nesting[0] += 1
    def post_hook(mod, _inputs, outputs):
        nesting[0] -= 1
        if nesting[0] <= max_nesting:
            # Keep only tensor outputs for shape/dtype reporting.
            outputs = list(outputs) if isinstance(outputs, (tuple, list)) else [outputs]
            outputs = [t for t in outputs if isinstance(t, torch.Tensor)]
            entries.append(dnnlib.EasyDict(mod=mod, outputs=outputs))
    hooks = [mod.register_forward_pre_hook(pre_hook) for mod in module.modules()]
    hooks += [mod.register_forward_hook(post_hook) for mod in module.modules()]
    # Run module.
    outputs = module(*inputs)
    for hook in hooks:
        hook.remove()
    # Identify unique outputs, parameters, and buffers.
    # Each tensor is attributed to the first (innermost-completed) entry
    # that produced or owns it.
    tensors_seen = set()
    for e in entries:
        e.unique_params = [t for t in e.mod.parameters() if id(t) not in tensors_seen]
        e.unique_buffers = [t for t in e.mod.buffers() if id(t) not in tensors_seen]
        e.unique_outputs = [t for t in e.outputs if id(t) not in tensors_seen]
        tensors_seen |= {id(t) for t in e.unique_params + e.unique_buffers + e.unique_outputs}
    # Filter out redundant entries.
    if skip_redundant:
        entries = [e for e in entries if len(e.unique_params) or len(e.unique_buffers) or len(e.unique_outputs)]
    # Construct table.
    rows = [[type(module).__name__, 'Parameters', 'Buffers', 'Output shape', 'Datatype']]
    rows += [['---'] * len(rows[0])]
    param_total = 0
    buffer_total = 0
    submodule_names = {mod: name for name, mod in module.named_modules()}
    for e in entries:
        name = '<top-level>' if e.mod is module else submodule_names[e.mod]
        param_size = sum(t.numel() for t in e.unique_params)
        buffer_size = sum(t.numel() for t in e.unique_buffers)
        output_shapes = [str(list(t.shape)) for t in e.outputs]
        output_dtypes = [str(t.dtype).split('.')[-1] for t in e.outputs]
        # Modules with multiple outputs get one row per output (name:0, name:1, ...).
        rows += [[
            name + (':0' if len(e.outputs) >= 2 else ''),
            str(param_size) if param_size else '-',
            str(buffer_size) if buffer_size else '-',
            (output_shapes + ['-'])[0],
            (output_dtypes + ['-'])[0],
        ]]
        for idx in range(1, len(e.outputs)):
            rows += [[name + f':{idx}', '-', '-', output_shapes[idx], output_dtypes[idx]]]
        param_total += param_size
        buffer_total += buffer_size
    rows += [['---'] * len(rows[0])]
    rows += [['Total', str(param_total), str(buffer_total), '-', '-']]
    # Print table.
    # Pad each column to the width of its widest cell.
    widths = [max(len(cell) for cell in column) for column in zip(*rows)]
    print()
    for row in rows:
        print('  '.join(cell + ' ' * (width - len(cell)) for cell, width in zip(row, widths)))
    print()
    return outputs
#----------------------------------------------------------------------------
| 11,902 | 41.359431 | 133 | py |
pix2pix3D | pix2pix3D-main/torch_utils/ops/bias_act.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Custom PyTorch ops for efficient bias and activation."""
import os
import numpy as np
import torch
import dnnlib
from .. import custom_ops
from .. import misc
#----------------------------------------------------------------------------
# Activation functions supported by bias_act(), keyed by name.
# Fields used in this file:
#   func:      reference PyTorch implementation (used by _bias_act_ref),
#   def_alpha: default shape parameter (spec.def_alpha),
#   def_gain:  default output scaling (spec.def_gain).
# NOTE(review): cuda_idx / ref / has_2nd_grad appear to be metadata for the
# CUDA kernel path (_bias_act_cuda) — confirm against the plugin sources.
activation_funcs = {
    'linear':   dnnlib.EasyDict(func=lambda x, **_:         x,                                          def_alpha=0,    def_gain=1,             cuda_idx=1, ref='',  has_2nd_grad=False),
    'relu':     dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.relu(x),                def_alpha=0,    def_gain=np.sqrt(2),    cuda_idx=2, ref='y', has_2nd_grad=False),
    'lrelu':    dnnlib.EasyDict(func=lambda x, alpha, **_:  torch.nn.functional.leaky_relu(x, alpha),   def_alpha=0.2,  def_gain=np.sqrt(2),    cuda_idx=3, ref='y', has_2nd_grad=False),
    'tanh':     dnnlib.EasyDict(func=lambda x, **_:         torch.tanh(x),                              def_alpha=0,    def_gain=1,             cuda_idx=4, ref='y', has_2nd_grad=True),
    'sigmoid':  dnnlib.EasyDict(func=lambda x, **_:         torch.sigmoid(x),                           def_alpha=0,    def_gain=1,             cuda_idx=5, ref='y', has_2nd_grad=True),
    'elu':      dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.elu(x),                 def_alpha=0,    def_gain=1,             cuda_idx=6, ref='y', has_2nd_grad=True),
    'selu':     dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.selu(x),                def_alpha=0,    def_gain=1,             cuda_idx=7, ref='y', has_2nd_grad=True),
    'softplus': dnnlib.EasyDict(func=lambda x, **_:         torch.nn.functional.softplus(x),            def_alpha=0,    def_gain=1,             cuda_idx=8, ref='y', has_2nd_grad=True),
    'swish':    dnnlib.EasyDict(func=lambda x, **_:         torch.sigmoid(x) * x,                       def_alpha=0,    def_gain=np.sqrt(2),    cuda_idx=9, ref='x', has_2nd_grad=True),
}
#----------------------------------------------------------------------------
_plugin = None
_null_tensor = torch.empty([0])
def _init():
    """Compile and load the fused bias/activation CUDA plugin on first
    use; subsequent calls reuse the cached module. Always returns True.
    """
    global _plugin
    if _plugin is not None:
        return True
    _plugin = custom_ops.get_plugin(
        module_name='bias_act_plugin',
        sources=['bias_act.cpp', 'bias_act.cu'],
        headers=['bias_act.h'],
        source_dir=os.path.dirname(__file__),
        extra_cuda_cflags=['--use_fast_math'],
    )
    return True
#----------------------------------------------------------------------------
def bias_act(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None, impl='cuda'):
    r"""Fused bias + activation + gain, with optional clamping.

    Adds bias `b` to `x` along dimension `dim`, applies activation `act`,
    scales the result by `gain`, and optionally clamps it. Each step is
    optional. The fused op is usually considerably more efficient than the
    equivalent sequence of standard PyTorch ops, and supports first and
    second order gradients (but not third).

    Args:
        x:     Input activation tensor; any shape.
        b:     Bias vector, or None to disable. Must be a 1D tensor of the
               same type as `x`, with known shape matching dimension `dim`
               of `x`.
        dim:   Dimension of `x` corresponding to the elements of `b`;
               ignored when `b` is not specified.
        act:   Activation name, e.g. `"relu"`, `"lrelu"`, `"tanh"`,
               `"sigmoid"`, `"swish"`, or `"linear"` to disable. See
               `activation_funcs` for the full list; `None` is not allowed.
        alpha: Shape parameter for the activation, or None for its default.
        gain:  Output scale factor, or None for the activation's default
               (consider specifying 1 if unsure).
        clamp: Clamp outputs to `[-clamp, +clamp]`, or None to disable
               clamping (default).
        impl:  Implementation to use: `"ref"` or `"cuda"` (default).

    Returns:
        Tensor of the same shape and datatype as `x`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    want_cuda = impl == 'cuda' and x.device.type == 'cuda'
    if want_cuda and _init():
        fused = _bias_act_cuda(dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
        return fused.apply(x, b)
    return _bias_act_ref(x=x, b=b, dim=dim, act=act, alpha=alpha, gain=gain, clamp=clamp)
#----------------------------------------------------------------------------
@misc.profiled_function
def _bias_act_ref(x, b=None, dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Reference implementation of `bias_act()` built from standard PyTorch ops."""
    assert isinstance(x, torch.Tensor)
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    # Resolve defaults once, as plain floats; -1 encodes "no clamping".
    alpha = float(spec.def_alpha if alpha is None else alpha)
    gain = float(spec.def_gain if gain is None else gain)
    clamp = float(-1 if clamp is None else clamp)

    # Bias: broadcast along `dim`.
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.ndim == 1
        assert 0 <= dim < x.ndim
        assert b.shape[0] == x.shape[dim]
        shape = [1] * x.ndim
        shape[dim] = -1
        x = x + b.reshape(shape)

    # Activation.
    x = spec.func(x, alpha=alpha)

    # Gain.
    if gain != 1:
        x = x * gain

    # Clamp.
    if clamp >= 0:
        x = x.clamp(-clamp, clamp)
    return x
#----------------------------------------------------------------------------
_bias_act_cuda_cache = dict()  # Memoizes the generated Function classes, keyed by (dim, act, alpha, gain, clamp).
def _bias_act_cuda(dim=1, act='linear', alpha=None, gain=None, clamp=None):
    """Fast CUDA implementation of `bias_act()` using custom ops.

    Returns a `torch.autograd.Function` subclass specialized for the given
    (dim, act, alpha, gain, clamp) configuration; the generated classes are
    memoized in `_bias_act_cuda_cache`.
    """
    # Parse arguments.
    assert clamp is None or clamp >= 0
    spec = activation_funcs[act]
    alpha = float(alpha if alpha is not None else spec.def_alpha)
    gain = float(gain if gain is not None else spec.def_gain)
    clamp = float(clamp if clamp is not None else -1)  # -1 encodes "no clamping" for the plugin.
    # Lookup from cache.
    key = (dim, act, alpha, gain, clamp)
    if key in _bias_act_cuda_cache:
        return _bias_act_cuda_cache[key]
    # Forward op.
    class BiasActCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, b): # pylint: disable=arguments-differ
            # Preserve channels_last memory layout when the input uses it.
            ctx.memory_format = torch.channels_last if x.ndim > 2 and x.stride(1) == 1 else torch.contiguous_format
            x = x.contiguous(memory_format=ctx.memory_format)
            b = b.contiguous() if b is not None else _null_tensor
            y = x
            # Skip the plugin entirely when the whole op is an identity.
            if act != 'linear' or gain != 1 or clamp >= 0 or b is not _null_tensor:
                y = _plugin.bias_act(x, b, _null_tensor, _null_tensor, _null_tensor, 0, dim, spec.cuda_idx, alpha, gain, clamp)
            # Save only what the backward pass will actually read (per spec.ref).
            ctx.save_for_backward(
                x if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                b if 'x' in spec.ref or spec.has_2nd_grad else _null_tensor,
                y if 'y' in spec.ref else _null_tensor)
            return y
        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            dy = dy.contiguous(memory_format=ctx.memory_format)
            x, b, y = ctx.saved_tensors
            dx = None
            db = None
            if ctx.needs_input_grad[0] or ctx.needs_input_grad[1]:
                dx = dy
                if act != 'linear' or gain != 1 or clamp >= 0:
                    dx = BiasActCudaGrad.apply(dy, x, b, y)
            if ctx.needs_input_grad[1]:
                # Bias gradient: reduce over every dimension except `dim`.
                db = dx.sum([i for i in range(dx.ndim) if i != dim])
            return dx, db
    # Backward op.
    class BiasActCudaGrad(torch.autograd.Function):
        @staticmethod
        def forward(ctx, dy, x, b, y): # pylint: disable=arguments-differ
            ctx.memory_format = torch.channels_last if dy.ndim > 2 and dy.stride(1) == 1 else torch.contiguous_format
            # Plugin grad-mode 1: first-order gradient of the fused op.
            dx = _plugin.bias_act(dy, b, x, y, _null_tensor, 1, dim, spec.cuda_idx, alpha, gain, clamp)
            ctx.save_for_backward(
                dy if spec.has_2nd_grad else _null_tensor,
                x, b, y)
            return dx
        @staticmethod
        def backward(ctx, d_dx): # pylint: disable=arguments-differ
            d_dx = d_dx.contiguous(memory_format=ctx.memory_format)
            dy, x, b, y = ctx.saved_tensors
            d_dy = None
            d_x = None
            d_b = None
            d_y = None
            if ctx.needs_input_grad[0]:
                d_dy = BiasActCudaGrad.apply(d_dx, x, b, y)
            if spec.has_2nd_grad and (ctx.needs_input_grad[1] or ctx.needs_input_grad[2]):
                # Plugin grad-mode 2: second-order gradient w.r.t. x.
                d_x = _plugin.bias_act(d_dx, b, x, y, dy, 2, dim, spec.cuda_idx, alpha, gain, clamp)
            if spec.has_2nd_grad and ctx.needs_input_grad[2]:
                d_b = d_x.sum([i for i in range(d_x.ndim) if i != dim])
            return d_dy, d_x, d_b, d_y
    # Add to cache.
    _bias_act_cuda_cache[key] = BiasActCuda
    return BiasActCuda
#----------------------------------------------------------------------------
| 9,927 | 45.830189 | 185 | py |
pix2pix3D | pix2pix3D-main/torch_utils/ops/grid_sample_gradfix.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Custom replacement for `torch.nn.functional.grid_sample` that
supports arbitrarily high order gradients between the input and output.
Only works on 2D images and assumes
`mode='bilinear'`, `padding_mode='zeros'`, `align_corners=False`."""
import torch
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
#----------------------------------------------------------------------------
def grid_sample(input, grid):
    """Bilinear 2D grid sample (zeros padding, align_corners=False), routed
    through the custom differentiable op when the module-level flag enables it."""
    if not _should_use_custom_op():
        return torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
    return _GridSample2dForward.apply(input, grid)
#----------------------------------------------------------------------------
def _should_use_custom_op():
    # The custom op is purely opt-in via the module-level `enabled` flag;
    # unlike conv2d_gradfix there are no per-call device checks here.
    return enabled
#----------------------------------------------------------------------------
class _GridSample2dForward(torch.autograd.Function):
@staticmethod
def forward(ctx, input, grid):
assert input.ndim == 4
assert grid.ndim == 4
output = torch.nn.functional.grid_sample(input=input, grid=grid, mode='bilinear', padding_mode='zeros', align_corners=False)
ctx.save_for_backward(input, grid)
return output
@staticmethod
def backward(ctx, grad_output):
input, grid = ctx.saved_tensors
grad_input, grad_grid = _GridSample2dBackward.apply(grad_output, input, grid)
return grad_input, grad_grid
#----------------------------------------------------------------------------
class _GridSample2dBackward(torch.autograd.Function):
    """Backward half of the pair: computes (grad_input, grad_grid) via the
    internal aten operator, itself differentiable once more w.r.t. grad_output."""
    @staticmethod
    def forward(ctx, grad_output, input, grid):
        # NOTE(review): relies on the private `torch._C._jit_get_operation`
        # API and the positional signature of aten::grid_sampler_2d_backward;
        # both have changed across PyTorch releases -- verify against the
        # installed torch version.
        op = torch._C._jit_get_operation('aten::grid_sampler_2d_backward')
        grad_input, grad_grid = op(grad_output, input, grid, 0, 0, False)
        ctx.save_for_backward(grid)
        return grad_input, grad_grid
    @staticmethod
    def backward(ctx, grad2_grad_input, grad2_grad_grid):
        _ = grad2_grad_grid # unused
        grid, = ctx.saved_tensors
        grad2_grad_output = None
        grad2_input = None
        grad2_grid = None
        if ctx.needs_input_grad[0]:
            # d(grad_input)/d(grad_output) is grid_sample itself.
            grad2_grad_output = _GridSample2dForward.apply(grad2_grad_input, grid)
        # Second-order gradient w.r.t. `grid` is not implemented.
        assert not ctx.needs_input_grad[2]
        return grad2_grad_output, grad2_input, grad2_grid
#----------------------------------------------------------------------------
| 3,134 | 38.1875 | 132 | py |
pix2pix3D | pix2pix3D-main/torch_utils/ops/conv2d_gradfix.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Custom replacement for `torch.nn.functional.conv2d` that supports
arbitrarily high order gradients with zero performance penalty."""
import contextlib
import torch
# pylint: disable=redefined-builtin
# pylint: disable=arguments-differ
# pylint: disable=protected-access
#----------------------------------------------------------------------------
enabled = False # Enable the custom op by setting this to true.
weight_gradients_disabled = False # Forcefully disable computation of gradients with respect to the weights.
@contextlib.contextmanager
def no_weight_gradients(disable=True):
    """Context manager that (optionally) disables weight-gradient computation
    in the custom conv2d ops for the duration of the `with` block.

    Args:
        disable: When False, the manager is a no-op (the flag keeps its
                 current value).

    The previous flag value is always restored on exit. Fix over the original:
    restoration now happens in a `finally` clause, so an exception raised
    inside the `with` body no longer leaves `weight_gradients_disabled` stuck
    at True.
    """
    global weight_gradients_disabled
    old = weight_gradients_disabled
    if disable:
        weight_gradients_disabled = True
    try:
        yield
    finally:
        weight_gradients_disabled = old
#----------------------------------------------------------------------------
def conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
    """Drop-in replacement for `torch.nn.functional.conv2d` that routes through
    the gradfix op (arbitrary-order gradients) when the custom path is active."""
    if not _should_use_custom_op(input):
        return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, dilation=dilation, groups=groups)
    op = _conv2d_gradfix(transpose=False, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=0, dilation=dilation, groups=groups)
    return op.apply(input, weight, bias)
def conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1):
    """Drop-in replacement for `torch.nn.functional.conv_transpose2d` that routes
    through the gradfix op (arbitrary-order gradients) when the custom path is active."""
    if not _should_use_custom_op(input):
        return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
    op = _conv2d_gradfix(transpose=True, weight_shape=weight.shape, stride=stride, padding=padding, output_padding=output_padding, groups=groups, dilation=dilation)
    return op.apply(input, weight, bias)
#----------------------------------------------------------------------------
def _should_use_custom_op(input):
    """True when the custom gradfix path applies: globally enabled, cuDNN on,
    and the tensor lives on a CUDA device."""
    assert isinstance(input, torch.Tensor)
    return enabled and torch.backends.cudnn.enabled and input.device.type == 'cuda'
def _tuple_of_ints(xs, ndim):
xs = tuple(xs) if isinstance(xs, (tuple, list)) else (xs,) * ndim
assert len(xs) == ndim
assert all(isinstance(x, int) for x in xs)
return xs
#----------------------------------------------------------------------------
_conv2d_gradfix_cache = dict()  # Memoizes generated Function classes per conv configuration.
_null_tensor = torch.empty([0])  # Sentinel saved in place of tensors the backward pass won't read.
def _conv2d_gradfix(transpose, weight_shape, stride, padding, output_padding, dilation, groups):
    """Return a memoized `torch.autograd.Function` implementing conv2d (or
    conv_transpose2d when `transpose` is True) for one fixed configuration,
    with hand-written backward passes that themselves support higher-order
    gradients.
    """
    # Parse arguments.
    ndim = 2
    weight_shape = tuple(weight_shape)
    stride = _tuple_of_ints(stride, ndim)
    padding = _tuple_of_ints(padding, ndim)
    output_padding = _tuple_of_ints(output_padding, ndim)
    dilation = _tuple_of_ints(dilation, ndim)
    # Lookup from cache.
    key = (transpose, weight_shape, stride, padding, output_padding, dilation, groups)
    if key in _conv2d_gradfix_cache:
        return _conv2d_gradfix_cache[key]
    # Validate arguments.
    assert groups >= 1
    assert len(weight_shape) == ndim + 2
    assert all(stride[i] >= 1 for i in range(ndim))
    assert all(padding[i] >= 0 for i in range(ndim))
    assert all(dilation[i] >= 0 for i in range(ndim))
    if not transpose:
        assert all(output_padding[i] == 0 for i in range(ndim))
    else: # transpose
        assert all(0 <= output_padding[i] < max(stride[i], dilation[i]) for i in range(ndim))
    # Helpers.
    common_kwargs = dict(stride=stride, padding=padding, dilation=dilation, groups=groups)
    def calc_output_padding(input_shape, output_shape):
        # Output padding the (transposed) backward conv needs in order to
        # reproduce exactly `input_shape`.
        if transpose:
            return [0, 0]
        return [
            input_shape[i + 2]
            - (output_shape[i + 2] - 1) * stride[i]
            - (1 - 2 * padding[i])
            - dilation[i] * (weight_shape[i + 2] - 1)
            for i in range(ndim)
        ]
    # Forward & backward.
    class Conv2d(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, weight, bias):
            assert weight.shape == weight_shape
            # Save each tensor only if the other operand needs its gradient.
            ctx.save_for_backward(
                input if weight.requires_grad else _null_tensor,
                weight if input.requires_grad else _null_tensor,
            )
            ctx.input_shape = input.shape
            # Simple 1x1 convolution => cuBLAS (only on Volta, not on Ampere).
            if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0) and torch.cuda.get_device_capability(input.device) < (8, 0):
                a = weight.reshape(groups, weight_shape[0] // groups, weight_shape[1])
                b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1)
                c = (a.transpose(1, 2) if transpose else a) @ b.permute(1, 2, 0, 3).flatten(2)
                c = c.reshape(-1, input.shape[0], *input.shape[2:]).transpose(0, 1)
                c = c if bias is None else c + bias.unsqueeze(0).unsqueeze(2).unsqueeze(3)
                return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format))
            # General case => cuDNN.
            if transpose:
                return torch.nn.functional.conv_transpose2d(input=input, weight=weight, bias=bias, output_padding=output_padding, **common_kwargs)
            return torch.nn.functional.conv2d(input=input, weight=weight, bias=bias, **common_kwargs)
        @staticmethod
        def backward(ctx, grad_output):
            input, weight = ctx.saved_tensors
            input_shape = ctx.input_shape
            grad_input = None
            grad_weight = None
            grad_bias = None
            if ctx.needs_input_grad[0]:
                # Input grad is the opposite-direction convolution of grad_output.
                p = calc_output_padding(input_shape=input_shape, output_shape=grad_output.shape)
                op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs)
                grad_input = op.apply(grad_output, weight, None)
                assert grad_input.shape == input_shape
            if ctx.needs_input_grad[1] and not weight_gradients_disabled:
                grad_weight = Conv2dGradWeight.apply(grad_output, input, weight)
                assert grad_weight.shape == weight_shape
            if ctx.needs_input_grad[2]:
                grad_bias = grad_output.sum([0, 2, 3])
            return grad_input, grad_weight, grad_bias
    # Gradient with respect to the weights.
    class Conv2dGradWeight(torch.autograd.Function):
        @staticmethod
        def forward(ctx, grad_output, input, weight):
            ctx.save_for_backward(
                grad_output if input.requires_grad else _null_tensor,
                input if grad_output.requires_grad else _null_tensor,
            )
            ctx.grad_output_shape = grad_output.shape
            ctx.input_shape = input.shape
            # Simple 1x1 convolution => cuBLAS (on both Volta and Ampere).
            if weight_shape[2:] == stride == dilation == (1, 1) and padding == (0, 0):
                a = grad_output.reshape(grad_output.shape[0], groups, grad_output.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2)
                b = input.reshape(input.shape[0], groups, input.shape[1] // groups, -1).permute(1, 2, 0, 3).flatten(2)
                c = (b @ a.transpose(1, 2) if transpose else a @ b.transpose(1, 2)).reshape(weight_shape)
                return c.contiguous(memory_format=(torch.channels_last if input.stride(1) == 1 else torch.contiguous_format))
            # General case => cuDNN.
            return torch.ops.aten.convolution_backward(grad_output=grad_output, input=input, weight=weight, bias_sizes=None, stride=stride, padding=padding, dilation=dilation, transposed=transpose, output_padding=output_padding, groups=groups, output_mask=[False, True, False])[1]
        @staticmethod
        def backward(ctx, grad2_grad_weight):
            grad_output, input = ctx.saved_tensors
            grad_output_shape = ctx.grad_output_shape
            input_shape = ctx.input_shape
            grad2_grad_output = None
            grad2_input = None
            if ctx.needs_input_grad[0]:
                grad2_grad_output = Conv2d.apply(input, grad2_grad_weight, None)
                assert grad2_grad_output.shape == grad_output_shape
            if ctx.needs_input_grad[1]:
                p = calc_output_padding(input_shape=input_shape, output_shape=grad_output_shape)
                op = _conv2d_gradfix(transpose=(not transpose), weight_shape=weight_shape, output_padding=p, **common_kwargs)
                grad2_input = op.apply(grad_output, grad2_grad_weight, None)
                assert grad2_input.shape == input_shape
            return grad2_grad_output, grad2_input
    _conv2d_gradfix_cache[key] = Conv2d
    return Conv2d
#----------------------------------------------------------------------------
| 9,494 | 46.475 | 280 | py |
pix2pix3D | pix2pix3D-main/torch_utils/ops/upfirdn2d.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Custom PyTorch ops for efficient resampling of 2D images."""
import os
import numpy as np
import torch
from .. import custom_ops
from .. import misc
from . import conv2d_gradfix
#----------------------------------------------------------------------------
_plugin = None
def _init():
    """Compile/load the `upfirdn2d_plugin` CUDA extension on first use.

    Always returns True so callers can use it inline in a boolean condition.
    """
    global _plugin
    if _plugin is not None:
        return True
    _plugin = custom_ops.get_plugin(
        module_name='upfirdn2d_plugin',
        sources=['upfirdn2d.cpp', 'upfirdn2d.cu'],
        headers=['upfirdn2d.h'],
        source_dir=os.path.dirname(__file__),
        extra_cuda_cflags=['--use_fast_math'],
    )
    return True
def _parse_scaling(scaling):
if isinstance(scaling, int):
scaling = [scaling, scaling]
assert isinstance(scaling, (list, tuple))
assert all(isinstance(x, int) for x in scaling)
sx, sy = scaling
assert sx >= 1 and sy >= 1
return sx, sy
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, int) for x in padding)
if len(padding) == 2:
padx, pady = padding
padding = [padx, padx, pady, pady]
padx0, padx1, pady0, pady1 = padding
return padx0, padx1, pady0, pady1
def _get_filter_size(f):
    """Return `(width, height)` of a FIR filter tensor; `None` is a 1x1 identity."""
    if f is None:
        return 1, 1
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    fw, fh = f.shape[-1], f.shape[0]
    # Cast to plain ints without tripping the jit tracer.
    with misc.suppress_tracer_warnings():
        fw = int(fw)
        fh = int(fh)
    misc.assert_shape(f, [fh, fw][:f.ndim])
    assert fw >= 1 and fh >= 1
    return fw, fh
#----------------------------------------------------------------------------
def setup_filter(f, device=torch.device('cpu'), normalize=True, flip_filter=False, gain=1, separable=None):
    r"""Prepare a 2D FIR filter for use with `upfirdn2d()`.

    Args:
        f:           Torch tensor, numpy array, or python list of shape
                     `[filter_height, filter_width]` (non-separable),
                     `[filter_taps]` (separable), `[]` (impulse), or
                     `None` (identity).
        device:      Result device (default: cpu).
        normalize:   Scale the filter so a constant (DC) input keeps its
                     magnitude (default: True).
        flip_filter: Flip the filter (default: False)?
        gain:        Overall scaling factor for signal magnitude (default: 1).
        separable:   Return a separable filter; `None` selects automatically.

    Returns:
        Float32 tensor of shape `[filter_height, filter_width]`
        (non-separable) or `[filter_taps]` (separable).
    """
    # Canonicalize input to a float32 tensor with at least one dimension.
    if f is None:
        f = 1
    f = torch.as_tensor(f, dtype=torch.float32)
    assert f.ndim in [0, 1, 2]
    assert f.numel() > 0
    if f.ndim == 0:
        f = f[np.newaxis]

    # Decide separability; short 1D filters become a 2D outer product.
    if separable is None:
        separable = (f.ndim == 1 and f.numel() >= 8)
    if f.ndim == 1 and not separable:
        f = torch.outer(f, f)
    assert f.ndim == (1 if separable else 2)

    # Apply normalize, flip, gain, and device.
    if normalize:
        f /= f.sum()
    if flip_filter:
        f = f.flip(list(range(f.ndim)))
    f = f * (gain ** (f.ndim / 2))
    return f.to(device=device)
#----------------------------------------------------------------------------
def upfirdn2d(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Pad, upsample, FIR-filter, and downsample a batch of 2D images.

    Per channel: (1) upsample by inserting `up - 1` zeros after each pixel,
    (2) zero-pad by `padding` (negative values crop), (3) convolve with the
    filter `f` so every output pixel's footprint lies within the input, and
    (4) decimate by keeping every `down`-th pixel. Closely mirrors
    `scipy.signal.upfirdn`; the fused op supports gradients of arbitrary order.

    Args:
        x:           Float16/32/64 input of shape
                     `[batch_size, num_channels, in_height, in_width]`.
        f:           Float32 FIR filter: `[fh, fw]` (non-separable),
                     `[taps]` (separable), or `None` (identity).
        up:          Integer upsampling factor, int or `[x, y]` (default: 1).
        down:        Integer downsampling factor, int or `[x, y]` (default: 1).
        padding:     Padding w.r.t. the upsampled image: int, `[x, y]`, or
                     `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    if impl == 'cuda' and x.device.type == 'cuda' and _init():
        op = _upfirdn2d_cuda(up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
        return op.apply(x, f)
    return _upfirdn2d_ref(x, f, up=up, down=down, padding=padding, flip_filter=flip_filter, gain=gain)
#----------------------------------------------------------------------------
@misc.profiled_function
def _upfirdn2d_ref(x, f, up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Reference implementation of `upfirdn2d()` using standard PyTorch ops."""
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    if f is None:
        f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
    assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
    assert f.dtype == torch.float32 and not f.requires_grad
    batch, channels, ih, iw = x.shape
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)

    # The upsampled-and-padded buffer must be at least as large as the filter.
    assert iw * upx + padx0 + padx1 >= f.shape[-1]
    assert ih * upy + pady0 + pady1 >= f.shape[0]

    # Upsample by zero insertion.
    x = x.reshape([batch, channels, ih, 1, iw, 1])
    x = torch.nn.functional.pad(x, [0, upx - 1, 0, 0, 0, upy - 1])
    x = x.reshape([batch, channels, ih * upy, iw * upx])

    # Positive padding pads with zeros; negative padding crops.
    x = torch.nn.functional.pad(x, [max(padx0, 0), max(padx1, 0), max(pady0, 0), max(pady1, 0)])
    x = x[:, :, max(-pady0, 0) : x.shape[2] - max(-pady1, 0), max(-padx0, 0) : x.shape[3] - max(-padx1, 0)]

    # Prepare the filter: apply gain, match dtype, flip unless correlating.
    f = f * (gain ** (f.ndim / 2))
    f = f.to(x.dtype)
    if not flip_filter:
        f = f.flip(list(range(f.ndim)))

    # Depthwise convolution; separable filters are applied in two passes.
    f = f[np.newaxis, np.newaxis].repeat([channels, 1] + [1] * f.ndim)
    if f.ndim == 4:
        x = conv2d_gradfix.conv2d(input=x, weight=f, groups=channels)
    else:
        x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(2), groups=channels)
        x = conv2d_gradfix.conv2d(input=x, weight=f.unsqueeze(3), groups=channels)

    # Downsample by discarding samples.
    return x[:, :, ::downy, ::downx]
#----------------------------------------------------------------------------
_upfirdn2d_cuda_cache = dict()  # Memoizes generated Function classes per resampling configuration.
def _upfirdn2d_cuda(up=1, down=1, padding=0, flip_filter=False, gain=1):
    """Fast CUDA implementation of `upfirdn2d()` using custom ops.

    Returns a memoized `torch.autograd.Function` specialized for one
    (up, down, padding, flip_filter, gain) configuration.
    """
    # Parse arguments.
    upx, upy = _parse_scaling(up)
    downx, downy = _parse_scaling(down)
    padx0, padx1, pady0, pady1 = _parse_padding(padding)
    # Lookup from cache.
    key = (upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
    if key in _upfirdn2d_cuda_cache:
        return _upfirdn2d_cuda_cache[key]
    # Forward op.
    class Upfirdn2dCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, f): # pylint: disable=arguments-differ
            assert isinstance(x, torch.Tensor) and x.ndim == 4
            if f is None:
                f = torch.ones([1, 1], dtype=torch.float32, device=x.device)
            if f.ndim == 1 and f.shape[0] == 1:
                f = f.square().unsqueeze(0) # Convert separable-1 into full-1x1.
            assert isinstance(f, torch.Tensor) and f.ndim in [1, 2]
            y = x
            if f.ndim == 2:
                # Non-separable filter: single fused plugin call.
                y = _plugin.upfirdn2d(y, f, upx, upy, downx, downy, padx0, padx1, pady0, pady1, flip_filter, gain)
            else:
                # Separable filter: horizontal pass, then vertical pass
                # (gain is applied only once, on the second pass).
                y = _plugin.upfirdn2d(y, f.unsqueeze(0), upx, 1, downx, 1, padx0, padx1, 0, 0, flip_filter, 1.0)
                y = _plugin.upfirdn2d(y, f.unsqueeze(1), 1, upy, 1, downy, 0, 0, pady0, pady1, flip_filter, gain)
            ctx.save_for_backward(f)
            ctx.x_shape = x.shape
            return y
        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            f, = ctx.saved_tensors
            _, _, ih, iw = ctx.x_shape
            _, _, oh, ow = dy.shape
            fw, fh = _get_filter_size(f)
            # Padding that makes the transposed resampling reproduce the input shape.
            p = [
                fw - padx0 - 1,
                iw * upx - ow * downx + padx0 - upx + 1,
                fh - pady0 - 1,
                ih * upy - oh * downy + pady0 - upy + 1,
            ]
            dx = None
            df = None
            if ctx.needs_input_grad[0]:
                # Gradient is upfirdn2d with up/down swapped and the filter flipped.
                dx = _upfirdn2d_cuda(up=down, down=up, padding=p, flip_filter=(not flip_filter), gain=gain).apply(dy, f)
            # Gradient with respect to the filter is not implemented.
            assert not ctx.needs_input_grad[1]
            return dx, df
    # Add to cache.
    _upfirdn2d_cuda_cache[key] = Upfirdn2dCuda
    return Upfirdn2dCuda
#----------------------------------------------------------------------------
def filter2d(x, f, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Filter a batch of 2D images with the 2D FIR filter `f`.

    By default the result is padded so its shape matches the input; the
    user-supplied `padding` is applied on top of that (negative values crop).
    Pixels outside the image are treated as zero.

    Args:
        x:           Float16/32/64 input of shape
                     `[batch_size, num_channels, in_height, in_width]`.
        f:           Float32 FIR filter: `[fh, fw]` (non-separable),
                     `[taps]` (separable), or `None` (identity).
        padding:     Padding w.r.t. the output: int, `[x, y]`, or
                     `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter footprint over each output pixel.
    px0 += fw // 2
    px1 += (fw - 1) // 2
    py0 += fh // 2
    py1 += (fh - 1) // 2
    return upfirdn2d(x, f, padding=[px0, px1, py0, py1], flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
def upsample2d(x, f, up=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Upsample a batch of 2D images by `up` using the 2D FIR filter `f`.

    By default the result is padded so its shape is a multiple of the input;
    the user-supplied `padding` is applied on top of that (negative values
    crop). Pixels outside the image are treated as zero.

    Args:
        x:           Float16/32/64 input of shape
                     `[batch_size, num_channels, in_height, in_width]`.
        f:           Float32 FIR filter: `[fh, fw]` (non-separable),
                     `[taps]` (separable), or `None` (identity).
        up:          Integer upsampling factor, int or `[x, y]` (default: 2).
        padding:     Padding w.r.t. the output: int, `[x, y]`, or
                     `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    upx, upy = _parse_scaling(up)
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter over the zero-inserted grid.
    px0 += (fw + upx - 1) // 2
    px1 += (fw - upx) // 2
    py0 += (fh + upy - 1) // 2
    py1 += (fh - upy) // 2
    # gain*upx*upy compensates for the energy lost to zero insertion.
    return upfirdn2d(x, f, up=up, padding=[px0, px1, py0, py1], flip_filter=flip_filter, gain=gain*upx*upy, impl=impl)
#----------------------------------------------------------------------------
def downsample2d(x, f, down=2, padding=0, flip_filter=False, gain=1, impl='cuda'):
    r"""Downsample a batch of 2D images by `down` using the 2D FIR filter `f`.

    By default the result is padded so its shape is a fraction of the input;
    the user-supplied `padding` is applied on top of that (negative values
    crop). Pixels outside the image are treated as zero.

    Args:
        x:           Float16/32/64 input of shape
                     `[batch_size, num_channels, in_height, in_width]`.
        f:           Float32 FIR filter: `[fh, fw]` (non-separable),
                     `[taps]` (separable), or `None` (identity).
        down:        Integer downsampling factor, int or `[x, y]` (default: 2).
        padding:     Padding w.r.t. the input: int, `[x, y]`, or
                     `[x_before, x_after, y_before, y_after]` (default: 0).
        flip_filter: False = convolution, True = correlation (default: False).
        gain:        Overall scaling factor for signal magnitude (default: 1).
        impl:        `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    downx, downy = _parse_scaling(down)
    px0, px1, py0, py1 = _parse_padding(padding)
    fw, fh = _get_filter_size(f)
    # Center the filter over the retained samples.
    px0 += (fw - downx + 1) // 2
    px1 += (fw - downx) // 2
    py0 += (fh - downy + 1) // 2
    py1 += (fh - downy) // 2
    return upfirdn2d(x, f, down=down, padding=[px0, px1, py0, py1], flip_filter=flip_filter, gain=gain, impl=impl)
#----------------------------------------------------------------------------
| 16,506 | 41.109694 | 120 | py |
pix2pix3D | pix2pix3D-main/torch_utils/ops/filtered_lrelu.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
import os
import numpy as np
import torch
import warnings
from .. import custom_ops
from .. import misc
from . import upfirdn2d
from . import bias_act
#----------------------------------------------------------------------------
_plugin = None
def _init():
    """Compile/load the `filtered_lrelu_plugin` CUDA extension on first use.

    Always returns True so callers can use it inline in a boolean condition.
    """
    global _plugin
    if _plugin is not None:
        return True
    _plugin = custom_ops.get_plugin(
        module_name='filtered_lrelu_plugin',
        sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'],
        headers=['filtered_lrelu.h', 'filtered_lrelu.cu'],
        source_dir=os.path.dirname(__file__),
        extra_cuda_cflags=['--use_fast_math'],
    )
    return True
def _get_filter_size(f):
if f is None:
return 1, 1
assert isinstance(f, torch.Tensor)
assert 1 <= f.ndim <= 2
return f.shape[-1], f.shape[0] # width, height
def _parse_padding(padding):
if isinstance(padding, int):
padding = [padding, padding]
assert isinstance(padding, (list, tuple))
assert all(isinstance(x, (int, np.integer)) for x in padding)
padding = [int(x) for x in padding]
if len(padding) == 2:
px, py = padding
padding = [px, px, py, py]
px0, px1, py0, py1 = padding
return px0, px1, py0, py1
#----------------------------------------------------------------------------
def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'):
    r"""Filtered leaky ReLU for a batch of 2D images.

    Performs the following sequence of operations for each channel:
    bias (`b`) -> zero-insertion upsample (`up`) -> pad (`padding`, negative
    values crop) -> FIR filter (`fu`) -> scale by `gain` -> leaky ReLU
    (`slope`) -> clamp to +-`clamp` -> FIR filter (`fd`) -> downsample
    (`down`). The fused CUDA op is considerably more efficient than the same
    calculation with standard PyTorch ops and supports gradients of
    arbitrary order.

    Args:
        x:           Float32/float16/float64 input tensor of shape
                     `[batch_size, num_channels, in_height, in_width]`.
        fu:          Float32 upsampling FIR filter: `[h, w]` (non-separable),
                     `[taps]` (separable), or `None` (identity).
        fd:          Float32 downsampling FIR filter, same conventions as `fu`.
        b:           Bias vector (1D, same dtype as `x`, length equal to the
                     channel dimension), or `None` to disable.
        up:          Integer upsampling factor (default: 1).
        down:        Integer downsampling factor (default: 1).
        padding:     Padding with respect to the upsampled image: a single
                     int, `[x, y]`, or `[x_before, x_after, y_before,
                     y_after]` (default: 0).
        gain:        Overall scaling factor for signal magnitude
                     (default: sqrt(2)).
        slope:       Slope on the negative side of leaky ReLU (default: 0.2).
        clamp:       Maximum magnitude for leaky ReLU output (default: None).
        flip_filter: False = convolution, True = correlation (default: False).
        impl:        Implementation: `'ref'` or `'cuda'` (default: `'cuda'`).

    Returns:
        Tensor of shape `[batch_size, num_channels, out_height, out_width]`.
    """
    assert isinstance(x, torch.Tensor)
    assert impl in ['ref', 'cuda']
    # Prefer the fused kernel when requested, running on GPU, and buildable.
    if impl == 'cuda' and x.device.type == 'cuda' and _init():
        op = _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)
        return op.apply(x, fu, fd, b, None, 0, 0)
    # Otherwise fall back to the composable reference implementation.
    return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter)
#----------------------------------------------------------------------------
@misc.profiled_function
def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
    """Slow, memory-hungry reference implementation of `filtered_lrelu()`,
    composed from the existing `upfirdn2d()` and `bias_act()` ops.
    """
    # Validate arguments (same checks and order as the fused path).
    assert isinstance(x, torch.Tensor) and x.ndim == 4
    fu_w, fu_h = _get_filter_size(fu)
    fd_w, fd_h = _get_filter_size(fd)
    if b is not None:
        assert isinstance(b, torch.Tensor) and b.dtype == x.dtype
        misc.assert_shape(b, [x.shape[1]])
    assert isinstance(up, int) and up >= 1
    assert isinstance(down, int) and down >= 1
    px0, px1, py0, py1 = _parse_padding(padding)
    assert gain == float(gain) and gain > 0
    assert slope == float(slope) and slope >= 0
    assert clamp is None or (clamp == float(clamp) and clamp >= 0)
    # Expected output resolution after pad -> upsample -> filter -> downsample.
    batch_size, channels, in_h, in_w = x.shape
    in_dtype = x.dtype
    out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down
    out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down
    # Chain the primitives: bias, upsample, leaky ReLU (+gain/clamp), downsample.
    y = bias_act.bias_act(x=x, b=b)
    y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter)
    y = bias_act.bias_act(x=y, act='lrelu', alpha=slope, gain=gain, clamp=clamp)
    y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter)
    # Sanity-check shape & dtype.
    misc.assert_shape(y, [batch_size, channels, out_h, out_w])
    assert y.dtype == in_dtype
    return y
#----------------------------------------------------------------------------
# Memoized per-configuration autograd op classes, keyed on all scalar options.
_filtered_lrelu_cuda_cache = dict()
def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False):
    """Fast CUDA implementation of `filtered_lrelu()` using custom ops.

    Returns a `torch.autograd.Function` subclass specialized for the given
    scalar options; results are cached so repeated calls with the same
    options reuse the same class.
    """
    assert isinstance(up, int) and up >= 1
    assert isinstance(down, int) and down >= 1
    px0, px1, py0, py1 = _parse_padding(padding)
    assert gain == float(gain) and gain > 0
    gain = float(gain)
    assert slope == float(slope) and slope >= 0
    slope = float(slope)
    assert clamp is None or (clamp == float(clamp) and clamp >= 0)
    clamp = float(clamp if clamp is not None else 'inf')  # None => no clamping
    # Lookup from cache.
    key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter)
    if key in _filtered_lrelu_cuda_cache:
        return _filtered_lrelu_cuda_cache[key]
    # Forward op.
    class FilteredLReluCuda(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ
            assert isinstance(x, torch.Tensor) and x.ndim == 4
            # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable).
            if fu is None:
                fu = torch.ones([1, 1], dtype=torch.float32, device=x.device)
            if fd is None:
                fd = torch.ones([1, 1], dtype=torch.float32, device=x.device)
            assert 1 <= fu.ndim <= 2
            assert 1 <= fd.ndim <= 2
            # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1.
            if up == 1 and fu.ndim == 1 and fu.shape[0] == 1:
                fu = fu.square()[None]
            if down == 1 and fd.ndim == 1 and fd.shape[0] == 1:
                fd = fd.square()[None]
            # Missing sign input tensor.
            if si is None:
                si = torch.empty([0])
            # Missing bias tensor.
            if b is None:
                b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device)
            # Construct internal sign tensor only if gradients are needed.
            write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad)
            # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout.
            strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1]
            if any(a < b for a, b in zip(strides[:-1], strides[1:])):
                warnings.warn("low-performance memory layout detected in filtered_lrelu input", RuntimeWarning)
            # Call C++/Cuda plugin if datatype is supported.
            if x.dtype in [torch.float16, torch.float32]:
                if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device):
                    warnings.warn("filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning)
                y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs)
            else:
                return_code = -1
            # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because
            # only the bit-packed sign tensor is retained for gradient computation.
            if return_code < 0:
                warnings.warn("filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning)
                y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias.
                y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample.
                so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs) # Activation function and sign handling. Modifies y in-place.
                y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter) # Downsample.
            # Prepare for gradient computation.
            ctx.save_for_backward(fu, fd, (si if si.numel() else so))
            ctx.x_shape = x.shape
            ctx.y_shape = y.shape
            ctx.s_ofs = sx, sy
            return y
        @staticmethod
        def backward(ctx, dy): # pylint: disable=arguments-differ
            fu, fd, si = ctx.saved_tensors
            _, _, xh, xw = ctx.x_shape
            _, _, yh, yw = ctx.y_shape
            sx, sy = ctx.s_ofs
            dx = None # 0
            dfu = None; assert not ctx.needs_input_grad[1]
            dfd = None; assert not ctx.needs_input_grad[2]
            db = None # 3
            dsi = None; assert not ctx.needs_input_grad[4]
            dsx = None; assert not ctx.needs_input_grad[5]
            dsy = None; assert not ctx.needs_input_grad[6]
            if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]:
                # The gradient is another filtered_lrelu with up/down swapped,
                # filters exchanged, and the saved sign tensor replayed.
                pp = [
                    (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0,
                    xw * up - yw * down + px0 - (up - 1),
                    (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0,
                    xh * up - yh * down + py0 - (up - 1),
                ]
                gg = gain * (up ** 2) / (down ** 2)
                ff = (not flip_filter)
                sx = sx - (fu.shape[-1] - 1) + px0
                sy = sy - (fu.shape[0] - 1) + py0
                dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy)
            if ctx.needs_input_grad[3]:
                # Bias gradient: reduce over batch and spatial dims.
                db = dx.sum([0, 2, 3])
            return dx, dfu, dfd, db, dsi, dsx, dsy
    # Add to cache.
    _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda
    return FilteredLReluCuda
#----------------------------------------------------------------------------
| 12,998 | 45.927798 | 164 | py |
pix2pix3D | pix2pix3D-main/torch_utils/ops/conv2d_resample.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""2D convolution with optional up/downsampling."""
import torch
from .. import misc
from . import conv2d_gradfix
from . import upfirdn2d
from .upfirdn2d import _parse_padding
from .upfirdn2d import _get_filter_size
#----------------------------------------------------------------------------
def _get_weight_shape(w):
    """Return the shape of weight tensor `w` as a list of plain Python ints."""
    with misc.suppress_tracer_warnings(): # this value will be treated as a constant
        dims = [int(sz) for sz in w.shape]
    misc.assert_shape(w, dims)
    return dims
#----------------------------------------------------------------------------
def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
    """Thin wrapper dispatching to `conv2d_gradfix.conv2d()` / `conv_transpose2d()`."""
    _out_channels, _in_channels_per_group, kh, kw = _get_weight_shape(w)
    # conv2d() actually performs correlation (flip_weight=True); flip the
    # kernel spatially to obtain true convolution (flip_weight=False).
    if not flip_weight and (kw > 1 or kh > 1):
        w = w.flip([2, 3])
    if transpose:
        op = conv2d_gradfix.conv_transpose2d
    else:
        op = conv2d_gradfix.conv2d
    return op(x, w, stride=stride, padding=padding, groups=groups)
#----------------------------------------------------------------------------
@misc.profiled_function
def conv2d_resample(x, w, f=None, up=1, down=1, padding=0, groups=1, flip_weight=True, flip_filter=False):
    r"""2D convolution with optional up/downsampling.
    Padding is performed only once at the beginning, not between the operations.
    Args:
        x: Input tensor of shape
            `[batch_size, in_channels, in_height, in_width]`.
        w: Weight tensor of shape
            `[out_channels, in_channels//groups, kernel_height, kernel_width]`.
        f: Low-pass filter for up/downsampling. Must be prepared beforehand by
            calling upfirdn2d.setup_filter(). None = identity (default).
        up: Integer upsampling factor (default: 1).
        down: Integer downsampling factor (default: 1).
        padding: Padding with respect to the upsampled image. Can be a single number
            or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]`
            (default: 0).
        groups: Split input channels into N groups (default: 1).
        flip_weight: False = convolution, True = correlation (default: True).
        flip_filter: False = convolution, True = correlation (default: False).
    Returns:
        Tensor of the shape `[batch_size, num_channels, out_height, out_width]`.
    """
    # Validate arguments.
    assert isinstance(x, torch.Tensor) and (x.ndim == 4)
    assert isinstance(w, torch.Tensor) and (w.ndim == 4) and (w.dtype == x.dtype)
    assert f is None or (isinstance(f, torch.Tensor) and f.ndim in [1, 2] and f.dtype == torch.float32)
    assert isinstance(up, int) and (up >= 1)
    assert isinstance(down, int) and (down >= 1)
    assert isinstance(groups, int) and (groups >= 1)
    out_channels, in_channels_per_group, kh, kw = _get_weight_shape(w)
    fw, fh = _get_filter_size(f)
    px0, px1, py0, py1 = _parse_padding(padding)
    # Adjust padding to account for up/downsampling.
    if up > 1:
        px0 += (fw + up - 1) // 2
        px1 += (fw - up) // 2
        py0 += (fh + up - 1) // 2
        py1 += (fh - up) // 2
    if down > 1:
        px0 += (fw - down + 1) // 2
        px1 += (fw - down) // 2
        py0 += (fh - down + 1) // 2
        py1 += (fh - down) // 2
    # Fast path: 1x1 convolution with downsampling only => downsample first, then convolve.
    if kw == 1 and kh == 1 and (down > 1 and up == 1):
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: 1x1 convolution with upsampling only => convolve first, then upsample.
    if kw == 1 and kh == 1 and (up > 1 and down == 1):
        x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
        x = upfirdn2d.upfirdn2d(x=x, f=f, up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
        return x
    # Fast path: downsampling only => use strided convolution.
    if down > 1 and up == 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0,px1,py0,py1], flip_filter=flip_filter)
        x = _conv2d_wrapper(x=x, w=w, stride=down, groups=groups, flip_weight=flip_weight)
        return x
    # Fast path: upsampling with optional downsampling => use transpose strided convolution.
    if up > 1:
        # Swap the channel axes of the weight (per group when grouped) to match
        # the layout expected by the transposed convolution.
        if groups == 1:
            w = w.transpose(0, 1)
        else:
            w = w.reshape(groups, out_channels // groups, in_channels_per_group, kh, kw)
            w = w.transpose(1, 2)
            w = w.reshape(groups * in_channels_per_group, out_channels // groups, kh, kw)
        px0 -= kw - 1
        px1 -= kw - up
        py0 -= kh - 1
        py1 -= kh - up
        # Fold as much of the (possibly negative) padding as possible into the
        # transposed conv (pxt/pyt, symmetric and non-negative); the remainder
        # is applied by upfirdn2d() below.
        pxt = max(min(-px0, -px1), 0)
        pyt = max(min(-py0, -py1), 0)
        x = _conv2d_wrapper(x=x, w=w, stride=up, padding=[pyt,pxt], groups=groups, transpose=True, flip_weight=(not flip_weight))
        x = upfirdn2d.upfirdn2d(x=x, f=f, padding=[px0+pxt,px1+pxt,py0+pyt,py1+pyt], gain=up**2, flip_filter=flip_filter)
        if down > 1:
            x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
        return x
    # Fast path: no up/downsampling, padding supported by the underlying implementation => use plain conv2d.
    if up == 1 and down == 1:
        if px0 == px1 and py0 == py1 and px0 >= 0 and py0 >= 0:
            return _conv2d_wrapper(x=x, w=w, padding=[py0,px0], groups=groups, flip_weight=flip_weight)
    # Fallback: Generic reference implementation.
    x = upfirdn2d.upfirdn2d(x=x, f=(f if up > 1 else None), up=up, padding=[px0,px1,py0,py1], gain=up**2, flip_filter=flip_filter)
    x = _conv2d_wrapper(x=x, w=w, groups=groups, flip_weight=flip_weight)
    if down > 1:
        x = upfirdn2d.upfirdn2d(x=x, f=f, down=down, flip_filter=flip_filter)
    return x
#----------------------------------------------------------------------------
| 6,879 | 46.123288 | 130 | py |
pix2pix3D | pix2pix3D-main/torch_utils/ops/fma.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Fused multiply-add, with slightly faster gradients than `torch.addcmul()`."""
import torch
#----------------------------------------------------------------------------
def fma(a, b, c): # => a * b + c
    """Fused multiply-add `a * b + c` with custom, slightly faster gradients."""
    return _FusedMultiplyAdd.apply(a, b, c)
#----------------------------------------------------------------------------
class _FusedMultiplyAdd(torch.autograd.Function): # a * b + c
@staticmethod
def forward(ctx, a, b, c): # pylint: disable=arguments-differ
out = torch.addcmul(c, a, b)
ctx.save_for_backward(a, b)
ctx.c_shape = c.shape
return out
@staticmethod
def backward(ctx, dout): # pylint: disable=arguments-differ
a, b = ctx.saved_tensors
c_shape = ctx.c_shape
da = None
db = None
dc = None
if ctx.needs_input_grad[0]:
da = _unbroadcast(dout * b, a.shape)
if ctx.needs_input_grad[1]:
db = _unbroadcast(dout * a, b.shape)
if ctx.needs_input_grad[2]:
dc = _unbroadcast(dout, c_shape)
return da, db, dc
#----------------------------------------------------------------------------
def _unbroadcast(x, shape):
extra_dims = x.ndim - len(shape)
assert extra_dims >= 0
dim = [i for i in range(x.ndim) if x.shape[i] > 1 and (i < extra_dims or shape[i - extra_dims] == 1)]
if len(dim):
x = x.sum(dim=dim, keepdim=True)
if extra_dims:
x = x.reshape(-1, *x.shape[extra_dims+1:])
assert x.shape == shape
return x
#----------------------------------------------------------------------------
| 2,161 | 33.31746 | 105 | py |
pix2pix3D | pix2pix3D-main/metrics/metric_utils.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Miscellaneous utilities used internally by the quality metrics."""
import os
import time
import hashlib
import pickle
import copy
import uuid
import numpy as np
import torch
import dnnlib
#----------------------------------------------------------------------------
class MetricOptions:
    """Bundle of options shared by all metric computations.

    Args:
        G:              Generator network, or None when only real statistics
                        are needed.
        G_kwargs:       Keyword arguments used when calling G (default: {}).
        dataset_kwargs: Keyword arguments for constructing the dataset
                        (default: {}).
        num_gpus:       Number of processes/GPUs participating.
        rank:           Rank of the current process; 0 <= rank < num_gpus.
        device:         Torch device to use; defaults to `cuda:<rank>`.
        progress:       Parent ProgressMonitor to report into, or None.
        cache:          Enable on-disk caching of dataset feature statistics.
    """
    def __init__(self, G=None, G_kwargs=None, dataset_kwargs=None, num_gpus=1, rank=0, device=None, progress=None, cache=True):
        assert 0 <= rank < num_gpus
        self.G = G
        # Use None sentinels instead of mutable {} defaults; EasyDict copies
        # the contents, so callers passing {} explicitly see no difference.
        self.G_kwargs = dnnlib.EasyDict(G_kwargs if G_kwargs is not None else {})
        self.dataset_kwargs = dnnlib.EasyDict(dataset_kwargs if dataset_kwargs is not None else {})
        self.num_gpus = num_gpus
        self.rank = rank
        self.device = device if device is not None else torch.device('cuda', rank)
        # Only rank 0 reports progress; other ranks get a silent monitor.
        self.progress = progress.sub() if progress is not None and rank == 0 else ProgressMonitor()
        self.cache = cache
#----------------------------------------------------------------------------
_feature_detector_cache = dict()
def get_feature_detector_name(url):
    """Derive a short detector name from its URL: basename without extension."""
    basename = url.split('/')[-1]
    name, _ext = os.path.splitext(basename)
    return name
def get_feature_detector(url, device=torch.device('cpu'), num_gpus=1, rank=0, verbose=False):
    """Load (or fetch from the in-process cache) a pickled feature detector.

    In multi-GPU runs, rank 0 downloads first while the other ranks wait at a
    barrier, so the download happens only once per node.
    """
    assert 0 <= rank < num_gpus
    key = (url, device)
    if key in _feature_detector_cache:
        return _feature_detector_cache[key]
    leader = (rank == 0)
    if num_gpus > 1 and not leader:
        torch.distributed.barrier() # leader goes first
    with dnnlib.util.open_url(url, verbose=(verbose and leader)) as f:
        _feature_detector_cache[key] = pickle.load(f).to(device)
    if num_gpus > 1 and leader:
        torch.distributed.barrier() # others follow
    return _feature_detector_cache[key]
#----------------------------------------------------------------------------
def iterate_random_labels(opts, batch_size):
    """Infinite generator yielding batches of conditioning labels for G.

    Unconditional models (c_dim == 0) get a constant all-zero batch;
    conditional models get labels sampled uniformly from the dataset.
    """
    if opts.G.c_dim == 0:
        zeros = torch.zeros([batch_size, opts.G.c_dim], device=opts.device)
        while True:
            yield zeros
    else:
        dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
        while True:
            labels = [dataset.get_label(np.random.randint(len(dataset))) for _ in range(batch_size)]
            yield torch.from_numpy(np.stack(labels)).pin_memory().to(opts.device)
#----------------------------------------------------------------------------
class FeatureStats:
    """Accumulates a stream of feature vectors and maintains the statistics
    requested at construction time: the raw vectors themselves
    (`capture_all`) and/or running sums for mean and covariance
    (`capture_mean_cov`). Accumulation stops once `max_items` is reached.
    """
    def __init__(self, capture_all=False, capture_mean_cov=False, max_items=None):
        self.capture_all = capture_all
        self.capture_mean_cov = capture_mean_cov
        self.max_items = max_items
        self.num_items = 0
        self.num_features = None   # fixed on first append()
        self.all_features = None
        self.raw_mean = None       # running sum of features
        self.raw_cov = None        # running sum of outer products

    def set_num_features(self, num_features):
        """Fix the feature dimensionality on first use; verify it afterwards."""
        if self.num_features is None:
            self.num_features = num_features
            self.all_features = []
            self.raw_mean = np.zeros([num_features], dtype=np.float64)
            self.raw_cov = np.zeros([num_features, num_features], dtype=np.float64)
        else:
            assert num_features == self.num_features

    def is_full(self):
        """True when `max_items` is set and has been reached."""
        return self.max_items is not None and self.num_items >= self.max_items

    def append(self, x):
        """Accumulate a 2D array of feature vectors (one row per item)."""
        x = np.asarray(x, dtype=np.float32)
        assert x.ndim == 2
        if self.max_items is not None and self.num_items + x.shape[0] > self.max_items:
            if self.num_items >= self.max_items:
                return  # already full -- drop the batch entirely
            x = x[:self.max_items - self.num_items]  # keep only what still fits
        self.set_num_features(x.shape[1])
        self.num_items += x.shape[0]
        if self.capture_all:
            self.all_features.append(x)
        if self.capture_mean_cov:
            x64 = x.astype(np.float64)
            self.raw_mean += x64.sum(axis=0)
            self.raw_cov += x64.T @ x64

    def append_torch(self, x, num_gpus=1, rank=0):
        """Accumulate a torch tensor, gathering the batches of all GPUs first."""
        assert isinstance(x, torch.Tensor) and x.ndim == 2
        assert 0 <= rank < num_gpus
        if num_gpus > 1:
            gathered = []
            for src in range(num_gpus):
                y = x.clone()
                torch.distributed.broadcast(y, src=src)
                gathered.append(y)
            x = torch.stack(gathered, dim=1).flatten(0, 1) # interleave samples
        self.append(x.cpu().numpy())

    def get_all(self):
        """Return all captured features as one [num_items, num_features] array."""
        assert self.capture_all
        return np.concatenate(self.all_features, axis=0)

    def get_all_torch(self):
        """Same as `get_all()`, but as a torch tensor."""
        return torch.from_numpy(self.get_all())

    def get_mean_cov(self):
        """Return the feature mean vector and covariance matrix."""
        assert self.capture_mean_cov
        mean = self.raw_mean / self.num_items
        cov = self.raw_cov / self.num_items - np.outer(mean, mean)
        return mean, cov

    def save(self, pkl_file):
        """Pickle the full accumulator state to `pkl_file`."""
        with open(pkl_file, 'wb') as f:
            pickle.dump(self.__dict__, f)

    @staticmethod
    def load(pkl_file):
        """Restore a FeatureStats previously written by `save()`."""
        with open(pkl_file, 'rb') as f:
            s = dnnlib.EasyDict(pickle.load(f))
        obj = FeatureStats(capture_all=s.capture_all, max_items=s.max_items)
        obj.__dict__.update(s)
        return obj
#----------------------------------------------------------------------------
class ProgressMonitor:
    """Reports progress of a long-running computation, optionally forwarding
    it to a caller-supplied `progress_fn(cur, total)` callback and printing
    periodic timing lines when `verbose` is set. The [pfn_lo, pfn_hi] range
    maps this monitor's local progress into a parent's global scale.
    """
    def __init__(self, tag=None, num_items=None, flush_interval=1000, verbose=False, progress_fn=None, pfn_lo=0, pfn_hi=1000, pfn_total=1000):
        self.tag = tag
        self.num_items = num_items
        self.verbose = verbose
        self.flush_interval = flush_interval
        self.progress_fn = progress_fn
        self.pfn_lo = pfn_lo
        self.pfn_hi = pfn_hi
        self.pfn_total = pfn_total
        self.start_time = time.time()
        self.batch_time = self.start_time
        self.batch_items = 0
        if self.progress_fn is not None:
            self.progress_fn(self.pfn_lo, self.pfn_total)

    def update(self, cur_items):
        """Record that `cur_items` items are done; report if a flush is due."""
        assert self.num_items is None or cur_items <= self.num_items
        reached_flush = cur_items >= self.batch_items + self.flush_interval
        reached_end = self.num_items is not None and cur_items >= self.num_items
        if not (reached_flush or reached_end):
            return
        now = time.time()
        total_time = now - self.start_time
        time_per_item = (now - self.batch_time) / max(cur_items - self.batch_items, 1)
        if self.verbose and self.tag is not None:
            print(f'{self.tag:<19s} items {cur_items:<7d} time {dnnlib.util.format_time(total_time):<12s} ms/item {time_per_item*1e3:.2f}')
        self.batch_time = now
        self.batch_items = cur_items
        if self.progress_fn is not None and self.num_items is not None:
            self.progress_fn(self.pfn_lo + (self.pfn_hi - self.pfn_lo) * (cur_items / self.num_items), self.pfn_total)

    def sub(self, tag=None, num_items=None, flush_interval=1000, rel_lo=0, rel_hi=1):
        """Create a child monitor covering [rel_lo, rel_hi] of this one's range."""
        span = self.pfn_hi - self.pfn_lo
        return ProgressMonitor(
            tag             = tag,
            num_items       = num_items,
            flush_interval  = flush_interval,
            verbose         = self.verbose,
            progress_fn     = self.progress_fn,
            pfn_lo          = self.pfn_lo + span * rel_lo,
            pfn_hi          = self.pfn_lo + span * rel_hi,
            pfn_total       = self.pfn_total,
        )
#----------------------------------------------------------------------------
def compute_feature_stats_for_dataset(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, data_loader_kwargs=None, max_items=None, **stats_kwargs):
    """Run the feature detector over (up to `max_items` of) the real dataset
    and return the accumulated FeatureStats.

    When `opts.cache` is set, results are cached on disk keyed by a hash of
    the dataset/detector options; rank 0 owns the cache file and in
    multi-GPU runs the work is sharded round-robin across ranks.
    """
    dataset = dnnlib.util.construct_class_by_name(**opts.dataset_kwargs)
    if data_loader_kwargs is None:
        data_loader_kwargs = dict(pin_memory=True, num_workers=3, prefetch_factor=2)
    # Try to lookup from cache.
    cache_file = None
    if opts.cache:
        # Choose cache file name.
        args = dict(dataset_kwargs=opts.dataset_kwargs, detector_url=detector_url, detector_kwargs=detector_kwargs, stats_kwargs=stats_kwargs)
        md5 = hashlib.md5(repr(sorted(args.items())).encode('utf-8'))
        cache_tag = f'{dataset.name}-{get_feature_detector_name(detector_url)}-{md5.hexdigest()}'
        cache_file = dnnlib.make_cache_dir_path('gan-metrics', cache_tag + '.pkl')
        # Check if the file exists (all processes must agree).
        flag = os.path.isfile(cache_file) if opts.rank == 0 else False
        if opts.num_gpus > 1:
            flag = torch.as_tensor(flag, dtype=torch.float32, device=opts.device)
            torch.distributed.broadcast(tensor=flag, src=0)
            flag = (float(flag.cpu()) != 0)
        # Load.
        if flag:
            return FeatureStats.load(cache_file)
    # Initialize.
    num_items = len(dataset)
    if max_items is not None:
        num_items = min(num_items, max_items)
    stats = FeatureStats(max_items=num_items, **stats_kwargs)
    progress = opts.progress.sub(tag='dataset features', num_items=num_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)
    # Main loop. Each rank processes a round-robin shard of item indices.
    item_subset = [(i * opts.num_gpus + opts.rank) % num_items for i in range((num_items - 1) // opts.num_gpus + 1)]
    for images, _labels in torch.utils.data.DataLoader(dataset=dataset, sampler=item_subset, batch_size=batch_size, **data_loader_kwargs):
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1])  # grayscale -> 3 channels for the detector
        features = detector(images.to(opts.device), **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)
    # Save to cache.
    if cache_file is not None and opts.rank == 0:
        os.makedirs(os.path.dirname(cache_file), exist_ok=True)
        temp_file = cache_file + '.' + uuid.uuid4().hex
        stats.save(temp_file)
        os.replace(temp_file, cache_file) # atomic
    return stats
#----------------------------------------------------------------------------
def compute_feature_stats_for_generator(opts, detector_url, detector_kwargs, rel_lo=0, rel_hi=1, batch_size=64, batch_gen=None, **stats_kwargs):
    """Sample images from generator `opts.G`, run them through the feature
    detector, and return the accumulated FeatureStats.

    No disk caching here -- generator output depends on the current weights.
    `batch_gen` controls the sub-batch size used for generation (memory).
    """
    if batch_gen is None:
        batch_gen = min(batch_size, 4)  # generate in small sub-batches to limit memory
    assert batch_size % batch_gen == 0
    # Setup generator and labels.
    G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
    c_iter = iterate_random_labels(opts=opts, batch_size=batch_gen)
    # Initialize.
    stats = FeatureStats(**stats_kwargs)
    assert stats.max_items is not None
    progress = opts.progress.sub(tag='generator features', num_items=stats.max_items, rel_lo=rel_lo, rel_hi=rel_hi)
    detector = get_feature_detector(url=detector_url, device=opts.device, num_gpus=opts.num_gpus, rank=opts.rank, verbose=progress.verbose)
    # Main loop.
    while not stats.is_full():
        images = []
        for _i in range(batch_size // batch_gen):
            z = torch.randn([batch_gen, G.z_dim], device=opts.device)
            img = G(z=z, c=next(c_iter), **opts.G_kwargs)['image']
            # Map float output to uint8 pixels; presumably G outputs roughly
            # [-1, 1] (standard convention) -- verify against the generator.
            img = (img * 127.5 + 128).clamp(0, 255).to(torch.uint8)
            images.append(img)
        images = torch.cat(images)
        if images.shape[1] == 1:
            images = images.repeat([1, 3, 1, 1])  # grayscale -> 3 channels for the detector
        features = detector(images, **detector_kwargs)
        stats.append_torch(features, num_gpus=opts.num_gpus, rank=opts.rank)
        progress.update(stats.num_items)
    return stats
#----------------------------------------------------------------------------
| 12,059 | 41.765957 | 167 | py |
pix2pix3D | pix2pix3D-main/metrics/equivariance.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Equivariance metrics (EQ-T, EQ-T_frac, and EQ-R) from the paper
"Alias-Free Generative Adversarial Networks"."""
import copy
import numpy as np
import torch
import torch.fft
from torch_utils.ops import upfirdn2d
from . import metric_utils
#----------------------------------------------------------------------------
# Utilities.
def sinc(x):
    """Normalized sinc: sin(pi*x) / (pi*x), with the limit value 1 at x = 0."""
    y = (x * np.pi).abs()
    # Clamp the denominator away from zero; the x == 0 case is patched below.
    ratio = torch.sin(y) / y.clamp(1e-30, float('inf'))
    return torch.where(y < 1e-30, torch.ones_like(x), ratio)

def lanczos_window(x, a):
    """Lanczos window of support `a`: sinc(|x| / a) inside the window, 0 outside."""
    t = x.abs() / a
    return torch.where(t < 1, sinc(t), torch.zeros_like(t))
def rotation_matrix(angle):
    """3x3 homogeneous 2D rotation matrix for `angle` (radians, float32)."""
    angle = torch.as_tensor(angle).to(torch.float32)
    c = angle.cos()
    s = angle.sin()
    mat = torch.eye(3, device=angle.device)
    mat[0, 0] = c
    mat[0, 1] = s
    mat[1, 0] = -s
    mat[1, 1] = c
    return mat
#----------------------------------------------------------------------------
# Apply integer translation to a batch of 2D images. Corresponds to the
# operator T_x in Appendix E.1.
def apply_integer_translation(x, tx, ty):
    """Translate images `x` [N, C, H, W] by a whole number of pixels.

    `tx`/`ty` are fractions of the image width/height; they are scaled to
    pixels and rounded to the nearest integer. Returns `(out, mask)` where
    `mask` is 1 exactly where translated content landed.
    """
    _N, _C, H, W = x.shape
    tx = torch.as_tensor(tx * W).to(dtype=torch.float32, device=x.device)
    ty = torch.as_tensor(ty * H).to(dtype=torch.float32, device=x.device)
    ix = tx.round().to(torch.int64)
    iy = ty.round().to(torch.int64)
    out = torch.zeros_like(x)
    mask = torch.zeros_like(x)
    # Shifts of a full image size or more leave the canvas entirely empty.
    if abs(ix) < W and abs(iy) < H:
        src = x[:, :, max(-iy, 0) : H + min(-iy, 0), max(-ix, 0) : W + min(-ix, 0)]
        rows = slice(max(iy, 0), H + min(iy, 0))
        cols = slice(max(ix, 0), W + min(ix, 0))
        out[:, :, rows, cols] = src
        mask[:, :, rows, cols] = 1
    return out, mask
#----------------------------------------------------------------------------
# Apply integer translation to a batch of 2D images. Corresponds to the
# operator T_x in Appendix E.2.
def apply_fractional_translation(x, tx, ty, a=3):
    """Translate images `x` [N, C, H, W] by a fractional pixel offset using a
    separable Lanczos-windowed sinc filter of support `a`.

    `tx`/`ty` are fractions of the image width/height. Returns `(z, m)` where
    `z` is the translated image and `m` is a mask that is 1 where the output
    is unaffected by image borders.
    """
    _N, _C, H, W = x.shape
    tx = torch.as_tensor(tx * W).to(dtype=torch.float32, device=x.device)
    ty = torch.as_tensor(ty * H).to(dtype=torch.float32, device=x.device)
    # Split the pixel shift into integer (ix, iy) and fractional (fx, fy) parts.
    ix = tx.floor().to(torch.int64)
    iy = ty.floor().to(torch.int64)
    fx = tx - ix
    fy = ty - iy
    b = a - 1  # taps before the center sample
    z = torch.zeros_like(x)
    # Output region that receives translated content (filter footprint included).
    zx0 = max(ix - b, 0)
    zy0 = max(iy - b, 0)
    zx1 = min(ix + a, 0) + W
    zy1 = min(iy + a, 0) + H
    if zx0 < zx1 and zy0 < zy1:
        # Separable Lanczos-windowed sinc taps for the fractional part of the shift.
        taps = torch.arange(a * 2, device=x.device) - b
        filter_x = (sinc(taps - fx) * sinc((taps - fx) / a)).unsqueeze(0)
        filter_y = (sinc(taps - fy) * sinc((taps - fy) / a)).unsqueeze(1)
        y = x
        y = upfirdn2d.filter2d(y, filter_x / filter_x.sum(), padding=[b,a,0,0])
        y = upfirdn2d.filter2d(y, filter_y / filter_y.sum(), padding=[0,0,b,a])
        y = y[:, :, max(b-iy,0) : H+b+a+min(-iy-a,0), max(b-ix,0) : W+b+a+min(-ix-a,0)]
        z[:, :, zy0:zy1, zx0:zx1] = y
    # Validity mask: 1 only where no border padding leaked into the filter footprint.
    m = torch.zeros_like(x)
    mx0 = max(ix + a, 0)
    my0 = max(iy + a, 0)
    mx1 = min(ix - b, 0) + W
    my1 = min(iy - b, 0) + H
    if mx0 < mx1 and my0 < my1:
        m[:, :, my0:my1, mx0:mx1] = 1
    return z, m
#----------------------------------------------------------------------------
# Construct an oriented low-pass filter that applies the appropriate
# bandlimit with respect to the input and output of the given affine 2D
# image transformation.
def construct_affine_bandlimit_filter(mat, a=3, amax=16, aflt=64, up=4, cutoff_in=1, cutoff_out=1):
    """Construct an oriented low-pass FIR filter that applies the appropriate
    bandlimit with respect to both the input and the output of the given
    affine 2D image transformation.

    Args:
        mat:        Affine transformation matrix (at least 2x2 linear part is used).
        a:          Lanczos window half-width.
        amax:       Half-width of the final filter, in low-res pixels.
        aflt:       Half-width used during the FFT-based construction (must exceed amax).
        up:         Upsampling factor the filter is built for.
        cutoff_in:  Bandlimit of the input signal.
        cutoff_out: Bandlimit of the output signal.

    Returns:
        2D filter taps of shape [amax*2*up - 1, amax*2*up - 1], normalized per
        up-phase so that DC gain is 1.
    """
    assert a <= amax < aflt
    mat = torch.as_tensor(mat).to(torch.float32)
    # Construct 2D filter taps in input & output coordinate spaces.
    taps = ((torch.arange(aflt * up * 2 - 1, device=mat.device) + 1) / up - aflt).roll(1 - aflt * up)
    # NOTE(review): torch.meshgrid without indexing= relies on the legacy 'ij'
    # default -- confirm against the torch version this repo targets.
    yi, xi = torch.meshgrid(taps, taps)
    xo, yo = (torch.stack([xi, yi], dim=2) @ mat[:2, :2].t()).unbind(2)
    # Convolution of two oriented 2D sinc filters (performed in Fourier space).
    fi = sinc(xi * cutoff_in) * sinc(yi * cutoff_in)
    fo = sinc(xo * cutoff_out) * sinc(yo * cutoff_out)
    f = torch.fft.ifftn(torch.fft.fftn(fi) * torch.fft.fftn(fo)).real
    # Convolution of two oriented 2D Lanczos windows.
    wi = lanczos_window(xi, a) * lanczos_window(yi, a)
    wo = lanczos_window(xo, a) * lanczos_window(yo, a)
    w = torch.fft.ifftn(torch.fft.fftn(wi) * torch.fft.fftn(wo)).real
    # Construct windowed FIR filter.
    f = f * w
    # Finalize: re-center, crop to amax, normalize each up-phase, trim to odd size.
    c = (aflt - amax) * up
    f = f.roll([aflt * up - 1] * 2, dims=[0, 1])[c:-c, c:-c]
    f = torch.nn.functional.pad(f, [0, 1, 0, 1]).reshape(amax * 2, up, amax * 2, up)
    f = f / f.sum([0, 2], keepdim=True) / (up ** 2)
    f = f.reshape(amax * 2 * up, amax * 2 * up)[:-1, :-1]
    return f
#----------------------------------------------------------------------------
# Apply the given affine transformation to a batch of 2D images.
def apply_affine_transformation(x, mat, up=4, **filter_kwargs):
    """Apply the affine transformation `mat` to a batch of 2D images.

    Upsamples with an oriented bandlimiting filter, resamples with a bilinear
    grid, and returns both the transformed images and a validity mask.

    Args:
        x:             Image batch of shape [N, C, H, W].
        mat:           Affine transformation matrix (3x3).
        up:            Intermediate upsampling factor.
        filter_kwargs: Forwarded to construct_affine_bandlimit_filter().

    Returns:
        (z, m): transformed images and a 0/1 mask of valid pixels.
    """
    _N, _C, H, W = x.shape
    mat = torch.as_tensor(mat).to(dtype=torch.float32, device=x.device)
    # Construct filter.
    f = construct_affine_bandlimit_filter(mat, up=up, **filter_kwargs)
    assert f.ndim == 2 and f.shape[0] == f.shape[1] and f.shape[0] % 2 == 1
    p = f.shape[0] // 2
    # Construct sampling grid: invert the transform and compensate for the
    # upsampling factor and the filter padding so coordinates line up.
    theta = mat.inverse()
    theta[:2, 2] *= 2
    theta[0, 2] += 1 / up / W
    theta[1, 2] += 1 / up / H
    theta[0, :] *= W / (W + p / up * 2)
    theta[1, :] *= H / (H + p / up * 2)
    theta = theta[:2, :3].unsqueeze(0).repeat([x.shape[0], 1, 1])
    g = torch.nn.functional.affine_grid(theta, x.shape, align_corners=False)
    # Resample image.
    y = upfirdn2d.upsample2d(x=x, f=f, up=up, padding=p)
    z = torch.nn.functional.grid_sample(y, g, mode='bilinear', padding_mode='zeros', align_corners=False)
    # Form mask: ones away from the padded border, warped with nearest sampling
    # so it stays binary.
    m = torch.zeros_like(y)
    c = p * 2 + 1
    m[:, :, c:-c, c:-c] = 1
    m = torch.nn.functional.grid_sample(m, g, mode='nearest', padding_mode='zeros', align_corners=False)
    return z, m
#----------------------------------------------------------------------------
# Apply fractional rotation to a batch of 2D images. Corresponds to the
# operator R_\alpha in Appendix E.3.
def apply_fractional_rotation(x, angle, a=3, **filter_kwargs):
    """Rotate a batch of 2D images by a fractional angle (operator R_alpha, Appendix E.3).

    Returns the (rotated images, validity mask) pair produced by
    apply_affine_transformation().
    """
    theta = torch.as_tensor(angle).to(dtype=torch.float32, device=x.device)
    return apply_affine_transformation(x, rotation_matrix(theta), a=a, amax=a * 2, **filter_kwargs)
#----------------------------------------------------------------------------
# Modify the frequency content of a batch of 2D images as if they had undergone
# fractional rotation -- but without actually rotating them. Corresponds to
# the operator R^*_\alpha in Appendix E.3.
def apply_fractional_pseudo_rotation(x, angle, a=3, **filter_kwargs):
    """Bandlimit a batch of images exactly as a fractional rotation would,
    without actually rotating them (operator R^*_alpha, Appendix E.3).

    Returns (filtered images, validity mask).
    """
    theta = torch.as_tensor(angle).to(dtype=torch.float32, device=x.device)
    # Same oriented low-pass filter a real rotation by `angle` would apply.
    taps = construct_affine_bandlimit_filter(rotation_matrix(-theta), a=a, amax=a * 2, up=1, **filter_kwargs)
    filtered = upfirdn2d.filter2d(x=x, f=taps)
    # Pixels within half the filter support of the border are invalid.
    margin = taps.shape[0] // 2
    mask = torch.zeros_like(filtered)
    mask[:, :, margin:-margin, margin:-margin] = 1
    return filtered, mask
#----------------------------------------------------------------------------
# Compute the selected equivariance metrics for the given generator.
def compute_equivariance_metrics(opts, num_samples, batch_size, translate_max=0.125, rotate_max=1, compute_eqt_int=False, compute_eqt_frac=False, compute_eqr=False):
    """Compute the selected equivariance PSNR metrics (EQ-T, EQ-T_frac, EQ-R) for opts.G.

    For each sample, renders a reference image with the identity input transform
    and a transformed image with the inverse transform baked into the generator
    input, then compares the transformed reference against the generated image
    over a validity mask.

    Args:
        opts:             metric_utils.MetricOptions (generator, device, progress, ...).
        num_samples:      Total number of samples to draw.
        batch_size:       Per-GPU batch size.
        translate_max:    Max translation as a fraction of image size.
        rotate_max:       Max rotation as a fraction of pi.
        compute_eqt_int:  Enable integer-translation metric (EQ-T).
        compute_eqt_frac: Enable fractional-translation metric (EQ-T_frac).
        compute_eqr:      Enable rotation metric (EQ-R).

    Returns:
        A single PSNR float if one metric is enabled, else a tuple of PSNRs in
        (eqt_int, eqt_frac, eqr) order of the enabled flags.

    Raises:
        ValueError: If the generator lacks a user-specified input transform.
    """
    assert compute_eqt_int or compute_eqt_frac or compute_eqr
    # Setup generator and labels.
    G = copy.deepcopy(opts.G).eval().requires_grad_(False).to(opts.device)
    I = torch.eye(3, device=opts.device)
    M = getattr(getattr(getattr(G, 'synthesis', None), 'input', None), 'transform', None)
    if M is None:
        raise ValueError('Cannot compute equivariance metrics; the given generator does not support user-specified image transformations')
    c_iter = metric_utils.iterate_random_labels(opts=opts, batch_size=batch_size)
    # Sampling loop.
    sums = None
    progress = opts.progress.sub(tag='eq sampling', num_items=num_samples)
    for batch_start in range(0, num_samples, batch_size * opts.num_gpus):
        progress.update(batch_start)
        s = []
        # Randomize noise buffers, if any.
        for name, buf in G.named_buffers():
            if name.endswith('.noise_const'):
                buf.copy_(torch.randn_like(buf))
        # Run mapping network.
        z = torch.randn([batch_size, G.z_dim], device=opts.device)
        c = next(c_iter)
        ws = G.mapping(z=z, c=c)
        # Generate reference image with the identity input transform.
        M[:] = I
        orig = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs)
        # Integer translation (EQ-T): snap the offset to whole pixels.
        if compute_eqt_int:
            t = (torch.rand(2, device=opts.device) * 2 - 1) * translate_max
            t = (t * G.img_resolution).round() / G.img_resolution
            M[:] = I
            M[:2, 2] = -t
            img = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs)
            ref, mask = apply_integer_translation(orig, t[0], t[1])
            s += [(ref - img).square() * mask, mask]
        # Fractional translation (EQ-T_frac).
        if compute_eqt_frac:
            t = (torch.rand(2, device=opts.device) * 2 - 1) * translate_max
            M[:] = I
            M[:2, 2] = -t
            img = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs)
            ref, mask = apply_fractional_translation(orig, t[0], t[1])
            s += [(ref - img).square() * mask, mask]
        # Rotation (EQ-R): compare pseudo-rotated output against rotated reference
        # so both sides see the same bandlimiting.
        if compute_eqr:
            angle = (torch.rand([], device=opts.device) * 2 - 1) * (rotate_max * np.pi)
            M[:] = rotation_matrix(-angle)
            img = G.synthesis(ws=ws, noise_mode='const', **opts.G_kwargs)
            ref, ref_mask = apply_fractional_rotation(orig, angle)
            pseudo, pseudo_mask = apply_fractional_pseudo_rotation(img, angle)
            mask = ref_mask * pseudo_mask
            s += [(ref - pseudo).square() * mask, mask]
        # Accumulate results: interleaved (squared-error sum, mask sum) pairs.
        s = torch.stack([x.to(torch.float64).sum() for x in s])
        sums = sums + s if sums is not None else s
    progress.update(num_samples)
    # Compute PSNRs over the dynamic range [-1, 1] (hence the log10(2) term).
    if opts.num_gpus > 1:
        torch.distributed.all_reduce(sums)
    sums = sums.cpu()
    mses = sums[0::2] / sums[1::2]
    psnrs = np.log10(2) * 20 - mses.log10() * 10
    psnrs = tuple(psnrs.numpy())
    return psnrs[0] if len(psnrs) == 1 else psnrs
#----------------------------------------------------------------------------
| 10,982 | 39.677778 | 165 | py |
pix2pix3D | pix2pix3D-main/metrics/perceptual_path_length.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Perceptual Path Length (PPL) from the paper "A Style-Based Generator
Architecture for Generative Adversarial Networks". Matches the original
implementation by Karras et al. at
https://github.com/NVlabs/stylegan/blob/master/metrics/perceptual_path_length.py"""
import copy
import numpy as np
import torch
from . import metric_utils
#----------------------------------------------------------------------------
# Spherical interpolation of a batch of vectors.
def slerp(a, b, t):
    """Spherical interpolation of a batch of vectors.

    Both endpoints are normalized first; the result is a unit vector lying on
    the great circle from `a` to `b`, at fraction `t` of the subtended angle.
    """
    def _unit(v):
        return v / v.norm(dim=-1, keepdim=True)

    a_hat = _unit(a)
    b_hat = _unit(b)
    cos_omega = (a_hat * b_hat).sum(dim=-1, keepdim=True)
    omega_t = t * torch.acos(cos_omega)
    # Component of b orthogonal to a, normalized, spans the rotation plane.
    ortho = _unit(b_hat - cos_omega * a_hat)
    out = a_hat * torch.cos(omega_t) + ortho * torch.sin(omega_t)
    return _unit(out)
#----------------------------------------------------------------------------
class PPLSampler(torch.nn.Module):
    """Draws latent pairs separated by `epsilon`, renders both, and returns the
    per-sample differential LPIPS distance used for Perceptual Path Length."""

    def __init__(self, G, G_kwargs, epsilon, space, sampling, crop, vgg16):
        # space:    'z' (slerp in latent space) or 'w' (lerp in style space).
        # sampling: 'full' (random t in [0,1]) or 'end' (t fixed at 0).
        # crop:     center-crop images before LPIPS (face-centric datasets).
        assert space in ['z', 'w']
        assert sampling in ['full', 'end']
        super().__init__()
        self.G = copy.deepcopy(G)
        self.G_kwargs = G_kwargs
        self.epsilon = epsilon
        self.space = space
        self.sampling = sampling
        self.crop = crop
        self.vgg16 = copy.deepcopy(vgg16)

    def forward(self, c):
        """Return a [batch] tensor of squared LPIPS distances divided by epsilon^2,
        given a batch of conditioning labels `c`."""
        # Generate random latents and interpolation t-values.
        t = torch.rand([c.shape[0]], device=c.device) * (1 if self.sampling == 'full' else 0)
        z0, z1 = torch.randn([c.shape[0] * 2, self.G.z_dim], device=c.device).chunk(2)
        # Interpolate in W or Z.
        if self.space == 'w':
            w0, w1 = self.G.mapping(z=torch.cat([z0,z1]), c=torch.cat([c,c])).chunk(2)
            wt0 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2))
            wt1 = w0.lerp(w1, t.unsqueeze(1).unsqueeze(2) + self.epsilon)
        else: # space == 'z'
            zt0 = slerp(z0, z1, t.unsqueeze(1))
            zt1 = slerp(z0, z1, t.unsqueeze(1) + self.epsilon)
            wt0, wt1 = self.G.mapping(z=torch.cat([zt0,zt1]), c=torch.cat([c,c])).chunk(2)
        # Randomize noise buffers.
        for name, buf in self.G.named_buffers():
            if name.endswith('.noise_const'):
                buf.copy_(torch.randn_like(buf))
        # Generate both endpoints in one synthesis call.
        img = self.G.synthesis(ws=torch.cat([wt0,wt1]), noise_mode='const', force_fp32=True, **self.G_kwargs)
        # Center crop.
        if self.crop:
            assert img.shape[2] == img.shape[3]
            c = img.shape[2] // 8
            img = img[:, :, c*3 : c*7, c*2 : c*6]
        # Downsample to 256x256 by box filtering (average pooling via reshape).
        factor = self.G.img_resolution // 256
        if factor > 1:
            img = img.reshape([-1, img.shape[1], img.shape[2] // factor, factor, img.shape[3] // factor, factor]).mean([3, 5])
        # Scale dynamic range from [-1,1] to [0,255].
        img = (img + 1) * (255 / 2)
        if self.G.img_channels == 1:
            img = img.repeat([1, 3, 1, 1])
        # Evaluate differential LPIPS.
        lpips_t0, lpips_t1 = self.vgg16(img, resize_images=False, return_lpips=True).chunk(2)
        dist = (lpips_t0 - lpips_t1).square().sum(1) / self.epsilon ** 2
        return dist
#----------------------------------------------------------------------------
def compute_ppl(opts, num_samples, epsilon, space, sampling, crop, batch_size):
    """Compute Perceptual Path Length over `num_samples` latent pairs.

    Args:
        opts:        metric_utils.MetricOptions.
        num_samples: Number of latent pairs to evaluate.
        epsilon:     Interpolation step between the paired latents.
        space:       'z' or 'w' interpolation space (see PPLSampler).
        sampling:    'full' or 'end' t-sampling strategy.
        crop:        Center-crop images before LPIPS.
        batch_size:  Per-GPU batch size.

    Returns:
        Mean PPL (float) on rank 0; NaN on all other ranks.
    """
    vgg16_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/vgg16.pkl'
    vgg16 = metric_utils.get_feature_detector(vgg16_url, num_gpus=opts.num_gpus, rank=opts.rank, verbose=opts.progress.verbose)
    # Setup sampler and labels.
    sampler = PPLSampler(G=opts.G, G_kwargs=opts.G_kwargs, epsilon=epsilon, space=space, sampling=sampling, crop=crop, vgg16=vgg16)
    sampler.eval().requires_grad_(False).to(opts.device)
    c_iter = metric_utils.iterate_random_labels(opts=opts, batch_size=batch_size)
    # Sampling loop: every rank gathers every other rank's distances so all
    # ranks stay in lockstep; only rank 0 keeps them.
    dist = []
    progress = opts.progress.sub(tag='ppl sampling', num_items=num_samples)
    for batch_start in range(0, num_samples, batch_size * opts.num_gpus):
        progress.update(batch_start)
        x = sampler(next(c_iter))
        for src in range(opts.num_gpus):
            y = x.clone()
            if opts.num_gpus > 1:
                torch.distributed.broadcast(y, src=src)
            dist.append(y)
    progress.update(num_samples)
    # Compute PPL: discard the extreme 1% tails before averaging.
    if opts.rank != 0:
        return float('nan')
    dist = torch.cat(dist)[:num_samples].cpu().numpy()
    # NOTE(review): np.percentile's `interpolation=` kwarg was renamed to
    # `method=` in NumPy 1.22 and removed later -- confirm the pinned NumPy.
    lo = np.percentile(dist, 1, interpolation='lower')
    hi = np.percentile(dist, 99, interpolation='higher')
    ppl = np.extract(np.logical_and(dist >= lo, dist <= hi), dist).mean()
    return float(ppl)
#----------------------------------------------------------------------------
| 5,370 | 40.960938 | 131 | py |
pix2pix3D | pix2pix3D-main/metrics/metric_main.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Main API for computing and reporting quality metrics."""
import os
import time
import json
import torch
import dnnlib
from . import metric_utils
from . import frechet_inception_distance
from . import kernel_inception_distance
from . import precision_recall
from . import perceptual_path_length
from . import inception_score
from . import equivariance
#----------------------------------------------------------------------------
_metric_dict = dict() # name => fn

def register_metric(fn):
    """Decorator: register `fn` in the global metric registry under its function name."""
    assert callable(fn)
    _metric_dict[fn.__name__] = fn
    return fn

def is_valid_metric(metric):
    """Return True if `metric` names a registered metric."""
    return metric in _metric_dict

def list_valid_metrics():
    """Return the names of all registered metrics."""
    return list(_metric_dict.keys())
#----------------------------------------------------------------------------
def calc_metric(metric, **kwargs): # See metric_utils.MetricOptions for the full list of arguments.
    """Run the registered metric `metric` and return its results plus metadata.

    In multi-GPU runs, every scalar result is broadcast from rank 0 so all
    ranks return identical values.

    Returns:
        dnnlib.EasyDict with fields: results, metric, total_time,
        total_time_str, num_gpus.
    """
    assert is_valid_metric(metric)
    opts = metric_utils.MetricOptions(**kwargs)
    # Calculate.
    start_time = time.time()
    results = _metric_dict[metric](opts)
    total_time = time.time() - start_time
    # Broadcast results from rank 0 so every process agrees.
    for key, value in list(results.items()):
        if opts.num_gpus > 1:
            value = torch.as_tensor(value, dtype=torch.float64, device=opts.device)
            torch.distributed.broadcast(tensor=value, src=0)
            value = float(value.cpu())
        results[key] = value
    # Decorate with metadata.
    return dnnlib.EasyDict(
        results = dnnlib.EasyDict(results),
        metric = metric,
        total_time = total_time,
        total_time_str = dnnlib.util.format_time(total_time),
        num_gpus = opts.num_gpus,
    )
#----------------------------------------------------------------------------
def report_metric(result_dict, run_dir=None, snapshot_pkl=None):
    """Print a metric result as one JSONL line and append it to
    metric-<name>.jsonl inside `run_dir` (when the directory exists)."""
    metric = result_dict['metric']
    assert is_valid_metric(metric)
    # Store the snapshot path relative to the run directory when both are known.
    if run_dir is not None and snapshot_pkl is not None:
        snapshot_pkl = os.path.relpath(snapshot_pkl, run_dir)
    record = dict(result_dict, snapshot_pkl=snapshot_pkl, timestamp=time.time())
    line = json.dumps(record)
    print(line)
    if run_dir is not None and os.path.isdir(run_dir):
        log_path = os.path.join(run_dir, f'metric-{metric}.jsonl')
        with open(log_path, 'at') as log_file:
            log_file.write(line + '\n')
#----------------------------------------------------------------------------
# Recommended metrics.
@register_metric
def fid50k_full(opts):
    """Frechet inception distance vs. the full dataset (no xflip), 50k generated images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    fid = frechet_inception_distance.compute_fid(opts, max_real=None, num_gen=50000)
    return dict(fid50k_full=fid)

@register_metric
def kid50k_full(opts):
    """Kernel inception distance vs. up to 1M reals, 50k generated images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    kid = kernel_inception_distance.compute_kid(opts, max_real=1000000, num_gen=50000, num_subsets=100, max_subset_size=1000)
    return dict(kid50k_full=kid)

@register_metric
def pr50k3_full(opts):
    """Precision/recall (k=3 neighborhood) vs. up to 200k reals, 50k generated images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    precision, recall = precision_recall.compute_pr(opts, max_real=200000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
    return dict(pr50k3_full_precision=precision, pr50k3_full_recall=recall)

@register_metric
def ppl2_wend(opts):
    """Perceptual path length in W space, endpoint sampling, no crop, 50k samples."""
    ppl = perceptual_path_length.compute_ppl(opts, num_samples=50000, epsilon=1e-4, space='w', sampling='end', crop=False, batch_size=2)
    return dict(ppl2_wend=ppl)

@register_metric
def eqt50k_int(opts):
    """Integer-translation equivariance PSNR (EQ-T), 50k samples."""
    opts.G_kwargs.update(force_fp32=True)
    psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqt_int=True)
    return dict(eqt50k_int=psnr)

@register_metric
def eqt50k_frac(opts):
    """Fractional-translation equivariance PSNR (EQ-T_frac), 50k samples."""
    opts.G_kwargs.update(force_fp32=True)
    psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqt_frac=True)
    return dict(eqt50k_frac=psnr)

@register_metric
def eqr50k(opts):
    """Rotation equivariance PSNR (EQ-R), 50k samples."""
    opts.G_kwargs.update(force_fp32=True)
    psnr = equivariance.compute_equivariance_metrics(opts, num_samples=50000, batch_size=4, compute_eqr=True)
    return dict(eqr50k=psnr)
#----------------------------------------------------------------------------
# Legacy metrics.
@register_metric
def fid50k(opts):
    """Legacy: FID vs. 50k reals, 50k generated images."""
    opts.dataset_kwargs.update(max_size=None)
    fid = frechet_inception_distance.compute_fid(opts, max_real=50000, num_gen=50000)
    return dict(fid50k=fid)

@register_metric
def kid50k(opts):
    """Legacy: KID vs. 50k reals, 50k generated images."""
    opts.dataset_kwargs.update(max_size=None)
    kid = kernel_inception_distance.compute_kid(opts, max_real=50000, num_gen=50000, num_subsets=100, max_subset_size=1000)
    return dict(kid50k=kid)

@register_metric
def pr50k3(opts):
    """Legacy: precision/recall (k=3) vs. 50k reals, 50k generated images."""
    opts.dataset_kwargs.update(max_size=None)
    precision, recall = precision_recall.compute_pr(opts, max_real=50000, num_gen=50000, nhood_size=3, row_batch_size=10000, col_batch_size=10000)
    return dict(pr50k3_precision=precision, pr50k3_recall=recall)

@register_metric
def is50k(opts):
    """Legacy: Inception Score (mean, std over 10 splits), 50k generated images."""
    opts.dataset_kwargs.update(max_size=None, xflip=False)
    mean, std = inception_score.compute_is(opts, num_gen=50000, num_splits=10)
    return dict(is50k_mean=mean, is50k_std=std)
#----------------------------------------------------------------------------
| 5,789 | 36.115385 | 147 | py |
pix2pix3D | pix2pix3D-main/metrics/precision_recall.py | # SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
"""Precision/Recall (PR) from the paper "Improved Precision and Recall
Metric for Assessing Generative Models". Matches the original implementation
by Kynkaanniemi et al. at
https://github.com/kynkaat/improved-precision-and-recall-metric/blob/master/precision_recall.py"""
import torch
from . import metric_utils
#----------------------------------------------------------------------------
def compute_distances(row_features, col_features, num_gpus, rank, col_batch_size):
    """Pairwise Euclidean distances between row_features and col_features,
    with column batches sharded across GPUs.

    Rank 0 returns the full [num_rows, num_cols] matrix on the CPU; every
    other rank returns None.
    """
    assert 0 <= rank < num_gpus
    num_cols = col_features.shape[0]
    # Zero-pad the columns so they split evenly into a multiple of num_gpus batches.
    num_batches = ((num_cols - 1) // col_batch_size // num_gpus + 1) * num_gpus
    padded = torch.nn.functional.pad(col_features, [0, 0, 0, -num_cols % num_batches])
    collected = []
    for chunk in padded.chunk(num_batches)[rank::num_gpus]:
        local = torch.cdist(row_features.unsqueeze(0), chunk.unsqueeze(0))[0]
        # Exchange every rank's batch so rank 0 can assemble the full matrix.
        for src in range(num_gpus):
            shared = local.clone()
            if num_gpus > 1:
                torch.distributed.broadcast(shared, src=src)
            collected.append(shared.cpu() if rank == 0 else None)
    if rank != 0:
        return None
    # Concatenate along columns and drop the zero-padding.
    return torch.cat(collected, dim=1)[:, :num_cols]
#----------------------------------------------------------------------------
def compute_pr(opts, max_real, num_gen, nhood_size, row_batch_size, col_batch_size):
    """Compute improved precision and recall between real and generated features.

    Precision: fraction of generated samples inside the real-feature manifold
    (k-NN hyperspheres); recall: fraction of real samples inside the generated
    manifold.

    Args:
        opts:           metric_utils.MetricOptions.
        max_real:       Max number of real samples to featurize.
        num_gen:        Number of generated samples to featurize.
        nhood_size:     k for the k-th nearest-neighbor radius.
        row_batch_size: Batch size along the row (probe) dimension.
        col_batch_size: Batch size along the column dimension.

    Returns:
        (precision, recall) floats on rank 0; (nan, nan) on other ranks.
    """
    detector_url = 'https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/metrics/vgg16.pkl'
    detector_kwargs = dict(return_features=True)
    real_features = metric_utils.compute_feature_stats_for_dataset(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=0, capture_all=True, max_items=max_real).get_all_torch().to(torch.float16).to(opts.device)
    gen_features = metric_utils.compute_feature_stats_for_generator(
        opts=opts, detector_url=detector_url, detector_kwargs=detector_kwargs,
        rel_lo=0, rel_hi=1, capture_all=True, max_items=num_gen).get_all_torch().to(torch.float16).to(opts.device)
    results = dict()
    for name, manifold, probes in [('precision', real_features, gen_features), ('recall', gen_features, real_features)]:
        # Radius of each manifold point = distance to its (k+1)-th neighbor
        # (the +1 skips the point itself at distance 0).
        kth = []
        for manifold_batch in manifold.split(row_batch_size):
            dist = compute_distances(row_features=manifold_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
            kth.append(dist.to(torch.float32).kthvalue(nhood_size + 1).values.to(torch.float16) if opts.rank == 0 else None)
        kth = torch.cat(kth) if opts.rank == 0 else None
        # A probe is covered if it falls inside any manifold hypersphere.
        pred = []
        for probes_batch in probes.split(row_batch_size):
            dist = compute_distances(row_features=probes_batch, col_features=manifold, num_gpus=opts.num_gpus, rank=opts.rank, col_batch_size=col_batch_size)
            pred.append((dist <= kth).any(dim=1) if opts.rank == 0 else None)
        results[name] = float(torch.cat(pred).to(torch.float32).mean() if opts.rank == 0 else 'nan')
    return results['precision'], results['recall']
#----------------------------------------------------------------------------
| 3,758 | 56.830769 | 159 | py |
pix2pix3D | pix2pix3D-main/applications/extract_mesh.py | import sys
sys.path.append('./')
import os
import re
from typing import List, Optional, Tuple, Union
import click
import dnnlib
import numpy as np
import PIL.Image
import torch
from tqdm import tqdm
import legacy
from camera_utils import LookAtPoseSampler
from matplotlib import pyplot as plt
from pathlib import Path
import json
from training.utils import color_mask, color_list
from tqdm import tqdm
import imageio
import argparse
import trimesh
import pyrender
import mcubes
os.environ["PYOPENGL_PLATFORM"] = "egl"
def init_conditional_dataset_kwargs(data, mask_data, data_type, resolution=None):
    """Build kwargs for a conditional dataset and probe it once.

    The two supported data types previously had near-identical, fully
    duplicated branches; they are deduplicated here. Behavior is unchanged:
    only the 'seg' dataset receives an explicit `resolution` constructor
    argument, and both branches overwrite resolution/use_labels/max_size from
    the constructed dataset afterwards.

    Args:
        data:      Path to the image dataset (folder or zip).
        mask_data: Path to the paired segmentation / edge dataset.
        data_type: 'seg' (segmentation maps) or 'edge' (contour maps).
        resolution: Optional target resolution, forwarded only for 'seg'.

    Returns:
        (dataset_kwargs, dataset_name) tuple.

    Raises:
        click.ClickException: On an unknown data_type or dataset I/O failure.
    """
    class_names = {
        'seg': 'training.dataset.ImageSegFolderDataset',
        'edge': 'training.dataset.ImageEdgeFolderDataset',
    }
    if data_type not in class_names:
        raise click.ClickException(f'Unknown data_type: {data_type}')
    try:
        dataset_kwargs = dnnlib.EasyDict(
            class_name=class_names[data_type], path=data, mask_path=mask_data,
            data_type=data_type, use_labels=True, max_size=None, xflip=False)
        if data_type == 'seg':
            # Only the segmentation dataset accepts an explicit resolution.
            dataset_kwargs.resolution = resolution
        dataset_obj = dnnlib.util.construct_class_by_name(**dataset_kwargs) # Subclass of training.dataset.Dataset.
        dataset_kwargs.resolution = dataset_obj.resolution # Be explicit about resolution.
        dataset_kwargs.use_labels = dataset_obj.has_labels # Be explicit about labels.
        dataset_kwargs.max_size = len(dataset_obj) # Be explicit about dataset size.
        return dataset_kwargs, dataset_obj.name
    except IOError as err:
        raise click.ClickException(f'--data: {err}')
def get_sigma_field_np(nerf, styles, resolution=512, block_resolution=64):
    """Densely sample the generator's density (sigma) field on a cubic grid.

    Evaluates `nerf.sample_mixed` block-by-block (block_resolution^3 points at
    a time) to bound peak memory.

    Args:
        nerf:             Generator exposing rendering_kwargs and sample_mixed().
        styles:           W+ style codes used for conditioning.
        resolution:       Number of grid samples per axis.
        block_resolution: Side length of each evaluation sub-block.

    Returns:
        (sigma_np, bound): float32 density volume of shape
        [resolution]^3 and the half-extent of the sampled cube.
    """
    # return numpy array of forwarded sigma value
    # bound = (nerf.rendering_kwargs['ray_end'] - nerf.rendering_kwargs['ray_start']) * 0.5
    bound = nerf.rendering_kwargs['box_warp'] * 0.5
    X = torch.linspace(-bound, bound, resolution).split(block_resolution)
    sigma_np = np.zeros([resolution, resolution, resolution], dtype=np.float32)
    for xi, xs in enumerate(X):
        for yi, ys in enumerate(X):
            for zi, zs in enumerate(X):
                # NOTE(review): relies on torch.meshgrid's legacy 'ij' default
                # (no indexing= argument) -- confirm against the pinned torch.
                xx, yy, zz = torch.meshgrid(xs, ys, zs)
                pts = torch.stack([xx, yy, zz], dim=-1).unsqueeze(0).to(styles.device) # B, H, H, H, C
                block_shape = [1, len(xs), len(ys), len(zs)]
                out = nerf.sample_mixed(pts.reshape(1, -1, 3), None, ws=styles, noise_mode='const')
                feat_out, sigma_out = out['rgb'], out['sigma']
                sigma_np[xi * block_resolution: xi * block_resolution + len(xs), \
                         yi * block_resolution: yi * block_resolution + len(ys), \
                         zi * block_resolution: zi * block_resolution + len(zs)] = sigma_out.reshape(block_shape[1:]).detach().cpu().numpy()
                # print(feat_out.shape)
    return sigma_np, bound
def extract_geometry(nerf, styles, resolution, threshold):
    """Extract a triangle mesh from the generator's density field.

    Samples sigma on a `resolution`^3 grid, runs marching cubes at
    `threshold`, and rescales vertex coordinates from grid indices back into
    the [-bound, bound]^3 world cube.

    Returns:
        (vertices, faces): float32 vertex array and triangle index array.
    """
    density, bound = get_sigma_field_np(nerf, styles, resolution)
    verts, faces = mcubes.marching_cubes(density, threshold)
    # vertices, faces, normals, values = skimage.measure.marching_cubes(
    #     u, level=10
    # )
    lo = np.array([-bound, -bound, -bound])
    hi = np.array([bound, bound, bound])
    verts = verts / (resolution - 1.0) * (hi - lo)[None, :] + lo[None, :]
    return verts.astype('float32'), faces
def main():
    """CLI entry point: extract and render a 3D mesh from a trained pix2pix3D generator.

    Loads a network pickle, conditions it on a label/edge map (a file via
    --input, or a dataset sample via --input_id), runs marching cubes on the
    density field, colors the mesh (per-vertex semantic colors for seg
    configs), exports a .ply, and renders a turntable .gif with pyrender.
    Requires a CUDA device.
    """
    # Parse arguments
    parser = argparse.ArgumentParser(description='Generate samples from a trained model')
    parser.add_argument('--network', help='Path to the network pickle file', required=True)
    parser.add_argument('--outdir', help='Directory to save the output', required=True)
    parser.add_argument('--input_id', type=int, default=0, help='Input label map id', required=False)
    parser.add_argument('--data_dir', default='data/', help='Directory to the data', required=False)
    parser.add_argument('--input', help='input label map', required=False)
    parser.add_argument('--cfg', help='Base Configuration: seg2face, seg2cat, edge2car', required=True)
    args = parser.parse_args()
    device = 'cuda'
    # Load the network
    with dnnlib.util.open_url(args.network) as f:
        G = legacy.load_network_pkl(f)['G_ema'].eval().to(device)
    # Per-config rendering resolution, camera sweep ranges, and forward pose.
    if args.cfg == 'seg2cat' or args.cfg == 'seg2face':
        neural_rendering_resolution = 128
        pitch_range, yaw_range = 0.25, 0.35
        data_type = 'seg'
        # Initialize pose sampler.
        forward_cam2world_pose = LookAtPoseSampler.sample(3.14/2, 3.14/2, torch.tensor(G.rendering_kwargs['avg_camera_pivot'], device=device),
                                                          radius=G.rendering_kwargs['avg_camera_radius'], device=device)
        focal_length = 4.2647 # NOTE(review): comment "shapenet has higher FOV" was copied here from the edge2car branch; this is the seg-config focal length.
        intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device)
        forward_pose = torch.cat([forward_cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1)
    elif args.cfg == 'edge2car':
        neural_rendering_resolution = 64
        pitch_range, yaw_range = np.pi / 2, np.pi
        data_type = 'edge'
        forward_cam2world_pose = LookAtPoseSampler.sample(3.14/2, 3.14/2, torch.tensor(G.rendering_kwargs['avg_camera_pivot'], device=device),
                                                          radius=G.rendering_kwargs['avg_camera_radius'], device=device)
        focal_length = 1.7074 # shapenet has higher FOV
        intrinsics = torch.tensor([[focal_length, 0, 0.5], [0, focal_length, 0.5], [0, 0, 1]], device=device)
        forward_pose = torch.cat([forward_cam2world_pose.reshape(-1, 16), intrinsics.reshape(-1, 9)], 1)
    else:
        print('Invalid cfg')
        return
    save_dir = Path(args.outdir)
    # Load the input label map: either an explicit file (--input) ...
    if args.input is not None:
        input_label = PIL.Image.open(args.input)
        if args.cfg == 'seg2cat' or args.cfg == 'seg2face':
            input_label = np.array(input_label).astype(np.uint8)
            input_label = torch.from_numpy(input_label).unsqueeze(0).unsqueeze(0).to(device)
            # Save the visualized input label map
            PIL.Image.fromarray(color_mask(input_label[0,0].cpu().numpy()).astype(np.uint8)).save(save_dir / f'{args.cfg}_input.png')
        elif args.cfg == 'edge2car':
            # Edge maps are mapped to [-1, 1] and inverted (dark strokes -> positive).
            input_label = np.array(input_label).astype(np.float32)[..., 0]
            input_label = -(torch.tensor(input_label).to(torch.float32) / 127.5 - 1).unsqueeze(0).unsqueeze(0).to(device)
        input_pose = forward_pose.to(device)
    # ... or a sample from the paired dataset (--input_id).
    elif args.input_id is not None:
        if args.cfg == 'seg2cat':
            data_path = Path(args.data_dir) / 'afhq_v2_train_cat_512.zip'
            mask_data = Path(args.data_dir) / 'afhqcat_seg_6c.zip'
        elif args.cfg == 'edge2car':
            data_path = Path(args.data_dir) / 'cars_128.zip'
            mask_data = Path(args.data_dir) / 'shapenet_car_contour.zip'
        elif args.cfg == 'seg2face':
            data_path = Path(args.data_dir) / 'celebamask_test.zip'
            mask_data = Path(args.data_dir) / 'celebamask_test_label.zip'
        dataset_kwargs, dataset_name = init_conditional_dataset_kwargs(str(data_path), str(mask_data), data_type)
        dataset = dnnlib.util.construct_class_by_name(**dataset_kwargs)
        batch = dataset[args.input_id]
        save_dir = Path(args.outdir)
        # Save the input label map
        if args.cfg == 'seg2cat' or args.cfg == 'seg2face':
            PIL.Image.fromarray(color_mask(batch['mask'][0]).astype(np.uint8)).save(save_dir / f'{args.cfg}_{args.input_id}_input.png')
        elif args.cfg == 'edge2car':
            PIL.Image.fromarray((255 - batch['mask'][0]).astype(np.uint8)).save(save_dir / f'{args.cfg}_{args.input_id}_input.png')
        input_pose = torch.tensor(batch['pose']).unsqueeze(0).to(device)
        if args.cfg == 'seg2cat' or args.cfg == 'seg2face':
            input_label = torch.tensor(batch['mask']).unsqueeze(0).to(device)
        elif args.cfg == 'edge2car':
            input_label = -(torch.tensor(batch['mask']).to(torch.float32) / 127.5 - 1).unsqueeze(0).to(device)
    # Map a fixed seed-0 latent conditioned on the label map, then extract geometry.
    z = torch.from_numpy(np.random.RandomState(int(0)).randn(1, G.z_dim).astype('float32')).to(device)
    with torch.no_grad():
        ws = G.mapping(z, input_pose, {'mask': input_label, 'pose': input_pose})
        mesh_trimesh = trimesh.Trimesh(*extract_geometry(G, ws, resolution=512, threshold=50.))
    if args.cfg == 'seg2cat' or args.cfg == 'seg2face':
        # Color each vertex by its argmax semantic class sampled from the field.
        verts_np = np.array(mesh_trimesh.vertices)
        colors = torch.zeros((verts_np.shape[0], 3), device=device)
        semantic_colors = torch.zeros((verts_np.shape[0], 6), device=device)
        samples_color = torch.tensor(verts_np, device=device).unsqueeze(0).float()
        head = 0
        max_batch = 10000000
        with tqdm(total=verts_np.shape[0]) as pbar:
            with torch.no_grad():
                while head < verts_np.shape[0]:
                    torch.manual_seed(0)
                    out = G.sample_mixed(samples_color[:, head:head+max_batch], None, ws, truncation_psi=1, noise_mode='const')
                    # sigma = out['sigma']
                    colors[head:head+max_batch, :] = out['rgb'][0, :, :3]
                    # Channels 32..37 carry the 6-way semantic logits -- TODO confirm layout against the generator definition.
                    seg = out['rgb'][0, :, 32:32+6]
                    semantic_colors[head:head+max_batch, :] = seg
                    # semantics[:, head:head+max_batch] = out['semantic']
                    head += max_batch
                    pbar.update(max_batch)
        semantic_colors = torch.tensor(color_list)[torch.argmax(semantic_colors, dim=-1)]
        mesh_trimesh.visual.vertex_colors = semantic_colors.cpu().numpy().astype(np.uint8)
        # Save mesh.
        mesh_trimesh.export(os.path.join(save_dir, f'semantic_mesh.ply'))
    elif args.cfg == 'edge2car':
        # Save mesh.
        mesh_trimesh.export(os.path.join(save_dir, f'{args.cfg}_mesh.ply'))
    # Render a 120-frame turntable animation of the mesh with pyrender.
    mesh = pyrender.Mesh.from_trimesh(mesh_trimesh)
    light = pyrender.SpotLight(color=np.ones(3), intensity=3.0,
                               innerConeAngle=np.pi/4)
    r = pyrender.OffscreenRenderer(512, 512)
    if args.cfg == 'seg2cat' or args.cfg == 'seg2face':
        camera = pyrender.OrthographicCamera(xmag=0.3, ymag=0.3)
    elif args.cfg == 'edge2car':
        camera = pyrender.OrthographicCamera(xmag=0.6, ymag=0.6)
    frames_mesh = []
    num_frames = 120
    for frame_idx in tqdm(range(num_frames)):
        scene = pyrender.Scene()
        scene.add(mesh)
        if args.cfg == 'seg2cat' or args.cfg == 'seg2face':
            camera_pose = LookAtPoseSampler.sample(3.14/2 + yaw_range * np.sin(2 * 3.14 * frame_idx / num_frames),
                                                   3.14/2 - 0.05 + pitch_range * np.cos(2 * 3.14 * frame_idx / num_frames),
                                                   torch.tensor(G.rendering_kwargs['avg_camera_pivot'], device=device), radius=1, device=device)
        elif args.cfg == 'edge2car':
            camera_pose = LookAtPoseSampler.sample(-3.14/2 + yaw_range * np.sin(2 * 3.14 * frame_idx / num_frames),
                                                   3.14/2 - 0.05 + pitch_range * np.cos(2 * 3.14 * frame_idx / num_frames),
                                                   torch.tensor(G.rendering_kwargs['avg_camera_pivot'], device=device), radius=1.2, device=device)
        # Flip Y and Z columns to convert to pyrender's camera convention.
        camera_pose = camera_pose.reshape(4, 4).cpu().numpy().copy()
        camera_pose[:, 1] = -camera_pose[:, 1]
        camera_pose[:, 2] = -camera_pose[:, 2]
        scene.add(camera, pose=camera_pose)
        scene.add(light, pose=camera_pose)
        color, depth = r.render(scene)
        frames_mesh.append(color)
    imageio.mimsave(os.path.join(save_dir, f'rendered_mesh.gif'), frames_mesh, fps=60)
    r.delete()
if __name__ == '__main__':
main() | 12,502 | 45.827715 | 218 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.