repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
|---|---|---|---|---|---|---|
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/mlp_attention.py | import torch
from torch import nn
from torch.nn import init
from src.module.attention.attention import Attention
class MlpAttention(Attention):
def __init__(self, query_size, key_size, out_size=100, dropout=0):
super(MlpAttention, self).__init__(dropout)
self.query_projection = nn.Linear(query_size, out_size)
self.key_projection = nn.Linear(key_size, out_size)
self.v = nn.Parameter(torch.FloatTensor(out_size, 1))
init.xavier_uniform_(self.v)
def _score(self, query, key):
"""
query: FloatTensor (batch_size, num_queries, query_size)
key: FloatTensor (batch_size, time_step, key_size)
"""
batch_size, num_queries, time_step, out_size = query.size(0), query.size(1), key.size(1), self.v.size(0)
        query = self.query_projection(query).unsqueeze(2).expand(batch_size, num_queries, time_step, out_size)  # (batch_size, num_queries, time_step, out_size)
        key = self.key_projection(key).unsqueeze(1).expand(batch_size, num_queries, time_step, out_size)  # (batch_size, num_queries, time_step, out_size)
score = torch.tanh(query + key).matmul(self.v).squeeze(-1)
return score | 1,092 | 44.541667 | 112 | py |
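A minimal standalone sketch (not part of the repository) of the additive scoring performed by MlpAttention._score above, with hypothetical sizes, to make the intended broadcasting explicit:

import torch
from torch import nn

# hypothetical sizes: batch=2, num_queries=3, time_step=7, query_size=32, key_size=64, out_size=100
query_projection, key_projection = nn.Linear(32, 100), nn.Linear(64, 100)
v = torch.randn(100, 1)
query, key = torch.randn(2, 3, 32), torch.randn(2, 7, 64)
q = query_projection(query).unsqueeze(2).expand(2, 3, 7, 100)  # (batch, num_queries, time_step, out_size)
k = key_projection(key).unsqueeze(1).expand(2, 3, 7, 100)
score = torch.tanh(q + k).matmul(v).squeeze(-1)
assert score.shape == (2, 3, 7)  # one score per (query, time step) pair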
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/attention/no_query_attention.py | import torch
from torch import nn
from torch.nn import init
class NoQueryAttention(nn.Module):
def __init__(self, query_size, attention):
super(NoQueryAttention, self).__init__()
self.query_size = query_size
self.query = nn.Parameter(torch.Tensor(1, query_size))
init.xavier_uniform_(self.query)
self.attention = attention
def forward(self, key, value, mask=None):
batch_size = key.size(0)
query = self.query.expand(batch_size, self.query_size)
return self.attention(query, key, value, mask) | 566 | 32.352941 | 62 | py |
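A hedged usage sketch, assuming NoQueryAttention above is importable; DotAttention is a hypothetical stand-in for the wrapped attention module (the repository pairs it with its own Attention implementations):

import torch
from torch import nn

class DotAttention(nn.Module):
    # hypothetical scaled dot-product attention with the expected (query, key, value, mask) signature
    def forward(self, query, key, value, mask=None):
        score = (key * query.unsqueeze(1)).sum(dim=-1) / key.size(-1) ** 0.5  # (batch, time_step)
        weights = torch.softmax(score, dim=-1)
        return (weights.unsqueeze(-1) * value).sum(dim=1)  # (batch, value_size)

attention = NoQueryAttention(query_size=64, attention=DotAttention())
key = value = torch.randn(2, 7, 64)
print(attention(key, value).shape)  # torch.Size([2, 64])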
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/utils/squash.py | import torch
def squash(x, dim=-1):
squared = torch.sum(x * x, dim=dim, keepdim=True)
scale = torch.sqrt(squared) / (1.0 + squared)
return scale * x | 161 | 26 | 53 | py |
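A short hedged check, assuming the squash function above is in scope: it preserves direction while bounding the norm, since ||squash(x)|| = ||x||^2 / (1 + ||x||^2) < 1:

import torch

x = torch.randn(4, 8, 16)
y = squash(x)                       # defaults to the last dimension
print(y.norm(dim=-1).max().item())  # always strictly below 1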
MAMS-for-ABSA | MAMS-for-ABSA-master/src/module/utils/loss.py | import torch
from torch import nn
import torch.nn.functional as F
class CapsuleLoss(nn.Module):
def __init__(self, smooth=0.1, lamda=0.6):
super(CapsuleLoss, self).__init__()
self.smooth = smooth
self.lamda = lamda
def forward(self, input, target):
one_hot = torch.zeros_like(input).to(input.device)
one_hot = one_hot.scatter(1, target.unsqueeze(-1), 1)
a = torch.max(torch.zeros_like(input).to(input.device), 1 - self.smooth - input)
b = torch.max(torch.zeros_like(input).to(input.device), input - self.smooth)
loss = one_hot * a * a + self.lamda * (1 - one_hot) * b * b
loss = loss.sum(dim=1, keepdim=False)
return loss.mean()
# CrossEntropyLoss for Label Smoothing Regularization
class CrossEntropyLoss_LSR(nn.Module):
def __init__(self, para_LSR=0.2):
super(CrossEntropyLoss_LSR, self).__init__()
self.para_LSR = para_LSR
self.logSoftmax = nn.LogSoftmax(dim=-1)
def _toOneHot_smooth(self, label, batchsize, classes):
prob = self.para_LSR * 1.0 / classes
one_hot_label = torch.zeros(batchsize, classes) + prob
for i in range(batchsize):
index = label[i]
one_hot_label[i, index] += (1.0 - self.para_LSR)
return one_hot_label
def forward(self, pre, label, size_average=True):
b, c = pre.size()
one_hot_label = self._toOneHot_smooth(label, b, c).to(pre.device)
loss = torch.sum(-one_hot_label * self.logSoftmax(pre), dim=1)
if size_average:
return torch.mean(loss)
else:
return torch.sum(loss)
class SmoothCrossEntropy(nn.Module):
def __init__(self, smooth=0.08):
super(SmoothCrossEntropy, self).__init__()
self.kldiv = nn.KLDivLoss()
self.smooth = smooth
def forward(self, input, target):
one_hot = torch.zeros_like(input).to(input.device)
one_hot = one_hot.scatter(1, target.unsqueeze(-1), 1)
target = (1 - self.smooth) * one_hot + self.smooth / (input.size(1) - 1) * (1 - one_hot)
# target = target + torch.rand_like(target).to(target.device) * 0.001
input = input - input.max(dim=1, keepdim=True)[0]
loss = -target * F.log_softmax(input, dim=-1)
return loss.mean() | 2,309 | 38.152542 | 96 | py |
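A hedged usage sketch, assuming the three loss classes above are importable; the logits here are hypothetical capsule activations / pre-softmax scores for a 3-class sentiment task:

import torch

logits = torch.rand(4, 3)            # e.g. capsule activations in [0, 1]
target = torch.tensor([0, 2, 1, 0])  # gold sentiment indices
print(CapsuleLoss(smooth=0.1, lamda=0.6)(logits, target).item())
print(CrossEntropyLoss_LSR(para_LSR=0.2)(logits, target).item())
print(SmoothCrossEntropy(smooth=0.08)(logits, target).item())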
MAMS-for-ABSA | MAMS-for-ABSA-master/train/test.py | import torch
import os
from train import make_aspect_term_model, make_aspect_category_model
from train.make_data import make_term_test_data, make_category_test_data
from train.eval import eval
def test(config):
mode = config['mode']
if mode == 'term':
model = make_aspect_term_model.make_model(config)
else:
model = make_aspect_category_model.make_model(config)
model = model.cuda()
model_path = os.path.join(config['base_path'], 'checkpoints/%s.pth' % config['aspect_' + mode + '_model']['type'])
model.load_state_dict(torch.load(model_path))
if mode == 'term':
test_loader = make_term_test_data(config)
else:
test_loader = make_category_test_data(config)
test_accuracy = eval(model, test_loader)
print('test:\taccuracy: %.4f' % (test_accuracy)) | 819 | 38.047619 | 118 | py |
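A hedged sketch of the config dictionary shape that test() above reads; the repository's real YAML config carries more keys (model hyperparameters, data paths) consumed by make_model and the data loaders, and the values here are hypothetical:

config = {
    'mode': 'term',                                 # or 'category'
    'base_path': 'data/MAMS-ATSA',                  # hypothetical path
    'aspect_term_model': {'type': 'bert_capsnet'},  # selects checkpoints/bert_capsnet.pth
}
# test(config)  # requires a trained checkpoint, processed data and a CUDA device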
MAMS-for-ABSA | MAMS-for-ABSA-master/train/make_aspect_term_model.py | import torch
from torch import nn
import numpy as np
import os
import yaml
from pytorch_pretrained_bert import BertModel
from src.aspect_term_model.recurrent_capsnet import RecurrentCapsuleNetwork
from src.aspect_term_model.bert_capsnet import BertCapsuleNetwork
def make_model(config):
model_type = config['aspect_term_model']['type']
if model_type == 'recurrent_capsnet':
return make_recurrent_capsule_network(config)
elif model_type == 'bert_capsnet':
return make_bert_capsule_network(config)
else:
        raise ValueError('Unsupported model type.')
def make_bert_capsule_network(config):
base_path = os.path.join(config['base_path'])
log_path = os.path.join(base_path, 'log/log.yml')
log = yaml.safe_load(open(log_path))
config = config['aspect_term_model'][config['aspect_term_model']['type']]
bert = BertModel.from_pretrained('bert-base-uncased')
model = BertCapsuleNetwork(
bert=bert,
bert_size=config['bert_size'],
capsule_size=config['capsule_size'],
dropout=config['dropout'],
num_categories=log['num_categories']
)
model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
return model
def make_recurrent_capsule_network(config):
embedding = make_embedding(config)
base_path = os.path.join(config['base_path'])
log_path = os.path.join(base_path, 'log/log.yml')
log = yaml.safe_load(open(log_path))
config = config['aspect_term_model'][config['aspect_term_model']['type']]
model = RecurrentCapsuleNetwork(
embedding=embedding,
num_layers=config['num_layers'],
capsule_size=config['capsule_size'],
bidirectional=config['bidirectional'],
dropout=config['dropout'],
num_categories=log['num_categories']
)
model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
return model
def make_embedding(config):
base_path = os.path.join(config['base_path'])
log_path = os.path.join(base_path, 'log/log.yml')
log = yaml.safe_load(open(log_path))
vocab_size = log['vocab_size']
config = config['aspect_term_model'][config['aspect_term_model']['type']]
embed_size = config['embed_size']
embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_size)
glove = np.load(os.path.join(base_path, 'processed/glove.npy'))
embedding.weight.data.copy_(torch.tensor(glove))
return embedding | 2,462 | 38.725806 | 83 | py |
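A self-contained hedged sketch of the GloVe-initialized embedding built by make_embedding above, with hypothetical sizes and a random array standing in for processed/glove.npy:

import numpy as np
import torch
from torch import nn

vocab_size, embed_size = 5000, 300
glove = np.random.uniform(-0.01, 0.01, (vocab_size, embed_size))  # stand-in for processed/glove.npy
embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_size)
embedding.weight.data.copy_(torch.tensor(glove))
print(embedding(torch.tensor([1, 42])).shape)  # torch.Size([2, 300])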
MAMS-for-ABSA | MAMS-for-ABSA-master/train/make_data.py | import os
from torch.utils.data import DataLoader
from data_process.dataset import ABSADataset
input_list = {
'recurrent_capsnet': ['context', 'aspect'],
'bert_capsnet': ['bert_token', 'bert_segment']
}
def make_term_data(config):
base_path = config['base_path']
train_path = os.path.join(base_path, 'processed/train.npz')
val_path = os.path.join(base_path, 'processed/val.npz')
train_data = ABSADataset(train_path, input_list[config['aspect_term_model']['type']])
val_data = ABSADataset(val_path, input_list[config['aspect_term_model']['type']])
config = config['aspect_term_model'][config['aspect_term_model']['type']]
train_loader = DataLoader(
dataset=train_data,
batch_size=config['batch_size'],
shuffle=True,
pin_memory=True
)
val_loader = DataLoader(
dataset=val_data,
batch_size=config['batch_size'],
shuffle=False,
pin_memory=True
)
return train_loader, val_loader
def make_term_test_data(config):
base_path = config['base_path']
test_path = os.path.join(base_path, 'processed/test.npz')
test_data = ABSADataset(test_path, input_list[config['aspect_term_model']['type']])
config = config['aspect_term_model'][config['aspect_term_model']['type']]
test_loader = DataLoader(
dataset=test_data,
batch_size=config['batch_size'],
shuffle=False,
pin_memory=True
)
return test_loader
def make_category_data(config):
model_type = config['aspect_category_model']['type']
if 'bert' in model_type:
i_list = ['bert_token', 'bert_segment']
else:
i_list = ['sentence', 'aspect']
base_path = config['base_path']
train_path = os.path.join(base_path, 'processed/train.npz')
val_path = os.path.join(base_path, 'processed/val.npz')
train_data = ABSADataset(train_path, i_list)
val_data = ABSADataset(val_path, i_list)
config = config['aspect_category_model'][config['aspect_category_model']['type']]
train_loader = DataLoader(
dataset=train_data,
batch_size=config['batch_size'],
shuffle=True,
pin_memory=True
)
val_loader = DataLoader(
dataset=val_data,
batch_size=config['batch_size'],
shuffle=False,
pin_memory=True
)
return train_loader, val_loader
def make_category_test_data(config):
model_type = config['aspect_category_model']['type']
if 'bert' in model_type:
i_list = ['bert_token', 'bert_segment']
else:
i_list = ['sentence', 'aspect']
base_path = config['base_path']
test_path = os.path.join(base_path, 'processed/test.npz')
test_data = ABSADataset(test_path, i_list)
config = config['aspect_category_model'][config['aspect_category_model']['type']]
test_loader = DataLoader(
dataset=test_data,
batch_size=config['batch_size'],
shuffle=False,
pin_memory=True
)
return test_loader
def make_distill_data(config):
base_path = config['base_path']
train_path = os.path.join(base_path, 'processed/train.npz')
val_path = os.path.join(base_path, 'processed/val.npz')
train_data = ABSADataset(train_path, ['context', 'aspect', 'bert_token', 'bert_segment'])
val_data = ABSADataset(val_path, input_list[config['aspect_term_model']['type']])
config = config['aspect_term_model'][config['aspect_term_model']['type']]
train_loader = DataLoader(
dataset=train_data,
batch_size=config['batch_size'],
shuffle=True,
pin_memory=True
)
val_loader = DataLoader(
dataset=val_data,
batch_size=config['batch_size'],
shuffle=False,
pin_memory=True
)
return train_loader, val_loader | 3,771 | 34.252336 | 93 | py |
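A self-contained hedged sketch of the loader pattern used throughout make_data.py, with synthetic tensors standing in for the ABSADataset fields; every loader above yields batches that unpack as (input0, input1, label), matching the train and eval loops elsewhere in the repository:

import torch
from torch.utils.data import DataLoader, TensorDataset

context = torch.randint(0, 100, (32, 40))  # padded token ids
aspect = torch.randint(0, 100, (32, 5))
label = torch.randint(0, 3, (32,))
loader = DataLoader(TensorDataset(context, aspect, label), batch_size=8, shuffle=True)
input0, input1, y = next(iter(loader))
print(input0.shape, input1.shape, y.shape)  # [8, 40] [8, 5] [8]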
MAMS-for-ABSA | MAMS-for-ABSA-master/train/make_optimizer.py | from torch import optim
import adabound
def make_optimizer(config, model):
mode = config['mode']
config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
lr = config['learning_rate']
weight_decay = config['weight_decay']
opt = {
'sgd': optim.SGD,
'adadelta': optim.Adadelta,
'adam': optim.Adam,
'adamax': optim.Adamax,
'adagrad': optim.Adagrad,
'asgd': optim.ASGD,
'rmsprop': optim.RMSprop,
'adabound': adabound.AdaBound
}
if 'momentum' in config:
optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay, momentum=config['momentum'])
else:
optimizer = opt[config['optimizer']](model.parameters(), lr=lr, weight_decay=weight_decay)
return optimizer | 831 | 35.173913 | 127 | py |
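A hedged sketch of the config slice that make_optimizer above reads (key names follow the code; the values are hypothetical):

config = {
    'mode': 'term',
    'aspect_term_model': {
        'type': 'recurrent_capsnet',
        'recurrent_capsnet': {
            'optimizer': 'adam',
            'learning_rate': 3e-4,
            'weight_decay': 1e-5,
            # 'momentum': 0.9,  # add only for optimizers that accept it, e.g. sgd
        },
    },
}
# optimizer = make_optimizer(config, model)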
MAMS-for-ABSA | MAMS-for-ABSA-master/train/eval.py | import torch
def eval(model, data_loader, criterion=None):
total_samples = 0
correct_samples = 0
total_loss = 0
model.eval()
with torch.no_grad():
for data in data_loader:
input0, input1, label = data
input0, input1, label = input0.cuda(), input1.cuda(), label.cuda()
logit = model(input0, input1)
loss = criterion(logit, label).item() if criterion is not None else 0
total_samples += input0.size(0)
pred = logit.argmax(dim=1)
correct_samples += (label == pred).long().sum().item()
total_loss += loss * input0.size(0)
accuracy = correct_samples / total_samples
avg_loss = total_loss / total_samples
if criterion is not None:
return accuracy, avg_loss
else:
return accuracy | 829 | 35.086957 | 81 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/train/make_aspect_category_model.py | import torch
from torch import nn
import numpy as np
import os
import yaml
from pytorch_pretrained_bert import BertModel
from src.aspect_category_model.recurrent_capsnet import RecurrentCapsuleNetwork
from src.aspect_category_model.bert_capsnet import BertCapsuleNetwork
def make_model(config):
model_type = config['aspect_category_model']['type']
if model_type == 'recurrent_capsnet':
return make_recurrent_capsule_network(config)
elif model_type == 'bert_capsnet':
return make_bert_capsule_network(config)
else:
        raise ValueError('Unsupported model type.')
def make_bert_capsule_network(config):
base_path = os.path.join(config['base_path'])
log_path = os.path.join(base_path, 'log/log.yml')
log = yaml.safe_load(open(log_path))
config = config['aspect_category_model'][config['aspect_category_model']['type']]
bert = BertModel.from_pretrained('bert-base-uncased')
model = BertCapsuleNetwork(
bert=bert,
bert_size=config['bert_size'],
capsule_size=config['capsule_size'],
dropout=config['dropout'],
num_categories=log['num_categories']
)
model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
return model
def make_recurrent_capsule_network(config):
embedding = make_embedding(config)
base_path = os.path.join(config['base_path'])
log_path = os.path.join(base_path, 'log/log.yml')
log = yaml.safe_load(open(log_path))
config = config['aspect_category_model'][config['aspect_category_model']['type']]
aspect_embedding = nn.Embedding(num_embeddings=8, embedding_dim=config['embed_size'])
model = RecurrentCapsuleNetwork(
embedding=embedding,
aspect_embedding=aspect_embedding,
num_layers=config['num_layers'],
bidirectional=config['bidirectional'],
capsule_size=config['capsule_size'],
dropout=config['dropout'],
num_categories=log['num_categories']
)
model.load_sentiment(os.path.join(base_path, 'processed/sentiment_matrix.npy'))
return model
def make_embedding(config):
base_path = os.path.join(config['base_path'])
log_path = os.path.join(base_path, 'log/log.yml')
log = yaml.safe_load(open(log_path))
vocab_size = log['vocab_size']
config = config['aspect_category_model'][config['aspect_category_model']['type']]
embed_size = config['embed_size']
embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=embed_size)
glove = np.load(os.path.join(base_path, 'processed/glove.npy'))
embedding.weight.data.copy_(torch.tensor(glove))
return embedding | 2,631 | 40.125 | 89 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/train/train.py | import torch
from torch import nn
from torch import optim
from train import make_aspect_term_model, make_aspect_category_model
from train.make_data import make_term_data, make_category_data
from train.make_optimizer import make_optimizer
from train.eval import eval
import os
import time
import pickle
from src.module.utils.loss import CapsuleLoss
# torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
def train(config):
mode = config['mode']
if mode == 'term':
model = make_aspect_term_model.make_model(config)
train_loader, val_loader = make_term_data(config)
else:
model = make_aspect_category_model.make_model(config)
train_loader, val_loader = make_category_data(config)
model = model.cuda()
base_path = config['base_path']
model_path = os.path.join(base_path, 'checkpoints/%s.pth' % config['aspect_' + mode + '_model']['type'])
if not os.path.exists(os.path.dirname(model_path)):
os.makedirs(os.path.dirname(model_path))
with open(os.path.join(base_path, 'processed/index2word.pickle'), 'rb') as handle:
index2word = pickle.load(handle)
criterion = CapsuleLoss()
optimizer = make_optimizer(config, model)
max_val_accuracy = 0
min_val_loss = 100
global_step = 0
config = config['aspect_' + mode + '_model'][config['aspect_' + mode + '_model']['type']]
for epoch in range(config['num_epoches']):
total_loss = 0
total_samples = 0
correct_samples = 0
start = time.time()
for i, data in enumerate(train_loader):
global_step += 1
model.train()
input0, input1, label = data
input0, input1, label = input0.cuda(), input1.cuda(), label.cuda()
optimizer.zero_grad()
logit = model(input0, input1)
loss = criterion(logit, label)
batch_size = input0.size(0)
total_loss += batch_size * loss.item()
total_samples += batch_size
pred = logit.argmax(dim=1)
correct_samples += (label == pred).long().sum().item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
optimizer.step()
if i % 10 == 0 and i > 0:
train_loss = total_loss / total_samples
train_accuracy = correct_samples / total_samples
total_loss = 0
total_samples = 0
correct_samples = 0
val_accuracy, val_loss = eval(model, val_loader, criterion)
print('[epoch %2d] [step %3d] train_loss: %.4f train_acc: %.4f val_loss: %.4f val_acc: %.4f'
% (epoch, i, train_loss, train_accuracy, val_loss, val_accuracy))
if val_accuracy > max_val_accuracy:
max_val_accuracy = val_accuracy
# torch.save(aspect_term_model.state_dict(), model_path)
if val_loss < min_val_loss:
min_val_loss = val_loss
if epoch > 0:
torch.save(model.state_dict(), model_path)
end = time.time()
print('time: %.4fs' % (end - start))
print('max_val_accuracy:', max_val_accuracy) | 3,271 | 42.052632 | 108 | py |
MAMS-for-ABSA | MAMS-for-ABSA-master/data_process/utils.py | import os
import numpy as np
import random
from xml.etree.ElementTree import parse
from pytorch_pretrained_bert import BertModel, BertTokenizer
from data_process.vocab import Vocab
from src.module.utils.constants import UNK, PAD_INDEX, ASPECT_INDEX
import spacy
import re
import json
url = re.compile('(<url>.*</url>)')
spacy_en = spacy.load('en')
def check(x):
return len(x) >= 1 and not x.isspace()
def tokenizer(text):
tokens = [tok.text for tok in spacy_en.tokenizer(url.sub('@URL@', text))]
return list(filter(check, tokens))
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
def parse_sentence_term(path, lowercase=False):
tree = parse(path)
sentences = tree.getroot()
data = []
split_char = '__split__'
for sentence in sentences:
text = sentence.find('text')
if text is None:
continue
text = text.text
if lowercase:
text = text.lower()
aspectTerms = sentence.find('aspectTerms')
if aspectTerms is None:
continue
for aspectTerm in aspectTerms:
term = aspectTerm.get('term')
if lowercase:
term = term.lower()
polarity = aspectTerm.get('polarity')
start = aspectTerm.get('from')
end = aspectTerm.get('to')
piece = text + split_char + term + split_char + polarity + split_char + start + split_char + end
data.append(piece)
return data
def parse_sentence_category(path, lowercase=False):
tree = parse(path)
sentences = tree.getroot()
data = []
split_char = '__split__'
for sentence in sentences:
text = sentence.find('text')
if text is None:
continue
text = text.text
if lowercase:
text = text.lower()
aspectCategories = sentence.find('aspectCategories')
if aspectCategories is None:
continue
for aspectCategory in aspectCategories:
category = aspectCategory.get('category')
polarity = aspectCategory.get('polarity')
piece = text + split_char + category + split_char + polarity
data.append(piece)
return data
def category_filter(data, remove_list):
remove_set = set(remove_list)
filtered_data = []
for text in data:
if not text.split('__split__')[2] in remove_set:
filtered_data.append(text)
return filtered_data
def build_vocab(data, max_size, min_freq):
if max_size == 'None':
max_size = None
vocab = Vocab()
for piece in data:
text = piece.split('__split__')[0]
text = tokenizer(text)
vocab.add_list(text)
return vocab.get_vocab(max_size=max_size, min_freq=min_freq)
def save_term_data(data, word2index, path):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
sentence = []
aspect = []
label = []
context = []
bert_token = []
bert_segment = []
td_left = []
td_right = []
f = lambda x: word2index[x] if x in word2index else word2index[UNK]
g = lambda x: list(map(f, tokenizer(x)))
d = {
'positive': 0,
'negative': 1,
'neutral': 2,
'conflict': 3
}
for piece in data:
text, term, polarity, start, end = piece.split('__split__')
start, end = int(start), int(end)
assert text[start: end] == term
sentence.append(g(text))
aspect.append(g(term))
label.append(d[polarity])
left_part = g(text[:start])
right_part = g(text[end:])
context.append(left_part + [ASPECT_INDEX] + right_part)
bert_sentence = bert_tokenizer.tokenize(text)
bert_aspect = bert_tokenizer.tokenize(term)
bert_token.append(bert_tokenizer.convert_tokens_to_ids(['[CLS]'] + bert_sentence + ['[SEP]'] + bert_aspect + ['[SEP]']))
bert_segment.append([0] * (len(bert_sentence) + 2) + [1] * (len(bert_aspect) + 1))
td_left.append(g(text[:end]))
td_right.append(g(text[start:])[::-1])
assert len(bert_token[-1]) == len(bert_segment[-1])
max_length = lambda x: max([len(y) for y in x])
sentence_max_len = max_length(sentence)
aspect_max_len = max_length(aspect)
context_max_len = max_length(context)
bert_max_len = max_length(bert_token)
td_left_max_len = max_length(td_left)
td_right_max_len = max_length(td_right)
num = len(data)
for i in range(num):
sentence[i].extend([0] * (sentence_max_len - len(sentence[i])))
aspect[i].extend([0] * (aspect_max_len - len(aspect[i])))
context[i].extend([0] * (context_max_len - len(context[i])))
bert_token[i].extend([0] * (bert_max_len - len(bert_token[i])))
bert_segment[i].extend([0] * (bert_max_len - len(bert_segment[i])))
td_left[i].extend([0] * (td_left_max_len - len(td_left[i])))
td_right[i].extend([0] * (td_right_max_len - len(td_right[i])))
sentence = np.asarray(sentence, dtype=np.int32)
aspect = np.asarray(aspect, dtype=np.int32)
label = np.asarray(label, dtype=np.int32)
context = np.asarray(context, dtype=np.int32)
bert_token = np.asarray(bert_token, dtype=np.int32)
bert_segment = np.asarray(bert_segment, dtype=np.int32)
td_left = np.asarray(td_left, dtype=np.int32)
td_right = np.asarray(td_right, dtype=np.int32)
np.savez(path, sentence=sentence, aspect=aspect, label=label, context=context, bert_token=bert_token, bert_segment=bert_segment,
td_left=td_left, td_right=td_right)
def save_category_data(data, word2index, path):
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
sentence = []
aspect = []
label = []
bert_token = []
bert_segment = []
f = lambda x: word2index[x] if x in word2index else word2index[UNK]
g = lambda x: list(map(f, tokenizer(x)))
d = {
'positive': 0,
'negative': 1,
'neutral': 2,
'conflict': 3
}
cd = {
'food': 0,
'service': 1,
'staff': 2,
'price': 3,
'ambience': 4,
'menu': 5,
'place': 6,
'miscellaneous': 7
}
for piece in data:
text, category, polarity = piece.split('__split__')
sentence.append(g(text))
aspect.append(cd[category])
label.append(d[polarity])
bert_sentence = bert_tokenizer.tokenize(text)
bert_aspect = bert_tokenizer.tokenize(category)
bert_token.append(bert_tokenizer.convert_tokens_to_ids(['[CLS]'] + bert_sentence + ['[SEP]'] + bert_aspect + ['[SEP]']))
bert_segment.append([0] * (len(bert_sentence) + 2) + [1] * (len(bert_aspect) + 1))
assert len(bert_token[-1]) == len(bert_segment[-1])
max_length = lambda x: max([len(y) for y in x])
sentence_max_len = max_length(sentence)
bert_max_len = max_length(bert_token)
num = len(data)
for i in range(num):
sentence[i].extend([0] * (sentence_max_len - len(sentence[i])))
bert_token[i].extend([0] * (bert_max_len - len(bert_token[i])))
bert_segment[i].extend([0] * (bert_max_len - len(bert_segment[i])))
sentence = np.asarray(sentence, dtype=np.int32)
aspect = np.asarray(aspect, dtype=np.int32)
label = np.asarray(label, dtype=np.int32)
bert_token = np.asarray(bert_token, dtype=np.int32)
bert_segment = np.asarray(bert_segment, dtype=np.int32)
np.savez(path, sentence=sentence, aspect=aspect, label=label, bert_token=bert_token, bert_segment=bert_segment)
def analyze_term(data):
num = len(data)
sentence_lens = []
aspect_lens = []
log = {'total': num}
for piece in data:
text, term, polarity, _, _ = piece.split('__split__')
sentence_lens.append(len(tokenizer(text)))
aspect_lens.append(len(tokenizer(term)))
if not polarity in log:
log[polarity] = 0
log[polarity] += 1
log['sentence_max_len'] = max(sentence_lens)
log['sentence_avg_len'] = sum(sentence_lens) / len(sentence_lens)
log['aspect_max_len'] = max(aspect_lens)
log['aspect_avg_len'] = sum(aspect_lens) / len(aspect_lens)
return log
def analyze_category(data):
num = len(data)
sentence_lens = []
log = {'total': num}
for piece in data:
text, category, polarity = piece.split('__split__')
sentence_lens.append(len(tokenizer(text)))
if not polarity in log:
log[polarity] = 0
log[polarity] += 1
log['sentence_max_len'] = max(sentence_lens)
log['sentence_avg_len'] = sum(sentence_lens) / len(sentence_lens)
return log
def load_glove(path, vocab_size, word2index):
if not os.path.isfile(path):
raise IOError('Not a file', path)
glove = np.random.uniform(-0.01, 0.01, [vocab_size, 300])
with open(path, 'r', encoding='utf-8') as f:
for line in f:
content = line.split(' ')
if content[0] in word2index:
glove[word2index[content[0]]] = np.array(list(map(float, content[1:])))
glove[PAD_INDEX, :] = 0
return glove
def load_sentiment_matrix(glove_path, sentiment_path):
sentiment_matrix = np.zeros((3, 300), dtype=np.float32)
sd = json.load(open(sentiment_path, 'r', encoding='utf-8'))
sd['positive'] = set(sd['positive'])
sd['negative'] = set(sd['negative'])
sd['neutral'] = set(sd['neutral'])
with open(glove_path, 'r', encoding='utf-8') as f:
for line in f:
content = line.split(' ')
word = content[0]
vec = np.array(list(map(float, content[1:])))
if word in sd['positive']:
sentiment_matrix[0] += vec
elif word in sd['negative']:
sentiment_matrix[1] += vec
elif word in sd['neutral']:
sentiment_matrix[2] += vec
sentiment_matrix -= sentiment_matrix.mean()
sentiment_matrix = sentiment_matrix / sentiment_matrix.std() * np.sqrt(2.0 / (300.0 + 3.0))
return sentiment_matrix | 10,092 | 36.520446 | 132 | py |
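A hedged sketch of the SemEval-style XML that parse_sentence_term above expects (a minimal synthetic file; 'from'/'to' are character offsets into the sentence text), assuming the function is in scope:

import tempfile

xml = '''<sentences>
  <sentence>
    <text>The pizza was great but the service was slow.</text>
    <aspectTerms>
      <aspectTerm term="pizza" polarity="positive" from="4" to="9"/>
      <aspectTerm term="service" polarity="negative" from="28" to="35"/>
    </aspectTerms>
  </sentence>
</sentences>'''
with tempfile.NamedTemporaryFile('w', suffix='.xml', delete=False) as f:
    f.write(xml)
print(parse_sentence_term(f.name, lowercase=True))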
MAMS-for-ABSA | MAMS-for-ABSA-master/data_process/dataset.py | import torch
from torch.utils.data import Dataset
import numpy as np
class ABSADataset(Dataset):
def __init__(self, path, input_list):
super(ABSADataset, self).__init__()
data = np.load(path)
self.data = {}
for key, value in data.items():
self.data[key] = torch.tensor(value).long()
self.len = self.data['label'].size(0)
self.input_list = input_list
def __getitem__(self, index):
return_value = []
for input in self.input_list:
return_value.append(self.data[input][index])
return_value.append(self.data['label'][index])
return return_value
def __len__(self):
return self.len | 702 | 28.291667 | 56 | py |
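A hedged end-to-end sketch, assuming ABSADataset above is importable: the .npz file is expected to hold one array per field plus 'label', and __getitem__ returns the requested fields followed by the label:

import numpy as np

np.savez('toy.npz',
         sentence=np.random.randint(0, 100, (16, 40)),
         aspect=np.random.randint(0, 100, (16, 5)),
         label=np.random.randint(0, 3, (16,)))
dataset = ABSADataset('toy.npz', input_list=['sentence', 'aspect'])
sentence, aspect, label = dataset[0]
print(len(dataset), sentence.shape, aspect.shape, label.item())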
OpenFWI | OpenFWI-main/pytorch_ssim.py | # From https://github.com/Po-Hsun-Su/pytorch-ssim/blob/master/pytorch_ssim/__init__.py
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average = True):
mu1 = F.conv2d(img1, window, padding = window_size//2, groups = channel)
mu2 = F.conv2d(img2, window, padding = window_size//2, groups = channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1*mu2
sigma1_sq = F.conv2d(img1*img1, window, padding = window_size//2, groups = channel) - mu1_sq
sigma2_sq = F.conv2d(img2*img2, window, padding = window_size//2, groups = channel) - mu2_sq
sigma12 = F.conv2d(img1*img2, window, padding = window_size//2, groups = channel) - mu1_mu2
C1 = 0.01**2
C2 = 0.03**2
ssim_map = ((2*mu1_mu2 + C1)*(2*sigma12 + C2))/((mu1_sq + mu2_sq + C1)*(sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size = 11, size_average = True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
def ssim(img1, img2, window_size = 11, size_average = True):
(_, channel, _, _) = img1.size()
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average) | 2,722 | 35.306667 | 104 | py |
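A hedged usage sketch, assuming the SSIM module and ssim function above are in scope; inputs are expected as (batch, channel, H, W), and identical images score 1.0:

import torch

img1 = torch.rand(2, 1, 64, 64)
criterion = SSIM(window_size=11)
print(criterion(img1, img1.clone()).item())      # ~1.0 for identical images
print(ssim(img1, torch.rand_like(img1)).item())  # noticeably lower for unrelated images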
OpenFWI | OpenFWI-main/test.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
import torch.nn as nn
from torch.utils.data import SequentialSampler
from torch.utils.data.dataloader import default_collate
import torchvision
from torchvision.transforms import Compose
import numpy as np
import utils
import network
from vis import *
from dataset import FWIDataset
import transforms as T
import pytorch_ssim
def evaluate(model, criterions, dataloader, device, k, ctx,
vis_path, vis_batch, vis_sample, missing, std):
model.eval()
    label_list, label_pred_list = [], []  # store denormalized prediction & gt in numpy
label_tensor, label_pred_tensor = [], [] # store normalized prediction & gt in tensor
if missing or std:
data_list, data_noise_list = [], [] # store original data and noisy/muted data
with torch.no_grad():
batch_idx = 0
for data, label in dataloader:
data = data.type(torch.FloatTensor).to(device, non_blocking=True)
label = label.type(torch.FloatTensor).to(device, non_blocking=True)
label_np = T.tonumpy_denormalize(label, ctx['label_min'], ctx['label_max'], exp=False)
label_list.append(label_np)
label_tensor.append(label)
if missing or std:
# Add gaussian noise
data_noise = torch.clip(data + (std ** 0.5) * torch.randn(data.shape).to(device, non_blocking=True), min=-1, max=1)
# Mute some traces
mute_idx = np.random.choice(data.shape[3], size=missing, replace=False)
data_noise[:, :, :, mute_idx] = data[0, 0, 0, 0]
data_np = T.tonumpy_denormalize(data, ctx['data_min'], ctx['data_max'], k=k)
data_noise_np = T.tonumpy_denormalize(data_noise, ctx['data_min'], ctx['data_max'], k=k)
data_list.append(data_np)
data_noise_list.append(data_noise_np)
pred = model(data_noise)
else:
pred = model(data)
label_pred_np = T.tonumpy_denormalize(pred, ctx['label_min'], ctx['label_max'], exp=False)
label_pred_list.append(label_pred_np)
label_pred_tensor.append(pred)
# Visualization
if vis_path and batch_idx < vis_batch:
for i in range(vis_sample):
plot_velocity(label_pred_np[i, 0], label_np[i, 0], f'{vis_path}/V_{batch_idx}_{i}.png') #, vmin=ctx['label_min'], vmax=ctx['label_max'])
if missing or std:
for ch in [2]: # range(data.shape[1]):
plot_seismic(data_np[i, ch], data_noise_np[i, ch], f'{vis_path}/S_{batch_idx}_{i}_{ch}.png',
vmin=ctx['data_min'] * 0.01, vmax=ctx['data_max'] * 0.01)
batch_idx += 1
label, label_pred = np.concatenate(label_list), np.concatenate(label_pred_list)
label_t, pred_t = torch.cat(label_tensor), torch.cat(label_pred_tensor)
l1 = nn.L1Loss()
l2 = nn.MSELoss()
print(f'MAE: {l1(label_t, pred_t)}')
print(f'MSE: {l2(label_t, pred_t)}')
ssim_loss = pytorch_ssim.SSIM(window_size=11)
print(f'SSIM: {ssim_loss(label_t / 2 + 0.5, pred_t / 2 + 0.5)}') # (-1, 1) to (0, 1)
for name, criterion in criterions.items():
print(f' * Velocity {name}: {criterion(label, label_pred)}')
# print(f' | Velocity 2 layers {name}: {criterion(label[:1000], label_pred[:1000])}')
# print(f' | Velocity 3 layers {name}: {criterion(label[1000:2000], label_pred[1000:2000])}')
# print(f' | Velocity 4 layers {name}: {criterion(label[2000:], label_pred[2000:])}')
def main(args):
print(args)
print("torch version: ", torch.__version__)
print("torchvision version: ", torchvision.__version__)
utils.mkdir(args.output_path)
device = torch.device(args.device)
torch.backends.cudnn.benchmark = True
with open('dataset_config.json') as f:
try:
ctx = json.load(f)[args.dataset]
except KeyError:
print('Unsupported dataset.')
sys.exit()
if args.file_size is not None:
ctx['file_size'] = args.file_size
print("Loading data")
print("Loading validation data")
log_data_min = T.log_transform(ctx['data_min'], k=args.k)
log_data_max = T.log_transform(ctx['data_max'], k=args.k)
transform_valid_data = Compose([
T.LogTransform(k=args.k),
T.MinMaxNormalize(log_data_min, log_data_max),
])
transform_valid_label = Compose([
T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
])
if args.val_anno[-3:] == 'txt':
dataset_valid = FWIDataset(
args.val_anno,
sample_ratio=args.sample_temporal,
file_size=ctx['file_size'],
transform_data=transform_valid_data,
transform_label=transform_valid_label
)
else:
dataset_valid = torch.load(args.val_anno)
print("Creating data loaders")
valid_sampler = SequentialSampler(dataset_valid)
dataloader_valid = torch.utils.data.DataLoader(
dataset_valid, batch_size=args.batch_size,
sampler=valid_sampler, num_workers=args.workers,
pin_memory=True, collate_fn=default_collate)
print("Creating model")
if args.model not in network.model_dict:
print('Unsupported model.')
sys.exit()
model = network.model_dict[args.model](upsample_mode=args.up_mode,
sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal, norm=args.norm).to(device)
criterions = {
'MAE': lambda x, y: np.mean(np.abs(x - y)),
'MSE': lambda x, y: np.mean((x - y) ** 2)
}
if args.resume:
print(args.resume)
checkpoint = torch.load(args.resume, map_location='cpu')
model.load_state_dict(network.replace_legacy(checkpoint['model']))
print('Loaded model checkpoint at Epoch {} / Step {}.'.format(checkpoint['epoch'], checkpoint['step']))
if args.vis:
# Create folder to store visualization results
vis_folder = f'visualization_{args.vis_suffix}' if args.vis_suffix else 'visualization'
vis_path = os.path.join(args.output_path, vis_folder)
utils.mkdir(vis_path)
else:
vis_path = None
print("Start testing")
start_time = time.time()
evaluate(model, criterions, dataloader_valid, device, args.k, ctx,
vis_path, args.vis_batch, args.vis_sample, args.missing, args.std)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Testing time {}'.format(total_time_str))
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='FCN Testing')
parser.add_argument('-d', '--device', default='cuda', help='device')
parser.add_argument('-ds', '--dataset', default='flatfault-b', type=str, help='dataset name')
parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')
# Path related
parser.add_argument('-ap', '--anno-path', default='split_files', help='annotation files location')
parser.add_argument('-v', '--val-anno', default='flatfault_b_val_invnet.txt', help='name of val anno')
parser.add_argument('-o', '--output-path', default='Invnet_models', help='path to parent folder to save checkpoints')
parser.add_argument('-n', '--save-name', default='fcn_l1loss_ffb', help='folder name for this experiment')
parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')
# Model related
parser.add_argument('-m', '--model', type=str, help='inverse model name')
parser.add_argument('-no', '--norm', default='bn', help='normalization layer type, support bn, in, ln (default: bn)')
parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')
# Test related
parser.add_argument('-b', '--batch-size', default=50, type=int)
parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
parser.add_argument('--k', default=1, type=float, help='k in log transformation')
parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
parser.add_argument('--vis', help='visualization option', action="store_true")
parser.add_argument('-vsu','--vis-suffix', default=None, type=str, help='visualization suffix')
parser.add_argument('-vb','--vis-batch', help='number of batch to be visualized', default=0, type=int)
parser.add_argument('-vsa', '--vis-sample', help='number of samples in a batch to be visualized', default=0, type=int)
parser.add_argument('--missing', default=0, type=int, help='number of missing traces')
parser.add_argument('--std', default=0, type=float, help='standard deviation of gaussian noise')
args = parser.parse_args()
args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
args.val_anno = os.path.join(args.anno_path, args.val_anno)
args.resume = os.path.join(args.output_path, args.resume)
return args
if __name__ == '__main__':
args = parse_args()
main(args)
| 10,383 | 42.814346 | 156 | py |
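A hedged example invocation of the testing script above, mostly using its argparse defaults; the model name, checkpoint file, and annotation file are hypothetical and depend on what was trained locally:

# python test.py -ds flatfault-b -m InversionNet -v flatfault_b_val_invnet.txt \
#     -o Invnet_models -n fcn_l1loss_ffb -s run1 -r model_500.pth --vis -vb 2 -vsa 3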
OpenFWI | OpenFWI-main/gan_train.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
from torch import nn
from torch.utils.data import RandomSampler, DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torchvision.transforms import Compose
import utils
import network
from dataset import FWIDataset
from scheduler import WarmupMultiStepLR
import transforms as T
# Need to use parallel in apex, torch ddp can cause bugs when computing gradient penalty
import apex.parallel as parallel
step = 0
def train_one_epoch(model, model_d, criterion_g, criterion_d, optimizer_g, optimizer_d,
lr_schedulers, dataloader, device, epoch, print_freq, writer, n_critic=5):
global step
model.train()
model_d.train()
# Logger setup
metric_logger = utils.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr_g', utils.SmoothedValue(window_size=1, fmt='{value}'))
metric_logger.add_meter('lr_d', utils.SmoothedValue(window_size=1, fmt='{value}'))
metric_logger.add_meter('samples/s', utils.SmoothedValue(window_size=10, fmt='{value:.3f}'))
header = 'Epoch: [{}]'.format(epoch)
itr = 0 # step in this epoch
max_itr = len(dataloader)
for data, label in metric_logger.log_every(dataloader, print_freq, header):
start_time = time.time()
data, label = data.to(device), label.to(device)
        # Update discriminator first
optimizer_d.zero_grad()
with torch.no_grad():
pred = model(data)
loss_d, loss_diff, loss_gp = criterion_d(label, pred, model_d)
loss_d.backward()
optimizer_d.step()
metric_logger.update(loss_diff=loss_diff, loss_gp=loss_gp)
# Update generator occasionally
if ((itr + 1) % n_critic == 0) or (itr == max_itr - 1):
optimizer_g.zero_grad()
pred = model(data)
loss_g, loss_g1v, loss_g2v = criterion_g(pred, label, model_d)
loss_g.backward()
optimizer_g.step()
metric_logger.update(loss_g1v=loss_g1v, loss_g2v=loss_g2v)
batch_size = data.shape[0]
metric_logger.update(lr_g=optimizer_g.param_groups[0]['lr'],
lr_d=optimizer_d.param_groups[0]['lr'])
metric_logger.meters['samples/s'].update(batch_size / (time.time() - start_time))
if writer:
writer.add_scalar('loss_diff', loss_diff, step)
writer.add_scalar('loss_gp', loss_gp, step)
if ((itr + 1) % n_critic == 0) or (itr == max_itr - 1):
writer.add_scalar('loss_g1v', loss_g1v, step)
writer.add_scalar('loss_g2v', loss_g2v, step)
step += 1
itr += 1
for lr_scheduler in lr_schedulers:
lr_scheduler.step()
def evaluate(model, criterion, dataloader, device, writer):
model.eval()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Test:'
with torch.no_grad():
for data, label in metric_logger.log_every(dataloader, 20, header):
data = data.to(device, non_blocking=True)
label = label.to(device, non_blocking=True)
pred = model(data)
loss, loss_g1v, loss_g2v = criterion(pred, label)
metric_logger.update(loss=loss.item(),
loss_g1v=loss_g1v.item(), loss_g2v=loss_g2v.item())
# Gather the stats from all processes
metric_logger.synchronize_between_processes()
print(' * Loss {loss.global_avg:.8f}\n'.format(loss=metric_logger.loss))
if writer:
writer.add_scalar('loss', metric_logger.loss.global_avg, step)
writer.add_scalar('loss_g1v', metric_logger.loss_g1v.global_avg, step)
writer.add_scalar('loss_g2v', metric_logger.loss_g2v.global_avg, step)
return metric_logger.loss.global_avg
def main(args):
global step
print(args)
print('torch version: ', torch.__version__)
print('torchvision version: ', torchvision.__version__)
utils.mkdir(args.output_path) # create folder to store checkpoints
utils.init_distributed_mode(args) # distributed mode initialization
# Set up tensorboard summary writer
train_writer, val_writer = None, None
if args.tensorboard:
utils.mkdir(args.log_path) # create folder to store tensorboard logs
if not args.distributed or (args.rank == 0) and (args.local_rank == 0):
train_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'train'))
val_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'val'))
device = torch.device(args.device)
torch.backends.cudnn.benchmark = True
with open('dataset_config.json') as f:
try:
ctx = json.load(f)[args.dataset]
except KeyError:
print('Unsupported dataset.')
sys.exit()
if args.file_size is not None:
ctx['file_size'] = args.file_size
# Create dataset and dataloader
print('Loading data')
print('Loading training data')
log_data_min = T.log_transform(ctx['data_min'], k=args.k)
log_data_max = T.log_transform(ctx['data_max'], k=args.k)
transform_data = Compose([
T.LogTransform(k=args.k),
T.MinMaxNormalize(log_data_min, log_data_max)
])
transform_label = Compose([
T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
])
if args.train_anno[-3:] == 'txt':
dataset_train = FWIDataset(
args.train_anno,
preload=True,
sample_ratio=args.sample_temporal,
file_size=ctx['file_size'],
transform_data=transform_data,
transform_label=transform_label
)
else:
dataset_train = torch.load(args.train_anno)
print('Loading validation data')
if args.val_anno[-3:] == 'txt':
dataset_valid = FWIDataset(
args.val_anno,
preload=True,
sample_ratio=args.sample_temporal,
file_size=ctx['file_size'],
transform_data=transform_data,
transform_label=transform_label
)
else:
dataset_valid = torch.load(args.val_anno)
print('Creating data loaders')
if args.distributed:
train_sampler = DistributedSampler(dataset_train, shuffle=True)
valid_sampler = DistributedSampler(dataset_valid, shuffle=True)
else:
train_sampler = RandomSampler(dataset_train)
valid_sampler = RandomSampler(dataset_valid)
dataloader_train = DataLoader(
dataset_train, batch_size=args.batch_size,
sampler=train_sampler, num_workers=args.workers,
pin_memory=True, drop_last=True, collate_fn=default_collate)
dataloader_valid = DataLoader(
dataset_valid, batch_size=args.batch_size,
sampler=valid_sampler, num_workers=args.workers,
pin_memory=True, collate_fn=default_collate)
print('Creating model')
if args.model not in network.model_dict or args.model_d not in network.model_dict:
print('Unsupported model.')
sys.exit()
model = network.model_dict[args.model](upsample_mode=args.up_mode,
sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal).to(device)
model_d = network.model_dict[args.model_d]().to(device)
if args.distributed and args.sync_bn:
model = parallel.convert_syncbn_model(model)
model_d = parallel.convert_syncbn_model(model_d)
# Define loss function
l1loss = nn.L1Loss()
l2loss = nn.MSELoss()
def criterion_g(pred, gt, model_d=None):
loss_g1v = l1loss(pred, gt)
loss_g2v = l2loss(pred, gt)
loss = args.lambda_g1v * loss_g1v + args.lambda_g2v * loss_g2v
if model_d is not None:
loss_adv = -torch.mean(model_d(pred))
loss += args.lambda_adv * loss_adv
return loss, loss_g1v, loss_g2v
criterion_d = utils.Wasserstein_GP(device, args.lambda_gp)
# Scale lr according to effective batch size
lr_g = args.lr_g * args.world_size
lr_d = args.lr_d * args.world_size
optimizer_g = torch.optim.AdamW(model.parameters(), lr=lr_g, betas=(0, 0.9), weight_decay=args.weight_decay)
optimizer_d = torch.optim.AdamW(model_d.parameters(), lr=lr_d, betas=(0, 0.9), weight_decay=args.weight_decay)
# Convert scheduler to be per iteration instead of per epoch
warmup_iters = args.lr_warmup_epochs * len(dataloader_train)
lr_milestones = [len(dataloader_train) * m for m in args.lr_milestones]
lr_schedulers = [WarmupMultiStepLR(
optimizer, milestones=lr_milestones, gamma=args.lr_gamma,
warmup_iters=warmup_iters, warmup_factor=1e-5) for optimizer in [optimizer_g, optimizer_d]]
model_without_ddp = model
model_d_without_ddp = model_d
if args.distributed:
model = parallel.DistributedDataParallel(model)
model_d = parallel.DistributedDataParallel(model_d)
model_without_ddp = model.module
model_d_without_ddp = model_d.module
if args.resume:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model']))
model_d_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model_d']))
optimizer_g.load_state_dict(checkpoint['optimizer_g'])
optimizer_d.load_state_dict(checkpoint['optimizer_d'])
args.start_epoch = checkpoint['epoch'] + 1
step = checkpoint['step']
for i in range(len(lr_schedulers)):
lr_schedulers[i].load_state_dict(checkpoint['lr_schedulers'][i])
for lr_scheduler in lr_schedulers:
lr_scheduler.milestones = lr_milestones
print('Start training')
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_one_epoch(model, model_d, criterion_g, criterion_d, optimizer_g, optimizer_d,
lr_schedulers, dataloader_train, device, epoch,
args.print_freq, train_writer, args.n_critic)
evaluate(model, criterion_g, dataloader_valid, device, val_writer)
checkpoint = {
'model': model_without_ddp.state_dict(),
'model_d': model_d_without_ddp.state_dict(),
'optimizer_g': optimizer_g.state_dict(),
'optimizer_d': optimizer_d.state_dict(),
'lr_schedulers': [scheduler.state_dict() for scheduler in lr_schedulers],
'epoch': epoch,
'step': step,
'args': args}
# Save checkpoint per epoch
utils.save_on_master(
checkpoint,
os.path.join(args.output_path, 'checkpoint.pth'))
# Save checkpoint every epoch block
if args.output_path and (epoch + 1) % args.epoch_block == 0:
utils.save_on_master(
checkpoint,
os.path.join(args.output_path, 'model_{}.pth'.format(epoch + 1)))
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='GAN Training')
parser.add_argument('-d', '--device', default='cuda', help='device')
parser.add_argument('-ds', '--dataset', default='flat', type=str, help='dataset name')
    parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')
# Path related
parser.add_argument('-ap', '--anno-path', default='/vast/home/aicyd/Desktop/OpenFWI/src/', help='annotation files location')
parser.add_argument('-t', '--train-anno', default='train_flatvel.json', help='name of train anno')
parser.add_argument('-v', '--val-anno', default='val_flatvel.json', help='name of val anno')
parser.add_argument('-o', '--output-path', default='models', help='path to parent folder to save checkpoints')
parser.add_argument('-l', '--log-path', default='models', help='path to parent folder to save logs')
parser.add_argument('-n', '--save-name', default='gan', help='folder name for this experiment')
parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')
# Model related
parser.add_argument('-m', '--model', type=str, help='generator name')
parser.add_argument('-md', '--model-d', default='Discriminator', help='discriminator name')
parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')
# Training related
parser.add_argument('-nc', '--n_critic', default=5, type=int, help='generator & discriminator update ratio')
parser.add_argument('-b', '--batch-size', default=64, type=int)
parser.add_argument('--lr_g', default=0.0001, type=float, help='initial learning rate of generator')
parser.add_argument('--lr_d', default=0.0001, type=float, help='initial learning rate of discriminator')
parser.add_argument('-lm', '--lr-milestones', nargs='+', default=[], type=int, help='decrease lr on milestones')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight-decay', default=1e-4 , type=float, help='weight decay (default: 1e-4)')
parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
parser.add_argument('--lr-warmup-epochs', default=0, type=int, help='number of warmup epochs')
parser.add_argument('-eb', '--epoch_block', type=int, default=20, help='epochs in a saved block')
parser.add_argument('-nb', '--num_block', type=int, default=25, help='number of saved block')
parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
parser.add_argument('--k', default=1, type=float, help='k in log transformation')
parser.add_argument('--print-freq', default=20, type=int, help='print frequency')
parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
parser.add_argument('--start-epoch', default=0, type=int, help='start epoch')
# Loss related
parser.add_argument('-g1v', '--lambda_g1v', type=float, default=100.0)
parser.add_argument('-g2v', '--lambda_g2v', type=float, default=100.0)
parser.add_argument('-adv', '--lambda_adv', type=float, default=1.0)
parser.add_argument('-gp', '--lambda_gp', type=float, default=10.0)
# Distributed training related
parser.add_argument('--sync-bn', action='store_true', help='Use sync batch norm')
parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
# Tensorboard related
parser.add_argument('--tensorboard', action='store_true', help='Use tensorboard for logging.')
args = parser.parse_args()
args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
args.log_path = os.path.join(args.log_path, args.save_name, args.suffix or '')
args.train_anno = os.path.join(args.anno_path, args.train_anno)
args.val_anno = os.path.join(args.anno_path, args.val_anno)
args.epochs = args.epoch_block * args.num_block
if args.resume:
args.resume = os.path.join(args.output_path, args.resume)
return args
if __name__ == '__main__':
args = parse_args()
main(args)
| 16,662 | 43.553476 | 128 | py |
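The discriminator criterion above (utils.Wasserstein_GP) is not included in this dump; the following is a generic WGAN-GP gradient-penalty sketch, provided only to illustrate the kind of term loss_gp refers to, not the repository's actual implementation:

import torch

def gradient_penalty(model_d, real, fake, lambda_gp=10.0):
    # assumes 4-D (N, C, H, W) inputs: interpolate between real and generated samples
    # and penalize discriminator gradients whose norm departs from 1
    alpha = torch.rand(real.size(0), 1, 1, 1, device=real.device)
    interp = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    d_interp = model_d(interp)
    grads = torch.autograd.grad(outputs=d_interp, inputs=interp,
                                grad_outputs=torch.ones_like(d_interp),
                                create_graph=True, retain_graph=True)[0]
    return lambda_gp * ((grads.view(grads.size(0), -1).norm(2, dim=1) - 1) ** 2).mean()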
OpenFWI | OpenFWI-main/network.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import ceil
from collections import OrderedDict
NORM_LAYERS = { 'bn': nn.BatchNorm2d, 'in': nn.InstanceNorm2d, 'ln': nn.LayerNorm }
# Replace the key names in the checkpoint in which legacy network building blocks are used
def replace_legacy(old_dict):
li = []
for k, v in old_dict.items():
k = (k.replace('Conv2DwithBN', 'layers')
.replace('Conv2DwithBN_Tanh', 'layers')
.replace('Deconv2DwithBN', 'layers')
.replace('ResizeConv2DwithBN', 'layers'))
li.append((k, v))
return OrderedDict(li)
class Conv2DwithBN(nn.Module):
def __init__(self, in_fea, out_fea,
kernel_size=3, stride=1, padding=1,
bn=True, relu_slop=0.2, dropout=None):
super(Conv2DwithBN,self).__init__()
layers = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding)]
if bn:
layers.append(nn.BatchNorm2d(num_features=out_fea))
layers.append(nn.LeakyReLU(relu_slop, inplace=True))
if dropout:
layers.append(nn.Dropout2d(0.8))
self.Conv2DwithBN = nn.Sequential(*layers)
def forward(self, x):
return self.Conv2DwithBN(x)
class ResizeConv2DwithBN(nn.Module):
def __init__(self, in_fea, out_fea, scale_factor=2, mode='nearest'):
super(ResizeConv2DwithBN, self).__init__()
layers = [nn.Upsample(scale_factor=scale_factor, mode=mode)]
layers.append(nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=3, stride=1, padding=1))
layers.append(nn.BatchNorm2d(num_features=out_fea))
layers.append(nn.LeakyReLU(0.2, inplace=True))
self.ResizeConv2DwithBN = nn.Sequential(*layers)
def forward(self, x):
return self.ResizeConv2DwithBN(x)
class Conv2DwithBN_Tanh(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1):
super(Conv2DwithBN_Tanh, self).__init__()
layers = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding)]
layers.append(nn.BatchNorm2d(num_features=out_fea))
layers.append(nn.Tanh())
self.Conv2DwithBN = nn.Sequential(*layers)
def forward(self, x):
return self.Conv2DwithBN(x)
class ConvBlock(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1, norm='bn', relu_slop=0.2, dropout=None):
super(ConvBlock,self).__init__()
layers = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding)]
if norm in NORM_LAYERS:
layers.append(NORM_LAYERS[norm](out_fea))
layers.append(nn.LeakyReLU(relu_slop, inplace=True))
if dropout:
layers.append(nn.Dropout2d(0.8))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class ConvBlock_Tanh(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size=3, stride=1, padding=1, norm='bn'):
super(ConvBlock_Tanh, self).__init__()
layers = [nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding)]
if norm in NORM_LAYERS:
layers.append(NORM_LAYERS[norm](out_fea))
layers.append(nn.Tanh())
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class DeconvBlock(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size=2, stride=2, padding=0, output_padding=0, norm='bn'):
super(DeconvBlock, self).__init__()
layers = [nn.ConvTranspose2d(in_channels=in_fea, out_channels=out_fea, kernel_size=kernel_size, stride=stride, padding=padding, output_padding=output_padding)]
if norm in NORM_LAYERS:
layers.append(NORM_LAYERS[norm](out_fea))
layers.append(nn.LeakyReLU(0.2, inplace=True))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class ResizeBlock(nn.Module):
def __init__(self, in_fea, out_fea, scale_factor=2, mode='nearest', norm='bn'):
super(ResizeBlock, self).__init__()
layers = [nn.Upsample(scale_factor=scale_factor, mode=mode)]
layers.append(nn.Conv2d(in_channels=in_fea, out_channels=out_fea, kernel_size=3, stride=1, padding=1))
if norm in NORM_LAYERS:
layers.append(NORM_LAYERS[norm](out_fea))
layers.append(nn.LeakyReLU(0.2, inplace=True))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
# FlatFault/CurveFault
# 1000, 70 -> 70, 70
class InversionNet(nn.Module):
def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, dim5=512, sample_spatial=1.0, **kwargs):
super(InversionNet, self).__init__()
self.convblock1 = ConvBlock(5, dim1, kernel_size=(7, 1), stride=(2, 1), padding=(3, 0))
self.convblock2_1 = ConvBlock(dim1, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock2_2 = ConvBlock(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
self.convblock3_1 = ConvBlock(dim2, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock3_2 = ConvBlock(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
self.convblock4_1 = ConvBlock(dim2, dim3, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock4_2 = ConvBlock(dim3, dim3, kernel_size=(3, 1), padding=(1, 0))
self.convblock5_1 = ConvBlock(dim3, dim3, stride=2)
self.convblock5_2 = ConvBlock(dim3, dim3)
self.convblock6_1 = ConvBlock(dim3, dim4, stride=2)
self.convblock6_2 = ConvBlock(dim4, dim4)
self.convblock7_1 = ConvBlock(dim4, dim4, stride=2)
self.convblock7_2 = ConvBlock(dim4, dim4)
self.convblock8 = ConvBlock(dim4, dim5, kernel_size=(8, ceil(70 * sample_spatial / 8)), padding=0)
self.deconv1_1 = DeconvBlock(dim5, dim5, kernel_size=5)
self.deconv1_2 = ConvBlock(dim5, dim5)
self.deconv2_1 = DeconvBlock(dim5, dim4, kernel_size=4, stride=2, padding=1)
self.deconv2_2 = ConvBlock(dim4, dim4)
self.deconv3_1 = DeconvBlock(dim4, dim3, kernel_size=4, stride=2, padding=1)
self.deconv3_2 = ConvBlock(dim3, dim3)
self.deconv4_1 = DeconvBlock(dim3, dim2, kernel_size=4, stride=2, padding=1)
self.deconv4_2 = ConvBlock(dim2, dim2)
self.deconv5_1 = DeconvBlock(dim2, dim1, kernel_size=4, stride=2, padding=1)
self.deconv5_2 = ConvBlock(dim1, dim1)
self.deconv6 = ConvBlock_Tanh(dim1, 1)
def forward(self,x):
# Encoder Part
x = self.convblock1(x) # (None, 32, 500, 70)
x = self.convblock2_1(x) # (None, 64, 250, 70)
x = self.convblock2_2(x) # (None, 64, 250, 70)
x = self.convblock3_1(x) # (None, 64, 125, 70)
x = self.convblock3_2(x) # (None, 64, 125, 70)
x = self.convblock4_1(x) # (None, 128, 63, 70)
x = self.convblock4_2(x) # (None, 128, 63, 70)
x = self.convblock5_1(x) # (None, 128, 32, 35)
x = self.convblock5_2(x) # (None, 128, 32, 35)
x = self.convblock6_1(x) # (None, 256, 16, 18)
x = self.convblock6_2(x) # (None, 256, 16, 18)
x = self.convblock7_1(x) # (None, 256, 8, 9)
x = self.convblock7_2(x) # (None, 256, 8, 9)
x = self.convblock8(x) # (None, 512, 1, 1)
# Decoder Part
x = self.deconv1_1(x) # (None, 512, 5, 5)
x = self.deconv1_2(x) # (None, 512, 5, 5)
x = self.deconv2_1(x) # (None, 256, 10, 10)
x = self.deconv2_2(x) # (None, 256, 10, 10)
x = self.deconv3_1(x) # (None, 128, 20, 20)
x = self.deconv3_2(x) # (None, 128, 20, 20)
x = self.deconv4_1(x) # (None, 64, 40, 40)
x = self.deconv4_2(x) # (None, 64, 40, 40)
x = self.deconv5_1(x) # (None, 32, 80, 80)
x = self.deconv5_2(x) # (None, 32, 80, 80)
x = F.pad(x, [-5, -5, -5, -5], mode="constant", value=0) # (None, 32, 70, 70) 125, 100
x = self.deconv6(x) # (None, 1, 70, 70)
return x
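# Shape sanity check (a minimal sketch): a (5, 1000, 70) seismic input -- 5 sources,
# 1000 time steps, 70 receivers -- is mapped to a (1, 70, 70) velocity map, matching
# the per-layer shape comments in the forward pass above.
def _inversionnet_shape_check():
    net = InversionNet()
    x = torch.randn(2, 5, 1000, 70)
    assert net(x).shape == (2, 1, 70, 70)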
class FCN4_Deep_Resize_2(nn.Module):
def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, dim5=512, ratio=1.0, upsample_mode='nearest'):
super(FCN4_Deep_Resize_2, self).__init__()
self.convblock1 = Conv2DwithBN(5, dim1, kernel_size=(7, 1), stride=(2, 1), padding=(3, 0))
self.convblock2_1 = Conv2DwithBN(dim1, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock2_2 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
self.convblock3_1 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock3_2 = Conv2DwithBN(dim2, dim2, kernel_size=(3, 1), padding=(1, 0))
self.convblock4_1 = Conv2DwithBN(dim2, dim3, kernel_size=(3, 1), stride=(2, 1), padding=(1, 0))
self.convblock4_2 = Conv2DwithBN(dim3, dim3, kernel_size=(3, 1), padding=(1, 0))
self.convblock5_1 = Conv2DwithBN(dim3, dim3, stride=2)
self.convblock5_2 = Conv2DwithBN(dim3, dim3)
self.convblock6_1 = Conv2DwithBN(dim3, dim4, stride=2)
self.convblock6_2 = Conv2DwithBN(dim4, dim4)
self.convblock7_1 = Conv2DwithBN(dim4, dim4, stride=2)
self.convblock7_2 = Conv2DwithBN(dim4, dim4)
self.convblock8 = Conv2DwithBN(dim4, dim5, kernel_size=(8, ceil(70 * ratio / 8)), padding=0)
self.deconv1_1 = ResizeConv2DwithBN(dim5, dim5, scale_factor=5, mode=upsample_mode)
self.deconv1_2 = Conv2DwithBN(dim5, dim5)
self.deconv2_1 = ResizeConv2DwithBN(dim5, dim4, scale_factor=2, mode=upsample_mode)
self.deconv2_2 = Conv2DwithBN(dim4, dim4)
self.deconv3_1 = ResizeConv2DwithBN(dim4, dim3, scale_factor=2, mode=upsample_mode)
self.deconv3_2 = Conv2DwithBN(dim3, dim3)
self.deconv4_1 = ResizeConv2DwithBN(dim3, dim2, scale_factor=2, mode=upsample_mode)
self.deconv4_2 = Conv2DwithBN(dim2, dim2)
self.deconv5_1 = ResizeConv2DwithBN(dim2, dim1, scale_factor=2, mode=upsample_mode)
self.deconv5_2 = Conv2DwithBN(dim1, dim1)
self.deconv6 = Conv2DwithBN_Tanh(dim1, 1)
def forward(self,x):
# Encoder Part
x = self.convblock1(x) # (None, 32, 500, 70)
x = self.convblock2_1(x) # (None, 64, 250, 70)
x = self.convblock2_2(x) # (None, 64, 250, 70)
x = self.convblock3_1(x) # (None, 64, 125, 70)
x = self.convblock3_2(x) # (None, 64, 125, 70)
x = self.convblock4_1(x) # (None, 128, 63, 70)
x = self.convblock4_2(x) # (None, 128, 63, 70)
x = self.convblock5_1(x) # (None, 128, 32, 35)
x = self.convblock5_2(x) # (None, 128, 32, 35)
x = self.convblock6_1(x) # (None, 256, 16, 18)
x = self.convblock6_2(x) # (None, 256, 16, 18)
x = self.convblock7_1(x) # (None, 256, 8, 9)
x = self.convblock7_2(x) # (None, 256, 8, 9)
x = self.convblock8(x) # (None, 512, 1, 1)
# Decoder Part
x = self.deconv1_1(x) # (None, 512, 5, 5)
x = self.deconv1_2(x) # (None, 512, 5, 5)
x = self.deconv2_1(x) # (None, 256, 10, 10)
x = self.deconv2_2(x) # (None, 256, 10, 10)
x = self.deconv3_1(x) # (None, 128, 20, 20)
x = self.deconv3_2(x) # (None, 128, 20, 20)
x = self.deconv4_1(x) # (None, 64, 40, 40)
x = self.deconv4_2(x) # (None, 64, 40, 40)
x = self.deconv5_1(x) # (None, 32, 80, 80)
x = self.deconv5_2(x) # (None, 32, 80, 80)
x = F.pad(x, [-5, -5, -5, -5], mode="constant", value=0) # (None, 32, 70, 70)
x = self.deconv6(x) # (None, 1, 70, 70)
return x
class Discriminator(nn.Module):
def __init__(self, dim1=32, dim2=64, dim3=128, dim4=256, **kwargs):
super(Discriminator, self).__init__()
self.convblock1_1 = ConvBlock(1, dim1, stride=2)
self.convblock1_2 = ConvBlock(dim1, dim1)
self.convblock2_1 = ConvBlock(dim1, dim2, stride=2)
self.convblock2_2 = ConvBlock(dim2, dim2)
self.convblock3_1 = ConvBlock(dim2, dim3, stride=2)
self.convblock3_2 = ConvBlock(dim3, dim3)
self.convblock4_1 = ConvBlock(dim3, dim4, stride=2)
self.convblock4_2 = ConvBlock(dim4, dim4)
self.convblock5 = ConvBlock(dim4, 1, kernel_size=5, padding=0)
def forward(self, x):
x = self.convblock1_1(x)
x = self.convblock1_2(x)
x = self.convblock2_1(x)
x = self.convblock2_2(x)
x = self.convblock3_1(x)
x = self.convblock3_2(x)
x = self.convblock4_1(x)
x = self.convblock4_2(x)
x = self.convblock5(x)
x = x.view(x.shape[0], -1)
return x
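# Shape sanity check (a minimal sketch): the critic maps a (1, 70, 70) velocity map
# to a single score per sample.
def _discriminator_shape_check():
    disc = Discriminator()
    y = torch.randn(2, 1, 70, 70)
    assert disc(y).shape == (2, 1)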
class Conv_HPGNN(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size=None, stride=None, padding=None, **kwargs):
super(Conv_HPGNN, self).__init__()
layers = [
ConvBlock(in_fea, out_fea, relu_slop=0.1, dropout=0.8),
ConvBlock(out_fea, out_fea, relu_slop=0.1, dropout=0.8),
]
if kernel_size is not None:
layers.append(nn.MaxPool2d(kernel_size=kernel_size, stride=stride, padding=padding))
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
class Deconv_HPGNN(nn.Module):
def __init__(self, in_fea, out_fea, kernel_size, **kwargs):
super(Deconv_HPGNN, self).__init__()
layers = [
nn.ConvTranspose2d(in_fea, in_fea, kernel_size=kernel_size, stride=2, padding=0),
ConvBlock(in_fea, out_fea, relu_slop=0.1, dropout=0.8),
ConvBlock(out_fea, out_fea, relu_slop=0.1, dropout=0.8)
]
self.layers = nn.Sequential(*layers)
def forward(self, x):
return self.layers(x)
model_dict = {
'InversionNet': InversionNet,
'Discriminator': Discriminator,
'UPFWI': FCN4_Deep_Resize_2
}
| 14,861 | 45.15528 | 167 | py |
OpenFWI | OpenFWI-main/vis.py | import os
import torch
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
# Load colormap for velocity map visualization
rainbow_cmap = ListedColormap(np.load('rainbow256.npy'))
def plot_velocity(output, target, path, vmin=None, vmax=None):
fig, ax = plt.subplots(1, 2, figsize=(11, 5))
if vmin is None or vmax is None:
vmax, vmin = np.max(target), np.min(target)
im = ax[0].matshow(output, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
ax[0].set_title('Prediction', y=1.08)
ax[1].matshow(target, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
ax[1].set_title('Ground Truth', y=1.08)
for axis in ax:
# axis.set_xticks(range(0, 70, 10))
# axis.set_xticklabels(range(0, 1050, 150))
# axis.set_yticks(range(0, 70, 10))
# axis.set_yticklabels(range(0, 1050, 150))
axis.set_xticks(range(0, 70, 10))
axis.set_xticklabels(range(0, 700, 100))
axis.set_yticks(range(0, 70, 10))
axis.set_yticklabels(range(0, 700, 100))
axis.set_ylabel('Depth (m)', fontsize=12)
axis.set_xlabel('Offset (m)', fontsize=12)
fig.colorbar(im, ax=ax, shrink=0.75, label='Velocity(m/s)')
plt.savefig(path)
plt.close('all')
def plot_single_velocity(label, path):
plt.rcParams.update({'font.size': 16})
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
vmax, vmin = np.max(label), np.min(label)
im = ax.matshow(label, cmap=rainbow_cmap, vmin=vmin, vmax=vmax)
# im = ax.matshow(label, cmap="gist_rainbow", vmin=vmin, vmax=vmax)
# nx = label.shape[0]
# ax.set_aspect(aspect=1)
# ax.set_xticks(range(0, nx, int(150//(1050/nx)))[:7])
# ax.set_xticklabels(range(0, 1050, 150))
# ax.set_yticks(range(0, nx, int(150//(1050/nx)))[:7])
# ax.set_yticklabels(range(0, 1050, 150))
# ax.set_title('Offset (m)', y=1.08)
# ax.set_ylabel('Depth (m)', fontsize=18)
fig.colorbar(im, ax=ax, shrink=1.0, label='Velocity(m/s)')
plt.savefig(path)
plt.close('all')
# def plot_seismic(output, target, path, vmin=-1e-5, vmax=1e-5):
# fig, ax = plt.subplots(1, 3, figsize=(15, 6))
# im = ax[0].matshow(output, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[0].set_title('Prediction')
# ax[1].matshow(target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[1].set_title('Ground Truth')
# ax[2].matshow(output - target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
# ax[2].set_title('Difference')
# fig.colorbar(im, ax=ax, format='%.1e')
# plt.savefig(path)
# plt.close('all')
def plot_seismic(output, target, path, vmin=-1e-5, vmax=1e-5):
fig, ax = plt.subplots(1, 3, figsize=(20, 5))
# fig, ax = plt.subplots(1, 2, figsize=(11, 5))
aspect = output.shape[1]/output.shape[0]
im = ax[0].matshow(target, aspect=aspect, cmap='gray', vmin=vmin, vmax=vmax)
ax[0].set_title('Ground Truth')
ax[1].matshow(output, aspect=aspect, cmap='gray', vmin=vmin, vmax=vmax)
ax[1].set_title('Prediction')
ax[2].matshow(output - target, aspect='auto', cmap='gray', vmin=vmin, vmax=vmax)
ax[2].set_title('Difference')
# for axis in ax:
# axis.set_xticks(range(0, 70, 10))
# axis.set_xticklabels(range(0, 1050, 150))
# axis.set_title('Offset (m)', y=1.1)
# axis.set_ylabel('Time (ms)', fontsize=12)
# fig.colorbar(im, ax=ax, shrink=1.0, pad=0.01, label='Amplitude')
fig.colorbar(im, ax=ax, shrink=0.75, label='Amplitude')
plt.savefig(path)
plt.close('all')
def plot_single_seismic(data, path):
nz, nx = data.shape
plt.rcParams.update({'font.size': 18})
vmin, vmax = np.min(data), np.max(data)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
im = ax.matshow(data, aspect='auto', cmap='gray', vmin=vmin * 0.01, vmax=vmax * 0.01)
ax.set_aspect(aspect=nx/nz)
ax.set_xticks(range(0, nx, int(300//(1050/nx)))[:5])
ax.set_xticklabels(range(0, 1050, 300))
ax.set_title('Offset (m)', y=1.08)
ax.set_yticks(range(0, nz, int(200//(1000/nz)))[:5])
ax.set_yticklabels(range(0, 1000, 200))
ax.set_ylabel('Time (ms)', fontsize=18)
fig.colorbar(im, ax=ax, shrink=1.0, pad=0.01, label='Amplitude')
plt.savefig(path)
plt.close('all')
| 4,324 | 38.318182 | 89 | py |
OpenFWI | OpenFWI-main/utils.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
from collections import defaultdict, deque
import datetime
import time
import torch
import torch.distributed as dist
import torch.autograd as autograd
from torch.autograd import Variable
import torch.nn as nn
import errno
import os
import itertools
from torchvision.models import vgg16
import numpy as np
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
if isinstance(iterable, list):
length = max(len(x) for x in iterable)
iterable = [x if len(x) == length else itertools.cycle(x) for x in iterable]
iterable = zip(*iterable)
else:
length = len(iterable)
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(length))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj # <-- yield the batch in for loop
iter_time.update(time.time() - end)
if i % print_freq == 0:
eta_seconds = iter_time.global_avg * (length - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, length, eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, length, eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {}'.format(header, total_time_str))
# Legacy code
class ContentLoss(nn.Module):
def __init__(self, args):
super(ContentLoss, self).__init__()
names = ['l1', 'l2']
self.loss_names = ['loss_' + n for n in names]
for key in ['lambda_' + n for n in names]:
setattr(self, key, getattr(args, key))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, model, input, target):
pred = model(input)
loss_l1 = self.l1loss(target, pred)
loss_l2 = self.l2loss(target, pred)
loss = loss_l1 * self.lambda_l1 + loss_l2 * self.lambda_l2
scope = locals()
return loss, { k: eval(k, scope) for k in self.loss_names }
# Legacy code
class IdenticalLoss(nn.Module):
def __init__(self, args):
super(IdenticalLoss, self).__init__()
names = ['id1s', 'id2s']
self.loss_names = ['loss_' + n for n in names]
for key in ['lambda_' + n for n in names]:
setattr(self, key, getattr(args, key))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, model_s2v, model_v2s, input):
mid = model_s2v(input)
pred = model_v2s(mid)
cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
loss_id1s, loss_id2s = cal_loss(input, pred)
loss = loss_id1s * self.lambda_id1s + loss_id2s * self.lambda_id2s
scope = locals()
return loss, { k: eval(k, scope) for k in self.loss_names }
# Implemented according to H-PGNN, not useful
class NMSELoss(nn.Module):
def __init__(self):
super(NMSELoss, self).__init__()
def forward(self, pred, gt):
return torch.mean(((pred - gt) / (torch.amax(gt, (-2, -1), keepdim=True) + 1e-5)) ** 2)
class CycleLoss(nn.Module):
def __init__(self, args):
super(CycleLoss, self).__init__()
names = ['g1v', 'g2v', 'g1s', 'g2s', 'c1v', 'c2v', 'c1s', 'c2s']
self.loss_names = ['loss_' + n for n in names]
for key in ['lambda_' + n for n in names]:
setattr(self, key, getattr(args, key))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, data, label, pred_s=None, pred_v=None, recon_s=None, recon_v=None):
cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
loss_g1v, loss_g2v, loss_g1s, loss_g2s = [0] * 4
if pred_v is not None:
loss_g1v, loss_g2v = cal_loss(pred_v, label)
if pred_s is not None:
loss_g1s, loss_g2s = cal_loss(pred_s, data)
        loss_c1v, loss_c2v, loss_c1s, loss_c2s = [0] * 4
if recon_v is not None:
loss_c1v, loss_c2v = cal_loss(recon_v, label)
if recon_s is not None:
loss_c1s, loss_c2s = cal_loss(recon_s, data)
loss = loss_g1v * self.lambda_g1v + loss_g2v * self.lambda_g2v + \
loss_g1s * self.lambda_g1s + loss_g2s * self.lambda_g2s + \
loss_c1v * self.lambda_c1v + loss_c2v * self.lambda_c2v + \
loss_c1s * self.lambda_c1s + loss_c2s * self.lambda_c2s
scope = locals()
return loss, { k: eval(k, scope) for k in self.loss_names }
# Legacy code
class _CycleLoss(nn.Module):
def __init__(self, args):
super(_CycleLoss, self).__init__()
names = ['g1v', 'g2v', 'g1s', 'g2s', 'c1v', 'c2v', 'c1s', 'c2s']
self.loss_names = ['loss_' + n for n in names]
for key in ['lambda_' + n for n in names]:
setattr(self, key, getattr(args, key))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, data, label, pred_s=None, pred_v=None, recon_s=None, recon_v=None):
cal_loss = lambda x, y: (self.l1loss(x, y), self.l2loss(x, y))
loss_g1v, loss_g2v, loss_g1s, loss_g2s = [0] * 4
if pred_v is not None and (self.lambda_g1v != 0 or self.lambda_g2v != 0):
loss_g1v, loss_g2v = cal_loss(pred_v, label)
if pred_s is not None and (self.lambda_g1s != 0 or self.lambda_g2s != 0):
loss_g1s, loss_g2s = cal_loss(pred_s, data)
        loss_c1v, loss_c2v, loss_c1s, loss_c2s = [0] * 4
if recon_v is not None and (self.lambda_c1v != 0 or self.lambda_c2v != 0):
loss_c1v, loss_c2v = cal_loss(recon_v, label)
if recon_s is not None and (self.lambda_c1s != 0 or self.lambda_c2s != 0):
loss_c1s, loss_c2s = cal_loss(recon_s, data)
loss = loss_g1v * self.lambda_g1v + loss_g2v * self.lambda_g2v + \
loss_g1s * self.lambda_g1s + loss_g2s * self.lambda_g2s + \
loss_c1v * self.lambda_c1v + loss_c2v * self.lambda_c2v + \
loss_c1s * self.lambda_c1s + loss_c2s * self.lambda_c2s
scope = locals()
return loss, { k: eval(k, scope) for k in self.loss_names }
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
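# Worked example (a sketch): two samples, three classes; the second sample is wrong at
# top-1 but correct within its top-2 predictions.
def _accuracy_example():
    output = torch.tensor([[0.7, 0.2, 0.1], [0.4, 0.5, 0.1]])
    target = torch.tensor([0, 0])
    top1, top2 = accuracy(output, target, topk=(1, 2))
    assert top1.item() == 50.0 and top2.item() == 100.0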
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.local_rank = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ and args.world_size > 1:
args.rank = int(os.environ['SLURM_PROCID'])
args.local_rank = args.rank % torch.cuda.device_count()
elif hasattr(args, "rank"):
pass
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.local_rank)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
setup_for_distributed(args.rank == 0)
class Wasserstein_GP(nn.Module):
def __init__(self, device, lambda_gp):
super(Wasserstein_GP, self).__init__()
self.device = device
self.lambda_gp = lambda_gp
def forward(self, real, fake, model):
gradient_penalty = self.compute_gradient_penalty(model, real, fake)
loss_real = torch.mean(model(real))
loss_fake = torch.mean(model(fake))
loss = -loss_real + loss_fake + gradient_penalty * self.lambda_gp
return loss, loss_real-loss_fake, gradient_penalty
def compute_gradient_penalty(self, model, real_samples, fake_samples):
alpha = torch.rand(real_samples.size(0), 1, 1, 1, device=self.device)
interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
d_interpolates = model(interpolates)
gradients = autograd.grad(
outputs=d_interpolates,
inputs=interpolates,
grad_outputs=torch.ones(real_samples.size(0), d_interpolates.size(1)).to(self.device),
create_graph=True,
retain_graph=True,
only_inputs=True,
)[0]
gradients = gradients.view(gradients.size(0), -1)
gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
return gradient_penalty
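# Usage sketch (variable names are placeholders; assumes `critic` maps velocity maps
# to (N, 1) scores, as the Discriminator in network.py does):
#   wgan_gp = Wasserstein_GP(device, lambda_gp=10)
#   d_loss, w_dist, gp = wgan_gp(real_maps, fake_maps.detach(), critic)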
# Modified from https://gist.github.com/alper111/8233cdb0414b4cb5853f2f730ab95a49
class VGGPerceptualLoss(nn.Module):
def __init__(self, resize=True):
super(VGGPerceptualLoss, self).__init__()
blocks = []
blocks.append(vgg16(pretrained=True).features[:4].eval()) # relu1_2
blocks.append(vgg16(pretrained=True).features[4:9].eval()) # relu2_2
blocks.append(vgg16(pretrained=True).features[9:16].eval()) # relu3_3
blocks.append(vgg16(pretrained=True).features[16:23].eval()) # relu4_3
        # Freeze the VGG feature extractor; iterate over parameters (not submodules),
        # otherwise requires_grad is set on the modules and nothing is actually frozen.
        for bl in blocks:
            for p in bl.parameters():
                p.requires_grad = False
self.blocks = nn.ModuleList(blocks)
self.transform = nn.functional.interpolate
self.resize = resize
self.register_buffer("mean", torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
self.register_buffer("std", torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))
self.l1loss = nn.L1Loss()
self.l2loss = nn.MSELoss()
def forward(self, input, target, rescale=True, feature_layers=[1]):
input = input.view(-1, 1, input.shape[-2], input.shape[-1]).repeat(1, 3, 1, 1)
target = target.view(-1, 1, target.shape[-2], target.shape[-1]).repeat(1, 3, 1, 1)
if rescale: # from [-1, 1] to [0, 1]
input = input / 2 + 0.5
target = target / 2 + 0.5
input = (input-self.mean) / self.std
target = (target-self.mean) / self.std
if self.resize:
input = self.transform(input, mode='bilinear', size=(224, 224), align_corners=False)
target = self.transform(target, mode='bilinear', size=(224, 224), align_corners=False)
loss_l1, loss_l2 = 0.0, 0.0
x = input
y = target
for i, block in enumerate(self.blocks):
x = block(x)
y = block(y)
if i in feature_layers:
loss_l1 += self.l1loss(x, y)
loss_l2 += self.l2loss(x, y)
return loss_l1, loss_l2
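# Usage sketch (`pred`, `target`, `device` are placeholders; assumes single-channel
# maps already normalized to [-1, 1]; the pretrained VGG16 weights are downloaded by
# torchvision on first use):
#   perceptual = VGGPerceptualLoss().to(device)
#   loss_l1, loss_l2 = perceptual(pred, target, rescale=True, feature_layers=[1])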
def cal_psnr(gt, data, max_value):
mse = np.mean((gt - data) ** 2)
if (mse == 0):
return 100
return 20 * np.log10(max_value / np.sqrt(mse))
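# Worked example (a sketch): identical arrays hit the 100 dB sentinel, and a constant
# error of 1 against max_value=10 gives 20 * log10(10 / 1) = 20 dB.
def _cal_psnr_example():
    gt = np.zeros((4, 4))
    assert cal_psnr(gt, gt, max_value=10) == 100
    assert np.isclose(cal_psnr(gt, gt + 1, max_value=10), 20.0)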
| 17,006 | 34.804211 | 105 | py |
OpenFWI | OpenFWI-main/dataset.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import numpy as np
from torch.utils.data import Dataset
from torchvision.transforms import Compose
import transforms as T
class FWIDataset(Dataset):
''' FWI dataset
    For convenience, in this class, a batch refers to an npy file
instead of the batch used during training.
Args:
anno: path to annotation file
preload: whether to load the whole dataset into memory
sample_ratio: downsample ratio for seismic data
file_size: # of samples in each npy file
transform_data|label: transformation applied to data or label
'''
def __init__(self, anno, preload=True, sample_ratio=1, file_size=500,
transform_data=None, transform_label=None):
if not os.path.exists(anno):
            print(f'Annotation file {anno} does not exist')
self.preload = preload
self.sample_ratio = sample_ratio
self.file_size = file_size
self.transform_data = transform_data
self.transform_label = transform_label
with open(anno, 'r') as f:
self.batches = f.readlines()
if preload:
self.data_list, self.label_list = [], []
for batch in self.batches:
data, label = self.load_every(batch)
self.data_list.append(data)
if label is not None:
self.label_list.append(label)
# Load from one line
def load_every(self, batch):
batch = batch.split('\t')
data_path = batch[0] if len(batch) > 1 else batch[0][:-1]
data = np.load(data_path)[:, :, ::self.sample_ratio, :]
data = data.astype('float32')
if len(batch) > 1:
label_path = batch[1][:-1]
label = np.load(label_path)
label = label.astype('float32')
else:
label = None
return data, label
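    # Annotation format sketch (hypothetical paths): each line is either
    #   "<data npy path>\t<label npy path>\n"  or just  "<data npy path>\n";
    # e.g. "seismic/batch0.npy\tvelocity/batch0.npy" yields (data, label), while a
    # line with no tab-separated label path yields (data, None).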
def __getitem__(self, idx):
batch_idx, sample_idx = idx // self.file_size, idx % self.file_size
if self.preload:
data = self.data_list[batch_idx][sample_idx]
label = self.label_list[batch_idx][sample_idx] if len(self.label_list) != 0 else None
else:
data, label = self.load_every(self.batches[batch_idx])
data = data[sample_idx]
label = label[sample_idx] if label is not None else None
if self.transform_data:
data = self.transform_data(data)
if self.transform_label and label is not None:
label = self.transform_label(label)
return data, label if label is not None else np.array([])
def __len__(self):
return len(self.batches) * self.file_size
if __name__ == '__main__':
transform_data = Compose([
T.LogTransform(k=1),
T.MinMaxNormalize(T.log_transform(-61, k=1), T.log_transform(120, k=1))
])
transform_label = Compose([
T.MinMaxNormalize(2000, 6000)
])
dataset = FWIDataset(f'relevant_files/temp.txt', transform_data=transform_data, transform_label=transform_label, file_size=1)
data, label = dataset[0]
print(data.shape)
print(label is None)
| 3,920 | 37.441176 | 129 | py |
OpenFWI | OpenFWI-main/scheduler.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
from bisect import bisect_right
# Scheduler adopted from the original repo
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=5,
warmup_method="linear",
last_epoch=-1,
):
        if not milestones == sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers. "
                "Got {}".format(milestones)
            )
        if warmup_method not in ("constant", "linear"):
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted, "
                "got {}".format(warmup_method)
            )
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = float(self.last_epoch) / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr *
warmup_factor *
self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
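# Minimal sketch of the resulting schedule (toy settings, not the training defaults):
# the lr ramps linearly from lr * warmup_factor to lr over `warmup_iters` steps, then
# drops by `gamma` at each milestone.
if __name__ == '__main__':
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=1.0)
    sched = WarmupMultiStepLR(opt, milestones=[4], gamma=0.1,
                              warmup_factor=0.5, warmup_iters=2)
    lrs = []
    for _ in range(6):
        lrs.append(opt.param_groups[0]['lr'])
        opt.step()
        sched.step()
    print(lrs)  # [0.5, 0.75, 1.0, 1.0, 0.1, 0.1]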
| 2,380 | 35.075758 | 105 | py |
OpenFWI | OpenFWI-main/train.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import os
import sys
import time
import datetime
import json
import torch
from torch import nn
from torch.utils.data import RandomSampler, DataLoader
from torch.utils.data.dataloader import default_collate
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel
from torch.utils.tensorboard import SummaryWriter
import torchvision
from torchvision.transforms import Compose
import utils
import network
from dataset import FWIDataset
from scheduler import WarmupMultiStepLR
import transforms as T
step = 0
def train_one_epoch(model, criterion, optimizer, lr_scheduler,
dataloader, device, epoch, print_freq, writer):
global step
model.train()
# Logger setup
metric_logger = utils.MetricLogger(delimiter=' ')
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}'))
metric_logger.add_meter('samples/s', utils.SmoothedValue(window_size=10, fmt='{value:.3f}'))
header = 'Epoch: [{}]'.format(epoch)
for data, label in metric_logger.log_every(dataloader, print_freq, header):
start_time = time.time()
optimizer.zero_grad()
data, label = data.to(device), label.to(device)
output = model(data)
loss, loss_g1v, loss_g2v = criterion(output, label)
loss.backward()
optimizer.step()
loss_val = loss.item()
loss_g1v_val = loss_g1v.item()
loss_g2v_val = loss_g2v.item()
batch_size = data.shape[0]
metric_logger.update(loss=loss_val, loss_g1v=loss_g1v_val,
loss_g2v=loss_g2v_val, lr=optimizer.param_groups[0]['lr'])
metric_logger.meters['samples/s'].update(batch_size / (time.time() - start_time))
if writer:
writer.add_scalar('loss', loss_val, step)
writer.add_scalar('loss_g1v', loss_g1v_val, step)
writer.add_scalar('loss_g2v', loss_g2v_val, step)
step += 1
lr_scheduler.step()
def evaluate(model, criterion, dataloader, device, writer):
model.eval()
metric_logger = utils.MetricLogger(delimiter=' ')
header = 'Test:'
with torch.no_grad():
for data, label in metric_logger.log_every(dataloader, 20, header):
data = data.to(device, non_blocking=True)
label = label.to(device, non_blocking=True)
output = model(data)
loss, loss_g1v, loss_g2v = criterion(output, label)
metric_logger.update(loss=loss.item(),
loss_g1v=loss_g1v.item(),
loss_g2v=loss_g2v.item())
# Gather the stats from all processes
metric_logger.synchronize_between_processes()
print(' * Loss {loss.global_avg:.8f}\n'.format(loss=metric_logger.loss))
if writer:
writer.add_scalar('loss', metric_logger.loss.global_avg, step)
writer.add_scalar('loss_g1v', metric_logger.loss_g1v.global_avg, step)
writer.add_scalar('loss_g2v', metric_logger.loss_g2v.global_avg, step)
return metric_logger.loss.global_avg
def main(args):
global step
print(args)
print('torch version: ', torch.__version__)
print('torchvision version: ', torchvision.__version__)
utils.mkdir(args.output_path) # create folder to store checkpoints
utils.init_distributed_mode(args) # distributed mode initialization
# Set up tensorboard summary writer
train_writer, val_writer = None, None
if args.tensorboard:
utils.mkdir(args.log_path) # create folder to store tensorboard logs
if not args.distributed or (args.rank == 0) and (args.local_rank == 0):
train_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'train'))
val_writer = SummaryWriter(os.path.join(args.output_path, 'logs', 'val'))
device = torch.device(args.device)
torch.backends.cudnn.benchmark = True
with open('dataset_config.json') as f:
try:
ctx = json.load(f)[args.dataset]
except KeyError:
print('Unsupported dataset.')
sys.exit()
if args.file_size is not None:
ctx['file_size'] = args.file_size
# Create dataset and dataloader
print('Loading data')
print('Loading training data')
# Normalize data and label to [-1, 1]
transform_data = Compose([
T.LogTransform(k=args.k),
T.MinMaxNormalize(T.log_transform(ctx['data_min'], k=args.k), T.log_transform(ctx['data_max'], k=args.k))
])
transform_label = Compose([
T.MinMaxNormalize(ctx['label_min'], ctx['label_max'])
])
if args.train_anno[-3:] == 'txt':
dataset_train = FWIDataset(
args.train_anno,
preload=True,
sample_ratio=args.sample_temporal,
file_size=ctx['file_size'],
transform_data=transform_data,
transform_label=transform_label
)
else:
dataset_train = torch.load(args.train_anno)
print('Loading validation data')
if args.val_anno[-3:] == 'txt':
dataset_valid = FWIDataset(
args.val_anno,
preload=True,
sample_ratio=args.sample_temporal,
file_size=ctx['file_size'],
transform_data=transform_data,
transform_label=transform_label
)
else:
dataset_valid = torch.load(args.val_anno)
print('Creating data loaders')
if args.distributed:
train_sampler = DistributedSampler(dataset_train, shuffle=True)
valid_sampler = DistributedSampler(dataset_valid, shuffle=True)
else:
train_sampler = RandomSampler(dataset_train)
valid_sampler = RandomSampler(dataset_valid)
dataloader_train = DataLoader(
dataset_train, batch_size=args.batch_size,
sampler=train_sampler, num_workers=args.workers,
pin_memory=True, drop_last=True, collate_fn=default_collate)
dataloader_valid = DataLoader(
dataset_valid, batch_size=args.batch_size,
sampler=valid_sampler, num_workers=args.workers,
pin_memory=True, collate_fn=default_collate)
print('Creating model')
if args.model not in network.model_dict:
print('Unsupported model.')
sys.exit()
model = network.model_dict[args.model](upsample_mode=args.up_mode,
sample_spatial=args.sample_spatial, sample_temporal=args.sample_temporal).to(device)
if args.distributed and args.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
# Define loss function
l1loss = nn.L1Loss()
l2loss = nn.MSELoss()
def criterion(pred, gt):
loss_g1v = l1loss(pred, gt)
loss_g2v = l2loss(pred, gt)
loss = args.lambda_g1v * loss_g1v + args.lambda_g2v * loss_g2v
return loss, loss_g1v, loss_g2v
# Scale lr according to effective batch size
lr = args.lr * args.world_size
optimizer = torch.optim.AdamW(model.parameters(), lr=lr, betas=(0.9, 0.999), weight_decay=args.weight_decay)
# Convert scheduler to be per iteration instead of per epoch
warmup_iters = args.lr_warmup_epochs * len(dataloader_train)
lr_milestones = [len(dataloader_train) * m for m in args.lr_milestones]
lr_scheduler = WarmupMultiStepLR(
optimizer, milestones=lr_milestones, gamma=args.lr_gamma,
warmup_iters=warmup_iters, warmup_factor=1e-5)
model_without_ddp = model
if args.distributed:
model = DistributedDataParallel(model, device_ids=[args.local_rank])
model_without_ddp = model.module
if args.resume:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(network.replace_legacy(checkpoint['model']))
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
step = checkpoint['step']
lr_scheduler.milestones=lr_milestones
print('Start training')
start_time = time.time()
best_loss = 10
    chp = 1
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
train_one_epoch(model, criterion, optimizer, lr_scheduler, dataloader_train,
device, epoch, args.print_freq, train_writer)
loss = evaluate(model, criterion, dataloader_valid, device, val_writer)
checkpoint = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict(),
'epoch': epoch,
'step': step,
'args': args}
# Save checkpoint per epoch
if loss < best_loss:
utils.save_on_master(
checkpoint,
os.path.join(args.output_path, 'checkpoint.pth'))
print('saving checkpoint at epoch: ', epoch)
chp = epoch
best_loss = loss
# Save checkpoint every epoch block
print('current best loss: ', best_loss)
print('current best epoch: ', chp)
if args.output_path and (epoch + 1) % args.epoch_block == 0:
utils.save_on_master(
checkpoint,
os.path.join(args.output_path, 'model_{}.pth'.format(epoch + 1)))
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
def parse_args():
import argparse
parser = argparse.ArgumentParser(description='FCN Training')
parser.add_argument('-d', '--device', default='cuda', help='device')
parser.add_argument('-ds', '--dataset', default='flatfault-b', type=str, help='dataset name')
parser.add_argument('-fs', '--file-size', default=None, type=int, help='number of samples in each npy file')
# Path related
parser.add_argument('-ap', '--anno-path', default='split_files', help='annotation files location')
parser.add_argument('-t', '--train-anno', default='flatfault_b_train_invnet.txt', help='name of train anno')
parser.add_argument('-v', '--val-anno', default='flatfault_b_val_invnet.txt', help='name of val anno')
parser.add_argument('-o', '--output-path', default='Invnet_models', help='path to parent folder to save checkpoints')
parser.add_argument('-l', '--log-path', default='Invnet_models', help='path to parent folder to save logs')
parser.add_argument('-n', '--save-name', default='fcn_l1loss_ffb', help='folder name for this experiment')
parser.add_argument('-s', '--suffix', type=str, default=None, help='subfolder name for this run')
# Model related
parser.add_argument('-m', '--model', type=str, help='inverse model name')
parser.add_argument('-um', '--up-mode', default=None, help='upsampling layer mode such as "nearest", "bicubic", etc.')
parser.add_argument('-ss', '--sample-spatial', type=float, default=1.0, help='spatial sampling ratio')
parser.add_argument('-st', '--sample-temporal', type=int, default=1, help='temporal sampling ratio')
# Training related
parser.add_argument('-b', '--batch-size', default=256, type=int)
parser.add_argument('--lr', default=0.0001, type=float, help='initial learning rate')
parser.add_argument('-lm', '--lr-milestones', nargs='+', default=[], type=int, help='decrease lr on milestones')
parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
parser.add_argument('--weight-decay', default=1e-4 , type=float, help='weight decay (default: 1e-4)')
parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma')
parser.add_argument('--lr-warmup-epochs', default=0, type=int, help='number of warmup epochs')
parser.add_argument('-eb', '--epoch_block', type=int, default=40, help='epochs in a saved block')
parser.add_argument('-nb', '--num_block', type=int, default=3, help='number of saved block')
parser.add_argument('-j', '--workers', default=16, type=int, help='number of data loading workers (default: 16)')
parser.add_argument('--k', default=1, type=float, help='k in log transformation')
parser.add_argument('--print-freq', default=50, type=int, help='print frequency')
parser.add_argument('-r', '--resume', default=None, help='resume from checkpoint')
parser.add_argument('--start-epoch', default=0, type=int, help='start epoch')
# Loss related
parser.add_argument('-g1v', '--lambda_g1v', type=float, default=1.0)
parser.add_argument('-g2v', '--lambda_g2v', type=float, default=1.0)
# Distributed training related
parser.add_argument('--sync-bn', action='store_true', help='Use sync batch norm')
parser.add_argument('--world-size', default=1, type=int, help='number of distributed processes')
parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training')
# Tensorboard related
parser.add_argument('--tensorboard', action='store_true', help='Use tensorboard for logging.')
args = parser.parse_args()
args.output_path = os.path.join(args.output_path, args.save_name, args.suffix or '')
args.log_path = os.path.join(args.log_path, args.save_name, args.suffix or '')
args.train_anno = os.path.join(args.anno_path, args.train_anno)
args.val_anno = os.path.join(args.anno_path, args.val_anno)
args.epochs = args.epoch_block * args.num_block
if args.resume:
args.resume = os.path.join(args.output_path, args.resume)
return args
if __name__ == '__main__':
args = parse_args()
main(args)
| 14,469 | 41.558824 | 122 | py |
OpenFWI | OpenFWI-main/transforms.py | # © 2022. Triad National Security, LLC. All rights reserved.
# This program was produced under U.S. Government contract 89233218CNA000001 for Los Alamos
# National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S.
# Department of Energy/National Nuclear Security Administration. All rights in the program are
# reserved by Triad National Security, LLC, and the U.S. Department of Energy/National Nuclear
# Security Administration. The Government is granted for itself and others acting on its behalf a
# nonexclusive, paid-up, irrevocable worldwide license in this material to reproduce, prepare
# derivative works, distribute copies to the public, perform publicly and display publicly, and to permit
# others to do so.
import torch
import numpy as np
import random
from sklearn.decomposition import PCA
def crop(vid, i, j, h, w):
return vid[..., i:(i + h), j:(j + w)]
def center_crop(vid, output_size):
h, w = vid.shape[-2:]
th, tw = output_size
i = int(round((h - th) / 2.))
j = int(round((w - tw) / 2.))
return crop(vid, i, j, th, tw)
def hflip(vid):
return vid.flip(dims=(-1,))
# NOTE: for those functions, which generally expect mini-batches, we keep them
# as non-minibatch so that they are applied as if they were 4d (thus image).
# this way, we only apply the transformation in the spatial domain
def resize(vid, size, interpolation='bilinear'):
# NOTE: using bilinear interpolation because we don't work on minibatches
# at this level
scale = None
if isinstance(size, int):
scale = float(size) / min(vid.shape[-2:])
size = None
return torch.nn.functional.interpolate(
vid, size=size, scale_factor=scale, mode=interpolation, align_corners=False)
def random_resize(vid, size, random_factor, interpolation='bilinear'):
# NOTE: using bilinear interpolation because we don't work on minibatches
# at this level
scale = None
r = 1 + random.random() * (random_factor - 1)
if isinstance(size, int):
scale = float(size) / min(vid.shape[-2:]) * r
size = None
else:
size = tuple([int(elem * r) for elem in list(size)])
return torch.nn.functional.interpolate(
vid, size=size, scale_factor=scale, mode=interpolation, align_corners=False)
def pad(vid, padding, fill=0, padding_mode="constant"):
    # NOTE: we don't want to pad the temporal dimension, so we leave the input as
    # non-batch (4d) before padding. This works as expected.
return torch.nn.functional.pad(vid, padding, value=fill, mode=padding_mode)
def to_normalized_float_tensor(vid):
return vid.permute(3, 0, 1, 2).to(torch.float32) / 255
def normalize(vid, mean, std):
shape = (-1,) + (1,) * (vid.dim() - 1)
mean = torch.as_tensor(mean).reshape(shape)
std = torch.as_tensor(std).reshape(shape)
return (vid - mean) / std
def minmax_normalize(vid, vmin, vmax, scale=2):
vid -= vmin
vid /= (vmax - vmin)
return (vid - 0.5) * 2 if scale == 2 else vid
def minmax_denormalize(vid, vmin, vmax, scale=2):
if scale == 2:
vid = vid / 2 + 0.5
return vid * (vmax - vmin) + vmin
def add_noise(data, snr):
sig_avg_power_db = 10*np.log10(np.mean(data**2))
noise_avg_power_db = sig_avg_power_db - snr
noise_avg_power = 10**(noise_avg_power_db/10)
noise = np.random.normal(0, np.sqrt(noise_avg_power), data.shape)
noisy_data = data + noise
return noisy_data
def log_transform(data, k=1, c=0):
return (np.log1p(np.abs(k * data) + c)) * np.sign(data)
def log_transform_tensor(data, k=1, c=0):
return (torch.log1p(torch.abs(k * data) + c)) * torch.sign(data)
def exp_transform(data, k=1, c=0):
return (np.expm1(np.abs(data)) - c) * np.sign(data) / k
def tonumpy_denormalize(vid, vmin, vmax, exp=True, k=1, c=0, scale=2):
if exp:
vmin = log_transform(vmin, k=k, c=c)
vmax = log_transform(vmax, k=k, c=c)
vid = minmax_denormalize(vid.cpu().numpy(), vmin, vmax, scale)
return exp_transform(vid, k=k, c=c) if exp else vid
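# Round-trip sketch (k=1, c=0): log_transform compresses the dynamic range of the
# seismic amplitudes while keeping the sign, and exp_transform inverts it.
def _log_transform_roundtrip_example():
    x = np.array([-2.0, 0.0, 3.0])
    assert np.allclose(exp_transform(log_transform(x, k=1), k=1), x)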
# Class interface
class RandomCrop(object):
def __init__(self, size):
self.size = size
@staticmethod
def get_params(vid, output_size):
"""Get parameters for ``crop`` for a random crop.
"""
h, w = vid.shape[-2:]
th, tw = output_size
if w == tw and h == th:
return 0, 0, h, w
i = random.randint(0, h - th)
j = random.randint(0, w - tw)
return i, j, th, tw
def __call__(self, vid):
i, j, h, w = self.get_params(vid, self.size)
return crop(vid, i, j, h, w)
class CenterCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, vid):
return center_crop(vid, self.size)
class Resize(object):
def __init__(self, size):
self.size = size
def __call__(self, vid):
return resize(vid, self.size)
class RandomResize(object):
def __init__(self, size, random_factor=1.25):
self.size = size
self.factor = random_factor
def __call__(self, vid):
return random_resize(vid, self.size, self.factor)
class ToFloatTensorInZeroOne(object):
def __call__(self, vid):
return to_normalized_float_tensor(vid)
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, vid):
return normalize(vid, self.mean, self.std)
class MinMaxNormalize(object):
def __init__(self, datamin, datamax, scale=2):
self.datamin = datamin
self.datamax = datamax
self.scale = scale
def __call__(self, vid):
return minmax_normalize(vid, self.datamin, self.datamax, self.scale)
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, vid):
if random.random() < self.p:
return hflip(vid)
return vid
class Pad(object):
def __init__(self, padding, fill=0):
self.padding = padding
self.fill = fill
def __call__(self, vid):
return pad(vid, self.padding, self.fill)
class TemporalDownsample(object):
def __init__(self, rate=1):
self.rate = rate
def __call__(self, vid):
return vid[::self.rate]
class AddNoise(object):
def __init__(self, snr=10):
self.snr = snr
def __call__(self, vid):
return add_noise(vid, self.snr)
class PCD(object):
def __init__(self, n_comp=8):
self.pca = PCA(n_components=n_comp)
def __call__(self, data):
        data = data.reshape((data.shape[0], -1))
feat_mean = data.mean(axis=0)
data -= np.tile(feat_mean, (data.shape[0], 1))
pc = self.pca.fit_transform(data)
pc = pc.reshape((-1,))
pc = pc[:, np.newaxis, np.newaxis]
return pc
class StackPCD(object):
def __init__(self, n_comp=(32, 8)):
self.primary_pca = PCA(n_components=n_comp[0])
self.secondary_pca = PCA(n_components=n_comp[1])
def __call__(self, data):
data = np.transpose(data, (0, 2, 1))
primary_pc = []
for sample in data:
feat_mean = sample.mean(axis=0)
sample -= np.tile(feat_mean, (sample.shape[0], 1))
primary_pc.append(self.primary_pca.fit_transform(sample))
primary_pc = np.array(primary_pc)
data = primary_pc.reshape((data.shape[0], -1))
feat_mean = data.mean(axis=0)
data -= np.tile(feat_mean, (data.shape[0], 1))
secondary_pc = self.secondary_pca.fit_transform(data)
secondary_pc = secondary_pc.reshape((-1,))
        secondary_pc = secondary_pc[:, np.newaxis, np.newaxis]
return secondary_pc
class LogTransform(object):
def __init__(self, k=1, c=0):
self.k = k
self.c = c
def __call__(self, data):
return log_transform(data, k=self.k, c=self.c)
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
# def __init__(self, device):
# self.device = device
def __call__(self, sample):
return torch.from_numpy(sample)
| 8,236 | 29.394834 | 105 | py |
mmvae-public | mmvae-public/src/main.py | import argparse
import datetime
import sys
import json
from collections import defaultdict
from pathlib import Path
from tempfile import mkdtemp
import numpy as np
import torch
from torch import optim
import models
import objectives
from utils import Logger, Timer, save_model, save_vars, unpack_data
parser = argparse.ArgumentParser(description='Multi-Modal VAEs')
parser.add_argument('--experiment', type=str, default='', metavar='E',
help='experiment name')
parser.add_argument('--model', type=str, default='mnist_svhn', metavar='M',
choices=[s[4:] for s in dir(models) if 'VAE_' in s],
help='model name (default: mnist_svhn)')
parser.add_argument('--obj', type=str, default='elbo', metavar='O',
choices=['elbo', 'iwae', 'dreg'],
help='objective to use (default: elbo)')
parser.add_argument('--K', type=int, default=20, metavar='K',
                    help='number of particles to use for iwae/dreg (default: 20)')
parser.add_argument('--looser', action='store_true', default=False,
help='use the looser version of IWAE/DREG')
parser.add_argument('--llik_scaling', type=float, default=0.,
                    help='likelihood scaling for cub images/svhn modality when running in '
'multimodal setting, set as 0 to use default value')
parser.add_argument('--batch-size', type=int, default=256, metavar='N',
help='batch size for data (default: 256)')
parser.add_argument('--epochs', type=int, default=10, metavar='E',
help='number of epochs to train (default: 10)')
parser.add_argument('--latent-dim', type=int, default=20, metavar='L',
help='latent dimensionality (default: 20)')
parser.add_argument('--num-hidden-layers', type=int, default=1, metavar='H',
help='number of hidden layers in enc and dec (default: 1)')
parser.add_argument('--pre-trained', type=str, default="",
help='path to pre-trained model (train from scratch if empty)')
parser.add_argument('--learn-prior', action='store_true', default=False,
help='learn model prior parameters')
parser.add_argument('--logp', action='store_true', default=False,
help='estimate tight marginal likelihood on completion')
parser.add_argument('--print-freq', type=int, default=0, metavar='f',
help='frequency with which to print stats (default: 0)')
parser.add_argument('--no-analytics', action='store_true', default=False,
help='disable plotting analytics')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disable CUDA use')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
# args
args = parser.parse_args()
# random seed
# https://pytorch.org/docs/stable/notes/randomness.html
torch.backends.cudnn.benchmark = True
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# load args from disk if pretrained model path is given
pretrained_path = ""
if args.pre_trained:
pretrained_path = args.pre_trained
args = torch.load(args.pre_trained + '/args.rar')
args.cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
# load model
modelC = getattr(models, 'VAE_{}'.format(args.model))
model = modelC(args).to(device)
if pretrained_path:
print('Loading model {} from {}'.format(model.modelName, pretrained_path))
model.load_state_dict(torch.load(pretrained_path + '/model.rar'))
model._pz_params = model._pz_params
if not args.experiment:
args.experiment = model.modelName
# set up run path
runId = datetime.datetime.now().isoformat()
experiment_dir = Path('../experiments/' + args.experiment)
experiment_dir.mkdir(parents=True, exist_ok=True)
runPath = mkdtemp(prefix=runId, dir=str(experiment_dir))
sys.stdout = Logger('{}/run.log'.format(runPath))
print('Expt:', runPath)
print('RunID:', runId)
# save args to run
with open('{}/args.json'.format(runPath), 'w') as fp:
json.dump(args.__dict__, fp)
# -- also save object because we want to recover these for other things
torch.save(args, '{}/args.rar'.format(runPath))
# preparation for training
optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
lr=1e-3, amsgrad=True)
train_loader, test_loader = model.getDataLoaders(args.batch_size, device=device)
objective = getattr(objectives,
('m_' if hasattr(model, 'vaes') else '')
+ args.obj
+ ('_looser' if (args.looser and args.obj != 'elbo') else ''))
t_objective = getattr(objectives, ('m_' if hasattr(model, 'vaes') else '') + 'iwae')
def train(epoch, agg):
model.train()
b_loss = 0
for i, dataT in enumerate(train_loader):
data = unpack_data(dataT, device=device)
optimizer.zero_grad()
loss = -objective(model, data, K=args.K)
loss.backward()
optimizer.step()
b_loss += loss.item()
if args.print_freq > 0 and i % args.print_freq == 0:
print("iteration {:04d}: loss: {:6.3f}".format(i, loss.item() / args.batch_size))
agg['train_loss'].append(b_loss / len(train_loader.dataset))
print('====> Epoch: {:03d} Train loss: {:.4f}'.format(epoch, agg['train_loss'][-1]))
def test(epoch, agg):
model.eval()
b_loss = 0
with torch.no_grad():
for i, dataT in enumerate(test_loader):
data = unpack_data(dataT, device=device)
loss = -t_objective(model, data, K=args.K)
b_loss += loss.item()
if i == 0:
model.reconstruct(data, runPath, epoch)
if not args.no_analytics:
model.analyse(data, runPath, epoch)
agg['test_loss'].append(b_loss / len(test_loader.dataset))
print('====> Test loss: {:.4f}'.format(agg['test_loss'][-1]))
def estimate_log_marginal(K):
"""Compute an IWAE estimate of the log-marginal likelihood of test data."""
model.eval()
marginal_loglik = 0
with torch.no_grad():
for dataT in test_loader:
data = unpack_data(dataT, device=device)
marginal_loglik += -t_objective(model, data, K).item()
marginal_loglik /= len(test_loader.dataset)
print('Marginal Log Likelihood (IWAE, K = {}): {:.4f}'.format(K, marginal_loglik))
if __name__ == '__main__':
with Timer('MM-VAE') as t:
agg = defaultdict(list)
for epoch in range(1, args.epochs + 1):
train(epoch, agg)
test(epoch, agg)
save_model(model, runPath + '/model.rar')
save_vars(agg, runPath + '/losses.rar')
model.generate(runPath, epoch)
if args.logp: # compute as tight a marginal likelihood as possible
estimate_log_marginal(5000)
| 6,968 | 40.482143 | 93 | py |
mmvae-public | mmvae-public/src/vis.py | # visualisation related functions
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
from matplotlib.lines import Line2D
from umap import UMAP
def custom_cmap(n):
"""Create customised colormap for scattered latent plot of n categories.
Returns colormap object and colormap array that contains the RGB value of the colors.
See official matplotlib document for colormap reference:
https://matplotlib.org/examples/color/colormaps_reference.html
"""
    # first colour is grey from Set1, the rest from a sensible categorical colourmap
cmap_array = sns.color_palette("Set1", 9)[-1:] + sns.husl_palette(n - 1, h=.6, s=0.7)
cmap = colors.LinearSegmentedColormap.from_list('mmdgm_cmap', cmap_array)
return cmap, cmap_array
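# usage sketch: custom_cmap(3) (e.g. a prior plus two modalities) gives grey for the
# first category and two husl colours for the remaining ones.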
def embed_umap(data):
"""data should be on cpu, numpy"""
embedding = UMAP(metric='euclidean',
n_neighbors=40,
# angular_rp_forest=True,
# random_state=torch.initial_seed(),
transform_seed=torch.initial_seed())
return embedding.fit_transform(data)
def plot_embeddings(emb, emb_l, labels, filepath):
cmap_obj, cmap_arr = custom_cmap(n=len(labels))
plt.figure()
plt.scatter(emb[:, 0], emb[:, 1], c=emb_l, cmap=cmap_obj, s=25, alpha=0.2, edgecolors='none')
l_elems = [Line2D([0], [0], marker='o', color=cm, label=l, alpha=0.5, linestyle='None')
for (cm, l) in zip(cmap_arr, labels)]
plt.legend(frameon=False, loc=2, handles=l_elems)
plt.savefig(filepath, bbox_inches='tight')
plt.close()
def tensor_to_df(tensor, ax_names=None):
assert tensor.ndim == 2, "Can only currently convert 2D tensors to dataframes"
df = pd.DataFrame(data=tensor, columns=np.arange(tensor.shape[1]))
return df.melt(value_vars=df.columns,
var_name=('variable' if ax_names is None else ax_names[0]),
value_name=('value' if ax_names is None else ax_names[1]))
def tensors_to_df(tensors, head=None, keys=None, ax_names=None):
dfs = [tensor_to_df(tensor, ax_names=ax_names) for tensor in tensors]
df = pd.concat(dfs, keys=(np.arange(len(tensors)) if keys is None else keys))
df.reset_index(level=0, inplace=True)
if head is not None:
df.rename(columns={'level_0': head}, inplace=True)
return df
def plot_kls_df(df, filepath):
_, cmap_arr = custom_cmap(df[df.columns[0]].nunique() + 1)
with sns.plotting_context("notebook", font_scale=2.0):
g = sns.FacetGrid(df, height=12, aspect=2)
g = g.map(sns.boxplot, df.columns[1], df.columns[2], df.columns[0], palette=cmap_arr[1:],
order=None, hue_order=None)
g = g.set(yscale='log').despine(offset=10)
plt.legend(loc='best', fontsize='22')
plt.savefig(filepath, bbox_inches='tight')
plt.close()
| 2,938 | 39.260274 | 97 | py |
mmvae-public | mmvae-public/src/utils.py | import math
import os
import shutil
import sys
import time
import torch
import torch.distributions as dist
import torch.nn.functional as F
from datasets import CUBImageFt
# Classes
class Constants(object):
eta = 1e-6
log2 = math.log(2)
log2pi = math.log(2 * math.pi)
logceilc = 88 # largest cuda v s.t. exp(v) < inf
logfloorc = -104 # smallest cuda v s.t. exp(v) > 0
# https://stackoverflow.com/questions/14906764/how-to-redirect-stdout-to-both-file-and-console-with-scripting
class Logger(object):
def __init__(self, filename, mode="a"):
self.terminal = sys.stdout
self.log = open(filename, mode)
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
# this flush method is needed for python 3 compatibility.
# this handles the flush command by doing nothing.
# you might want to specify some extra behavior here.
pass
class Timer:
def __init__(self, name):
self.name = name
def __enter__(self):
self.begin = time.time()
return self
def __exit__(self, *args):
self.end = time.time()
self.elapsed = self.end - self.begin
self.elapsedH = time.gmtime(self.elapsed)
print('====> [{}] Time: {:7.3f}s or {}'
.format(self.name,
self.elapsed,
time.strftime("%H:%M:%S", self.elapsedH)))
# Functions
def save_vars(vs, filepath):
"""
Saves variables to the given filepath in a safe manner.
"""
if os.path.exists(filepath):
shutil.copyfile(filepath, '{}.old'.format(filepath))
torch.save(vs, filepath)
def save_model(model, filepath):
"""
To load a saved model, simply use
`model.load_state_dict(torch.load('path-to-saved-model'))`.
"""
save_vars(model.state_dict(), filepath)
if hasattr(model, 'vaes'):
for vae in model.vaes:
fdir, fext = os.path.splitext(filepath)
save_vars(vae.state_dict(), fdir + '_' + vae.modelName + fext)
def is_multidata(dataB):
return isinstance(dataB, list) or isinstance(dataB, tuple)
def unpack_data(dataB, device='cuda'):
# dataB :: (Tensor, Idx) | [(Tensor, Idx)]
""" Unpacks the data batch object in an appropriate manner to extract data """
if is_multidata(dataB):
if torch.is_tensor(dataB[0]):
if torch.is_tensor(dataB[1]):
return dataB[0].to(device) # mnist, svhn, cubI
elif is_multidata(dataB[1]):
return dataB[0].to(device), dataB[1][0].to(device) # cubISft
else:
raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(dataB[1])))
elif is_multidata(dataB[0]):
return [d.to(device) for d in list(zip(*dataB))[0]] # mnist-svhn, cubIS
else:
raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(dataB[0])))
elif torch.is_tensor(dataB):
return dataB.to(device)
else:
raise RuntimeError('Invalid data format {} -- check your dataloader!'.format(type(dataB)))
def get_mean(d, K=100):
"""
Extract the `mean` parameter for given distribution.
If attribute not available, estimate from samples.
"""
try:
mean = d.mean
except NotImplementedError:
samples = d.rsample(torch.Size([K]))
mean = samples.mean(0)
return mean
def log_mean_exp(value, dim=0, keepdim=False):
return torch.logsumexp(value, dim, keepdim=keepdim) - math.log(value.size(dim))
def kl_divergence(d1, d2, K=100):
"""Computes closed-form KL if available, else computes a MC estimate."""
if (type(d1), type(d2)) in torch.distributions.kl._KL_REGISTRY:
return torch.distributions.kl_divergence(d1, d2)
else:
samples = d1.rsample(torch.Size([K]))
return (d1.log_prob(samples) - d2.log_prob(samples)).mean(0)
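# e.g. a pair of Normal distributions hits the registry and gets the analytic KL; any
# pair without a registered closed form falls back to the K-sample Monte Carlo
# estimate E_{z~d1}[log d1(z) - log d2(z)] above.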
def pdist(sample_1, sample_2, eps=1e-5):
"""Compute the matrix of all squared pairwise distances. Code
adapted from the torch-two-sample library (added batching).
You can find the original implementation of this function here:
https://github.com/josipd/torch-two-sample/blob/master/torch_two_sample/util.py
Arguments
---------
sample_1 : torch.Tensor or Variable
The first sample, should be of shape ``(batch_size, n_1, d)``.
sample_2 : torch.Tensor or Variable
The second sample, should be of shape ``(batch_size, n_2, d)``.
norm : float
The l_p norm to be used.
batched : bool
whether data is batched
Returns
-------
torch.Tensor or Variable
Matrix of shape (batch_size, n_1, n_2). The [i, j]-th entry is equal to
``|| sample_1[i, :] - sample_2[j, :] ||_p``."""
if len(sample_1.shape) == 2:
sample_1, sample_2 = sample_1.unsqueeze(0), sample_2.unsqueeze(0)
B, n_1, n_2 = sample_1.size(0), sample_1.size(1), sample_2.size(1)
norms_1 = torch.sum(sample_1 ** 2, dim=-1, keepdim=True)
norms_2 = torch.sum(sample_2 ** 2, dim=-1, keepdim=True)
norms = (norms_1.expand(B, n_1, n_2)
+ norms_2.transpose(1, 2).expand(B, n_1, n_2))
distances_squared = norms - 2 * sample_1.matmul(sample_2.transpose(1, 2))
return torch.sqrt(eps + torch.abs(distances_squared)).squeeze() # batch x K x latent
def NN_lookup(emb_h, emb, data):
indices = pdist(emb.to(emb_h.device), emb_h).argmin(dim=0)
# indices = torch.tensor(cosine_similarity(emb, emb_h.cpu().numpy()).argmax(0)).to(emb_h.device).squeeze()
return data[indices]
class FakeCategorical(dist.Distribution):
support = dist.constraints.real
has_rsample = True
def __init__(self, locs):
self.logits = locs
self._batch_shape = self.logits.shape
@property
def mean(self):
return self.logits
def sample(self, sample_shape=torch.Size()):
with torch.no_grad():
return self.rsample(sample_shape)
def rsample(self, sample_shape=torch.Size()):
return self.logits.expand([*sample_shape, *self.logits.shape]).contiguous()
def log_prob(self, value):
# value of shape (K, B, D)
lpx_z = -F.cross_entropy(input=self.logits.view(-1, self.logits.size(-1)),
target=value.expand(self.logits.size()[:-1]).long().view(-1),
reduction='none',
ignore_index=0)
return lpx_z.view(*self.logits.shape[:-1])
# It is inevitable to sum over the word-embedding dimension in the cross-entropy
# loss ($\sum_i -gt_i \log(p_i)$, with most gt_i = 0), so we adopt the operational
# equivalence here of summing over the sentence dimension in the objective.
| 6,857 | 32.950495 | 110 | py |
mmvae-public | mmvae-public/src/objectives.py | # objectives of choice
import torch
from numpy import prod
from utils import log_mean_exp, is_multidata, kl_divergence
# helper to vectorise computation
def compute_microbatch_split(x, K):
""" Checks if batch needs to be broken down further to fit in memory. """
B = x[0].size(0) if is_multidata(x) else x.size(0)
S = sum([1.0 / (K * prod(_x.size()[1:])) for _x in x]) if is_multidata(x) \
else 1.0 / (K * prod(x.size()[1:]))
S = int(1e8 * S) # float heuristic for 12Gb cuda memory
assert (S > 0), "Cannot fit individual data in memory, consider smaller K"
return min(B, S)
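# rough worked example: a single 1x28x28 input with K=1000 gives
# S = int(1e8 / (1000 * 784)) = 127, so larger batches are processed in chunks of 127.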
def elbo(model, x, K=1):
"""Computes E_{p(x)}[ELBO] """
qz_x, px_z, _ = model(x)
lpx_z = px_z.log_prob(x).view(*px_z.batch_shape[:2], -1) * model.llik_scaling
kld = kl_divergence(qz_x, model.pz(*model.pz_params))
return (lpx_z.sum(-1) - kld.sum(-1)).mean(0).sum()
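# i.e. the single-sample ELBO E_q[log p(x|z)] - KL(q(z|x) || p(z)) per datapoint,
# averaged over the sample dimension and summed over the batch.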
def _iwae(model, x, K):
"""IWAE estimate for log p_\theta(x) -- fully vectorised."""
qz_x, px_z, zs = model(x, K)
lpz = model.pz(*model.pz_params).log_prob(zs).sum(-1)
lpx_z = px_z.log_prob(x).view(*px_z.batch_shape[:2], -1) * model.llik_scaling
lqz_x = qz_x.log_prob(zs).sum(-1)
return lpz + lpx_z.sum(-1) - lqz_x
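# per-sample log importance weight: log w = log p(z) + log p(x|z) - log q(z|x);
# iwae() below applies log-mean-exp over the K samples and sums over the batch.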
def iwae(model, x, K):
"""Computes an importance-weighted ELBO estimate for log p_\theta(x)
Iterates over the batch as necessary.
"""
S = compute_microbatch_split(x, K)
lw = torch.cat([_iwae(model, _x, K) for _x in x.split(S)], 1) # concat on batch
return log_mean_exp(lw).sum()
def _dreg(model, x, K):
"""DREG estimate for log p_\theta(x) -- fully vectorised."""
_, px_z, zs = model(x, K)
lpz = model.pz(*model.pz_params).log_prob(zs).sum(-1)
lpx_z = px_z.log_prob(x).view(*px_z.batch_shape[:2], -1) * model.llik_scaling
qz_x = model.qz_x(*[p.detach() for p in model.qz_x_params]) # stop-grad for \phi
lqz_x = qz_x.log_prob(zs).sum(-1)
lw = lpz + lpx_z.sum(-1) - lqz_x
return lw, zs
def dreg(model, x, K, regs=None):
"""Computes a doubly-reparameterised importance-weighted ELBO estimate for log p_\theta(x)
Iterates over the batch as necessary.
"""
S = compute_microbatch_split(x, K)
lw, zs = zip(*[_dreg(model, _x, K) for _x in x.split(S)])
lw = torch.cat(lw, 1) # concat on batch
zs = torch.cat(zs, 1) # concat on batch
with torch.no_grad():
grad_wt = (lw - torch.logsumexp(lw, 0, keepdim=True)).exp()
if zs.requires_grad:
zs.register_hook(lambda grad: grad_wt.unsqueeze(-1) * grad)
return (grad_wt * lw).sum()
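# grad_wt holds the self-normalised importance weights w_k / sum_j w_j (computed
# without gradient); the backward hook rescales dL/dz by these weights so the
# inference-network gradient matches the doubly-reparameterised (DReG) estimator,
# while (grad_wt * lw).sum() acts as the surrogate objective.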
# multi-modal variants
def m_elbo_naive(model, x, K=1):
"""Computes E_{p(x)}[ELBO] for multi-modal vae --- NOT EXPOSED"""
qz_xs, px_zs, zss = model(x)
lpx_zs, klds = [], []
for r, qz_x in enumerate(qz_xs):
kld = kl_divergence(qz_x, model.pz(*model.pz_params))
klds.append(kld.sum(-1))
for d, px_z in enumerate(px_zs[r]):
lpx_z = px_z.log_prob(x[d]) * model.vaes[d].llik_scaling
lpx_zs.append(lpx_z.view(*px_z.batch_shape[:2], -1).sum(-1))
obj = (1 / len(model.vaes)) * (torch.stack(lpx_zs).sum(0) - torch.stack(klds).sum(0))
return obj.mean(0).sum()
def m_elbo(model, x, K=1):
"""Computes importance-sampled m_elbo (in notes3) for multi-modal vae """
qz_xs, px_zs, zss = model(x)
lpx_zs, klds = [], []
for r, qz_x in enumerate(qz_xs):
kld = kl_divergence(qz_x, model.pz(*model.pz_params))
klds.append(kld.sum(-1))
for d in range(len(px_zs)):
lpx_z = px_zs[d][d].log_prob(x[d]).view(*px_zs[d][d].batch_shape[:2], -1)
lpx_z = (lpx_z * model.vaes[d].llik_scaling).sum(-1)
if d == r:
lwt = torch.tensor(0.0)
else:
zs = zss[d].detach()
lwt = (qz_x.log_prob(zs) - qz_xs[d].log_prob(zs).detach()).sum(-1)
lpx_zs.append(lwt.exp() * lpx_z)
obj = (1 / len(model.vaes)) * (torch.stack(lpx_zs).sum(0) - torch.stack(klds).sum(0))
return obj.mean(0).sum()
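# for d == r the weight lwt.exp() is 1 (own reconstruction); otherwise modality d's
# reconstruction from its own encoder's sample is importance-reweighted by
# q_r(z_d) / q_d(z_d) (with z_d and the denominator stop-gradded), so every encoder
# contributes to every modality's likelihood term.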
def _m_iwae(model, x, K=1):
"""IWAE estimate for log p_\theta(x) for multi-modal vae -- fully vectorised"""
qz_xs, px_zs, zss = model(x, K)
lws = []
for r, qz_x in enumerate(qz_xs):
lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum(-1)
lqz_x = log_mean_exp(torch.stack([qz_x.log_prob(zss[r]).sum(-1) for qz_x in qz_xs]))
lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1)
.mul(model.vaes[d].llik_scaling).sum(-1)
for d, px_z in enumerate(px_zs[r])]
lpx_z = torch.stack(lpx_z).sum(0)
lw = lpz + lpx_z - lqz_x
lws.append(lw)
    return torch.cat(lws)  # (n_modality * n_samples) x batch_size
def m_iwae(model, x, K=1):
"""Computes iwae estimate for log p_\theta(x) for multi-modal vae """
S = compute_microbatch_split(x, K)
x_split = zip(*[_x.split(S) for _x in x])
lw = [_m_iwae(model, _x, K) for _x in x_split]
lw = torch.cat(lw, 1) # concat on batch
return log_mean_exp(lw).sum()
def _m_iwae_looser(model, x, K=1):
"""IWAE estimate for log p_\theta(x) for multi-modal vae -- fully vectorised
This version is the looser bound---with the average over modalities outside the log
"""
qz_xs, px_zs, zss = model(x, K)
lws = []
for r, qz_x in enumerate(qz_xs):
lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum(-1)
lqz_x = log_mean_exp(torch.stack([qz_x.log_prob(zss[r]).sum(-1) for qz_x in qz_xs]))
lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1)
.mul(model.vaes[d].llik_scaling).sum(-1)
for d, px_z in enumerate(px_zs[r])]
lpx_z = torch.stack(lpx_z).sum(0)
lw = lpz + lpx_z - lqz_x
lws.append(lw)
    return torch.stack(lws)  # n_modality x n_samples x batch_size
def m_iwae_looser(model, x, K=1):
"""Computes iwae estimate for log p_\theta(x) for multi-modal vae
This version is the looser bound---with the average over modalities outside the log
"""
S = compute_microbatch_split(x, K)
x_split = zip(*[_x.split(S) for _x in x])
lw = [_m_iwae_looser(model, _x, K) for _x in x_split]
lw = torch.cat(lw, 2) # concat on batch
return log_mean_exp(lw, dim=1).mean(0).sum()
def _m_dreg(model, x, K=1):
"""DERG estimate for log p_\theta(x) for multi-modal vae -- fully vectorised"""
qz_xs, px_zs, zss = model(x, K)
qz_xs_ = [vae.qz_x(*[p.detach() for p in vae.qz_x_params]) for vae in model.vaes]
lws = []
for r, vae in enumerate(model.vaes):
lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum(-1)
lqz_x = log_mean_exp(torch.stack([qz_x_.log_prob(zss[r]).sum(-1) for qz_x_ in qz_xs_]))
lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1)
.mul(model.vaes[d].llik_scaling).sum(-1)
for d, px_z in enumerate(px_zs[r])]
lpx_z = torch.stack(lpx_z).sum(0)
lw = lpz + lpx_z - lqz_x
lws.append(lw)
return torch.cat(lws), torch.cat(zss)
def m_dreg(model, x, K=1):
"""Computes dreg estimate for log p_\theta(x) for multi-modal vae """
S = compute_microbatch_split(x, K)
x_split = zip(*[_x.split(S) for _x in x])
lw, zss = zip(*[_m_dreg(model, _x, K) for _x in x_split])
lw = torch.cat(lw, 1) # concat on batch
zss = torch.cat(zss, 1) # concat on batch
with torch.no_grad():
grad_wt = (lw - torch.logsumexp(lw, 0, keepdim=True)).exp()
if zss.requires_grad:
zss.register_hook(lambda grad: grad_wt.unsqueeze(-1) * grad)
return (grad_wt * lw).sum()
def _m_dreg_looser(model, x, K=1):
"""DERG estimate for log p_\theta(x) for multi-modal vae -- fully vectorised
This version is the looser bound---with the average over modalities outside the log
"""
qz_xs, px_zs, zss = model(x, K)
qz_xs_ = [vae.qz_x(*[p.detach() for p in vae.qz_x_params]) for vae in model.vaes]
lws = []
for r, vae in enumerate(model.vaes):
lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum(-1)
lqz_x = log_mean_exp(torch.stack([qz_x_.log_prob(zss[r]).sum(-1) for qz_x_ in qz_xs_]))
lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1)
.mul(model.vaes[d].llik_scaling).sum(-1)
for d, px_z in enumerate(px_zs[r])]
lpx_z = torch.stack(lpx_z).sum(0)
lw = lpz + lpx_z - lqz_x
lws.append(lw)
return torch.stack(lws), torch.stack(zss)
def m_dreg_looser(model, x, K=1):
"""Computes dreg estimate for log p_\theta(x) for multi-modal vae
This version is the looser bound---with the average over modalities outside the log
"""
S = compute_microbatch_split(x, K)
x_split = zip(*[_x.split(S) for _x in x])
lw, zss = zip(*[_m_dreg_looser(model, _x, K) for _x in x_split])
lw = torch.cat(lw, 2) # concat on batch
zss = torch.cat(zss, 2) # concat on batch
with torch.no_grad():
grad_wt = (lw - torch.logsumexp(lw, 1, keepdim=True)).exp()
if zss.requires_grad:
zss.register_hook(lambda grad: grad_wt.unsqueeze(-1) * grad)
return (grad_wt * lw).mean(0).sum()
| 9,267 | 40.375 | 95 | py |
mmvae-public | mmvae-public/src/datasets.py | import io
import json
import os
import pickle
from collections import Counter, OrderedDict
from collections import defaultdict
import numpy as np
import torch
import torch.nn as nn
from nltk.tokenize import sent_tokenize, word_tokenize
from torch.utils.data import Dataset
from torchvision import transforms, models, datasets
class OrderedCounter(Counter, OrderedDict):
"""Counter that remembers the order elements are first encountered."""
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
class CUBSentences(Dataset):
def __init__(self, root_data_dir, split, transform=None, **kwargs):
"""split: 'trainval' or 'test' """
super().__init__()
self.data_dir = os.path.join(root_data_dir, 'cub')
self.split = split
self.max_sequence_length = kwargs.get('max_sequence_length', 32)
self.min_occ = kwargs.get('min_occ', 3)
self.transform = transform
os.makedirs(os.path.join(root_data_dir, "lang_emb"), exist_ok=True)
self.gen_dir = os.path.join(self.data_dir, "oc:{}_msl:{}".
format(self.min_occ, self.max_sequence_length))
if split == 'train':
self.raw_data_path = os.path.join(self.data_dir, 'text_trainvalclasses.txt')
elif split == 'test':
self.raw_data_path = os.path.join(self.data_dir, 'text_testclasses.txt')
else:
raise Exception("Only train or test split is available")
os.makedirs(self.gen_dir, exist_ok=True)
self.data_file = 'cub.{}.s{}'.format(split, self.max_sequence_length)
self.vocab_file = 'cub.vocab'
if not os.path.exists(os.path.join(self.gen_dir, self.data_file)):
print("Data file not found for {} split at {}. Creating new... (this may take a while)".
format(split.upper(), os.path.join(self.gen_dir, self.data_file)))
self._create_data()
else:
self._load_data()
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
sent = self.data[str(idx)]['idx']
if self.transform is not None:
sent = self.transform(sent)
return sent, self.data[str(idx)]['length']
@property
def vocab_size(self):
return len(self.w2i)
@property
def pad_idx(self):
return self.w2i['<pad>']
@property
def eos_idx(self):
return self.w2i['<eos>']
@property
def unk_idx(self):
return self.w2i['<unk>']
def get_w2i(self):
return self.w2i
def get_i2w(self):
return self.i2w
def _load_data(self, vocab=True):
with open(os.path.join(self.gen_dir, self.data_file), 'rb') as file:
self.data = json.load(file)
if vocab:
self._load_vocab()
def _load_vocab(self):
if not os.path.exists(os.path.join(self.gen_dir, self.vocab_file)):
self._create_vocab()
with open(os.path.join(self.gen_dir, self.vocab_file), 'r') as vocab_file:
vocab = json.load(vocab_file)
self.w2i, self.i2w = vocab['w2i'], vocab['i2w']
def _create_data(self):
if self.split == 'train' and not os.path.exists(os.path.join(self.gen_dir, self.vocab_file)):
self._create_vocab()
else:
self._load_vocab()
with open(self.raw_data_path, 'r') as file:
text = file.read()
sentences = sent_tokenize(text)
data = defaultdict(dict)
pad_count = 0
for i, line in enumerate(sentences):
words = word_tokenize(line)
tok = words[:self.max_sequence_length - 1]
tok = tok + ['<eos>']
length = len(tok)
if self.max_sequence_length > length:
tok.extend(['<pad>'] * (self.max_sequence_length - length))
pad_count += 1
idx = [self.w2i.get(w, self.w2i['<exc>']) for w in tok]
id = len(data)
data[id]['tok'] = tok
data[id]['idx'] = idx
data[id]['length'] = length
print("{} out of {} sentences are truncated with max sentence length {}.".
format(len(sentences) - pad_count, len(sentences), self.max_sequence_length))
with io.open(os.path.join(self.gen_dir, self.data_file), 'wb') as data_file:
data = json.dumps(data, ensure_ascii=False)
data_file.write(data.encode('utf8', 'replace'))
self._load_data(vocab=False)
def _create_vocab(self):
        assert self.split == 'train', "Vocabulary can only be created for training file."
with open(self.raw_data_path, 'r') as file:
text = file.read()
sentences = sent_tokenize(text)
occ_register = OrderedCounter()
w2i = dict()
i2w = dict()
special_tokens = ['<exc>', '<pad>', '<eos>']
for st in special_tokens:
i2w[len(w2i)] = st
w2i[st] = len(w2i)
texts = []
unq_words = []
for i, line in enumerate(sentences):
words = word_tokenize(line)
occ_register.update(words)
texts.append(words)
for w, occ in occ_register.items():
if occ > self.min_occ and w not in special_tokens:
i2w[len(w2i)] = w
w2i[w] = len(w2i)
else:
unq_words.append(w)
assert len(w2i) == len(i2w)
        print("Vocabulary of {} keys created, {} words are excluded (occurrence <= {})."
.format(len(w2i), len(unq_words), self.min_occ))
vocab = dict(w2i=w2i, i2w=i2w)
with io.open(os.path.join(self.gen_dir, self.vocab_file), 'wb') as vocab_file:
data = json.dumps(vocab, ensure_ascii=False)
vocab_file.write(data.encode('utf8', 'replace'))
with open(os.path.join(self.gen_dir, 'cub.unique'), 'wb') as unq_file:
pickle.dump(np.array(unq_words), unq_file)
with open(os.path.join(self.gen_dir, 'cub.all'), 'wb') as a_file:
pickle.dump(occ_register, a_file)
self._load_vocab()
class CUBImageFt(Dataset):
def __init__(self, root_data_dir, split, device):
"""split: 'trainval' or 'test' """
super().__init__()
self.data_dir = os.path.join(root_data_dir, 'cub')
self.data_file = os.path.join(self.data_dir, split)
self.gen_dir = os.path.join(self.data_dir, 'resnet101_2048')
self.gen_ft_file = os.path.join(self.gen_dir, '{}.ft'.format(split))
self.gen_data_file = os.path.join(self.gen_dir, '{}.data'.format(split))
self.split = split
tx = transforms.Compose([
transforms.Resize(224),
transforms.ToTensor()
])
self.dataset = datasets.ImageFolder(self.data_file, transform=tx)
os.makedirs(self.gen_dir, exist_ok=True)
if not os.path.exists(self.gen_ft_file):
print("Data file not found for CUB image features at `{}`. "
"Extracting resnet101 features from CUB image dataset... "
"(this may take a while)".format(self.gen_ft_file))
self._create_ft_mat(device)
else:
self._load_ft_mat()
def __len__(self):
return len(self.ft_mat)
def __getitem__(self, idx):
return self.ft_mat[idx]
def _load_ft_mat(self):
self.ft_mat = torch.load(self.gen_ft_file)
def _load_data(self):
self.data_mat = torch.load(self.gen_data_file)
def _create_ft_mat(self, device):
resnet = models.resnet101(pretrained=True)
modules = list(resnet.children())[:-1]
self.model = nn.Sequential(*modules)
self.model.eval()
kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
loader = torch.utils.data.DataLoader(self.dataset, batch_size=256,
shuffle=False, **kwargs)
with torch.no_grad():
ft_mat = torch.cat([self.model(data[0]).squeeze() for data in loader])
torch.save(ft_mat, self.gen_ft_file)
del ft_mat
data_mat = torch.cat([data[0].squeeze() for data in loader])
torch.save(data_mat, self.gen_data_file)
self._load_ft_mat()
| 8,431 | 32.19685 | 101 | py |
mmvae-public | mmvae-public/src/report/analyse_cub.py | """Calculate cross and joint coherence of language and image generation on CUB dataset using CCA."""
import argparse
import os
import sys
import torch
import torch.nn.functional as F
# relative import hack (sorry)
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir) # for system user
os.chdir(parentdir) # for pycharm user
import models
from utils import Logger, Timer, unpack_data
from helper import cca, fetch_emb, fetch_weights, fetch_pc, apply_weights, apply_pc
# variables
RESET = True
USE_PCA = True
maxSentLen = 32
minOccur = 3
lenEmbedding = 300
lenWindow = 3
fBase = 96
vocab_dir = '../data/cub/oc:{}_sl:{}_s:{}_w:{}'.format(minOccur, maxSentLen, lenEmbedding, lenWindow)
batch_size = 256
# args
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Analysing MM-DGM results')
parser.add_argument('--save-dir', type=str, default=".",
metavar='N', help='save directory of results')
parser.add_argument('--no-cuda', action='store_true', default=True,
help='disables CUDA use')
cmds = parser.parse_args()
runPath = cmds.save_dir
sys.stdout = Logger('{}/analyse.log'.format(runPath))
args = torch.load(runPath + '/args.rar')
# cuda stuff
needs_conversion = cmds.no_cuda and args.cuda
conversion_kwargs = {'map_location': lambda st, loc: st} if needs_conversion else {}
args.cuda = not cmds.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
torch.manual_seed(args.seed)
forward_args = {'drop_modality': True} if args.model == 'mcubISft' else {}
# load trained model
modelC = getattr(models, 'VAE_{}'.format(args.model))
model = modelC(args)
if args.cuda:
model.cuda()
model.load_state_dict(torch.load(runPath + '/model.rar', **conversion_kwargs), strict=False)
train_loader, test_loader = model.getDataLoaders(batch_size, device=device)
N = len(test_loader.dataset)
# generate word embeddings and sentence weighting
emb_path = os.path.join(vocab_dir, 'cub.emb')
weights_path = os.path.join(vocab_dir, 'cub.weights')
vocab_path = os.path.join(vocab_dir, 'cub.vocab')
pc_path = os.path.join(vocab_dir, 'cub.pc')
emb = fetch_emb(lenWindow, minOccur, emb_path, vocab_path, RESET)
weights = fetch_weights(weights_path, vocab_path, RESET, a=1e-3)
emb = torch.from_numpy(emb).to(device)
weights = torch.from_numpy(weights).to(device).type(emb.dtype)
u = fetch_pc(emb, weights, train_loader, pc_path, RESET)
# set up word to sentence functions
fn_to_emb = lambda data, emb=emb, weights=weights, u=u: \
apply_pc(apply_weights(emb, weights, data), u)
def calculate_corr(images, embeddings):
global RESET
if not os.path.exists(runPath + '/images_mean.pt') or RESET:
generate_cca_projection()
RESET = False
im_mean = torch.load(runPath + '/images_mean.pt')
emb_mean = torch.load(runPath + '/emb_mean.pt')
im_proj = torch.load(runPath + '/im_proj.pt')
emb_proj = torch.load(runPath + '/emb_proj.pt')
with torch.no_grad():
corr = F.cosine_similarity((images - im_mean) @ im_proj,
(embeddings - emb_mean) @ emb_proj).mean()
return corr
def generate_cca_projection():
images, sentences = [torch.cat(l) for l in zip(*[(d[0], d[1][0]) for d in train_loader])]
emb = fn_to_emb(sentences.int())
corr, (im_proj, emb_proj) = cca([images, emb], k=40)
print("Largest eigen value from CCA: {:.3f}".format(corr[0]))
torch.save(images.mean(dim=0), runPath + '/images_mean.pt')
torch.save(emb.mean(dim=0), runPath + '/emb_mean.pt')
torch.save(im_proj, runPath + '/im_proj.pt')
torch.save(emb_proj, runPath + '/emb_proj.pt')
def cross_coherence():
model.eval()
with torch.no_grad():
i2t = []
s2i = []
gt = []
for i, dataT in enumerate(test_loader):
# get the inputs
images, sentences = unpack_data(dataT, device=device)
if images.shape[0] != batch_size:
break
_, px_zs, _ = model([images, sentences], K=1, **forward_args)
cross_sentences = px_zs[0][1].mean.argmax(dim=-1).squeeze(0)
cross_images = px_zs[1][0].mean.squeeze(0)
# calculate correlation with CCA:
i2t.append(calculate_corr(images, fn_to_emb(cross_sentences)))
s2i.append(calculate_corr(cross_images, fn_to_emb(sentences.int())))
gt.append(calculate_corr(images, fn_to_emb(sentences.int())))
print("Coherence score: \nground truth {:10.9f}, \nimage to sentence {:10.9f}, "
"\nsentence to image {:10.9f}".format(sum(gt) / len(gt),
sum(i2t) / len(gt),
sum(s2i) / len(gt)))
def joint_coherence():
model.eval()
with torch.no_grad():
pzs = model.pz(*model.pz_params).sample([1000])
gen_images = model.vaes[0].dec(pzs)[0].squeeze(1)
gen_sentences = model.vaes[1].dec(pzs)[0].argmax(dim=-1).squeeze(1)
score = calculate_corr(gen_images, fn_to_emb(gen_sentences))
print("joint generation {:10.9f}".format(score))
if __name__ == '__main__':
with Timer('MM-VAE analysis') as t:
print('-' * 89)
cross_coherence()
print('-' * 89)
joint_coherence()
| 5,427 | 36.694444 | 101 | py |
mmvae-public | mmvae-public/src/report/analyse_ms.py | """Calculate cross and joint coherence of trained model on MNIST-SVHN dataset.
Train and evaluate a linear model for latent space digit classification."""
import argparse
import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
# relative import hacks (sorry)
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir) # for bash user
os.chdir(parentdir) # for pycharm user
import models
from helper import Latent_Classifier, SVHN_Classifier, MNIST_Classifier
from utils import Logger, Timer
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Analysing MM-DGM results')
parser.add_argument('--save-dir', type=str, default="",
metavar='N', help='save directory of results')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA use')
cmds = parser.parse_args()
runPath = cmds.save_dir
sys.stdout = Logger('{}/ms_acc.log'.format(runPath))
args = torch.load(runPath + '/args.rar')
# cuda stuff
needs_conversion = cmds.no_cuda and args.cuda
conversion_kwargs = {'map_location': lambda st, loc: st} if needs_conversion else {}
args.cuda = not cmds.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
torch.manual_seed(args.seed)
modelC = getattr(models, 'VAE_{}'.format(args.model))
model = modelC(args)
if args.cuda:
model.cuda()
model.load_state_dict(torch.load(runPath + '/model.rar', **conversion_kwargs), strict=False)
B = 256 # rough batch size heuristic
train_loader, test_loader = model.getDataLoaders(B, device=device)
N = len(test_loader.dataset)
def classify_latents(epochs, option):
model.eval()
vae = unpack_model(option)
if '_' not in args.model:
epochs *= 10 # account for the fact the mnist-svhn has more examples (roughly x10)
classifier = Latent_Classifier(args.latent_dim, 10).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(classifier.parameters(), lr=0.001)
for epoch in range(epochs): # loop over the dataset multiple times
running_loss = 0.0
total_iters = len(train_loader)
print('\n====> Epoch: {:03d} '.format(epoch))
for i, data in enumerate(train_loader):
# get the inputs
x, targets = unpack_data_mlp(data, option)
x, targets = x.to(device), targets.to(device)
with torch.no_grad():
qz_x_params = vae.enc(x)
zs = vae.qz_x(*qz_x_params).rsample()
optimizer.zero_grad()
outputs = classifier(zs)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if (i + 1) % 1000 == 0:
print('iteration {:04d}/{:d}: loss: {:6.3f}'.format(i + 1, total_iters, running_loss / 1000))
running_loss = 0.0
print('Finished Training, calculating test loss...')
classifier.eval()
total = 0
correct = 0
with torch.no_grad():
for i, data in enumerate(test_loader):
x, targets = unpack_data_mlp(data, option)
x, targets = x.to(device), targets.to(device)
qz_x_params = vae.enc(x)
zs = vae.qz_x(*qz_x_params).rsample()
outputs = classifier(zs)
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (predicted == targets).sum().item()
print('The classifier correctly classified {} out of {} examples. Accuracy: '
'{:.2f}%'.format(correct, total, correct / total * 100))
def _maybe_train_or_load_digit_classifier_img(path, epochs):
options = [o for o in ['mnist', 'svhn'] if not os.path.exists(path.format(o))]
for option in options:
print("Cannot find trained {} digit classifier in {}, training...".
format(option, path.format(option)))
classifier = globals()['{}_Classifier'.format(option.upper())]().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(classifier.parameters(), lr=0.001)
for epoch in range(epochs): # loop over the dataset multiple times
running_loss = 0.0
total_iters = len(train_loader)
print('\n====> Epoch: {:03d} '.format(epoch))
for i, data in enumerate(train_loader):
# get the inputs
x, targets = unpack_data_mlp(data, option)
x, targets = x.to(device), targets.to(device)
optimizer.zero_grad()
outputs = classifier(x)
loss = criterion(outputs, targets)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if (i + 1) % 1000 == 0:
print('iteration {:04d}/{:d}: loss: {:6.3f}'.format(i + 1, total_iters, running_loss / 1000))
running_loss = 0.0
print('Finished Training, calculating test loss...')
classifier.eval()
total = 0
correct = 0
with torch.no_grad():
for i, data in enumerate(test_loader):
x, targets = unpack_data_mlp(data, option)
x, targets = x.to(device), targets.to(device)
outputs = classifier(x)
_, predicted = torch.max(outputs.data, 1)
total += targets.size(0)
correct += (predicted == targets).sum().item()
print('The classifier correctly classified {} out of {} examples. Accuracy: '
'{:.2f}%'.format(correct, total, correct / total * 100))
torch.save(classifier.state_dict(), path.format(option))
mnist_net, svhn_net = MNIST_Classifier().to(device), SVHN_Classifier().to(device)
mnist_net.load_state_dict(torch.load(path.format('mnist')))
svhn_net.load_state_dict(torch.load(path.format('svhn')))
return mnist_net, svhn_net
def cross_coherence(epochs):
model.eval()
mnist_net, svhn_net = _maybe_train_or_load_digit_classifier_img("../data/{}_model.pt", epochs=epochs)
mnist_net.eval()
svhn_net.eval()
total = 0
corr_m = 0
corr_s = 0
with torch.no_grad():
for i, data in enumerate(test_loader):
mnist, svhn, targets = unpack_data_mlp(data, option='both')
mnist, svhn, targets = mnist.to(device), svhn.to(device), targets.to(device)
_, px_zs, _ = model([mnist, svhn], 1)
mnist_mnist = mnist_net(px_zs[1][0].mean.squeeze(0))
svhn_svhn = svhn_net(px_zs[0][1].mean.squeeze(0))
_, pred_m = torch.max(mnist_mnist.data, 1)
_, pred_s = torch.max(svhn_svhn.data, 1)
total += targets.size(0)
corr_m += (pred_m == targets).sum().item()
corr_s += (pred_s == targets).sum().item()
print('Cross coherence: \n SVHN -> MNIST {:.2f}% \n MNIST -> SVHN {:.2f}%'.format(
corr_m / total * 100, corr_s / total * 100))
def joint_coherence():
model.eval()
mnist_net, svhn_net = MNIST_Classifier().to(device), SVHN_Classifier().to(device)
mnist_net.load_state_dict(torch.load('../data/mnist_model.pt'))
svhn_net.load_state_dict(torch.load('../data/svhn_model.pt'))
mnist_net.eval()
svhn_net.eval()
total = 0
corr = 0
with torch.no_grad():
pzs = model.pz(*model.pz_params).sample([10000])
mnist = model.vaes[0].dec(pzs)
svhn = model.vaes[1].dec(pzs)
mnist_mnist = mnist_net(mnist[0].squeeze(1))
svhn_svhn = svhn_net(svhn[0].squeeze(1))
_, pred_m = torch.max(mnist_mnist.data, 1)
_, pred_s = torch.max(svhn_svhn.data, 1)
total += pred_m.size(0)
corr += (pred_m == pred_s).sum().item()
print('Joint coherence: {:.2f}%'.format(corr / total * 100))
def unpack_data_mlp(dataB, option='both'):
if len(dataB[0]) == 2:
if option == 'both':
return dataB[0][0], dataB[1][0], dataB[1][1]
elif option == 'svhn':
return dataB[1][0], dataB[1][1]
elif option == 'mnist':
return dataB[0][0], dataB[0][1]
else:
return dataB
def unpack_model(option='svhn'):
if 'mnist_svhn' in args.model:
return model.vaes[1] if option == 'svhn' else model.vaes[0]
else:
return model
if __name__ == '__main__':
with Timer('MM-VAE analysis') as t:
print('-' * 25 + 'latent classification accuracy' + '-' * 25)
print("Calculating latent classification accuracy for single MNIST VAE...")
classify_latents(epochs=30, option='mnist')
# #
print("\n Calculating latent classification accuracy for single SVHN VAE...")
classify_latents(epochs=30, option='svhn')
#
print('\n' + '-' * 45 + 'cross coherence' + '-' * 45)
cross_coherence(epochs=30)
#
print('\n' + '-' * 45 + 'joint coherence' + '-' * 45)
joint_coherence()
| 9,192 | 36.831276 | 113 | py |
mmvae-public | mmvae-public/src/report/helper.py | import json
import os
import pickle
from collections import Counter, OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gensim.models import FastText
from nltk.tokenize import sent_tokenize, word_tokenize
from scipy.linalg import eig
from skimage.filters import threshold_yen as threshold
class OrderedCounter(Counter, OrderedDict):
"""Counter that remembers the order elements are first encountered."""
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
def __reduce__(self):
return self.__class__, (OrderedDict(self),)
def cca(views, k=None, eps=1e-12):
"""Compute (multi-view) CCA
Args:
views (list): list of views where each view `v_i` is of size `N x o_i`
k (int): joint projection dimension | if None, find using Otsu
eps (float): regulariser [default: 1e-12]
Returns:
correlations: correlations along each of the k dimensions
projections: projection matrices for each view
"""
V = len(views) # number of views
N = views[0].size(0) # number of observations (same across views)
os = [v.size(1) for v in views]
kmax = np.min(os)
ocum = np.cumsum([0] + os)
os_sum = sum(os)
A, B = np.zeros([os_sum, os_sum]), np.zeros([os_sum, os_sum])
for i in range(V):
v_i = views[i]
v_i_bar = v_i - v_i.mean(0).expand_as(v_i) # centered, N x o_i
C_ij = (1.0 / (N - 1)) * torch.mm(v_i_bar.t(), v_i_bar)
# A[ocum[i]:ocum[i + 1], ocum[i]:ocum[i + 1]] = C_ij
B[ocum[i]:ocum[i + 1], ocum[i]:ocum[i + 1]] = C_ij
for j in range(i + 1, V):
v_j = views[j] # N x o_j
v_j_bar = v_j - v_j.mean(0).expand_as(v_j) # centered
C_ij = (1.0 / (N - 1)) * torch.mm(v_i_bar.t(), v_j_bar)
A[ocum[i]:ocum[i + 1], ocum[j]:ocum[j + 1]] = C_ij
A[ocum[j]:ocum[j + 1], ocum[i]:ocum[i + 1]] = C_ij.t()
A[np.diag_indices_from(A)] += eps
B[np.diag_indices_from(B)] += eps
eigenvalues, eigenvectors = eig(A, B)
# TODO: sanity check to see that all eigenvalues are e+0i
idx = eigenvalues.argsort()[::-1] # sort descending
eigenvalues = eigenvalues[idx] # arrange in descending order
if k is None:
t = threshold(eigenvalues.real[:kmax])
k = np.abs(np.asarray(eigenvalues.real[0::10]) - t).argmin() * 10 # closest k % 10 == 0 idx
print('k unspecified, (auto-)choosing:', k)
eigenvalues = eigenvalues[idx[:k]]
eigenvectors = eigenvectors[:, idx[:k]]
correlations = torch.from_numpy(eigenvalues.real).type_as(views[0])
proj_matrices = torch.split(torch.from_numpy(eigenvectors.real).type_as(views[0]), os)
return correlations, proj_matrices
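# The block matrices above set up the multi-view generalised eigenvalue problem
# A w = lambda B w, with A holding the cross-view covariances and B the
# (eps-regularised) within-view covariances; the top-k eigenvectors, split per view,
# give the projection matrices and the eigenvalues the canonical correlations.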
def fetch_emb(lenWindow, minOccur, emb_path, vocab_path, RESET):
if not os.path.exists(emb_path) or RESET:
with open('../data/cub/text_trainvalclasses.txt', 'r') as file:
text = file.read()
sentences = sent_tokenize(text)
texts = []
for i, line in enumerate(sentences):
words = word_tokenize(line)
texts.append(words)
model = FastText(size=300, window=lenWindow, min_count=minOccur)
model.build_vocab(sentences=texts)
model.train(sentences=texts, total_examples=len(texts), epochs=10)
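        # note: written against the gensim 3.x FastText API (size=..., model[word]);
        # under gensim 4+ these become vector_size=... and model.wv[word].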
with open(vocab_path, 'rb') as file:
vocab = json.load(file)
i2w = vocab['i2w']
base = np.ones((300,), dtype=np.float32)
emb = [base * (i - 1) for i in range(3)]
for word in list(i2w.values())[3:]:
emb.append(model[word])
emb = np.array(emb)
with open(emb_path, 'wb') as file:
pickle.dump(emb, file)
else:
with open(emb_path, 'rb') as file:
emb = pickle.load(file)
return emb
def fetch_weights(weights_path, vocab_path, RESET, a=1e-3):
if not os.path.exists(weights_path) or RESET:
with open('../data/cub/text_trainvalclasses.txt', 'r') as file:
text = file.read()
sentences = sent_tokenize(text)
occ_register = OrderedCounter()
for i, line in enumerate(sentences):
words = word_tokenize(line)
occ_register.update(words)
with open(vocab_path, 'r') as file:
vocab = json.load(file)
w2i = vocab['w2i']
weights = np.zeros(len(w2i))
total_occ = sum(list(occ_register.values()))
exc_occ = 0
for w, occ in occ_register.items():
if w in w2i.keys():
weights[w2i[w]] = a / (a + occ / total_occ)
else:
exc_occ += occ
weights[0] = a / (a + exc_occ / total_occ)
with open(weights_path, 'wb') as file:
pickle.dump(weights, file)
else:
with open(weights_path, 'rb') as file:
weights = pickle.load(file)
return weights
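# each in-vocabulary word gets weight a / (a + p(w)) with p(w) its corpus frequency --
# a smooth-inverse-frequency style weighting that down-weights very common words;
# the mass of all excluded words is folded into index 0 (the '<exc>' token).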
def fetch_pc(emb, weights, train_loader, pc_path, RESET):
sentences = torch.cat([d[1][0] for d in train_loader]).int()
emb_dataset = apply_weights(emb, weights, sentences)
if not os.path.exists(pc_path) or RESET:
_, _, V = torch.svd(emb_dataset - emb_dataset.mean(dim=0), some=True)
v = V[:, 0].unsqueeze(-1)
u = v.mm(v.t())
with open(pc_path, 'wb') as file:
pickle.dump(u, file)
else:
with open(pc_path, 'rb') as file:
u = pickle.load(file)
return u
def apply_weights(emb, weights, data):
fn_trun = lambda s: s[:np.where(s == 2)[0][0] + 1] if 2 in s else s
batch_emb = []
for sent_i in data:
emb_stacked = torch.stack([emb[idx] for idx in fn_trun(sent_i)])
weights_stacked = torch.stack([weights[idx] for idx in fn_trun(sent_i)])
batch_emb.append(torch.sum(emb_stacked * weights_stacked.unsqueeze(-1), dim=0) / emb_stacked.shape[0])
return torch.stack(batch_emb, dim=0)
def apply_pc(weighted_emb, u):
return torch.cat([e - torch.matmul(u, e.unsqueeze(-1)).squeeze() for e in weighted_emb.split(2048, 0)])
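# removes each embedding's projection onto the dominant principal direction u
# (computed in fetch_pc from the training sentences), processing rows in chunks of
# 2048 to bound memory.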
class Latent_Classifier(nn.Module):
""" Generate latent parameters for SVHN image data. """
def __init__(self, in_n, out_n):
super(Latent_Classifier, self).__init__()
self.mlp = nn.Linear(in_n, out_n)
def forward(self, x):
return self.mlp(x)
class SVHN_Classifier(nn.Module):
def __init__(self):
super(SVHN_Classifier, self).__init__()
self.conv1 = nn.Conv2d(3, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(500, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 500)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
class MNIST_Classifier(nn.Module):
def __init__(self):
super(MNIST_Classifier, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=-1)
| 7,712 | 32.977974 | 110 | py |
mmvae-public | mmvae-public/src/report/calculate_likelihoods.py | """Calculate data marginal likelihood p(x) evaluated on the trained generative model."""
import os
import sys
import argparse
import numpy as np
import torch
from torchvision.utils import save_image
# relative import hacks (sorry)
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir) # for bash user
os.chdir(parentdir) # for pycharm user
import models
from utils import Logger, Timer, unpack_data, log_mean_exp
torch.backends.cudnn.benchmark = True
parser = argparse.ArgumentParser(description='Analysing MM-DGM results')
parser.add_argument('--save-dir', type=str, default="",
metavar='N', help='save directory of results')
parser.add_argument('--iwae-samples', type=int, default=1000, metavar='I',
help='number of samples to estimate marginal log likelihood (default: 1000)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA use')
cmds = parser.parse_args()
runPath = cmds.save_dir
sys.stdout = Logger('{}/llik.log'.format(runPath))
args = torch.load(runPath + '/args.rar')
# cuda stuff
needs_conversion = cmds.no_cuda and args.cuda
conversion_kwargs = {'map_location': lambda st, loc: st} if needs_conversion else {}
args.cuda = not cmds.no_cuda and torch.cuda.is_available()
device = torch.device("cuda" if args.cuda else "cpu")
torch.manual_seed(args.seed)
modelC = getattr(models, 'VAE_{}'.format(args.model))
model = modelC(args)
if args.cuda:
model.cuda()
model.load_state_dict(torch.load(runPath + '/model.rar', **conversion_kwargs), strict=False)
B = 12000 // cmds.iwae_samples # rough batch size heuristic
train_loader, test_loader = model.getDataLoaders(B, device=device)
N = len(test_loader.dataset)
def m_iwae(qz_xs, px_zs, zss, x):
"""IWAE estimate for log p_\theta(x) for multi-modal vae -- fully vectorised"""
lws = []
for r, qz_x in enumerate(qz_xs):
lpz = model.pz(*model.pz_params).log_prob(zss[r]).sum(-1)
lqz_x = log_mean_exp(torch.stack([qz_x.log_prob(zss[r]).sum(-1) for qz_x in qz_xs]))
lpx_z = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1)
.mul(model.vaes[d].llik_scaling).sum(-1)
for d, px_z in enumerate(px_zs[r])]
lpx_z = torch.stack(lpx_z).sum(0)
lw = lpz + lpx_z - lqz_x
lws.append(lw)
return log_mean_exp(torch.cat(lws)).sum()
def iwae(qz_x, px_z, zs, x):
"""IWAE estimate for log p_\theta(x) -- fully vectorised."""
lpz = model.pz(*model.pz_params).log_prob(zs).sum(-1)
lpx_z = px_z.log_prob(x).view(*px_z.batch_shape[:2], -1) * model.llik_scaling
lqz_x = qz_x.log_prob(zs).sum(-1)
return log_mean_exp(lpz + lpx_z.sum(-1) - lqz_x).sum()
@torch.no_grad()
def joint_elbo(K):
model.eval()
llik = 0
    # resolve the (m_)iwae estimator defined above from module globals and feed it
    # the model outputs directly
    obj = globals()[('m_' if hasattr(model, 'vaes') else '') + 'iwae']
    for dataT in test_loader:
        data = unpack_data(dataT, device=device)
        llik += obj(*model(data, K), data).item()
print('Marginal Log Likelihood of joint {} (IWAE, K = {}): {:.4f}'
.format(model.modelName, K, llik / N))
def cross_iwaes(qz_xs, px_zs, zss, x):
lws = []
for e, _px_zs in enumerate(px_zs): # rows are encoders
lpz = model.pz(*model.pz_params).log_prob(zss[e]).sum(-1)
lqz_x = qz_xs[e].log_prob(zss[e]).sum(-1)
_lpx_zs = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1).sum(-1)
for d, px_z in enumerate(_px_zs)]
lws.append([log_mean_exp(_lpx_z + lpz - lqz_x).sum() for _lpx_z in _lpx_zs])
return lws
def individual_iwaes(qz_xs, px_zs, zss, x):
lws = []
for d, _px_zs in enumerate(np.array(px_zs).T): # rows are decoders now
lw = [px_z.log_prob(x[d]).view(*px_z.batch_shape[:2], -1).sum(-1)
+ model.pz(*model.pz_params).log_prob(zss[e]).sum(-1)
- log_mean_exp(torch.stack([qz_x.log_prob(zss[e]).sum(-1) for qz_x in qz_xs]))
for e, px_z in enumerate(_px_zs)]
lw = torch.cat(lw)
lws.append(log_mean_exp(lw).sum())
return lws
@torch.no_grad()
def m_llik_eval(K):
model.eval()
llik_joint = 0
llik_synergy = np.array([0 for _ in model.vaes])
lliks_cross = np.array([[0 for _ in model.vaes] for _ in model.vaes])
for dataT in test_loader:
data = unpack_data(dataT, device=device)
qz_xs, px_zs, zss = model(data, K)
objs = individual_iwaes(qz_xs, px_zs, zss, data)
objs_cross = cross_iwaes(qz_xs, px_zs, zss, data)
llik_joint += m_iwae(qz_xs, px_zs, zss, data)
llik_synergy = llik_synergy + np.array(objs)
lliks_cross = lliks_cross + np.array(objs_cross)
print('Marginal Log Likelihood of joint {} (IWAE, K = {}): {:.4f}'
.format(model.modelName, K, llik_joint / N))
print('-' * 89)
for i, llik in enumerate(llik_synergy):
print('Marginal Log Likelihood of {} from {} (IWAE, K = {}): {:.4f}'
.format(model.vaes[i].modelName, model.modelName, K, (llik / N).item()))
print('-' * 89)
for e, _lliks_cross in enumerate(lliks_cross):
for d, llik_cross in enumerate(_lliks_cross):
print('Marginal Log Likelihood of {} from {} (IWAE, K = {}): {:.4f}'
.format(model.vaes[d].modelName, model.vaes[e].modelName, K, (llik_cross / N).item()))
print('-' * 89)
@torch.no_grad()
def llik_eval(K):
model.eval()
llik_joint = 0
for dataT in test_loader:
data = unpack_data(dataT, device=device)
qz_xs, px_zs, zss = model(data, K)
llik_joint += iwae(qz_xs, px_zs, zss, data)
print('Marginal Log Likelihood of joint {} (IWAE, K = {}): {:.4f}'
.format(model.modelName, K, llik_joint / N))
@torch.no_grad()
def generate_sparse(D, steps, J):
"""generate `steps` perturbations for all `D` latent dimensions on `J` datapoints. """
model.eval()
for i, dataT in enumerate(test_loader):
        data = unpack_data(dataT, device=device)
qz_xs, _, zss = model(data, args.K)
for i, (qz_x, zs) in enumerate(zip(qz_xs, zss)):
embs = []
# for delta in torch.linspace(0.01, 0.99, steps=steps):
for delta in torch.linspace(-5, 5, steps=steps):
for d in range(D):
mod_emb = qz_x.mean + torch.zeros_like(qz_x.mean)
mod_emb[:, d] += model.vaes[i].pz(*model.vaes[i].pz_params).stddev[:, d] * delta
embs.append(mod_emb)
embs = torch.stack(embs).transpose(0, 1).contiguous()
for r in range(2):
samples = model.vaes[r].px_z(*model.vaes[r].dec(embs.view(-1, D)[:((J) * steps * D)])).mean
save_image(samples.cpu(), os.path.join(runPath, 'latent-traversals-{}x{}.png'.format(i, r)), nrow=D)
break
if __name__ == '__main__':
with Timer('MM-VAE analysis') as t:
# likelihood evaluation
print('-' * 89)
eval = locals()[('m_' if hasattr(model, 'vaes') else '') + 'llik_eval']
eval(cmds.iwae_samples)
print('-' * 89)
| 7,240 | 38.785714 | 116 | py |
mmvae-public | mmvae-public/src/models/vae_svhn.py | # SVHN model specification
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
from numpy import sqrt
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
from torchvision.utils import save_image, make_grid
from utils import Constants
from vis import plot_embeddings, plot_kls_df
from .vae import VAE
# Constants
dataSize = torch.Size([3, 32, 32])
imgChans = dataSize[0]
fBase = 32 # base size of filter channels
# Classes
class Enc(nn.Module):
""" Generate latent parameters for SVHN image data. """
def __init__(self, latent_dim):
super(Enc, self).__init__()
self.enc = nn.Sequential(
# input size: 3 x 32 x 32
nn.Conv2d(imgChans, fBase, 4, 2, 1, bias=True),
nn.ReLU(True),
# size: (fBase) x 16 x 16
nn.Conv2d(fBase, fBase * 2, 4, 2, 1, bias=True),
nn.ReLU(True),
# size: (fBase * 2) x 8 x 8
nn.Conv2d(fBase * 2, fBase * 4, 4, 2, 1, bias=True),
nn.ReLU(True),
# size: (fBase * 4) x 4 x 4
)
self.c1 = nn.Conv2d(fBase * 4, latent_dim, 4, 1, 0, bias=True)
self.c2 = nn.Conv2d(fBase * 4, latent_dim, 4, 1, 0, bias=True)
# c1, c2 size: latent_dim x 1 x 1
def forward(self, x):
e = self.enc(x)
lv = self.c2(e).squeeze()
return self.c1(e).squeeze(), F.softmax(lv, dim=-1) * lv.size(-1) + Constants.eta
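    # the encoder returns (loc, scale) for the Laplace posterior: softmax over the
    # last dim times its size keeps the scale positive (summing to latent_dim), with
    # Constants.eta guarding against a zero scale.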
class Dec(nn.Module):
""" Generate a SVHN image given a sample from the latent space. """
def __init__(self, latent_dim):
super(Dec, self).__init__()
self.dec = nn.Sequential(
nn.ConvTranspose2d(latent_dim, fBase * 4, 4, 1, 0, bias=True),
nn.ReLU(True),
# size: (fBase * 4) x 4 x 4
nn.ConvTranspose2d(fBase * 4, fBase * 2, 4, 2, 1, bias=True),
nn.ReLU(True),
# size: (fBase * 2) x 8 x 8
nn.ConvTranspose2d(fBase * 2, fBase, 4, 2, 1, bias=True),
nn.ReLU(True),
# size: (fBase) x 16 x 16
nn.ConvTranspose2d(fBase, imgChans, 4, 2, 1, bias=True),
nn.Sigmoid()
# Output size: 3 x 32 x 32
)
def forward(self, z):
z = z.unsqueeze(-1).unsqueeze(-1) # fit deconv layers
out = self.dec(z.view(-1, *z.size()[-3:]))
out = out.view(*z.size()[:-3], *out.size()[1:])
# consider also predicting the length scale
return out, torch.tensor(0.75).to(z.device) # mean, length scale
class SVHN(VAE):
""" Derive a specific sub-class of a VAE for SVHN """
def __init__(self, params):
super(SVHN, self).__init__(
dist.Laplace, # prior
dist.Laplace, # likelihood
dist.Laplace, # posterior
Enc(params.latent_dim),
Dec(params.latent_dim),
params
)
grad = {'requires_grad': params.learn_prior}
self._pz_params = nn.ParameterList([
nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False), # mu
nn.Parameter(torch.zeros(1, params.latent_dim), **grad) # logvar
])
self.modelName = 'svhn'
self.dataSize = dataSize
self.llik_scaling = 1.
@property
def pz_params(self):
return self._pz_params[0], F.softmax(self._pz_params[1], dim=1) * self._pz_params[1].size(-1)
@staticmethod
def getDataLoaders(batch_size, shuffle=True, device='cuda'):
kwargs = {'num_workers': 1, 'pin_memory': True} if device == 'cuda' else {}
tx = transforms.ToTensor()
train = DataLoader(datasets.SVHN('../data', split='train', download=True, transform=tx),
batch_size=batch_size, shuffle=shuffle, **kwargs)
test = DataLoader(datasets.SVHN('../data', split='test', download=True, transform=tx),
batch_size=batch_size, shuffle=shuffle, **kwargs)
return train, test
def generate(self, runPath, epoch):
N, K = 64, 9
samples = super(SVHN, self).generate(N, K).cpu()
# wrangle things so they come out tiled
samples = samples.view(K, N, *samples.size()[1:]).transpose(0, 1)
s = [make_grid(t, nrow=int(sqrt(K)), padding=0) for t in samples]
save_image(torch.stack(s),
'{}/gen_samples_{:03d}.png'.format(runPath, epoch),
nrow=int(sqrt(N)))
def reconstruct(self, data, runPath, epoch):
recon = super(SVHN, self).reconstruct(data[:8])
comp = torch.cat([data[:8], recon]).data.cpu()
save_image(comp, '{}/recon_{:03d}.png'.format(runPath, epoch))
def analyse(self, data, runPath, epoch):
zemb, zsl, kls_df = super(SVHN, self).analyse(data, K=10)
labels = ['Prior', self.modelName.lower()]
plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
| 5,053 | 37 | 101 | py |
mmvae-public | mmvae-public/src/models/mmvae_cub_images_sentences.py | # cub multi-modal model specification
import matplotlib.pyplot as plt
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from numpy import sqrt, prod
from torch.utils.data import DataLoader
from torchnet.dataset import TensorDataset, ResampleDataset
from torchvision.utils import save_image, make_grid
from utils import Constants
from vis import plot_embeddings, plot_kls_df
from .mmvae import MMVAE
from .vae_cub_image import CUB_Image
from .vae_cub_sent import CUB_Sentence
# Constants
maxSentLen = 32
minOccur = 3
# This is required because there are 10 captions per image.
# Allows easier reuse of the same image for the corresponding set of captions.
def resampler(dataset, idx):
return idx // 10
class CUB_Image_Sentence(MMVAE):
def __init__(self, params):
super(CUB_Image_Sentence, self).__init__(dist.Laplace, params, CUB_Image, CUB_Sentence)
grad = {'requires_grad': params.learn_prior}
self._pz_params = nn.ParameterList([
nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False), # mu
nn.Parameter(torch.zeros(1, params.latent_dim), **grad) # logvar
])
self.vaes[0].llik_scaling = self.vaes[1].maxSentLen / prod(self.vaes[0].dataSize) \
if params.llik_scaling == 0 else params.llik_scaling
for vae in self.vaes:
vae._pz_params = self._pz_params
self.modelName = 'cubIS'
self.i2w = self.vaes[1].load_vocab()
@property
def pz_params(self):
return self._pz_params[0], \
F.softmax(self._pz_params[1], dim=1) * self._pz_params[1].size(1) + Constants.eta
def getDataLoaders(self, batch_size, shuffle=True, device='cuda'):
# load base datasets
t1, s1 = self.vaes[0].getDataLoaders(batch_size, shuffle, device)
t2, s2 = self.vaes[1].getDataLoaders(batch_size, shuffle, device)
kwargs = {'num_workers': 2, 'pin_memory': True} if device == 'cuda' else {}
train_loader = DataLoader(TensorDataset([
ResampleDataset(t1.dataset, resampler, size=len(t1.dataset) * 10),
t2.dataset]), batch_size=batch_size, shuffle=shuffle, **kwargs)
test_loader = DataLoader(TensorDataset([
ResampleDataset(s1.dataset, resampler, size=len(s1.dataset) * 10),
s2.dataset]), batch_size=batch_size, shuffle=shuffle, **kwargs)
return train_loader, test_loader
def generate(self, runPath, epoch):
N = 8
samples = super(CUB_Image_Sentence, self).generate(N)
images, captions = [sample.data.cpu() for sample in samples]
captions = self._sent_preprocess(captions)
fig = plt.figure(figsize=(8, 6))
for i, (image, caption) in enumerate(zip(images, captions)):
fig = self._imshow(image, caption, i, fig, N)
plt.savefig('{}/gen_samples_{:03d}.png'.format(runPath, epoch))
plt.close()
def reconstruct(self, raw_data, runPath, epoch):
N = 8
recons_mat = super(CUB_Image_Sentence, self).reconstruct([d[:N] for d in raw_data])
fns = [lambda images: images.data.cpu(), lambda sentences: self._sent_preprocess(sentences)]
for r, recons_list in enumerate(recons_mat):
for o, recon in enumerate(recons_list):
data = fns[r](raw_data[r][:N])
recon = fns[o](recon.squeeze())
if r != o:
fig = plt.figure(figsize=(8, 6))
for i, (_data, _recon) in enumerate(zip(data, recon)):
image, caption = (_data, _recon) if r == 0 else (_recon, _data)
fig = self._imshow(image, caption, i, fig, N)
plt.savefig('{}/recon_{}x{}_{:03d}.png'.format(runPath, r, o, epoch))
plt.close()
else:
if r == 0:
comp = torch.cat([data, recon])
save_image(comp, '{}/recon_{}x{}_{:03d}.png'.format(runPath, r, o, epoch))
else:
with open('{}/recon_{}x{}_{:03d}.txt'.format(runPath, r, o, epoch), "w+") as txt_file:
for r_sent, d_sent in zip(recon, data):
txt_file.write('[DATA] ==> {}\n'.format(' '.join(self.i2w[str(i)] for i in d_sent)))
txt_file.write('[RECON] ==> {}\n\n'.format(' '.join(self.i2w[str(i)] for i in r_sent)))
def analyse(self, data, runPath, epoch):
zemb, zsl, kls_df = super(CUB_Image_Sentence, self).analyse(data, K=10)
labels = ['Prior', *[vae.modelName.lower() for vae in self.vaes]]
plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
def _sent_preprocess(self, sentences):
"""make sure raw data is always passed as dim=2 to avoid argmax.
last dimension must always be word embedding."""
if len(sentences.shape) > 2:
sentences = sentences.argmax(-1).squeeze()
return [self.vaes[1].fn_trun(s) for s in self.vaes[1].fn_2i(sentences)]
def _imshow(self, image, caption, i, fig, N):
"""Imshow for Tensor."""
ax = fig.add_subplot(N // 2, 4, i * 2 + 1)
ax.axis('off')
        image = image.numpy().transpose((1, 2, 0)) # CHW -> HWC for matplotlib
plt.imshow(image)
ax = fig.add_subplot(N // 2, 4, i * 2 + 2)
pos = ax.get_position()
ax.axis('off')
plt.text(
x=0.5 * (pos.x0 + pos.x1),
y=0.5 * (pos.y0 + pos.y1),
s='{}'.format(
' '.join(self.i2w[str(i)] + '\n' if (n + 1) % 5 == 0
else self.i2w[str(i)] for n, i in enumerate(caption))),
fontsize=6,
verticalalignment='center',
horizontalalignment='center'
)
return fig
| 6,015 | 42.912409 | 119 | py |
mmvae-public | mmvae-public/src/models/vae.py | # Base VAE class definition
import torch
import torch.nn as nn
from utils import get_mean, kl_divergence
from vis import embed_umap, tensors_to_df
class VAE(nn.Module):
def __init__(self, prior_dist, likelihood_dist, post_dist, enc, dec, params):
super(VAE, self).__init__()
self.pz = prior_dist
self.px_z = likelihood_dist
self.qz_x = post_dist
self.enc = enc
self.dec = dec
self.modelName = None
self.params = params
self._pz_params = None # defined in subclass
self._qz_x_params = None # populated in `forward`
self.llik_scaling = 1.0
@property
def pz_params(self):
return self._pz_params
@property
def qz_x_params(self):
if self._qz_x_params is None:
            raise NameError("qz_x params not initialised yet!")
return self._qz_x_params
@staticmethod
def getDataLoaders(batch_size, shuffle=True, device="cuda"):
# handle merging individual datasets appropriately in sub-class
raise NotImplementedError
def forward(self, x, K=1):
self._qz_x_params = self.enc(x)
qz_x = self.qz_x(*self._qz_x_params)
zs = qz_x.rsample(torch.Size([K]))
px_z = self.px_z(*self.dec(zs))
return qz_x, px_z, zs
def generate(self, N, K):
self.eval()
with torch.no_grad():
pz = self.pz(*self.pz_params)
latents = pz.rsample(torch.Size([N]))
px_z = self.px_z(*self.dec(latents))
data = px_z.sample(torch.Size([K]))
return data.view(-1, *data.size()[3:])
def reconstruct(self, data):
self.eval()
with torch.no_grad():
qz_x = self.qz_x(*self.enc(data))
latents = qz_x.rsample() # no dim expansion
px_z = self.px_z(*self.dec(latents))
recon = get_mean(px_z)
return recon
def analyse(self, data, K):
self.eval()
with torch.no_grad():
qz_x, _, zs = self.forward(data, K=K)
pz = self.pz(*self.pz_params)
zss = [pz.sample(torch.Size([K, data.size(0)])).view(-1, pz.batch_shape[-1]),
zs.view(-1, zs.size(-1))]
zsl = [torch.zeros(zs.size(0)).fill_(i) for i, zs in enumerate(zss)]
kls_df = tensors_to_df(
[kl_divergence(qz_x, pz).cpu().numpy()],
head='KL',
keys=[r'KL$(q(z|x)\,||\,p(z))$'],
ax_names=['Dimensions', r'KL$(q\,||\,p)$']
)
return embed_umap(torch.cat(zss, 0).cpu().numpy()), \
torch.cat(zsl, 0).cpu().numpy(), \
kls_df
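# Minimal usage sketch (illustrative; assumes a concrete subclass such as the MNIST model
# defined in this package and a `params` object with the fields those subclasses expect):
# model = MNIST(params)
# qz_x, px_z, zs = model(x, K=5)
# zs has shape (K, batch_size, latent_dim); px_z is the decoder likelihood over x, and an
# ELBO-style objective weighs px_z.log_prob(x) against kl_divergence(qz_x, prior).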
| 2,674 | 32.024691 | 89 | py |
mmvae-public | mmvae-public/src/models/vae_cub_image.py | # CUB Image model specification
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from numpy import sqrt
from torchvision import datasets, transforms
from torchvision.utils import make_grid, save_image
from utils import Constants
from vis import plot_embeddings, plot_kls_df
from .vae import VAE
# Constants
imgChans = 3
fBase = 64
# Classes
class Enc(nn.Module):
""" Generate latent parameters for CUB image data. """
def __init__(self, latentDim):
super(Enc, self).__init__()
modules = [
            # input size: 3 x 64 x 64
nn.Conv2d(imgChans, fBase, 4, 2, 1, bias=True),
nn.ReLU(True),
            # size: (fBase) x 32 x 32
nn.Conv2d(fBase, fBase * 2, 4, 2, 1, bias=True),
nn.ReLU(True),
            # size: (fBase * 2) x 16 x 16
nn.Conv2d(fBase * 2, fBase * 4, 4, 2, 1, bias=True),
nn.ReLU(True),
            # size: (fBase * 4) x 8 x 8
nn.Conv2d(fBase * 4, fBase * 8, 4, 2, 1, bias=True),
nn.ReLU(True)]
# size: (fBase * 8) x 4 x 4
self.enc = nn.Sequential(*modules)
self.c1 = nn.Conv2d(fBase * 8, latentDim, 4, 1, 0, bias=True)
self.c2 = nn.Conv2d(fBase * 8, latentDim, 4, 1, 0, bias=True)
# c1, c2 size: latentDim x 1 x 1
def forward(self, x):
e = self.enc(x)
return self.c1(e).squeeze(), F.softplus(self.c2(e)).squeeze() + Constants.eta
class Dec(nn.Module):
""" Generate an image given a sample from the latent space. """
def __init__(self, latentDim):
super(Dec, self).__init__()
modules = [nn.ConvTranspose2d(latentDim, fBase * 8, 4, 1, 0, bias=True),
nn.ReLU(True), ]
modules.extend([
nn.ConvTranspose2d(fBase * 8, fBase * 4, 4, 2, 1, bias=True),
nn.ReLU(True),
            # size: (fBase * 4) x 8 x 8
nn.ConvTranspose2d(fBase * 4, fBase * 2, 4, 2, 1, bias=True),
nn.ReLU(True),
            # size: (fBase * 2) x 16 x 16
nn.ConvTranspose2d(fBase * 2, fBase, 4, 2, 1, bias=True),
nn.ReLU(True),
            # size: (fBase) x 32 x 32
nn.ConvTranspose2d(fBase, imgChans, 4, 2, 1, bias=True),
nn.Sigmoid()
            # Output size: 3 x 64 x 64
])
self.dec = nn.Sequential(*modules)
def forward(self, z):
z = z.unsqueeze(-1).unsqueeze(-1) # fit deconv layers
out = self.dec(z.view(-1, *z.size()[-3:]))
out = out.view(*z.size()[:-3], *out.size()[1:])
return out, torch.tensor(0.01).to(z.device)
class CUB_Image(VAE):
""" Derive a specific sub-class of a VAE for a CNN sentence model. """
def __init__(self, params):
super(CUB_Image, self).__init__(
dist.Laplace, # prior
dist.Laplace, # likelihood
dist.Laplace, # posterior
Enc(params.latent_dim),
Dec(params.latent_dim),
params
)
grad = {'requires_grad': params.learn_prior}
self._pz_params = nn.ParameterList([
nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False), # mu
nn.Parameter(torch.zeros(1, params.latent_dim), **grad) # logvar
])
self.modelName = 'cubI'
self.dataSize = torch.Size([3, 64, 64])
self.llik_scaling = 1.
@property
def pz_params(self):
return self._pz_params[0], F.softplus(self._pz_params[1]) + Constants.eta
# remember that when combining with captions, this should be x10
def getDataLoaders(self, batch_size, shuffle=True, device="cuda"):
kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
tx = transforms.Compose([transforms.Resize([64, 64]), transforms.ToTensor()])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder('../data/cub/train', transform=tx),
batch_size=batch_size, shuffle=shuffle, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.ImageFolder('../data/cub/test', transform=tx),
batch_size=batch_size, shuffle=shuffle, **kwargs)
return train_loader, test_loader
def generate(self, runPath, epoch):
N, K = 64, 9
samples = super(CUB_Image, self).generate(N, K).data.cpu()
# wrangle things so they come out tiled
samples = samples.view(K, N, *samples.size()[1:]).transpose(0, 1)
s = [make_grid(t, nrow=int(sqrt(K)), padding=0) for t in samples.data.cpu()]
save_image(torch.stack(s),
'{}/gen_samples_{:03d}.png'.format(runPath, epoch),
nrow=int(sqrt(N)))
def reconstruct(self, data, runPath, epoch):
recon = super(CUB_Image, self).reconstruct(data[:8])
comp = torch.cat([data[:8], recon])
save_image(comp.data.cpu(), '{}/recon_{:03d}.png'.format(runPath, epoch))
def analyse(self, data, runPath, epoch):
zemb, zsl, kls_df = super(CUB_Image, self).analyse(data, K=10)
labels = ['Prior', self.modelName.lower()]
plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
| 5,350 | 37.221429 | 91 | py |
mmvae-public | mmvae-public/src/models/vae_mnist.py | # MNIST model specification
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
from numpy import prod, sqrt
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torchvision.utils import save_image, make_grid
from utils import Constants
from vis import plot_embeddings, plot_kls_df
from .vae import VAE
# Constants
dataSize = torch.Size([1, 28, 28])
data_dim = int(prod(dataSize))
hidden_dim = 400
def extra_hidden_layer():
return nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.ReLU(True))
# Classes
class Enc(nn.Module):
""" Generate latent parameters for MNIST image data. """
def __init__(self, latent_dim, num_hidden_layers=1):
super(Enc, self).__init__()
modules = []
modules.append(nn.Sequential(nn.Linear(data_dim, hidden_dim), nn.ReLU(True)))
modules.extend([extra_hidden_layer() for _ in range(num_hidden_layers - 1)])
self.enc = nn.Sequential(*modules)
self.fc21 = nn.Linear(hidden_dim, latent_dim)
self.fc22 = nn.Linear(hidden_dim, latent_dim)
def forward(self, x):
e = self.enc(x.view(*x.size()[:-3], -1)) # flatten data
lv = self.fc22(e)
return self.fc21(e), F.softmax(lv, dim=-1) * lv.size(-1) + Constants.eta
class Dec(nn.Module):
""" Generate an MNIST image given a sample from the latent space. """
def __init__(self, latent_dim, num_hidden_layers=1):
super(Dec, self).__init__()
modules = []
modules.append(nn.Sequential(nn.Linear(latent_dim, hidden_dim), nn.ReLU(True)))
modules.extend([extra_hidden_layer() for _ in range(num_hidden_layers - 1)])
self.dec = nn.Sequential(*modules)
self.fc3 = nn.Linear(hidden_dim, data_dim)
def forward(self, z):
p = self.fc3(self.dec(z))
d = torch.sigmoid(p.view(*z.size()[:-1], *dataSize)) # reshape data
d = d.clamp(Constants.eta, 1 - Constants.eta)
return d, torch.tensor(0.75).to(z.device) # mean, length scale
class MNIST(VAE):
""" Derive a specific sub-class of a VAE for MNIST. """
def __init__(self, params):
super(MNIST, self).__init__(
dist.Laplace, # prior
dist.Laplace, # likelihood
dist.Laplace, # posterior
Enc(params.latent_dim, params.num_hidden_layers),
Dec(params.latent_dim, params.num_hidden_layers),
params
)
grad = {'requires_grad': params.learn_prior}
self._pz_params = nn.ParameterList([
nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False), # mu
nn.Parameter(torch.zeros(1, params.latent_dim), **grad) # logvar
])
self.modelName = 'mnist'
self.dataSize = dataSize
self.llik_scaling = 1.
@property
def pz_params(self):
return self._pz_params[0], F.softmax(self._pz_params[1], dim=1) * self._pz_params[1].size(-1)
@staticmethod
def getDataLoaders(batch_size, shuffle=True, device="cuda"):
kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
tx = transforms.ToTensor()
train = DataLoader(datasets.MNIST('../data', train=True, download=True, transform=tx),
batch_size=batch_size, shuffle=shuffle, **kwargs)
test = DataLoader(datasets.MNIST('../data', train=False, download=True, transform=tx),
batch_size=batch_size, shuffle=shuffle, **kwargs)
return train, test
def generate(self, runPath, epoch):
N, K = 64, 9
samples = super(MNIST, self).generate(N, K).cpu()
# wrangle things so they come out tiled
samples = samples.view(K, N, *samples.size()[1:]).transpose(0, 1) # N x K x 1 x 28 x 28
s = [make_grid(t, nrow=int(sqrt(K)), padding=0) for t in samples]
save_image(torch.stack(s),
'{}/gen_samples_{:03d}.png'.format(runPath, epoch),
nrow=int(sqrt(N)))
def reconstruct(self, data, runPath, epoch):
recon = super(MNIST, self).reconstruct(data[:8])
comp = torch.cat([data[:8], recon]).data.cpu()
save_image(comp, '{}/recon_{:03d}.png'.format(runPath, epoch))
def analyse(self, data, runPath, epoch):
zemb, zsl, kls_df = super(MNIST, self).analyse(data, K=10)
labels = ['Prior', self.modelName.lower()]
plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
| 4,623 | 37.857143 | 101 | py |
mmvae-public | mmvae-public/src/models/vae_cub_sent_ft.py | # Sentence model specification - CUB image feature version
import json
import os
import numpy as np
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torch.utils.data import DataLoader
from datasets import CUBSentences
from utils import Constants, FakeCategorical
from .vae import VAE
maxSentLen = 32 # max length of any description for birds dataset
minOccur = 3
embeddingDim = 128
lenWindow = 3
fBase = 32
vocabSize = 1590
vocab_path = '../data/cub/oc:{}_sl:{}_s:{}_w:{}/cub.vocab'.format(minOccur, maxSentLen, 300, lenWindow)
# Classes
class Enc(nn.Module):
""" Generate latent parameters for sentence data. """
def __init__(self, latentDim):
super(Enc, self).__init__()
self.embedding = nn.Embedding(vocabSize, embeddingDim, padding_idx=0)
self.enc = nn.Sequential(
# input size: 1 x 32 x 128
nn.Conv2d(1, fBase, 4, 2, 1, bias=True),
nn.BatchNorm2d(fBase),
nn.ReLU(True),
# size: (fBase) x 16 x 64
nn.Conv2d(fBase, fBase * 2, 4, 2, 1, bias=True),
nn.BatchNorm2d(fBase * 2),
nn.ReLU(True),
# size: (fBase * 2) x 8 x 32
nn.Conv2d(fBase * 2, fBase * 4, 4, 2, 1, bias=True),
nn.BatchNorm2d(fBase * 4),
nn.ReLU(True),
            # size: (fBase * 4) x 4 x 16
nn.Conv2d(fBase * 4, fBase * 8, (1, 4), (1, 2), (0, 1), bias=True),
nn.BatchNorm2d(fBase * 8),
nn.ReLU(True),
# size: (fBase * 8) x 4 x 8
nn.Conv2d(fBase * 8, fBase * 16, (1, 4), (1, 2), (0, 1), bias=True),
nn.BatchNorm2d(fBase * 16),
nn.ReLU(True),
            # size: (fBase * 16) x 4 x 4
)
self.c1 = nn.Conv2d(fBase * 16, latentDim, 4, 1, 0, bias=True)
self.c2 = nn.Conv2d(fBase * 16, latentDim, 4, 1, 0, bias=True)
# c1, c2 size: latentDim x 1 x 1
def forward(self, x):
e = self.enc(self.embedding(x.long()).unsqueeze(1))
mu, logvar = self.c1(e).squeeze(), self.c2(e).squeeze()
return mu, F.softplus(logvar) + Constants.eta
class Dec(nn.Module):
""" Generate a sentence given a sample from the latent space. """
def __init__(self, latentDim):
super(Dec, self).__init__()
self.dec = nn.Sequential(
nn.ConvTranspose2d(latentDim, fBase * 16, 4, 1, 0, bias=True),
nn.BatchNorm2d(fBase * 16),
nn.ReLU(True),
            # size: (fBase * 16) x 4 x 4
nn.ConvTranspose2d(fBase * 16, fBase * 8, (1, 4), (1, 2), (0, 1), bias=True),
nn.BatchNorm2d(fBase * 8),
nn.ReLU(True),
# size: (fBase * 8) x 4 x 8
nn.ConvTranspose2d(fBase * 8, fBase * 4, (1, 4), (1, 2), (0, 1), bias=True),
nn.BatchNorm2d(fBase * 4),
nn.ReLU(True),
            # size: (fBase * 4) x 4 x 16
nn.ConvTranspose2d(fBase * 4, fBase * 2, 4, 2, 1, bias=True),
nn.BatchNorm2d(fBase * 2),
nn.ReLU(True),
            # size: (fBase * 2) x 8 x 32
nn.ConvTranspose2d(fBase * 2, fBase, 4, 2, 1, bias=True),
nn.BatchNorm2d(fBase),
nn.ReLU(True),
            # size: (fBase) x 16 x 64
nn.ConvTranspose2d(fBase, 1, 4, 2, 1, bias=True),
nn.ReLU(True)
            # Output size: 1 x 32 x 128
)
        # inverts the 'embedding' module up to one-hotness
self.toVocabSize = nn.Linear(embeddingDim, vocabSize)
def forward(self, z):
z = z.unsqueeze(-1).unsqueeze(-1) # fit deconv layers
out = self.dec(z.view(-1, *z.size()[-3:])).view(-1, embeddingDim)
return self.toVocabSize(out).view(*z.size()[:-3], maxSentLen, vocabSize),
class CUB_Sentence_ft(VAE):
""" Derive a specific sub-class of a VAE for a sentence model. """
def __init__(self, params):
super(CUB_Sentence_ft, self).__init__(
prior_dist=dist.Normal,
likelihood_dist=FakeCategorical,
post_dist=dist.Normal,
enc=Enc(params.latent_dim),
dec=Dec(params.latent_dim),
params=params)
grad = {'requires_grad': params.learn_prior}
self._pz_params = nn.ParameterList([
nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False), # mu
nn.Parameter(torch.zeros(1, params.latent_dim), **grad) # logvar
])
self.modelName = 'cubSft'
self.llik_scaling = 1.
self.tie_modules()
self.fn_2i = lambda t: t.cpu().numpy().astype(int)
self.fn_trun = lambda s: s[:np.where(s == 2)[0][0] + 1] if 2 in s else s
self.vocab_file = vocab_path
self.maxSentLen = maxSentLen
self.vocabSize = vocabSize
def tie_modules(self):
# This looks dumb, but is actually dumber than you might realise.
# A linear(a, b) module has a [b x a] weight matrix, but an embedding(a, b)
# module has a [a x b] weight matrix. So when we want the transpose at
# decoding time, we just use the weight matrix as is.
self.dec.toVocabSize.weight = self.enc.embedding.weight
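        # Sanity check: nn.Embedding(vocabSize, embeddingDim).weight and
        # nn.Linear(embeddingDim, vocabSize).weight are both [vocabSize, embeddingDim],
        # so sharing one tensor ties the decoder projection to the transposed embedding.
        assert self.dec.toVocabSize.weight.shape == (vocabSize, embeddingDim)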
@property
def pz_params(self):
return self._pz_params[0], F.softplus(self._pz_params[1]) + Constants.eta
@staticmethod
def getDataLoaders(batch_size, shuffle=True, device="cuda"):
kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
tx = lambda data: torch.Tensor(data)
t_data = CUBSentences('../data', split='train', transform=tx, max_sequence_length=maxSentLen)
s_data = CUBSentences('../data', split='test', transform=tx, max_sequence_length=maxSentLen)
train_loader = DataLoader(t_data, batch_size=batch_size, shuffle=shuffle, **kwargs)
test_loader = DataLoader(s_data, batch_size=batch_size, shuffle=shuffle, **kwargs)
return train_loader, test_loader
def reconstruct(self, data, runPath, epoch):
recon = super(CUB_Sentence_ft, self).reconstruct(data[:8]).argmax(dim=-1).squeeze()
recon, data = self.fn_2i(recon), self.fn_2i(data[:8])
recon, data = [self.fn_trun(r) for r in recon], [self.fn_trun(d) for d in data]
i2w = self.load_vocab()
print("\n Reconstruction examples (excluding <PAD>):")
for r_sent, d_sent in zip(recon[:3], data[:3]):
print('[DATA] ==> {}'.format(' '.join(i2w[str(i)] for i in d_sent)))
print('[RECON] ==> {}\n'.format(' '.join(i2w[str(i)] for i in r_sent)))
with open('{}/recon_{:03d}.txt'.format(runPath, epoch), "w+") as txt_file:
for r_sent, d_sent in zip(recon, data):
txt_file.write('[DATA] ==> {}\n'.format(' '.join(i2w[str(i)] for i in d_sent)))
txt_file.write('[RECON] ==> {}\n\n'.format(' '.join(i2w[str(i)] for i in r_sent)))
def generate(self, runPath, epoch):
N, K = 5, 4
i2w = self.load_vocab()
samples = super(CUB_Sentence_ft, self).generate(N, K).argmax(dim=-1).squeeze()
samples = samples.view(K, N, samples.size(-1)).transpose(0, 1) # N x K x 64
samples = [[self.fn_trun(s) for s in ss] for ss in self.fn_2i(samples)]
# samples = [self.fn_trun(s) for s in samples]
print("\n Generated examples (excluding <PAD>):")
for s_sent in samples[0][:3]:
print('[GEN] ==> {}'.format(' '.join(i2w[str(i)] for i in s_sent if i != 0)))
with open('{}/gen_samples_{:03d}.txt'.format(runPath, epoch), "w+") as txt_file:
for s_sents in samples:
for s_sent in s_sents:
txt_file.write('{}\n'.format(' '.join(i2w[str(i)] for i in s_sent)))
txt_file.write('\n')
def analyse(self, data, runPath, epoch):
pass
def load_vocab(self):
# call dataloader function to create vocab file
if not os.path.exists(self.vocab_file):
_, _ = self.getDataLoaders(256)
with open(self.vocab_file, 'r') as vocab_file:
vocab = json.load(vocab_file)
return vocab['i2w']
| 8,185 | 40.135678 | 103 | py |
mmvae-public | mmvae-public/src/models/mmvae.py | # Base MMVAE class definition
from itertools import combinations
import torch
import torch.nn as nn
from utils import get_mean, kl_divergence
from vis import embed_umap, tensors_to_df
class MMVAE(nn.Module):
def __init__(self, prior_dist, params, *vaes):
super(MMVAE, self).__init__()
self.pz = prior_dist
self.vaes = nn.ModuleList([vae(params) for vae in vaes])
self.modelName = None # filled-in per sub-class
self.params = params
self._pz_params = None # defined in subclass
@property
def pz_params(self):
return self._pz_params
@staticmethod
def getDataLoaders(batch_size, shuffle=True, device="cuda"):
# handle merging individual datasets appropriately in sub-class
raise NotImplementedError
def forward(self, x, K=1):
qz_xs, zss = [], []
# initialise cross-modal matrix
px_zs = [[None for _ in range(len(self.vaes))] for _ in range(len(self.vaes))]
for m, vae in enumerate(self.vaes):
qz_x, px_z, zs = vae(x[m], K=K)
qz_xs.append(qz_x)
zss.append(zs)
px_zs[m][m] = px_z # fill-in diagonal
for e, zs in enumerate(zss):
for d, vae in enumerate(self.vaes):
if e != d: # fill-in off-diagonal
px_zs[e][d] = vae.px_z(*vae.dec(zs))
return qz_xs, px_zs, zss
def generate(self, N):
self.eval()
with torch.no_grad():
data = []
pz = self.pz(*self.pz_params)
latents = pz.rsample(torch.Size([N]))
for d, vae in enumerate(self.vaes):
px_z = vae.px_z(*vae.dec(latents))
data.append(px_z.mean.view(-1, *px_z.mean.size()[2:]))
return data # list of generations---one for each modality
def reconstruct(self, data):
self.eval()
with torch.no_grad():
_, px_zs, _ = self.forward(data)
# cross-modal matrix of reconstructions
recons = [[get_mean(px_z) for px_z in r] for r in px_zs]
return recons
def analyse(self, data, K):
self.eval()
with torch.no_grad():
qz_xs, _, zss = self.forward(data, K=K)
pz = self.pz(*self.pz_params)
zss = [pz.sample(torch.Size([K, data[0].size(0)])).view(-1, pz.batch_shape[-1]),
*[zs.view(-1, zs.size(-1)) for zs in zss]]
zsl = [torch.zeros(zs.size(0)).fill_(i) for i, zs in enumerate(zss)]
kls_df = tensors_to_df(
[*[kl_divergence(qz_x, pz).cpu().numpy() for qz_x in qz_xs],
*[0.5 * (kl_divergence(p, q) + kl_divergence(q, p)).cpu().numpy()
for p, q in combinations(qz_xs, 2)]],
head='KL',
keys=[*[r'KL$(q(z|x_{})\,||\,p(z))$'.format(i) for i in range(len(qz_xs))],
*[r'J$(q(z|x_{})\,||\,q(z|x_{}))$'.format(i, j)
for i, j in combinations(range(len(qz_xs)), 2)]],
ax_names=['Dimensions', r'KL$(q\,||\,p)$']
)
return embed_umap(torch.cat(zss, 0).cpu().numpy()), \
torch.cat(zsl, 0).cpu().numpy(), \
kls_df
| 3,238 | 37.105882 | 92 | py |
mmvae-public | mmvae-public/src/models/vae_cub_sent.py | # Sentence model specification - real CUB image version
import os
import json
import numpy as np
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from torch.utils.data import DataLoader
from datasets import CUBSentences
from utils import Constants, FakeCategorical
from .vae import VAE
# Constants
maxSentLen = 32 # max length of any description for birds dataset
minOccur = 3
embeddingDim = 128
lenWindow = 3
fBase = 32
vocabSize = 1590
vocab_path = '../data/cub/oc:{}_sl:{}_s:{}_w:{}/cub.vocab'.format(minOccur, maxSentLen, 300, lenWindow)
# Classes
class Enc(nn.Module):
""" Generate latent parameters for sentence data. """
def __init__(self, latentDim):
super(Enc, self).__init__()
self.embedding = nn.Embedding(vocabSize, embeddingDim, padding_idx=0)
self.enc = nn.Sequential(
# input size: 1 x 32 x 128
nn.Conv2d(1, fBase, 4, 2, 1, bias=False),
nn.BatchNorm2d(fBase),
nn.ReLU(True),
# size: (fBase) x 16 x 64
nn.Conv2d(fBase, fBase * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(fBase * 2),
nn.ReLU(True),
# size: (fBase * 2) x 8 x 32
nn.Conv2d(fBase * 2, fBase * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(fBase * 4),
nn.ReLU(True),
            # size: (fBase * 4) x 4 x 16
nn.Conv2d(fBase * 4, fBase * 4, (1, 4), (1, 2), (0, 1), bias=False),
nn.BatchNorm2d(fBase * 4),
nn.ReLU(True),
            # size: (fBase * 4) x 4 x 8
nn.Conv2d(fBase * 4, fBase * 4, (1, 4), (1, 2), (0, 1), bias=False),
nn.BatchNorm2d(fBase * 4),
nn.ReLU(True),
            # size: (fBase * 4) x 4 x 4
)
self.c1 = nn.Conv2d(fBase * 4, latentDim, 4, 1, 0, bias=False)
self.c2 = nn.Conv2d(fBase * 4, latentDim, 4, 1, 0, bias=False)
# c1, c2 size: latentDim x 1 x 1
def forward(self, x):
e = self.enc(self.embedding(x.long()).unsqueeze(1))
mu, logvar = self.c1(e).squeeze(), self.c2(e).squeeze()
return mu, F.softplus(logvar) + Constants.eta
class Dec(nn.Module):
""" Generate a sentence given a sample from the latent space. """
def __init__(self, latentDim):
super(Dec, self).__init__()
self.dec = nn.Sequential(
nn.ConvTranspose2d(latentDim, fBase * 4, 4, 1, 0, bias=False),
nn.BatchNorm2d(fBase * 4),
nn.ReLU(True),
            # size: (fBase * 4) x 4 x 4
nn.ConvTranspose2d(fBase * 4, fBase * 4, (1, 4), (1, 2), (0, 1), bias=False),
nn.BatchNorm2d(fBase * 4),
nn.ReLU(True),
            # size: (fBase * 4) x 4 x 8
nn.ConvTranspose2d(fBase * 4, fBase * 4, (1, 4), (1, 2), (0, 1), bias=False),
nn.BatchNorm2d(fBase * 4),
nn.ReLU(True),
            # size: (fBase * 4) x 4 x 16
nn.ConvTranspose2d(fBase * 4, fBase * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(fBase * 2),
nn.ReLU(True),
            # size: (fBase * 2) x 8 x 32
nn.ConvTranspose2d(fBase * 2, fBase, 4, 2, 1, bias=False),
nn.BatchNorm2d(fBase),
nn.ReLU(True),
            # size: (fBase) x 16 x 64
nn.ConvTranspose2d(fBase, 1, 4, 2, 1, bias=False),
nn.ReLU(True)
            # Output size: 1 x 32 x 128
)
        # inverts the 'embedding' module up to one-hotness
self.toVocabSize = nn.Linear(embeddingDim, vocabSize)
def forward(self, z):
z = z.unsqueeze(-1).unsqueeze(-1) # fit deconv layers
out = self.dec(z.view(-1, *z.size()[-3:])).view(-1, embeddingDim)
return self.toVocabSize(out).view(*z.size()[:-3], maxSentLen, vocabSize),
class CUB_Sentence(VAE):
""" Derive a specific sub-class of a VAE for a sentence model. """
def __init__(self, params):
super(CUB_Sentence, self).__init__(
prior_dist=dist.Normal,
likelihood_dist=FakeCategorical,
post_dist=dist.Normal,
enc=Enc(params.latent_dim),
dec=Dec(params.latent_dim),
params=params)
grad = {'requires_grad': params.learn_prior}
self._pz_params = nn.ParameterList([
nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False), # mu
nn.Parameter(torch.zeros(1, params.latent_dim), **grad) # logvar
])
self.modelName = 'cubS'
self.llik_scaling = 1.
self.tie_modules()
self.fn_2i = lambda t: t.cpu().numpy().astype(int)
self.fn_trun = lambda s: s[:np.where(s == 2)[0][0] + 1] if 2 in s else s
self.vocab_file = vocab_path
self.maxSentLen = maxSentLen
self.vocabSize = vocabSize
def tie_modules(self):
# This looks dumb, but is actually dumber than you might realise.
# A linear(a, b) module has a [b x a] weight matrix, but an embedding(a, b)
# module has a [a x b] weight matrix. So when we want the transpose at
# decoding time, we just use the weight matrix as is.
self.dec.toVocabSize.weight = self.enc.embedding.weight
@property
def pz_params(self):
return self._pz_params[0], F.softplus(self._pz_params[1]) + Constants.eta
@staticmethod
def getDataLoaders(batch_size, shuffle=True, device="cuda"):
kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
tx = lambda data: torch.Tensor(data)
t_data = CUBSentences('../data', split='train', transform=tx, max_sequence_length=maxSentLen)
s_data = CUBSentences('../data', split='test', transform=tx, max_sequence_length=maxSentLen)
train_loader = DataLoader(t_data, batch_size=batch_size, shuffle=shuffle, **kwargs)
test_loader = DataLoader(s_data, batch_size=batch_size, shuffle=shuffle, **kwargs)
return train_loader, test_loader
def reconstruct(self, data, runPath, epoch):
recon = super(CUB_Sentence, self).reconstruct(data[:8]).argmax(dim=-1).squeeze()
recon, data = self.fn_2i(recon), self.fn_2i(data[:8])
recon, data = [self.fn_trun(r) for r in recon], [self.fn_trun(d) for d in data]
i2w = self.load_vocab()
print("\n Reconstruction examples (excluding <PAD>):")
for r_sent, d_sent in zip(recon[:3], data[:3]):
print('[DATA] ==> {}'.format(' '.join(i2w[str(i)] for i in d_sent)))
print('[RECON] ==> {}\n'.format(' '.join(i2w[str(i)] for i in r_sent)))
with open('{}/recon_{:03d}.txt'.format(runPath, epoch), "w+") as txt_file:
for r_sent, d_sent in zip(recon, data):
txt_file.write('[DATA] ==> {}\n'.format(' '.join(i2w[str(i)] for i in d_sent)))
txt_file.write('[RECON] ==> {}\n\n'.format(' '.join(i2w[str(i)] for i in r_sent)))
def generate(self, runPath, epoch):
N, K = 5, 4
i2w = self.load_vocab()
samples = super(CUB_Sentence, self).generate(N, K).argmax(dim=-1).squeeze()
samples = samples.view(K, N, samples.size(-1)).transpose(0, 1) # N x K x 64
samples = [[self.fn_trun(s) for s in ss] for ss in self.fn_2i(samples)]
# samples = [self.fn_trun(s) for s in samples]
print("\n Generated examples (excluding <PAD>):")
for s_sent in samples[0][:3]:
print('[GEN] ==> {}'.format(' '.join(i2w[str(i)] for i in s_sent if i != 0)))
with open('{}/gen_samples_{:03d}.txt'.format(runPath, epoch), "w+") as txt_file:
for s_sents in samples:
for s_sent in s_sents:
txt_file.write('{}\n'.format(' '.join(i2w[str(i)] for i in s_sent)))
txt_file.write('\n')
def analyse(self, data, runPath, epoch):
pass
def load_vocab(self):
# call dataloader function to create vocab file
if not os.path.exists(self.vocab_file):
_, _ = self.getDataLoaders(256)
with open(self.vocab_file, 'r') as vocab_file:
vocab = json.load(vocab_file)
return vocab['i2w']
| 8,186 | 39.935 | 103 | py |
mmvae-public | mmvae-public/src/models/mmvae_cub_images_sentences_ft.py | # cub multi-modal model specification
import matplotlib.pyplot as plt
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from numpy import sqrt, prod
from torch.utils.data import DataLoader
from torchnet.dataset import TensorDataset, ResampleDataset
from torchvision.utils import save_image, make_grid
from utils import Constants
from vis import plot_embeddings, plot_kls_df
from .mmvae import MMVAE
from .vae_cub_image_ft import CUB_Image_ft
from .vae_cub_sent_ft import CUB_Sentence_ft
# Constants
maxSentLen = 32
minOccur = 3
# This is required because there are 10 captions per image.
# Allows easier reuse of the same image for the corresponding set of captions.
def resampler(dataset, idx):
return idx // 10
class CUB_Image_Sentence_ft(MMVAE):
def __init__(self, params):
super(CUB_Image_Sentence_ft, self).__init__(dist.Normal, params, CUB_Image_ft, CUB_Sentence_ft)
grad = {'requires_grad': params.learn_prior}
self._pz_params = nn.ParameterList([
nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False), # mu
nn.Parameter(torch.zeros(1, params.latent_dim), **grad) # logvar
])
self.vaes[0].llik_scaling = self.vaes[1].maxSentLen / prod(self.vaes[0].dataSize) \
if params.llik_scaling == 0 else params.llik_scaling
for vae in self.vaes:
vae._pz_params = self._pz_params
self.modelName = 'cubISft'
self.i2w = self.vaes[1].load_vocab()
@property
def pz_params(self):
return self._pz_params[0], \
F.softplus(self._pz_params[1]) + Constants.eta
def getDataLoaders(self, batch_size, shuffle=True, device='cuda'):
# load base datasets
t1, s1 = self.vaes[0].getDataLoaders(batch_size, shuffle, device)
t2, s2 = self.vaes[1].getDataLoaders(batch_size, shuffle, device)
kwargs = {'num_workers': 2, 'pin_memory': True} if device == 'cuda' else {}
train_loader = DataLoader(TensorDataset([
ResampleDataset(t1.dataset, resampler, size=len(t1.dataset) * 10),
t2.dataset]), batch_size=batch_size, shuffle=shuffle, **kwargs)
test_loader = DataLoader(TensorDataset([
ResampleDataset(s1.dataset, resampler, size=len(s1.dataset) * 10),
s2.dataset]), batch_size=batch_size, shuffle=shuffle, **kwargs)
return train_loader, test_loader
def generate(self, runPath, epoch):
N = 8
samples = super(CUB_Image_Sentence_ft, self).generate(N)
samples[0] = self.vaes[0].unproject(samples[0], search_split='train')
images, captions = [sample.data.cpu() for sample in samples]
captions = self._sent_preprocess(captions)
fig = plt.figure(figsize=(8, 6))
for i, (image, caption) in enumerate(zip(images, captions)):
fig = self._imshow(image, caption, i, fig, N)
plt.savefig('{}/gen_samples_{:03d}.png'.format(runPath, epoch))
plt.close()
def reconstruct(self, raw_data, runPath, epoch):
N = 8
recons_mat = super(CUB_Image_Sentence_ft, self).reconstruct([d[:N] for d in raw_data])
fns = [lambda images: images.data.cpu(), lambda sentences: self._sent_preprocess(sentences)]
for r, recons_list in enumerate(recons_mat):
for o, recon in enumerate(recons_list):
data = fns[r](raw_data[r][:N])
recon = fns[o](recon.squeeze())
if r != o:
fig = plt.figure(figsize=(8, 6))
for i, (_data, _recon) in enumerate(zip(data, recon)):
image, caption = (_data, _recon) if r == 0 else (_recon, _data)
search_split = 'test' if r == 0 else 'train'
image = self.vaes[0].unproject(image.unsqueeze(0), search_split=search_split)
fig = self._imshow(image, caption, i, fig, N)
plt.savefig('{}/recon_{}x{}_{:03d}.png'.format(runPath, r, o, epoch))
plt.close()
else:
if r == 0:
data_ = self.vaes[0].unproject(data, search_split='test')
recon_ = self.vaes[0].unproject(recon, search_split='train')
comp = torch.cat([data_, recon_])
save_image(comp, '{}/recon_{}x{}_{:03d}.png'.format(runPath, r, o, epoch))
else:
with open('{}/recon_{}x{}_{:03d}.txt'.format(runPath, r, o, epoch), "w+") as txt_file:
for r_sent, d_sent in zip(recon, data):
txt_file.write('[DATA] ==> {}\n'.format(' '.join(self.i2w[str(i)] for i in d_sent)))
txt_file.write('[RECON] ==> {}\n\n'.format(' '.join(self.i2w[str(i)] for i in r_sent)))
def analyse(self, data, runPath, epoch):
zemb, zsl, kls_df = super(CUB_Image_Sentence_ft, self).analyse(data, K=10)
labels = ['Prior', *[vae.modelName.lower() for vae in self.vaes]]
plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
def _sent_preprocess(self, sentences):
"""make sure raw data is always passed as dim=2 to avoid argmax.
last dimension must always be word embedding."""
if len(sentences.shape) > 2:
sentences = sentences.argmax(-1).squeeze()
return [self.vaes[1].fn_trun(s) for s in self.vaes[1].fn_2i(sentences)]
def _imshow(self, image, caption, i, fig, N):
"""Imshow for Tensor."""
ax = fig.add_subplot(N // 2, 4, i * 2 + 1)
ax.axis('off')
        image = image.numpy().transpose((1, 2, 0)) # CHW -> HWC for matplotlib
plt.imshow(image)
ax = fig.add_subplot(N // 2, 4, i * 2 + 2)
pos = ax.get_position()
ax.axis('off')
plt.text(
x=0.5 * (pos.x0 + pos.x1),
y=0.5 * (pos.y0 + pos.y1),
s='{}'.format(
' '.join(self.i2w[str(i)] + '\n' if (n + 1) % 5 == 0
else self.i2w[str(i)] for n, i in enumerate(caption))),
fontsize=6,
verticalalignment='center',
horizontalalignment='center'
)
return fig
| 6,432 | 44.302817 | 119 | py |
mmvae-public | mmvae-public/src/models/vae_cub_image_ft.py | # CUB Image feature model specification
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from numpy import sqrt
from torchvision.utils import make_grid, save_image
from datasets import CUBImageFt
from utils import Constants, NN_lookup
from vis import plot_embeddings, plot_kls_df
from .vae import VAE
# Constants
imgChans = 3
fBase = 64
class Enc(nn.Module):
""" Generate latent parameters for CUB image feature. """
def __init__(self, latent_dim, n_c):
super(Enc, self).__init__()
dim_hidden = 256
self.enc = nn.Sequential()
for i in range(int(torch.tensor(n_c / dim_hidden).log2())):
self.enc.add_module("layer" + str(i), nn.Sequential(
nn.Linear(n_c // (2 ** i), n_c // (2 ** (i + 1))),
nn.ELU(inplace=True),
))
# relies on above terminating at dim_hidden
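        # With the value used below (n_c = 2048, dim_hidden = 256) the loop above runs
        # log2(2048 / 256) = 3 times, halving the width each step: 2048 -> 1024 -> 512 -> 256.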
self.fc21 = nn.Linear(dim_hidden, latent_dim)
self.fc22 = nn.Linear(dim_hidden, latent_dim)
def forward(self, x):
e = self.enc(x)
return self.fc21(e), F.softplus(self.fc22(e)) + Constants.eta
class Dec(nn.Module):
""" Generate a CUB image feature given a sample from the latent space. """
def __init__(self, latent_dim, n_c):
super(Dec, self).__init__()
self.n_c = n_c
dim_hidden = 256
self.dec = nn.Sequential()
for i in range(int(torch.tensor(n_c / dim_hidden).log2())):
indim = latent_dim if i == 0 else dim_hidden * i
outdim = dim_hidden if i == 0 else dim_hidden * (2 * i)
self.dec.add_module("out_t" if i == 0 else "layer" + str(i) + "_t", nn.Sequential(
nn.Linear(indim, outdim),
nn.ELU(inplace=True),
))
# relies on above terminating at n_c // 2
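        # For latent_dim -> n_c = 2048 with dim_hidden = 256 the loop above builds
        # latent_dim -> 256 -> 512 -> 1024, and fc31 below maps 1024 -> 2048.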
self.fc31 = nn.Linear(n_c // 2, n_c)
def forward(self, z):
p = self.dec(z.view(-1, z.size(-1)))
mean = self.fc31(p).view(*z.size()[:-1], -1)
return mean, torch.tensor([0.01]).to(mean.device)
class CUB_Image_ft(VAE):
""" Derive a specific sub-class of a VAE for a CNN sentence model. """
def __init__(self, params):
super(CUB_Image_ft, self).__init__(
dist.Normal, # prior
dist.Laplace, # likelihood
dist.Normal, # posterior
Enc(params.latent_dim, 2048),
Dec(params.latent_dim, 2048),
params
)
grad = {'requires_grad': params.learn_prior}
self._pz_params = nn.ParameterList([
nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False), # mu
nn.Parameter(torch.zeros(1, params.latent_dim), **grad) # logvar
])
self.modelName = 'cubIft'
self.dataSize = torch.Size([2048])
self.llik_scaling = 1.
@property
def pz_params(self):
return self._pz_params[0], \
F.softplus(self._pz_params[1]) + Constants.eta
# remember that when combining with captions, this should be x10
def getDataLoaders(self, batch_size, shuffle=True, device="cuda"):
kwargs = {'num_workers': 1, 'pin_memory': True} if device == "cuda" else {}
train_dataset = CUBImageFt('../data', 'train', device)
test_dataset = CUBImageFt('../data', 'test', device)
train_loader = torch.utils.data.DataLoader(train_dataset,
batch_size=batch_size, shuffle=shuffle, **kwargs)
test_loader = torch.utils.data.DataLoader(test_dataset,
batch_size=batch_size, shuffle=shuffle, **kwargs)
train_dataset._load_data()
test_dataset._load_data()
self.unproject = lambda emb_h, search_split='train', \
te=train_dataset.ft_mat, td=train_dataset.data_mat, \
se=test_dataset.ft_mat, sd=test_dataset.data_mat: \
NN_lookup(emb_h, te, td) if search_split == 'train' else NN_lookup(emb_h, se, sd)
return train_loader, test_loader
def generate(self, runPath, epoch):
N, K = 64, 9
samples = super(CUB_Image_ft, self).generate(N, K).data.cpu()
samples = self.unproject(samples, search_split='train')
samples = samples.view(K, N, *samples.size()[1:]).transpose(0, 1)
s = [make_grid(t, nrow=int(sqrt(K)), padding=0) for t in samples.data.cpu()]
save_image(torch.stack(s),
'{}/gen_samples_{:03d}.png'.format(runPath, epoch),
nrow=int(sqrt(N)))
def reconstruct(self, data, runPath, epoch):
recon = super(CUB_Image_ft, self).reconstruct(data[:8])
data_ = self.unproject(data[:8], search_split='test')
recon_ = self.unproject(recon, search_split='train')
comp = torch.cat([data_, recon_])
save_image(comp.data.cpu(), '{}/recon_{:03d}.png'.format(runPath, epoch))
def analyse(self, data, runPath, epoch):
zemb, zsl, kls_df = super(CUB_Image_ft, self).analyse(data, K=10)
labels = ['Prior', self.modelName.lower()]
plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
| 5,311 | 38.348148 | 100 | py |
mmvae-public | mmvae-public/src/models/mmvae_mnist_svhn.py | # MNIST-SVHN multi-modal model specification
import os
import torch
import torch.distributions as dist
import torch.nn as nn
import torch.nn.functional as F
from numpy import sqrt, prod
from torch.utils.data import DataLoader
from torchnet.dataset import TensorDataset, ResampleDataset
from torchvision.utils import save_image, make_grid
from vis import plot_embeddings, plot_kls_df
from .mmvae import MMVAE
from .vae_mnist import MNIST
from .vae_svhn import SVHN
class MNIST_SVHN(MMVAE):
def __init__(self, params):
super(MNIST_SVHN, self).__init__(dist.Laplace, params, MNIST, SVHN)
grad = {'requires_grad': params.learn_prior}
self._pz_params = nn.ParameterList([
nn.Parameter(torch.zeros(1, params.latent_dim), requires_grad=False), # mu
nn.Parameter(torch.zeros(1, params.latent_dim), **grad) # logvar
])
self.vaes[0].llik_scaling = prod(self.vaes[1].dataSize) / prod(self.vaes[0].dataSize) \
if params.llik_scaling == 0 else params.llik_scaling
self.modelName = 'mnist-svhn'
@property
def pz_params(self):
return self._pz_params[0], F.softmax(self._pz_params[1], dim=1) * self._pz_params[1].size(-1)
def getDataLoaders(self, batch_size, shuffle=True, device='cuda'):
if not (os.path.exists('../data/train-ms-mnist-idx.pt')
and os.path.exists('../data/train-ms-svhn-idx.pt')
and os.path.exists('../data/test-ms-mnist-idx.pt')
and os.path.exists('../data/test-ms-svhn-idx.pt')):
raise RuntimeError('Generate transformed indices with the script in bin')
# get transformed indices
t_mnist = torch.load('../data/train-ms-mnist-idx.pt')
t_svhn = torch.load('../data/train-ms-svhn-idx.pt')
s_mnist = torch.load('../data/test-ms-mnist-idx.pt')
s_svhn = torch.load('../data/test-ms-svhn-idx.pt')
# load base datasets
t1, s1 = self.vaes[0].getDataLoaders(batch_size, shuffle, device)
t2, s2 = self.vaes[1].getDataLoaders(batch_size, shuffle, device)
train_mnist_svhn = TensorDataset([
ResampleDataset(t1.dataset, lambda d, i: t_mnist[i], size=len(t_mnist)),
ResampleDataset(t2.dataset, lambda d, i: t_svhn[i], size=len(t_svhn))
])
test_mnist_svhn = TensorDataset([
ResampleDataset(s1.dataset, lambda d, i: s_mnist[i], size=len(s_mnist)),
ResampleDataset(s2.dataset, lambda d, i: s_svhn[i], size=len(s_svhn))
])
kwargs = {'num_workers': 2, 'pin_memory': True} if device == 'cuda' else {}
train = DataLoader(train_mnist_svhn, batch_size=batch_size, shuffle=shuffle, **kwargs)
test = DataLoader(test_mnist_svhn, batch_size=batch_size, shuffle=shuffle, **kwargs)
return train, test
def generate(self, runPath, epoch):
N = 64
samples_list = super(MNIST_SVHN, self).generate(N)
for i, samples in enumerate(samples_list):
samples = samples.data.cpu()
# wrangle things so they come out tiled
samples = samples.view(N, *samples.size()[1:])
save_image(samples,
'{}/gen_samples_{}_{:03d}.png'.format(runPath, i, epoch),
nrow=int(sqrt(N)))
def reconstruct(self, data, runPath, epoch):
recons_mat = super(MNIST_SVHN, self).reconstruct([d[:8] for d in data])
for r, recons_list in enumerate(recons_mat):
for o, recon in enumerate(recons_list):
_data = data[r][:8].cpu()
recon = recon.squeeze(0).cpu()
# resize mnist to 32 and colour. 0 => mnist, 1 => svhn
_data = _data if r == 1 else resize_img(_data, self.vaes[1].dataSize)
recon = recon if o == 1 else resize_img(recon, self.vaes[1].dataSize)
comp = torch.cat([_data, recon])
save_image(comp, '{}/recon_{}x{}_{:03d}.png'.format(runPath, r, o, epoch))
def analyse(self, data, runPath, epoch):
zemb, zsl, kls_df = super(MNIST_SVHN, self).analyse(data, K=10)
labels = ['Prior', *[vae.modelName.lower() for vae in self.vaes]]
plot_embeddings(zemb, zsl, labels, '{}/emb_umap_{:03d}.png'.format(runPath, epoch))
plot_kls_df(kls_df, '{}/kl_distance_{:03d}.png'.format(runPath, epoch))
def resize_img(img, refsize):
return F.pad(img, (2, 2, 2, 2)).expand(img.size(0), *refsize)
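# Shape sketch (illustrative, assuming refsize = torch.Size([3, 32, 32]) as for SVHN):
# a (B, 1, 28, 28) MNIST batch is zero-padded by 2 pixels on each side to 32 x 32 and
# then broadcast to 3 channels, giving a (B, 3, 32, 32) tensor that tiles next to SVHN.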
| 4,479 | 45.185567 | 101 | py |
mmvae-public | mmvae-public/bin/make-mnist-svhn-idx.py | import torch
from torchvision import datasets, transforms
def rand_match_on_idx(l1, idx1, l2, idx2, max_d=10000, dm=10):
"""
l*: sorted labels
idx*: indices of sorted labels in original list
"""
_idx1, _idx2 = [], []
    for l in l1.unique(): # assuming both datasets contain the same set of label values
l_idx1, l_idx2 = idx1[l1 == l], idx2[l2 == l]
n = min(l_idx1.size(0), l_idx2.size(0), max_d)
l_idx1, l_idx2 = l_idx1[:n], l_idx2[:n]
for _ in range(dm):
_idx1.append(l_idx1[torch.randperm(n)])
_idx2.append(l_idx2[torch.randperm(n)])
return torch.cat(_idx1), torch.cat(_idx2)
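# Sketch of the pairing behaviour (hypothetical toy inputs, not part of the script):
# l1 = torch.tensor([0, 0, 1]); idx1 = torch.tensor([5, 7, 2]) # sorted MNIST labels + indices
# l2 = torch.tensor([0, 1, 1]); idx2 = torch.tensor([9, 4, 6]) # sorted SVHN labels + indices
# a, b = rand_match_on_idx(l1, idx1, l2, idx2, max_d=10, dm=1)
# Position k of `a` and `b` then index an MNIST / SVHN sample sharing the same digit label.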
if __name__ == '__main__':
max_d = 10000 # maximum number of datapoints per class
dm = 30 # data multiplier: random permutations to match
# get the individual datasets
tx = transforms.ToTensor()
train_mnist = datasets.MNIST('../data', train=True, download=True, transform=tx)
test_mnist = datasets.MNIST('../data', train=False, download=True, transform=tx)
train_svhn = datasets.SVHN('../data', split='train', download=True, transform=tx)
test_svhn = datasets.SVHN('../data', split='test', download=True, transform=tx)
# svhn labels need extra work
train_svhn.labels = torch.LongTensor(train_svhn.labels.squeeze().astype(int)) % 10
test_svhn.labels = torch.LongTensor(test_svhn.labels.squeeze().astype(int)) % 10
mnist_l, mnist_li = train_mnist.targets.sort()
svhn_l, svhn_li = train_svhn.labels.sort()
idx1, idx2 = rand_match_on_idx(mnist_l, mnist_li, svhn_l, svhn_li, max_d=max_d, dm=dm)
print('len train idx:', len(idx1), len(idx2))
torch.save(idx1, '../data/train-ms-mnist-idx.pt')
torch.save(idx2, '../data/train-ms-svhn-idx.pt')
mnist_l, mnist_li = test_mnist.targets.sort()
svhn_l, svhn_li = test_svhn.labels.sort()
idx1, idx2 = rand_match_on_idx(mnist_l, mnist_li, svhn_l, svhn_li, max_d=max_d, dm=dm)
print('len test idx:', len(idx1), len(idx2))
torch.save(idx1, '../data/test-ms-mnist-idx.pt')
torch.save(idx2, '../data/test-ms-svhn-idx.pt')
| 2,087 | 44.391304 | 90 | py |
MinkLoc3D | MinkLoc3D-master/training/train.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import argparse
import torch
from training.trainer import do_train
from misc.utils import MinkLocParams
from datasets.dataset_utils import make_dataloaders
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train Minkowski Net embeddings using BatchHard negative mining')
parser.add_argument('--config', type=str, required=True, help='Path to configuration file')
parser.add_argument('--model_config', type=str, required=True, help='Path to the model-specific configuration file')
parser.add_argument('--debug', dest='debug', action='store_true')
parser.set_defaults(debug=False)
parser.add_argument('--visualize', dest='visualize', action='store_true')
parser.set_defaults(visualize=False)
args = parser.parse_args()
print('Training config path: {}'.format(args.config))
print('Model config path: {}'.format(args.model_config))
print('Debug mode: {}'.format(args.debug))
print('Visualize: {}'.format(args.visualize))
params = MinkLocParams(args.config, args.model_config)
params.print()
if args.debug:
torch.autograd.set_detect_anomaly(True)
dataloaders = make_dataloaders(params, debug=args.debug)
do_train(dataloaders, params, debug=args.debug, visualize=args.visualize)
| 1,341 | 37.342857 | 120 | py |
MinkLoc3D | MinkLoc3D-master/training/trainer.py | # Author: Jacek Komorowski
# Warsaw University of Technology
# Train on Oxford dataset (from PointNetVLAD paper) using BatchHard hard negative mining.
import os
from datetime import datetime
import numpy as np
import torch
import pickle
import tqdm
import pathlib
from torch.utils.tensorboard import SummaryWriter
from eval.evaluate import evaluate, print_eval_stats
from misc.utils import MinkLocParams, get_datetime
from models.loss import make_loss
from models.model_factory import model_factory
def print_stats(stats, phase):
if 'num_pairs' in stats:
# For batch hard contrastive loss
s = '{} - Mean loss: {:.6f} Avg. embedding norm: {:.4f} Pairs per batch (all/non-zero pos/non-zero neg): {:.1f}/{:.1f}/{:.1f}'
print(s.format(phase, stats['loss'], stats['avg_embedding_norm'], stats['num_pairs'],
stats['pos_pairs_above_threshold'], stats['neg_pairs_above_threshold']))
elif 'num_triplets' in stats:
# For triplet loss
s = '{} - Mean loss: {:.6f} Avg. embedding norm: {:.4f} Triplets per batch (all/non-zero): {:.1f}/{:.1f}'
print(s.format(phase, stats['loss'], stats['avg_embedding_norm'], stats['num_triplets'],
stats['num_non_zero_triplets']))
elif 'num_pos' in stats:
s = '{} - Mean loss: {:.6f} Avg. embedding norm: {:.4f} #positives/negatives: {:.1f}/{:.1f}'
print(s.format(phase, stats['loss'], stats['avg_embedding_norm'], stats['num_pos'], stats['num_neg']))
s = ''
l = []
if 'mean_pos_pair_dist' in stats:
s += 'Pos dist (min/mean/max): {:.4f}/{:.4f}/{:.4f} Neg dist (min/mean/max): {:.4f}/{:.4f}/{:.4f}'
l += [stats['min_pos_pair_dist'], stats['mean_pos_pair_dist'], stats['max_pos_pair_dist'],
stats['min_neg_pair_dist'], stats['mean_neg_pair_dist'], stats['max_neg_pair_dist']]
if 'pos_loss' in stats:
if len(s) > 0:
s += ' '
s += 'Pos loss: {:.4f} Neg loss: {:.4f}'
l += [stats['pos_loss'], stats['neg_loss']]
if len(l) > 0:
print(s.format(*l))
def tensors_to_numbers(stats):
stats = {e: stats[e].item() if torch.is_tensor(stats[e]) else stats[e] for e in stats}
return stats
def do_train(dataloaders, params: MinkLocParams, debug=False, visualize=False):
# Create model class
s = get_datetime()
model = model_factory(params)
model_name = 'model_' + params.model_params.model + '_' + s
print('Model name: {}'.format(model_name))
weights_path = create_weights_folder()
model_pathname = os.path.join(weights_path, model_name)
if hasattr(model, 'print_info'):
model.print_info()
else:
n_params = sum([param.nelement() for param in model.parameters()])
print('Number of model parameters: {}'.format(n_params))
# Move the model to the proper device before configuring the optimizer
if torch.cuda.is_available():
device = "cuda"
model.to(device)
else:
device = "cpu"
print('Model device: {}'.format(device))
loss_fn = make_loss(params)
# Training elements
if params.weight_decay is None or params.weight_decay == 0:
optimizer = torch.optim.Adam(model.parameters(), lr=params.lr)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=params.lr, weight_decay=params.weight_decay)
if params.scheduler is None:
scheduler = None
else:
if params.scheduler == 'CosineAnnealingLR':
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=params.epochs+1,
eta_min=params.min_lr)
elif params.scheduler == 'MultiStepLR':
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, params.scheduler_milestones, gamma=0.1)
else:
raise NotImplementedError('Unsupported LR scheduler: {}'.format(params.scheduler))
###########################################################################
# Initialize TensorBoard writer
###########################################################################
now = datetime.now()
logdir = os.path.join("../tf_logs", now.strftime("%Y%m%d-%H%M%S"))
writer = SummaryWriter(logdir)
###########################################################################
#
###########################################################################
is_validation_set = 'val' in dataloaders
if is_validation_set:
phases = ['train', 'val']
else:
phases = ['train']
# Training statistics
stats = {'train': [], 'val': [], 'eval': []}
for epoch in tqdm.tqdm(range(1, params.epochs + 1)):
for phase in phases:
if phase == 'train':
model.train()
else:
model.eval()
running_stats = [] # running stats for the current epoch
count_batches = 0
for batch, positives_mask, negatives_mask in dataloaders[phase]:
                # batch is a dict of tensors (sparse point coords and features), moved to the device below
                # positives_mask / negatives_mask are boolean masks of positive and negative pairs within the batch
count_batches += 1
batch_stats = {}
if debug and count_batches > 2:
break
batch = {e: batch[e].to(device) for e in batch}
n_positives = torch.sum(positives_mask).item()
n_negatives = torch.sum(negatives_mask).item()
if n_positives == 0 or n_negatives == 0:
# Skip a batch without positives or negatives
print('WARNING: Skipping batch without positive or negative examples')
continue
optimizer.zero_grad()
if visualize:
#visualize_batch(batch)
pass
with torch.set_grad_enabled(phase == 'train'):
# Compute embeddings of all elements
embeddings = model(batch)
loss, temp_stats, _ = loss_fn(embeddings, positives_mask, negatives_mask)
temp_stats = tensors_to_numbers(temp_stats)
batch_stats.update(temp_stats)
batch_stats['loss'] = loss.item()
if phase == 'train':
loss.backward()
optimizer.step()
running_stats.append(batch_stats)
torch.cuda.empty_cache() # Prevent excessive GPU memory consumption by SparseTensors
# ******* PHASE END *******
# Compute mean stats for the epoch
epoch_stats = {}
for key in running_stats[0].keys():
temp = [e[key] for e in running_stats]
epoch_stats[key] = np.mean(temp)
stats[phase].append(epoch_stats)
print_stats(epoch_stats, phase)
# ******* EPOCH END *******
if scheduler is not None:
scheduler.step()
loss_metrics = {'train': stats['train'][-1]['loss']}
if 'val' in phases:
loss_metrics['val'] = stats['val'][-1]['loss']
writer.add_scalars('Loss', loss_metrics, epoch)
if 'num_triplets' in stats['train'][-1]:
nz_metrics = {'train': stats['train'][-1]['num_non_zero_triplets']}
if 'val' in phases:
nz_metrics['val'] = stats['val'][-1]['num_non_zero_triplets']
writer.add_scalars('Non-zero triplets', nz_metrics, epoch)
elif 'num_pairs' in stats['train'][-1]:
nz_metrics = {'train_pos': stats['train'][-1]['pos_pairs_above_threshold'],
'train_neg': stats['train'][-1]['neg_pairs_above_threshold']}
if 'val' in phases:
nz_metrics['val_pos'] = stats['val'][-1]['pos_pairs_above_threshold']
nz_metrics['val_neg'] = stats['val'][-1]['neg_pairs_above_threshold']
writer.add_scalars('Non-zero pairs', nz_metrics, epoch)
if params.batch_expansion_th is not None:
# Dynamic batch expansion
epoch_train_stats = stats['train'][-1]
if 'num_non_zero_triplets' not in epoch_train_stats:
print('WARNING: Batch size expansion is enabled, but the loss function is not supported')
else:
# Ratio of non-zero triplets
rnz = epoch_train_stats['num_non_zero_triplets'] / epoch_train_stats['num_triplets']
if rnz < params.batch_expansion_th:
dataloaders['train'].batch_sampler.expand_batch()
print('')
# Save final model weights
final_model_path = model_pathname + '_final.pth'
torch.save(model.state_dict(), final_model_path)
stats = {'train_stats': stats, 'params': params}
# Evaluate the final model
model.eval()
final_eval_stats = evaluate(model, device, params)
print('Final model:')
print_eval_stats(final_eval_stats)
stats['eval'] = {'final': final_eval_stats}
print('')
# Pickle training stats and parameters
pickle_path = model_pathname + '_stats.pickle'
pickle.dump(stats, open(pickle_path, "wb"))
# Append key experimental metrics to experiment summary file
model_params_name = os.path.split(params.model_params.model_params_path)[1]
config_name = os.path.split(params.params_path)[1]
_, model_name = os.path.split(model_pathname)
prefix = "{}, {}, {}".format(model_params_name, config_name, model_name)
export_eval_stats("experiment_results.txt", prefix, final_eval_stats)
def export_eval_stats(file_name, prefix, eval_stats):
s = prefix
ave_1p_recall_l = []
ave_recall_l = []
# Print results on the final model
with open(file_name, "a") as f:
for ds in ['oxford', 'university', 'residential', 'business']:
ave_1p_recall = eval_stats[ds]['ave_one_percent_recall']
ave_1p_recall_l.append(ave_1p_recall)
ave_recall = eval_stats[ds]['ave_recall'][0]
ave_recall_l.append(ave_recall)
s += ", {:0.2f}, {:0.2f}".format(ave_1p_recall, ave_recall)
mean_1p_recall = np.mean(ave_1p_recall_l)
mean_recall = np.mean(ave_recall_l)
s += ", {:0.2f}, {:0.2f}\n".format(mean_1p_recall, mean_recall)
f.write(s)
def create_weights_folder():
# Create a folder to save weights of trained models
this_file_path = pathlib.Path(__file__).parent.absolute()
temp, _ = os.path.split(this_file_path)
weights_path = os.path.join(temp, 'weights')
if not os.path.exists(weights_path):
os.mkdir(weights_path)
assert os.path.exists(weights_path), 'Cannot create weights folder: {}'.format(weights_path)
return weights_path
| 10,845 | 39.17037 | 139 | py |
MinkLoc3D | MinkLoc3D-master/eval/evaluate.py | # Author: Jacek Komorowski
# Warsaw University of Technology
# Evaluation code adapted from PointNetVlad code: https://github.com/mikacuy/pointnetvlad
from sklearn.neighbors import KDTree
import numpy as np
import pickle
import os
import argparse
import torch
import tqdm
import MinkowskiEngine as ME
import random
from misc.utils import MinkLocParams
from models.model_factory import model_factory
def evaluate(model, device, params, silent=True):
# Run evaluation on all eval datasets
assert len(params.eval_database_files) == len(params.eval_query_files)
stats = {}
for database_file, query_file in zip(params.eval_database_files, params.eval_query_files):
# Extract location name from query and database files
location_name = database_file.split('_')[0]
temp = query_file.split('_')[0]
assert location_name == temp, 'Database location: {} does not match query location: {}'.format(database_file,
query_file)
p = os.path.join(params.dataset_folder, database_file)
with open(p, 'rb') as f:
database_sets = pickle.load(f)
p = os.path.join(params.dataset_folder, query_file)
with open(p, 'rb') as f:
query_sets = pickle.load(f)
temp = evaluate_dataset(model, device, params, database_sets, query_sets, silent=silent)
stats[location_name] = temp
return stats
def evaluate_dataset(model, device, params, database_sets, query_sets, silent=True):
# Run evaluation on a single dataset
recall = np.zeros(25)
count = 0
similarity = []
one_percent_recall = []
database_embeddings = []
query_embeddings = []
model.eval()
for set in tqdm.tqdm(database_sets, disable=silent):
database_embeddings.append(get_latent_vectors(model, set, device, params))
for set in tqdm.tqdm(query_sets, disable=silent):
query_embeddings.append(get_latent_vectors(model, set, device, params))
for i in range(len(query_sets)):
for j in range(len(query_sets)):
if i == j:
continue
pair_recall, pair_similarity, pair_opr = get_recall(i, j, database_embeddings, query_embeddings, query_sets,
database_sets)
recall += np.array(pair_recall)
count += 1
one_percent_recall.append(pair_opr)
for x in pair_similarity:
similarity.append(x)
ave_recall = recall / count
average_similarity = np.mean(similarity)
ave_one_percent_recall = np.mean(one_percent_recall)
stats = {'ave_one_percent_recall': ave_one_percent_recall, 'ave_recall': ave_recall,
'average_similarity': average_similarity}
return stats
def load_pc(file_name, params):
# returns Nx3 matrix
file_path = os.path.join(params.dataset_folder, file_name)
pc = np.fromfile(file_path, dtype=np.float64)
# coords are within -1..1 range in each dimension
assert pc.shape[0] == params.num_points * 3, "Error in point cloud shape: {}".format(file_path)
pc = np.reshape(pc, (pc.shape[0] // 3, 3))
pc = torch.tensor(pc, dtype=torch.float)
return pc
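# Illustrative sketch (not part of the original repo): the benchmark stores each cloud as a
# raw binary file of num_points*3 float64 values (coords already normalized to the -1..1 range),
# which load_pc above reshapes into an (N, 3) tensor. The temporary path below is a placeholder.
def _demo_point_cloud_file(tmp_path='/tmp/demo_cloud.bin', num_points=4096):
    import numpy as np
    cloud = np.random.uniform(-1, 1, size=(num_points, 3)).astype(np.float64)
    cloud.tofile(tmp_path)
    loaded = np.fromfile(tmp_path, dtype=np.float64).reshape(-1, 3)
    assert loaded.shape == (num_points, 3)
    return loaded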
def get_latent_vectors(model, set, device, params):
# Adapted from original PointNetVLAD code
model.eval()
embeddings_l = []
for elem_ndx in set:
x = load_pc(set[elem_ndx]["query"], params)
with torch.no_grad():
# coords are (n_clouds, num_points, channels) tensor
coords = ME.utils.sparse_quantize(coordinates=x,
quantization_size=params.model_params.mink_quantization_size)
bcoords = ME.utils.batched_coordinates([coords])
# Assign a dummy feature equal to 1 to each point
# Coords must be on CPU, features can be on GPU - see MinkowskiEngine documentation
feats = torch.ones((bcoords.shape[0], 1), dtype=torch.float32)
batch = {'coords': bcoords.to(device), 'features': feats.to(device)}
embedding = model(batch)
# embedding is (1, 1024) tensor
if params.normalize_embeddings:
embedding = torch.nn.functional.normalize(embedding, p=2, dim=1) # Normalize embeddings
embedding = embedding.detach().cpu().numpy()
embeddings_l.append(embedding)
embeddings = np.vstack(embeddings_l)
return embeddings
def get_recall(m, n, database_vectors, query_vectors, query_sets, database_sets):
# Original PointNetVLAD code
database_output = database_vectors[m]
queries_output = query_vectors[n]
# When embeddings are normalized, using Euclidean distance gives the same
# nearest neighbour search results as using cosine distance
database_nbrs = KDTree(database_output)
num_neighbors = 25
recall = [0] * num_neighbors
top1_similarity_score = []
one_percent_retrieved = 0
threshold = max(int(round(len(database_output)/100.0)), 1)
num_evaluated = 0
for i in range(len(queries_output)):
# i is query element ndx
query_details = query_sets[n][i] # {'query': path, 'northing': , 'easting': }
true_neighbors = query_details[m]
if len(true_neighbors) == 0:
continue
num_evaluated += 1
distances, indices = database_nbrs.query(np.array([queries_output[i]]), k=num_neighbors)
for j in range(len(indices[0])):
if indices[0][j] in true_neighbors:
if j == 0:
similarity = np.dot(queries_output[i], database_output[indices[0][j]])
top1_similarity_score.append(similarity)
recall[j] += 1
break
if len(list(set(indices[0][0:threshold]).intersection(set(true_neighbors)))) > 0:
one_percent_retrieved += 1
one_percent_recall = (one_percent_retrieved/float(num_evaluated))*100
recall = (np.cumsum(recall)/float(num_evaluated))*100
return recall, top1_similarity_score, one_percent_recall
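# Illustrative sketch (not part of the original repo): how the cumulative recall@N values
# returned by get_recall behave on toy numbers. hits_at_rank[j] counts queries whose first
# true match was retrieved at rank j+1, mirroring the recall[j] counter above.
def _demo_cumulative_recall():
    import numpy as np
    hits_at_rank = [3, 1, 0, 1]   # 3 queries matched at rank 1, 1 at rank 2, none at rank 3, 1 at rank 4
    num_evaluated = 5
    recall_at_n = np.cumsum(hits_at_rank) / float(num_evaluated) * 100
    # recall_at_n == [60., 80., 80., 100.] -> recall@1..recall@4 in percent
    return recall_at_n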
def print_eval_stats(stats):
for database_name in stats:
print('Dataset: {}'.format(database_name))
t = 'Avg. top 1% recall: {:.2f} Avg. similarity: {:.4f} Avg. recall @N:'
print(t.format(stats[database_name]['ave_one_percent_recall'], stats[database_name]['average_similarity']))
print(stats[database_name]['ave_recall'])
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Evaluate model on PointNetVLAD (Oxford) dataset')
parser.add_argument('--config', type=str, required=True, help='Path to configuration file')
parser.add_argument('--model_config', type=str, required=True, help='Path to the model-specific configuration file')
parser.add_argument('--weights', type=str, required=False, help='Trained model weights')
args = parser.parse_args()
print('Config path: {}'.format(args.config))
print('Model config path: {}'.format(args.model_config))
if args.weights is None:
w = 'RANDOM WEIGHTS'
else:
w = args.weights
print('Weights: {}'.format(w))
print('')
params = MinkLocParams(args.config, args.model_config)
params.print()
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
print('Device: {}'.format(device))
model = model_factory(params)
if args.weights is not None:
assert os.path.exists(args.weights), 'Cannot open network weights: {}'.format(args.weights)
print('Loading weights: {}'.format(args.weights))
model.load_state_dict(torch.load(args.weights, map_location=device))
model.to(device)
stats = evaluate(model, device, params, silent=False)
print_eval_stats(stats)
| 7,883 | 36.542857 | 120 | py |
MinkLoc3D | MinkLoc3D-master/models/minkloc.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import torch
import MinkowskiEngine as ME
from models.minkfpn import MinkFPN
from models.netvlad import MinkNetVladWrapper
import layers.pooling as pooling
class MinkLoc(torch.nn.Module):
def __init__(self, model, in_channels, feature_size, output_dim, planes, layers, num_top_down, conv0_kernel_size):
super().__init__()
self.model = model
self.in_channels = in_channels
self.feature_size = feature_size # Size of local features produced by local feature extraction block
self.output_dim = output_dim # Dimensionality of the global descriptor
self.backbone = MinkFPN(in_channels=in_channels, out_channels=self.feature_size, num_top_down=num_top_down,
conv0_kernel_size=conv0_kernel_size, layers=layers, planes=planes)
self.n_backbone_features = output_dim
if model == 'MinkFPN_Max':
assert self.feature_size == self.output_dim, 'output_dim must be the same as feature_size'
self.pooling = pooling.MAC()
elif model == 'MinkFPN_GeM':
assert self.feature_size == self.output_dim, 'output_dim must be the same as feature_size'
self.pooling = pooling.GeM()
elif model == 'MinkFPN_NetVlad':
self.pooling = MinkNetVladWrapper(feature_size=self.feature_size, output_dim=self.output_dim,
cluster_size=64, gating=False)
elif model == 'MinkFPN_NetVlad_CG':
self.pooling = MinkNetVladWrapper(feature_size=self.feature_size, output_dim=self.output_dim,
cluster_size=64, gating=True)
else:
raise NotImplementedError('Model not implemented: {}'.format(model))
def forward(self, batch):
# Coords must be on CPU, features can be on GPU - see MinkowskiEngine documentation
x = ME.SparseTensor(batch['features'], coordinates=batch['coords'])
x = self.backbone(x)
# x is (num_points, n_features) tensor
assert x.shape[1] == self.feature_size, 'Backbone output tensor has: {} channels. Expected: {}'.format(x.shape[1], self.feature_size)
x = self.pooling(x)
assert x.dim() == 2, 'Expected 2-dimensional tensor (batch_size,output_dim). Got {} dimensions.'.format(x.dim())
assert x.shape[1] == self.output_dim, 'Output tensor has: {} channels. Expected: {}'.format(x.shape[1], self.output_dim)
# x is (batch_size, output_dim) tensor
return x
def print_info(self):
print('Model class: MinkLoc')
n_params = sum([param.nelement() for param in self.parameters()])
print('Total parameters: {}'.format(n_params))
n_params = sum([param.nelement() for param in self.backbone.parameters()])
print('Backbone parameters: {}'.format(n_params))
n_params = sum([param.nelement() for param in self.pooling.parameters()])
print('Aggregation parameters: {}'.format(n_params))
if hasattr(self.backbone, 'print_info'):
self.backbone.print_info()
if hasattr(self.pooling, 'print_info'):
self.pooling.print_info()
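# Illustrative sketch (not part of the original repo): building the batch dict consumed by
# MinkLoc.forward from a single (N, 3) point cloud, mirroring the quantization logic used in
# eval/evaluate.py. The quantization size 0.01 is an assumed placeholder value.
def _demo_minkloc_batch(cloud, device='cpu'):
    import torch
    import MinkowskiEngine as ME
    coords = ME.utils.sparse_quantize(coordinates=cloud, quantization_size=0.01)
    bcoords = ME.utils.batched_coordinates([coords])
    # Coords must stay on CPU, features can be moved to GPU - see MinkowskiEngine documentation
    feats = torch.ones((bcoords.shape[0], 1), dtype=torch.float32)
    return {'coords': bcoords, 'features': feats.to(device)}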
| 3,242 | 50.47619 | 141 | py |
MinkLoc3D | MinkLoc3D-master/models/resnet.py | # Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
class ResNetBase(nn.Module):
block = None
layers = ()
init_dim = 64
planes = (64, 128, 256, 512)
def __init__(self, in_channels, out_channels, D=3):
nn.Module.__init__(self)
self.D = D
assert self.block is not None
self.network_initialization(in_channels, out_channels, D)
self.weight_initialization()
def network_initialization(self, in_channels, out_channels, D):
self.inplanes = self.init_dim
self.conv1 = ME.MinkowskiConvolution(
in_channels, self.inplanes, kernel_size=5, stride=2, dimension=D)
self.bn1 = ME.MinkowskiBatchNorm(self.inplanes)
self.relu = ME.MinkowskiReLU(inplace=True)
self.pool = ME.MinkowskiAvgPooling(kernel_size=2, stride=2, dimension=D)
self.layer1 = self._make_layer(
self.block, self.planes[0], self.layers[0], stride=2)
self.layer2 = self._make_layer(
self.block, self.planes[1], self.layers[1], stride=2)
self.layer3 = self._make_layer(
self.block, self.planes[2], self.layers[2], stride=2)
self.layer4 = self._make_layer(
self.block, self.planes[3], self.layers[3], stride=2)
self.conv5 = ME.MinkowskiConvolution(
self.inplanes, self.inplanes, kernel_size=3, stride=3, dimension=D)
self.bn5 = ME.MinkowskiBatchNorm(self.inplanes)
self.glob_avg = ME.MinkowskiGlobalMaxPooling()
self.final = ME.MinkowskiLinear(self.inplanes, out_channels, bias=True)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, ME.MinkowskiConvolution):
ME.utils.kaiming_normal_(m.kernel, mode='fan_out', nonlinearity='relu')
if isinstance(m, ME.MinkowskiBatchNorm):
nn.init.constant_(m.bn.weight, 1)
nn.init.constant_(m.bn.bias, 0)
def _make_layer(self,
block,
planes,
blocks,
stride=1,
dilation=1,
bn_momentum=0.1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
ME.MinkowskiConvolution(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
dimension=self.D),
ME.MinkowskiBatchNorm(planes * block.expansion))
layers = []
layers.append(
block(
self.inplanes,
planes,
stride=stride,
dilation=dilation,
downsample=downsample,
dimension=self.D))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
stride=1,
dilation=dilation,
dimension=self.D))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.pool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.conv5(x)
x = self.bn5(x)
x = self.relu(x)
x = self.glob_avg(x)
return self.final(x)
class ResNet14(ResNetBase):
    block = BasicBlock
    layers = (1, 1, 1, 1)
class ResNet18(ResNetBase):
    block = BasicBlock
    layers = (2, 2, 2, 2)
class ResNet34(ResNetBase):
    block = BasicBlock
    layers = (3, 4, 6, 3)
class ResNet50(ResNetBase):
    block = Bottleneck
    layers = (3, 4, 6, 3)
class ResNet101(ResNetBase):
    block = Bottleneck
    layers = (3, 4, 23, 3)
| 5,315 | 31.814815 | 87 | py |
MinkLoc3D | MinkLoc3D-master/models/minkfpn.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import torch.nn as nn
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock
from models.resnet import ResNetBase
class MinkFPN(ResNetBase):
# Feature Pyramid Network (FPN) architecture implementation using Minkowski ResNet building blocks
def __init__(self, in_channels, out_channels, num_top_down=1, conv0_kernel_size=5, block=BasicBlock,
layers=(1, 1, 1), planes=(32, 64, 64)):
assert len(layers) == len(planes)
assert 1 <= len(layers)
assert 0 <= num_top_down <= len(layers)
self.num_bottom_up = len(layers)
self.num_top_down = num_top_down
self.conv0_kernel_size = conv0_kernel_size
self.block = block
self.layers = layers
self.planes = planes
self.lateral_dim = out_channels
self.init_dim = planes[0]
ResNetBase.__init__(self, in_channels, out_channels, D=3)
def network_initialization(self, in_channels, out_channels, D):
assert len(self.layers) == len(self.planes)
assert len(self.planes) == self.num_bottom_up
self.convs = nn.ModuleList() # Bottom-up convolutional blocks with stride=2
self.bn = nn.ModuleList() # Bottom-up BatchNorms
self.blocks = nn.ModuleList() # Bottom-up blocks
        self.tconvs = nn.ModuleList()   # Top-down transposed convolutions
self.conv1x1 = nn.ModuleList() # 1x1 convolutions in lateral connections
        # The first convolution is a special case, with kernel size = 5
self.inplanes = self.planes[0]
self.conv0 = ME.MinkowskiConvolution(in_channels, self.inplanes, kernel_size=self.conv0_kernel_size,
dimension=D)
self.bn0 = ME.MinkowskiBatchNorm(self.inplanes)
for plane, layer in zip(self.planes, self.layers):
self.convs.append(ME.MinkowskiConvolution(self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D))
self.bn.append(ME.MinkowskiBatchNorm(self.inplanes))
self.blocks.append(self._make_layer(self.block, plane, layer))
# Lateral connections
for i in range(self.num_top_down):
self.conv1x1.append(ME.MinkowskiConvolution(self.planes[-1 - i], self.lateral_dim, kernel_size=1,
stride=1, dimension=D))
self.tconvs.append(ME.MinkowskiConvolutionTranspose(self.lateral_dim, self.lateral_dim, kernel_size=2,
stride=2, dimension=D))
# There's one more lateral connection than top-down TConv blocks
if self.num_top_down < self.num_bottom_up:
# Lateral connection from Conv block 1 or above
self.conv1x1.append(ME.MinkowskiConvolution(self.planes[-1 - self.num_top_down], self.lateral_dim, kernel_size=1,
stride=1, dimension=D))
else:
            # Lateral connection from Conv0 block
self.conv1x1.append(ME.MinkowskiConvolution(self.planes[0], self.lateral_dim, kernel_size=1,
stride=1, dimension=D))
self.relu = ME.MinkowskiReLU(inplace=True)
def forward(self, x):
# *** BOTTOM-UP PASS ***
        # First bottom-up convolution is special (with a bigger kernel)
feature_maps = []
x = self.conv0(x)
x = self.bn0(x)
x = self.relu(x)
if self.num_top_down == self.num_bottom_up:
feature_maps.append(x)
# BOTTOM-UP PASS
for ndx, (conv, bn, block) in enumerate(zip(self.convs, self.bn, self.blocks)):
x = conv(x) # Decreases spatial resolution (conv stride=2)
x = bn(x)
x = self.relu(x)
x = block(x)
if self.num_bottom_up - 1 - self.num_top_down <= ndx < len(self.convs) - 1:
feature_maps.append(x)
assert len(feature_maps) == self.num_top_down
x = self.conv1x1[0](x)
# TOP-DOWN PASS
for ndx, tconv in enumerate(self.tconvs):
x = tconv(x) # Upsample using transposed convolution
x = x + self.conv1x1[ndx+1](feature_maps[-ndx - 1])
return x
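# Illustrative sketch (not part of the original repo): a MinkFPN matching the default
# arguments above - three bottom-up blocks, one top-down block. in_channels=1 matches the
# dummy per-point feature used elsewhere in this repo; out_channels=256 is an assumed placeholder.
def _demo_minkfpn():
    return MinkFPN(in_channels=1, out_channels=256, num_top_down=1, conv0_kernel_size=5,
                   layers=(1, 1, 1), planes=(32, 64, 64))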
| 4,364 | 44.947368 | 125 | py |
MinkLoc3D | MinkLoc3D-master/models/loss.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import numpy as np
import torch
from pytorch_metric_learning import losses, miners, reducers
from pytorch_metric_learning.distances import LpDistance
def make_loss(params):
if params.loss == 'BatchHardTripletMarginLoss':
# BatchHard mining with triplet margin loss
# Expects input: embeddings, positives_mask, negatives_mask
loss_fn = BatchHardTripletLossWithMasks(params.margin, params.normalize_embeddings)
elif params.loss == 'BatchHardContrastiveLoss':
loss_fn = BatchHardContrastiveLossWithMasks(params.pos_margin, params.neg_margin, params.normalize_embeddings)
else:
print('Unknown loss: {}'.format(params.loss))
raise NotImplementedError
return loss_fn
class HardTripletMinerWithMasks:
# Hard triplet miner
def __init__(self, distance):
self.distance = distance
# Stats
self.max_pos_pair_dist = None
self.max_neg_pair_dist = None
self.mean_pos_pair_dist = None
self.mean_neg_pair_dist = None
self.min_pos_pair_dist = None
self.min_neg_pair_dist = None
def __call__(self, embeddings, positives_mask, negatives_mask):
assert embeddings.dim() == 2
d_embeddings = embeddings.detach()
with torch.no_grad():
hard_triplets = self.mine(d_embeddings, positives_mask, negatives_mask)
return hard_triplets
def mine(self, embeddings, positives_mask, negatives_mask):
# Based on pytorch-metric-learning implementation
dist_mat = self.distance(embeddings)
(hardest_positive_dist, hardest_positive_indices), a1p_keep = get_max_per_row(dist_mat, positives_mask)
(hardest_negative_dist, hardest_negative_indices), a2n_keep = get_min_per_row(dist_mat, negatives_mask)
a_keep_idx = torch.where(a1p_keep & a2n_keep)
a = torch.arange(dist_mat.size(0)).to(hardest_positive_indices.device)[a_keep_idx]
p = hardest_positive_indices[a_keep_idx]
n = hardest_negative_indices[a_keep_idx]
self.max_pos_pair_dist = torch.max(hardest_positive_dist).item()
self.max_neg_pair_dist = torch.max(hardest_negative_dist).item()
self.mean_pos_pair_dist = torch.mean(hardest_positive_dist).item()
self.mean_neg_pair_dist = torch.mean(hardest_negative_dist).item()
self.min_pos_pair_dist = torch.min(hardest_positive_dist).item()
self.min_neg_pair_dist = torch.min(hardest_negative_dist).item()
return a, p, n
def get_max_per_row(mat, mask):
non_zero_rows = torch.any(mask, dim=1)
mat_masked = mat.clone()
mat_masked[~mask] = 0
return torch.max(mat_masked, dim=1), non_zero_rows
def get_min_per_row(mat, mask):
non_inf_rows = torch.any(mask, dim=1)
mat_masked = mat.clone()
mat_masked[~mask] = float('inf')
return torch.min(mat_masked, dim=1), non_inf_rows
class BatchHardTripletLossWithMasks:
def __init__(self, margin, normalize_embeddings):
self.margin = margin
self.normalize_embeddings = normalize_embeddings
self.distance = LpDistance(normalize_embeddings=normalize_embeddings, collect_stats=True)
# We use triplet loss with Euclidean distance
self.miner_fn = HardTripletMinerWithMasks(distance=self.distance)
reducer_fn = reducers.AvgNonZeroReducer(collect_stats=True)
self.loss_fn = losses.TripletMarginLoss(margin=self.margin, swap=True, distance=self.distance,
reducer=reducer_fn, collect_stats=True)
def __call__(self, embeddings, positives_mask, negatives_mask):
hard_triplets = self.miner_fn(embeddings, positives_mask, negatives_mask)
dummy_labels = torch.arange(embeddings.shape[0]).to(embeddings.device)
loss = self.loss_fn(embeddings, dummy_labels, hard_triplets)
stats = {'loss': loss.item(), 'avg_embedding_norm': self.loss_fn.distance.final_avg_query_norm,
'num_non_zero_triplets': self.loss_fn.reducer.triplets_past_filter,
'num_triplets': len(hard_triplets[0]),
'mean_pos_pair_dist': self.miner_fn.mean_pos_pair_dist,
'mean_neg_pair_dist': self.miner_fn.mean_neg_pair_dist,
'max_pos_pair_dist': self.miner_fn.max_pos_pair_dist,
'max_neg_pair_dist': self.miner_fn.max_neg_pair_dist,
'min_pos_pair_dist': self.miner_fn.min_pos_pair_dist,
'min_neg_pair_dist': self.miner_fn.min_neg_pair_dist
}
return loss, stats, hard_triplets
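# Illustrative sketch (not part of the original repo): the loss consumes a batch of embeddings
# plus boolean (batch_size x batch_size) positive/negative masks, as produced by the collate
# function in datasets/dataset_utils.py. The margin and embedding size are placeholder values.
def _demo_batch_hard_triplet_loss():
    import torch
    loss_fn = BatchHardTripletLossWithMasks(margin=0.2, normalize_embeddings=True)
    embeddings = torch.randn(4, 256)
    positives_mask = torch.tensor([[False, True, False, False],
                                   [True, False, False, False],
                                   [False, False, False, True],
                                   [False, False, True, False]])
    negatives_mask = ~positives_mask & ~torch.eye(4, dtype=torch.bool)
    loss, stats, hard_triplets = loss_fn(embeddings, positives_mask, negatives_mask)
    return loss, stats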
class BatchHardContrastiveLossWithMasks:
def __init__(self, pos_margin, neg_margin, normalize_embeddings):
self.pos_margin = pos_margin
self.neg_margin = neg_margin
self.distance = LpDistance(normalize_embeddings=normalize_embeddings, collect_stats=True)
self.miner_fn = HardTripletMinerWithMasks(distance=self.distance)
# We use contrastive loss with squared Euclidean distance
reducer_fn = reducers.AvgNonZeroReducer(collect_stats=True)
self.loss_fn = losses.ContrastiveLoss(pos_margin=self.pos_margin, neg_margin=self.neg_margin,
distance=self.distance, reducer=reducer_fn, collect_stats=True)
def __call__(self, embeddings, positives_mask, negatives_mask):
hard_triplets = self.miner_fn(embeddings, positives_mask, negatives_mask)
dummy_labels = torch.arange(embeddings.shape[0]).to(embeddings.device)
loss = self.loss_fn(embeddings, dummy_labels, hard_triplets)
stats = {'loss': loss.item(), 'avg_embedding_norm': self.loss_fn.distance.final_avg_query_norm,
'pos_pairs_above_threshold': self.loss_fn.reducer.reducers['pos_loss'].pos_pairs_above_threshold,
'neg_pairs_above_threshold': self.loss_fn.reducer.reducers['neg_loss'].neg_pairs_above_threshold,
'pos_loss': self.loss_fn.reducer.reducers['pos_loss'].pos_loss.item(),
'neg_loss': self.loss_fn.reducer.reducers['neg_loss'].neg_loss.item(),
'num_pairs': 2*len(hard_triplets[0]),
'mean_pos_pair_dist': self.miner_fn.mean_pos_pair_dist,
'mean_neg_pair_dist': self.miner_fn.mean_neg_pair_dist,
'max_pos_pair_dist': self.miner_fn.max_pos_pair_dist,
'max_neg_pair_dist': self.miner_fn.max_neg_pair_dist,
'min_pos_pair_dist': self.miner_fn.min_pos_pair_dist,
'min_neg_pair_dist': self.miner_fn.min_neg_pair_dist
}
return loss, stats, hard_triplets
| 6,696 | 49.353383 | 118 | py |
MinkLoc3D | MinkLoc3D-master/models/netvlad.py | # Code taken from PointNetVLAD Pytorch implementation: https://github.com/cattaneod/PointNetVlad-Pytorch
# Adapted by Jacek Komorowski
import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
import math
# NOTE: The toolbox can only pool lists of features of the same length. It was specifically optimized to efficiently
# do so. One way to handle multiple lists of features of variable length is to create a tensor of shape
# 'batch_size' x 'max_samples' x 'feature_size', where 'max_samples' is the maximum number of features per list,
# and to pad each list with 0 values.
class NetVLADLoupe(nn.Module):
def __init__(self, feature_size, cluster_size, output_dim, gating=True, add_batch_norm=True):
super().__init__()
self.feature_size = feature_size
self.output_dim = output_dim
self.gating = gating
self.add_batch_norm = add_batch_norm
self.cluster_size = cluster_size
self.softmax = nn.Softmax(dim=-1)
self.cluster_weights = nn.Parameter(torch.randn(feature_size, cluster_size) * 1 / math.sqrt(feature_size))
self.cluster_weights2 = nn.Parameter(torch.randn(1, feature_size, cluster_size) * 1 / math.sqrt(feature_size))
self.hidden1_weights = nn.Parameter(
torch.randn(cluster_size * feature_size, output_dim) * 1 / math.sqrt(feature_size))
if add_batch_norm:
self.cluster_biases = None
self.bn1 = nn.BatchNorm1d(cluster_size)
else:
self.cluster_biases = nn.Parameter(torch.randn(cluster_size) * 1 / math.sqrt(feature_size))
self.bn1 = None
self.bn2 = nn.BatchNorm1d(output_dim)
if gating:
self.context_gating = GatingContext(output_dim, add_batch_norm=add_batch_norm)
def forward(self, x):
# Expects (batch_size, num_points, channels) tensor
assert x.dim() == 3
num_points = x.shape[1]
activation = torch.matmul(x, self.cluster_weights)
if self.add_batch_norm:
# activation = activation.transpose(1,2).contiguous()
activation = activation.view(-1, self.cluster_size)
activation = self.bn1(activation)
activation = activation.view(-1, num_points, self.cluster_size)
# activation = activation.transpose(1,2).contiguous()
else:
activation = activation + self.cluster_biases
activation = self.softmax(activation)
activation = activation.view((-1, num_points, self.cluster_size))
a_sum = activation.sum(-2, keepdim=True)
a = a_sum * self.cluster_weights2
activation = torch.transpose(activation, 2, 1)
x = x.view((-1, num_points, self.feature_size))
vlad = torch.matmul(activation, x)
vlad = torch.transpose(vlad, 2, 1)
vlad = vlad - a
vlad = F.normalize(vlad, dim=1, p=2)
vlad = vlad.reshape((-1, self.cluster_size * self.feature_size))
vlad = F.normalize(vlad, dim=1, p=2)
vlad = torch.matmul(vlad, self.hidden1_weights)
vlad = self.bn2(vlad)
if self.gating:
vlad = self.context_gating(vlad)
return vlad
class GatingContext(nn.Module):
def __init__(self, dim, add_batch_norm=True):
super(GatingContext, self).__init__()
self.dim = dim
self.add_batch_norm = add_batch_norm
self.gating_weights = nn.Parameter(
torch.randn(dim, dim) * 1 / math.sqrt(dim))
self.sigmoid = nn.Sigmoid()
if add_batch_norm:
self.gating_biases = None
self.bn1 = nn.BatchNorm1d(dim)
else:
self.gating_biases = nn.Parameter(
torch.randn(dim) * 1 / math.sqrt(dim))
self.bn1 = None
def forward(self, x):
gates = torch.matmul(x, self.gating_weights)
if self.add_batch_norm:
gates = self.bn1(gates)
else:
gates = gates + self.gating_biases
gates = self.sigmoid(gates)
activation = x * gates
return activation
class MinkNetVladWrapper(torch.nn.Module):
# Wrapper around NetVlad class to process sparse tensors from Minkowski networks
def __init__(self, feature_size, output_dim, cluster_size=64, gating=True):
super().__init__()
self.feature_size = feature_size
self.output_dim = output_dim
self.net_vlad = NetVLADLoupe(feature_size=feature_size, cluster_size=cluster_size, output_dim=output_dim,
gating=gating, add_batch_norm=True)
def forward(self, x):
# x is SparseTensor
assert x.F.shape[1] == self.feature_size
features = x.decomposed_features
# features is a list of (n_points, feature_size) tensors with variable number of points
batch_size = len(features)
features = torch.nn.utils.rnn.pad_sequence(features, batch_first=True)
# features is (batch_size, n_points, feature_size) tensor padded with zeros
x = self.net_vlad(features)
assert x.shape[0] == batch_size
assert x.shape[1] == self.output_dim
return x # Return (batch_size, output_dim) tensor
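# Illustrative sketch (not part of the original repo): NetVLADLoupe itself operates on a dense
# (batch_size, num_points, feature_size) tensor; MinkNetVladWrapper above only pads the
# variable-length sparse-tensor features into this shape. Sizes below are placeholders.
def _demo_netvlad_loupe():
    import torch
    net_vlad = NetVLADLoupe(feature_size=256, cluster_size=64, output_dim=256, gating=True)
    net_vlad.eval()                     # keep BatchNorm layers in eval mode for this toy demo
    features = torch.randn(2, 100, 256)
    descriptor = net_vlad(features)     # (2, 256) global descriptors
    return descriptor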
| 5,287 | 38.17037 | 118 | py |
MinkLoc3D | MinkLoc3D-master/datasets/dataset_utils.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import numpy as np
import torch
from torch.utils.data import DataLoader
import MinkowskiEngine as ME
from datasets.oxford import OxfordDataset, TrainTransform, TrainSetTransform
from datasets.samplers import BatchSampler
from misc.utils import MinkLocParams
def make_datasets(params: MinkLocParams, debug=False):
# Create training and validation datasets
datasets = {}
train_transform = TrainTransform(params.aug_mode)
train_set_transform = TrainSetTransform(params.aug_mode)
datasets['train'] = OxfordDataset(params.dataset_folder, params.train_file, train_transform,
set_transform=train_set_transform)
val_transform = None
if params.val_file is not None:
datasets['val'] = OxfordDataset(params.dataset_folder, params.val_file, val_transform)
return datasets
def make_collate_fn(dataset: OxfordDataset, mink_quantization_size=None):
# set_transform: the transform to be applied to all batch elements
def collate_fn(data_list):
# Constructs a batch object
clouds = [e[0] for e in data_list]
labels = [e[1] for e in data_list]
batch = torch.stack(clouds, dim=0) # Produces (batch_size, n_points, 3) tensor
if dataset.set_transform is not None:
# Apply the same transformation on all dataset elements
batch = dataset.set_transform(batch)
if mink_quantization_size is None:
# Not a MinkowskiEngine based model
batch = {'cloud': batch}
else:
coords = [ME.utils.sparse_quantize(coordinates=e, quantization_size=mink_quantization_size)
for e in batch]
coords = ME.utils.batched_coordinates(coords)
# Assign a dummy feature equal to 1 to each point
# Coords must be on CPU, features can be on GPU - see MinkowskiEngine documentation
feats = torch.ones((coords.shape[0], 1), dtype=torch.float32)
batch = {'coords': coords, 'features': feats}
        # Compute positives and negatives mask
positives_mask = [[in_sorted_array(e, dataset.queries[label].positives) for e in labels] for label in labels]
negatives_mask = [[not in_sorted_array(e, dataset.queries[label].non_negatives) for e in labels] for label in labels]
positives_mask = torch.tensor(positives_mask)
negatives_mask = torch.tensor(negatives_mask)
# Returns (batch_size, n_points, 3) tensor and positives_mask and
# negatives_mask which are batch_size x batch_size boolean tensors
return batch, positives_mask, negatives_mask
return collate_fn
def make_dataloaders(params: MinkLocParams, debug=False):
"""
Create training and validation dataloaders that return groups of k=2 similar elements
:param train_params:
:param model_params:
:return:
"""
datasets = make_datasets(params, debug=debug)
dataloders = {}
train_sampler = BatchSampler(datasets['train'], batch_size=params.batch_size,
batch_size_limit=params.batch_size_limit,
batch_expansion_rate=params.batch_expansion_rate)
# Collate function collates items into a batch and applies a 'set transform' on the entire batch
train_collate_fn = make_collate_fn(datasets['train'], params.model_params.mink_quantization_size)
dataloders['train'] = DataLoader(datasets['train'], batch_sampler=train_sampler, collate_fn=train_collate_fn,
num_workers=params.num_workers, pin_memory=True)
if 'val' in datasets:
val_sampler = BatchSampler(datasets['val'], batch_size=params.batch_size)
# Collate function collates items into a batch and applies a 'set transform' on the entire batch
# Currently validation dataset has empty set_transform function, but it may change in the future
val_collate_fn = make_collate_fn(datasets['val'], params.model_params.mink_quantization_size)
dataloders['val'] = DataLoader(datasets['val'], batch_sampler=val_sampler, collate_fn=val_collate_fn,
num_workers=params.num_workers, pin_memory=True)
return dataloders
def in_sorted_array(e: int, array: np.ndarray) -> bool:
pos = np.searchsorted(array, e)
if pos == len(array) or pos == -1:
return False
else:
return array[pos] == e
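# Illustrative sketch (not part of the original repo): in_sorted_array assumes an ascending
# ndarray, which is how the 'positives' and 'non_negatives' fields of each training tuple
# are stored in the pickled query files.
def _demo_in_sorted_array():
    import numpy as np
    positives = np.array([2, 5, 9, 17])
    assert in_sorted_array(5, positives)
    assert not in_sorted_array(6, positives)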
| 4,547 | 44.48 | 125 | py |
MinkLoc3D | MinkLoc3D-master/datasets/oxford.py | # Author: Jacek Komorowski
# Warsaw University of Technology
# Dataset wrapper for Oxford laser scans dataset from PointNetVLAD project
# For information on dataset see: https://github.com/mikacuy/pointnetvlad
import os
import pickle
import numpy as np
import math
from scipy.linalg import expm, norm
import random
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import tqdm
from typing import Dict
class OxfordDataset(Dataset):
"""
Dataset wrapper for Oxford laser scans dataset from PointNetVLAD project.
"""
def __init__(self, dataset_path: str, query_filename: str, image_path: str = None,
lidar2image_ndx=None, transform=None, set_transform=None, image_transform=None, use_cloud=True):
assert os.path.exists(dataset_path), 'Cannot access dataset path: {}'.format(dataset_path)
self.dataset_path = dataset_path
self.query_filepath = os.path.join(dataset_path, query_filename)
assert os.path.exists(self.query_filepath), 'Cannot access query file: {}'.format(self.query_filepath)
self.transform = transform
self.set_transform = set_transform
self.queries: Dict[int, TrainingTuple] = pickle.load(open(self.query_filepath, 'rb'))
self.image_path = image_path
self.lidar2image_ndx = lidar2image_ndx
self.image_transform = image_transform
self.n_points = 4096 # pointclouds in the dataset are downsampled to 4096 points
self.image_ext = '.png'
self.use_cloud = use_cloud
print('{} queries in the dataset'.format(len(self)))
def __len__(self):
return len(self.queries)
def __getitem__(self, ndx):
# Load point cloud and apply transform
file_pathname = os.path.join(self.dataset_path, self.queries[ndx].rel_scan_filepath)
query_pc = self.load_pc(file_pathname)
if self.transform is not None:
query_pc = self.transform(query_pc)
return query_pc, ndx
def get_positives(self, ndx):
return self.queries[ndx].positives
def get_non_negatives(self, ndx):
return self.queries[ndx].non_negatives
def load_pc(self, filename):
# Load point cloud, does not apply any transform
# Returns Nx3 matrix
file_path = os.path.join(self.dataset_path, filename)
pc = np.fromfile(file_path, dtype=np.float64)
# coords are within -1..1 range in each dimension
assert pc.shape[0] == self.n_points * 3, "Error in point cloud shape: {}".format(file_path)
pc = np.reshape(pc, (pc.shape[0] // 3, 3))
pc = torch.tensor(pc, dtype=torch.float)
return pc
class TrainingTuple:
# Tuple describing an element for training/validation
def __init__(self, id: int, timestamp: int, rel_scan_filepath: str, positives: np.ndarray,
non_negatives: np.ndarray, position: np.ndarray):
# id: element id (ids start from 0 and are consecutive numbers)
# ts: timestamp
# rel_scan_filepath: relative path to the scan
# positives: sorted ndarray of positive elements id
        # non_negatives: sorted ndarray of non-negative elements id
# position: x, y position in meters (northing, easting)
assert position.shape == (2,)
self.id = id
self.timestamp = timestamp
self.rel_scan_filepath = rel_scan_filepath
self.positives = positives
self.non_negatives = non_negatives
self.position = position
class TrainTransform:
def __init__(self, aug_mode):
# 1 is default mode, no transform
self.aug_mode = aug_mode
if self.aug_mode == 1:
t = [JitterPoints(sigma=0.001, clip=0.002), RemoveRandomPoints(r=(0.0, 0.1)),
RandomTranslation(max_delta=0.01), RemoveRandomBlock(p=0.4)]
else:
raise NotImplementedError('Unknown aug_mode: {}'.format(self.aug_mode))
self.transform = transforms.Compose(t)
def __call__(self, e):
if self.transform is not None:
e = self.transform(e)
return e
class TrainSetTransform:
def __init__(self, aug_mode):
# 1 is default mode, no transform
self.aug_mode = aug_mode
self.transform = None
t = [RandomRotation(max_theta=5, max_theta2=0, axis=np.array([0, 0, 1])),
RandomFlip([0.25, 0.25, 0.])]
self.transform = transforms.Compose(t)
def __call__(self, e):
if self.transform is not None:
e = self.transform(e)
return e
class RandomFlip:
def __init__(self, p):
# p = [p_x, p_y, p_z] probability of flipping each axis
assert len(p) == 3
assert 0 < sum(p) <= 1, 'sum(p) must be in (0, 1] range, is: {}'.format(sum(p))
self.p = p
self.p_cum_sum = np.cumsum(p)
def __call__(self, coords):
r = random.random()
if r <= self.p_cum_sum[0]:
# Flip the first axis
coords[..., 0] = -coords[..., 0]
elif r <= self.p_cum_sum[1]:
# Flip the second axis
coords[..., 1] = -coords[..., 1]
elif r <= self.p_cum_sum[2]:
# Flip the third axis
coords[..., 2] = -coords[..., 2]
return coords
class RandomRotation:
def __init__(self, axis=None, max_theta=180, max_theta2=15):
self.axis = axis
self.max_theta = max_theta # Rotation around axis
self.max_theta2 = max_theta2 # Smaller rotation in random direction
def _M(self, axis, theta):
return expm(np.cross(np.eye(3), axis / norm(axis) * theta)).astype(np.float32)
def __call__(self, coords):
if self.axis is not None:
axis = self.axis
else:
axis = np.random.rand(3) - 0.5
R = self._M(axis, (np.pi * self.max_theta / 180) * 2 * (np.random.rand(1) - 0.5))
if self.max_theta2 is None:
coords = coords @ R
else:
R_n = self._M(np.random.rand(3) - 0.5, (np.pi * self.max_theta2 / 180) * 2 * (np.random.rand(1) - 0.5))
coords = coords @ R @ R_n
return coords
class RandomTranslation:
def __init__(self, max_delta=0.05):
self.max_delta = max_delta
def __call__(self, coords):
trans = self.max_delta * np.random.randn(1, 3)
return coords + trans.astype(np.float32)
class RandomScale:
def __init__(self, min, max):
self.scale = max - min
self.bias = min
def __call__(self, coords):
s = self.scale * np.random.rand(1) + self.bias
return coords * s.astype(np.float32)
class RandomShear:
def __init__(self, delta=0.1):
self.delta = delta
def __call__(self, coords):
T = np.eye(3) + self.delta * np.random.randn(3, 3)
return coords @ T.astype(np.float32)
class JitterPoints:
def __init__(self, sigma=0.01, clip=None, p=1.):
assert 0 < p <= 1.
assert sigma > 0.
self.sigma = sigma
self.clip = clip
self.p = p
def __call__(self, e):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
sample_shape = (e.shape[0],)
if self.p < 1.:
# Create a mask for points to jitter
m = torch.distributions.categorical.Categorical(probs=torch.tensor([1 - self.p, self.p]))
mask = m.sample(sample_shape=sample_shape)
else:
            mask = torch.ones(sample_shape, dtype=torch.int64)
mask = mask == 1
jitter = self.sigma * torch.randn_like(e[mask])
if self.clip is not None:
jitter = torch.clamp(jitter, min=-self.clip, max=self.clip)
e[mask] = e[mask] + jitter
return e
class RemoveRandomPoints:
def __init__(self, r):
if type(r) is list or type(r) is tuple:
assert len(r) == 2
assert 0 <= r[0] <= 1
assert 0 <= r[1] <= 1
self.r_min = float(r[0])
self.r_max = float(r[1])
else:
assert 0 <= r <= 1
self.r_min = None
self.r_max = float(r)
def __call__(self, e):
n = len(e)
if self.r_min is None:
r = self.r_max
else:
# Randomly select removal ratio
r = random.uniform(self.r_min, self.r_max)
mask = np.random.choice(range(n), size=int(n*r), replace=False) # select elements to remove
e[mask] = torch.zeros_like(e[mask])
return e
class RemoveRandomBlock:
"""
Randomly remove part of the point cloud. Similar to PyTorch RandomErasing but operating on 3D point clouds.
Erases fronto-parallel cuboid.
Instead of erasing we set coords of removed points to (0, 0, 0) to retain the same number of points
"""
def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3)):
self.p = p
self.scale = scale
self.ratio = ratio
def get_params(self, coords):
# Find point cloud 3D bounding box
flattened_coords = coords.view(-1, 3)
min_coords, _ = torch.min(flattened_coords, dim=0)
max_coords, _ = torch.max(flattened_coords, dim=0)
span = max_coords - min_coords
area = span[0] * span[1]
erase_area = random.uniform(self.scale[0], self.scale[1]) * area
aspect_ratio = random.uniform(self.ratio[0], self.ratio[1])
h = math.sqrt(erase_area * aspect_ratio)
w = math.sqrt(erase_area / aspect_ratio)
x = min_coords[0] + random.uniform(0, 1) * (span[0] - w)
y = min_coords[1] + random.uniform(0, 1) * (span[1] - h)
return x, y, w, h
def __call__(self, coords):
if random.random() < self.p:
x, y, w, h = self.get_params(coords) # Fronto-parallel cuboid to remove
mask = (x < coords[..., 0]) & (coords[..., 0] < x+w) & (y < coords[..., 1]) & (coords[..., 1] < y+h)
coords[mask] = torch.zeros_like(coords[mask])
return coords
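# Illustrative sketch (not part of the original repo): applying the per-cloud augmentation
# pipeline to a single (N, 3) tensor, as OxfordDataset.__getitem__ does; the cloud below is
# random dummy data in the -1..1 range used by the dataset.
def _demo_train_transform():
    import torch
    transform = TrainTransform(aug_mode=1)
    cloud = torch.rand(4096, 3) * 2 - 1
    return transform(cloud)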
| 10,148 | 33.40339 | 115 | py |
MinkLoc3D | MinkLoc3D-master/datasets/samplers.py | # Author: Jacek Komorowski
# Warsaw University of Technology
import random
import copy
from torch.utils.data import Sampler
from datasets.oxford import OxfordDataset
class ListDict(object):
def __init__(self, items=None):
if items is not None:
self.items = copy.deepcopy(items)
self.item_to_position = {item: ndx for ndx, item in enumerate(items)}
else:
self.items = []
self.item_to_position = {}
def add(self, item):
if item in self.item_to_position:
return
self.items.append(item)
self.item_to_position[item] = len(self.items)-1
def remove(self, item):
position = self.item_to_position.pop(item)
last_item = self.items.pop()
if position != len(self.items):
self.items[position] = last_item
self.item_to_position[last_item] = position
def choose_random(self):
return random.choice(self.items)
def __contains__(self, item):
return item in self.item_to_position
def __iter__(self):
return iter(self.items)
def __len__(self):
return len(self.items)
class BatchSampler(Sampler):
# Sampler returning list of indices to form a mini-batch
# Samples elements in groups consisting of k=2 similar elements (positives)
# Batch has the following structure: item1_1, ..., item1_k, item2_1, ... item2_k, itemn_1, ..., itemn_k
def __init__(self, dataset: OxfordDataset, batch_size: int, batch_size_limit: int = None,
batch_expansion_rate: float = None, max_batches: int = None):
if batch_expansion_rate is not None:
assert batch_expansion_rate > 1., 'batch_expansion_rate must be greater than 1'
assert batch_size <= batch_size_limit, 'batch_size_limit must be greater or equal to batch_size'
self.batch_size = batch_size
self.batch_size_limit = batch_size_limit
self.batch_expansion_rate = batch_expansion_rate
self.max_batches = max_batches
self.dataset = dataset
self.k = 2 # Number of positive examples per group must be 2
if self.batch_size < 2 * self.k:
self.batch_size = 2 * self.k
print('WARNING: Batch too small. Batch size increased to {}.'.format(self.batch_size))
self.batch_idx = [] # Index of elements in each batch (re-generated every epoch)
self.elems_ndx = list(self.dataset.queries) # List of point cloud indexes
def __iter__(self):
# Re-generate batches every epoch
self.generate_batches()
for batch in self.batch_idx:
yield batch
    def __len__(self):
return len(self.batch_idx)
def expand_batch(self):
if self.batch_expansion_rate is None:
print('WARNING: batch_expansion_rate is None')
return
if self.batch_size >= self.batch_size_limit:
return
old_batch_size = self.batch_size
self.batch_size = int(self.batch_size * self.batch_expansion_rate)
self.batch_size = min(self.batch_size, self.batch_size_limit)
print('=> Batch size increased from: {} to {}'.format(old_batch_size, self.batch_size))
def generate_batches(self):
# Generate training/evaluation batches.
# batch_idx holds indexes of elements in each batch as a list of lists
self.batch_idx = []
unused_elements_ndx = ListDict(self.elems_ndx)
current_batch = []
assert self.k == 2, 'sampler can sample only k=2 elements from the same class'
while True:
if len(current_batch) >= self.batch_size or len(unused_elements_ndx) == 0:
# Flush out batch, when it has a desired size, or a smaller batch, when there's no more
# elements to process
if len(current_batch) >= 2*self.k:
# Ensure there're at least two groups of similar elements, otherwise, it would not be possible
# to find negative examples in the batch
                assert len(current_batch) % self.k == 0, 'Incorrect batch size: {}'.format(len(current_batch))
self.batch_idx.append(current_batch)
current_batch = []
if (self.max_batches is not None) and (len(self.batch_idx) >= self.max_batches):
break
if len(unused_elements_ndx) == 0:
break
# Add k=2 similar elements to the batch
selected_element = unused_elements_ndx.choose_random()
unused_elements_ndx.remove(selected_element)
positives = self.dataset.get_positives(selected_element)
if len(positives) == 0:
# Broken dataset element without any positives
continue
unused_positives = [e for e in positives if e in unused_elements_ndx]
# If there're unused elements similar to selected_element, sample from them
# otherwise sample from all similar elements
if len(unused_positives) > 0:
second_positive = random.choice(unused_positives)
unused_elements_ndx.remove(second_positive)
else:
second_positive = random.choice(list(positives))
current_batch += [selected_element, second_positive]
for batch in self.batch_idx:
            assert len(batch) % self.k == 0, 'Incorrect batch size: {}'.format(len(batch))
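# Illustrative sketch (not part of the original repo): the sampler yields lists of dataset
# indices (pairs of positives), which DataLoader forwards to the collate function; 'dataset'
# is assumed to be an already constructed OxfordDataset, and the sizes are placeholders.
def _demo_batch_sampler(dataset):
    from torch.utils.data import DataLoader
    sampler = BatchSampler(dataset, batch_size=16, batch_size_limit=64, batch_expansion_rate=1.4)
    return DataLoader(dataset, batch_sampler=sampler)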
| 5,532 | 38.805755 | 114 | py |
MinkLoc3D | MinkLoc3D-master/layers/pooling.py | # Code taken from: https://github.com/filipradenovic/cnnimageretrieval-pytorch
# and ported to MinkowskiEngine by Jacek Komorowski
import torch
import torch.nn as nn
import MinkowskiEngine as ME
class MAC(nn.Module):
def __init__(self):
super().__init__()
self.f = ME.MinkowskiGlobalMaxPooling()
def forward(self, x: ME.SparseTensor):
x = self.f(x)
return x.F # Return (batch_size, n_features) tensor
class SPoC(nn.Module):
def __init__(self):
super().__init__()
self.f = ME.MinkowskiGlobalAvgPooling()
def forward(self, x: ME.SparseTensor):
x = self.f(x)
return x.F # Return (batch_size, n_features) tensor
class GeM(nn.Module):
def __init__(self, p=3, eps=1e-6):
super(GeM, self).__init__()
self.p = nn.Parameter(torch.ones(1) * p)
self.eps = eps
self.f = ME.MinkowskiGlobalAvgPooling()
def forward(self, x: ME.SparseTensor):
# This implicitly applies ReLU on x (clamps negative values)
temp = ME.SparseTensor(x.F.clamp(min=self.eps).pow(self.p), coordinates=x.C)
temp = self.f(temp) # Apply ME.MinkowskiGlobalAvgPooling
return temp.F.pow(1./self.p) # Return (batch_size, n_features) tensor
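# Illustrative dense-tensor sketch (not part of the original repo) of the GeM formula used
# above: pooled = (mean over points of clamp(x, eps)^p)^(1/p). With large p GeM approaches
# max pooling (MAC); with p=1 it reduces to average pooling (SPoC).
def _demo_gem_dense(features, p=3.0, eps=1e-6):
    # features: (batch_size, n_points, n_features) dense tensor
    return features.clamp(min=eps).pow(p).mean(dim=1).pow(1.0 / p)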
| 1,280 | 30.243902 | 84 | py |
MRE-ISE | MRE-ISE-main/run.py | import argparse
import logging
import sys
sys.path.append("..")
import torch
import numpy as np
import random
from torchvision import transforms
from torch.utils.data import DataLoader
from cores.gene.model import MRE
from transformers import CLIPProcessor, CLIPModel
from transformers import CLIPConfig
from processor.dataset import MREProcessor, MREDataset
from cores.gene.model import Trainer
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from torch.utils.tensorboard import SummaryWriter
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
MODEL_CLASS = {
'bert': (MREProcessor, MREDataset),
}
DATA_PATH = {
'MRE': {'train': 'data/vsg_tsg/ours_train.json',
'dev': 'data/vsg_tsg/ours_valid.json',
'test': 'data/vsg_tsg/ours_test.json',
'vbow': 'data/vsg_tsg/vbow.pk',
'tbow': 'data/vsg_tsg/tbow.pk'
}
}
IMG_PATH = {
'MRE': {'train': 'data/img_org/train/',
'dev': 'data/img_org/val/',
'test': 'data/img_org/test'}}
def set_seed(seed=2021):
"""set random seed"""
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
np.random.seed(seed)
random.seed(seed)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--pretrain_name', default='openai/clip-vit-base-patch32', type=str, help="The name of pretrained model")
parser.add_argument('--dataset_name', default='MRE', type=str, help="The name of example_dataset.")
parser.add_argument('--num_epochs', default=30, type=int, help="Training epochs")
parser.add_argument('--device', default='cuda', type=str, help="cuda or cpu")
parser.add_argument('--batch_size', default=32, type=int, help="batch size")
parser.add_argument('--lr_pretrained', default=2e-5, type=float, help="pre-trained learning rate")
parser.add_argument('--lr_main', default=2e-4, type=float, help="learning rate")
parser.add_argument('--warmup_ratio', default=0.01, type=float)
parser.add_argument('--eval_begin_epoch', default=1, type=int)
    parser.add_argument('--seed', default=1234, type=int, help="random seed, default is 1234")
parser.add_argument('--load_path', default=None, type=str, help="Load model from load_path")
parser.add_argument('--save_path', default='ckpt', type=str, help="save model at save_path")
parser.add_argument('--write_path', default=None, type=str, help="do_test=True, predictions will be write in write_path")
parser.add_argument('--notes', default="", type=str, help="input some remarks for making save path dir.")
parser.add_argument('--do_train', action='store_true') # , action='store_true'
parser.add_argument('--do_test', action='store_true')
parser.add_argument('--do_predict', action='store_true')
parser.add_argument('--max_seq', default=40, type=int)
parser.add_argument('--max_obj', default=40, type=int)
parser.add_argument('--hid_size', default=768, type=int, help="hidden state size")
parser.add_argument('--num_layers', default=2, type=int, help="number of refine layers")
parser.add_argument('--beta', default=0.01, type=float, help="Default is 1e-2")
parser.add_argument("--num_per", type=int, default=16, help="Default is 16")
parser.add_argument("--feature_denoise", type=bool, default=True, help="Default is False.")
parser.add_argument("--top_k", type=int, default=10, help="Default is 10.")
parser.add_argument("--epsilon", type=float, default=0.3, help="Default is 0.3.")
parser.add_argument("--temperature", type=float, default=0.1, help="Default is 0.1.")
parser.add_argument("--graph_skip_conn", type=float, default=0.0, help="Default is 0.0.")
parser.add_argument("--graph_include_self", type=bool, default=True, help="Default is True.")
parser.add_argument("--dropout", type=float, default=0.5, help="Default is 0.0")
parser.add_argument("--graph_type", type=str, default="epsilonNN", help="epsilonNN, KNN, prob")
parser.add_argument("--graph_metric_type", type=str, default="multi_mlp")
parser.add_argument("--repar", type=bool, default=True, help="Default is True.")
parser.add_argument("--threshold", type=float, default=0.2, help="Default is 0.2.")
parser.add_argument("--prior_mode", type=str, default="Gaussian", help="Default is Gaussian.")
parser.add_argument("--is_IB", type=bool, default=True, help="Default is True.")
parser.add_argument("--eta1", type=float, default=0.7, help="Default is 0.7")
parser.add_argument("--eta1", type=float, default=0.9, help="Default is 0.9")
parser.add_argument("--text_bow_size", type=int, default=2000, help="Default is 2000")
parser.add_argument("--visual_bow_size", type=int, default=2000, help="Default is 2000")
parser.add_argument("--neighbor_num", type=int, default=2, help="Default is 2")
parser.add_argument("--topic_keywords_number", type=int, default=10, help="Default is 10")
parser.add_argument("--topic_number", type=int, default=10, help="Default is 10")
args = parser.parse_args()
data_path, img_path, aux_path = DATA_PATH[args.dataset_name], IMG_PATH[args.dataset_name], AUX_PATH[args.dataset_name]
data_process, dataset_class = MODEL_CLASS[args.model_name]
re_path = 'data/ours_rel2id.json'
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])])
    set_seed(args.seed) # set seed, default is 1234
if args.save_path is not None: # make save_path dir
args.save_path = os.path.join(args.save_path, args.model_name, args.dataset_name+"_"+str(args.batch_size)+"_"+str(args.lr)+"_"+args.notes)
if not os.path.exists(args.save_path):
os.makedirs(args.save_path, exist_ok=True)
print(args)
logdir = "logs/" + args.model_name + "_"+args.dataset_name + "_"+str(args.batch_size) + "_" + str(args.lr) + args.notes
writer = SummaryWriter(log_dir=logdir)
if args.do_train:
clip_vit, clip_processor, aux_processor, rcnn_processor = None, None, None, None
clip_processor = CLIPProcessor.from_pretrained(args.vit_name)
aux_processor = CLIPProcessor.from_pretrained(args.vit_name)
aux_processor.feature_extractor.size, aux_processor.feature_extractor.crop_size = args.aux_size, args.aux_size
rcnn_processor = CLIPProcessor.from_pretrained(args.vit_name)
rcnn_processor.feature_extractor.size, rcnn_processor.feature_extractor.crop_size = args.rcnn_size, args.rcnn_size
clip_model = CLIPModel.from_pretrained(args.vit_name)
clip_vit = clip_model.vision_model
clip_text = clip_model.text_model
processor = data_process(data_path, re_path, args.bert_name, args.vit_name, clip_processor=clip_processor, aux_processor=aux_processor, rcnn_processor=rcnn_processor)
train_dataset = dataset_class(processor, transform, img_path, aux_path, args.max_seq, aux_size=args.aux_size, rcnn_size=args.rcnn_size, mode='train', max_obj_num=args.max_obj_num)
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=4, pin_memory=True)
dev_dataset = dataset_class(processor, transform, img_path, aux_path, args.max_seq, aux_size=args.aux_size, rcnn_size=args.rcnn_size, mode='dev', max_obj_num=args.max_obj_num)
dev_dataloader = DataLoader(dev_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
test_dataset = dataset_class(processor, transform, img_path, aux_path, args.max_seq, aux_size=args.aux_size, rcnn_size=args.rcnn_size, mode='test', max_obj_num=args.max_obj_num)
test_dataloader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=4, pin_memory=True)
re_dict = processor.get_relation_dict()
num_labels = len(re_dict)
tokenizer = processor.tokenizer
# test
vision_config = CLIPConfig.from_pretrained(args.vit_name).vision_config
text_config = CLIPConfig.from_pretrained(args.vit_name).text_config
model = MRE(args, vision_config, text_config, clip_vit, clip_text, num_labels,
args.text_bow_size, args.visual_bow_size, tokenizer, processor)
trainer = Trainer(train_data=train_dataloader, dev_data=dev_dataloader, test_data=test_dataloader, re_dict=re_dict, model=model, args=args, logger=logger, writer=writer)
trainer.train()
torch.cuda.empty_cache()
writer.close()
if __name__ == "__main__":
main()
| 9,056 | 52.276471 | 187 | py |
MRE-ISE | MRE-ISE-main/VSG/RelTR_parser/visual_scene_graph.py | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.transforms as T
from PIL import Image
import requests
import matplotlib.pyplot as plt
import json
import pickle
import ast
from tqdm import tqdm
from transformers import CLIPProcessor, CLIPModel
import cv2
from models.backbone import Backbone, Joiner
from models.position_encoding import PositionEmbeddingSine
from models.transformer import Transformer
from models.reltr import RelTR
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
CLASSES = ['N/A', 'airplane', 'animal', 'arm', 'bag', 'banana', 'basket', 'beach', 'bear', 'bed', 'bench', 'bike',
'bird', 'board', 'boat', 'book', 'boot', 'bottle', 'bowl', 'box', 'boy', 'branch', 'building',
'bus', 'cabinet', 'cap', 'car', 'cat', 'chair', 'child', 'clock', 'coat', 'counter', 'cow', 'cup',
'curtain', 'desk', 'dog', 'door', 'drawer', 'ear', 'elephant', 'engine', 'eye', 'face', 'fence',
'finger', 'flag', 'flower', 'food', 'fork', 'fruit', 'giraffe', 'girl', 'glass', 'glove', 'guy',
'hair', 'hand', 'handle', 'hat', 'head', 'helmet', 'hill', 'horse', 'house', 'jacket', 'jean',
'kid', 'kite', 'lady', 'lamp', 'laptop', 'leaf', 'leg', 'letter', 'light', 'logo', 'man', 'men',
'motorcycle', 'mountain', 'mouth', 'neck', 'nose', 'number', 'orange', 'pant', 'paper', 'paw',
'people', 'person', 'phone', 'pillow', 'pizza', 'plane', 'plant', 'plate', 'player', 'pole', 'post',
'pot', 'racket', 'railing', 'rock', 'roof', 'room', 'screen', 'seat', 'sheep', 'shelf', 'shirt',
'shoe', 'short', 'sidewalk', 'sign', 'sink', 'skateboard', 'ski', 'skier', 'sneaker', 'snow',
'sock', 'stand', 'street', 'surfboard', 'table', 'tail', 'tie', 'tile', 'tire', 'toilet', 'towel',
'tower', 'track', 'train', 'tree', 'truck', 'trunk', 'umbrella', 'vase', 'vegetable', 'vehicle',
'wave', 'wheel', 'window', 'windshield', 'wing', 'wire', 'woman', 'zebra']
REL_CLASSES = ['__background__', 'above', 'across', 'against', 'along', 'and', 'at', 'attached to', 'behind',
'belonging to', 'between', 'carrying', 'covered in', 'covering', 'eating', 'flying in', 'for',
'from', 'growing on', 'hanging from', 'has', 'holding', 'in', 'in front of', 'laying on',
'looking at', 'lying on', 'made of', 'mounted on', 'near', 'of', 'on', 'on back of', 'over',
'painted on', 'parked on', 'part of', 'playing', 'riding', 'says', 'sitting on', 'standing on',
'to', 'under', 'using', 'walking in', 'walking on', 'watching', 'wearing', 'wears', 'with']
# for output bounding box post-processing
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=1)
def rescale_bboxes(out_bbox, size):
img_w, img_h = size
b = box_cxcywh_to_xyxy(out_bbox)
x = torch.tensor([img_w, img_h, img_w, img_h], dtype=out_bbox.dtype).to(torch.get_device(out_bbox))
b = b * x
return b
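# Illustrative sketch (not part of the original repo): a (cx, cy, w, h) box of size 100x50
# centred at (200, 150) maps to corner format (150, 125, 250, 175).
def _demo_box_conversion():
    import torch
    boxes = torch.tensor([[200.0, 150.0, 100.0, 50.0]])
    return box_cxcywh_to_xyxy(boxes)    # tensor([[150., 125., 250., 175.]])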
def find_repeat(new_bbox, bbox):
flag = 0
index = 0
def get_list(x):
return [i for i in range(max(0, x-5), x+5)]
for idx, b in enumerate(bbox):
if (new_bbox[0] in get_list(b[0])) and (new_bbox[1] in get_list(b[1])) and (new_bbox[2] in get_list(b[2])) and (new_bbox[3] in get_list(b[3])):
flag = 1
index = idx
break
return flag, index
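# Illustrative sketch (not part of the original repo): find_repeat merges a new detection
# with an existing box when every corner coordinate falls inside the small pixel window
# above, which is how duplicate subject/object boxes become a single graph node below.
def _demo_find_repeat():
    boxes = [(0, 0, 640, 480), (100, 100, 200, 200)]
    flag, index = find_repeat((102, 98, 201, 199), boxes)
    assert flag == 1 and index == 1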
def construct_scene_graph(data, original_img_dir, target_file, mode='train'):
position_embedding = PositionEmbeddingSine(128, normalize=True)
backbone = Backbone('resnet50', False, False, False)
backbone = Joiner(backbone, position_embedding)
backbone.num_channels = 2048
transformer = Transformer(d_model=256, dropout=0.1, nhead=8,
dim_feedforward=2048,
num_encoder_layers=6,
num_decoder_layers=6,
normalize_before=False,
return_intermediate_dec=True)
model = RelTR(backbone, transformer, num_classes=151, num_rel_classes=51,
num_entities=100, num_triplets=200)
# The checkpoint is pretrained on Visual Genome
ckpt = torch.hub.load_state_dict_from_url(
url='https://cloud.tnt.uni-hannover.de/index.php/s/PB8xTKspKZF7fyK/download/checkpoint0149.pth',
# map_location=lambda storage, loc: storage.cuda(0),
check_hash=True)
# map_location='cpu'
model.load_state_dict(ckpt['model'])
model.to(torch.device('cuda'))
# Some transformation functions
transform = T.Compose([
T.Resize(800),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# img_list = os.listdir(dirname)
# img_list = []
# for d in data:
# img_list.append(d['img_id'])
# img_list = ['twitter_stream_2018_10_10_25_0_2_7.jpg']
res_list = []
with torch.no_grad():
model.eval()
for d in tqdm(data, total=len(data)):
# print("1:{}".format(torch.cuda.memory_allocated(0)))
res = dict()
img_id = d['img_id']
# res['img'] = i
im = Image.open(os.path.join(original_img_dir, mode, img_id))
img = transform(im).unsqueeze(0)
img = img.to(torch.device('cuda'))
# propagate through the model
outputs = model(img)
# outputs = dict()
# for k, v in res.items():
# outputs[k] = v.to(torch.device('cpu'))
            keep_thresh = 0.35
            # keep only triplets whose subject, object and predicate confidences all exceed keep_thresh
probas = outputs['rel_logits'].softmax(-1)[0, :, :-1]
probas_sub = outputs['sub_logits'].softmax(-1)[0, :, :-1]
probas_obj = outputs['obj_logits'].softmax(-1)[0, :, :-1]
keep = torch.logical_and(probas.max(-1).values > keep_thresh,
torch.logical_and(probas_sub.max(-1).values > keep_thresh,
probas_obj.max(-1).values > keep_thresh))
# convert boxes from [0; 1] to image scales
sub_bboxes_scaled = rescale_bboxes(outputs['sub_boxes'][0, keep], im.size)
obj_bboxes_scaled = rescale_bboxes(outputs['obj_boxes'][0, keep], im.size)
            keep_queries = torch.nonzero(keep, as_tuple=True)[0]
            topk = keep_queries.size(0)  # keep every query that passed the threshold, ranked below
            indices = torch.argsort(
                -probas[keep_queries].max(-1)[0]
                * probas_sub[keep_queries].max(-1)[0]
                * probas_obj[keep_queries].max(-1)[0]
            )[:topk]
keep_queries = keep_queries[indices]
bbox = [[0, 0, im.width, im.height]]
bbox_attri = ['img']
# s_bbox = []
# s_bbox_attri = []
# o_bbox = []
# o_bbox_attri = []
rel = [{'s_index': 0, 'o_index': 0, 'name': 'self'}]
for idx, s, o in zip(keep_queries, sub_bboxes_scaled[indices], obj_bboxes_scaled[indices]):
(sxmin, symin, sxmax, symax) = [max(0, round(x)) for x in s.tolist()]
sxmax, symax = min(im.width, sxmax), min(im.height, symax)
(oxmin, oymin, oxmax, oymax) = [max(0, round(x)) for x in o.tolist()]
oxmax, oymax = min(im.width, oxmax), min(im.height, oymax)
# new_img = im.new('RGB', size=s)
_flag, _idx = find_repeat((sxmin, symin, sxmax, symax), bbox)
if not _flag:
bbox.append((sxmin, symin, sxmax, symax))
bbox_attri.append(CLASSES[probas_sub[idx].argmax()])
s_index = len(bbox)-1
else:
s_index = _idx
_flag, _idx = find_repeat((oxmin, oymin, oxmax, oymax), bbox)
if not _flag:
bbox.append((oxmin, oymin, oxmax, oymax))
                    bbox_attri.append(CLASSES[probas_obj[idx].argmax()])  # object class comes from the object logits
o_index = len(bbox) - 1
else:
o_index = _idx
# if (sxmin, symin, sxmax, symax) not in bbox:
# bbox.append((sxmin, symin, sxmax, symax))
# bbox_attri.append(CLASSES[probas_sub[idx].argmax()])
# if (oxmin, oymin, oxmax, oymax) not in bbox:
# bbox.append((oxmin, oymin, oxmax, oymax))
# bbox_attri.append(CLASSES[probas_obj[idx].argmax()])
# s_index = bbox.index((sxmin, symin, sxmax, symax))
# o_index = bbox.index((oxmin, oymin, oxmax, oymax))
rel_attri = REL_CLASSES[probas[idx].argmax()]
rel.append({'s_index': s_index, 'o_index': o_index, 'name': rel_attri})
# res['s_bbox'] = s_bbox
# res['s_bbox_attri'] = s_bbox_attri
# res['o_bbox'] = o_bbox
# res['o_bbox_attri'] = o_bbox_attri
res['bbox'] = bbox
res['bbox_attri'] = bbox_attri
res['rel'] = rel
d['VSG'] = res
res_list.append(d)
            # torch.cuda.empty_cache()
assert len(data) == len(res_list)
with open(target_file, 'w', encoding='utf-8') as f:
json.dump(res_list, f)
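# get_obj_features crops every stored bounding box out of its original image, encodes the crop
# with CLIP (ViT-B/32) and writes the per-box feature vectors back into the records as a pickle.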
def get_obj_features(dirname):
vision_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
if torch.cuda.is_available():
vision_model.to(torch.device('cuda'))
with open(os.path.join(os.path.dirname(dirname), os.path.basename(dirname).split('.')[0] + '.json')) as f:
data = json.load(f)
mode = os.path.basename(dirname).split('.')[0].split('_')[1]
with torch.no_grad():
vision_model.eval()
for d in tqdm(data, total=len(data)):
imgid = d['img']
img_path = os.path.join('../../data/img_org', mode, imgid)
bbox = d['bbox']
features = []
for b in bbox:
crop_img = cv2.imread(img_path)
# print(crop_img.shape)
try:
crop_region = crop_img[b[1]:b[3], b[0]:b[2]]
except TypeError as e:
print(e)
print(bbox)
print(b)
print(imgid)
exit(0)
                # cv2.imread returns BGR; convert to RGB before handing the crop to CLIP
                im = Image.fromarray(cv2.cvtColor(crop_region, cv2.COLOR_BGR2RGB), mode="RGB")
# print(im.size)
images = processor(images=im, return_tensors="pt")
images = images.to(torch.device('cuda'))
image_features = vision_model.get_image_features(**images).squeeze()
features.append(image_features.tolist())
d['features'] = features
with open(os.path.join(os.path.dirname(dirname), os.path.basename(dirname).split('.')[0] + '.pk'), 'wb') as fout:
pickle.dump(data, fout)
if __name__ == '__main__':
FILE_DIR = '../data/tsg/'
IMG_DIR = '../data/img_org/'
DIST_DIR = '../data/vsg_tsg/'
for i in ['ours_train.json', 'ours_val.json', 'ours_test.json']:
print(f'parsing {i} ... ')
with open(os.path.join(FILE_DIR, i)) as f:
data = json.load(f)
base_name = os.path.basename(i).split('.')[0]
# dirname = os.path.join(FILE_DIR, i)
mode = i.split('.')[0].split('_')[1]
print(mode)
construct_scene_graph(data, original_img_dir=IMG_DIR, target_file=os.path.join(DIST_DIR, f'{base_name}.json'),
mode=mode)
# get_obj_features(dirname)
# data = [1, 2, 3]
# output_file = os.path.join(os.path.dirname(dirname), os.path.basename(dirname).split('.')[0] + '.json')
# print(output_file)
# with open(output_file, 'w', encoding='utf-8') as f:
# json.dump(data, f)
# with open(os.path.join(os.path.dirname(dirname), os.path.basename(dirname).split('.')[0] + '.pk'), 'wb') as fout:
# pickle.dump(data, fout)
# x = [{'s_index': 0, 'o_index': 0, 'name': 'self'}, {'s_index': 1, 'o_index': 2, 'name': 'wearing'}, {'s_index': 1, 'o_index': 3, 'name': 'wearing'}, {'s_index': 4, 'o_index': 5, 'name': 'wearing'}, {'s_index': 4, 'o_index': 6, 'name': 'has'}, {'s_index': 4, 'o_index': 7, 'name': 'wearing'}, {'s_index': 8, 'o_index': 9, 'name': 'on'}]
# b = [[i['s_index'], i['o_index']] for i in x]
# print(b)
# with open(os.path.join(os.path.dirname(dirname), os.path.basename(dirname) + '.pk'), 'wb') as fout:
# pickle.dump([1, 2], fout)
| 12,857 | 45.086022 | 341 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/models/eval_rels.py |
from dataloaders.visual_genome import VGDataLoader, VG
import numpy as np
import torch
from config import ModelConfig
from lib.pytorch_misc import optimistic_restore
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from config import BOX_SCALE, IM_SCALE
import dill as pkl
import os
conf = ModelConfig()
if conf.model == 'motifnet':
from lib.rel_model import RelModel
elif conf.model == 'stanford':
from lib.rel_model_stanford import RelModelStanford as RelModel
else:
raise ValueError()
train, val, test = VG.splits(num_val_im=conf.val_size, filter_duplicate_rels=True,
use_proposals=conf.use_proposals,
filter_non_overlap=conf.mode == 'sgdet')
if conf.test:
val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus)
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
use_resnet=conf.use_resnet, order=conf.order,
nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, hidden_dim=conf.hidden_dim,
use_proposals=conf.use_proposals,
pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
pooling_dim=conf.pooling_dim,
rec_dropout=conf.rec_dropout,
use_bias=conf.use_bias,
use_tanh=conf.use_tanh,
limit_vision=conf.limit_vision
)
detector.cuda()
ckpt = torch.load(conf.ckpt)
optimistic_restore(detector, ckpt['state_dict'])
# if conf.mode == 'sgdet':
# det_ckpt = torch.load('checkpoints/new_vgdet/vg-19.tar')['state_dict']
# detector.detector.bbox_fc.weight.data.copy_(det_ckpt['bbox_fc.weight'])
# detector.detector.bbox_fc.bias.data.copy_(det_ckpt['bbox_fc.bias'])
# detector.detector.score_fc.weight.data.copy_(det_ckpt['score_fc.weight'])
# detector.detector.score_fc.bias.data.copy_(det_ckpt['score_fc.bias'])
all_pred_entries = []
def val_batch(batch_num, b, evaluator, thrs=(20, 50, 100)):
det_res = detector[b]
if conf.num_gpus == 1:
det_res = [det_res]
for i, (boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i) in enumerate(det_res):
gt_entry = {
'gt_classes': val.gt_classes[batch_num + i].copy(),
'gt_relations': val.relationships[batch_num + i].copy(),
'gt_boxes': val.gt_boxes[batch_num + i].copy(),
}
assert np.all(objs_i[rels_i[:,0]] > 0) and np.all(objs_i[rels_i[:,1]] > 0)
# assert np.all(rels_i[:,2] > 0)
pred_entry = {
'pred_boxes': boxes_i * BOX_SCALE/IM_SCALE,
'pred_classes': objs_i,
'pred_rel_inds': rels_i,
'obj_scores': obj_scores_i,
'rel_scores': pred_scores_i,
}
all_pred_entries.append(pred_entry)
evaluator[conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
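# Evaluation either replays predictions cached in conf.cache (when that pickle exists) or runs the
# detector over the validation loader; in both cases recall statistics are reported for conf.mode.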
evaluator = BasicSceneGraphEvaluator.all_modes(multiple_preds=conf.multi_pred)
if conf.cache is not None and os.path.exists(conf.cache):
print("Found {}! Loading from it".format(conf.cache))
with open(conf.cache,'rb') as f:
all_pred_entries = pkl.load(f)
for i, pred_entry in enumerate(tqdm(all_pred_entries)):
gt_entry = {
'gt_classes': val.gt_classes[i].copy(),
'gt_relations': val.relationships[i].copy(),
'gt_boxes': val.gt_boxes[i].copy(),
}
evaluator[conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
evaluator[conf.mode].print_stats()
else:
detector.eval()
for val_b, batch in enumerate(tqdm(val_loader)):
val_batch(conf.num_gpus*val_b, batch, evaluator)
evaluator[conf.mode].print_stats()
if conf.cache is not None:
with open(conf.cache,'wb') as f:
pkl.dump(all_pred_entries, f)
| 4,353 | 37.530973 | 89 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/models/train_detector.py | """
Training script for detection
"""
from dataloaders.mscoco import CocoDetection, CocoDataLoader
from dataloaders.visual_genome import VGDataLoader, VG
from lib.object_detector import ObjectDetector
import numpy as np
from torch import optim
import torch
import pandas as pd
import time
import os
from config import ModelConfig, FG_FRACTION, RPN_FG_FRACTION, IM_SCALE, BOX_SCALE
from torch.nn import functional as F
from lib.fpn.box_utils import bbox_loss
import torch.backends.cudnn as cudnn
from pycocotools.cocoeval import COCOeval
from lib.pytorch_misc import optimistic_restore, clip_grad_norm
from torch.optim.lr_scheduler import ReduceLROnPlateau
cudnn.benchmark = True
conf = ModelConfig()
if conf.coco:
train, val = CocoDetection.splits()
val.ids = val.ids[:conf.val_size]
train.ids = train.ids
train_loader, val_loader = CocoDataLoader.splits(train, val, batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus)
else:
train, val, _ = VG.splits(num_val_im=conf.val_size, filter_non_overlap=False,
filter_empty_rels=False, use_proposals=conf.use_proposals)
train_loader, val_loader = VGDataLoader.splits(train, val, batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus)
detector = ObjectDetector(classes=train.ind_to_classes, num_gpus=conf.num_gpus,
mode='rpntrain' if not conf.use_proposals else 'proposals', use_resnet=conf.use_resnet)
detector.cuda()
# Note: if you're doing the stanford setup, you'll need to change this to freeze the lower layers
if conf.use_proposals:
for n, param in detector.named_parameters():
if n.startswith('features'):
param.requires_grad = False
optimizer = optim.SGD([p for p in detector.parameters() if p.requires_grad],
weight_decay=conf.l2, lr=conf.lr * conf.num_gpus * conf.batch_size, momentum=0.9)
scheduler = ReduceLROnPlateau(optimizer, 'max', patience=3, factor=0.1,
verbose=True, threshold=0.001, threshold_mode='abs', cooldown=1)
start_epoch = -1
if conf.ckpt is not None:
ckpt = torch.load(conf.ckpt)
if optimistic_restore(detector, ckpt['state_dict']):
start_epoch = ckpt['epoch']
def train_epoch(epoch_num):
detector.train()
tr = []
start = time.time()
for b, batch in enumerate(train_loader):
tr.append(train_batch(batch))
if b % conf.print_interval == 0 and b >= conf.print_interval:
mn = pd.concat(tr[-conf.print_interval:], axis=1).mean(1)
time_per_batch = (time.time() - start) / conf.print_interval
print("\ne{:2d}b{:5d}/{:5d} {:.3f}s/batch, {:.1f}m/epoch".format(
epoch_num, b, len(train_loader), time_per_batch, len(train_loader) * time_per_batch / 60))
print(mn)
print('-----------', flush=True)
start = time.time()
return pd.concat(tr, axis=1)
def train_batch(b):
"""
:param b: contains:
:param imgs: the image, [batch_size, 3, IM_SIZE, IM_SIZE]
:param all_anchors: [num_anchors, 4] the boxes of all anchors that we'll be using
:param all_anchor_inds: [num_anchors, 2] array of the indices into the concatenated
RPN feature vector that give us all_anchors,
each one (img_ind, fpn_idx)
:param im_sizes: a [batch_size, 4] numpy array of (h, w, scale, num_good_anchors) for each image.
:param num_anchors_per_img: int, number of anchors in total over the feature pyramid per img
Training parameters:
:param train_anchor_inds: a [num_train, 5] array of indices for the anchors that will
be used to compute the training loss (img_ind, fpn_idx)
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
:return:
"""
result = detector[b]
scores = result.od_obj_dists
box_deltas = result.od_box_deltas
labels = result.od_obj_labels
roi_boxes = result.od_box_priors
bbox_targets = result.od_box_targets
rpn_scores = result.rpn_scores
rpn_box_deltas = result.rpn_box_deltas
# detector loss
valid_inds = (labels.data != 0).nonzero().squeeze(1)
fg_cnt = valid_inds.size(0)
bg_cnt = labels.size(0) - fg_cnt
class_loss = F.cross_entropy(scores, labels)
# No gather_nd in pytorch so instead convert first 2 dims of tensor to 1d
box_reg_mult = 2 * (1. / FG_FRACTION) * fg_cnt / (fg_cnt + bg_cnt + 1e-4)
twod_inds = valid_inds * box_deltas.size(1) + labels[valid_inds].data
box_loss = bbox_loss(roi_boxes[valid_inds], box_deltas.view(-1, 4)[twod_inds],
bbox_targets[valid_inds]) * box_reg_mult
loss = class_loss + box_loss
# RPN loss
if not conf.use_proposals:
train_anchor_labels = b.train_anchor_labels[:, -1]
train_anchors = b.train_anchors[:, :4]
train_anchor_targets = b.train_anchors[:, 4:]
train_valid_inds = (train_anchor_labels.data == 1).nonzero().squeeze(1)
rpn_class_loss = F.cross_entropy(rpn_scores, train_anchor_labels)
# print("{} fg {} bg, ratio of {:.3f} vs {:.3f}. RPN {}fg {}bg ratio of {:.3f} vs {:.3f}".format(
# fg_cnt, bg_cnt, fg_cnt / (fg_cnt + bg_cnt + 1e-4), FG_FRACTION,
# train_valid_inds.size(0), train_anchor_labels.size(0)-train_valid_inds.size(0),
# train_valid_inds.size(0) / (train_anchor_labels.size(0) + 1e-4), RPN_FG_FRACTION), flush=True)
rpn_box_mult = 2 * (1. / RPN_FG_FRACTION) * train_valid_inds.size(0) / (train_anchor_labels.size(0) + 1e-4)
rpn_box_loss = bbox_loss(train_anchors[train_valid_inds],
rpn_box_deltas[train_valid_inds],
train_anchor_targets[train_valid_inds]) * rpn_box_mult
loss += rpn_class_loss + rpn_box_loss
res = pd.Series([rpn_class_loss.data[0], rpn_box_loss.data[0],
class_loss.data[0], box_loss.data[0], loss.data[0]],
['rpn_class_loss', 'rpn_box_loss', 'class_loss', 'box_loss', 'total'])
else:
res = pd.Series([class_loss.data[0], box_loss.data[0], loss.data[0]],
['class_loss', 'box_loss', 'total'])
optimizer.zero_grad()
loss.backward()
clip_grad_norm(
[(n, p) for n, p in detector.named_parameters() if p.grad is not None],
max_norm=conf.clip, clip=True)
optimizer.step()
return res
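# Validation scores the accumulated detections with the COCO API; coco_eval.stats[1] is the AP at
# IoU=0.50, which ReduceLROnPlateau (mode='max') tracks between epochs.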
def val_epoch():
detector.eval()
    # Accumulate one (img_ind, box, score, class) row per detection across the whole
    # validation set, then hand the stacked array to the COCO evaluation API.
vr = []
for val_b, batch in enumerate(val_loader):
vr.append(val_batch(val_b, batch))
vr = np.concatenate(vr, 0)
if vr.shape[0] == 0:
print("No detections anywhere")
return 0.0
val_coco = val.coco
coco_dt = val_coco.loadRes(vr)
coco_eval = COCOeval(val_coco, coco_dt, 'bbox')
coco_eval.params.imgIds = val.ids if conf.coco else [x for x in range(len(val))]
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
mAp = coco_eval.stats[1]
return mAp
def val_batch(batch_num, b):
result = detector[b]
if result is None:
return np.zeros((0, 7))
scores_np = result.obj_scores.data.cpu().numpy()
cls_preds_np = result.obj_preds.data.cpu().numpy()
boxes_np = result.boxes_assigned.data.cpu().numpy()
im_inds_np = result.im_inds.data.cpu().numpy()
im_scales = b.im_sizes.reshape((-1, 3))[:, 2]
if conf.coco:
boxes_np /= im_scales[im_inds_np][:, None]
boxes_np[:, 2:4] = boxes_np[:, 2:4] - boxes_np[:, 0:2] + 1
cls_preds_np[:] = [val.ind_to_id[c_ind] for c_ind in cls_preds_np]
im_inds_np[:] = [val.ids[im_ind + batch_num * conf.batch_size * conf.num_gpus]
for im_ind in im_inds_np]
else:
boxes_np *= BOX_SCALE / IM_SCALE
boxes_np[:, 2:4] = boxes_np[:, 2:4] - boxes_np[:, 0:2] + 1
im_inds_np += batch_num * conf.batch_size * conf.num_gpus
return np.column_stack((im_inds_np, boxes_np, scores_np, cls_preds_np))
print("Training starts now!")
for epoch in range(start_epoch + 1, start_epoch + 1 + conf.num_epochs):
rez = train_epoch(epoch)
print("overall{:2d}: ({:.3f})\n{}".format(epoch, rez.mean(1)['total'], rez.mean(1)), flush=True)
mAp = val_epoch()
scheduler.step(mAp)
torch.save({
'epoch': epoch,
'state_dict': detector.state_dict(),
'optimizer': optimizer.state_dict(),
}, os.path.join(conf.save_dir, '{}-{}.tar'.format('coco' if conf.coco else 'vg', epoch)))
| 9,155 | 40.808219 | 115 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/models/eval_rel_count.py | """
Baseline model that works by simply iterating through the training set to make a dictionary.
Also, caches this (we can use this for training).
The model is quite simple, so we don't use the base train/test code
"""
from dataloaders.visual_genome import VGDataLoader, VG
from lib.object_detector import ObjectDetector
import numpy as np
import torch
import os
from lib.get_dataset_counts import get_counts, box_filter
from config import ModelConfig, FG_FRACTION, RPN_FG_FRACTION, DATA_PATH, BOX_SCALE, IM_SCALE, PROPOSAL_FN
import torch.backends.cudnn as cudnn
from lib.pytorch_misc import optimistic_restore, nonintersecting_2d_inds
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from copy import deepcopy
import dill as pkl
cudnn.benchmark = True
conf = ModelConfig()
MUST_OVERLAP = False
train, val, test = VG.splits(num_val_im=conf.val_size, filter_non_overlap=MUST_OVERLAP,
filter_duplicate_rels=True,
use_proposals=conf.use_proposals)
if conf.test:
print("test data!")
val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus)
fg_matrix, bg_matrix = get_counts(train_data=train, must_overlap=MUST_OVERLAP)
detector = ObjectDetector(classes=train.ind_to_classes, num_gpus=conf.num_gpus,
mode='rpntrain' if not conf.use_proposals else 'proposals', use_resnet=conf.use_resnet,
nms_filter_duplicates=True, thresh=0.01)
detector.eval()
detector.cuda()
classifier = ObjectDetector(classes=train.ind_to_classes, num_gpus=conf.num_gpus,
mode='gtbox', use_resnet=conf.use_resnet,
nms_filter_duplicates=True, thresh=0.01)
classifier.eval()
classifier.cuda()
ckpt = torch.load(conf.ckpt)
mismatch = optimistic_restore(detector, ckpt['state_dict'])
mismatch = optimistic_restore(classifier, ckpt['state_dict'])
MOST_COMMON_MODE = True
if MOST_COMMON_MODE:
prob_matrix = fg_matrix.astype(np.float32)
prob_matrix[:,:,0] = bg_matrix
# TRYING SOMETHING NEW.
prob_matrix[:,:,0] += 1
prob_matrix /= np.sum(prob_matrix, 2)[:,:,None]
# prob_matrix /= float(fg_matrix.max())
np.save(os.path.join(DATA_PATH, 'pred_stats.npy'), prob_matrix)
prob_matrix[:,:,0] = 0 # Zero out BG
else:
prob_matrix = fg_matrix.astype(np.float64)
prob_matrix = prob_matrix / prob_matrix.max(2)[:,:,None]
np.save(os.path.join(DATA_PATH, 'pred_dist.npy'), prob_matrix)
# It's test time!
def predict(boxes, classes):
relation_possibilities_ = np.array(box_filter(boxes, must_overlap=MUST_OVERLAP), dtype=int)
full_preds = np.zeros((boxes.shape[0], boxes.shape[0], train.num_predicates))
for o1, o2 in relation_possibilities_:
c1, c2 = classes[[o1, o2]]
full_preds[o1, o2] = prob_matrix[c1, c2]
full_preds[:,:,0] = 0.0 # Zero out BG.
return full_preds
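# predict() implements the frequency baseline: for every candidate object pair it looks up the
# predicate distribution estimated from training-set co-occurrence counts (prob_matrix), with the
# background predicate zeroed out.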
# ##########################################################################################
# ##########################################################################################
# For visualizing / exploring
c_to_ind = {c: i for i, c in enumerate(train.ind_to_classes)}
def gimme_the_dist(c1name, c2name):
c1 = c_to_ind[c1name]
c2 = c_to_ind[c2name]
dist = prob_matrix[c1, c2]
argz = np.argsort(-dist)
for i, a in enumerate(argz):
if dist[a] > 0.0:
print("{:3d}: {:10s} ({:.4f})".format(i, train.ind_to_predicates[a], dist[a]))
counts = np.zeros((train.num_classes, train.num_classes, train.num_predicates), dtype=np.int64)
for ex_ind in tqdm(range(len(val))):
gt_relations = val.relationships[ex_ind].copy()
gt_classes = val.gt_classes[ex_ind].copy()
o1o2 = gt_classes[gt_relations[:, :2]].tolist()
for (o1, o2), pred in zip(o1o2, gt_relations[:, 2]):
counts[o1, o2, pred] += 1
zeroshot_case = counts[np.where(prob_matrix == 0)].sum() / float(counts.sum())
max_inds = prob_matrix.argmax(2).ravel()
max_counts = counts.reshape(-1, 51)[np.arange(max_inds.shape[0]), max_inds]
most_freq_port = max_counts.sum()/float(counts.sum())
print(" Rel acc={:.2f}%, {:.2f}% zsl".format(
most_freq_port*100, zeroshot_case*100))
# ##########################################################################################
# ##########################################################################################
T = len(val)
evaluator = BasicSceneGraphEvaluator.all_modes(multiple_preds=conf.multi_pred)
# First do detection results
img_offset = 0
all_pred_entries = {'sgdet':[], 'sgcls':[], 'predcls':[]}
for val_b, b in enumerate(tqdm(val_loader)):
det_result = detector[b]
img_ids = b.gt_classes_primary.data.cpu().numpy()[:,0]
scores_np = det_result.obj_scores.data.cpu().numpy()
cls_preds_np = det_result.obj_preds.data.cpu().numpy()
boxes_np = det_result.boxes_assigned.data.cpu().numpy()* BOX_SCALE/IM_SCALE
# boxpriors_np = det_result.box_priors.data.cpu().numpy()
im_inds_np = det_result.im_inds.data.cpu().numpy() + img_offset
for img_i in np.unique(img_ids + img_offset):
gt_entry = {
'gt_classes': val.gt_classes[img_i].copy(),
'gt_relations': val.relationships[img_i].copy(),
'gt_boxes': val.gt_boxes[img_i].copy(),
}
pred_boxes = boxes_np[im_inds_np == img_i]
pred_classes = cls_preds_np[im_inds_np == img_i]
obj_scores = scores_np[im_inds_np == img_i]
all_rels = nonintersecting_2d_inds(pred_boxes.shape[0])
fp = predict(pred_boxes, pred_classes)
fp_pred = fp[all_rels[:,0], all_rels[:,1]]
scores = np.column_stack((
obj_scores[all_rels[:,0]],
obj_scores[all_rels[:,1]],
fp_pred.max(1)
)).prod(1)
sorted_inds = np.argsort(-scores)
sorted_inds = sorted_inds[scores[sorted_inds] > 0] #[:100]
pred_entry = {
'pred_boxes': pred_boxes,
'pred_classes': pred_classes,
'obj_scores': obj_scores,
'pred_rel_inds': all_rels[sorted_inds],
'rel_scores': fp_pred[sorted_inds],
}
all_pred_entries['sgdet'].append(pred_entry)
evaluator['sgdet'].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
img_offset += img_ids.max() + 1
evaluator['sgdet'].print_stats()
# -----------------------------------------------------------------------------------------
# EVAL CLS AND SG
img_offset = 0
for val_b, b in enumerate(tqdm(val_loader)):
det_result = classifier[b]
scores, cls_preds = det_result.rm_obj_dists[:,1:].data.max(1)
scores_np = scores.cpu().numpy()
cls_preds_np = (cls_preds+1).cpu().numpy()
img_ids = b.gt_classes_primary.data.cpu().numpy()[:,0]
boxes_np = b.gt_boxes_primary.data.cpu().numpy()
im_inds_np = det_result.im_inds.data.cpu().numpy() + img_offset
for img_i in np.unique(img_ids + img_offset):
gt_entry = {
'gt_classes': val.gt_classes[img_i].copy(),
'gt_relations': val.relationships[img_i].copy(),
'gt_boxes': val.gt_boxes[img_i].copy(),
}
pred_boxes = boxes_np[im_inds_np == img_i]
pred_classes = cls_preds_np[im_inds_np == img_i]
obj_scores = scores_np[im_inds_np == img_i]
all_rels = nonintersecting_2d_inds(pred_boxes.shape[0])
fp = predict(pred_boxes, pred_classes)
fp_pred = fp[all_rels[:,0], all_rels[:,1]]
sg_cls_scores = np.column_stack((
obj_scores[all_rels[:,0]],
obj_scores[all_rels[:,1]],
fp_pred.max(1)
)).prod(1)
sg_cls_inds = np.argsort(-sg_cls_scores)
sg_cls_inds = sg_cls_inds[sg_cls_scores[sg_cls_inds] > 0] #[:100]
pred_entry = {
'pred_boxes': pred_boxes,
'pred_classes': pred_classes,
'obj_scores': obj_scores,
'pred_rel_inds': all_rels[sg_cls_inds],
'rel_scores': fp_pred[sg_cls_inds],
}
all_pred_entries['sgcls'].append(deepcopy(pred_entry))
evaluator['sgcls'].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
########################################################
fp = predict(gt_entry['gt_boxes'], gt_entry['gt_classes'])
fp_pred = fp[all_rels[:, 0], all_rels[:, 1]]
pred_cls_scores = fp_pred.max(1)
pred_cls_inds = np.argsort(-pred_cls_scores)
pred_cls_inds = pred_cls_inds[pred_cls_scores[pred_cls_inds] > 0][:100]
pred_entry['pred_rel_inds'] = all_rels[pred_cls_inds]
pred_entry['rel_scores'] = fp_pred[pred_cls_inds]
pred_entry['pred_classes'] = gt_entry['gt_classes']
pred_entry['obj_scores'] = np.ones(pred_entry['pred_classes'].shape[0])
all_pred_entries['predcls'].append(pred_entry)
evaluator['predcls'].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
img_offset += img_ids.max() + 1
evaluator['predcls'].print_stats()
evaluator['sgcls'].print_stats()
for mode, entries in all_pred_entries.items():
with open('caches/freqbaseline-{}-{}.pkl'.format('overlap' if MUST_OVERLAP else 'nonoverlap', mode), 'wb') as f:
pkl.dump(entries, f)
| 9,552 | 36.758893 | 116 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/models/_visualize.py | """
Visualization script. I used this to create the figures in the paper.
WARNING: I haven't tested this in a while. It's possible that some later features I added break things here, but hopefully there should be easy fixes. I'm uploading this on the off chance it might help someone. If you get it to work, let me know (and also send a PR with bugs/etc)
"""
from dataloaders.visual_genome import VGDataLoader, VG
from lib.rel_model import RelModel
import numpy as np
import torch
from config import ModelConfig
from lib.pytorch_misc import optimistic_restore
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from tqdm import tqdm
from config import BOX_SCALE, IM_SCALE
from lib.fpn.box_utils import bbox_overlaps
from collections import defaultdict
from PIL import Image, ImageDraw, ImageFont
import os
from functools import reduce
conf = ModelConfig()
train, val, test = VG.splits(num_val_im=conf.val_size)
if conf.test:
val = test
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus)
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
use_resnet=conf.use_resnet, order=conf.order,
nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, hidden_dim=conf.hidden_dim,
use_proposals=conf.use_proposals,
pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
pooling_dim=conf.pooling_dim,
rec_dropout=conf.rec_dropout,
use_bias=conf.use_bias,
use_tanh=conf.use_tanh,
limit_vision=conf.limit_vision
)
detector.cuda()
ckpt = torch.load(conf.ckpt)
optimistic_restore(detector, ckpt['state_dict'])
############################################ HELPER FUNCTIONS ###################################
def get_cmap(N):
import matplotlib.cm as cmx
import matplotlib.colors as colors
"""Returns a function that maps each index in 0, 1, ... N-1 to a distinct RGB color."""
color_norm = colors.Normalize(vmin=0, vmax=N - 1)
scalar_map = cmx.ScalarMappable(norm=color_norm, cmap='hsv')
def map_index_to_rgb_color(index):
pad = 40
return np.round(np.array(scalar_map.to_rgba(index)) * (255 - pad) + pad)
return map_index_to_rgb_color
cmap = get_cmap(len(train.ind_to_classes) + 1)
def load_unscaled(fn):
""" Loads and scales images so that it's 1024 max-dimension"""
image_unpadded = Image.open(fn).convert('RGB')
im_scale = 1024.0 / max(image_unpadded.size)
image = image_unpadded.resize((int(im_scale * image_unpadded.size[0]), int(im_scale * image_unpadded.size[1])),
resample=Image.BICUBIC)
return image
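# NOTE: the font path below is system-specific (Ubuntu's FreeMonoBold); point it at any TTF
# available on your machine if this file does not exist.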
font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeMonoBold.ttf', 32)
def draw_box(draw, boxx, cls_ind, text_str):
box = tuple([float(b) for b in boxx])
if '-GT' in text_str:
color = (255, 128, 0, 255)
else:
color = (0, 128, 0, 255)
# color = tuple([int(x) for x in cmap(cls_ind)])
    # draw the box outline
draw.line([(box[0], box[1]), (box[2], box[1])], fill=color, width=8)
draw.line([(box[2], box[1]), (box[2], box[3])], fill=color, width=8)
draw.line([(box[2], box[3]), (box[0], box[3])], fill=color, width=8)
draw.line([(box[0], box[3]), (box[0], box[1])], fill=color, width=8)
# draw.rectangle(box, outline=color)
w, h = draw.textsize(text_str, font=font)
x1text = box[0]
y1text = max(box[1] - h, 0)
x2text = min(x1text + w, draw.im.size[0])
y2text = y1text + h
print("drawing {}x{} rectangle at {:.1f} {:.1f} {:.1f} {:.1f}".format(
h, w, x1text, y1text, x2text, y2text))
draw.rectangle((x1text, y1text, x2text, y2text), fill=color)
draw.text((x1text, y1text), text_str, fill='black', font=font)
return draw
def val_epoch():
detector.eval()
evaluator = BasicSceneGraphEvaluator.all_modes()
for val_b, batch in enumerate(tqdm(val_loader)):
val_batch(conf.num_gpus * val_b, batch, evaluator)
evaluator[conf.mode].print_stats()
def val_batch(batch_num, b, evaluator, thrs=(20, 50, 100)):
det_res = detector[b]
# if conf.num_gpus == 1:
# det_res = [det_res]
assert conf.num_gpus == 1
boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i = det_res
gt_entry = {
'gt_classes': val.gt_classes[batch_num].copy(),
'gt_relations': val.relationships[batch_num].copy(),
'gt_boxes': val.gt_boxes[batch_num].copy(),
}
# gt_entry = {'gt_classes': gtc[i], 'gt_relations': gtr[i], 'gt_boxes': gtb[i]}
assert np.all(objs_i[rels_i[:, 0]] > 0) and np.all(objs_i[rels_i[:, 1]] > 0)
# assert np.all(rels_i[:, 2] > 0)
pred_entry = {
'pred_boxes': boxes_i * BOX_SCALE / IM_SCALE,
'pred_classes': objs_i,
'pred_rel_inds': rels_i,
'obj_scores': obj_scores_i,
'rel_scores': pred_scores_i,
}
pred_to_gt, pred_5ples, rel_scores = evaluator[conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
# SET RECALL THRESHOLD HERE
pred_to_gt = pred_to_gt[:20]
pred_5ples = pred_5ples[:20]
# Get a list of objects that match, and GT objects that dont
objs_match = (bbox_overlaps(pred_entry['pred_boxes'], gt_entry['gt_boxes']) >= 0.5) & (
objs_i[:, None] == gt_entry['gt_classes'][None]
)
objs_matched = objs_match.any(1)
has_seen = defaultdict(int)
has_seen_gt = defaultdict(int)
pred_ind2name = {}
gt_ind2name = {}
edges = {}
missededges = {}
badedges = {}
if val.filenames[batch_num].startswith('2343676'):
import ipdb
ipdb.set_trace()
def query_pred(pred_ind):
if pred_ind not in pred_ind2name:
has_seen[objs_i[pred_ind]] += 1
pred_ind2name[pred_ind] = '{}-{}'.format(train.ind_to_classes[objs_i[pred_ind]],
has_seen[objs_i[pred_ind]])
return pred_ind2name[pred_ind]
def query_gt(gt_ind):
gt_cls = gt_entry['gt_classes'][gt_ind]
if gt_ind not in gt_ind2name:
has_seen_gt[gt_cls] += 1
gt_ind2name[gt_ind] = '{}-GT{}'.format(train.ind_to_classes[gt_cls], has_seen_gt[gt_cls])
return gt_ind2name[gt_ind]
matching_pred5ples = pred_5ples[np.array([len(x) > 0 for x in pred_to_gt])]
for fiveple in matching_pred5ples:
head_name = query_pred(fiveple[0])
tail_name = query_pred(fiveple[1])
edges[(head_name, tail_name)] = train.ind_to_predicates[fiveple[4]]
gt_5ples = np.column_stack((gt_entry['gt_relations'][:, :2],
gt_entry['gt_classes'][gt_entry['gt_relations'][:, 0]],
gt_entry['gt_classes'][gt_entry['gt_relations'][:, 1]],
gt_entry['gt_relations'][:, 2],
))
has_match = reduce(np.union1d, pred_to_gt)
for gt in gt_5ples[np.setdiff1d(np.arange(gt_5ples.shape[0]), has_match)]:
# Head and tail
namez = []
for i in range(2):
matching_obj = np.where(objs_match[:, gt[i]])[0]
if matching_obj.size > 0:
name = query_pred(matching_obj[0])
else:
name = query_gt(gt[i])
namez.append(name)
missededges[tuple(namez)] = train.ind_to_predicates[gt[4]]
for fiveple in pred_5ples[np.setdiff1d(np.arange(pred_5ples.shape[0]), matching_pred5ples)]:
if fiveple[0] in pred_ind2name:
if fiveple[1] in pred_ind2name:
badedges[(pred_ind2name[fiveple[0]], pred_ind2name[fiveple[1]])] = train.ind_to_predicates[fiveple[4]]
theimg = load_unscaled(val.filenames[batch_num])
theimg2 = theimg.copy()
draw2 = ImageDraw.Draw(theimg2)
# Fix the names
for pred_ind in pred_ind2name.keys():
draw2 = draw_box(draw2, pred_entry['pred_boxes'][pred_ind],
cls_ind=objs_i[pred_ind],
text_str=pred_ind2name[pred_ind])
for gt_ind in gt_ind2name.keys():
draw2 = draw_box(draw2, gt_entry['gt_boxes'][gt_ind],
cls_ind=gt_entry['gt_classes'][gt_ind],
text_str=gt_ind2name[gt_ind])
recall = int(100 * len(reduce(np.union1d, pred_to_gt)) / gt_entry['gt_relations'].shape[0])
id = '{}-{}'.format(val.filenames[batch_num].split('/')[-1][:-4], recall)
pathname = os.path.join('qualitative', id)
if not os.path.exists(pathname):
os.mkdir(pathname)
theimg.save(os.path.join(pathname, 'img.jpg'), quality=100, subsampling=0)
theimg2.save(os.path.join(pathname, 'imgbox.jpg'), quality=100, subsampling=0)
with open(os.path.join(pathname, 'shit.txt'), 'w') as f:
f.write('good:\n')
for (o1, o2), p in edges.items():
f.write('{} - {} - {}\n'.format(o1, p, o2))
f.write('fn:\n')
for (o1, o2), p in missededges.items():
f.write('{} - {} - {}\n'.format(o1, p, o2))
f.write('shit:\n')
for (o1, o2), p in badedges.items():
f.write('{} - {} - {}\n'.format(o1, p, o2))
mAp = val_epoch()
| 9,693 | 36.867188 | 280 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/models/train_rels.py | """
Training script for scene graph detection. Integrated with my faster rcnn setup
"""
from dataloaders.visual_genome import VGDataLoader, VG
import numpy as np
from torch import optim
import torch
import pandas as pd
import time
import os
from config import ModelConfig, BOX_SCALE, IM_SCALE
from torch.nn import functional as F
from lib.pytorch_misc import optimistic_restore, de_chunkize, clip_grad_norm
from lib.evaluation.sg_eval import BasicSceneGraphEvaluator
from lib.pytorch_misc import print_para
from torch.optim.lr_scheduler import ReduceLROnPlateau
conf = ModelConfig()
if conf.model == 'motifnet':
from lib.rel_model import RelModel
elif conf.model == 'stanford':
from lib.rel_model_stanford import RelModelStanford as RelModel
else:
raise ValueError()
train, val, _ = VG.splits(num_val_im=conf.val_size, filter_duplicate_rels=True,
use_proposals=conf.use_proposals,
filter_non_overlap=conf.mode == 'sgdet')
train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel',
batch_size=conf.batch_size,
num_workers=conf.num_workers,
num_gpus=conf.num_gpus)
detector = RelModel(classes=train.ind_to_classes, rel_classes=train.ind_to_predicates,
num_gpus=conf.num_gpus, mode=conf.mode, require_overlap_det=True,
use_resnet=conf.use_resnet, order=conf.order,
nl_edge=conf.nl_edge, nl_obj=conf.nl_obj, hidden_dim=conf.hidden_dim,
use_proposals=conf.use_proposals,
pass_in_obj_feats_to_decoder=conf.pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=conf.pass_in_obj_feats_to_edge,
pooling_dim=conf.pooling_dim,
rec_dropout=conf.rec_dropout,
use_bias=conf.use_bias,
use_tanh=conf.use_tanh,
limit_vision=conf.limit_vision
)
# Freeze the detector
for n, param in detector.detector.named_parameters():
param.requires_grad = False
print(print_para(detector), flush=True)
def get_optim(lr):
# Lower the learning rate on the VGG fully connected layers by 1/10th. It's a hack, but it helps
    # stabilize training.
fc_params = [p for n,p in detector.named_parameters() if n.startswith('roi_fmap') and p.requires_grad]
non_fc_params = [p for n,p in detector.named_parameters() if not n.startswith('roi_fmap') and p.requires_grad]
params = [{'params': fc_params, 'lr': lr / 10.0}, {'params': non_fc_params}]
# params = [p for n,p in detector.named_parameters() if p.requires_grad]
if conf.adam:
optimizer = optim.Adam(params, weight_decay=conf.l2, lr=lr, eps=1e-3)
else:
optimizer = optim.SGD(params, weight_decay=conf.l2, lr=lr, momentum=0.9)
scheduler = ReduceLROnPlateau(optimizer, 'max', patience=3, factor=0.1,
verbose=True, threshold=0.0001, threshold_mode='abs', cooldown=1)
return optimizer, scheduler
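# The checkpoint is either a full relation model ('vgrel-*.tar', in which case everything including
# the epoch counter is restored) or a plain detector checkpoint, in which case only the detector and
# its VGG fc weights are copied into the relation heads below.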
ckpt = torch.load(conf.ckpt)
if conf.ckpt.split('-')[-2].split('/')[-1] == 'vgrel':
print("Loading EVERYTHING")
start_epoch = ckpt['epoch']
if not optimistic_restore(detector, ckpt['state_dict']):
start_epoch = -1
# optimistic_restore(detector.detector, torch.load('checkpoints/vgdet/vg-28.tar')['state_dict'])
else:
start_epoch = -1
optimistic_restore(detector.detector, ckpt['state_dict'])
detector.roi_fmap[1][0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
detector.roi_fmap[1][3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
detector.roi_fmap[1][0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
detector.roi_fmap[1][3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
detector.roi_fmap_obj[0].weight.data.copy_(ckpt['state_dict']['roi_fmap.0.weight'])
detector.roi_fmap_obj[3].weight.data.copy_(ckpt['state_dict']['roi_fmap.3.weight'])
detector.roi_fmap_obj[0].bias.data.copy_(ckpt['state_dict']['roi_fmap.0.bias'])
detector.roi_fmap_obj[3].bias.data.copy_(ckpt['state_dict']['roi_fmap.3.bias'])
detector.cuda()
def train_epoch(epoch_num):
detector.train()
tr = []
start = time.time()
for b, batch in enumerate(train_loader):
tr.append(train_batch(batch, verbose=b % (conf.print_interval*10) == 0)) #b == 0))
if b % conf.print_interval == 0 and b >= conf.print_interval:
mn = pd.concat(tr[-conf.print_interval:], axis=1).mean(1)
time_per_batch = (time.time() - start) / conf.print_interval
print("\ne{:2d}b{:5d}/{:5d} {:.3f}s/batch, {:.1f}m/epoch".format(
epoch_num, b, len(train_loader), time_per_batch, len(train_loader) * time_per_batch / 60))
print(mn)
print('-----------', flush=True)
start = time.time()
return pd.concat(tr, axis=1)
def train_batch(b, verbose=False):
"""
:param b: contains:
:param imgs: the image, [batch_size, 3, IM_SIZE, IM_SIZE]
:param all_anchors: [num_anchors, 4] the boxes of all anchors that we'll be using
:param all_anchor_inds: [num_anchors, 2] array of the indices into the concatenated
RPN feature vector that give us all_anchors,
each one (img_ind, fpn_idx)
:param im_sizes: a [batch_size, 4] numpy array of (h, w, scale, num_good_anchors) for each image.
:param num_anchors_per_img: int, number of anchors in total over the feature pyramid per img
Training parameters:
:param train_anchor_inds: a [num_train, 5] array of indices for the anchors that will
be used to compute the training loss (img_ind, fpn_idx)
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
:return:
"""
result = detector[b]
losses = {}
losses['class_loss'] = F.cross_entropy(result.rm_obj_dists, result.rm_obj_labels)
losses['rel_loss'] = F.cross_entropy(result.rel_dists, result.rel_labels[:, -1])
loss = sum(losses.values())
optimizer.zero_grad()
loss.backward()
clip_grad_norm(
[(n, p) for n, p in detector.named_parameters() if p.grad is not None],
max_norm=conf.clip, verbose=verbose, clip=True)
losses['total'] = loss
optimizer.step()
res = pd.Series({x: y.data[0] for x, y in losses.items()})
return res
def val_epoch():
detector.eval()
evaluator = BasicSceneGraphEvaluator.all_modes()
for val_b, batch in enumerate(val_loader):
val_batch(conf.num_gpus * val_b, batch, evaluator)
evaluator[conf.mode].print_stats()
return np.mean(evaluator[conf.mode].result_dict[conf.mode + '_recall'][100])
def val_batch(batch_num, b, evaluator):
det_res = detector[b]
if conf.num_gpus == 1:
det_res = [det_res]
for i, (boxes_i, objs_i, obj_scores_i, rels_i, pred_scores_i) in enumerate(det_res):
gt_entry = {
'gt_classes': val.gt_classes[batch_num + i].copy(),
'gt_relations': val.relationships[batch_num + i].copy(),
'gt_boxes': val.gt_boxes[batch_num + i].copy(),
}
assert np.all(objs_i[rels_i[:, 0]] > 0) and np.all(objs_i[rels_i[:, 1]] > 0)
pred_entry = {
'pred_boxes': boxes_i * BOX_SCALE/IM_SCALE,
'pred_classes': objs_i,
'pred_rel_inds': rels_i,
'obj_scores': obj_scores_i,
'rel_scores': pred_scores_i, # hack for now.
}
evaluator[conf.mode].evaluate_scene_graph_entry(
gt_entry,
pred_entry,
)
print("Training starts now!")
optimizer, scheduler = get_optim(conf.lr * conf.num_gpus * conf.batch_size)
for epoch in range(start_epoch + 1, start_epoch + 1 + conf.num_epochs):
rez = train_epoch(epoch)
print("overall{:2d}: ({:.3f})\n{}".format(epoch, rez.mean(1)['total'], rez.mean(1)), flush=True)
if conf.save_dir is not None:
torch.save({
'epoch': epoch,
'state_dict': detector.state_dict(), #{k:v for k,v in detector.state_dict().items() if not k.startswith('detector.')},
# 'optimizer': optimizer.state_dict(),
}, os.path.join(conf.save_dir, '{}-{}.tar'.format('vgrel', epoch)))
mAp = val_epoch()
scheduler.step(mAp)
if any([pg['lr'] <= (conf.lr * conf.num_gpus * conf.batch_size)/99.0 for pg in optimizer.param_groups]):
print("exiting training early", flush=True)
break
| 8,782 | 41.225962 | 130 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/dataloaders/visual_genome.py | """
Dataloaders for the Visual Genome dataset.
"""
import json
import os
import h5py
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
from dataloaders.blob import Blob
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from config import VG_IMAGES, IM_DATA_FN, VG_SGG_FN, VG_SGG_DICT_FN, BOX_SCALE, IM_SCALE, PROPOSAL_FN
from dataloaders.image_transforms import SquarePad, Grayscale, Brightness, Sharpness, Contrast, \
RandomOrder, Hue, random_crop
from collections import defaultdict
from pycocotools.coco import COCO
class VG(Dataset):
def __init__(self, mode, roidb_file=VG_SGG_FN, dict_file=VG_SGG_DICT_FN,
image_file=IM_DATA_FN, filter_empty_rels=True, num_im=-1, num_val_im=5000,
filter_duplicate_rels=True, filter_non_overlap=True,
use_proposals=False):
"""
        Torch dataset for VisualGenome
:param mode: Must be train, test, or val
:param roidb_file: HDF5 containing the GT boxes, classes, and relationships
:param dict_file: JSON Contains mapping of classes/relationships to words
:param image_file: HDF5 containing image filenames
:param filter_empty_rels: True if we filter out images without relationships between
boxes. One might want to set this to false if training a detector.
:param filter_duplicate_rels: Whenever we see a duplicate relationship we'll sample instead
        :param num_im: Number of images in the entire dataset. -1 for all images.
:param num_val_im: Number of images in the validation set (must be less than num_im
unless num_im is -1.)
:param proposal_file: If None, we don't provide proposals. Otherwise file for where we get RPN
proposals
"""
if mode not in ('test', 'train', 'val'):
raise ValueError("Mode must be in test, train, or val. Supplied {}".format(mode))
self.mode = mode
# Initialize
self.roidb_file = roidb_file
self.dict_file = dict_file
self.image_file = image_file
self.filter_non_overlap = filter_non_overlap
self.filter_duplicate_rels = filter_duplicate_rels and self.mode == 'train'
self.split_mask, self.gt_boxes, self.gt_classes, self.relationships = load_graphs(
self.roidb_file, self.mode, num_im, num_val_im=num_val_im,
filter_empty_rels=filter_empty_rels,
filter_non_overlap=self.filter_non_overlap and self.is_train,
)
self.filenames = load_image_filenames(image_file)
self.filenames = [self.filenames[i] for i in np.where(self.split_mask)[0]]
self.ind_to_classes, self.ind_to_predicates = load_info(dict_file)
if use_proposals:
print("Loading proposals", flush=True)
p_h5 = h5py.File(PROPOSAL_FN, 'r')
rpn_rois = p_h5['rpn_rois']
rpn_scores = p_h5['rpn_scores']
rpn_im_to_roi_idx = np.array(p_h5['im_to_roi_idx'][self.split_mask])
rpn_num_rois = np.array(p_h5['num_rois'][self.split_mask])
self.rpn_rois = []
for i in range(len(self.filenames)):
rpn_i = np.column_stack((
rpn_scores[rpn_im_to_roi_idx[i]:rpn_im_to_roi_idx[i] + rpn_num_rois[i]],
rpn_rois[rpn_im_to_roi_idx[i]:rpn_im_to_roi_idx[i] + rpn_num_rois[i]],
))
self.rpn_rois.append(rpn_i)
else:
self.rpn_rois = None
# You could add data augmentation here. But we didn't.
# tform = []
# if self.is_train:
# tform.append(RandomOrder([
# Grayscale(),
# Brightness(),
# Contrast(),
# Sharpness(),
# Hue(),
# ]))
tform = [
SquarePad(),
Resize(IM_SCALE),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
self.transform_pipeline = Compose(tform)
@property
def coco(self):
"""
:return: a Coco-like object that we can use to evaluate detection!
"""
anns = []
for i, (cls_array, box_array) in enumerate(zip(self.gt_classes, self.gt_boxes)):
for cls, box in zip(cls_array.tolist(), box_array.tolist()):
anns.append({
'area': (box[3] - box[1] + 1) * (box[2] - box[0] + 1),
'bbox': [box[0], box[1], box[2] - box[0] + 1, box[3] - box[1] + 1],
'category_id': cls,
'id': len(anns),
'image_id': i,
'iscrowd': 0,
})
fauxcoco = COCO()
fauxcoco.dataset = {
'info': {'description': 'ayy lmao'},
'images': [{'id': i} for i in range(self.__len__())],
'categories': [{'supercategory': 'person',
'id': i, 'name': name} for i, name in enumerate(self.ind_to_classes) if name != '__background__'],
'annotations': anns,
}
fauxcoco.createIndex()
return fauxcoco
@property
def is_train(self):
return self.mode.startswith('train')
@classmethod
def splits(cls, *args, **kwargs):
""" Helper method to generate splits of the example_dataset"""
train = cls('train', *args, **kwargs)
val = cls('val', *args, **kwargs)
test = cls('test', *args, **kwargs)
return train, val, test
def __getitem__(self, index):
image_unpadded = Image.open(self.filenames[index]).convert('RGB')
# Optionally flip the image if we're doing training
flipped = self.is_train and np.random.random() > 0.5
gt_boxes = self.gt_boxes[index].copy()
# Boxes are already at BOX_SCALE
if self.is_train:
# crop boxes that are too large. This seems to be only a problem for image heights, but whatevs
gt_boxes[:, [1, 3]] = gt_boxes[:, [1, 3]].clip(
None, BOX_SCALE / max(image_unpadded.size) * image_unpadded.size[1])
gt_boxes[:, [0, 2]] = gt_boxes[:, [0, 2]].clip(
None, BOX_SCALE / max(image_unpadded.size) * image_unpadded.size[0])
# # crop the image for data augmentation
# image_unpadded, gt_boxes = random_crop(image_unpadded, gt_boxes, BOX_SCALE, round_boxes=True)
w, h = image_unpadded.size
box_scale_factor = BOX_SCALE / max(w, h)
if flipped:
scaled_w = int(box_scale_factor * float(w))
# print("Scaled w is {}".format(scaled_w))
image_unpadded = image_unpadded.transpose(Image.FLIP_LEFT_RIGHT)
gt_boxes[:, [0, 2]] = scaled_w - gt_boxes[:, [2, 0]]
img_scale_factor = IM_SCALE / max(w, h)
if h > w:
im_size = (IM_SCALE, int(w * img_scale_factor), img_scale_factor)
elif h < w:
im_size = (int(h * img_scale_factor), IM_SCALE, img_scale_factor)
else:
im_size = (IM_SCALE, IM_SCALE, img_scale_factor)
gt_rels = self.relationships[index].copy()
if self.filter_duplicate_rels:
# Filter out dupes!
assert self.mode == 'train'
old_size = gt_rels.shape[0]
all_rel_sets = defaultdict(list)
for (o0, o1, r) in gt_rels:
all_rel_sets[(o0, o1)].append(r)
gt_rels = [(k[0], k[1], np.random.choice(v)) for k,v in all_rel_sets.items()]
gt_rels = np.array(gt_rels)
entry = {
'img': self.transform_pipeline(image_unpadded),
'img_size': im_size,
'gt_boxes': gt_boxes,
'gt_classes': self.gt_classes[index].copy(),
'gt_relations': gt_rels,
'scale': IM_SCALE / BOX_SCALE, # Multiply the boxes by this.
'index': index,
'flipped': flipped,
'fn': self.filenames[index],
}
if self.rpn_rois is not None:
entry['proposals'] = self.rpn_rois[index]
assertion_checks(entry)
return entry
def __len__(self):
return len(self.filenames)
@property
def num_predicates(self):
return len(self.ind_to_predicates)
@property
def num_classes(self):
return len(self.ind_to_classes)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# MISC. HELPER FUNCTIONS ~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def assertion_checks(entry):
im_size = tuple(entry['img'].size())
if len(im_size) != 3:
raise ValueError("Img must be dim-3")
c, h, w = entry['img'].size()
if c != 3:
raise ValueError("Must have 3 color channels")
num_gt = entry['gt_boxes'].shape[0]
if entry['gt_classes'].shape[0] != num_gt:
raise ValueError("GT classes and GT boxes must have same number of examples")
assert (entry['gt_boxes'][:, 2] >= entry['gt_boxes'][:, 0]).all()
assert (entry['gt_boxes'] >= -1).all()
def load_image_filenames(image_file, image_dir=VG_IMAGES):
"""
Loads the image filenames from visual genome from the JSON file that contains them.
This matches the preprocessing in scene-graph-TF-release/data_tools/vg_to_imdb.py.
:param image_file: JSON file. Elements contain the param "image_id".
:param image_dir: directory where the VisualGenome images are located
:return: List of filenames corresponding to the good images
"""
with open(image_file, 'r') as f:
im_data = json.load(f)
corrupted_ims = ['1592.jpg', '1722.jpg', '4616.jpg', '4617.jpg']
fns = []
for i, img in enumerate(im_data):
basename = '{}.jpg'.format(img['image_id'])
if basename in corrupted_ims:
continue
filename = os.path.join(image_dir, basename)
if os.path.exists(filename):
fns.append(filename)
assert len(fns) == 108073
return fns
def load_graphs(graphs_file, mode='train', num_im=-1, num_val_im=0, filter_empty_rels=True,
filter_non_overlap=False):
"""
    Load the file containing the GT boxes and relations, as well as the dataset split
:param graphs_file: HDF5
:param mode: (train, val, or test)
:param num_im: Number of images we want
:param num_val_im: Number of validation images
:param filter_empty_rels: (will be filtered otherwise.)
:param filter_non_overlap: If training, filter images that dont overlap.
:return: image_index: numpy array corresponding to the index of images we're using
boxes: List where each element is a [num_gt, 4] array of ground
truth boxes (x1, y1, x2, y2)
gt_classes: List where each element is a [num_gt] array of classes
relationships: List where each element is a [num_r, 3] array of
(box_ind_1, box_ind_2, predicate) relationships
"""
if mode not in ('train', 'val', 'test'):
raise ValueError('{} invalid'.format(mode))
roi_h5 = h5py.File(graphs_file, 'r')
data_split = roi_h5['split'][:]
split = 2 if mode == 'test' else 0
split_mask = data_split == split
# Filter out images without bounding boxes
split_mask &= roi_h5['img_to_first_box'][:] >= 0
if filter_empty_rels:
split_mask &= roi_h5['img_to_first_rel'][:] >= 0
image_index = np.where(split_mask)[0]
if num_im > -1:
image_index = image_index[:num_im]
if num_val_im > 0:
if mode == 'val':
image_index = image_index[:num_val_im]
elif mode == 'train':
image_index = image_index[num_val_im:]
split_mask = np.zeros_like(data_split).astype(bool)
split_mask[image_index] = True
# Get box information
all_labels = roi_h5['labels'][:, 0]
all_boxes = roi_h5['boxes_{}'.format(BOX_SCALE)][:] # will index later
assert np.all(all_boxes[:, :2] >= 0) # sanity check
assert np.all(all_boxes[:, 2:] > 0) # no empty box
# convert from xc, yc, w, h to x1, y1, x2, y2
all_boxes[:, :2] = all_boxes[:, :2] - all_boxes[:, 2:] / 2
all_boxes[:, 2:] = all_boxes[:, :2] + all_boxes[:, 2:]
im_to_first_box = roi_h5['img_to_first_box'][split_mask]
im_to_last_box = roi_h5['img_to_last_box'][split_mask]
im_to_first_rel = roi_h5['img_to_first_rel'][split_mask]
im_to_last_rel = roi_h5['img_to_last_rel'][split_mask]
# load relation labels
_relations = roi_h5['relationships'][:]
_relation_predicates = roi_h5['predicates'][:, 0]
assert (im_to_first_rel.shape[0] == im_to_last_rel.shape[0])
assert (_relations.shape[0] == _relation_predicates.shape[0]) # sanity check
# Get everything by image.
boxes = []
gt_classes = []
relationships = []
for i in range(len(image_index)):
boxes_i = all_boxes[im_to_first_box[i]:im_to_last_box[i] + 1, :]
gt_classes_i = all_labels[im_to_first_box[i]:im_to_last_box[i] + 1]
if im_to_first_rel[i] >= 0:
predicates = _relation_predicates[im_to_first_rel[i]:im_to_last_rel[i] + 1]
obj_idx = _relations[im_to_first_rel[i]:im_to_last_rel[i] + 1] - im_to_first_box[i]
assert np.all(obj_idx >= 0)
assert np.all(obj_idx < boxes_i.shape[0])
rels = np.column_stack((obj_idx, predicates))
else:
assert not filter_empty_rels
rels = np.zeros((0, 3), dtype=np.int32)
if filter_non_overlap:
assert mode == 'train'
inters = bbox_overlaps(boxes_i, boxes_i)
rel_overs = inters[rels[:, 0], rels[:, 1]]
inc = np.where(rel_overs > 0.0)[0]
if inc.size > 0:
rels = rels[inc]
else:
split_mask[image_index[i]] = 0
continue
boxes.append(boxes_i)
gt_classes.append(gt_classes_i)
relationships.append(rels)
return split_mask, boxes, gt_classes, relationships
def load_info(info_file):
"""
Loads the file containing the visual genome label meanings
:param info_file: JSON
:return: ind_to_classes: sorted list of classes
ind_to_predicates: sorted list of predicates
"""
info = json.load(open(info_file, 'r'))
info['label_to_idx']['__background__'] = 0
info['predicate_to_idx']['__background__'] = 0
class_to_ind = info['label_to_idx']
predicate_to_ind = info['predicate_to_idx']
ind_to_classes = sorted(class_to_ind, key=lambda k: class_to_ind[k])
ind_to_predicates = sorted(predicate_to_ind, key=lambda k: predicate_to_ind[k])
return ind_to_classes, ind_to_predicates
def vg_collate(data, num_gpus=3, is_train=False, mode='det'):
assert mode in ('det', 'rel')
blob = Blob(mode=mode, is_train=is_train, num_gpus=num_gpus,
batch_size_per_gpu=len(data) // num_gpus)
for d in data:
blob.append(d)
blob.reduce()
return blob
class VGDataLoader(torch.utils.data.DataLoader):
"""
Iterates through the data, filtering out None,
but also loads everything as a (cuda) variable
"""
@classmethod
def splits(cls, train_data, val_data, batch_size=3, num_workers=1, num_gpus=3, mode='det',
**kwargs):
assert mode in ('det', 'rel')
train_load = cls(
dataset=train_data,
batch_size=batch_size * num_gpus,
shuffle=True,
num_workers=num_workers,
collate_fn=lambda x: vg_collate(x, mode=mode, num_gpus=num_gpus, is_train=True),
drop_last=True,
# pin_memory=True,
**kwargs,
)
val_load = cls(
dataset=val_data,
batch_size=batch_size * num_gpus if mode=='det' else num_gpus,
shuffle=False,
num_workers=num_workers,
collate_fn=lambda x: vg_collate(x, mode=mode, num_gpus=num_gpus, is_train=False),
drop_last=True,
# pin_memory=True,
**kwargs,
)
return train_load, val_load
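# Example usage (a minimal sketch; the batch size, GPU count and validation-image count are placeholders):
# train, val, test = VG.splits(num_val_im=5000, filter_duplicate_rels=True)
# train_loader, val_loader = VGDataLoader.splits(train, val, mode='rel', batch_size=6, num_gpus=1)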
| 16,373 | 37.527059 | 129 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/dataloaders/blob.py | """
Data blob, hopefully to make collating less painful and MGPU training possible
"""
from lib.fpn.anchor_targets import anchor_target_layer
import numpy as np
import torch
from torch.autograd import Variable
class Blob(object):
def __init__(self, mode='det', is_train=False, num_gpus=1, primary_gpu=0, batch_size_per_gpu=3):
"""
Initializes an empty Blob object.
:param mode: 'det' for detection and 'rel' for det+relationship
:param is_train: True if it's training
"""
assert mode in ('det', 'rel')
assert num_gpus >= 1
self.mode = mode
self.is_train = is_train
self.num_gpus = num_gpus
self.batch_size_per_gpu = batch_size_per_gpu
self.primary_gpu = primary_gpu
self.imgs = [] # [num_images, 3, IM_SCALE, IM_SCALE] array
self.im_sizes = [] # [num_images, 4] array of (h, w, scale, num_valid_anchors)
self.all_anchor_inds = [] # [all_anchors, 2] array of (img_ind, anchor_idx). Only has valid
# boxes (meaning some are gonna get cut out)
self.all_anchors = [] # [num_im, IM_SCALE/4, IM_SCALE/4, num_anchors, 4] shapes. Anchors outside get squashed
# to 0
self.gt_boxes = [] # [num_gt, 4] boxes
self.gt_classes = [] # [num_gt,2] array of img_ind, class
self.gt_rels = [] # [num_rels, 3]. Each row is (gtbox0, gtbox1, rel).
self.gt_sents = []
self.gt_nodes = []
self.sent_lengths = []
self.train_anchor_labels = [] # [train_anchors, 5] array of (img_ind, h, w, A, labels)
self.train_anchors = [] # [train_anchors, 8] shapes with anchor, target
self.train_anchor_inds = None # This will be split into GPUs, just (img_ind, h, w, A).
self.batch_size = None
self.gt_box_chunks = None
self.anchor_chunks = None
self.train_chunks = None
self.proposal_chunks = None
self.proposals = []
@property
def is_flickr(self):
return self.mode == 'flickr'
@property
def is_rel(self):
return self.mode == 'rel'
@property
def volatile(self):
return not self.is_train
def append(self, d):
"""
Adds a single image to the blob
        :param d: dict for a single image, as produced by the dataset's __getitem__
:return:
"""
i = len(self.imgs)
self.imgs.append(d['img'])
h, w, scale = d['img_size']
# all anchors
self.im_sizes.append((h, w, scale))
gt_boxes_ = d['gt_boxes'].astype(np.float32) * d['scale']
self.gt_boxes.append(gt_boxes_)
self.gt_classes.append(np.column_stack((
i * np.ones(d['gt_classes'].shape[0], dtype=np.int64),
d['gt_classes'],
)))
# Add relationship info
if self.is_rel:
self.gt_rels.append(np.column_stack((
i * np.ones(d['gt_relations'].shape[0], dtype=np.int64),
d['gt_relations'])))
# Augment with anchor targets
if self.is_train:
train_anchors_, train_anchor_inds_, train_anchor_targets_, train_anchor_labels_ = \
anchor_target_layer(gt_boxes_, (h, w))
self.train_anchors.append(np.hstack((train_anchors_, train_anchor_targets_)))
self.train_anchor_labels.append(np.column_stack((
i * np.ones(train_anchor_inds_.shape[0], dtype=np.int64),
train_anchor_inds_,
train_anchor_labels_,
)))
if 'proposals' in d:
self.proposals.append(np.column_stack((i * np.ones(d['proposals'].shape[0], dtype=np.float32),
d['scale'] * d['proposals'].astype(np.float32))))
def _chunkize(self, datom, tensor=torch.LongTensor):
"""
Turn data list into chunks, one per GPU
:param datom: List of lists of numpy arrays that will be concatenated.
:return:
"""
chunk_sizes = [0] * self.num_gpus
for i in range(self.num_gpus):
for j in range(self.batch_size_per_gpu):
chunk_sizes[i] += datom[i * self.batch_size_per_gpu + j].shape[0]
return Variable(tensor(np.concatenate(datom, 0)), volatile=self.volatile), chunk_sizes
def reduce(self):
""" Merges all the detections into flat lists + numbers of how many are in each"""
if len(self.imgs) != self.batch_size_per_gpu * self.num_gpus:
raise ValueError("Wrong batch size? imgs len {} bsize/gpu {} numgpus {}".format(
len(self.imgs), self.batch_size_per_gpu, self.num_gpus
))
self.imgs = Variable(torch.stack(self.imgs, 0), volatile=self.volatile)
self.im_sizes = np.stack(self.im_sizes).reshape(
(self.num_gpus, self.batch_size_per_gpu, 3))
if self.is_rel:
self.gt_rels, self.gt_rel_chunks = self._chunkize(self.gt_rels)
self.gt_boxes, self.gt_box_chunks = self._chunkize(self.gt_boxes, tensor=torch.FloatTensor)
self.gt_classes, _ = self._chunkize(self.gt_classes)
if self.is_train:
self.train_anchor_labels, self.train_chunks = self._chunkize(self.train_anchor_labels)
self.train_anchors, _ = self._chunkize(self.train_anchors, tensor=torch.FloatTensor)
self.train_anchor_inds = self.train_anchor_labels[:, :-1].contiguous()
if len(self.proposals) != 0:
self.proposals, self.proposal_chunks = self._chunkize(self.proposals, tensor=torch.FloatTensor)
def _scatter(self, x, chunk_sizes, dim=0):
""" Helper function"""
if self.num_gpus == 1:
            return x.cuda(self.primary_gpu, non_blocking=True)
return torch.nn.parallel.scatter_gather.Scatter.apply(
list(range(self.num_gpus)), chunk_sizes, dim, x)
def scatter(self):
""" Assigns everything to the GPUs"""
self.imgs = self._scatter(self.imgs, [self.batch_size_per_gpu] * self.num_gpus)
        self.gt_classes_primary = self.gt_classes.cuda(self.primary_gpu, non_blocking=True)
        self.gt_boxes_primary = self.gt_boxes.cuda(self.primary_gpu, non_blocking=True)
# Predcls might need these
self.gt_classes = self._scatter(self.gt_classes, self.gt_box_chunks)
self.gt_boxes = self._scatter(self.gt_boxes, self.gt_box_chunks)
if self.is_train:
self.train_anchor_inds = self._scatter(self.train_anchor_inds,
self.train_chunks)
            self.train_anchor_labels = self.train_anchor_labels.cuda(self.primary_gpu, non_blocking=True)
            self.train_anchors = self.train_anchors.cuda(self.primary_gpu, non_blocking=True)
if self.is_rel:
self.gt_rels = self._scatter(self.gt_rels, self.gt_rel_chunks)
else:
if self.is_rel:
                self.gt_rels = self.gt_rels.cuda(self.primary_gpu, non_blocking=True)
if self.proposal_chunks is not None:
self.proposals = self._scatter(self.proposals, self.proposal_chunks)
def __getitem__(self, index):
"""
Returns a tuple containing data
:param index: Which GPU we're on, or 0 if no GPUs
:return: If training:
(image, im_size, img_start_ind, anchor_inds, anchors, gt_boxes, gt_classes,
train_anchor_inds)
test:
(image, im_size, img_start_ind, anchor_inds, anchors)
"""
if index not in list(range(self.num_gpus)):
raise ValueError("Out of bounds with index {} and {} gpus".format(index, self.num_gpus))
if self.is_rel:
rels = self.gt_rels
if index > 0 or self.num_gpus != 1:
rels_i = rels[index] if self.is_rel else None
elif self.is_flickr:
rels = (self.gt_sents, self.gt_nodes)
if index > 0 or self.num_gpus != 1:
rels_i = (self.gt_sents[index], self.gt_nodes[index])
else:
rels = None
rels_i = None
if self.proposal_chunks is None:
proposals = None
else:
proposals = self.proposals
if index == 0 and self.num_gpus == 1:
image_offset = 0
if self.is_train:
return (self.imgs, self.im_sizes[0], image_offset,
self.gt_boxes, self.gt_classes, rels, proposals, self.train_anchor_inds)
return self.imgs, self.im_sizes[0], image_offset, self.gt_boxes, self.gt_classes, rels, proposals
# Otherwise proposals is None
assert proposals is None
image_offset = self.batch_size_per_gpu * index
# TODO: Return a namedtuple
if self.is_train:
return (
self.imgs[index], self.im_sizes[index], image_offset,
self.gt_boxes[index], self.gt_classes[index], rels_i, None, self.train_anchor_inds[index])
return (self.imgs[index], self.im_sizes[index], image_offset,
self.gt_boxes[index], self.gt_classes[index], rels_i, None)
| 9,073 | 38.281385 | 118 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/dataloaders/mscoco.py | from config import COCO_PATH, IM_SCALE, BOX_SCALE
import os
from torch.utils.data import Dataset
from pycocotools.coco import COCO
from PIL import Image
from lib.fpn.anchor_targets import anchor_target_layer
from torchvision.transforms import Resize, Compose, ToTensor, Normalize
from dataloaders.image_transforms import SquarePad, Grayscale, Brightness, Sharpness, Contrast, RandomOrder, Hue, random_crop
import numpy as np
from dataloaders.blob import Blob
import torch
class CocoDetection(Dataset):
"""
Adapted from the torchvision code
"""
def __init__(self, mode):
"""
:param mode: train2014 or val2014
"""
self.mode = mode
self.root = os.path.join(COCO_PATH, mode)
self.ann_file = os.path.join(COCO_PATH, 'annotations', 'instances_{}.json'.format(mode))
self.coco = COCO(self.ann_file)
self.ids = [k for k in self.coco.imgs.keys() if len(self.coco.imgToAnns[k]) > 0]
tform = []
if self.is_train:
tform.append(RandomOrder([
Grayscale(),
Brightness(),
Contrast(),
Sharpness(),
Hue(),
]))
tform += [
SquarePad(),
Resize(IM_SCALE),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
self.transform_pipeline = Compose(tform)
self.ind_to_classes = ['__background__'] + [v['name'] for k, v in self.coco.cats.items()]
# COCO inds are weird (84 inds in total but a bunch of numbers are skipped)
self.id_to_ind = {coco_id:(ind+1) for ind, coco_id in enumerate(self.coco.cats.keys())}
self.id_to_ind[0] = 0
self.ind_to_id = {x:y for y,x in self.id_to_ind.items()}
@property
def is_train(self):
return self.mode.startswith('train')
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns: entry dict
"""
img_id = self.ids[index]
path = self.coco.loadImgs(img_id)[0]['file_name']
image_unpadded = Image.open(os.path.join(self.root, path)).convert('RGB')
ann_ids = self.coco.getAnnIds(imgIds=img_id)
anns = self.coco.loadAnns(ann_ids)
gt_classes = np.array([self.id_to_ind[x['category_id']] for x in anns], dtype=np.int64)
if np.any(gt_classes >= len(self.ind_to_classes)):
raise ValueError("OH NO {}".format(index))
if len(anns) == 0:
raise ValueError("Annotations should not be empty")
# gt_boxes = np.array((0, 4), dtype=np.float32)
# else:
gt_boxes = np.array([x['bbox'] for x in anns], dtype=np.float32)
if np.any(gt_boxes[:, [0,1]] < 0):
raise ValueError("GT boxes empty columns")
if np.any(gt_boxes[:, [2,3]] < 0):
raise ValueError("GT boxes empty h/w")
gt_boxes[:, [2, 3]] += gt_boxes[:, [0, 1]]
# Rescale so that the boxes are at BOX_SCALE
if self.is_train:
image_unpadded, gt_boxes = random_crop(image_unpadded,
gt_boxes * BOX_SCALE / max(image_unpadded.size),
BOX_SCALE,
round_boxes=False,
)
else:
# Seems a bit silly because we won't be using GT boxes then but whatever
gt_boxes = gt_boxes * BOX_SCALE / max(image_unpadded.size)
w, h = image_unpadded.size
box_scale_factor = BOX_SCALE / max(w, h)
# Optionally flip the image if we're doing training
flipped = self.is_train and np.random.random() > 0.5
if flipped:
scaled_w = int(box_scale_factor * float(w))
image_unpadded = image_unpadded.transpose(Image.FLIP_LEFT_RIGHT)
gt_boxes[:, [0, 2]] = scaled_w - gt_boxes[:, [2, 0]]
img_scale_factor = IM_SCALE / max(w, h)
if h > w:
im_size = (IM_SCALE, int(w*img_scale_factor), img_scale_factor)
elif h < w:
im_size = (int(h*img_scale_factor), IM_SCALE, img_scale_factor)
else:
im_size = (IM_SCALE, IM_SCALE, img_scale_factor)
entry = {
'img': self.transform_pipeline(image_unpadded),
'img_size': im_size,
'gt_boxes': gt_boxes,
'gt_classes': gt_classes,
'scale': IM_SCALE / BOX_SCALE,
'index': index,
'image_id': img_id,
'flipped': flipped,
'fn': path,
}
return entry
@classmethod
def splits(cls, *args, **kwargs):
""" Helper method to generate splits of the example_dataset"""
train = cls('train2014', *args, **kwargs)
val = cls('val2014', *args, **kwargs)
return train, val
def __len__(self):
return len(self.ids)
def coco_collate(data, num_gpus=3, is_train=False):
blob = Blob(mode='det', is_train=is_train, num_gpus=num_gpus,
batch_size_per_gpu=len(data) // num_gpus)
for d in data:
blob.append(d)
blob.reduce()
return blob
class CocoDataLoader(torch.utils.data.DataLoader):
"""
Iterates through the data, filtering out None,
but also loads everything as a (cuda) variable
"""
# def __iter__(self):
# for x in super(CocoDataLoader, self).__iter__():
# if isinstance(x, tuple) or isinstance(x, list):
# yield tuple(y.cuda(async=True) if hasattr(y, 'cuda') else y for y in x)
# else:
# yield x.cuda(async=True)
@classmethod
def splits(cls, train_data, val_data, batch_size=3, num_workers=1, num_gpus=3, **kwargs):
train_load = cls(
dataset=train_data,
batch_size=batch_size*num_gpus,
shuffle=True,
num_workers=num_workers,
collate_fn=lambda x: coco_collate(x, num_gpus=num_gpus, is_train=True),
drop_last=True,
# pin_memory=True,
**kwargs,
)
val_load = cls(
dataset=val_data,
batch_size=batch_size*num_gpus,
shuffle=False,
num_workers=num_workers,
collate_fn=lambda x: coco_collate(x, num_gpus=num_gpus, is_train=False),
drop_last=True,
# pin_memory=True,
**kwargs,
)
return train_load, val_load
if __name__ == '__main__':
train, val = CocoDetection.splits()
gtbox = train[0]['gt_boxes']
img_size = train[0]['img_size']
anchor_strides, labels, bbox_targets = anchor_target_layer(gtbox, img_size)
| 6,783 | 34.518325 | 125 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/rel_model_stanford.py | """
Let's get the relationships yo
"""
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
from torch.nn import functional as F
from lib.surgery import filter_dets
from lib.fpn.proposal_assignments.rel_assignments import rel_assignments
from lib.pytorch_misc import arange
from lib.object_detector import filter_det
from lib.rel_model import RelModel
MODES = ('sgdet', 'sgcls', 'predcls')
SIZE=512
class RelModelStanford(RelModel):
"""
RELATIONSHIPS
"""
def __init__(self, classes, rel_classes, mode='sgdet', num_gpus=1, require_overlap_det=True,
use_resnet=False, use_proposals=False, **kwargs):
"""
:param classes: Object classes
:param rel_classes: Relationship classes. None if were not using rel mode
        :param num_gpus: how many GPUs to use
"""
super(RelModelStanford, self).__init__(classes, rel_classes, mode=mode, num_gpus=num_gpus,
require_overlap_det=require_overlap_det,
use_resnet=use_resnet,
nl_obj=0, nl_edge=0, use_proposals=use_proposals, thresh=0.01,
pooling_dim=4096)
del self.context
del self.post_lstm
del self.post_emb
self.rel_fc = nn.Linear(SIZE, self.num_rels)
self.obj_fc = nn.Linear(SIZE, self.num_classes)
self.obj_unary = nn.Linear(self.obj_dim, SIZE)
self.edge_unary = nn.Linear(4096, SIZE)
self.edge_gru = nn.GRUCell(input_size=SIZE, hidden_size=SIZE)
self.node_gru = nn.GRUCell(input_size=SIZE, hidden_size=SIZE)
self.n_iter = 3
self.sub_vert_w_fc = nn.Sequential(nn.Linear(SIZE*2, 1), nn.Sigmoid())
self.obj_vert_w_fc = nn.Sequential(nn.Linear(SIZE*2, 1), nn.Sigmoid())
self.out_edge_w_fc = nn.Sequential(nn.Linear(SIZE*2, 1), nn.Sigmoid())
self.in_edge_w_fc = nn.Sequential(nn.Linear(SIZE*2, 1), nn.Sigmoid())
def message_pass(self, rel_rep, obj_rep, rel_inds):
"""
:param rel_rep: [num_rel, fc]
:param obj_rep: [num_obj, fc]
:param rel_inds: [num_rel, 2] of the valid relationships
        :return: object prediction [num_obj, num_classes] and rel prediction [num_rel, num_rels]
"""
# [num_obj, num_rel] with binary!
numer = torch.arange(0, rel_inds.size(0)).long().cuda(rel_inds.get_device())
objs_to_outrels = rel_rep.data.new(obj_rep.size(0), rel_rep.size(0)).zero_()
objs_to_outrels.view(-1)[rel_inds[:, 0] * rel_rep.size(0) + numer] = 1
objs_to_outrels = Variable(objs_to_outrels)
objs_to_inrels = rel_rep.data.new(obj_rep.size(0), rel_rep.size(0)).zero_()
objs_to_inrels.view(-1)[rel_inds[:, 1] * rel_rep.size(0) + numer] = 1
objs_to_inrels = Variable(objs_to_inrels)
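        # objs_to_outrels / objs_to_inrels are [num_obj, num_rel] binary incidence matrices:
        # entry (o, r) is 1 iff object o is the subject (resp. object) of relationship r.
        # They are used below to pool edge messages back into the node GRU (vert_ctx = ... @ ...).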
hx_rel = Variable(rel_rep.data.new(rel_rep.size(0), SIZE).zero_(), requires_grad=False)
hx_obj = Variable(obj_rep.data.new(obj_rep.size(0), SIZE).zero_(), requires_grad=False)
vert_factor = [self.node_gru(obj_rep, hx_obj)]
edge_factor = [self.edge_gru(rel_rep, hx_rel)]
        for i in range(self.n_iter):
# compute edge context
sub_vert = vert_factor[i][rel_inds[:, 0]]
obj_vert = vert_factor[i][rel_inds[:, 1]]
weighted_sub = self.sub_vert_w_fc(
torch.cat((sub_vert, edge_factor[i]), 1)) * sub_vert
weighted_obj = self.obj_vert_w_fc(
torch.cat((obj_vert, edge_factor[i]), 1)) * obj_vert
edge_factor.append(self.edge_gru(weighted_sub + weighted_obj, edge_factor[i]))
# Compute vertex context
pre_out = self.out_edge_w_fc(torch.cat((sub_vert, edge_factor[i]), 1)) * \
edge_factor[i]
pre_in = self.in_edge_w_fc(torch.cat((obj_vert, edge_factor[i]), 1)) * edge_factor[
i]
vert_ctx = objs_to_outrels @ pre_out + objs_to_inrels @ pre_in
vert_factor.append(self.node_gru(vert_ctx, vert_factor[i]))
# woohoo! done
return self.obj_fc(vert_factor[-1]), self.rel_fc(edge_factor[-1])
# self.box_fc(vert_factor[-1]).view(-1, self.num_classes, 4), \
# self.rel_fc(edge_factor[-1])
def forward(self, x, im_sizes, image_offset,
gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None, train_anchor_inds=None,
return_fmap=False):
"""
Forward pass for detection
:param x: Images@[batch_size, 3, IM_SIZE, IM_SIZE]
:param im_sizes: A numpy array of (h, w, scale) for each image.
:param image_offset: Offset onto what image we're on for MGPU training (if single GPU this is 0)
:param gt_boxes:
Training parameters:
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
:param train_anchor_inds: a [num_train, 2] array of indices for the anchors that will
be used to compute the training loss. Each (img_ind, fpn_idx)
:return: If train:
scores, boxdeltas, labels, boxes, boxtargets, rpnscores, rpnboxes, rellabels
if test:
prob dists, boxes, img inds, maxscores, classes
"""
result = self.detector(x, im_sizes, image_offset, gt_boxes, gt_classes, gt_rels, proposals,
train_anchor_inds, return_fmap=True)
if result.is_none():
            raise ValueError("heck")
im_inds = result.im_inds - image_offset
boxes = result.rm_box_priors
if self.training and result.rel_labels is None:
assert self.mode == 'sgdet'
result.rel_labels = rel_assignments(im_inds.data, boxes.data, result.rm_obj_labels.data,
gt_boxes.data, gt_classes.data, gt_rels.data,
image_offset, filter_non_overlap=True, num_sample_per_gt=1)
rel_inds = self.get_rel_inds(result.rel_labels, im_inds, boxes)
rois = torch.cat((im_inds[:, None].float(), boxes), 1)
visual_rep = self.visual_rep(result.fmap, rois, rel_inds[:, 1:])
result.obj_fmap = self.obj_feature_map(result.fmap.detach(), rois)
# Now do the approximation WHEREVER THERES A VALID RELATIONSHIP.
result.rm_obj_dists, result.rel_dists = self.message_pass(
F.relu(self.edge_unary(visual_rep)), self.obj_unary(result.obj_fmap), rel_inds[:, 1:])
# result.box_deltas_update = box_deltas
if self.training:
return result
# Decode here ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if self.mode == 'predcls':
# Hack to get the GT object labels
result.obj_scores = result.rm_obj_dists.data.new(gt_classes.size(0)).fill_(1)
result.obj_preds = gt_classes.data[:, 1]
elif self.mode == 'sgdet':
order, obj_scores, obj_preds= filter_det(F.softmax(result.rm_obj_dists),
result.boxes_all,
start_ind=0,
max_per_img=100,
thresh=0.00,
pre_nms_topn=6000,
post_nms_topn=300,
nms_thresh=0.3,
nms_filter_duplicates=True)
idx, perm = torch.sort(order)
result.obj_preds = rel_inds.new(result.rm_obj_dists.size(0)).fill_(1)
result.obj_scores = result.rm_obj_dists.data.new(result.rm_obj_dists.size(0)).fill_(0)
result.obj_scores[idx] = obj_scores.data[perm]
result.obj_preds[idx] = obj_preds.data[perm]
else:
scores_nz = F.softmax(result.rm_obj_dists).data
scores_nz[:, 0] = 0.0
result.obj_scores, score_ord = scores_nz[:, 1:].sort(dim=1, descending=True)
result.obj_preds = score_ord[:,0] + 1
result.obj_scores = result.obj_scores[:,0]
result.obj_preds = Variable(result.obj_preds)
result.obj_scores = Variable(result.obj_scores)
# Set result's bounding boxes to be size
# [num_boxes, topk, 4] instead of considering every single object assignment.
twod_inds = arange(result.obj_preds.data) * self.num_classes + result.obj_preds.data
if self.mode == 'sgdet':
bboxes = result.boxes_all.view(-1, 4)[twod_inds].view(result.boxes_all.size(0), 4)
else:
# Boxes will get fixed by filter_dets function.
bboxes = result.rm_box_priors
rel_rep = F.softmax(result.rel_dists)
return filter_dets(bboxes, result.obj_scores,
result.obj_preds, rel_inds[:, 1:], rel_rep)
| 9,332 | 44.305825 | 109 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/pytorch_misc.py | """
Miscellaneous functions that might be useful for pytorch
"""
import h5py
import numpy as np
import torch
from torch.autograd import Variable
import os
import dill as pkl
from itertools import tee
from torch import nn
def optimistic_restore(network, state_dict):
mismatch = False
own_state = network.state_dict()
for name, param in state_dict.items():
if name not in own_state:
print("Unexpected key {} in state_dict with size {}".format(name, param.size()))
mismatch = True
elif param.size() == own_state[name].size():
own_state[name].copy_(param)
else:
print("Network has {} with size {}, ckpt has {}".format(name,
own_state[name].size(),
param.size()))
mismatch = True
missing = set(own_state.keys()) - set(state_dict.keys())
if len(missing) > 0:
print("We couldn't find {}".format(','.join(missing)))
mismatch = True
return not mismatch
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def get_ranking(predictions, labels, num_guesses=5):
"""
Given a matrix of predictions and labels for the correct ones, get the number of guesses
required to get the prediction right per example.
:param predictions: [batch_size, range_size] predictions
:param labels: [batch_size] array of labels
:param num_guesses: Number of guesses to return
:return:
"""
assert labels.size(0) == predictions.size(0)
assert labels.dim() == 1
assert predictions.dim() == 2
values, full_guesses = predictions.topk(predictions.size(1), dim=1)
_, ranking = full_guesses.topk(full_guesses.size(1), dim=1, largest=False)
gt_ranks = torch.gather(ranking.data, 1, labels.data[:, None]).squeeze()
guesses = full_guesses[:, :num_guesses]
return gt_ranks, guesses
def cache(f):
"""
Caches a computation
"""
def cache_wrapper(fn, *args, **kwargs):
if os.path.exists(fn):
with open(fn, 'rb') as file:
data = pkl.load(file)
else:
print("file {} not found, so rebuilding".format(fn))
data = f(*args, **kwargs)
with open(fn, 'wb') as file:
pkl.dump(data, file)
return data
return cache_wrapper
class Flattener(nn.Module):
def __init__(self):
"""
Flattens last 3 dimensions to make it only batch size, -1
"""
super(Flattener, self).__init__()
def forward(self, x):
return x.view(x.size(0), -1)
def to_variable(f):
"""
Decorator that pushes all the outputs to a variable
:param f:
:return:
"""
def variable_wrapper(*args, **kwargs):
rez = f(*args, **kwargs)
if isinstance(rez, tuple):
return tuple([Variable(x) for x in rez])
return Variable(rez)
return variable_wrapper
def arange(base_tensor, n=None):
new_size = base_tensor.size(0) if n is None else n
new_vec = base_tensor.new(new_size).long()
torch.arange(0, new_size, out=new_vec)
return new_vec
def to_onehot(vec, num_classes, fill=1000):
"""
Creates a [size, num_classes] torch FloatTensor where
one_hot[i, vec[i]] = fill
:param vec: 1d torch tensor
:param num_classes: int
:param fill: value that we want + and - things to be.
:return:
"""
onehot_result = vec.new(vec.size(0), num_classes).float().fill_(-fill)
arange_inds = vec.new(vec.size(0)).long()
torch.arange(0, vec.size(0), out=arange_inds)
onehot_result.view(-1)[vec + num_classes*arange_inds] = fill
return onehot_result
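# Illustrative example (not from the original repo):
#   to_onehot(torch.LongTensor([2, 0]), num_classes=3, fill=1000)
#   -> [[-1000, -1000,  1000],
#       [ 1000, -1000, -1000]]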
def save_net(fname, net):
h5f = h5py.File(fname, mode='w')
for k, v in list(net.state_dict().items()):
h5f.create_dataset(k, data=v.cpu().numpy())
def load_net(fname, net):
h5f = h5py.File(fname, mode='r')
for k, v in list(net.state_dict().items()):
param = torch.from_numpy(np.asarray(h5f[k]))
if v.size() != param.size():
print("On k={} desired size is {} but supplied {}".format(k, v.size(), param.size()))
else:
v.copy_(param)
def batch_index_iterator(len_l, batch_size, skip_end=True):
"""
Provides indices that iterate over a list
:param len_l: int representing size of thing that we will
iterate over
:param batch_size: size of each batch
:param skip_end: if true, don't iterate over the last batch
:return: A generator that returns (start, end) tuples
as it goes through all batches
"""
iterate_until = len_l
if skip_end:
iterate_until = (len_l // batch_size) * batch_size
for b_start in range(0, iterate_until, batch_size):
yield (b_start, min(b_start+batch_size, len_l))
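# Illustrative example (not from the original repo):
#   list(batch_index_iterator(10, 3))                  -> [(0, 3), (3, 6), (6, 9)]
#   list(batch_index_iterator(10, 3, skip_end=False))  -> [(0, 3), (3, 6), (6, 9), (9, 10)]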
def batch_map(f, a, batch_size):
"""
Maps f over the array a in chunks of batch_size.
:param f: function to be applied. Must take in a block of
(batch_size, dim_a) and map it to (batch_size, something).
:param a: Array to be applied over of shape (num_rows, dim_a).
:param batch_size: size of each array
:return: Array of size (num_rows, something).
"""
rez = []
for s, e in batch_index_iterator(a.size(0), batch_size, skip_end=False):
print("Calling on {}".format(a[s:e].size()))
rez.append(f(a[s:e]))
return torch.cat(rez)
def const_row(fill, l, volatile=False):
input_tok = Variable(torch.LongTensor([fill] * l),volatile=volatile)
if torch.cuda.is_available():
input_tok = input_tok.cuda()
return input_tok
def print_para(model):
"""
Prints parameters of a model
:param opt:
:return:
"""
st = {}
strings = []
total_params = 0
for p_name, p in model.named_parameters():
if not ('bias' in p_name.split('.')[-1] or 'bn' in p_name.split('.')[-1]):
st[p_name] = ([str(x) for x in p.size()], np.prod(p.size()), p.requires_grad)
total_params += np.prod(p.size())
for p_name, (size, prod, p_req_grad) in sorted(st.items(), key=lambda x: -x[1][1]):
strings.append("{:<50s}: {:<16s}({:8d}) ({})".format(
p_name, '[{}]'.format(','.join(size)), prod, 'grad' if p_req_grad else ' '
))
return '\n {:.1f}M total parameters \n ----- \n \n{}'.format(total_params / 1000000.0, '\n'.join(strings))
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def nonintersecting_2d_inds(x):
"""
Returns np.array([(a,b) for a in range(x) for b in range(x) if a != b]) efficiently
:param x: Size
:return: a x*(x-1) array that is [(0,1), (0,2)... (0, x-1), (1,0), (1,2), ..., (x-1, x-2)]
"""
rs = 1 - np.diag(np.ones(x, dtype=np.int32))
relations = np.column_stack(np.where(rs))
return relations
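# Illustrative example (not from the original repo):
#   nonintersecting_2d_inds(3) -> [[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]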
def intersect_2d(x1, x2):
"""
Given two arrays [m1, n], [m2,n], returns a [m1, m2] array where each entry is True if those
rows match.
:param x1: [m1, n] numpy array
:param x2: [m2, n] numpy array
:return: [m1, m2] bool array of the intersections
"""
if x1.shape[1] != x2.shape[1]:
raise ValueError("Input arrays must have same #columns")
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
res = (x1[..., None] == x2.T[None, ...]).all(1)
return res
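# Illustrative example (not from the original repo):
#   intersect_2d(np.array([[1, 2], [3, 4]]), np.array([[3, 4], [1, 2], [5, 6]]))
#   -> [[False,  True, False],
#       [ True, False, False]]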
def np_to_variable(x, is_cuda=True, dtype=torch.FloatTensor):
v = Variable(torch.from_numpy(x).type(dtype))
if is_cuda:
v = v.cuda()
return v
def gather_nd(x, index):
"""
:param x: n dimensional tensor [x0, x1, x2, ... x{n-1}, dim]
:param index: [num, n-1] where each row contains the indices we'll use
:return: [num, dim]
"""
nd = x.dim() - 1
assert nd > 0
assert index.dim() == 2
assert index.size(1) == nd
dim = x.size(-1)
sel_inds = index[:,nd-1].clone()
mult_factor = x.size(nd-1)
for col in range(nd-2, -1, -1): # [n-2, n-3, ..., 1, 0]
sel_inds += index[:,col] * mult_factor
mult_factor *= x.size(col)
grouped = x.view(-1, dim)[sel_inds]
return grouped
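# Illustrative example (not from the original repo): for x of shape [4, 5, dim] and
#   index = [[0, 1], [3, 2]], the result stacks x[0, 1, :] and x[3, 2, :] -> shape [2, dim].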
def enumerate_by_image(im_inds):
im_inds_np = im_inds.cpu().numpy()
initial_ind = int(im_inds_np[0])
s = 0
for i, val in enumerate(im_inds_np):
if val != initial_ind:
yield initial_ind, s, i
initial_ind = int(val)
s = i
yield initial_ind, s, len(im_inds_np)
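# Illustrative example (not from the original repo): for im_inds = [0, 0, 1, 1, 1, 2]
#   the generator yields (0, 0, 2), (1, 2, 5), (2, 5, 6), i.e. (img_ind, start, end) spans.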
# num_im = im_inds[-1] + 1
# # print("Num im is {}".format(num_im))
# for i in range(num_im):
# # print("On i={}".format(i))
# inds_i = (im_inds == i).nonzero()
# if inds_i.dim() == 0:
# continue
# inds_i = inds_i.squeeze(1)
# s = inds_i[0]
# e = inds_i[-1] + 1
# # print("On i={} we have s={} e={}".format(i, s, e))
# yield i, s, e
def diagonal_inds(tensor):
"""
Returns the indices required to go along first 2 dims of tensor in diag fashion
:param tensor: thing
:return:
"""
assert tensor.dim() >= 2
assert tensor.size(0) == tensor.size(1)
size = tensor.size(0)
arange_inds = tensor.new(size).long()
torch.arange(0, tensor.size(0), out=arange_inds)
return (size+1)*arange_inds
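# Illustrative example (not from the original repo): for a 3x3 (or 3x3x...) tensor this
#   returns [0, 4, 8], the flat indices of the main diagonal over the first two dims.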
def enumerate_imsize(im_sizes):
s = 0
for i, (h, w, scale, num_anchors) in enumerate(im_sizes):
na = int(num_anchors)
e = s + na
yield i, s, e, h, w, scale, na
s = e
def argsort_desc(scores):
"""
Returns the indices that sort scores descending in a smart way
:param scores: Numpy array of arbitrary size
:return: an array of size [numel(scores), dim(scores)] where each row is the index you'd
need to get the score.
"""
return np.column_stack(np.unravel_index(np.argsort(-scores.ravel()), scores.shape))
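# Illustrative example (not from the original repo):
#   argsort_desc(np.array([[0.1, 0.9], [0.5, 0.3]])) -> [[0, 1], [1, 0], [1, 1], [0, 0]]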
def unravel_index(index, dims):
unraveled = []
index_cp = index.clone()
for d in dims[::-1]:
unraveled.append(index_cp % d)
index_cp /= d
return torch.cat([x[:,None] for x in unraveled[::-1]], 1)
def de_chunkize(tensor, chunks):
s = 0
for c in chunks:
yield tensor[s:(s+c)]
s = s+c
def random_choose(tensor, num):
"randomly choose indices"
num_choose = min(tensor.size(0), num)
if num_choose == tensor.size(0):
return tensor
# Gotta do this in numpy because of https://github.com/pytorch/pytorch/issues/1868
rand_idx = np.random.choice(tensor.size(0), size=num, replace=False)
rand_idx = torch.LongTensor(rand_idx).cuda(tensor.get_device())
chosen = tensor[rand_idx].contiguous()
# rand_values = tensor.new(tensor.size(0)).float().normal_()
# _, idx = torch.sort(rand_values)
#
# chosen = tensor[idx[:num]].contiguous()
return chosen
def transpose_packed_sequence_inds(lengths):
"""
Goes from a TxB packed sequence to a BxT or vice versa. Assumes that nothing is a variable
:param ps: PackedSequence
:return:
"""
new_inds = []
new_lens = []
cum_add = np.cumsum([0] + lengths)
max_len = lengths[0]
length_pointer = len(lengths) - 1
for i in range(max_len):
while length_pointer > 0 and lengths[length_pointer] <= i:
length_pointer -= 1
new_inds.append(cum_add[:(length_pointer+1)].copy())
cum_add[:(length_pointer+1)] += 1
new_lens.append(length_pointer+1)
new_inds = np.concatenate(new_inds, 0)
return new_inds, new_lens
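# Illustrative example (not from the original repo): for lengths = [3, 2, 1] (BxT packing
#   with rows 0-2 = seq 0, rows 3-4 = seq 1, row 5 = seq 2) this returns
#   new_inds = [0, 3, 5, 1, 4, 2] and new_lens = [3, 2, 1], i.e. the permutation that
#   regroups the flat tensor by timestep (TxB) instead of by sequence.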
def right_shift_packed_sequence_inds(lengths):
"""
:param lengths: e.g. [2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1]
    :return: perm indices for the old stuff (TxB) to shift it right 1 slot so as to accommodate
BOS toks
visual example: of lengths = [4,3,1,1]
before:
a (0) b (4) c (7) d (8)
a (1) b (5)
a (2) b (6)
a (3)
after:
bos a (0) b (4) c (7)
bos a (1)
bos a (2)
bos
"""
cur_ind = 0
inds = []
for (l1, l2) in zip(lengths[:-1], lengths[1:]):
for i in range(l2):
inds.append(cur_ind + i)
cur_ind += l1
return inds
def clip_grad_norm(named_parameters, max_norm, clip=False, verbose=False):
r"""Clips gradient norm of an iterable of parameters.
The norm is computed over all gradients together, as if they were
concatenated into a single vector. Gradients are modified in-place.
Arguments:
parameters (Iterable[Variable]): an iterable of Variables that will have
gradients normalized
max_norm (float or int): max norm of the gradients
Returns:
Total norm of the parameters (viewed as a single vector).
"""
max_norm = float(max_norm)
total_norm = 0
param_to_norm = {}
param_to_shape = {}
for n, p in named_parameters:
if p.grad is not None:
param_norm = p.grad.data.norm(2)
total_norm += param_norm ** 2
param_to_norm[n] = param_norm
param_to_shape[n] = p.size()
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1 and clip:
for _, p in named_parameters:
if p.grad is not None:
p.grad.data.mul_(clip_coef)
if verbose:
print('---Total norm {:.3f} clip coef {:.3f}-----------------'.format(total_norm, clip_coef))
for name, norm in sorted(param_to_norm.items(), key=lambda x: -x[1]):
print("{:<50s}: {:.3f}, ({})".format(name, norm, param_to_shape[name]))
print('-------------------------------', flush=True)
return total_norm
def update_lr(optimizer, lr=1e-4):
print("------ Learning rate -> {}".format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr | 14,457 | 30.430435 | 110 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/get_dataset_counts.py | """
Get counts of all of the examples in the example_dataset. Used for creating the baseline
dictionary model
"""
import numpy as np
from dataloaders.visual_genome import VG
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from lib.pytorch_misc import nonintersecting_2d_inds
def get_counts(train_data=VG(mode='train', filter_duplicate_rels=False, num_val_im=5000), must_overlap=True):
"""
Get counts of all of the relations. Used for modeling directly P(rel | o1, o2)
:param train_data:
:param must_overlap:
:return:
"""
fg_matrix = np.zeros((
train_data.num_classes,
train_data.num_classes,
train_data.num_predicates,
), dtype=np.int64)
bg_matrix = np.zeros((
train_data.num_classes,
train_data.num_classes,
), dtype=np.int64)
for ex_ind in range(len(train_data)):
gt_classes = train_data.gt_classes[ex_ind].copy()
gt_relations = train_data.relationships[ex_ind].copy()
gt_boxes = train_data.gt_boxes[ex_ind].copy()
# For the foreground, we'll just look at everything
o1o2 = gt_classes[gt_relations[:, :2]]
for (o1, o2), gtr in zip(o1o2, gt_relations[:,2]):
fg_matrix[o1, o2, gtr] += 1
# For the background, get all of the things that overlap.
o1o2_total = gt_classes[np.array(
box_filter(gt_boxes, must_overlap=must_overlap), dtype=int)]
for (o1, o2) in o1o2_total:
bg_matrix[o1, o2] += 1
return fg_matrix, bg_matrix
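# Illustrative post-processing sketch (not part of the repo): the counts can be turned into
# an empirical P(rel | o1, o2) table, e.g.
#   fg, bg = get_counts(must_overlap=True)
#   p_rel_given_pair = fg / np.maximum(fg.sum(axis=2, keepdims=True), 1)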
def box_filter(boxes, must_overlap=False):
""" Only include boxes that overlap as possible relations.
If no overlapping boxes, use all of them."""
n_cands = boxes.shape[0]
    overlaps = bbox_overlaps(boxes.astype(np.float64), boxes.astype(np.float64)) > 0
    np.fill_diagonal(overlaps, 0)
    all_possib = np.ones_like(overlaps, dtype=bool)
np.fill_diagonal(all_possib, 0)
if must_overlap:
possible_boxes = np.column_stack(np.where(overlaps))
if possible_boxes.size == 0:
possible_boxes = np.column_stack(np.where(all_possib))
else:
possible_boxes = np.column_stack(np.where(all_possib))
return possible_boxes
if __name__ == '__main__':
fg, bg = get_counts(must_overlap=False)
| 2,293 | 31.309859 | 109 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/resnet.py | import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from torchvision.models.resnet import model_urls, conv3x3, BasicBlock
from torchvision.models.vgg import vgg16
from config import BATCHNORM_MOMENTUM
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, relu_end=True):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BATCHNORM_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BATCHNORM_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4, momentum=BATCHNORM_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.relu_end = relu_end
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
if self.relu_end:
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BATCHNORM_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=1) # HACK
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BATCHNORM_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def resnet101(pretrained=False, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet_l123():
model = resnet101(pretrained=True)
del model.layer4
del model.avgpool
del model.fc
return model
def resnet_l4(relu_end=True):
model = resnet101(pretrained=True)
l4 = model.layer4
if not relu_end:
l4[-1].relu_end = False
l4[0].conv2.stride = (1, 1)
l4[0].downsample[0].stride = (1, 1)
return l4
def vgg_fc(relu_end=True, linear_end=True):
model = vgg16(pretrained=True)
vfc = model.classifier
del vfc._modules['6'] # Get rid of linear layer
del vfc._modules['5'] # Get rid of linear layer
if not relu_end:
del vfc._modules['4'] # Get rid of linear layer
if not linear_end:
del vfc._modules['3']
return vfc
| 4,805 | 31.693878 | 86 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/rel_model.py | """
Let's get the relationships yo
"""
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
from torch.nn import functional as F
from torch.nn.utils.rnn import PackedSequence
from lib.resnet import resnet_l4
from config import BATCHNORM_MOMENTUM
from lib.fpn.nms.functions.nms import apply_nms
# from lib.decoder_rnn import DecoderRNN, lstm_factory, LockedDropout
from lib.lstm.decoder_rnn import DecoderRNN
from lib.lstm.highway_lstm_cuda.alternating_highway_lstm import AlternatingHighwayLSTM
from lib.fpn.box_utils import bbox_overlaps, center_size
from lib.get_union_boxes import UnionBoxesAndFeats
from lib.fpn.proposal_assignments.rel_assignments import rel_assignments
from lib.object_detector import ObjectDetector, gather_res, load_vgg
from lib.pytorch_misc import transpose_packed_sequence_inds, to_onehot, arange, enumerate_by_image, diagonal_inds, Flattener
from lib.sparse_targets import FrequencyBias
from lib.surgery import filter_dets
from lib.word_vectors import obj_edge_vectors
from lib.fpn.roi_align.functions.roi_align import RoIAlignFunction
import math
def _sort_by_score(im_inds, scores):
"""
We'll sort everything scorewise from Hi->low, BUT we need to keep images together
    and sort the LSTM input so that images with more boxes come first (as the packed sequence requires)
:param im_inds: Which im we're on
:param scores: Goodness ranging between [0, 1]. Higher numbers come FIRST
:return: Permutation to put everything in the right order for the LSTM
Inverse permutation
Lengths for the TxB packed sequence.
"""
num_im = im_inds[-1] + 1
rois_per_image = scores.new(num_im)
lengths = []
for i, s, e in enumerate_by_image(im_inds):
rois_per_image[i] = 2 * (s - e) * num_im + i
lengths.append(e - s)
lengths = sorted(lengths, reverse=True)
inds, ls_transposed = transpose_packed_sequence_inds(lengths) # move it to TxB form
inds = torch.LongTensor(inds).cuda(im_inds.get_device())
# ~~~~~~~~~~~~~~~~
# HACKY CODE ALERT!!!
# we're sorting by confidence which is in the range (0,1), but more importantly by longest
# img....
# ~~~~~~~~~~~~~~~~
roi_order = scores - 2 * rois_per_image[im_inds]
_, perm = torch.sort(roi_order, 0, descending=True)
perm = perm[inds]
_, inv_perm = torch.sort(perm)
return perm, inv_perm, ls_transposed
MODES = ('sgdet', 'sgcls', 'predcls')
class LinearizedContext(nn.Module):
"""
Module for computing the object contexts and edge contexts
"""
def __init__(self, classes, rel_classes, mode='sgdet',
embed_dim=200, hidden_dim=256, obj_dim=2048,
nl_obj=2, nl_edge=2, dropout_rate=0.2, order='confidence',
pass_in_obj_feats_to_decoder=True,
pass_in_obj_feats_to_edge=True):
super(LinearizedContext, self).__init__()
self.classes = classes
self.rel_classes = rel_classes
assert mode in MODES
self.mode = mode
self.nl_obj = nl_obj
self.nl_edge = nl_edge
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.obj_dim = obj_dim
self.dropout_rate = dropout_rate
self.pass_in_obj_feats_to_decoder = pass_in_obj_feats_to_decoder
self.pass_in_obj_feats_to_edge = pass_in_obj_feats_to_edge
assert order in ('size', 'confidence', 'random', 'leftright')
self.order = order
# EMBEDDINGS
embed_vecs = obj_edge_vectors(self.classes, wv_dim=self.embed_dim)
self.obj_embed = nn.Embedding(self.num_classes, self.embed_dim)
self.obj_embed.weight.data = embed_vecs.clone()
self.obj_embed2 = nn.Embedding(self.num_classes, self.embed_dim)
self.obj_embed2.weight.data = embed_vecs.clone()
# This probably doesn't help it much
self.pos_embed = nn.Sequential(*[
nn.BatchNorm1d(4, momentum=BATCHNORM_MOMENTUM / 10.0),
nn.Linear(4, 128),
nn.ReLU(inplace=True),
nn.Dropout(0.1),
])
if self.nl_obj > 0:
self.obj_ctx_rnn = AlternatingHighwayLSTM(
input_size=self.obj_dim+self.embed_dim+128,
hidden_size=self.hidden_dim,
num_layers=self.nl_obj,
recurrent_dropout_probability=dropout_rate)
decoder_inputs_dim = self.hidden_dim
if self.pass_in_obj_feats_to_decoder:
decoder_inputs_dim += self.obj_dim + self.embed_dim
self.decoder_rnn = DecoderRNN(self.classes, embed_dim=self.embed_dim,
inputs_dim=decoder_inputs_dim,
hidden_dim=self.hidden_dim,
recurrent_dropout_probability=dropout_rate)
else:
self.decoder_lin = nn.Linear(self.obj_dim + self.embed_dim + 128, self.num_classes)
if self.nl_edge > 0:
input_dim = self.embed_dim
if self.nl_obj > 0:
input_dim += self.hidden_dim
if self.pass_in_obj_feats_to_edge:
input_dim += self.obj_dim
self.edge_ctx_rnn = AlternatingHighwayLSTM(input_size=input_dim,
hidden_size=self.hidden_dim,
num_layers=self.nl_edge,
recurrent_dropout_probability=dropout_rate)
def sort_rois(self, batch_idx, confidence, box_priors):
"""
:param batch_idx: tensor with what index we're on
:param confidence: tensor with confidences between [0,1)
:param boxes: tensor with (x1, y1, x2, y2)
:return: Permutation, inverse permutation, and the lengths transposed (same as _sort_by_score)
"""
cxcywh = center_size(box_priors)
if self.order == 'size':
sizes = cxcywh[:,2] * cxcywh[:, 3]
# sizes = (box_priors[:, 2] - box_priors[:, 0] + 1) * (box_priors[:, 3] - box_priors[:, 1] + 1)
assert sizes.min() > 0.0
scores = sizes / (sizes.max() + 1)
elif self.order == 'confidence':
scores = confidence
elif self.order == 'random':
scores = torch.FloatTensor(np.random.rand(batch_idx.size(0))).cuda(batch_idx.get_device())
elif self.order == 'leftright':
centers = cxcywh[:,0]
scores = centers / (centers.max() + 1)
else:
raise ValueError("invalid mode {}".format(self.order))
return _sort_by_score(batch_idx, scores)
@property
def num_classes(self):
return len(self.classes)
@property
def num_rels(self):
return len(self.rel_classes)
def edge_ctx(self, obj_feats, obj_dists, im_inds, obj_preds, box_priors=None):
"""
Object context and object classification.
:param obj_feats: [num_obj, img_dim + object embedding0 dim]
:param obj_dists: [num_obj, #classes]
:param im_inds: [num_obj] the indices of the images
:return: edge_ctx: [num_obj, #feats] For later!
"""
# Only use hard embeddings
obj_embed2 = self.obj_embed2(obj_preds)
# obj_embed3 = F.softmax(obj_dists, dim=1) @ self.obj_embed3.weight
inp_feats = torch.cat((obj_embed2, obj_feats), 1)
# Sort by the confidence of the maximum detection.
confidence = F.softmax(obj_dists, dim=1).data.view(-1)[
obj_preds.data + arange(obj_preds.data) * self.num_classes]
perm, inv_perm, ls_transposed = self.sort_rois(im_inds.data, confidence, box_priors)
edge_input_packed = PackedSequence(inp_feats[perm], ls_transposed)
edge_reps = self.edge_ctx_rnn(edge_input_packed)[0][0]
# now we're good! unperm
edge_ctx = edge_reps[inv_perm]
return edge_ctx
def obj_ctx(self, obj_feats, obj_dists, im_inds, obj_labels=None, box_priors=None, boxes_per_cls=None):
"""
Object context and object classification.
:param obj_feats: [num_obj, img_dim + object embedding0 dim]
:param obj_dists: [num_obj, #classes]
:param im_inds: [num_obj] the indices of the images
:param obj_labels: [num_obj] the GT labels of the image
:param boxes: [num_obj, 4] boxes. We'll use this for NMS
:return: obj_dists: [num_obj, #classes] new probability distribution.
obj_preds: argmax of that distribution.
obj_final_ctx: [num_obj, #feats] For later!
"""
# Sort by the confidence of the maximum detection.
confidence = F.softmax(obj_dists, dim=1).data[:, 1:].max(1)[0]
perm, inv_perm, ls_transposed = self.sort_rois(im_inds.data, confidence, box_priors)
# Pass object features, sorted by score, into the encoder LSTM
obj_inp_rep = obj_feats[perm].contiguous()
input_packed = PackedSequence(obj_inp_rep, ls_transposed)
encoder_rep = self.obj_ctx_rnn(input_packed)[0][0]
# Decode in order
if self.mode != 'predcls':
decoder_inp = PackedSequence(torch.cat((obj_inp_rep, encoder_rep), 1) if self.pass_in_obj_feats_to_decoder else encoder_rep,
ls_transposed)
obj_dists, obj_preds = self.decoder_rnn(
decoder_inp, #obj_dists[perm],
labels=obj_labels[perm] if obj_labels is not None else None,
boxes_for_nms=boxes_per_cls[perm] if boxes_per_cls is not None else None,
)
obj_preds = obj_preds[inv_perm]
obj_dists = obj_dists[inv_perm]
else:
assert obj_labels is not None
obj_preds = obj_labels
obj_dists = Variable(to_onehot(obj_preds.data, self.num_classes))
encoder_rep = encoder_rep[inv_perm]
return obj_dists, obj_preds, encoder_rep
def forward(self, obj_fmaps, obj_logits, im_inds, obj_labels=None, box_priors=None, boxes_per_cls=None):
"""
Forward pass through the object and edge context
:param obj_priors:
:param obj_fmaps:
:param im_inds:
:param obj_labels:
:param boxes:
:return:
"""
obj_embed = F.softmax(obj_logits, dim=1) @ self.obj_embed.weight
pos_embed = self.pos_embed(Variable(center_size(box_priors)))
obj_pre_rep = torch.cat((obj_fmaps, obj_embed, pos_embed), 1)
if self.nl_obj > 0:
obj_dists2, obj_preds, obj_ctx = self.obj_ctx(
obj_pre_rep,
obj_logits,
im_inds,
obj_labels,
box_priors,
boxes_per_cls,
)
else:
# UNSURE WHAT TO DO HERE
if self.mode == 'predcls':
obj_dists2 = Variable(to_onehot(obj_labels.data, self.num_classes))
else:
obj_dists2 = self.decoder_lin(obj_pre_rep)
if self.mode == 'sgdet' and not self.training:
# NMS here for baseline
probs = F.softmax(obj_dists2, 1)
nms_mask = obj_dists2.data.clone()
nms_mask.zero_()
for c_i in range(1, obj_dists2.size(1)):
scores_ci = probs.data[:, c_i]
boxes_ci = boxes_per_cls.data[:, c_i]
keep = apply_nms(scores_ci, boxes_ci,
pre_nms_topn=scores_ci.size(0), post_nms_topn=scores_ci.size(0),
nms_thresh=0.3)
nms_mask[:, c_i][keep] = 1
obj_preds = Variable(nms_mask * probs.data, volatile=True)[:,1:].max(1)[1] + 1
else:
obj_preds = obj_labels if obj_labels is not None else obj_dists2[:,1:].max(1)[1] + 1
obj_ctx = obj_pre_rep
edge_ctx = None
if self.nl_edge > 0:
edge_ctx = self.edge_ctx(
torch.cat((obj_fmaps, obj_ctx), 1) if self.pass_in_obj_feats_to_edge else obj_ctx,
obj_dists=obj_dists2.detach(), # Was previously obj_logits.
im_inds=im_inds,
obj_preds=obj_preds,
box_priors=box_priors,
)
return obj_dists2, obj_preds, edge_ctx
class RelModel(nn.Module):
"""
RELATIONSHIPS
"""
def __init__(self, classes, rel_classes, mode='sgdet', num_gpus=1, use_vision=True, require_overlap_det=True,
embed_dim=200, hidden_dim=256, pooling_dim=2048,
nl_obj=1, nl_edge=2, use_resnet=False, order='confidence', thresh=0.01,
use_proposals=False, pass_in_obj_feats_to_decoder=True,
pass_in_obj_feats_to_edge=True, rec_dropout=0.0, use_bias=True, use_tanh=True,
limit_vision=True):
"""
:param classes: Object classes
:param rel_classes: Relationship classes. None if were not using rel mode
:param mode: (sgcls, predcls, or sgdet)
        :param num_gpus: how many GPUs to use
:param use_vision: Whether to use vision in the final product
:param require_overlap_det: Whether two objects must intersect
:param embed_dim: Dimension for all embeddings
:param hidden_dim: LSTM hidden size
:param obj_dim:
"""
super(RelModel, self).__init__()
self.classes = classes
self.rel_classes = rel_classes
self.num_gpus = num_gpus
assert mode in MODES
self.mode = mode
self.pooling_size = 7
self.embed_dim = embed_dim
self.hidden_dim = hidden_dim
self.obj_dim = 2048 if use_resnet else 4096
self.pooling_dim = pooling_dim
self.use_bias = use_bias
self.use_vision = use_vision
self.use_tanh = use_tanh
self.limit_vision=limit_vision
self.require_overlap = require_overlap_det and self.mode == 'sgdet'
self.detector = ObjectDetector(
classes=classes,
mode=('proposals' if use_proposals else 'refinerels') if mode == 'sgdet' else 'gtbox',
use_resnet=use_resnet,
thresh=thresh,
max_per_img=64,
)
self.context = LinearizedContext(self.classes, self.rel_classes, mode=self.mode,
embed_dim=self.embed_dim, hidden_dim=self.hidden_dim,
obj_dim=self.obj_dim,
nl_obj=nl_obj, nl_edge=nl_edge, dropout_rate=rec_dropout,
order=order,
pass_in_obj_feats_to_decoder=pass_in_obj_feats_to_decoder,
pass_in_obj_feats_to_edge=pass_in_obj_feats_to_edge)
# Image Feats (You'll have to disable if you want to turn off the features from here)
self.union_boxes = UnionBoxesAndFeats(pooling_size=self.pooling_size, stride=16,
dim=1024 if use_resnet else 512)
if use_resnet:
self.roi_fmap = nn.Sequential(
resnet_l4(relu_end=False),
nn.AvgPool2d(self.pooling_size),
Flattener(),
)
else:
roi_fmap = [
Flattener(),
load_vgg(use_dropout=False, use_relu=False, use_linear=pooling_dim == 4096, pretrained=False).classifier,
]
if pooling_dim != 4096:
roi_fmap.append(nn.Linear(4096, pooling_dim))
self.roi_fmap = nn.Sequential(*roi_fmap)
self.roi_fmap_obj = load_vgg(pretrained=False).classifier
###################################
self.post_lstm = nn.Linear(self.hidden_dim, self.pooling_dim * 2)
# Initialize to sqrt(1/2n) so that the outputs all have mean 0 and variance 1.
# (Half contribution comes from LSTM, half from embedding.
# In practice the pre-lstm stuff tends to have stdev 0.1 so I multiplied this by 10.
self.post_lstm.weight.data.normal_(0, 10.0 * math.sqrt(1.0 / self.hidden_dim))
self.post_lstm.bias.data.zero_()
if nl_edge == 0:
self.post_emb = nn.Embedding(self.num_classes, self.pooling_dim*2)
self.post_emb.weight.data.normal_(0, math.sqrt(1.0))
self.rel_compress = nn.Linear(self.pooling_dim, self.num_rels, bias=True)
self.rel_compress.weight = torch.nn.init.xavier_normal(self.rel_compress.weight, gain=1.0)
if self.use_bias:
self.freq_bias = FrequencyBias()
@property
def num_classes(self):
return len(self.classes)
@property
def num_rels(self):
return len(self.rel_classes)
def visual_rep(self, features, rois, pair_inds):
"""
        Pool visual features for the union box of each object pair
        :param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4]
        :param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
        :param pair_inds: [num_pairs, 2] inds to use when predicting
        :return: [num_pairs, pooling_dim] pooled feature representation of each union box
"""
assert pair_inds.size(1) == 2
uboxes = self.union_boxes(features, rois, pair_inds)
return self.roi_fmap(uboxes)
def get_rel_inds(self, rel_labels, im_inds, box_priors):
# Get the relationship candidates
if self.training:
rel_inds = rel_labels[:, :3].data.clone()
else:
rel_cands = im_inds.data[:, None] == im_inds.data[None]
rel_cands.view(-1)[diagonal_inds(rel_cands)] = 0
# Require overlap for detection
if self.require_overlap:
rel_cands = rel_cands & (bbox_overlaps(box_priors.data,
box_priors.data) > 0)
            # if there are fewer than 100 things then we might as well add some?
amt_to_add = 100 - rel_cands.long().sum()
rel_cands = rel_cands.nonzero()
if rel_cands.dim() == 0:
rel_cands = im_inds.data.new(1, 2).fill_(0)
rel_inds = torch.cat((im_inds.data[rel_cands[:, 0]][:, None], rel_cands), 1)
return rel_inds
def obj_feature_map(self, features, rois):
"""
Gets the ROI features
:param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4] (features at level p2)
:param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
:return: [num_rois, #dim] array
"""
feature_pool = RoIAlignFunction(self.pooling_size, self.pooling_size, spatial_scale=1 / 16)(
features, rois)
return self.roi_fmap_obj(feature_pool.view(rois.size(0), -1))
def forward(self, x, im_sizes, image_offset,
gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None, train_anchor_inds=None,
return_fmap=False):
"""
Forward pass for detection
:param x: Images@[batch_size, 3, IM_SIZE, IM_SIZE]
:param im_sizes: A numpy array of (h, w, scale) for each image.
:param image_offset: Offset onto what image we're on for MGPU training (if single GPU this is 0)
:param gt_boxes:
Training parameters:
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
:param train_anchor_inds: a [num_train, 2] array of indices for the anchors that will
be used to compute the training loss. Each (img_ind, fpn_idx)
:return: If train:
scores, boxdeltas, labels, boxes, boxtargets, rpnscores, rpnboxes, rellabels
if test:
prob dists, boxes, img inds, maxscores, classes
"""
result = self.detector(x, im_sizes, image_offset, gt_boxes, gt_classes, gt_rels, proposals,
train_anchor_inds, return_fmap=True)
if result.is_none():
            raise ValueError("heck")
im_inds = result.im_inds - image_offset
boxes = result.rm_box_priors
if self.training and result.rel_labels is None:
assert self.mode == 'sgdet'
result.rel_labels = rel_assignments(im_inds.data, boxes.data, result.rm_obj_labels.data,
gt_boxes.data, gt_classes.data, gt_rels.data,
image_offset, filter_non_overlap=True,
num_sample_per_gt=1)
rel_inds = self.get_rel_inds(result.rel_labels, im_inds, boxes)
rois = torch.cat((im_inds[:, None].float(), boxes), 1)
result.obj_fmap = self.obj_feature_map(result.fmap.detach(), rois)
# Prevent gradients from flowing back into score_fc from elsewhere
result.rm_obj_dists, result.obj_preds, edge_ctx = self.context(
result.obj_fmap,
result.rm_obj_dists.detach(),
im_inds, result.rm_obj_labels if self.training or self.mode == 'predcls' else None,
boxes.data, result.boxes_all)
if edge_ctx is None:
edge_rep = self.post_emb(result.obj_preds)
else:
edge_rep = self.post_lstm(edge_ctx)
# Split into subject and object representations
edge_rep = edge_rep.view(edge_rep.size(0), 2, self.pooling_dim)
subj_rep = edge_rep[:, 0]
obj_rep = edge_rep[:, 1]
prod_rep = subj_rep[rel_inds[:, 1]] * obj_rep[rel_inds[:, 2]]
if self.use_vision:
vr = self.visual_rep(result.fmap.detach(), rois, rel_inds[:, 1:])
if self.limit_vision:
# exact value TBD
prod_rep = torch.cat((prod_rep[:,:2048] * vr[:,:2048], prod_rep[:,2048:]), 1)
else:
prod_rep = prod_rep * vr
if self.use_tanh:
prod_rep = F.tanh(prod_rep)
result.rel_dists = self.rel_compress(prod_rep)
if self.use_bias:
result.rel_dists = result.rel_dists + self.freq_bias.index_with_labels(torch.stack((
result.obj_preds[rel_inds[:, 1]],
result.obj_preds[rel_inds[:, 2]],
), 1))
if self.training:
return result
twod_inds = arange(result.obj_preds.data) * self.num_classes + result.obj_preds.data
result.obj_scores = F.softmax(result.rm_obj_dists, dim=1).view(-1)[twod_inds]
# Bbox regression
if self.mode == 'sgdet':
bboxes = result.boxes_all.view(-1, 4)[twod_inds].view(result.boxes_all.size(0), 4)
else:
# Boxes will get fixed by filter_dets function.
bboxes = result.rm_box_priors
rel_rep = F.softmax(result.rel_dists, dim=1)
return filter_dets(bboxes, result.obj_scores,
result.obj_preds, rel_inds[:, 1:], rel_rep)
def __getitem__(self, batch):
""" Hack to do multi-GPU training"""
batch.scatter()
if self.num_gpus == 1:
return self(*batch[0])
replicas = nn.parallel.replicate(self, devices=list(range(self.num_gpus)))
outputs = nn.parallel.parallel_apply(replicas, [batch[i] for i in range(self.num_gpus)])
if self.training:
return gather_res(outputs, 0, dim=0)
return outputs
| 23,579 | 41.032086 | 136 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/word_vectors.py | """
Adapted from PyTorch's text library.
"""
import array
import os
import zipfile
import six
import torch
from six.moves.urllib.request import urlretrieve
from tqdm import tqdm
from config import DATA_PATH
import sys
def obj_edge_vectors(names, wv_type='glove.6B', wv_dir=DATA_PATH, wv_dim=300):
wv_dict, wv_arr, wv_size = load_word_vectors(wv_dir, wv_type, wv_dim)
vectors = torch.Tensor(len(names), wv_dim)
vectors.normal_(0,1)
for i, token in enumerate(names):
wv_index = wv_dict.get(token, None)
if wv_index is not None:
vectors[i] = wv_arr[wv_index]
else:
            # Try the longest word (hopefully it won't be a preposition)
lw_token = sorted(token.split(' '), key=lambda x: len(x), reverse=True)[0]
print("{} -> {} ".format(token, lw_token))
wv_index = wv_dict.get(lw_token, None)
if wv_index is not None:
vectors[i] = wv_arr[wv_index]
else:
print("fail on {}".format(token))
return vectors
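# Illustrative usage sketch (assumes the GloVe vectors are available under DATA_PATH,
# downloaded on first use by load_word_vectors below):
#   embeds = obj_edge_vectors(['__background__', 'traffic light'], wv_type='glove.6B', wv_dim=200)
#   # -> FloatTensor of shape [2, 200]; multi-word names fall back to their longest word,
#   #    and names missing from GloVe keep their random N(0, 1) initialization.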
URL = {
'glove.42B': 'http://nlp.stanford.edu/data/glove.42B.300d.zip',
'glove.840B': 'http://nlp.stanford.edu/data/glove.840B.300d.zip',
'glove.twitter.27B': 'http://nlp.stanford.edu/data/glove.twitter.27B.zip',
'glove.6B': 'http://nlp.stanford.edu/data/glove.6B.zip',
}
def load_word_vectors(root, wv_type, dim):
"""Load word vectors from a path, trying .pt, .txt, and .zip extensions."""
if isinstance(dim, int):
dim = str(dim) + 'd'
fname = os.path.join(root, wv_type + '.' + dim)
if os.path.isfile(fname + '.pt'):
fname_pt = fname + '.pt'
print('loading word vectors from', fname_pt)
try:
return torch.load(fname_pt)
except Exception as e:
print("""
Error loading the model from {}
This could be because this code was previously run with one
PyTorch version to generate cached data and is now being
run with another version.
You can try to delete the cached files on disk (this file
        and others) and re-run the code
Error message:
---------
{}
""".format(fname_pt, str(e)))
sys.exit(-1)
if os.path.isfile(fname + '.txt'):
fname_txt = fname + '.txt'
cm = open(fname_txt, 'rb')
cm = [line for line in cm]
elif os.path.basename(wv_type) in URL:
url = URL[wv_type]
print('downloading word vectors from {}'.format(url))
filename = os.path.basename(fname)
if not os.path.exists(root):
os.makedirs(root)
with tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
fname, _ = urlretrieve(url, fname, reporthook=reporthook(t))
with zipfile.ZipFile(fname, "r") as zf:
print('extracting word vectors into {}'.format(root))
zf.extractall(root)
if not os.path.isfile(fname + '.txt'):
raise RuntimeError('no word vectors of requested dimension found')
return load_word_vectors(root, wv_type, dim)
else:
raise RuntimeError('unable to load word vectors')
wv_tokens, wv_arr, wv_size = [], array.array('d'), None
if cm is not None:
for line in tqdm(range(len(cm)), desc="loading word vectors from {}".format(fname_txt)):
entries = cm[line].strip().split(b' ')
word, entries = entries[0], entries[1:]
if wv_size is None:
wv_size = len(entries)
try:
if isinstance(word, six.binary_type):
word = word.decode('utf-8')
except:
print('non-UTF8 token', repr(word), 'ignored')
continue
wv_arr.extend(float(x) for x in entries)
wv_tokens.append(word)
wv_dict = {word: i for i, word in enumerate(wv_tokens)}
wv_arr = torch.Tensor(wv_arr).view(-1, wv_size)
ret = (wv_dict, wv_arr, wv_size)
torch.save(ret, fname + '.pt')
return ret
def reporthook(t):
"""https://github.com/tqdm/tqdm"""
last_b = [0]
def inner(b=1, bsize=1, tsize=None):
"""
        b: int, optional
Number of blocks just transferred [default: 1].
bsize: int, optional
Size of each block (in tqdm units) [default: 1].
tsize: int, optional
Total size (in tqdm units). If [default: None] remains unchanged.
"""
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
| 4,711 | 34.428571 | 96 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/object_detector.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
from torch.autograd import Variable
from torch.nn import functional as F
from config import ANCHOR_SIZE, ANCHOR_RATIOS, ANCHOR_SCALES
from lib.fpn.generate_anchors import generate_anchors
from lib.fpn.box_utils import bbox_preds, center_size, bbox_overlaps
from lib.fpn.nms.functions.nms import apply_nms
from lib.fpn.proposal_assignments.proposal_assignments_gtbox import proposal_assignments_gtbox
from lib.fpn.proposal_assignments.proposal_assignments_det import proposal_assignments_det
from lib.fpn.roi_align.functions.roi_align import RoIAlignFunction
from lib.pytorch_misc import enumerate_by_image, gather_nd, diagonal_inds, Flattener
from torchvision.models.vgg import vgg16
from torchvision.models.resnet import resnet101
from torch.nn.parallel._functions import Gather
class Result(object):
""" little container class for holding the detection result
od: object detector, rm: rel model"""
def __init__(self, od_obj_dists=None, rm_obj_dists=None,
obj_scores=None, obj_preds=None, obj_fmap=None,
od_box_deltas=None, rm_box_deltas=None,
od_box_targets=None, rm_box_targets=None, od_box_priors=None, rm_box_priors=None,
boxes_assigned=None, boxes_all=None, od_obj_labels=None, rm_obj_labels=None,
rpn_scores=None, rpn_box_deltas=None, rel_labels=None,
im_inds=None, fmap=None, rel_dists=None, rel_inds=None, rel_rep=None):
self.__dict__.update(locals())
del self.__dict__['self']
def is_none(self):
return all([v is None for k, v in self.__dict__.items() if k != 'self'])
def gather_res(outputs, target_device, dim=0):
"""
    Assuming the signatures are the same across results!
"""
out = outputs[0]
args = {field: Gather.apply(target_device, dim, *[getattr(o, field) for o in outputs])
for field, v in out.__dict__.items() if v is not None}
return type(out)(**args)
class ObjectDetector(nn.Module):
"""
Core model for doing object detection + getting the visual features. This could be the first step in
a pipeline. We can provide GT rois or use the RPN (which would then be classification!)
"""
MODES = ('rpntrain', 'gtbox', 'refinerels', 'proposals')
def __init__(self, classes, mode='rpntrain', num_gpus=1, nms_filter_duplicates=True,
max_per_img=64, use_resnet=False, thresh=0.05):
"""
:param classes: Object classes
        :param rel_classes: Relationship classes. None if we're not using rel mode
        :param num_gpus: how many GPUs to use
"""
super(ObjectDetector, self).__init__()
if mode not in self.MODES:
raise ValueError("invalid mode")
self.mode = mode
self.classes = classes
self.num_gpus = num_gpus
self.pooling_size = 7
self.nms_filter_duplicates = nms_filter_duplicates
self.max_per_img = max_per_img
self.use_resnet = use_resnet
self.thresh = thresh
if not self.use_resnet:
vgg_model = load_vgg()
self.features = vgg_model.features
self.roi_fmap = vgg_model.classifier
rpn_input_dim = 512
output_dim = 4096
else: # Deprecated
self.features = load_resnet()
self.compress = nn.Sequential(
nn.Conv2d(1024, 256, kernel_size=1),
nn.ReLU(inplace=True),
nn.BatchNorm2d(256),
)
self.roi_fmap = nn.Sequential(
nn.Linear(256 * 7 * 7, 2048),
nn.SELU(inplace=True),
nn.AlphaDropout(p=0.05),
nn.Linear(2048, 2048),
nn.SELU(inplace=True),
nn.AlphaDropout(p=0.05),
)
rpn_input_dim = 1024
output_dim = 2048
self.score_fc = nn.Linear(output_dim, self.num_classes)
self.bbox_fc = nn.Linear(output_dim, self.num_classes * 4)
self.rpn_head = RPNHead(dim=512, input_dim=rpn_input_dim)
@property
def num_classes(self):
return len(self.classes)
def feature_map(self, x):
"""
Produces feature map from the input image
:param x: [batch_size, 3, size, size] float32 padded image
:return: Feature maps at 1/16 the original size.
Each one is [batch_size, dim, IM_SIZE/k, IM_SIZE/k].
"""
if not self.use_resnet:
return self.features(x) # Uncomment this for "stanford" setting in which it's frozen: .detach()
x = self.features.conv1(x)
x = self.features.bn1(x)
x = self.features.relu(x)
x = self.features.maxpool(x)
c2 = self.features.layer1(x)
c3 = self.features.layer2(c2)
c4 = self.features.layer3(c3)
return c4
def obj_feature_map(self, features, rois):
"""
Gets the ROI features
:param features: [batch_size, dim, IM_SIZE/4, IM_SIZE/4] (features at level p2)
:param rois: [num_rois, 5] array of [img_num, x0, y0, x1, y1].
:return: [num_rois, #dim] array
"""
feature_pool = RoIAlignFunction(self.pooling_size, self.pooling_size, spatial_scale=1 / 16)(
self.compress(features) if self.use_resnet else features, rois)
return self.roi_fmap(feature_pool.view(rois.size(0), -1))
def rpn_boxes(self, fmap, im_sizes, image_offset, gt_boxes=None, gt_classes=None, gt_rels=None,
train_anchor_inds=None, proposals=None):
"""
Gets boxes from the RPN
:param fmap:
:param im_sizes:
:param image_offset:
:param gt_boxes:
:param gt_classes:
:param gt_rels:
:param train_anchor_inds:
:return:
"""
rpn_feats = self.rpn_head(fmap)
rois = self.rpn_head.roi_proposals(
rpn_feats, im_sizes, nms_thresh=0.7,
pre_nms_topn=12000 if self.training and self.mode == 'rpntrain' else 6000,
post_nms_topn=2000 if self.training and self.mode == 'rpntrain' else 1000,
)
if self.training:
if gt_boxes is None or gt_classes is None or train_anchor_inds is None:
raise ValueError(
"Must supply GT boxes, GT classes, trainanchors when in train mode")
rpn_scores, rpn_box_deltas = self.rpn_head.anchor_preds(rpn_feats, train_anchor_inds,
image_offset)
if gt_rels is not None and self.mode == 'rpntrain':
raise ValueError("Training the object detector and the relationship model with detection"
"at the same time isn't supported")
if self.mode == 'refinerels':
all_rois = Variable(rois)
# Potentially you could add in GT rois if none match
# is_match = (bbox_overlaps(rois[:,1:].contiguous(), gt_boxes.data) > 0.5).long()
# gt_not_matched = (is_match.sum(0) == 0).nonzero()
#
# if gt_not_matched.dim() > 0:
# gt_to_add = torch.cat((gt_classes[:,0,None][gt_not_matched.squeeze(1)].float(),
# gt_boxes[gt_not_matched.squeeze(1)]), 1)
#
# all_rois = torch.cat((all_rois, gt_to_add),0)
# num_gt = gt_to_add.size(0)
labels = None
bbox_targets = None
rel_labels = None
else:
all_rois, labels, bbox_targets = proposal_assignments_det(
rois, gt_boxes.data, gt_classes.data, image_offset, fg_thresh=0.5)
rel_labels = None
else:
all_rois = Variable(rois, volatile=True)
labels = None
bbox_targets = None
rel_labels = None
rpn_box_deltas = None
rpn_scores = None
return all_rois, labels, bbox_targets, rpn_scores, rpn_box_deltas, rel_labels
def gt_boxes(self, fmap, im_sizes, image_offset, gt_boxes=None, gt_classes=None, gt_rels=None,
train_anchor_inds=None, proposals=None):
"""
Gets GT boxes!
:param fmap:
:param im_sizes:
:param image_offset:
:param gt_boxes:
:param gt_classes:
:param gt_rels:
:param train_anchor_inds:
:return:
"""
assert gt_boxes is not None
im_inds = gt_classes[:, 0] - image_offset
rois = torch.cat((im_inds.float()[:, None], gt_boxes), 1)
if gt_rels is not None and self.training:
rois, labels, rel_labels = proposal_assignments_gtbox(
rois.data, gt_boxes.data, gt_classes.data, gt_rels.data, image_offset,
fg_thresh=0.5)
else:
labels = gt_classes[:, 1]
rel_labels = None
return rois, labels, None, None, None, rel_labels
def proposal_boxes(self, fmap, im_sizes, image_offset, gt_boxes=None, gt_classes=None, gt_rels=None,
train_anchor_inds=None, proposals=None):
"""
        Gets boxes from externally supplied proposals
:param fmap:
:param im_sizes:
:param image_offset:
:param gt_boxes:
:param gt_classes:
:param gt_rels:
:param train_anchor_inds:
:return:
"""
assert proposals is not None
rois = filter_roi_proposals(proposals[:, 2:].data.contiguous(), proposals[:, 1].data.contiguous(),
np.array([2000] * len(im_sizes)),
nms_thresh=0.7,
pre_nms_topn=12000 if self.training and self.mode == 'rpntrain' else 6000,
post_nms_topn=2000 if self.training and self.mode == 'rpntrain' else 1000,
)
if self.training:
all_rois, labels, bbox_targets = proposal_assignments_det(
rois, gt_boxes.data, gt_classes.data, image_offset, fg_thresh=0.5)
# RETRAINING FOR DETECTION HERE.
all_rois = torch.cat((all_rois, Variable(rois)), 0)
else:
all_rois = Variable(rois, volatile=True)
labels = None
bbox_targets = None
rpn_scores = None
rpn_box_deltas = None
rel_labels = None
return all_rois, labels, bbox_targets, rpn_scores, rpn_box_deltas, rel_labels
def get_boxes(self, *args, **kwargs):
if self.mode == 'gtbox':
fn = self.gt_boxes
elif self.mode == 'proposals':
assert kwargs['proposals'] is not None
fn = self.proposal_boxes
else:
fn = self.rpn_boxes
return fn(*args, **kwargs)
def forward(self, x, im_sizes, image_offset,
gt_boxes=None, gt_classes=None, gt_rels=None, proposals=None, train_anchor_inds=None,
return_fmap=False):
"""
Forward pass for detection
:param x: Images@[batch_size, 3, IM_SIZE, IM_SIZE]
:param im_sizes: A numpy array of (h, w, scale) for each image.
:param image_offset: Offset onto what image we're on for MGPU training (if single GPU this is 0)
:param gt_boxes:
Training parameters:
:param gt_boxes: [num_gt, 4] GT boxes over the batch.
:param gt_classes: [num_gt, 2] gt boxes where each one is (img_id, class)
        :param proposals: Externally computed region proposals (scores and boxes); used when mode == 'proposals'
:param train_anchor_inds: a [num_train, 2] array of indices for the anchors that will
be used to compute the training loss. Each (img_ind, fpn_idx)
        :return: A Result object containing the detections (plus training targets when training)
"""
fmap = self.feature_map(x)
# Get boxes from RPN
rois, obj_labels, bbox_targets, rpn_scores, rpn_box_deltas, rel_labels = \
self.get_boxes(fmap, im_sizes, image_offset, gt_boxes,
gt_classes, gt_rels, train_anchor_inds, proposals=proposals)
# Now classify them
obj_fmap = self.obj_feature_map(fmap, rois)
od_obj_dists = self.score_fc(obj_fmap)
od_box_deltas = self.bbox_fc(obj_fmap).view(
-1, len(self.classes), 4) if self.mode != 'gtbox' else None
od_box_priors = rois[:, 1:]
if (not self.training and not self.mode == 'gtbox') or self.mode in ('proposals', 'refinerels'):
nms_inds, nms_scores, nms_preds, nms_boxes_assign, nms_boxes, nms_imgs = self.nms_boxes(
od_obj_dists,
rois,
od_box_deltas, im_sizes,
)
im_inds = nms_imgs + image_offset
obj_dists = od_obj_dists[nms_inds]
obj_fmap = obj_fmap[nms_inds]
box_deltas = od_box_deltas[nms_inds]
box_priors = nms_boxes[:, 0]
if self.training and not self.mode == 'gtbox':
# NOTE: If we're doing this during training, we need to assign labels here.
pred_to_gtbox = bbox_overlaps(box_priors, gt_boxes).data
pred_to_gtbox[im_inds.data[:, None] != gt_classes.data[None, :, 0]] = 0.0
max_overlaps, argmax_overlaps = pred_to_gtbox.max(1)
rm_obj_labels = gt_classes[:, 1][argmax_overlaps]
rm_obj_labels[max_overlaps < 0.5] = 0
else:
rm_obj_labels = None
else:
im_inds = rois[:, 0].long().contiguous() + image_offset
nms_scores = None
nms_preds = None
nms_boxes_assign = None
nms_boxes = None
box_priors = rois[:, 1:]
rm_obj_labels = obj_labels
box_deltas = od_box_deltas
obj_dists = od_obj_dists
return Result(
od_obj_dists=od_obj_dists,
rm_obj_dists=obj_dists,
obj_scores=nms_scores,
obj_preds=nms_preds,
obj_fmap=obj_fmap,
od_box_deltas=od_box_deltas,
rm_box_deltas=box_deltas,
od_box_targets=bbox_targets,
rm_box_targets=bbox_targets,
od_box_priors=od_box_priors,
rm_box_priors=box_priors,
boxes_assigned=nms_boxes_assign,
boxes_all=nms_boxes,
od_obj_labels=obj_labels,
rm_obj_labels=rm_obj_labels,
rpn_scores=rpn_scores,
rpn_box_deltas=rpn_box_deltas,
rel_labels=rel_labels,
im_inds=im_inds,
fmap=fmap if return_fmap else None,
)
def nms_boxes(self, obj_dists, rois, box_deltas, im_sizes):
"""
Performs NMS on the boxes
:param obj_dists: [#rois, #classes]
:param rois: [#rois, 5]
:param box_deltas: [#rois, #classes, 4]
:param im_sizes: sizes of images
:return
nms_inds [#nms]
nms_scores [#nms]
nms_labels [#nms]
nms_boxes_assign [#nms, 4]
nms_boxes [#nms, #classes, 4]. classid=0 is the box prior.
"""
# Now produce the boxes
        # box deltas is (num_rois, num_classes, 4) but rois is only (num_rois, 4)
boxes = bbox_preds(rois[:, None, 1:].expand_as(box_deltas).contiguous().view(-1, 4),
box_deltas.view(-1, 4)).view(*box_deltas.size())
# Clip the boxes and get the best N dets per image.
inds = rois[:, 0].long().contiguous()
dets = []
for i, s, e in enumerate_by_image(inds.data):
h, w = im_sizes[i, :2]
boxes[s:e, :, 0].data.clamp_(min=0, max=w - 1)
boxes[s:e, :, 1].data.clamp_(min=0, max=h - 1)
boxes[s:e, :, 2].data.clamp_(min=0, max=w - 1)
boxes[s:e, :, 3].data.clamp_(min=0, max=h - 1)
d_filtered = filter_det(
F.softmax(obj_dists[s:e], 1), boxes[s:e], start_ind=s,
nms_filter_duplicates=self.nms_filter_duplicates,
max_per_img=self.max_per_img,
thresh=self.thresh,
)
if d_filtered is not None:
dets.append(d_filtered)
if len(dets) == 0:
print("nothing was detected", flush=True)
return None
nms_inds, nms_scores, nms_labels = [torch.cat(x, 0) for x in zip(*dets)]
twod_inds = nms_inds * boxes.size(1) + nms_labels.data
nms_boxes_assign = boxes.view(-1, 4)[twod_inds]
nms_boxes = torch.cat((rois[:, 1:][nms_inds][:, None], boxes[nms_inds][:, 1:]), 1)
return nms_inds, nms_scores, nms_labels, nms_boxes_assign, nms_boxes, inds[nms_inds]
def __getitem__(self, batch):
""" Hack to do multi-GPU training"""
batch.scatter()
if self.num_gpus == 1:
return self(*batch[0])
replicas = nn.parallel.replicate(self, devices=list(range(self.num_gpus)))
outputs = nn.parallel.parallel_apply(replicas, [batch[i] for i in range(self.num_gpus)])
if any([x.is_none() for x in outputs]):
assert not self.training
return None
return gather_res(outputs, 0, dim=0)
def filter_det(scores, boxes, start_ind=0, max_per_img=100, thresh=0.001, pre_nms_topn=6000,
post_nms_topn=300, nms_thresh=0.3, nms_filter_duplicates=True):
"""
Filters the detections for a single image
:param scores: [num_rois, num_classes]
:param boxes: [num_rois, num_classes, 4]. Assumes the boxes have been clamped
:param max_per_img: Max detections per image
:param thresh: Threshold for calling it a good box
    :param nms_filter_duplicates: True if we shouldn't allow for multiple detections of the
same box (with different labels)
    :return: (inds, scores, labels) for up to max_per_img detections in this image, or None if nothing passes the threshold
"""
valid_cls = (scores[:, 1:].data.max(0)[0] > thresh).nonzero() + 1
if valid_cls.dim() == 0:
return None
nms_mask = scores.data.clone()
nms_mask.zero_()
for c_i in valid_cls.squeeze(1).cpu():
scores_ci = scores.data[:, c_i]
boxes_ci = boxes.data[:, c_i]
keep = apply_nms(scores_ci, boxes_ci,
pre_nms_topn=pre_nms_topn, post_nms_topn=post_nms_topn,
nms_thresh=nms_thresh)
nms_mask[:, c_i][keep] = 1
dists_all = Variable(nms_mask * scores.data, volatile=True)
if nms_filter_duplicates:
scores_pre, labels_pre = dists_all.data.max(1)
inds_all = scores_pre.nonzero()
assert inds_all.dim() != 0
inds_all = inds_all.squeeze(1)
labels_all = labels_pre[inds_all]
scores_all = scores_pre[inds_all]
else:
nz = nms_mask.nonzero()
assert nz.dim() != 0
inds_all = nz[:, 0]
labels_all = nz[:, 1]
scores_all = scores.data.view(-1)[inds_all * scores.data.size(1) + labels_all]
# dists_all = dists_all[inds_all]
# dists_all[:,0] = 1.0-dists_all.sum(1)
# # Limit to max per image detections
vs, idx = torch.sort(scores_all, dim=0, descending=True)
idx = idx[vs > thresh]
if max_per_img < idx.size(0):
idx = idx[:max_per_img]
inds_all = inds_all[idx] + start_ind
scores_all = Variable(scores_all[idx], volatile=True)
labels_all = Variable(labels_all[idx], volatile=True)
# dists_all = dists_all[idx]
return inds_all, scores_all, labels_all
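# --- Editor's note: illustrative sketch, not part of the original file. ---
# With nms_filter_duplicates, each box keeps only its highest-scoring class that
# survived the per-class NMS mask, so one region cannot be reported twice under
# different labels. The toy tensors below are hypothetical; only torch is assumed.
def _example_filter_duplicate_labels():
    import torch
    # rows = boxes, cols = classes (class 0 = background); scores already masked by NMS
    masked_scores = torch.tensor([[0.0, 0.6, 0.5],
                                  [0.0, 0.0, 0.8],
                                  [0.0, 0.0, 0.0]])
    scores_pre, labels_pre = masked_scores.max(1)    # best surviving class per box
    keep = scores_pre.nonzero().squeeze(1)           # boxes with no surviving class are dropped
    return keep, labels_pre[keep], scores_pre[keep]  # ([0, 1], [1, 2], [0.6, 0.8])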
class RPNHead(nn.Module):
"""
Serves as the class + box outputs for each level in the FPN.
"""
def __init__(self, dim=512, input_dim=1024):
"""
        :param dim: Hidden dim of the RPN head conv layers
        :param input_dim: Channel dim of the incoming feature map
        NOTE - the anchor aspect ratios can't be changed now as they depend on other things
        in the C code...
"""
super(RPNHead, self).__init__()
self.anchor_target_dim = 6
self.stride = 16
self.conv = nn.Sequential(
nn.Conv2d(input_dim, dim, kernel_size=3, padding=1),
nn.ReLU6(inplace=True), # Tensorflow docs use Relu6, so let's use it too....
nn.Conv2d(dim, self.anchor_target_dim * self._A,
kernel_size=1)
)
ans_np = generate_anchors(base_size=ANCHOR_SIZE,
feat_stride=self.stride,
anchor_scales=ANCHOR_SCALES,
anchor_ratios=ANCHOR_RATIOS,
)
self.register_buffer('anchors', torch.FloatTensor(ans_np))
@property
def _A(self):
return len(ANCHOR_RATIOS) * len(ANCHOR_SCALES)
def forward(self, fmap):
"""
Gets the class / noclass predictions over all the scales
:param fmap: [batch_size, dim, IM_SIZE/16, IM_SIZE/16] featuremap
:return: [batch_size, IM_SIZE/16, IM_SIZE/16, A, 6]
"""
rez = self._reshape_channels(self.conv(fmap))
rez = rez.view(rez.size(0), rez.size(1), rez.size(2),
self._A, self.anchor_target_dim)
return rez
def anchor_preds(self, preds, train_anchor_inds, image_offset):
"""
Get predictions for the training indices
:param preds: [batch_size, IM_SIZE/16, IM_SIZE/16, A, 6]
:param train_anchor_inds: [num_train, 4] indices into the predictions
:return: class_preds: [num_train, 2] array of yes/no
box_preds: [num_train, 4] array of predicted boxes
"""
assert train_anchor_inds.size(1) == 4
tai = train_anchor_inds.data.clone()
tai[:, 0] -= image_offset
train_regions = gather_nd(preds, tai)
class_preds = train_regions[:, :2]
box_preds = train_regions[:, 2:]
return class_preds, box_preds
@staticmethod
def _reshape_channels(x):
""" [batch_size, channels, h, w] -> [batch_size, h, w, channels] """
assert x.dim() == 4
batch_size, nc, h, w = x.size()
x_t = x.view(batch_size, nc, -1).transpose(1, 2).contiguous()
x_t = x_t.view(batch_size, h, w, nc)
return x_t
def roi_proposals(self, fmap, im_sizes, nms_thresh=0.7, pre_nms_topn=12000, post_nms_topn=2000):
"""
:param fmap: [batch_size, IM_SIZE/16, IM_SIZE/16, A, 6]
:param im_sizes: [batch_size, 3] numpy array of (h, w, scale)
        :return: ROIS: [<= post_nms_topn, 5] array of ROIs.
"""
class_fmap = fmap[:, :, :, :, :2].contiguous()
        # Score the anchors and keep only the well-placed, high-scoring proposal boxes
class_preds = F.softmax(class_fmap, 4)[..., 1].data.contiguous()
box_fmap = fmap[:, :, :, :, 2:].data.contiguous()
anchor_stacked = torch.cat([self.anchors[None]] * fmap.size(0), 0)
box_preds = bbox_preds(anchor_stacked.view(-1, 4), box_fmap.view(-1, 4)).view(
*box_fmap.size())
for i, (h, w, scale) in enumerate(im_sizes):
# Zero out all the bad boxes h, w, A, 4
h_end = int(h) // self.stride
w_end = int(w) // self.stride
if h_end < class_preds.size(1):
class_preds[i, h_end:] = -0.01
if w_end < class_preds.size(2):
class_preds[i, :, w_end:] = -0.01
# and clamp the others
box_preds[i, :, :, :, 0].clamp_(min=0, max=w - 1)
box_preds[i, :, :, :, 1].clamp_(min=0, max=h - 1)
box_preds[i, :, :, :, 2].clamp_(min=0, max=w - 1)
box_preds[i, :, :, :, 3].clamp_(min=0, max=h - 1)
sizes = center_size(box_preds.view(-1, 4))
class_preds.view(-1)[(sizes[:, 2] < 4) | (sizes[:, 3] < 4)] = -0.01
return filter_roi_proposals(box_preds.view(-1, 4), class_preds.view(-1),
boxes_per_im=np.array([np.prod(box_preds.size()[1:-1])] * fmap.size(0)),
nms_thresh=nms_thresh,
pre_nms_topn=pre_nms_topn, post_nms_topn=post_nms_topn)
def filter_roi_proposals(box_preds, class_preds, boxes_per_im, nms_thresh=0.7, pre_nms_topn=12000, post_nms_topn=2000):
inds, im_per = apply_nms(
class_preds,
box_preds,
pre_nms_topn=pre_nms_topn,
post_nms_topn=post_nms_topn,
boxes_per_im=boxes_per_im,
nms_thresh=nms_thresh,
)
img_inds = torch.cat([val * torch.ones(i) for val, i in enumerate(im_per)], 0).cuda(
box_preds.get_device())
rois = torch.cat((img_inds[:, None], box_preds[inds]), 1)
return rois
def load_resnet():
model = resnet101(pretrained=True)
del model.layer4
del model.avgpool
del model.fc
return model
def load_vgg(use_dropout=True, use_relu=True, use_linear=True, pretrained=True):
model = vgg16(pretrained=pretrained)
del model.features._modules['30'] # Get rid of the maxpool
del model.classifier._modules['6'] # Get rid of class layer
if not use_dropout:
del model.classifier._modules['5'] # Get rid of dropout
if not use_relu:
del model.classifier._modules['4'] # Get rid of relu activation
if not use_linear:
del model.classifier._modules['3'] # Get rid of linear layer
return model
| 25,429 | 39.11041 | 119 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/get_union_boxes.py | """
credits to https://github.com/ruotianluo/pytorch-faster-rcnn/blob/master/lib/nets/network.py#L91
"""
import torch
from torch.autograd import Variable
from torch.nn import functional as F
from lib.fpn.roi_align.functions.roi_align import RoIAlignFunction
from lib.draw_rectangles.draw_rectangles import draw_union_boxes
import numpy as np
from torch.nn.modules.module import Module
from torch import nn
from config import BATCHNORM_MOMENTUM
class UnionBoxesAndFeats(Module):
def __init__(self, pooling_size=7, stride=16, dim=256, concat=False, use_feats=True):
"""
:param pooling_size: Pool the union boxes to this dimension
:param stride: pixel spacing in the entire image
:param dim: Dimension of the feats
        :param concat: Whether to concat (True) or add (False) the representations
"""
super(UnionBoxesAndFeats, self).__init__()
self.pooling_size = pooling_size
self.stride = stride
self.dim = dim
self.use_feats = use_feats
self.conv = nn.Sequential(
nn.Conv2d(2, dim //2, kernel_size=7, stride=2, padding=3, bias=True),
nn.ReLU(inplace=True),
nn.BatchNorm2d(dim//2, momentum=BATCHNORM_MOMENTUM),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
nn.Conv2d(dim // 2, dim, kernel_size=3, stride=1, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.BatchNorm2d(dim, momentum=BATCHNORM_MOMENTUM),
)
self.concat = concat
def forward(self, fmap, rois, union_inds):
union_pools = union_boxes(fmap, rois, union_inds, pooling_size=self.pooling_size, stride=self.stride)
if not self.use_feats:
return union_pools.detach()
pair_rois = torch.cat((rois[:, 1:][union_inds[:, 0]], rois[:, 1:][union_inds[:, 1]]),1).data.cpu().numpy()
# rects_np = get_rect_features(pair_rois, self.pooling_size*2-1) - 0.5
rects_np = draw_union_boxes(pair_rois, self.pooling_size*4-1) - 0.5
rects = Variable(torch.FloatTensor(rects_np).cuda(fmap.get_device()), volatile=fmap.volatile)
if self.concat:
return torch.cat((union_pools, self.conv(rects)), 1)
return union_pools + self.conv(rects)
def union_boxes(fmap, rois, union_inds, pooling_size=14, stride=16):
"""
:param fmap: (batch_size, d, IM_SIZE/stride, IM_SIZE/stride)
:param rois: (num_rois, 5) with [im_ind, x1, y1, x2, y2]
:param union_inds: (num_urois, 2) with [roi_ind1, roi_ind2]
:param pooling_size: we'll resize to this
:param stride:
:return:
"""
assert union_inds.size(1) == 2
im_inds = rois[:,0][union_inds[:,0]]
assert (im_inds.data == rois.data[:,0][union_inds[:,1]]).sum() == union_inds.size(0)
union_rois = torch.cat((
im_inds[:,None],
torch.min(rois[:, 1:3][union_inds[:, 0]], rois[:, 1:3][union_inds[:, 1]]),
torch.max(rois[:, 3:5][union_inds[:, 0]], rois[:, 3:5][union_inds[:, 1]]),
),1)
# (num_rois, d, pooling_size, pooling_size)
union_pools = RoIAlignFunction(pooling_size, pooling_size,
spatial_scale=1/stride)(fmap, union_rois)
return union_pools
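# --- Editor's note: illustrative sketch, not part of the original file. ---
# A union ROI keeps the image index, takes the element-wise min of the two top-left
# corners and the max of the two bottom-right corners. Hypothetical numbers below;
# only torch is assumed.
def _example_union_roi():
    import torch
    rois = torch.tensor([[0., 10., 10., 50., 60.],   # [im_ind, x1, y1, x2, y2]
                         [0., 40., 20., 90., 70.]])
    union_inds = torch.tensor([[0, 1]])
    union_rois = torch.cat((
        rois[:, 0][union_inds[:, 0]][:, None],
        torch.min(rois[:, 1:3][union_inds[:, 0]], rois[:, 1:3][union_inds[:, 1]]),
        torch.max(rois[:, 3:5][union_inds[:, 0]], rois[:, 3:5][union_inds[:, 1]]),
    ), 1)
    return union_rois  # tensor([[ 0., 10., 10., 90., 70.]])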
| 3,235 | 39.45 | 114 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/sparse_targets.py | from lib.word_vectors import obj_edge_vectors
import torch.nn as nn
import torch
from torch.autograd import Variable
import numpy as np
from config import DATA_PATH
import os
from lib.get_dataset_counts import get_counts
class FrequencyBias(nn.Module):
"""
The goal of this is to provide a simplified way of computing
P(predicate | obj1, obj2, img).
"""
def __init__(self, eps=1e-3):
super(FrequencyBias, self).__init__()
fg_matrix, bg_matrix = get_counts(must_overlap=True)
bg_matrix += 1
fg_matrix[:, :, 0] = bg_matrix
pred_dist = np.log(fg_matrix / fg_matrix.sum(2)[:, :, None] + eps)
self.num_objs = pred_dist.shape[0]
pred_dist = torch.FloatTensor(pred_dist).view(-1, pred_dist.shape[2])
self.obj_baseline = nn.Embedding(pred_dist.size(0), pred_dist.size(1))
self.obj_baseline.weight.data = pred_dist
def index_with_labels(self, labels):
"""
:param labels: [batch_size, 2]
:return:
"""
return self.obj_baseline(labels[:, 0] * self.num_objs + labels[:, 1])
def forward(self, obj_cands0, obj_cands1):
"""
        :param obj_cands0: [batch_size, 151] probability distribution over candidate object classes.
        :param obj_cands1: [batch_size, 151] probability distribution over candidate object classes.
:return: [batch_size, #predicates] array, which contains potentials for
each possibility
"""
# [batch_size, 151, 151] repr of the joint distribution
joint_cands = obj_cands0[:, :, None] * obj_cands1[:, None]
        # obj_baseline.weight is a [151 * 151, 51] table of per-pair predicate potentials
baseline = joint_cands.view(joint_cands.size(0), -1) @ self.obj_baseline.weight
return baseline
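# --- Editor's note: illustrative sketch, not part of the original file. ---
# index_with_labels flattens an (obj1, obj2) class pair into one row index of the
# [num_objs * num_objs, num_predicates] bias table. The toy sizes and values below
# are hypothetical; only torch is assumed.
def _example_pair_index():
    import torch
    num_objs, num_predicates = 4, 3
    table = torch.arange(num_objs * num_objs * num_predicates, dtype=torch.float32)
    table = table.view(num_objs * num_objs, num_predicates)  # stands in for obj_baseline.weight
    labels = torch.tensor([[2, 1], [0, 3]])                  # (subject class, object class) pairs
    rows = labels[:, 0] * num_objs + labels[:, 1]            # tensor([9, 3])
    return table[rows]                                       # per-pair predicate potentials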
| 1,718 | 31.433962 | 87 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/surgery.py | # create predictions from the other stuff
"""
Go from proposals + scores to relationships.
pred-cls: No bbox regression, obj dist is exactly known
sg-cls : No bbox regression
sg-det : Bbox regression
in all cases we'll return:
boxes, objs, rels, pred_scores
"""
import numpy as np
import torch
from lib.pytorch_misc import unravel_index
from lib.fpn.box_utils import bbox_overlaps
# from ad3 import factor_graph as fg
from time import time
def filter_dets(boxes, obj_scores, obj_classes, rel_inds, pred_scores):
"""
Filters detections....
:param boxes: [num_box, topk, 4] if bbox regression else [num_box, 4]
:param obj_scores: [num_box] probabilities for the scores
:param obj_classes: [num_box] class labels for the topk
    :param rel_inds: [num_rel, 2] TENSOR of (subject box index, object box index) pairs
    :param pred_scores: [num_rel, num_predicates] predicate scores for each candidate relation
    :return: boxes, objs, obj_scores, rels, pred_scores
"""
if boxes.dim() != 2:
raise ValueError("Boxes needs to be [num_box, 4] but its {}".format(boxes.size()))
num_box = boxes.size(0)
assert obj_scores.size(0) == num_box
assert obj_classes.size() == obj_scores.size()
num_rel = rel_inds.size(0)
assert rel_inds.size(1) == 2
assert pred_scores.size(0) == num_rel
obj_scores0 = obj_scores.data[rel_inds[:,0]]
obj_scores1 = obj_scores.data[rel_inds[:,1]]
pred_scores_max, pred_classes_argmax = pred_scores.data[:,1:].max(1)
pred_classes_argmax = pred_classes_argmax + 1
rel_scores_argmaxed = pred_scores_max * obj_scores0 * obj_scores1
rel_scores_vs, rel_scores_idx = torch.sort(rel_scores_argmaxed.view(-1), dim=0, descending=True)
rels = rel_inds[rel_scores_idx].cpu().numpy()
pred_scores_sorted = pred_scores[rel_scores_idx].data.cpu().numpy()
obj_scores_np = obj_scores.data.cpu().numpy()
objs_np = obj_classes.data.cpu().numpy()
boxes_out = boxes.data.cpu().numpy()
return boxes_out, objs_np, obj_scores_np, rels, pred_scores_sorted
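# --- Editor's note: illustrative sketch, not part of the original file. ---
# Candidate relations are ranked by the product of both object scores and the best
# non-background predicate score, as in filter_dets above. Toy values below are
# hypothetical; only torch is assumed.
def _example_triple_ranking():
    import torch
    obj_scores = torch.tensor([0.9, 0.8, 0.4])
    rel_inds = torch.tensor([[0, 1], [1, 2]])
    pred_scores = torch.tensor([[0.1, 0.2, 0.7],   # column 0 is the background predicate
                                [0.1, 0.6, 0.3]])
    pred_max, pred_argmax = pred_scores[:, 1:].max(1)
    triple_scores = pred_max * obj_scores[rel_inds[:, 0]] * obj_scores[rel_inds[:, 1]]
    order = triple_scores.argsort(descending=True)
    return rel_inds[order], pred_argmax[order] + 1  # predicate index shifted past background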
| 2,059 | 33.333333 | 100 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/evaluation/sg_eval.py | """
Adapted from Danfei Xu. In particular, slow code was removed
"""
import numpy as np
from functools import reduce
from lib.pytorch_misc import intersect_2d, argsort_desc
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from config import MODES
np.set_printoptions(precision=3)
class BasicSceneGraphEvaluator:
def __init__(self, mode, multiple_preds=False):
self.result_dict = {}
self.mode = mode
self.result_dict[self.mode + '_recall'] = {20: [], 50: [], 100: []}
self.multiple_preds = multiple_preds
@classmethod
def all_modes(cls, **kwargs):
evaluators = {m: cls(mode=m, **kwargs) for m in MODES}
return evaluators
@classmethod
def vrd_modes(cls, **kwargs):
evaluators = {m: cls(mode=m, multiple_preds=True, **kwargs) for m in ('preddet', 'phrdet')}
return evaluators
def evaluate_scene_graph_entry(self, gt_entry, pred_scores, viz_dict=None, iou_thresh=0.5):
res = evaluate_from_dict(gt_entry, pred_scores, self.mode, self.result_dict,
viz_dict=viz_dict, iou_thresh=iou_thresh, multiple_preds=self.multiple_preds)
# self.print_stats()
return res
def save(self, fn):
np.save(fn, self.result_dict)
def print_stats(self):
print('======================' + self.mode + '============================')
for k, v in self.result_dict[self.mode + '_recall'].items():
print('R@%i: %f' % (k, np.mean(v)))
def evaluate_from_dict(gt_entry, pred_entry, mode, result_dict, multiple_preds=False,
viz_dict=None, **kwargs):
"""
Shortcut to doing evaluate_recall from dict
:param gt_entry: Dictionary containing gt_relations, gt_boxes, gt_classes
:param pred_entry: Dictionary containing pred_rels, pred_boxes (if detection), pred_classes
    :param mode: one of 'predcls', 'sgcls', 'sgdet', 'phrdet', or 'preddet'
:param result_dict:
:param viz_dict:
:param kwargs:
:return:
"""
gt_rels = gt_entry['gt_relations']
gt_boxes = gt_entry['gt_boxes'].astype(float)
gt_classes = gt_entry['gt_classes']
pred_rel_inds = pred_entry['pred_rel_inds']
rel_scores = pred_entry['rel_scores']
if mode == 'predcls':
pred_boxes = gt_boxes
pred_classes = gt_classes
obj_scores = np.ones(gt_classes.shape[0])
elif mode == 'sgcls':
pred_boxes = gt_boxes
pred_classes = pred_entry['pred_classes']
obj_scores = pred_entry['obj_scores']
elif mode == 'sgdet' or mode == 'phrdet':
pred_boxes = pred_entry['pred_boxes'].astype(float)
pred_classes = pred_entry['pred_classes']
obj_scores = pred_entry['obj_scores']
elif mode == 'preddet':
# Only extract the indices that appear in GT
prc = intersect_2d(pred_rel_inds, gt_rels[:, :2])
if prc.size == 0:
for k in result_dict[mode + '_recall']:
result_dict[mode + '_recall'][k].append(0.0)
return None, None, None
pred_inds_per_gt = prc.argmax(0)
pred_rel_inds = pred_rel_inds[pred_inds_per_gt]
rel_scores = rel_scores[pred_inds_per_gt]
# Now sort the matching ones
rel_scores_sorted = argsort_desc(rel_scores[:,1:])
rel_scores_sorted[:,1] += 1
rel_scores_sorted = np.column_stack((pred_rel_inds[rel_scores_sorted[:,0]], rel_scores_sorted[:,1]))
matches = intersect_2d(rel_scores_sorted, gt_rels)
for k in result_dict[mode + '_recall']:
rec_i = float(matches[:k].any(0).sum()) / float(gt_rels.shape[0])
result_dict[mode + '_recall'][k].append(rec_i)
return None, None, None
else:
raise ValueError('invalid mode')
if multiple_preds:
obj_scores_per_rel = obj_scores[pred_rel_inds].prod(1)
overall_scores = obj_scores_per_rel[:,None] * rel_scores[:,1:]
score_inds = argsort_desc(overall_scores)[:100]
pred_rels = np.column_stack((pred_rel_inds[score_inds[:,0]], score_inds[:,1]+1))
predicate_scores = rel_scores[score_inds[:,0], score_inds[:,1]+1]
else:
pred_rels = np.column_stack((pred_rel_inds, 1+rel_scores[:,1:].argmax(1)))
predicate_scores = rel_scores[:,1:].max(1)
pred_to_gt, pred_5ples, rel_scores = evaluate_recall(
gt_rels, gt_boxes, gt_classes,
pred_rels, pred_boxes, pred_classes,
predicate_scores, obj_scores, phrdet= mode=='phrdet',
**kwargs)
for k in result_dict[mode + '_recall']:
match = reduce(np.union1d, pred_to_gt[:k])
rec_i = float(len(match)) / float(gt_rels.shape[0])
result_dict[mode + '_recall'][k].append(rec_i)
return pred_to_gt, pred_5ples, rel_scores
# print(" ".join(["R@{:2d}: {:.3f}".format(k, v[-1]) for k, v in result_dict[mode + '_recall'].items()]))
# Deal with visualization later
# # Optionally, log things to a separate dictionary
# if viz_dict is not None:
# # Caution: pred scores has changed (we took off the 0 class)
# gt_rels_scores = pred_scores[
# gt_rels[:, 0],
# gt_rels[:, 1],
# gt_rels[:, 2] - 1,
# ]
# # gt_rels_scores_cls = gt_rels_scores * pred_class_scores[
# # gt_rels[:, 0]] * pred_class_scores[gt_rels[:, 1]]
#
# viz_dict[mode + '_pred_rels'] = pred_5ples.tolist()
# viz_dict[mode + '_pred_rels_scores'] = max_pred_scores.tolist()
# viz_dict[mode + '_pred_rels_scores_cls'] = max_rel_scores.tolist()
# viz_dict[mode + '_gt_rels_scores'] = gt_rels_scores.tolist()
# viz_dict[mode + '_gt_rels_scores_cls'] = gt_rels_scores_cls.tolist()
#
# # Serialize pred2gt matching as a list of lists, where each sublist is of the form
# # pred_ind, gt_ind1, gt_ind2, ....
# viz_dict[mode + '_pred2gt_rel'] = pred_to_gt
###########################
def evaluate_recall(gt_rels, gt_boxes, gt_classes,
pred_rels, pred_boxes, pred_classes, rel_scores=None, cls_scores=None,
iou_thresh=0.5, phrdet=False):
"""
Evaluates the recall
:param gt_rels: [#gt_rel, 3] array of GT relations
:param gt_boxes: [#gt_box, 4] array of GT boxes
:param gt_classes: [#gt_box] array of GT classes
:param pred_rels: [#pred_rel, 3] array of pred rels. Assumed these are in sorted order
and refer to IDs in pred classes / pred boxes
(id0, id1, rel)
:param pred_boxes: [#pred_box, 4] array of pred boxes
:param pred_classes: [#pred_box] array of predicted classes for these boxes
:return: pred_to_gt: Matching from predicate to GT
pred_5ples: the predicted (id0, id1, cls0, cls1, rel)
rel_scores: [cls_0score, cls1_score, relscore]
"""
if pred_rels.size == 0:
return [[]], np.zeros((0,5)), np.zeros(0)
num_gt_boxes = gt_boxes.shape[0]
num_gt_relations = gt_rels.shape[0]
assert num_gt_relations != 0
gt_triplets, gt_triplet_boxes, _ = _triplet(gt_rels[:, 2],
gt_rels[:, :2],
gt_classes,
gt_boxes)
num_boxes = pred_boxes.shape[0]
assert pred_rels[:,:2].max() < pred_classes.shape[0]
# Exclude self rels
# assert np.all(pred_rels[:,0] != pred_rels[:,1])
assert np.all(pred_rels[:,2] > 0)
pred_triplets, pred_triplet_boxes, relation_scores = \
_triplet(pred_rels[:,2], pred_rels[:,:2], pred_classes, pred_boxes,
rel_scores, cls_scores)
scores_overall = relation_scores.prod(1)
if not np.all(scores_overall[1:] <= scores_overall[:-1] + 1e-5):
print("Somehow the relations weren't sorted properly: \n{}".format(scores_overall))
# raise ValueError("Somehow the relations werent sorted properly")
# Compute recall. It's most efficient to match once and then do recall after
pred_to_gt = _compute_pred_matches(
gt_triplets,
pred_triplets,
gt_triplet_boxes,
pred_triplet_boxes,
iou_thresh,
phrdet=phrdet,
)
# Contains some extra stuff for visualization. Not needed.
pred_5ples = np.column_stack((
pred_rels[:,:2],
pred_triplets[:, [0, 2, 1]],
))
return pred_to_gt, pred_5ples, relation_scores
def _triplet(predicates, relations, classes, boxes,
predicate_scores=None, class_scores=None):
"""
format predictions into triplets
:param predicates: A 1d numpy array of num_boxes*(num_boxes-1) predicates, corresponding to
each pair of possibilities
:param relations: A (num_boxes*(num_boxes-1), 2) array, where each row represents the boxes
in that relation
:param classes: A (num_boxes) array of the classes for each thing.
:param boxes: A (num_boxes,4) array of the bounding boxes for everything.
:param predicate_scores: A (num_boxes*(num_boxes-1)) array of the scores for each predicate
:param class_scores: A (num_boxes) array of the likelihood for each object.
:return: Triplets: (num_relations, 3) array of class, relation, class
Triplet boxes: (num_relation, 8) array of boxes for the parts
Triplet scores: num_relation array of the scores overall for the triplets
"""
assert (predicates.shape[0] == relations.shape[0])
sub_ob_classes = classes[relations[:, :2]]
triplets = np.column_stack((sub_ob_classes[:, 0], predicates, sub_ob_classes[:, 1]))
triplet_boxes = np.column_stack((boxes[relations[:, 0]], boxes[relations[:, 1]]))
triplet_scores = None
if predicate_scores is not None and class_scores is not None:
triplet_scores = np.column_stack((
class_scores[relations[:, 0]],
class_scores[relations[:, 1]],
predicate_scores,
))
return triplets, triplet_boxes, triplet_scores
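# --- Editor's note: illustrative sketch, not part of the original file. ---
# _triplet turns (subject box, object box, predicate) relations into rows of
# (subject class, predicate, object class) plus the concatenated box coordinates.
# The toy arrays below are hypothetical.
def _example_triplet_formatting():
    classes = np.array([5, 7, 2])            # class label per box
    boxes = np.array([[0, 0, 10, 10],
                      [5, 5, 20, 20],
                      [8, 2, 30, 15]])
    relations = np.array([[0, 1], [2, 0]])   # (subject box index, object box index)
    predicates = np.array([31, 20])
    triplets, triplet_boxes, _ = _triplet(predicates, relations, classes, boxes)
    # triplets      -> [[5, 31, 7], [2, 20, 5]]
    # triplet_boxes -> each row holds the subject box followed by the object box
    return triplets, triplet_boxes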
def _compute_pred_matches(gt_triplets, pred_triplets,
gt_boxes, pred_boxes, iou_thresh, phrdet=False):
"""
Given a set of predicted triplets, return the list of matching GT's for each of the
given predictions
:param gt_triplets:
:param pred_triplets:
:param gt_boxes:
:param pred_boxes:
:param iou_thresh:
:return:
"""
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
# The rows correspond to GT triplets, columns to pred triplets
keeps = intersect_2d(gt_triplets, pred_triplets)
gt_has_match = keeps.any(1)
pred_to_gt = [[] for x in range(pred_boxes.shape[0])]
for gt_ind, gt_box, keep_inds in zip(np.where(gt_has_match)[0],
gt_boxes[gt_has_match],
keeps[gt_has_match],
):
boxes = pred_boxes[keep_inds]
if phrdet:
# Evaluate where the union box > 0.5
gt_box_union = gt_box.reshape((2, 4))
gt_box_union = np.concatenate((gt_box_union.min(0)[:2], gt_box_union.max(0)[2:]), 0)
box_union = boxes.reshape((-1, 2, 4))
box_union = np.concatenate((box_union.min(1)[:,:2], box_union.max(1)[:,2:]), 1)
inds = bbox_overlaps(gt_box_union[None], box_union)[0] >= iou_thresh
else:
sub_iou = bbox_overlaps(gt_box[None,:4], boxes[:, :4])[0]
obj_iou = bbox_overlaps(gt_box[None,4:], boxes[:, 4:])[0]
inds = (sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)
for i in np.where(keep_inds)[0][inds]:
pred_to_gt[i].append(int(gt_ind))
return pred_to_gt
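# --- Editor's note: illustrative sketch, not part of the original file. ---
# Recall@K unions the GT relations matched by the top-K ranked predictions and
# divides by the number of GT relations, so several hits on the same GT triple
# count only once. The toy matching below is hypothetical.
def _example_recall_at_k():
    pred_to_gt_toy = [[0], [], [0, 2], [1]]  # GT indices matched by each ranked prediction
    num_gt_rels = 4
    k = 3
    matched = reduce(np.union1d, pred_to_gt_toy[:k])
    return float(len(matched)) / num_gt_rels  # 0.5: GTs 0 and 2 are recovered in the top 3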
| 11,883 | 40.698246 | 111 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/evaluation/sg_eval_all_rel_cates.py | """
Adapted from Danfei Xu. In particular, slow code was removed
"""
import numpy as np
from functools import reduce
from lib.pytorch_misc import intersect_2d, argsort_desc
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps
from config import MODES
import sys
np.set_printoptions(precision=3)
class BasicSceneGraphEvaluator:
def __init__(self, mode, multiple_preds=False):
self.result_dict = {}
self.mode = mode
rel_cats = {
0: 'all_rel_cates',
1: "above",
2: "across",
3: "against",
4: "along",
5: "and",
6: "at",
7: "attached to",
8: "behind",
9: "belonging to",
10: "between",
11: "carrying",
12: "covered in",
13: "covering",
14: "eating",
15: "flying in",
16: "for",
17: "from",
18: "growing on",
19: "hanging from",
20: "has",
21: "holding",
22: "in",
23: "in front of",
24: "laying on",
25: "looking at",
26: "lying on",
27: "made of",
28: "mounted on",
29: "near",
30: "of",
31: "on",
32: "on back of",
33: "over",
34: "painted on",
35: "parked on",
36: "part of",
37: "playing",
38: "riding",
39: "says",
40: "sitting on",
41: "standing on",
42: "to",
43: "under",
44: "using",
45: "walking in",
46: "walking on",
47: "watching",
48: "wearing",
49: "wears",
50: "with"
}
self.rel_cats = rel_cats
        self.result_dict[self.mode + '_recall'] = {20: {}, 50: {}, 100: {}}
for key, value in self.result_dict[self.mode + '_recall'].items():
self.result_dict[self.mode + '_recall'][key] = {}
for rel_cat_id, rel_cat_name in rel_cats.items():
self.result_dict[self.mode + '_recall'][key][rel_cat_name] = []
self.multiple_preds = multiple_preds
@classmethod
def all_modes(cls, **kwargs):
evaluators = {m: cls(mode=m, **kwargs) for m in MODES}
return evaluators
@classmethod
def vrd_modes(cls, **kwargs):
evaluators = {m: cls(mode=m, multiple_preds=True, **kwargs) for m in ('preddet', 'phrdet')}
return evaluators
def evaluate_scene_graph_entry(self, gt_entry, pred_scores, viz_dict=None, iou_thresh=0.5):
res = evaluate_from_dict(gt_entry, pred_scores, self.mode, self.result_dict,
viz_dict=viz_dict, iou_thresh=iou_thresh, multiple_preds=self.multiple_preds, rel_cats=self.rel_cats)
# self.print_stats()
return res
def save(self, fn):
np.save(fn, self.result_dict)
def print_stats(self):
print('======================' + self.mode + '============================')
for k, v in self.result_dict[self.mode + '_recall'].items():
for rel_cat_id, rel_cat_name in self.rel_cats.items():
print('R@%i: %f' % (k, np.mean(v[rel_cat_name])), rel_cat_name)
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
def evaluate_from_dict(gt_entry, pred_entry, mode, result_dict, multiple_preds=False,
viz_dict=None, rel_cats=None, **kwargs):
"""
Shortcut to doing evaluate_recall from dict
:param gt_entry: Dictionary containing gt_relations, gt_boxes, gt_classes
:param pred_entry: Dictionary containing pred_rels, pred_boxes (if detection), pred_classes
    :param mode: one of 'predcls', 'sgcls', 'sgdet', 'phrdet', or 'preddet'
:param result_dict:
:param viz_dict:
:param kwargs:
:return:
"""
gt_rels = gt_entry['gt_relations']
gt_boxes = gt_entry['gt_boxes'].astype(float)
gt_classes = gt_entry['gt_classes']
gt_rels_nums = [0 for x in range(len(rel_cats))]
for rel in gt_rels:
gt_rels_nums[rel[2]] += 1
gt_rels_nums[0] += 1
pred_rel_inds = pred_entry['pred_rel_inds']
rel_scores = pred_entry['rel_scores']
if mode == 'predcls':
pred_boxes = gt_boxes
pred_classes = gt_classes
obj_scores = np.ones(gt_classes.shape[0])
elif mode == 'sgcls':
pred_boxes = gt_boxes
pred_classes = pred_entry['pred_classes']
obj_scores = pred_entry['obj_scores']
elif mode == 'sgdet' or mode == 'phrdet':
pred_boxes = pred_entry['pred_boxes'].astype(float)
pred_classes = pred_entry['pred_classes']
obj_scores = pred_entry['obj_scores']
elif mode == 'preddet':
# Only extract the indices that appear in GT
prc = intersect_2d(pred_rel_inds, gt_rels[:, :2])
if prc.size == 0:
for k in result_dict[mode + '_recall']:
result_dict[mode + '_recall'][k].append(0.0)
return None, None, None
pred_inds_per_gt = prc.argmax(0)
pred_rel_inds = pred_rel_inds[pred_inds_per_gt]
rel_scores = rel_scores[pred_inds_per_gt]
# Now sort the matching ones
rel_scores_sorted = argsort_desc(rel_scores[:,1:])
rel_scores_sorted[:,1] += 1
rel_scores_sorted = np.column_stack((pred_rel_inds[rel_scores_sorted[:,0]], rel_scores_sorted[:,1]))
matches = intersect_2d(rel_scores_sorted, gt_rels)
for k in result_dict[mode + '_recall']:
rec_i = float(matches[:k].any(0).sum()) / float(gt_rels.shape[0])
result_dict[mode + '_recall'][k].append(rec_i)
return None, None, None
else:
raise ValueError('invalid mode')
if multiple_preds:
obj_scores_per_rel = obj_scores[pred_rel_inds].prod(1)
overall_scores = obj_scores_per_rel[:,None] * rel_scores[:,1:]
score_inds = argsort_desc(overall_scores)[:100]
pred_rels = np.column_stack((pred_rel_inds[score_inds[:,0]], score_inds[:,1]+1))
predicate_scores = rel_scores[score_inds[:,0], score_inds[:,1]+1]
else:
pred_rels = np.column_stack((pred_rel_inds, 1+rel_scores[:,1:].argmax(1)))
predicate_scores = rel_scores[:,1:].max(1)
pred_to_gt, pred_5ples, rel_scores = evaluate_recall(
gt_rels, gt_boxes, gt_classes,
pred_rels, pred_boxes, pred_classes,
predicate_scores, obj_scores, phrdet= mode=='phrdet',rel_cats=rel_cats,
**kwargs)
for k in result_dict[mode + '_recall']:
for rel_cat_id, rel_cat_name in rel_cats.items():
match = reduce(np.union1d, pred_to_gt[rel_cat_name][:k])
rec_i = float(len(match)) / (float(gt_rels_nums[rel_cat_id]) + sys.float_info.min) #float(gt_rels.shape[0])
result_dict[mode + '_recall'][k][rel_cat_name].append(rec_i)
return pred_to_gt, pred_5ples, rel_scores
# print(" ".join(["R@{:2d}: {:.3f}".format(k, v[-1]) for k, v in result_dict[mode + '_recall'].items()]))
# Deal with visualization later
# # Optionally, log things to a separate dictionary
# if viz_dict is not None:
# # Caution: pred scores has changed (we took off the 0 class)
# gt_rels_scores = pred_scores[
# gt_rels[:, 0],
# gt_rels[:, 1],
# gt_rels[:, 2] - 1,
# ]
# # gt_rels_scores_cls = gt_rels_scores * pred_class_scores[
# # gt_rels[:, 0]] * pred_class_scores[gt_rels[:, 1]]
#
# viz_dict[mode + '_pred_rels'] = pred_5ples.tolist()
# viz_dict[mode + '_pred_rels_scores'] = max_pred_scores.tolist()
# viz_dict[mode + '_pred_rels_scores_cls'] = max_rel_scores.tolist()
# viz_dict[mode + '_gt_rels_scores'] = gt_rels_scores.tolist()
# viz_dict[mode + '_gt_rels_scores_cls'] = gt_rels_scores_cls.tolist()
#
# # Serialize pred2gt matching as a list of lists, where each sublist is of the form
# # pred_ind, gt_ind1, gt_ind2, ....
# viz_dict[mode + '_pred2gt_rel'] = pred_to_gt
###########################
def evaluate_recall(gt_rels, gt_boxes, gt_classes,
pred_rels, pred_boxes, pred_classes, rel_scores=None, cls_scores=None,
iou_thresh=0.5, phrdet=False, rel_cats=None):
"""
Evaluates the recall
:param gt_rels: [#gt_rel, 3] array of GT relations
:param gt_boxes: [#gt_box, 4] array of GT boxes
:param gt_classes: [#gt_box] array of GT classes
:param pred_rels: [#pred_rel, 3] array of pred rels. Assumed these are in sorted order
and refer to IDs in pred classes / pred boxes
(id0, id1, rel)
:param pred_boxes: [#pred_box, 4] array of pred boxes
:param pred_classes: [#pred_box] array of predicted classes for these boxes
:return: pred_to_gt: Matching from predicate to GT
pred_5ples: the predicted (id0, id1, cls0, cls1, rel)
rel_scores: [cls_0score, cls1_score, relscore]
"""
if pred_rels.size == 0:
return [[]], np.zeros((0,5)), np.zeros(0)
num_gt_boxes = gt_boxes.shape[0]
num_gt_relations = gt_rels.shape[0]
assert num_gt_relations != 0
gt_triplets, gt_triplet_boxes, _ = _triplet(gt_rels[:, 2],
gt_rels[:, :2],
gt_classes,
gt_boxes)
num_boxes = pred_boxes.shape[0]
assert pred_rels[:,:2].max() < pred_classes.shape[0]
# Exclude self rels
# assert np.all(pred_rels[:,0] != pred_rels[:,1])
assert np.all(pred_rels[:,2] > 0)
pred_triplets, pred_triplet_boxes, relation_scores = \
_triplet(pred_rels[:,2], pred_rels[:,:2], pred_classes, pred_boxes,
rel_scores, cls_scores)
scores_overall = relation_scores.prod(1)
if not np.all(scores_overall[1:] <= scores_overall[:-1] + 1e-5):
print("Somehow the relations weren't sorted properly: \n{}".format(scores_overall))
# raise ValueError("Somehow the relations werent sorted properly")
# Compute recall. It's most efficient to match once and then do recall after
pred_to_gt = _compute_pred_matches(
gt_triplets,
pred_triplets,
gt_triplet_boxes,
pred_triplet_boxes,
iou_thresh,
phrdet=phrdet,
rel_cats=rel_cats,
)
# Contains some extra stuff for visualization. Not needed.
pred_5ples = np.column_stack((
pred_rels[:,:2],
pred_triplets[:, [0, 2, 1]],
))
return pred_to_gt, pred_5ples, relation_scores
def _triplet(predicates, relations, classes, boxes,
predicate_scores=None, class_scores=None):
"""
format predictions into triplets
:param predicates: A 1d numpy array of num_boxes*(num_boxes-1) predicates, corresponding to
each pair of possibilities
:param relations: A (num_boxes*(num_boxes-1), 2) array, where each row represents the boxes
in that relation
:param classes: A (num_boxes) array of the classes for each thing.
:param boxes: A (num_boxes,4) array of the bounding boxes for everything.
:param predicate_scores: A (num_boxes*(num_boxes-1)) array of the scores for each predicate
:param class_scores: A (num_boxes) array of the likelihood for each object.
:return: Triplets: (num_relations, 3) array of class, relation, class
Triplet boxes: (num_relation, 8) array of boxes for the parts
Triplet scores: num_relation array of the scores overall for the triplets
"""
assert (predicates.shape[0] == relations.shape[0])
sub_ob_classes = classes[relations[:, :2]]
triplets = np.column_stack((sub_ob_classes[:, 0], predicates, sub_ob_classes[:, 1]))
triplet_boxes = np.column_stack((boxes[relations[:, 0]], boxes[relations[:, 1]]))
triplet_scores = None
if predicate_scores is not None and class_scores is not None:
triplet_scores = np.column_stack((
class_scores[relations[:, 0]],
class_scores[relations[:, 1]],
predicate_scores,
))
return triplets, triplet_boxes, triplet_scores
def _compute_pred_matches(gt_triplets, pred_triplets,
gt_boxes, pred_boxes, iou_thresh, phrdet=False, rel_cats=None):
"""
Given a set of predicted triplets, return the list of matching GT's for each of the
given predictions
:param gt_triplets:
:param pred_triplets:
:param gt_boxes:
:param pred_boxes:
:param iou_thresh:
:return:
"""
# This performs a matrix multiplication-esque thing between the two arrays
# Instead of summing, we want the equality, so we reduce in that way
# The rows correspond to GT triplets, columns to pred triplets
keeps = intersect_2d(gt_triplets, pred_triplets)
gt_has_match = keeps.any(1)
pred_to_gt = {}
for rel_cat_id, rel_cat_name in rel_cats.items():
pred_to_gt[rel_cat_name] = [[] for x in range(pred_boxes.shape[0])]
for gt_ind, gt_box, keep_inds in zip(np.where(gt_has_match)[0],
gt_boxes[gt_has_match],
keeps[gt_has_match],
):
boxes = pred_boxes[keep_inds]
if phrdet:
# Evaluate where the union box > 0.5
gt_box_union = gt_box.reshape((2, 4))
gt_box_union = np.concatenate((gt_box_union.min(0)[:2], gt_box_union.max(0)[2:]), 0)
box_union = boxes.reshape((-1, 2, 4))
box_union = np.concatenate((box_union.min(1)[:,:2], box_union.max(1)[:,2:]), 1)
inds = bbox_overlaps(gt_box_union[None], box_union)[0] >= iou_thresh
else:
sub_iou = bbox_overlaps(gt_box[None,:4], boxes[:, :4])[0]
obj_iou = bbox_overlaps(gt_box[None,4:], boxes[:, 4:])[0]
inds = (sub_iou >= iou_thresh) & (obj_iou >= iou_thresh)
for i in np.where(keep_inds)[0][inds]:
pred_to_gt['all_rel_cates'][i].append(int(gt_ind))
pred_to_gt[rel_cats[gt_triplets[int(gt_ind), 1]]][i].append(int(gt_ind))
return pred_to_gt
| 14,355 | 39.439437 | 135 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/decoder_rnn.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import PackedSequence
from typing import Optional, Tuple
from lib.fpn.box_utils import nms_overlaps
from lib.word_vectors import obj_edge_vectors
from .highway_lstm_cuda.alternating_highway_lstm import block_orthogonal
import numpy as np
def get_dropout_mask(dropout_probability: float, tensor_for_masking: torch.autograd.Variable):
"""
Computes and returns an element-wise dropout mask for a given tensor, where
each element in the mask is dropped out with probability dropout_probability.
Note that the mask is NOT applied to the tensor - the tensor is passed to retain
the correct CUDA tensor type for the mask.
Parameters
----------
dropout_probability : float, required.
Probability of dropping a dimension of the input.
tensor_for_masking : torch.Variable, required.
Returns
-------
A torch.FloatTensor consisting of the binary mask scaled by 1/ (1 - dropout_probability).
    This scaling ensures that the expected value and variance of the masked tensor
    match those of the original tensor.
"""
binary_mask = tensor_for_masking.clone()
binary_mask.data.copy_(torch.rand(tensor_for_masking.size()) > dropout_probability)
# Scale mask by 1/keep_prob to preserve output statistics.
dropout_mask = binary_mask.float().div(1.0 - dropout_probability)
return dropout_mask
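# --- Editor's note: illustrative sketch, not part of the original file. ---
# The mask zeroes roughly `p` of the entries and scales the survivors by 1/(1-p),
# so the masked tensor keeps the original expected value. The toy tensor below is
# hypothetical; get_dropout_mask is the function defined above.
def _example_dropout_mask(p=0.2):
    h = Variable(torch.ones(4, 8))
    mask = get_dropout_mask(p, h)
    return h * mask  # surviving entries equal 1 / (1 - p)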
class DecoderRNN(torch.nn.Module):
def __init__(self, classes, embed_dim, inputs_dim, hidden_dim, recurrent_dropout_probability=0.2,
use_highway=True, use_input_projection_bias=True):
"""
Initializes the RNN
        :param classes: List of object class names
        :param embed_dim: Dimension of the class embeddings fed back at each step
        :param inputs_dim: Dimension of the per-object input features
        :param hidden_dim: Hidden dim of the decoder
        :param recurrent_dropout_probability: Recurrent dropout applied to the hidden state
        :param use_highway: Whether to add the highway connection to the LSTM cell
        :param use_input_projection_bias: Whether the input projection uses a bias term
"""
super(DecoderRNN, self).__init__()
self.classes = classes
embed_vecs = obj_edge_vectors(['start'] + self.classes, wv_dim=100)
self.obj_embed = nn.Embedding(len(self.classes), embed_dim)
self.obj_embed.weight.data = embed_vecs
self.hidden_size = hidden_dim
self.inputs_dim = inputs_dim
self.nms_thresh = 0.3
self.recurrent_dropout_probability=recurrent_dropout_probability
self.use_highway=use_highway
# We do the projections for all the gates all at once, so if we are
# using highway layers, we need some extra projections, which is
# why the sizes of the Linear layers change here depending on this flag.
if use_highway:
self.input_linearity = torch.nn.Linear(self.input_size, 6 * self.hidden_size,
bias=use_input_projection_bias)
self.state_linearity = torch.nn.Linear(self.hidden_size, 5 * self.hidden_size,
bias=True)
else:
self.input_linearity = torch.nn.Linear(self.input_size, 4 * self.hidden_size,
bias=use_input_projection_bias)
self.state_linearity = torch.nn.Linear(self.hidden_size, 4 * self.hidden_size,
bias=True)
self.out = nn.Linear(self.hidden_size, len(self.classes))
self.reset_parameters()
@property
def input_size(self):
return self.inputs_dim + self.obj_embed.weight.size(1)
def reset_parameters(self):
# Use sensible default initializations for parameters.
block_orthogonal(self.input_linearity.weight.data, [self.hidden_size, self.input_size])
block_orthogonal(self.state_linearity.weight.data, [self.hidden_size, self.hidden_size])
self.state_linearity.bias.data.fill_(0.0)
# Initialize forget gate biases to 1.0 as per An Empirical
# Exploration of Recurrent Network Architectures, (Jozefowicz, 2015).
self.state_linearity.bias.data[self.hidden_size:2 * self.hidden_size].fill_(1.0)
def lstm_equations(self, timestep_input, previous_state, previous_memory, dropout_mask=None):
"""
        Runs one step of the (optionally highway-augmented) LSTM cell
:param timestep_input:
:param previous_state:
:param previous_memory:
:param dropout_mask:
:return:
"""
# Do the projections for all the gates all at once.
projected_input = self.input_linearity(timestep_input)
projected_state = self.state_linearity(previous_state)
# Main LSTM equations using relevant chunks of the big linear
# projections of the hidden state and inputs.
input_gate = torch.sigmoid(projected_input[:, 0 * self.hidden_size:1 * self.hidden_size] +
projected_state[:, 0 * self.hidden_size:1 * self.hidden_size])
forget_gate = torch.sigmoid(projected_input[:, 1 * self.hidden_size:2 * self.hidden_size] +
projected_state[:, 1 * self.hidden_size:2 * self.hidden_size])
memory_init = torch.tanh(projected_input[:, 2 * self.hidden_size:3 * self.hidden_size] +
projected_state[:, 2 * self.hidden_size:3 * self.hidden_size])
output_gate = torch.sigmoid(projected_input[:, 3 * self.hidden_size:4 * self.hidden_size] +
projected_state[:, 3 * self.hidden_size:4 * self.hidden_size])
memory = input_gate * memory_init + forget_gate * previous_memory
timestep_output = output_gate * torch.tanh(memory)
if self.use_highway:
highway_gate = torch.sigmoid(projected_input[:, 4 * self.hidden_size:5 * self.hidden_size] +
projected_state[:, 4 * self.hidden_size:5 * self.hidden_size])
highway_input_projection = projected_input[:, 5 * self.hidden_size:6 * self.hidden_size]
timestep_output = highway_gate * timestep_output + (1 - highway_gate) * highway_input_projection
# Only do dropout if the dropout prob is > 0.0 and we are in training mode.
if dropout_mask is not None and self.training:
timestep_output = timestep_output * dropout_mask
return timestep_output, memory
def forward(self, # pylint: disable=arguments-differ
inputs: PackedSequence,
initial_state: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
labels=None, boxes_for_nms=None):
"""
Parameters
----------
inputs : PackedSequence, required.
A tensor of shape (batch_size, num_timesteps, input_size)
to apply the LSTM over.
initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None)
A tuple (state, memory) representing the initial hidden state and memory
of the LSTM. Each tensor has shape (1, batch_size, output_dimension).
        Returns
        -------
        out_dists : torch.FloatTensor of shape (num_boxes, num_classes)
            The per-box class logits produced by the decoder, concatenated over
            all timesteps in the batch.
        out_commitments : torch.LongTensor of shape (num_boxes,)
            The class index committed to for each box (fed back in as the next
            timestep's label embedding).
"""
if not isinstance(inputs, PackedSequence):
raise ValueError('inputs must be PackedSequence but got %s' % (type(inputs)))
assert isinstance(inputs, PackedSequence)
sequence_tensor, batch_lengths = inputs
batch_size = batch_lengths[0]
# We're just doing an LSTM decoder here so ignore states, etc
if initial_state is None:
previous_memory = Variable(sequence_tensor.data.new()
.resize_(batch_size, self.hidden_size).fill_(0))
previous_state = Variable(sequence_tensor.data.new()
.resize_(batch_size, self.hidden_size).fill_(0))
else:
assert len(initial_state) == 2
previous_state = initial_state[0].squeeze(0)
previous_memory = initial_state[1].squeeze(0)
previous_embed = self.obj_embed.weight[0, None].expand(batch_size, 100)
if self.recurrent_dropout_probability > 0.0:
dropout_mask = get_dropout_mask(self.recurrent_dropout_probability, previous_memory)
else:
dropout_mask = None
# Only accumulating label predictions here, discarding everything else
out_dists = []
out_commitments = []
end_ind = 0
for i, l_batch in enumerate(batch_lengths):
start_ind = end_ind
end_ind = end_ind + l_batch
if previous_memory.size(0) != l_batch:
previous_memory = previous_memory[:l_batch]
previous_state = previous_state[:l_batch]
previous_embed = previous_embed[:l_batch]
if dropout_mask is not None:
dropout_mask = dropout_mask[:l_batch]
timestep_input = torch.cat((sequence_tensor[start_ind:end_ind], previous_embed), 1)
previous_state, previous_memory = self.lstm_equations(timestep_input, previous_state,
previous_memory, dropout_mask=dropout_mask)
pred_dist = self.out(previous_state)
out_dists.append(pred_dist)
if self.training:
labels_to_embed = labels[start_ind:end_ind].clone()
# Whenever labels are 0 set input to be our max prediction
nonzero_pred = pred_dist[:, 1:].max(1)[1] + 1
is_bg = (labels_to_embed.data == 0).nonzero()
if is_bg.dim() > 0:
labels_to_embed[is_bg.squeeze(1)] = nonzero_pred[is_bg.squeeze(1)]
out_commitments.append(labels_to_embed)
previous_embed = self.obj_embed(labels_to_embed+1)
else:
assert l_batch == 1
out_dist_sample = F.softmax(pred_dist, dim=1)
# if boxes_for_nms is not None:
# out_dist_sample[domains_allowed[i] == 0] = 0.0
# Greedily take the max here amongst non-bgs
best_ind = out_dist_sample[:, 1:].max(1)[1] + 1
# if boxes_for_nms is not None and i < boxes_for_nms.size(0):
# best_int = int(best_ind.data[0])
# domains_allowed[i:, best_int] *= (1 - is_overlap[i, i:, best_int])
out_commitments.append(best_ind)
previous_embed = self.obj_embed(best_ind+1)
# Do NMS here as a post-processing step
if boxes_for_nms is not None and not self.training:
is_overlap = nms_overlaps(boxes_for_nms.data).view(
boxes_for_nms.size(0), boxes_for_nms.size(0), boxes_for_nms.size(1)
).cpu().numpy() >= self.nms_thresh
# is_overlap[np.arange(boxes_for_nms.size(0)), np.arange(boxes_for_nms.size(0))] = False
out_dists_sampled = F.softmax(torch.cat(out_dists,0), 1).data.cpu().numpy()
out_dists_sampled[:,0] = 0
out_commitments = out_commitments[0].data.new(len(out_commitments)).fill_(0)
for i in range(out_commitments.size(0)):
box_ind, cls_ind = np.unravel_index(out_dists_sampled.argmax(), out_dists_sampled.shape)
out_commitments[int(box_ind)] = int(cls_ind)
out_dists_sampled[is_overlap[box_ind,:,cls_ind], cls_ind] = 0.0
out_dists_sampled[box_ind] = -1.0 # This way we won't re-sample
out_commitments = Variable(out_commitments)
else:
out_commitments = torch.cat(out_commitments, 0)
return torch.cat(out_dists, 0), out_commitments
| 12,192 | 47.384921 | 109 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/highway_lstm_cuda/alternating_highway_lstm.py | from typing import Tuple
from overrides import overrides
import torch
from torch.autograd import Function, Variable
from torch.nn import Parameter
from torch.nn.utils.rnn import PackedSequence, pad_packed_sequence, pack_padded_sequence
import itertools
from ._ext import highway_lstm_layer
def block_orthogonal(tensor, split_sizes, gain=1.0):
"""
An initializer which allows initializing model parameters in "blocks". This is helpful
in the case of recurrent cores which use multiple gates applied to linear projections,
which can be computed efficiently if they are concatenated together. However, they are
separate parameters which should be initialized independently.
Parameters
----------
tensor : ``torch.Tensor``, required.
A tensor to initialize.
split_sizes : List[int], required.
A list of length ``tensor.ndim()`` specifying the size of the
blocks along that particular dimension. E.g. ``[10, 20]`` would
result in the tensor being split into chunks of size 10 along the
first dimension and 20 along the second.
gain : float, optional (default = 1.0)
The gain (scaling) applied to the orthogonal initialization.
"""
if isinstance(tensor, Variable):
block_orthogonal(tensor.data, split_sizes, gain)
return tensor
sizes = list(tensor.size())
if any([a % b != 0 for a, b in zip(sizes, split_sizes)]):
raise ValueError("tensor dimensions must be divisible by their respective "
"split_sizes. Found size: {} and split_sizes: {}".format(sizes, split_sizes))
indexes = [list(range(0, max_size, split))
for max_size, split in zip(sizes, split_sizes)]
# Iterate over all possible blocks within the tensor.
for block_start_indices in itertools.product(*indexes):
# A list of tuples containing the index to start at for this block
# and the appropriate step size (i.e split_size[i] for dimension i).
index_and_step_tuples = zip(block_start_indices, split_sizes)
# This is a tuple of slices corresponding to:
# tensor[index: index + step_size, ...]. This is
# required because we could have an arbitrary number
# of dimensions. The actual slices we need are the
# start_index: start_index + step for each dimension in the tensor.
block_slice = tuple([slice(start_index, start_index + step)
for start_index, step in index_and_step_tuples])
# let's not initialize empty things to 0s because THAT SOUNDS REALLY BAD
assert len(block_slice) == 2
sizes = [x.stop - x.start for x in block_slice]
tensor_copy = tensor.new(max(sizes), max(sizes))
torch.nn.init.orthogonal(tensor_copy, gain=gain)
tensor[block_slice] = tensor_copy[0:sizes[0], 0:sizes[1]]
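# ---------------------------------------------------------------------------
# Illustrative sketch (added for this write-up, not part of the original file):
# block_orthogonal on a fused gate weight. Only the 2D case is supported by the
# assert above; the sizes here are made up purely for the example.
def _demo_block_orthogonal():
    hidden_size, input_size = 4, 3
    fused_weight = torch.zeros(6 * hidden_size, input_size)
    # Every (hidden_size, input_size) block gets its own orthogonal init,
    # one per gate of the fused projection.
    block_orthogonal(fused_weight, [hidden_size, input_size])
    return fused_weight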
class _AlternatingHighwayLSTMFunction(Function):
def __init__(self, input_size: int, hidden_size: int, num_layers: int, train: bool) -> None:
super(_AlternatingHighwayLSTMFunction, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.train = train
@overrides
def forward(self, # pylint: disable=arguments-differ
inputs: torch.Tensor,
weight: torch.Tensor,
bias: torch.Tensor,
state_accumulator: torch.Tensor,
memory_accumulator: torch.Tensor,
dropout_mask: torch.Tensor,
lengths: torch.Tensor,
gates: torch.Tensor) -> Tuple[torch.Tensor, None]:
sequence_length, batch_size, input_size = inputs.size()
tmp_i = inputs.new(batch_size, 6 * self.hidden_size)
tmp_h = inputs.new(batch_size, 5 * self.hidden_size)
is_training = 1 if self.train else 0
highway_lstm_layer.highway_lstm_forward_cuda(input_size, # type: ignore # pylint: disable=no-member
self.hidden_size,
batch_size,
self.num_layers,
sequence_length,
inputs,
lengths,
state_accumulator,
memory_accumulator,
tmp_i,
tmp_h,
weight,
bias,
dropout_mask,
gates,
is_training)
self.save_for_backward(inputs, lengths, weight, bias, state_accumulator,
memory_accumulator, dropout_mask, gates)
# The state_accumulator has shape: (num_layers, sequence_length + 1, batch_size, hidden_size)
# so for the output, we want the last layer and all but the first timestep, which was the
# initial state.
output = state_accumulator[-1, 1:, :, :]
return output, state_accumulator[:, 1:, :, :]
@overrides
def backward(self, grad_output, grad_hy): # pylint: disable=arguments-differ
(inputs, lengths, weight, bias, state_accumulator, # pylint: disable=unpacking-non-sequence
memory_accumulator, dropout_mask, gates) = self.saved_tensors
inputs = inputs.contiguous()
sequence_length, batch_size, input_size = inputs.size()
parameters_need_grad = 1 if self.needs_input_grad[1] else 0 # pylint: disable=unsubscriptable-object
grad_input = inputs.new().resize_as_(inputs).zero_()
grad_state_accumulator = inputs.new().resize_as_(state_accumulator).zero_()
grad_memory_accumulator = inputs.new().resize_as_(memory_accumulator).zero_()
grad_weight = inputs.new()
grad_bias = inputs.new()
grad_dropout = None
grad_lengths = None
grad_gates = None
if parameters_need_grad:
grad_weight.resize_as_(weight).zero_()
grad_bias.resize_as_(bias).zero_()
tmp_i_gates_grad = inputs.new().resize_(batch_size, 6 * self.hidden_size).zero_()
tmp_h_gates_grad = inputs.new().resize_(batch_size, 5 * self.hidden_size).zero_()
is_training = 1 if self.train else 0
highway_lstm_layer.highway_lstm_backward_cuda(input_size, # pylint: disable=no-member
self.hidden_size,
batch_size,
self.num_layers,
sequence_length,
grad_output,
lengths,
grad_state_accumulator,
grad_memory_accumulator,
inputs,
state_accumulator,
memory_accumulator,
weight,
gates,
dropout_mask,
tmp_h_gates_grad,
tmp_i_gates_grad,
grad_hy,
grad_input,
grad_weight,
grad_bias,
is_training,
parameters_need_grad)
return (grad_input, grad_weight, grad_bias, grad_state_accumulator,
grad_memory_accumulator, grad_dropout, grad_lengths, grad_gates)
class AlternatingHighwayLSTM(torch.nn.Module):
"""
A stacked LSTM with LSTM layers which alternate between going forwards over
the sequence and going backwards, with highway connections between each of
the alternating layers. This implementation is based on the description in
`Deep Semantic Role Labelling - What works and what's next
<https://homes.cs.washington.edu/~luheng/files/acl2017_hllz.pdf>`_ .
Parameters
----------
input_size : int, required
The dimension of the inputs to the LSTM.
hidden_size : int, required
The dimension of the outputs of the LSTM.
num_layers : int, required
The number of stacked LSTMs to use.
recurrent_dropout_probability: float, optional (default = 0.0)
The dropout probability to be used in a dropout scheme as stated in
`A Theoretically Grounded Application of Dropout in Recurrent Neural Networks
<https://arxiv.org/abs/1512.05287>`_ .
Returns
-------
output : PackedSequence
The outputs of the interleaved LSTMs per timestep. A tensor of shape
(batch_size, max_timesteps, hidden_size) where for a given batch
element, all outputs past the sequence length for that batch are
zero tensors.
"""
def __init__(self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
recurrent_dropout_probability: float = 0) -> None:
super(AlternatingHighwayLSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.num_layers = num_layers
self.recurrent_dropout_probability = recurrent_dropout_probability
self.training = True
# Input dimensions consider the fact that we do
# all of the LSTM projections (and highway parts)
# in a single matrix multiplication.
input_projection_size = 6 * hidden_size
state_projection_size = 5 * hidden_size
bias_size = 5 * hidden_size
# Here we are creating a single weight and bias with the
# parameters for all layers unfolded into it. This is necessary
# because unpacking and re-packing the weights inside the
# kernel would be slow, as it would happen every time it is called.
total_weight_size = 0
total_bias_size = 0
for layer in range(num_layers):
layer_input_size = input_size if layer == 0 else hidden_size
input_weights = input_projection_size * layer_input_size
state_weights = state_projection_size * hidden_size
total_weight_size += input_weights + state_weights
total_bias_size += bias_size
self.weight = Parameter(torch.FloatTensor(total_weight_size))
self.bias = Parameter(torch.FloatTensor(total_bias_size))
self.reset_parameters()
def reset_parameters(self) -> None:
self.bias.data.zero_()
weight_index = 0
bias_index = 0
for i in range(self.num_layers):
input_size = self.input_size if i == 0 else self.hidden_size
# Create a tensor of the right size and initialize it.
init_tensor = self.weight.data.new(input_size, self.hidden_size * 6).zero_()
block_orthogonal(init_tensor, [input_size, self.hidden_size])
# Copy it into the flat weight.
self.weight.data[weight_index: weight_index + init_tensor.nelement()] \
.view_as(init_tensor).copy_(init_tensor)
weight_index += init_tensor.nelement()
# Same for the recurrent connection weight.
init_tensor = self.weight.data.new(self.hidden_size, self.hidden_size * 5).zero_()
block_orthogonal(init_tensor, [self.hidden_size, self.hidden_size])
self.weight.data[weight_index: weight_index + init_tensor.nelement()] \
.view_as(init_tensor).copy_(init_tensor)
weight_index += init_tensor.nelement()
# Set the forget bias to 1.
self.bias.data[bias_index + self.hidden_size:bias_index + 2 * self.hidden_size].fill_(1)
bias_index += 5 * self.hidden_size
def forward(self, inputs, initial_state=None) -> Tuple[PackedSequence, torch.Tensor]:
"""
Parameters
----------
inputs : ``PackedSequence``, required.
A batch first ``PackedSequence`` to run the stacked LSTM over.
initial_state : Tuple[torch.Tensor, torch.Tensor], optional, (default = None)
Currently, this is ignored.
Returns
-------
output_sequence : ``PackedSequence``
The encoded sequence of shape (batch_size, sequence_length, hidden_size)
final_states: ``torch.Tensor``
The per-layer final (state, memory) states of the LSTM, each with shape
(num_layers, batch_size, hidden_size).
"""
inputs, lengths = pad_packed_sequence(inputs, batch_first=False)
sequence_length, batch_size, _ = inputs.size()
accumulator_shape = [self.num_layers, sequence_length + 1, batch_size, self.hidden_size]
state_accumulator = Variable(inputs.data.new(*accumulator_shape).zero_(), requires_grad=False)
memory_accumulator = Variable(inputs.data.new(*accumulator_shape).zero_(), requires_grad=False)
dropout_weights = inputs.data.new().resize_(self.num_layers, batch_size, self.hidden_size).fill_(1.0)
if self.training:
# Normalize by 1 - dropout_prob to preserve the output statistics of the layer.
dropout_weights.bernoulli_(1 - self.recurrent_dropout_probability) \
.div_((1 - self.recurrent_dropout_probability))
dropout_weights = Variable(dropout_weights, requires_grad=False)
gates = Variable(inputs.data.new().resize_(self.num_layers,
sequence_length,
batch_size, 6 * self.hidden_size))
lengths_variable = Variable(torch.IntTensor(lengths))
implementation = _AlternatingHighwayLSTMFunction(self.input_size,
self.hidden_size,
num_layers=self.num_layers,
train=self.training)
output, _ = implementation(inputs, self.weight, self.bias, state_accumulator,
memory_accumulator, dropout_weights, lengths_variable, gates)
output = pack_padded_sequence(output, lengths, batch_first=False)
return output, None
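# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not in the original file). The
# module only runs with the compiled highway_lstm CUDA extension, so treat this
# as a shape-level sketch; all sizes below are arbitrary.
def _demo_alternating_highway_lstm():
    lstm = AlternatingHighwayLSTM(input_size=8, hidden_size=16, num_layers=2,
                                  recurrent_dropout_probability=0.2).cuda()
    # Three sequences of lengths 5, 4 and 2, packed with batch_first=False.
    inputs = Variable(torch.randn(5, 3, 8).cuda())
    packed = pack_padded_sequence(inputs, [5, 4, 2], batch_first=False)
    output, _ = lstm(packed)  # PackedSequence of per-timestep hidden states
    return output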
| 15,176 | 48.924342 | 109 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/highway_lstm_cuda/build.py | # pylint: disable=invalid-name
import os
import torch
from torch.utils.ffi import create_extension
if not torch.cuda.is_available():
raise Exception('HighwayLSTM can only be compiled with CUDA')
sources = ['src/highway_lstm_cuda.c']
headers = ['src/highway_lstm_cuda.h']
defines = [('WITH_CUDA', None)]
with_cuda = True
this_file = os.path.dirname(os.path.realpath(__file__))
extra_objects = ['src/highway_lstm_kernel.cu.o']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
ffi = create_extension(
'_ext.highway_lstm_layer',
headers=headers,
sources=sources,
define_macros=defines,
relative_to=__file__,
with_cuda=with_cuda,
extra_objects=extra_objects
)
if __name__ == '__main__':
ffi.build()
| 798 | 25.633333 | 75 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/lstm/highway_lstm_cuda/_ext/highway_lstm_layer/__init__.py |
from torch.utils.ffi import _wrap_function
from ._highway_lstm_layer import lib as _lib, ffi as _ffi
__all__ = []
def _import_symbols(locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
locals[symbol] = _wrap_function(fn, _ffi)
__all__.append(symbol)
_import_symbols(locals())
| 317 | 23.461538 | 57 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/box_utils.py | import torch
import numpy as np
from torch.nn import functional as F
from lib.fpn.box_intersections_cpu.bbox import bbox_overlaps as bbox_overlaps_np
from lib.fpn.box_intersections_cpu.bbox import bbox_intersections as bbox_intersections_np
def bbox_loss(prior_boxes, deltas, gt_boxes, eps=1e-4, scale_before=1):
"""
Computes the loss for predicting the GT boxes from prior boxes
:param prior_boxes: [num_boxes, 4] (x1, y1, x2, y2)
:param deltas: [num_boxes, 4] (tx, ty, th, tw)
:param gt_boxes: [num_boxes, 4] (x1, y1, x2, y2)
:return:
"""
prior_centers = center_size(prior_boxes) #(cx, cy, w, h)
gt_centers = center_size(gt_boxes) #(cx, cy, w, h)
center_targets = (gt_centers[:, :2] - prior_centers[:, :2]) / prior_centers[:, 2:]
size_targets = torch.log(gt_centers[:, 2:]) - torch.log(prior_centers[:, 2:])
all_targets = torch.cat((center_targets, size_targets), 1)
loss = F.smooth_l1_loss(deltas, all_targets, size_average=False)/(eps + prior_centers.size(0))
return loss
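# ---------------------------------------------------------------------------
# Small sanity-check sketch (added for illustration, not in the original file):
# when the priors already coincide with the ground truth, the regression
# targets are all zero, so zero deltas give zero loss. Box values are
# arbitrary; with a recent PyTorch, plain tensors are fine here.
def _demo_bbox_loss_zero_when_aligned():
    priors = torch.FloatTensor([[10, 10, 29, 29], [5, 5, 14, 24]])
    deltas = torch.zeros(2, 4)
    return bbox_loss(priors, deltas, priors.clone())  # ~0.0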
def bbox_preds(boxes, deltas):
"""
Converts "deltas" (predicted by the network) along with prior boxes
into (x1, y1, x2, y2) representation.
:param boxes: Prior boxes, represented as (x1, y1, x2, y2)
:param deltas: Offsets (tx, ty, tw, th)
:return: Transformed boxes
"""
if boxes.size(0) == 0:
return boxes
prior_centers = center_size(boxes)
xys = prior_centers[:, :2] + prior_centers[:, 2:] * deltas[:, :2]
whs = torch.exp(deltas[:, 2:]) * prior_centers[:, 2:]
return point_form(torch.cat((xys, whs), 1))
def center_size(boxes):
""" Convert prior_boxes to (cx, cy, w, h)
representation for comparison to center-size form ground truth data.
Args:
boxes: (tensor) point_form boxes
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
wh = boxes[:, 2:] - boxes[:, :2] + 1.0
if isinstance(boxes, np.ndarray):
return np.column_stack((boxes[:, :2] + 0.5 * wh, wh))
return torch.cat((boxes[:, :2] + 0.5 * wh, wh), 1)
def point_form(boxes):
""" Convert prior_boxes to (xmin, ymin, xmax, ymax)
representation for comparison to point form ground truth data.
Args:
boxes: (tensor) center-size default boxes from priorbox layers.
Return:
boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
"""
if isinstance(boxes, np.ndarray):
return np.column_stack((boxes[:, :2] - 0.5 * boxes[:, 2:],
boxes[:, :2] + 0.5 * (boxes[:, 2:] - 2.0)))
return torch.cat((boxes[:, :2] - 0.5 * boxes[:, 2:],
boxes[:, :2] + 0.5 * (boxes[:, 2:] - 2.0)), 1) # xmax, ymax
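# ---------------------------------------------------------------------------
# Illustrative round trip (added for clarity, not part of the original file):
# under the inclusive-corner convention used above (width = x2 - x1 + 1) the
# two conversions are exact inverses, for numpy arrays as well as torch tensors.
def _demo_center_point_roundtrip():
    boxes = np.array([[10., 20., 49., 59.]])  # x1, y1, x2, y2
    back = point_form(center_size(boxes))
    assert np.allclose(boxes, back)
    return back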
###########################################################################
### Torch Utils, creds to Max de Groot
###########################################################################
def bbox_intersections(box_a, box_b):
""" We resize both tensors to [A,B,2] without new malloc:
[A,2] -> [A,1,2] -> [A,B,2]
[B,2] -> [1,B,2] -> [A,B,2]
Then we compute the area of intersect between box_a and box_b.
Args:
box_a: (tensor) bounding boxes, Shape: [A,4].
box_b: (tensor) bounding boxes, Shape: [B,4].
Return:
(tensor) intersection area, Shape: [A,B].
"""
if isinstance(box_a, np.ndarray):
assert isinstance(box_b, np.ndarray)
return bbox_intersections_np(box_a, box_b)
A = box_a.size(0)
B = box_b.size(0)
max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
box_b[:, :2].unsqueeze(0).expand(A, B, 2))
inter = torch.clamp((max_xy - min_xy + 1.0), min=0)
return inter[:, :, 0] * inter[:, :, 1]
def bbox_overlaps(box_a, box_b):
"""Compute the jaccard overlap of two sets of boxes. The jaccard overlap
is simply the intersection over union of two boxes. Here we operate on
ground truth boxes and default boxes.
E.g.:
A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
Args:
box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
Return:
jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
"""
if isinstance(box_a, np.ndarray):
assert isinstance(box_b, np.ndarray)
return bbox_overlaps_np(box_a, box_b)
inter = bbox_intersections(box_a, box_b)
area_a = ((box_a[:, 2] - box_a[:, 0] + 1.0) *
(box_a[:, 3] - box_a[:, 1] + 1.0)).unsqueeze(1).expand_as(inter) # [A,B]
area_b = ((box_b[:, 2] - box_b[:, 0] + 1.0) *
(box_b[:, 3] - box_b[:, 1] + 1.0)).unsqueeze(0).expand_as(inter) # [A,B]
union = area_a + area_b - inter
return inter / union # [A,B]
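# ---------------------------------------------------------------------------
# Small numeric example (added for illustration, not part of the original file),
# using the pure-torch path so the compiled cython helpers are not needed:
# two 10x10 boxes offset by (5, 5) intersect in a 5x5 patch.
def _demo_bbox_overlaps():
    box_a = torch.FloatTensor([[0, 0, 9, 9]])
    box_b = torch.FloatTensor([[5, 5, 14, 14]])
    return bbox_overlaps(box_a, box_b)  # 25 / (100 + 100 - 25) ~= 0.1429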
def nms_overlaps(boxes):
""" get overlaps for each channel"""
assert boxes.dim() == 3
N = boxes.size(0)
nc = boxes.size(1)
max_xy = torch.min(boxes[:, None, :, 2:].expand(N, N, nc, 2),
boxes[None, :, :, 2:].expand(N, N, nc, 2))
min_xy = torch.max(boxes[:, None, :, :2].expand(N, N, nc, 2),
boxes[None, :, :, :2].expand(N, N, nc, 2))
inter = torch.clamp((max_xy - min_xy + 1.0), min=0)
# n, n, 151
inters = inter[:,:,:,0]*inter[:,:,:,1]
boxes_flat = boxes.view(-1, 4)
areas_flat = (boxes_flat[:,2]- boxes_flat[:,0]+1.0)*(
boxes_flat[:,3]- boxes_flat[:,1]+1.0)
areas = areas_flat.view(boxes.size(0), boxes.size(1))
union = -inters + areas[None] + areas[:, None]
return inters / union
| 5,965 | 37.24359 | 98 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/proposal_assignments/proposal_assignments_rel.py | # --------------------------------------------------------
# Goal: assign ROIs to targets
# --------------------------------------------------------
import numpy as np
import numpy.random as npr
from config import BG_THRESH_HI, BG_THRESH_LO, FG_FRACTION_REL, ROIS_PER_IMG_REL, REL_FG_FRACTION, \
RELS_PER_IMG
from lib.fpn.box_utils import bbox_overlaps
from lib.pytorch_misc import to_variable, nonintersecting_2d_inds
from collections import defaultdict
import torch
@to_variable
def proposal_assignments_rel(rpn_rois, gt_boxes, gt_classes, gt_rels, image_offset, fg_thresh=0.5):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
:param rpn_rois: [img_ind, x1, y1, x2, y2]
:param gt_boxes: [num_boxes, 4] array of x0, y0, x1, y1]
:param gt_classes: [num_boxes, 2] array of [img_ind, class]
:param gt_rels [num_boxes, 4] array of [img_ind, box_0, box_1, rel type]
:param Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
:return:
rois: [num_rois, 5]
labels: [num_rois] array of labels
bbox_targets [num_rois, 4] array of targets for the labels.
rel_labels: [num_rels, 4] (img ind, box0 ind, box1ind, rel type)
"""
fg_rois_per_image = int(np.round(ROIS_PER_IMG_REL * FG_FRACTION_REL))
fg_rels_per_image = int(np.round(REL_FG_FRACTION * RELS_PER_IMG))
pred_inds_np = rpn_rois[:, 0].cpu().numpy().astype(np.int64)
pred_boxes_np = rpn_rois[:, 1:].cpu().numpy()
gt_boxes_np = gt_boxes.cpu().numpy()
gt_classes_np = gt_classes.cpu().numpy()
gt_rels_np = gt_rels.cpu().numpy()
gt_classes_np[:, 0] -= image_offset
gt_rels_np[:, 0] -= image_offset
num_im = gt_classes_np[:, 0].max()+1
rois = []
obj_labels = []
rel_labels = []
bbox_targets = []
num_box_seen = 0
for im_ind in range(num_im):
pred_ind = np.where(pred_inds_np == im_ind)[0]
gt_ind = np.where(gt_classes_np[:, 0] == im_ind)[0]
gt_boxes_i = gt_boxes_np[gt_ind]
gt_classes_i = gt_classes_np[gt_ind, 1]
gt_rels_i = gt_rels_np[gt_rels_np[:, 0] == im_ind, 1:]
pred_boxes_i = np.concatenate((pred_boxes_np[pred_ind], gt_boxes_i), 0)
ious = bbox_overlaps(pred_boxes_i, gt_boxes_i)
obj_inds_i, obj_labels_i, obj_assignments_i = _sel_inds(ious, gt_classes_i,
fg_thresh, fg_rois_per_image, ROIS_PER_IMG_REL)
all_rels_i = _sel_rels(ious[obj_inds_i], pred_boxes_i[obj_inds_i], obj_labels_i,
gt_classes_i, gt_rels_i,
fg_thresh=fg_thresh, fg_rels_per_image=fg_rels_per_image)
all_rels_i[:,0:2] += num_box_seen
rois.append(np.column_stack((
im_ind * np.ones(obj_inds_i.shape[0], dtype=np.float32),
pred_boxes_i[obj_inds_i],
)))
obj_labels.append(obj_labels_i)
rel_labels.append(np.column_stack((
im_ind*np.ones(all_rels_i.shape[0], dtype=np.int64),
all_rels_i,
)))
# print("Gtboxes i {} obj assignments i {}".format(gt_boxes_i, obj_assignments_i))
bbox_targets.append(gt_boxes_i[obj_assignments_i])
num_box_seen += obj_inds_i.size
    rois = torch.FloatTensor(np.concatenate(rois, 0)).cuda(rpn_rois.get_device(), non_blocking=True)
    labels = torch.LongTensor(np.concatenate(obj_labels, 0)).cuda(rpn_rois.get_device(), non_blocking=True)
    bbox_targets = torch.FloatTensor(np.concatenate(bbox_targets, 0)).cuda(rpn_rois.get_device(),
                                                                           non_blocking=True)
    rel_labels = torch.LongTensor(np.concatenate(rel_labels, 0)).cuda(rpn_rois.get_device(),
                                                                      non_blocking=True)
return rois, labels, bbox_targets, rel_labels
def _sel_rels(ious, pred_boxes, pred_labels, gt_classes, gt_rels, fg_thresh=0.5, fg_rels_per_image=128, num_sample_per_gt=1, filter_non_overlap=True):
"""
Selects the relations needed
:param ious: [num_pred', num_gt]
:param pred_boxes: [num_pred', num_gt]
:param pred_labels: [num_pred']
:param gt_classes: [num_gt]
:param gt_rels: [num_gtrel, 3]
:param fg_thresh:
:param fg_rels_per_image:
:return: new rels, [num_predrel, 3] where each is (pred_ind1, pred_ind2, predicate)
"""
is_match = (ious >= fg_thresh) & (pred_labels[:, None] == gt_classes[None, :])
pbi_iou = bbox_overlaps(pred_boxes, pred_boxes)
# Limit ourselves to only IOUs that overlap, but are not the exact same box
# since we duplicated stuff earlier.
if filter_non_overlap:
rel_possibilities = (pbi_iou < 1) & (pbi_iou > 0)
rels_intersect = rel_possibilities
else:
rel_possibilities = np.ones((pred_labels.shape[0], pred_labels.shape[0]),
dtype=np.int64) - np.eye(pred_labels.shape[0], dtype=np.int64)
rels_intersect = (pbi_iou < 1) & (pbi_iou > 0)
# ONLY select relations between ground truth because otherwise we get useless data
rel_possibilities[pred_labels == 0] = 0
rel_possibilities[:,pred_labels == 0] = 0
# For each GT relationship, sample exactly 1 relationship.
fg_rels = []
p_size = []
for i, (from_gtind, to_gtind, rel_id) in enumerate(gt_rels):
fg_rels_i = []
fg_scores_i = []
for from_ind in np.where(is_match[:,from_gtind])[0]:
for to_ind in np.where(is_match[:,to_gtind])[0]:
if from_ind != to_ind:
fg_rels_i.append((from_ind, to_ind, rel_id))
fg_scores_i.append((ious[from_ind, from_gtind]*ious[to_ind, to_gtind]))
rel_possibilities[from_ind, to_ind] = 0
if len(fg_rels_i) == 0:
continue
p = np.array(fg_scores_i)
p = p/p.sum()
p_size.append(p.shape[0])
num_to_add = min(p.shape[0], num_sample_per_gt)
for rel_to_add in npr.choice(p.shape[0], p=p, size=num_to_add, replace=False):
fg_rels.append(fg_rels_i[rel_to_add])
bg_rels = np.column_stack(np.where(rel_possibilities))
bg_rels = np.column_stack((bg_rels, np.zeros(bg_rels.shape[0], dtype=np.int64)))
fg_rels = np.array(fg_rels, dtype=np.int64)
if fg_rels.size > 0 and fg_rels.shape[0] > fg_rels_per_image:
fg_rels = fg_rels[npr.choice(fg_rels.shape[0], size=fg_rels_per_image, replace=False)]
# print("{} scores for {} GT. max={} min={} BG rels {}".format(
# fg_rels_scores.shape[0], gt_rels.shape[0], fg_rels_scores.max(), fg_rels_scores.min(),
# bg_rels.shape))
elif fg_rels.size == 0:
fg_rels = np.zeros((0,3), dtype=np.int64)
num_bg_rel = min(RELS_PER_IMG - fg_rels.shape[0], bg_rels.shape[0])
if bg_rels.size > 0:
# Sample 4x as many intersecting relationships as non-intersecting.
bg_rels_intersect = rels_intersect[bg_rels[:,0], bg_rels[:,1]]
p = bg_rels_intersect.astype(np.float32)
p[bg_rels_intersect == 0] = 0.2
p[bg_rels_intersect == 1] = 0.8
p /= p.sum()
bg_rels = bg_rels[np.random.choice(bg_rels.shape[0], p=p, size=num_bg_rel, replace=False)]
else:
bg_rels = np.zeros((0,3), dtype=np.int64)
#print("GTR {} -> AR {} vs {}".format(gt_rels.shape, fg_rels.shape, bg_rels.shape))
all_rels = np.concatenate((fg_rels, bg_rels), 0)
# Sort by 2nd ind and then 1st ind
all_rels = all_rels[np.lexsort((all_rels[:, 1], all_rels[:, 0]))]
return all_rels
def _sel_inds(ious, gt_classes_i, fg_thresh=0.5, fg_rois_per_image=128, rois_per_image=256, n_sample_per=1):
#gt_assignment = ious.argmax(1)
#max_overlaps = ious[np.arange(ious.shape[0]), gt_assignment]
#fg_inds = np.where(max_overlaps >= fg_thresh)[0]
fg_ious = ious.T >= fg_thresh #[num_gt, num_pred]
#is_bg = ~fg_ious.any(0)
# Sample K inds per GT image.
fg_inds = []
for i, (ious_i, cls_i) in enumerate(zip(fg_ious, gt_classes_i)):
n_sample_this_roi = min(n_sample_per, ious_i.sum())
if n_sample_this_roi > 0:
p = ious_i.astype(np.float64) / ious_i.sum()
for ind in npr.choice(ious_i.shape[0], p=p, size=n_sample_this_roi, replace=False):
fg_inds.append((ind, i))
fg_inds = np.array(fg_inds, dtype=np.int64)
if fg_inds.size == 0:
fg_inds = np.zeros((0, 2), dtype=np.int64)
elif fg_inds.shape[0] > fg_rois_per_image:
#print("sample FG")
fg_inds = fg_inds[npr.choice(fg_inds.shape[0], size=fg_rois_per_image, replace=False)]
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
max_overlaps = ious.max(1)
bg_inds = np.where((max_overlaps < BG_THRESH_HI) & (max_overlaps >= BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = min(rois_per_image-fg_inds.shape[0], bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
    # Fix for format issues
obj_inds = np.concatenate((fg_inds[:,0], bg_inds), 0)
obj_assignments_i = np.concatenate((fg_inds[:,1], np.zeros(bg_inds.shape[0], dtype=np.int64)))
obj_labels_i = gt_classes_i[obj_assignments_i]
obj_labels_i[fg_inds.shape[0]:] = 0
#print("{} FG and {} BG".format(fg_inds.shape[0], bg_inds.shape[0]))
return obj_inds, obj_labels_i, obj_assignments_i
| 9,678 | 41.451754 | 150 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/proposal_assignments/proposal_assignments_gtbox.py | from lib.pytorch_misc import enumerate_by_image, gather_nd, random_choose
from lib.fpn.box_utils import bbox_preds, center_size, bbox_overlaps
import torch
from lib.pytorch_misc import diagonal_inds, to_variable
from config import RELS_PER_IMG, REL_FG_FRACTION
@to_variable
def proposal_assignments_gtbox(rois, gt_boxes, gt_classes, gt_rels, image_offset, fg_thresh=0.5):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
:param rpn_rois: [img_ind, x1, y1, x2, y2]
:param gt_boxes: [num_boxes, 4] array of x0, y0, x1, y1]. Not needed it seems
:param gt_classes: [num_boxes, 2] array of [img_ind, class]
Note, the img_inds here start at image_offset
:param gt_rels [num_boxes, 4] array of [img_ind, box_0, box_1, rel type].
Note, the img_inds here start at image_offset
:param Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
:return:
rois: [num_rois, 5]
labels: [num_rois] array of labels
bbox_targets [num_rois, 4] array of targets for the labels.
rel_labels: [num_rels, 4] (img ind, box0 ind, box1ind, rel type)
"""
im_inds = rois[:,0].long()
num_im = im_inds[-1] + 1
# Offset the image indices in fg_rels to refer to absolute indices (not just within img i)
fg_rels = gt_rels.clone()
fg_rels[:,0] -= image_offset
offset = {}
for i, s, e in enumerate_by_image(im_inds):
offset[i] = s
for i, s, e in enumerate_by_image(fg_rels[:, 0]):
fg_rels[s:e, 1:3] += offset[i]
# Try ALL things, not just intersections.
is_cand = (im_inds[:, None] == im_inds[None])
is_cand.view(-1)[diagonal_inds(is_cand)] = 0
# # Compute salience
# gt_inds = fg_rels[:, 1:3].contiguous().view(-1)
# labels_arange = labels.data.new(labels.size(0))
# torch.arange(0, labels.size(0), out=labels_arange)
# salience_labels = ((gt_inds[:, None] == labels_arange[None]).long().sum(0) > 0).long()
# labels = torch.stack((labels, salience_labels), 1)
# Add in some BG labels
# NOW WE HAVE TO EXCLUDE THE FGs.
    # TODO: check if this causes an error if many duplicate GTs haven't been filtered out
is_cand.view(-1)[fg_rels[:,1]*im_inds.size(0) + fg_rels[:,2]] = 0
is_bgcand = is_cand.nonzero()
# TODO: make this sample on a per image case
# If too many then sample
num_fg = min(fg_rels.size(0), int(RELS_PER_IMG * REL_FG_FRACTION * num_im))
if num_fg < fg_rels.size(0):
fg_rels = random_choose(fg_rels, num_fg)
# If too many then sample
num_bg = min(is_bgcand.size(0) if is_bgcand.dim() > 0 else 0,
int(RELS_PER_IMG * num_im) - num_fg)
if num_bg > 0:
bg_rels = torch.cat((
im_inds[is_bgcand[:, 0]][:, None],
is_bgcand,
(is_bgcand[:, 0, None] < -10).long(),
), 1)
if num_bg < is_bgcand.size(0):
bg_rels = random_choose(bg_rels, num_bg)
rel_labels = torch.cat((fg_rels, bg_rels), 0)
else:
rel_labels = fg_rels
# last sort by rel.
_, perm = torch.sort(rel_labels[:, 0]*(gt_boxes.size(0)**2) +
rel_labels[:,1]*gt_boxes.size(0) + rel_labels[:,2])
rel_labels = rel_labels[perm].contiguous()
labels = gt_classes[:,1].contiguous()
return rois, labels, rel_labels
| 3,434 | 38.034091 | 97 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/proposal_assignments/rel_assignments.py | # --------------------------------------------------------
# Goal: assign ROIs to targets
# --------------------------------------------------------
import numpy as np
import numpy.random as npr
from config import BG_THRESH_HI, BG_THRESH_LO, REL_FG_FRACTION, RELS_PER_IMG_REFINE
from lib.fpn.box_utils import bbox_overlaps
from lib.pytorch_misc import to_variable, nonintersecting_2d_inds
from collections import defaultdict
import torch
@to_variable
def rel_assignments(im_inds, rpn_rois, roi_gtlabels, gt_boxes, gt_classes, gt_rels, image_offset,
fg_thresh=0.5, num_sample_per_gt=4, filter_non_overlap=True):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
:param rpn_rois: [img_ind, x1, y1, x2, y2]
:param gt_boxes: [num_boxes, 4] array of x0, y0, x1, y1]
:param gt_classes: [num_boxes, 2] array of [img_ind, class]
:param gt_rels [num_boxes, 4] array of [img_ind, box_0, box_1, rel type]
:param Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
:return:
rois: [num_rois, 5]
labels: [num_rois] array of labels
bbox_targets [num_rois, 4] array of targets for the labels.
rel_labels: [num_rels, 4] (img ind, box0 ind, box1ind, rel type)
"""
fg_rels_per_image = int(np.round(REL_FG_FRACTION * 64))
pred_inds_np = im_inds.cpu().numpy()
pred_boxes_np = rpn_rois.cpu().numpy()
pred_boxlabels_np = roi_gtlabels.cpu().numpy()
gt_boxes_np = gt_boxes.cpu().numpy()
gt_classes_np = gt_classes.cpu().numpy()
gt_rels_np = gt_rels.cpu().numpy()
gt_classes_np[:, 0] -= image_offset
gt_rels_np[:, 0] -= image_offset
num_im = gt_classes_np[:, 0].max()+1
# print("Pred inds {} pred boxes {} pred box labels {} gt classes {} gt rels {}".format(
# pred_inds_np, pred_boxes_np, pred_boxlabels_np, gt_classes_np, gt_rels_np
# ))
rel_labels = []
num_box_seen = 0
for im_ind in range(num_im):
pred_ind = np.where(pred_inds_np == im_ind)[0]
gt_ind = np.where(gt_classes_np[:, 0] == im_ind)[0]
gt_boxes_i = gt_boxes_np[gt_ind]
gt_classes_i = gt_classes_np[gt_ind, 1]
gt_rels_i = gt_rels_np[gt_rels_np[:, 0] == im_ind, 1:]
# [num_pred, num_gt]
pred_boxes_i = pred_boxes_np[pred_ind]
pred_boxlabels_i = pred_boxlabels_np[pred_ind]
ious = bbox_overlaps(pred_boxes_i, gt_boxes_i)
is_match = (pred_boxlabels_i[:,None] == gt_classes_i[None]) & (ious >= fg_thresh)
# FOR BG. Limit ourselves to only IOUs that overlap, but are not the exact same box
pbi_iou = bbox_overlaps(pred_boxes_i, pred_boxes_i)
if filter_non_overlap:
rel_possibilities = (pbi_iou < 1) & (pbi_iou > 0)
rels_intersect = rel_possibilities
else:
rel_possibilities = np.ones((pred_boxes_i.shape[0], pred_boxes_i.shape[0]),
dtype=np.int64) - np.eye(pred_boxes_i.shape[0],
dtype=np.int64)
rels_intersect = (pbi_iou < 1) & (pbi_iou > 0)
# ONLY select relations between ground truth because otherwise we get useless data
rel_possibilities[pred_boxlabels_i == 0] = 0
rel_possibilities[:, pred_boxlabels_i == 0] = 0
# Sample the GT relationships.
fg_rels = []
p_size = []
for i, (from_gtind, to_gtind, rel_id) in enumerate(gt_rels_i):
fg_rels_i = []
fg_scores_i = []
for from_ind in np.where(is_match[:, from_gtind])[0]:
for to_ind in np.where(is_match[:, to_gtind])[0]:
if from_ind != to_ind:
fg_rels_i.append((from_ind, to_ind, rel_id))
fg_scores_i.append((ious[from_ind, from_gtind] * ious[to_ind, to_gtind]))
rel_possibilities[from_ind, to_ind] = 0
if len(fg_rels_i) == 0:
continue
p = np.array(fg_scores_i)
p = p / p.sum()
p_size.append(p.shape[0])
num_to_add = min(p.shape[0], num_sample_per_gt)
for rel_to_add in npr.choice(p.shape[0], p=p, size=num_to_add, replace=False):
fg_rels.append(fg_rels_i[rel_to_add])
fg_rels = np.array(fg_rels, dtype=np.int64)
if fg_rels.size > 0 and fg_rels.shape[0] > fg_rels_per_image:
fg_rels = fg_rels[npr.choice(fg_rels.shape[0], size=fg_rels_per_image, replace=False)]
elif fg_rels.size == 0:
fg_rels = np.zeros((0, 3), dtype=np.int64)
bg_rels = np.column_stack(np.where(rel_possibilities))
bg_rels = np.column_stack((bg_rels, np.zeros(bg_rels.shape[0], dtype=np.int64)))
num_bg_rel = min(64 - fg_rels.shape[0], bg_rels.shape[0])
if bg_rels.size > 0:
# Sample 4x as many intersecting relationships as non-intersecting.
# bg_rels_intersect = rels_intersect[bg_rels[:, 0], bg_rels[:, 1]]
# p = bg_rels_intersect.astype(np.float32)
# p[bg_rels_intersect == 0] = 0.2
# p[bg_rels_intersect == 1] = 0.8
# p /= p.sum()
bg_rels = bg_rels[
np.random.choice(bg_rels.shape[0],
#p=p,
size=num_bg_rel, replace=False)]
else:
bg_rels = np.zeros((0, 3), dtype=np.int64)
if fg_rels.size == 0 and bg_rels.size == 0:
# Just put something here
bg_rels = np.array([[0, 0, 0]], dtype=np.int64)
# print("GTR {} -> AR {} vs {}".format(gt_rels.shape, fg_rels.shape, bg_rels.shape))
all_rels_i = np.concatenate((fg_rels, bg_rels), 0)
all_rels_i[:,0:2] += num_box_seen
all_rels_i = all_rels_i[np.lexsort((all_rels_i[:,1], all_rels_i[:,0]))]
rel_labels.append(np.column_stack((
im_ind*np.ones(all_rels_i.shape[0], dtype=np.int64),
all_rels_i,
)))
num_box_seen += pred_boxes_i.shape[0]
    rel_labels = torch.LongTensor(np.concatenate(rel_labels, 0)).cuda(rpn_rois.get_device(),
                                                                      non_blocking=True)
return rel_labels
| 6,381 | 42.712329 | 98 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/proposal_assignments/proposal_assignments_det.py |
import numpy as np
import numpy.random as npr
from config import BG_THRESH_HI, BG_THRESH_LO, FG_FRACTION, ROIS_PER_IMG
from lib.fpn.box_utils import bbox_overlaps
from lib.pytorch_misc import to_variable
import torch
#############################################################
# The following is only for object detection
@to_variable
def proposal_assignments_det(rpn_rois, gt_boxes, gt_classes, image_offset, fg_thresh=0.5):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
:param rpn_rois: [img_ind, x1, y1, x2, y2]
:param gt_boxes: [num_boxes, 4] array of x0, y0, x1, y1
:param gt_classes: [num_boxes, 2] array of [img_ind, class]
:param Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
:return:
rois: [num_rois, 5]
labels: [num_rois] array of labels
bbox_targets [num_rois, 4] array of targets for the labels.
"""
fg_rois_per_image = int(np.round(ROIS_PER_IMG * FG_FRACTION))
gt_img_inds = gt_classes[:, 0] - image_offset
all_boxes = torch.cat([rpn_rois[:, 1:], gt_boxes], 0)
ims_per_box = torch.cat([rpn_rois[:, 0].long(), gt_img_inds], 0)
im_sorted, idx = torch.sort(ims_per_box, 0)
all_boxes = all_boxes[idx]
# Assume that the GT boxes are already sorted in terms of image id
num_images = int(im_sorted[-1]) + 1
labels = []
rois = []
bbox_targets = []
for im_ind in range(num_images):
g_inds = (gt_img_inds == im_ind).nonzero()
if g_inds.dim() == 0:
continue
g_inds = g_inds.squeeze(1)
g_start = g_inds[0]
g_end = g_inds[-1] + 1
t_inds = (im_sorted == im_ind).nonzero().squeeze(1)
t_start = t_inds[0]
t_end = t_inds[-1] + 1
# Max overlaps: for each predicted box, get the max ROI
# Get the indices into the GT boxes too (must offset by the box start)
ious = bbox_overlaps(all_boxes[t_start:t_end], gt_boxes[g_start:g_end])
max_overlaps, gt_assignment = ious.max(1)
max_overlaps = max_overlaps.cpu().numpy()
# print("Best overlap is {}".format(max_overlaps.max()))
# print("\ngt assignment is {} while g_start is {} \n ---".format(gt_assignment, g_start))
gt_assignment += g_start
keep_inds_np, num_fg = _sel_inds(max_overlaps, fg_thresh, fg_rois_per_image,
ROIS_PER_IMG)
if keep_inds_np.size == 0:
continue
keep_inds = torch.LongTensor(keep_inds_np).cuda(rpn_rois.get_device())
labels_ = gt_classes[:, 1][gt_assignment[keep_inds]]
bbox_target_ = gt_boxes[gt_assignment[keep_inds]]
# Clamp labels_ for the background RoIs to 0
if num_fg < labels_.size(0):
labels_[num_fg:] = 0
rois_ = torch.cat((
im_sorted[t_start:t_end, None][keep_inds].float(),
all_boxes[t_start:t_end][keep_inds],
), 1)
labels.append(labels_)
rois.append(rois_)
bbox_targets.append(bbox_target_)
rois = torch.cat(rois, 0)
labels = torch.cat(labels, 0)
bbox_targets = torch.cat(bbox_targets, 0)
return rois, labels, bbox_targets
def _sel_inds(max_overlaps, fg_thresh=0.5, fg_rois_per_image=128, rois_per_image=256):
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= fg_thresh)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = min(fg_rois_per_image, fg_inds.shape[0])
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where((max_overlaps < BG_THRESH_HI) & (max_overlaps >= BG_THRESH_LO))[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = min(bg_rois_per_this_image, bg_inds.size)
# Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False)
return np.append(fg_inds, bg_inds), fg_rois_per_this_image
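# ---------------------------------------------------------------------------
# Small illustration (added, not in the original file): boxes whose best IoU
# clears fg_thresh are kept as foreground and the rest are sampled as
# background according to the BG_THRESH_* bounds from config. Values made up.
def _demo_sel_inds():
    max_overlaps = np.array([0.9, 0.6, 0.3, 0.05, 0.2])
    keep, num_fg = _sel_inds(max_overlaps, fg_thresh=0.5,
                             fg_rois_per_image=128, rois_per_image=256)
    # keep lists the two foreground indices (0 and 1 here) first, followed by
    # whatever background indices were sampled; num_fg == 2.
    return keep, num_fg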
| 4,477 | 36.949153 | 98 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/proposal_assignments/proposal_assignments_postnms.py | # --------------------------------------------------------
# Goal: assign ROIs to targets
# --------------------------------------------------------
import numpy as np
import numpy.random as npr
from .proposal_assignments_rel import _sel_rels
from lib.fpn.box_utils import bbox_overlaps
from lib.pytorch_misc import to_variable
import torch
@to_variable
def proposal_assignments_postnms(
rois, gt_boxes, gt_classes, gt_rels, nms_inds, image_offset, fg_thresh=0.5,
max_objs=100, max_rels=100, rand_val=0.01):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
:param rpn_rois: [img_ind, x1, y1, x2, y2]
:param gt_boxes: [num_boxes, 4] array of x0, y0, x1, y1]
:param gt_classes: [num_boxes, 2] array of [img_ind, class]
:param gt_rels [num_boxes, 4] array of [img_ind, box_0, box_1, rel type]
:param Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
:return:
rois: [num_rois, 5]
labels: [num_rois] array of labels
rel_labels: [num_rels, 4] (img ind, box0 ind, box1ind, rel type)
"""
pred_inds_np = rois[:, 0].cpu().numpy().astype(np.int64)
pred_boxes_np = rois[:, 1:].cpu().numpy()
nms_inds_np = nms_inds.cpu().numpy()
sup_inds_np = np.setdiff1d(np.arange(pred_boxes_np.shape[0]), nms_inds_np)
# split into chosen and suppressed
chosen_inds_np = pred_inds_np[nms_inds_np]
chosen_boxes_np = pred_boxes_np[nms_inds_np]
suppre_inds_np = pred_inds_np[sup_inds_np]
suppre_boxes_np = pred_boxes_np[sup_inds_np]
gt_boxes_np = gt_boxes.cpu().numpy()
gt_classes_np = gt_classes.cpu().numpy()
gt_rels_np = gt_rels.cpu().numpy()
gt_classes_np[:, 0] -= image_offset
gt_rels_np[:, 0] -= image_offset
num_im = gt_classes_np[:, 0].max()+1
rois = []
obj_labels = []
rel_labels = []
num_box_seen = 0
for im_ind in range(num_im):
chosen_ind = np.where(chosen_inds_np == im_ind)[0]
suppre_ind = np.where(suppre_inds_np == im_ind)[0]
gt_ind = np.where(gt_classes_np[:, 0] == im_ind)[0]
gt_boxes_i = gt_boxes_np[gt_ind]
gt_classes_i = gt_classes_np[gt_ind, 1]
gt_rels_i = gt_rels_np[gt_rels_np[:, 0] == im_ind, 1:]
# Get IOUs between chosen and GT boxes and if needed we'll add more in
chosen_boxes_i = chosen_boxes_np[chosen_ind]
suppre_boxes_i = suppre_boxes_np[suppre_ind]
n_chosen = chosen_boxes_i.shape[0]
n_suppre = suppre_boxes_i.shape[0]
n_gt_box = gt_boxes_i.shape[0]
# add a teensy bit of random noise because some GT boxes might be duplicated, etc.
pred_boxes_i = np.concatenate((chosen_boxes_i, suppre_boxes_i, gt_boxes_i), 0)
ious = bbox_overlaps(pred_boxes_i, gt_boxes_i) + rand_val*(
np.random.rand(pred_boxes_i.shape[0], gt_boxes_i.shape[0])-0.5)
# Let's say that a box can only be assigned ONCE for now because we've already done
# the NMS and stuff.
is_hit = ious > fg_thresh
obj_assignments_i = is_hit.argmax(1)
obj_assignments_i[~is_hit.any(1)] = -1
vals, first_occurance_ind = np.unique(obj_assignments_i, return_index=True)
obj_assignments_i[np.setdiff1d(
np.arange(obj_assignments_i.shape[0]), first_occurance_ind)] = -1
extra_to_add = np.where(obj_assignments_i[n_chosen:] != -1)[0] + n_chosen
# Add them in somewhere at random
num_inds_to_have = min(max_objs, n_chosen + extra_to_add.shape[0])
boxes_i = np.zeros((num_inds_to_have, 4), dtype=np.float32)
labels_i = np.zeros(num_inds_to_have, dtype=np.int64)
inds_from_nms = np.sort(np.random.choice(num_inds_to_have, size=n_chosen, replace=False))
inds_from_elsewhere = np.setdiff1d(np.arange(num_inds_to_have), inds_from_nms)
boxes_i[inds_from_nms] = chosen_boxes_i
labels_i[inds_from_nms] = gt_classes_i[obj_assignments_i[:n_chosen]]
boxes_i[inds_from_elsewhere] = pred_boxes_i[extra_to_add]
labels_i[inds_from_elsewhere] = gt_classes_i[obj_assignments_i[extra_to_add]]
# Now, we do the relationships. same as for rle
all_rels_i = _sel_rels(bbox_overlaps(boxes_i, gt_boxes_i),
boxes_i,
labels_i,
gt_classes_i,
gt_rels_i,
fg_thresh=fg_thresh,
fg_rels_per_image=100)
all_rels_i[:,0:2] += num_box_seen
rois.append(np.column_stack((
im_ind * np.ones(boxes_i.shape[0], dtype=np.float32),
boxes_i,
)))
obj_labels.append(labels_i)
rel_labels.append(np.column_stack((
im_ind*np.ones(all_rels_i.shape[0], dtype=np.int64),
all_rels_i,
)))
        num_box_seen += boxes_i.shape[0]  # count boxes, not array elements
    rois = torch.FloatTensor(np.concatenate(rois, 0)).cuda(gt_boxes.get_device(), non_blocking=True)
    labels = torch.LongTensor(np.concatenate(obj_labels, 0)).cuda(gt_boxes.get_device(), non_blocking=True)
    rel_labels = torch.LongTensor(np.concatenate(rel_labels, 0)).cuda(gt_boxes.get_device(),
                                                                      non_blocking=True)
return rois, labels, rel_labels
| 5,420 | 39.455224 | 100 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/build.py | import os
import torch
from torch.utils.ffi import create_extension
# Might have to export PATH=/usr/local/cuda-8.0/bin${PATH:+:${PATH}}
# sources = ['src/roi_align.c']
# headers = ['src/roi_align.h']
sources = []
headers = []
defines = []
with_cuda = False
if torch.cuda.is_available():
print('Including CUDA code.')
sources += ['src/roi_align_cuda.c']
headers += ['src/roi_align_cuda.h']
defines += [('WITH_CUDA', None)]
with_cuda = True
this_file = os.path.dirname(os.path.realpath(__file__))
print(this_file)
extra_objects = ['src/cuda/roi_align.cu.o']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
ffi = create_extension(
'_ext.roi_align',
headers=headers,
sources=sources,
define_macros=defines,
relative_to=__file__,
with_cuda=with_cuda,
extra_objects=extra_objects
)
if __name__ == '__main__':
ffi.build()
| 901 | 23.378378 | 75 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/functions/roi_align.py | """
performs ROI aligning
"""
import torch
from torch.autograd import Function
from .._ext import roi_align
class RoIAlignFunction(Function):
def __init__(self, aligned_height, aligned_width, spatial_scale):
self.aligned_width = int(aligned_width)
self.aligned_height = int(aligned_height)
self.spatial_scale = float(spatial_scale)
self.feature_size = None
def forward(self, features, rois):
self.save_for_backward(rois)
rois_normalized = rois.clone()
self.feature_size = features.size()
batch_size, num_channels, data_height, data_width = self.feature_size
height = (data_height -1) / self.spatial_scale
width = (data_width - 1) / self.spatial_scale
rois_normalized[:,1] /= width
rois_normalized[:,2] /= height
rois_normalized[:,3] /= width
rois_normalized[:,4] /= height
num_rois = rois.size(0)
output = features.new(num_rois, num_channels, self.aligned_height,
self.aligned_width).zero_()
if features.is_cuda:
res = roi_align.roi_align_forward_cuda(self.aligned_height,
self.aligned_width,
self.spatial_scale, features,
rois_normalized, output)
assert res == 1
else:
raise ValueError
return output
def backward(self, grad_output):
assert(self.feature_size is not None and grad_output.is_cuda)
rois = self.saved_tensors[0]
rois_normalized = rois.clone()
batch_size, num_channels, data_height, data_width = self.feature_size
height = (data_height -1) / self.spatial_scale
width = (data_width - 1) / self.spatial_scale
rois_normalized[:,1] /= width
rois_normalized[:,2] /= height
rois_normalized[:,3] /= width
rois_normalized[:,4] /= height
grad_input = rois_normalized.new(batch_size, num_channels, data_height,
data_width).zero_()
res = roi_align.roi_align_backward_cuda(self.aligned_height,
self.aligned_width,
self.spatial_scale, grad_output,
rois_normalized, grad_input)
assert res == 1
return grad_input, None
| 2,455 | 31.746667 | 79 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/modules/roi_align.py | from torch.nn.modules.module import Module
from torch.nn.functional import avg_pool2d, max_pool2d
from ..functions.roi_align import RoIAlignFunction
class RoIAlign(Module):
def __init__(self, aligned_height, aligned_width, spatial_scale):
super(RoIAlign, self).__init__()
self.aligned_width = int(aligned_width)
self.aligned_height = int(aligned_height)
self.spatial_scale = float(spatial_scale)
def forward(self, features, rois):
return RoIAlignFunction(self.aligned_height, self.aligned_width,
self.spatial_scale)(features, rois)
class RoIAlignAvg(Module):
def __init__(self, aligned_height, aligned_width, spatial_scale):
super(RoIAlignAvg, self).__init__()
self.aligned_width = int(aligned_width)
self.aligned_height = int(aligned_height)
self.spatial_scale = float(spatial_scale)
def forward(self, features, rois):
x = RoIAlignFunction(self.aligned_height+1, self.aligned_width+1,
self.spatial_scale)(features, rois)
return avg_pool2d(x, kernel_size=2, stride=1)
class RoIAlignMax(Module):
def __init__(self, aligned_height, aligned_width, spatial_scale):
super(RoIAlignMax, self).__init__()
self.aligned_width = int(aligned_width)
self.aligned_height = int(aligned_height)
self.spatial_scale = float(spatial_scale)
def forward(self, features, rois):
x = RoIAlignFunction(self.aligned_height+1, self.aligned_width+1,
self.spatial_scale)(features, rois)
return max_pool2d(x, kernel_size=2, stride=1)
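# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not in the original file). It
# needs the compiled roi_align CUDA extension and a GPU, so it is shape-level
# only; the feature-map size, stride and roi below are made up.
def _demo_roi_align_avg():
    import torch  # torch is not imported at module level in this file
    pool = RoIAlignAvg(aligned_height=7, aligned_width=7, spatial_scale=1.0 / 16)
    features = torch.randn(1, 256, 38, 50).cuda()
    # rois are (batch_index, x1, y1, x2, y2) in image coordinates.
    rois = torch.FloatTensor([[0, 0, 0, 127, 127]]).cuda()
    return pool(features, rois)  # (1, 256, 7, 7)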
| 1,672 | 37.906977 | 74 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/roi_align/_ext/roi_align/__init__.py |
from torch.utils.ffi import _wrap_function
from ._roi_align import lib as _lib, ffi as _ffi
__all__ = []
def _import_symbols(locals):
for symbol in dir(_lib):
fn = getattr(_lib, symbol)
locals[symbol] = _wrap_function(fn, _ffi)
__all__.append(symbol)
_import_symbols(locals())
| 308 | 22.769231 | 49 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/nms/build.py | import os
import torch
from torch.utils.ffi import create_extension
# Might have to export PATH=/usr/local/cuda-8.0/bin${PATH:+:${PATH}}
sources = []
headers = []
defines = []
with_cuda = False
if torch.cuda.is_available():
print('Including CUDA code.')
sources += ['src/nms_cuda.c']
headers += ['src/nms_cuda.h']
defines += [('WITH_CUDA', None)]
with_cuda = True
this_file = os.path.dirname(os.path.realpath(__file__))
print(this_file)
extra_objects = ['src/cuda/nms.cu.o']
extra_objects = [os.path.join(this_file, fname) for fname in extra_objects]
ffi = create_extension(
'_ext.nms',
headers=headers,
sources=sources,
define_macros=defines,
relative_to=__file__,
with_cuda=with_cuda,
extra_objects=extra_objects
)
if __name__ == '__main__':
ffi.build()
| 814 | 21.638889 | 75 | py |
MRE-ISE | MRE-ISE-main/VSG/VG_parser/lib/fpn/nms/functions/nms.py | # Le code for doing NMS
import torch
import numpy as np
from .._ext import nms
def apply_nms(scores, boxes, pre_nms_topn=12000, post_nms_topn=2000, boxes_per_im=None,
nms_thresh=0.7):
"""
Note - this function is non-differentiable so everything is assumed to be a tensor, not
a variable.
"""
just_inds = boxes_per_im is None
if boxes_per_im is None:
boxes_per_im = [boxes.size(0)]
s = 0
keep = []
im_per = []
for bpi in boxes_per_im:
e = s + int(bpi)
keep_im = _nms_single_im(scores[s:e], boxes[s:e], pre_nms_topn, post_nms_topn, nms_thresh)
keep.append(keep_im + s)
im_per.append(keep_im.size(0))
s = e
inds = torch.cat(keep, 0)
if just_inds:
return inds
return inds, im_per
def _nms_single_im(scores, boxes, pre_nms_topn=12000, post_nms_topn=2000, nms_thresh=0.7):
keep = torch.IntTensor(scores.size(0))
vs, idx = torch.sort(scores, dim=0, descending=True)
if idx.size(0) > pre_nms_topn:
idx = idx[:pre_nms_topn]
boxes_sorted = boxes[idx].contiguous()
num_out = nms.nms_apply(keep, boxes_sorted, nms_thresh)
num_out = min(num_out, post_nms_topn)
keep = keep[:num_out].long()
keep = idx[keep.cuda(scores.get_device())]
return keep
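# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not in the original file). It
# needs the compiled nms CUDA extension and cuda tensors; boxes_per_im lets a
# single call cover several images whose boxes are concatenated along dim 0.
def _demo_apply_nms(scores, boxes, boxes_per_im):
    # scores: (N,) objectness, boxes: (N, 4) as (x1, y1, x2, y2), both on GPU.
    keep_inds, kept_per_im = apply_nms(scores, boxes,
                                       pre_nms_topn=6000, post_nms_topn=300,
                                       boxes_per_im=boxes_per_im, nms_thresh=0.7)
    return boxes[keep_inds], kept_per_im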
| 1,312 | 27.543478 | 98 | py |
MRE-ISE | MRE-ISE-main/processor/create_bow.py | import numpy as np
import os
from sklearn.cluster import KMeans
from PIL import Image
import cv2
import pickle
from transformers import CLIPModel, CLIPProcessor
import torch
import json
from tqdm import tqdm
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords as stop_words
from scipy.cluster.vq import kmeans, vq
def create_tbow(data, textual_bow_size, target_file, stopwords_language="english"):
"""
create text bow vocabulary
"""
stopwords = set(stop_words.words(stopwords_language))
vectorizer = CountVectorizer(max_features=textual_bow_size, stop_words=stopwords)
text_for_bow = []
for d in data:
text_for_bow.append(' '.join(d['token']))
vectorizer.fit(text_for_bow)
vocab = vectorizer.get_feature_names()
with open(target_file, 'wb') as f:
pickle.dump([vocab, vectorizer], f)
return vocab
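# ---------------------------------------------------------------------------
# Toy usage sketch (added for illustration, not in the original file): each
# sample only needs a 'token' field holding a list of words. The output path is
# made up, and the nltk stopword list must already be downloaded.
def _demo_create_tbow():
    toy_data = [{'token': ['a', 'man', 'rides', 'a', 'horse']},
                {'token': ['two', 'dogs', 'play', 'in', 'the', 'park']}]
    vocab = create_tbow(toy_data, textual_bow_size=50, target_file='toy_tbow.pkl')
    return vocab  # e.g. ['dogs', 'horse', 'man', 'park', 'play', 'rides', 'two']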
def create_vbow(data, visual_bow_size, mode, original_img_dir, target_file,
clip_version="openai/clip-vit-base-patch32"):
"""
create Visual words
:param data: input data.
:param visual_bow_size: the vocabulary size of visual bow.
:param mode: 'train' / 'val' / 'test'
:param target_file: the final target file to storage visual words.
:param clip_version: the vision of pre-trained clip model.
"""
print('prepare vision features extractor ...')
vision_model = CLIPModel.from_pretrained(clip_version)
for name, param in vision_model.named_parameters():
param.requires_grad = False
vision_model.eval()
processor = CLIPProcessor.from_pretrained(clip_version)
if torch.cuda.is_available():
vision_model.to(torch.device('cuda'))
print('extract the visual words')
with torch.no_grad():
des_features = []
for d in tqdm(data, total=len(data)):
imgid = d['img_id']
img_path = os.path.join(original_img_dir, mode, imgid)
bbox = d['VSG']['bbox']
for b in bbox:
crop_img = cv2.imread(img_path)
try:
crop_region = crop_img[b[1]:b[3], b[0]:b[2]]
except TypeError as e:
print(e)
print(bbox)
print(b)
print(imgid)
exit(0)
im = Image.fromarray(crop_region, mode="RGB")
# print(im.size)
images = processor(images=im, return_tensors="pt")
images = images.to(torch.device('cuda'))
image_features = vision_model.get_image_features(**images).squeeze()
                des_features.append(image_features.cpu().numpy())
kmeans = KMeans(n_clusters=visual_bow_size, random_state=0, n_init=10)
img_cluster = kmeans.fit(np.array(des_features))
visual_word = img_cluster.cluster_centers_ # ndarray of shape (n_clusters, n_features)
# labels = img_cluster.labels_
with open(target_file, 'wb') as f:
pickle.dump([visual_word, kmeans], f)
return visual_word
def extract_visual_words(vision_model, processor, data, visual_bow_size, original_img_dir, target_file):
"""
create Visual words
:param data: input data.
:param visual_bow_size: the vocabulary size of visual bow.
:param target_file: the final target file to storage visual words.
"""
print('extract the visual words')
with torch.no_grad():
des_features = []
for d in tqdm(data, total=len(data)):
imgid = d['img_id']
img_path = os.path.join(original_img_dir, imgid)
bbox = d['VSG']['bbox']
for b in bbox:
crop_img = cv2.imread(img_path)
try:
crop_region = crop_img[b[1]:b[3], b[0]:b[2]]
except TypeError as e:
print(e)
print(bbox)
print(b)
print(imgid)
exit(0)
im = Image.fromarray(crop_region, mode="RGB")
# print(im.size)
images = processor(images=im, return_tensors="pt")
images = images.to(torch.device('cuda'))
image_features = vision_model.get_image_features(**images).squeeze()
                des_features.append(image_features.cpu().numpy())
kmeans = KMeans(n_clusters=visual_bow_size, random_state=0, n_init=10)
img_cluster = kmeans.fit(np.array(des_features))
visual_word = img_cluster.cluster_centers_ # ndarray of shape (n_clusters, n_features)
# labels = img_cluster.labels_
id2token = {}
vocab = []
    for idx in range(visual_word.shape[0]):
vocab.append('vword_' + str(idx))
for k, v in zip(range(0, len(vocab)), vocab):
id2token[k] = v
with open(target_file, 'wb') as f:
pickle.dump([img_cluster, vocab, id2token, visual_word], f)
return img_cluster, vocab, id2token, visual_word
def extract_vbow_features(file_path, visual_word_path, visual_bow_size, original_img_dir,
clip_version="openai/clip-vit-base-patch32"):
print('prepare vision features extractor ...')
vision_model = CLIPModel.from_pretrained(clip_version)
for name, param in vision_model.named_parameters():
param.requires_grad = False
vision_model.eval()
processor = CLIPProcessor.from_pretrained(clip_version)
if torch.cuda.is_available():
vision_model.to(torch.device('cuda'))
print('extract visual bow features .....')
with open(file_path, 'r') as f:
data = json.load(f)
if os.path.exists(visual_word_path):
with open(visual_word_path, 'rb') as f:
img_cluster, vocab, id2token, visual_word = pickle.load(f)
else:
# data, visual_bow_size, mode, original_img_dir, target_file,
# clip_version = "openai/clip-vit-base-patch32"
img_cluster, vocab, id2token, visual_word = extract_visual_words(vision_model, processor,
data, visual_bow_size, original_img_dir,
visual_word_path)
des_list = []
for d in tqdm(data, total=len(data)):
imgid = d['img_id']
img_path = os.path.join(original_img_dir, 'train', imgid)
bbox = d['VSG']['bbox']
features_list = []
for b in bbox:
crop_img = cv2.imread(img_path)
try:
crop_region = crop_img[b[1]:b[3], b[0]:b[2]]
except TypeError as e:
print(e)
print(bbox)
print(b)
print(imgid)
exit(0)
im = Image.fromarray(crop_region, mode="RGB")
# print(im.size)
images = processor(images=im, return_tensors="pt")
images = images.to(torch.device('cuda'))
image_features = vision_model.get_image_features(**images).squeeze()
            features_list.append(image_features.cpu().numpy())
des_list.append((d['img_id'], d['VSG']['bbox'], np.array(features_list)))
vbow_features = np.zeros((len(des_list), len(visual_word)), "float32")
for i in tqdm(range(len(des_list)), total=len(des_list)):
words, distance = vq(des_list[i][2], visual_word)
assert len(words) == len(distance) == len(des_list[i][1])
for w in words:
vbow_features[i][w] += 1
return vbow_features, id2token
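# Hedged usage sketch (file names below are placeholders, not taken from the repo):
# the call builds or loads the k-means visual-word codebook and returns one
# bag-of-visual-words histogram per sample, aligned with the order of the JSON file.
#
#   vbow_features, vbow_id2token = extract_vbow_features(
#       'ours_train.json', 'vbow_vocab.pt', visual_bow_size=2000,
#       original_img_dir='../data/img_org/')
#   # vbow_features.shape == (num_samples, visual_bow_size)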
def extract_text_bow_vocab(train_file_path, target_file, textual_bow_size, stopwords_language="english"):
with open(train_file_path, 'rb') as f:
data = json.load(f)
text_for_bow = []
for d in tqdm(data, total=len(data)):
text_for_bow.append(' '.join(d['token']))
stopwords = set(stop_words.words(stopwords_language))
vectorizer = CountVectorizer(max_features=textual_bow_size, stop_words=stopwords)
vectorizer.fit(text_for_bow)
# train_bow_embeddings = vectorizer.fit_transform(text_for_bow)
vocab = vectorizer.get_feature_names()
id2token = {k: v for k, v in zip(range(0, len(vocab)), vocab)}
with open(target_file, 'wb') as f:
pickle.dump([vectorizer, vocab, id2token], f)
return vectorizer, vocab, id2token
def extract_tbow_features(file_path, textual_word_path, textual_bow_size):
print('extract textual bow features .....')
if os.path.exists(textual_word_path):
with open(textual_word_path, 'rb') as f:
vectorizer, vocab, id2token = pickle.load(f)
else:
vectorizer, vocab, id2token = extract_text_bow_vocab(file_path, textual_word_path, textual_bow_size)
with open(file_path, 'rb') as f:
data = json.load(f)
text_for_bow = []
for d in tqdm(data, total=len(data)):
text_for_bow.append(' '.join(d['token']))
tbow_features = vectorizer.transform(text_for_bow)
return tbow_features, id2token
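# Hedged usage sketch (file names are placeholders): returns the sparse
# document-term matrix produced by CountVectorizer plus the id-to-token mapping.
#
#   tbow_features, tbow_id2token = extract_tbow_features(
#       'ours_train.json', 'tbow_vocab.pt', textual_bow_size=2000)
#   # tbow_features has shape (num_samples, textual_bow_size)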
if __name__ == '__main__':
FILE_DIR = '../data/vsg_tsg/'
with open(os.path.join(FILE_DIR, 'ours_train.json'), 'r') as f:
data = json.load(f)
print('create textual bow')
target_tbow = 'tbow.pt'
textual_bow_size = 2000
create_tbow(data, textual_bow_size, os.path.join(FILE_DIR, target_tbow))
print('create visual bow')
target_tbow = 'vbow.pt'
IMG_DIR = '../data/img_org/'
visual_bow_size = 2000
create_vbow(data, visual_bow_size, 'train', IMG_DIR, os.path.join(FILE_DIR, target_tbow))
| 9,510 | 38.962185 | 113 | py |
MRE-ISE | MRE-ISE-main/processor/dataset.py | import pickle
import random
import os
import numpy as np
import torch
import json
import ast
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from transformers import BertTokenizer
from torchvision import transforms
from transformers import CLIPTokenizer
from torch_geometric.utils import to_dense_adj, dense_to_sparse, add_self_loops
import logging
from processor.create_bow import extract_tbow_features, extract_vbow_features
logger = logging.getLogger(__name__)
def printf(param, name):
print(name, param)
class Vocabulary():
def __init__(self):
self.UNK = 'UNK'
self.PAD = 'PAD'
self.vocab = {self.UNK: 0, self.PAD: 1}
self.rev_vocab = {0: self.UNK, 1: self.PAD}
def build_vocab(self, data):
for m in data:
for n in m:
if n not in self.vocab:
idx = len(self.vocab)
self.vocab[n] = idx
self.rev_vocab[idx] = n
    def id2token(self, idx):
        return self.rev_vocab.get(idx, self.rev_vocab[0])
    def token2id(self, token):
        return self.vocab.get(token, self.vocab[self.UNK])
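# Hedged usage sketch of the Vocabulary helper (token lists are made up):
#
#   vocab = Vocabulary()
#   vocab.build_vocab([['a', 'man'], ['a', 'dog']])
#   vocab.token2id('dog')   # -> 4
#   vocab.token2id('cat')   # -> 0 (unseen tokens map to UNK)
#   vocab.id2token(1)       # -> 'PAD'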
def construct_adjacent_matrix(relation, seq_len):
matrix = torch.tensor([[0 for _ in range(seq_len)] for _ in range(seq_len)], dtype=torch.float)
for i, r in enumerate(relation):
matrix[i][i] = 1
if r != 0:
matrix[i][r - 1] = 1
return matrix
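# Hedged worked example (toy input): each entry of `relation` is the 1-based index
# of a token's head (0 means no head edge); every token gets a self-loop and an
# edge to its head.
#
#   construct_adjacent_matrix([0, 1, 1], seq_len=4)
#   # tensor([[1., 0., 0., 0.],
#   #         [1., 1., 0., 0.],
#   #         [1., 0., 1., 0.],
#   #         [0., 0., 0., 0.]])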
class MMREProcessor(object):
def __init__(self, data_path, re_path, img_path, vit_name,
visual_bow_size=2000, textual_bow_size=2000,
clip_processor=None):
self.data_path = data_path
self.re_path = re_path
self.img_path = img_path
self.visual_bow_size = visual_bow_size
self.textual_bow_size = textual_bow_size
self.vit_name = vit_name
self.tokenizer = CLIPTokenizer.from_pretrained(vit_name, do_lower_case=True)
self.clip_processor = clip_processor
def load_from_json(self, mode="train"):
load_file = self.data_path[mode]
logger.info("Loading data from {}".format(load_file))
words, relations, heads, tails, imgids, dataid, VSG, TSG = [], [], [], [], [], [], [], []
with open(os.path.join(load_file)) as f:
lines = json.load(f)
for i, line in enumerate(lines):
words.append(line['token'])
relations.append(line['relation'])
heads.append(line['h']) # {name, pos}
tails.append(line['t'])
imgids.append(line['img_id'])
VSG.append(line['VSG'])
TSG.append(line['TSG'])
dataid.append(i)
assert len(words) == len(relations) == len(heads) == len(tails) == (len(imgids)) == len(VSG) == len(TSG)
# file_path, visual_word_path, visual_bow_size, original_img_dir, clip_version = "openai/clip-vit-base-patch32"
vbow_features, vbow_id2token = extract_vbow_features(self.data_path[mode], self.data_path['vbow'],
visual_bow_size=self.visual_bow_size,
original_img_dir=self.img_path['train'],
clip_version=self.vit_name)
# file_path, textual_word_path, textual_bow_size
tbow_features, tbow_id2token = extract_tbow_features(self.data_path[mode], self.data_path['tbow'],
textual_bow_size=self.textual_bow_size)
return {'words': words, 'relations': relations, 'heads': heads, 'tails': tails, 'imgids': imgids,
'VSG': VSG, 'TSG': TSG, 'dataid': dataid,
'vbow_features': vbow_features, 'vbow_id2token': vbow_id2token,
'tbow_features': tbow_features, 'tbow_id2token': tbow_id2token}
def get_relation_dict(self):
with open(self.re_path, 'r', encoding="utf-8") as f:
line = f.readlines()[0]
re_dict = json.loads(line)
return re_dict
def get_rel2id(self, train_path):
with open(self.re_path, 'r', encoding="utf-8") as f:
line = f.readlines()[0]
re_dict = json.loads(line)
re2id = {key: [] for key in re_dict.keys()}
with open(train_path, "r", encoding="utf-8") as f:
lines = f.readlines()
for i, line in enumerate(lines):
line = ast.literal_eval(line) # str to dict
assert line['relation'] in re2id
re2id[line['relation']].append(i)
return re2id
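# Hedged usage sketch (the path layout is an assumption, not read from the repo
# config): data_path must also provide the 'vbow' / 'tbow' cache files consumed by
# load_from_json.
#
#   data_path = {'train': 'ours_train.json', 'vbow': 'vbow.pt', 'tbow': 'tbow.pt'}
#   img_path = {'train': '../data/img_org/'}
#   processor = MMREProcessor(data_path, 'ours_rel2id.json', img_path,
#                             vit_name='openai/clip-vit-base-patch32')
#   train_dict = processor.load_from_json('train')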
class NewMMREDatasetForIB(Dataset):
def __init__(self, processor, transform, img_path=None, max_seq=40,
mode="train", max_obj_num=40) -> None:
self.processor = processor
self.transform = transform
self.max_seq = max_seq
        self.img_path = img_path[mode] if img_path is not None else img_path
        self.aux_img_path = None  # auxiliary detected-object crops are not configured in this version
        self.mode = mode
self.data_dict = self.processor.load_from_json(mode)
self.re_dict = self.processor.get_relation_dict()
self.tokenizer = self.processor.tokenizer
self.clip_processor = self.processor.clip_processor
self.max_obj_num = max_obj_num
self.text_bow_size = len(self.data_dict['tbow_id2token'])
self.visual_bow_size = len(self.data_dict['vbow_id2token'])
# self.tfms = transforms.Compose([transforms.Resize(model.image_size), transforms.ToTensor(),
# transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]), ])
def __len__(self):
return len(self.data_dict['words'])
def __getitem__(self, idx):
word_list, relation, head_d, tail_d, imgid = self.data_dict['words'][idx], self.data_dict['relations'][idx], \
self.data_dict['heads'][idx], self.data_dict['tails'][idx], \
self.data_dict['imgids'][idx]
item_id = self.data_dict['dataid'][idx]
# [CLS] ... <s> head </s> ... <o> tail <o/> .. [SEP]
head_pos, tail_pos = head_d['pos'], tail_d['pos']
head_tail_pos = torch.tensor(head_d['pos'] + tail_d['pos'])
tokens = [self.tokenizer.tokenize(word) for word in word_list]
pieces = [piece for pieces in tokens for piece in pieces]
_bert_inputs = self.tokenizer.convert_tokens_to_ids(pieces)
_bert_inputs = [self.tokenizer.cls_token_id] + _bert_inputs + [self.tokenizer.sep_token_id]
        input_ids = np.zeros(self.max_seq, np.int64)
input_ids[:len(_bert_inputs)] = _bert_inputs
input_ids = torch.tensor(input_ids)
attention_mask = torch.zeros(self.max_seq, dtype=torch.long)
attention_mask[:len(_bert_inputs)] = 1
token_type_ids = torch.zeros(self.max_seq, dtype=torch.long)
length = len(word_list)
        _pieces2word = np.zeros((length, len(_bert_inputs)), dtype=bool)
start = 0
for i, pieces in enumerate(tokens):
if len(pieces) == 0:
continue
pieces = list(range(start, start + len(pieces)))
_pieces2word[i, pieces[0] + 1:pieces[-1] + 2] = 1
start += len(pieces)
# max_pie = np.max([len(x) for x in tokens])
        pieces2word = np.zeros((self.max_seq, self.max_seq), dtype=bool)
pieces2word[:_pieces2word.shape[0], :_pieces2word.shape[1]] = _pieces2word
pieces2word = torch.tensor(pieces2word)
re_label = self.re_dict[relation] # label to id
dep_head = [k if i - 1 < 0 else i - 1 for k, i in enumerate(self.data_dict['dep_head'][idx])]
dep_tail = [i for i in range(0, len(dep_head))]
edge_index = torch.tensor([dep_head, dep_tail], dtype=torch.long)
edge_index = add_self_loops(edge_index)[0]
adj_matrix = to_dense_adj(edge_index, max_num_nodes=self.max_seq).squeeze()
edge_mask = torch.zeros(self.max_seq + self.max_obj_num, self.max_seq + self.max_obj_num)
edge_mask[:length, :length] = 1
edge_mask[self.max_seq + self.max_obj_num:, :length] = 1
edge_mask[self.max_seq:self.max_seq + self.max_obj_num] = 1
edge_mask[self.max_seq:self.max_seq + self.max_obj_num, self.max_seq:self.max_seq + self.max_obj_num] = 1
# text_bow features
tbow_features = self.data_dict['tbow_features'][idx]
# visual_bow features
vbow_features = self.data_dict['vbow_features'][idx]
# image process
if self.img_path is not None:
try:
img_path = os.path.join(self.img_path, imgid)
image = Image.open(img_path).convert('RGB')
# image = self.transform(image)
image = self.clip_processor(images=image, return_tensors='pt')['pixel_values'].squeeze()
except:
img_path = os.path.join(self.img_path, 'inf.png')
image = Image.open(img_path).convert('RGB')
image = self.clip_processor(images=image, return_tensors='pt')['pixel_values'].squeeze()
if self.aux_img_path is not None:
# detected object img
aux_imgs = []
aux_img_paths = []
imgid = imgid.split(".")[0]
if item_id in self.data_dict['aux_imgs']:
aux_img_paths = self.data_dict['aux_imgs'][item_id]
aux_img_paths = [os.path.join(self.aux_img_path, path) for path in aux_img_paths]
# select 3 img
for i in range(min(3, len(aux_img_paths))):
aux_img = Image.open(aux_img_paths[i]).convert('RGB')
aux_img = self.aux_processor(images=aux_img, return_tensors='pt')['pixel_values'].squeeze()
aux_imgs.append(aux_img)
# padding
aux_mask = torch.tensor([1 for _ in range(len(aux_imgs))] + [0 for _ in range(3 - len(aux_imgs))])
for i in range(3 - len(aux_imgs)):
aux_imgs.append(torch.zeros((3, self.aux_size, self.aux_size)))
aux_imgs = torch.stack(aux_imgs, dim=0)
assert len(aux_imgs) == 3
return input_ids, pieces2word, attention_mask, token_type_ids, adj_matrix, head_tail_pos, torch.tensor(
re_label), image, aux_imgs, aux_mask, edge_mask, vbow_features, tbow_features
return input_ids, pieces2word, attention_mask, token_type_ids, adj_matrix, head_tail_pos, torch.tensor(
re_label), image, edge_mask, vbow_features, tbow_features
return input_ids, pieces2word, attention_mask, token_type_ids, adj_matrix, head_tail_pos, torch.tensor(
re_label), edge_mask, vbow_features, tbow_features
def extend_tensor(tensor, extended_shape, fill=0):
tensor_shape = tensor.shape
extended_tensor = torch.zeros(extended_shape, dtype=tensor.dtype).to(tensor.device)
extended_tensor = extended_tensor.fill_(fill)
if len(tensor_shape) == 1:
extended_tensor[:tensor_shape[0]] = tensor
elif len(tensor_shape) == 2:
extended_tensor[:tensor_shape[0], :tensor_shape[1]] = tensor
elif len(tensor_shape) == 3:
extended_tensor[:tensor_shape[0], :tensor_shape[1], :tensor_shape[2]] = tensor
elif len(tensor_shape) == 4:
extended_tensor[:tensor_shape[0], :tensor_shape[1], :tensor_shape[2], :tensor_shape[3]] = tensor
return extended_tensor
def padded_stack(tensors, padding=0):
dim_count = len(tensors[0].shape)
max_shape = [max([t.shape[d] for t in tensors]) for d in range(dim_count)]
padded_tensors = []
for t in tensors:
e = extend_tensor(t, max_shape, fill=padding)
padded_tensors.append(e)
stacked = torch.stack(padded_tensors)
return stacked
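# Hedged worked example (toy tensors): every tensor is padded to the largest size
# per dimension before stacking, so ragged sequences can be batched together.
#
#   a = torch.tensor([1, 2, 3])
#   b = torch.tensor([4, 5])
#   padded_stack([a, b], padding=0)
#   # tensor([[1, 2, 3],
#   #         [4, 5, 0]])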
# def collate_fn_padding(batch):
# data_types = len(batch[0])
# bsz = len(batch)
#
# for i in range(data_types):
# samples = [x for b in range(bsz) for x in batch[b][i]]
# if not batch[0][i].shape:
# padded_batch[key] = torch.stack(samples)
# else:
# padded_batch[key] = padded_stack([s[key] for s in batch])
#
# return padded_batch
#
# padded_batch = dict()
# keys = batch[0].keys()
| 12,476 | 42.024138 | 120 | py |
MRE-ISE | MRE-ISE-main/cores/lamo/decoding_network.py | import torch
from torch import nn
from torch.nn import functional as F
from cores.lamo.inference_network import CombinedInferenceNetwork, ContextualInferenceNetwork
class DecoderNetwork(nn.Module):
def __init__(self, text_input_size, visual_input_size, bert_size, infnet, n_components=10, model_type='prodLDA',
hidden_sizes=(100,100), activation='softplus', dropout=0.2,
learn_priors=True, label_size=0):
"""
        Initialize DecoderNetwork.
Args
text_input_size : int, dimension of text input
visual_input_size : int, dimension of visual input
n_components : int, number of topic components, (default 10)
model_type : string, 'prodLDA' or 'LDA' (default 'prodLDA')
hidden_sizes : tuple, length = n_layers, (default (100, 100))
activation : string, 'softplus', 'relu', (default 'softplus')
learn_priors : bool, make priors learnable parameter
"""
super(DecoderNetwork, self).__init__()
        assert isinstance(text_input_size, int), "text input_size must be type int."
        assert isinstance(visual_input_size, int), "visual input_size must be type int."
assert isinstance(n_components, int) and n_components > 0, \
"n_components must be type int > 0."
assert model_type in ['prodLDA', 'LDA'], \
"model type must be 'prodLDA' or 'LDA'"
assert isinstance(hidden_sizes, tuple), \
"hidden_sizes must be type tuple."
assert activation in ['softplus', 'relu'], \
"activation must be 'softplus' or 'relu'."
assert dropout >= 0, "dropout must be >= 0."
self.text_input_size = text_input_size
self.visual_input_size = visual_input_size
self.n_components = n_components
self.model_type = model_type
self.hidden_sizes = hidden_sizes
self.activation = activation
self.dropout = dropout
self.learn_priors = learn_priors
self.topic_text_word_matrix = None
self.topic_visual_word_matrix = None
if infnet == "zeroshot":
self.inf_net = ContextualInferenceNetwork(
text_input_size, visual_input_size, bert_size, n_components, hidden_sizes, activation, label_size=label_size)
elif infnet == "combined":
self.inf_net = CombinedInferenceNetwork(
text_input_size, visual_input_size, bert_size, n_components, hidden_sizes, activation, label_size=label_size)
else:
raise Exception('Missing infnet parameter, options are zeroshot and combined')
if label_size != 0:
self.label_classification = nn.Linear(n_components, label_size)
# init prior parameters
# \mu_1k = log \alpha_k + 1/K \sum_i log \alpha_i;
# \alpha = 1 \forall \alpha
topic_prior_mean = 0.0
self.prior_mean = torch.tensor(
[topic_prior_mean] * n_components)
if torch.cuda.is_available():
self.prior_mean = self.prior_mean.cuda()
if self.learn_priors:
self.prior_mean = nn.Parameter(self.prior_mean)
# \Sigma_1kk = 1 / \alpha_k (1 - 2/K) + 1/K^2 \sum_i 1 / \alpha_k;
# \alpha = 1 \forall \alpha
topic_prior_variance = 1. - (1. / self.n_components)
self.prior_variance = torch.tensor(
[topic_prior_variance] * n_components)
if torch.cuda.is_available():
self.prior_variance = self.prior_variance.cuda()
if self.learn_priors:
self.prior_variance = nn.Parameter(self.prior_variance)
self.beta = torch.Tensor(n_components, text_input_size)
if torch.cuda.is_available():
self.beta = self.beta.cuda()
self.beta = nn.Parameter(self.beta)
nn.init.xavier_uniform_(self.beta)
self.beta_batchnorm = nn.BatchNorm1d(text_input_size, affine=False)
# dropout on theta
self.drop_theta = nn.Dropout(p=self.dropout)
self.alpha = torch.Tensor(n_components, visual_input_size)
if torch.cuda.is_available():
self.alpha = self.alpha.cuda()
self.alpha = nn.Parameter(self.alpha)
nn.init.xavier_uniform_(self.alpha)
self.alpha_batchnorm = nn.BatchNorm1d(visual_input_size, affine=False)
# dropout on theta
self.drop_alpha = nn.Dropout(p=self.dropout)
@staticmethod
def reparameterize(mu, logvar):
"""Reparameterize the theta distribution."""
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
def forward(self, x, x_bert, labels=None):
"""Forward pass."""
# batch_size x n_components
posterior_mu, posterior_log_sigma = self.inf_net(x, x_bert, labels)
posterior_sigma = torch.exp(posterior_log_sigma)
# generate samples from theta
theta = F.softmax(
self.reparameterize(posterior_mu, posterior_log_sigma), dim=1)
theta = self.drop_theta(theta)
# prodLDA vs LDA
if self.model_type == 'prodLDA':
# in: batch_size x input_size x n_components
text_word_dist = F.softmax(
self.beta_batchnorm(torch.matmul(theta, self.beta)), dim=1)
# word_dist: batch_size x input_size
self.topic_text_word_matrix = self.beta
# in: batch_size x input_size x n_components
visual_word_dist = F.softmax(
self.alpha_batchnorm(torch.matmul(theta, self.alpha)), dim=1)
# visual_word_dist: batch_size x input_size
self.topic_visual_word_matrix = self.alpha
elif self.model_type == 'LDA':
# simplex constrain on Beta
beta = F.softmax(self.beta_batchnorm(self.beta), dim=1)
self.topic_text_word_matrix = beta
text_word_dist = torch.matmul(theta, beta)
# word_dist: batch_size x input_size
# simplex constrain on Beta
alpha = F.softmax(self.alpha_batchnorm(self.alpha), dim=1)
self.topic_visual_word_matrix = alpha
visual_word_dist = torch.matmul(theta, alpha)
# word_dist: batch_size x input_size
else:
raise NotImplementedError("Model Type Not Implemented")
# classify labels
estimated_labels = None
if labels is not None:
estimated_labels = self.label_classification(theta)
return self.prior_mean, self.prior_variance, \
posterior_mu, posterior_sigma, posterior_log_sigma, text_word_dist, visual_word_dist, estimated_labels
def get_theta(self, x, x_bert, labels=None):
with torch.no_grad():
# batch_size x n_components
posterior_mu, posterior_log_sigma = self.inf_net(x, x_bert, labels)
#posterior_sigma = torch.exp(posterior_log_sigma)
# generate samples from theta
theta = F.softmax(
self.reparameterize(posterior_mu, posterior_log_sigma), dim=1)
return theta
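# Hedged usage sketch (sizes are illustrative only): the decoder ties a single
# topic distribution theta to both a textual and a visual word simplex.
#
#   net = DecoderNetwork(text_input_size=2000, visual_input_size=2000, bert_size=512,
#                        infnet='combined', n_components=20)
#   outputs = net(x_t_bow, x_contextual)   # x_t_bow: (B, 2000), x_contextual: (B, 512)
#   prior_mu, prior_var, post_mu, post_var, post_log_var, t_dist, v_dist, _ = outputs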
| 7,129 | 39.977011 | 125 | py |
MRE-ISE | MRE-ISE-main/cores/lamo/ctm.py | import datetime
import multiprocessing as mp
import os
import warnings
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import torch
import wordcloud
from scipy.special import softmax
from torch import optim
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader
from tqdm import tqdm
from cores.lamo.early_stopping import EarlyStopping
from cores.lamo.decoding_network import DecoderNetwork
class CTM:
"""Class to train the contextualized topic model. This is the more general class that we are keeping to
avoid braking code, users should use the two subclasses ZeroShotTM and CombinedTm to do topic modeling.
:param text_bow_size: int, dimension of input
:param contextual_size: int, dimension of input that comes from BERT embeddings
:param inference_type: string, you can choose between the contextual model and the combined model
:param n_components: int, number of topic components, (default 10)
:param model_type: string, 'prodLDA' or 'LDA' (default 'prodLDA')
:param hidden_sizes: tuple, length = n_layers, (default (100, 100))
:param activation: string, 'softplus', 'relu', (default 'softplus')
:param dropout: float, dropout to use (default 0.2)
:param learn_priors: bool, make priors a learnable parameter (default True)
:param batch_size: int, size of batch to use for training (default 64)
:param lr: float, learning rate to use for training (default 2e-3)
:param momentum: float, momentum to use for training (default 0.99)
:param solver: string, optimizer 'adam' or 'sgd' (default 'adam')
:param num_epochs: int, number of epochs to train for, (default 100)
:param reduce_on_plateau: bool, reduce learning rate by 10x on plateau of 10 epochs (default False)
:param num_data_loader_workers: int, number of data loader workers (default cpu_count). set it to 0 if you are using Windows
:param label_size: int, number of total labels (default: 0)
:param loss_weights: dict, it contains the name of the weight parameter (key) and the weight (value) for each loss.
It supports only the weight parameter beta for now. If None, then the weights are set to 1 (default: None).
"""
def __init__(self, text_bow_size, visual_bow_size, contextual_size, inference_type="combined", n_components=10,
model_type='prodLDA', hidden_sizes=(100, 100), activation='softplus', dropout=0.2,
learn_priors=True, batch_size=64, lr=2e-3, momentum=0.99, solver='adam', num_epochs=100,
reduce_on_plateau=False, num_data_loader_workers=mp.cpu_count(), label_size=0, loss_weights=None):
self.device = (
torch.device("cuda")
if torch.cuda.is_available()
else torch.device("cpu")
)
if self.__class__.__name__ == "CTM":
raise Exception("You cannot call this class. Use ZeroShotTM or CombinedTM")
        assert isinstance(text_bow_size, int) and text_bow_size > 0, \
            "text_bow_size must be type int > 0."
        assert isinstance(visual_bow_size, int) and visual_bow_size > 0, \
            "visual_bow_size must be type int > 0."
        assert isinstance(n_components, int) and n_components > 0, \
            "n_components must be type int > 0."
assert model_type in ['LDA', 'prodLDA'], \
"model must be 'LDA' or 'prodLDA'."
assert isinstance(hidden_sizes, tuple), \
"hidden_sizes must be type tuple."
assert activation in ['softplus', 'relu'], \
"activation must be 'softplus' or 'relu'."
assert dropout >= 0, "dropout must be >= 0."
assert isinstance(learn_priors, bool), "learn_priors must be boolean."
assert isinstance(batch_size, int) and batch_size > 0, \
"batch_size must be int > 0."
assert lr > 0, "lr must be > 0."
assert isinstance(momentum, float) and 0 < momentum <= 1, \
"momentum must be 0 < float <= 1."
assert solver in ['adam', 'sgd'], "solver must be 'adam' or 'sgd'."
assert isinstance(reduce_on_plateau, bool), \
"reduce_on_plateau must be type bool."
assert isinstance(num_data_loader_workers, int) and num_data_loader_workers >= 0, \
"num_data_loader_workers must by type int >= 0. set 0 if you are using windows"
self.text_bow_size = text_bow_size
self.visual_bow_size = visual_bow_size
self.n_components = n_components
self.model_type = model_type
self.hidden_sizes = hidden_sizes
self.activation = activation
self.dropout = dropout
self.learn_priors = learn_priors
self.batch_size = batch_size
self.lr = lr
self.contextual_size = contextual_size
self.momentum = momentum
self.solver = solver
self.num_epochs = num_epochs
self.reduce_on_plateau = reduce_on_plateau
self.num_data_loader_workers = num_data_loader_workers
self.training_doc_topic_distributions = None
if loss_weights:
self.weights = loss_weights
else:
self.weights = {"beta": 1}
self.model = DecoderNetwork(
text_bow_size, visual_bow_size, self.contextual_size, inference_type, n_components, model_type, hidden_sizes, activation,
dropout, learn_priors, label_size=label_size)
self.early_stopping = None
# init optimizer
if self.solver == 'adam':
self.optimizer = optim.Adam(
self.model.parameters(), lr=lr, betas=(self.momentum, 0.99))
elif self.solver == 'sgd':
self.optimizer = optim.SGD(
self.model.parameters(), lr=lr, momentum=self.momentum)
# init lr scheduler
if self.reduce_on_plateau:
self.scheduler = ReduceLROnPlateau(self.optimizer, patience=10)
# performance attributes
self.best_loss_train = float('inf')
# training attributes
self.model_dir = None
self.nn_epoch = None
# validation attributes
self.validation_data = None
# learned topics
self.best_T_components = None
self.best_V_components = None
# Use cuda if available
if torch.cuda.is_available():
self.USE_CUDA = True
else:
self.USE_CUDA = False
self.model = self.model.to(self.device)
def _loss(self, text_inputs, visual_inputs, text_word_dists, visual_word_dists, prior_mean, prior_variance,
posterior_mean, posterior_variance, posterior_log_variance):
# KL term
# var division term
var_division = torch.sum(posterior_variance / prior_variance, dim=1)
# diff means term
diff_means = prior_mean - posterior_mean
diff_term = torch.sum(
(diff_means * diff_means) / prior_variance, dim=1)
# logvar det division term
logvar_det_division = \
prior_variance.log().sum() - posterior_log_variance.sum(dim=1)
# combine terms
KL = 0.5 * (
var_division + diff_term - self.n_components + logvar_det_division)
# Reconstruction term
T_RL = -torch.sum(text_inputs * torch.log(text_word_dists + 1e-10), dim=1)
V_RL = -torch.sum(visual_inputs * torch.log(visual_word_dists + 1e-10), dim=1)
#loss = self.weights["beta"]*KL + RL
return KL, T_RL, V_RL
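    # Note (added explanation, not in the original source): the terms above are the
    # closed-form KL divergence between two diagonal Gaussians,
    #   KL(q || p) = 0.5 * [ sum_k sigma_q^2 / sigma_p^2
    #                        + sum_k (mu_p - mu_q)^2 / sigma_p^2
    #                        - K + log|Sigma_p| - log|Sigma_q| ],
    # plus one cross-entropy reconstruction term per modality (textual and visual
    # bag-of-words), which together form the ProdLDA/CTM training objective.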
def _train_epoch(self, loader):
"""Train epoch."""
self.model.train()
train_loss = 0
        samples_processed = 0
for batch_samples in loader:
# batch_size x vocab_size
X_T_bow = batch_samples['X_T_bow']
X_T_bow = X_T_bow.reshape(X_T_bow.shape[0], -1)
X_V_bow = batch_samples['X_V_bow']
X_V_bow = X_V_bow.reshape(X_V_bow.shape[0], -1)
X_contextual = batch_samples['X_contextual']
if "labels" in batch_samples.keys():
labels = batch_samples["labels"]
labels = labels.reshape(labels.shape[0], -1)
labels = labels.to(self.device)
else:
labels = None
if self.USE_CUDA:
X_T_bow = X_T_bow.cuda()
X_V_bow = X_V_bow.cuda()
X_contextual = X_contextual.cuda()
# forward pass
self.model.zero_grad()
prior_mean, prior_variance, posterior_mean, posterior_variance,\
posterior_log_variance, text_word_dists, visual_word_dists, estimated_labels = self.model(X_T_bow, X_contextual, labels)
# backward pass
kl_loss, t_rl_loss, v_rl_loss = self._loss(
X_T_bow, X_V_bow, text_word_dists, visual_word_dists, prior_mean, prior_variance,
posterior_mean, posterior_variance, posterior_log_variance)
loss = self.weights["beta"]*kl_loss + t_rl_loss + v_rl_loss
loss = loss.sum()
if labels is not None:
target_labels = torch.argmax(labels, 1)
label_loss = torch.nn.CrossEntropyLoss()(estimated_labels, target_labels)
loss += label_loss
loss.backward()
self.optimizer.step()
# compute train loss
samples_processed += X_T_bow.size()[0]
train_loss += loss.item()
train_loss /= samples_processed
return samples_processed, train_loss
def fit(self, train_dataset, validation_dataset=None, save_dir=None, verbose=False, patience=5, delta=0,
n_samples=20):
"""
Train the CTM model.
:param train_dataset: PyTorch Dataset class for training data.
:param validation_dataset: PyTorch Dataset class for validation data. If not None, the training stops if validation loss doesn't improve after a given patience
:param save_dir: directory to save checkpoint models to.
:param verbose: verbose
:param patience: How long to wait after last time validation loss improved. Default: 5
:param delta: Minimum change in the monitored quantity to qualify as an improvement. Default: 0
:param n_samples: int, number of samples of the document topic distribution (default: 20)
"""
# Print settings to output file
if verbose:
print("Settings: \n\
N Components: {}\n\
Topic Prior Mean: {}\n\
Topic Prior Variance: {}\n\
Model Type: {}\n\
Hidden Sizes: {}\n\
Activation: {}\n\
Dropout: {}\n\
Learn Priors: {}\n\
Learning Rate: {}\n\
Momentum: {}\n\
Reduce On Plateau: {}\n\
Save Dir: {}".format(
self.n_components, 0.0,
1. - (1. / self.n_components), self.model_type,
self.hidden_sizes, self.activation, self.dropout, self.learn_priors,
self.lr, self.momentum, self.reduce_on_plateau, save_dir))
self.model_dir = save_dir
self.idx_2_T_token = train_dataset.idx_2_T_token
self.idx_2_V_token = train_dataset.idx_2_V_token
train_data = train_dataset
self.validation_data = validation_dataset
if self.validation_data is not None:
self.early_stopping = EarlyStopping(patience=patience, verbose=verbose, path=save_dir, delta=delta)
train_loader = DataLoader(
train_data, batch_size=self.batch_size, shuffle=True,
num_workers=self.num_data_loader_workers, drop_last=True)
# init training variables
train_loss = 0
samples_processed = 0
# train loop
        pbar = tqdm(total=self.num_epochs, position=0, leave=True)
for epoch in range(self.num_epochs):
self.nn_epoch = epoch
# train epoch
s = datetime.datetime.now()
sp, train_loss = self._train_epoch(train_loader)
samples_processed += sp
e = datetime.datetime.now()
pbar.update(1)
if self.validation_data is not None:
validation_loader = DataLoader(self.validation_data, batch_size=self.batch_size, shuffle=True,
num_workers=self.num_data_loader_workers, drop_last=True)
# train epoch
s = datetime.datetime.now()
val_samples_processed, val_loss = self._validation(validation_loader)
e = datetime.datetime.now()
# report
if verbose:
print("Epoch: [{}/{}]\tSamples: [{}/{}]\tValidation Loss: {}\tTime: {}".format(
epoch + 1, self.num_epochs, val_samples_processed,
len(self.validation_data) * self.num_epochs, val_loss, e - s))
pbar.set_description("Epoch: [{}/{}]\t Seen Samples: [{}/{}]\tTrain Loss: {}\tValid Loss: {}\tTime: {}".format(
epoch + 1, self.num_epochs, samples_processed,
len(train_data) * self.num_epochs, train_loss, val_loss, e - s))
self.early_stopping(val_loss, self)
if self.early_stopping.early_stop:
print("Early stopping")
break
else:
# save last epoch
self.best_T_components = self.model.beta
self.best_V_components = self.model.alpha
if save_dir is not None:
self.save(save_dir)
pbar.set_description("Epoch: [{}/{}]\t Seen Samples: [{}/{}]\tTrain Loss: {}\tTime: {}".format(
epoch + 1, self.num_epochs, samples_processed,
len(train_data) * self.num_epochs, train_loss, e - s))
pbar.close()
self.training_doc_topic_distributions = self.get_doc_topic_distribution(train_dataset, n_samples)
def _validation(self, loader):
"""Validation epoch."""
self.model.eval()
val_loss = 0
samples_processed = 0
for batch_samples in loader:
# batch_size x vocab_size
X_T_bow = batch_samples['X_T_bow']
X_T_bow = X_T_bow.reshape(X_T_bow.shape[0], -1)
X_V_bow = batch_samples['X_V_bow']
X_V_bow = X_V_bow.reshape(X_V_bow.shape[0], -1)
X_contextual = batch_samples['X_contextual']
if "labels" in batch_samples.keys():
labels = batch_samples["labels"]
labels = labels.reshape(labels.shape[0], -1)
labels = labels.to(self.device)
else:
labels = None
if self.USE_CUDA:
X_T_bow = X_T_bow.cuda()
X_V_bow = X_V_bow.cuda()
X_contextual = X_contextual.cuda()
# # forward pass
# self.model.zero_grad()
# prior_mean, prior_variance, posterior_mean, posterior_variance, posterior_log_variance, word_dists, \
# estimated_labels =\
# self.model(X_bow, X_contextual, labels)
#
# kl_loss, rl_loss = self._loss(X_bow, word_dists, prior_mean, prior_variance,
# posterior_mean, posterior_variance, posterior_log_variance)
# forward pass
self.model.zero_grad()
prior_mean, prior_variance, posterior_mean, posterior_variance, \
posterior_log_variance, text_word_dists, visual_word_dists, estimated_labels = self.model(X_T_bow,
X_contextual,
labels)
# backward pass
kl_loss, t_rl_loss, v_rl_loss = self._loss(
X_T_bow, X_V_bow, text_word_dists, visual_word_dists, prior_mean, prior_variance,
posterior_mean, posterior_variance, posterior_log_variance)
loss = self.weights["beta"]*kl_loss + t_rl_loss + v_rl_loss
loss = loss.sum()
if labels is not None:
target_labels = torch.argmax(labels, 1)
label_loss = torch.nn.CrossEntropyLoss()(estimated_labels, target_labels)
loss += label_loss
# compute train loss
samples_processed += X_T_bow.size()[0]
val_loss += loss.item()
val_loss /= samples_processed
return samples_processed, val_loss
def get_thetas(self, dataset, n_samples=20):
"""
        Get the document-topic distribution for a dataset of documents. Includes multiple sampling to reduce
        variation via the parameter n_samples.
:param dataset: a PyTorch Dataset containing the documents
:param n_samples: the number of sample to collect to estimate the final distribution (the more the better).
"""
return self.get_doc_topic_distribution(dataset, n_samples=n_samples)
def get_doc_topic_distribution(self, dataset, n_samples=20):
"""
        Get the document-topic distribution for a dataset of documents. Includes multiple sampling to reduce
        variation via the parameter n_samples.
:param dataset: a PyTorch Dataset containing the documents
:param n_samples: the number of sample to collect to estimate the final distribution (the more the better).
"""
self.model.eval()
loader = DataLoader(
dataset, batch_size=self.batch_size, shuffle=False,
num_workers=self.num_data_loader_workers)
        pbar = tqdm(total=n_samples, position=0, leave=True)
final_thetas = []
final_alphas = []
for sample_index in range(n_samples):
with torch.no_grad():
collect_theta = []
collect_alpha = []
for batch_samples in loader:
# batch_size x vocab_size
X_T_bow = batch_samples['X_T_bow']
X_T_bow = X_T_bow.reshape(X_T_bow.shape[0], -1)
X_V_bow = batch_samples['X_V_bow']
X_V_bow = X_V_bow.reshape(X_V_bow.shape[0], -1)
X_contextual = batch_samples['X_contextual']
if "labels" in batch_samples.keys():
labels = batch_samples["labels"]
labels = labels.reshape(labels.shape[0], -1)
labels = labels.to(self.device)
else:
labels = None
if self.USE_CUDA:
X_T_bow = X_T_bow.cuda()
X_V_bow = X_V_bow.cuda()
X_contextual = X_contextual.cuda()
# forward pass
self.model.zero_grad()
collect_theta.extend(self.model.get_theta(X_T_bow, X_contextual, labels).cpu().numpy().tolist())
pbar.update(1)
pbar.set_description("Sampling: [{}/{}]".format(sample_index + 1, n_samples))
final_thetas.append(np.array(collect_theta))
pbar.close()
return np.sum(final_thetas, axis=0) / n_samples
def get_most_likely_topic(self, doc_topic_distribution):
""" get the most likely topic for each document
:param doc_topic_distribution: ndarray representing the topic distribution of each document
"""
        return np.argmax(doc_topic_distribution, axis=1)
def get_topics(self, k=10):
"""
Retrieve topic words.
:param k: int, number of words to return per topic, default 10.
"""
assert k <= self.text_bow_size, "k must be <= input size."
T_component_dists = self.best_T_components
V_component_dists = self.best_V_components
T_topics = defaultdict(list)
V_topics = defaultdict(list)
for i in range(self.n_components):
# obtain the topic textual word
_, idxs = torch.topk(T_component_dists[i], k)
component_words = [self.idx_2_T_token[idx]
for idx in idxs.cpu().numpy()]
T_topics[i] = component_words
# obtain the topic visual word
_, idxs = torch.topk(V_component_dists[i], k)
            component_words = [self.idx_2_V_token[idx]
for idx in idxs.cpu().numpy()]
V_topics[i] = component_words
return T_topics, V_topics
def get_topic_lists(self, k=10):
"""
Retrieve the lists of topic words.
:param k: (int) number of words to return per topic, default 10.
"""
assert k <= self.text_bow_size, "k must be <= text input size."
assert k <= self.visual_bow_size, "k must be <= visual input size."
# TODO: collapse this method with the one that just returns the topics
T_component_dists = self.best_T_components
V_component_dists = self.best_V_components
T_topics, V_topics = [], []
for i in range(self.n_components):
_, idxs = torch.topk(T_component_dists[i], k)
component_words = [self.idx_2_T_token[idx]
for idx in idxs.cpu().numpy()]
T_topics.append(component_words)
_, idxs = torch.topk(V_component_dists[i], k)
component_words = [self.idx_2_V_token[idx]
for idx in idxs.cpu().numpy()]
V_topics.append(component_words)
return T_topics, V_topics
def _format_file(self):
model_dir = "contextualized_topic_model_nc_{}_tpm_{}_tpv_{}_hs_{}_ac_{}_do_{}_lr_{}_mo_{}_rp_{}". \
format(self.n_components, 0.0, 1 - (1. / self.n_components),
self.model_type, self.hidden_sizes, self.activation,
self.dropout, self.lr, self.momentum,
self.reduce_on_plateau)
return model_dir
def save(self, models_dir=None):
"""
Save model. (Experimental Feature, not tested)
:param models_dir: path to directory for saving NN models.
"""
warnings.simplefilter('always', Warning)
warnings.warn("This is an experimental feature that we has not been fully tested. Refer to the following issue:"
"https://github.com/MilaNLProc/contextualized-topic-models/issues/38",
Warning)
if (self.model is not None) and (models_dir is not None):
model_dir = self._format_file()
if not os.path.isdir(os.path.join(models_dir, model_dir)):
os.makedirs(os.path.join(models_dir, model_dir))
filename = "epoch_{}".format(self.nn_epoch) + '.pth'
fileloc = os.path.join(models_dir, model_dir, filename)
with open(fileloc, 'wb') as file:
torch.save({'state_dict': self.model.state_dict(),
'dcue_dict': self.__dict__}, file)
def load(self, model_dir, epoch):
"""
Load a previously trained model. (Experimental Feature, not tested)
:param model_dir: directory where models are saved.
:param epoch: epoch of model to load.
"""
warnings.simplefilter('always', Warning)
warnings.warn("This is an experimental feature that we has not been fully tested. Refer to the following issue:"
"https://github.com/MilaNLProc/contextualized-topic-models/issues/38",
Warning)
epoch_file = "epoch_" + str(epoch) + ".pth"
models_dir = self._format_file()
model_file = os.path.join(model_dir, models_dir, epoch_file)
with open(model_file, 'rb') as model_dict:
checkpoint = torch.load(model_dict, map_location=torch.device(self.device))
for (k, v) in checkpoint['dcue_dict'].items():
setattr(self, k, v)
self.model.load_state_dict(checkpoint['state_dict'])
def get_topic_text_word_matrix(self):
"""
Return the topic-word matrix (dimensions: number of topics x length of the vocabulary).
If model_type is LDA, the matrix is normalized; otherwise the matrix is unnormalized.
"""
return self.model.topic_text_word_matrix.cpu().detach().numpy()
def get_topic_text_word_distribution(self):
"""
Return the topic-word distribution (dimensions: number of topics x length of the vocabulary).
"""
mat = self.get_topic_text_word_matrix()
return softmax(mat, axis=1)
def get_topic_visual_word_matrix(self):
"""
Return the topic-word matrix (dimensions: number of topics x length of the vocabulary).
If model_type is LDA, the matrix is normalized; otherwise the matrix is unnormalized.
"""
return self.model.topic_visual_word_matrix.cpu().detach().numpy()
def get_topic_visual_word_distribution(self):
"""
Return the topic-word distribution (dimensions: number of topics x length of the vocabulary).
"""
mat = self.get_topic_visual_word_matrix()
return softmax(mat, axis=1)
def get_text_word_distribution_by_topic_id(self, topic):
"""
Return the word probability distribution of a topic sorted by probability.
:param topic: id of the topic (int)
:returns list of tuples (word, probability) sorted by the probability in descending order
"""
if topic >= self.n_components:
raise Exception('Topic id must be lower than the number of topics')
else:
wd = self.get_topic_text_word_distribution()
t = [(word, wd[topic][idx]) for idx, word in self.idx_2_T_token.items()]
t = sorted(t, key=lambda x: -x[1])
return t
def get_visual_word_distribution_by_topic_id(self, topic):
"""
Return the word probability distribution of a topic sorted by probability.
:param topic: id of the topic (int)
:returns list of tuples (word, probability) sorted by the probability in descending order
"""
if topic >= self.n_components:
raise Exception('Topic id must be lower than the number of topics')
else:
wd = self.get_topic_visual_word_distribution()
t = [(word, wd[topic][idx]) for idx, word in self.idx_2_V_token.items()]
t = sorted(t, key=lambda x: -x[1])
return t
def get_wordcloud(self, topic_id, n_words=5, background_color="black", width=1000, height=400):
"""
Plotting the wordcloud. It is an adapted version of the code found here:
http://amueller.github.io/word_cloud/auto_examples/simple.html#sphx-glr-auto-examples-simple-py and
here https://github.com/ddangelov/Top2Vec/blob/master/top2vec/Top2Vec.py
:param topic_id: id of the topic
:param n_words: number of words to show in word cloud
:param background_color: color of the background
:param width: width of the produced image
:param height: height of the produced image
"""
word_score_list = self.get_text_word_distribution_by_topic_id(topic_id)[:n_words]
word_score_dict = {tup[0]: tup[1] for tup in word_score_list}
plt.figure(figsize=(10, 4), dpi=200)
plt.axis("off")
plt.imshow(wordcloud.WordCloud(width=width, height=height, background_color=background_color
).generate_from_frequencies(word_score_dict))
plt.title("Displaying Topic " + str(topic_id), loc='center', fontsize=24)
plt.show()
def get_predicted_topics(self, dataset, n_samples):
"""
        Return a list containing the predicted topic for each document (length: number of documents).
:param dataset: CTMDataset to infer topics
:param n_samples: number of sampling of theta
:return: the predicted topics
"""
predicted_topics = []
thetas = self.get_doc_topic_distribution(dataset, n_samples)
for idd in range(len(dataset)):
predicted_topic = np.argmax(thetas[idd] / np.sum(thetas[idd]))
predicted_topics.append(predicted_topic)
return predicted_topics
def get_ldavis_data_format(self, vocab, dataset, n_samples):
"""
Returns the data that can be used in input to pyldavis to plot
the topics
"""
term_frequency = np.ravel(dataset.X_bow.sum(axis=0))
doc_lengths = np.ravel(dataset.X_bow.sum(axis=1))
term_topic = self.get_topic_text_word_distribution()
doc_topic_distribution = self.get_doc_topic_distribution(dataset, n_samples=n_samples)
data = {'topic_term_dists': term_topic,
'doc_topic_dists': doc_topic_distribution,
'doc_lengths': doc_lengths,
'vocab': vocab,
'term_frequency': term_frequency}
return data
def get_top_documents_per_topic_id(self, unpreprocessed_corpus, document_topic_distributions, topic_id, k=5):
probability_list = document_topic_distributions.T[topic_id]
ind = probability_list.argsort()[-k:][::-1]
res = []
for i in ind:
res.append((unpreprocessed_corpus[i], document_topic_distributions[i][topic_id]))
return res
class ZeroShotTM(CTM):
"""ZeroShotTM, as described in https://arxiv.org/pdf/2004.07737v1.pdf
"""
def __init__(self, **kwargs):
inference_type = "zeroshot"
super().__init__(**kwargs, inference_type=inference_type)
class CombinedTM(CTM):
"""CombinedTM, as described in https://arxiv.org/pdf/2004.03974.pdf
"""
def __init__(self, **kwargs):
inference_type = "combined"
super().__init__(**kwargs, inference_type=inference_type)
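# Hedged usage sketch (the dataset is assumed to follow the CTMDataset convention
# with 'X_T_bow', 'X_V_bow' and 'X_contextual' fields plus idx_2_T_token /
# idx_2_V_token mappings; sizes are illustrative):
#
#   ctm = CombinedTM(text_bow_size=2000, visual_bow_size=2000, contextual_size=512,
#                    n_components=20, num_epochs=50)
#   ctm.fit(train_dataset)
#   text_topics, visual_topics = ctm.get_topic_lists(k=10)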
| 30,164 | 41.545839 | 167 | py |
MRE-ISE | MRE-ISE-main/cores/lamo/early_stopping.py | import numpy as np
import torch
class EarlyStopping:
"""Early stops the training if validation loss doesn't improve after a given patience.
Source code: https://github.com/Bjarten/early-stopping-pytorch """
def __init__(self, patience=7, verbose=False, delta=0, path='checkpoint.pt', trace_func=print):
"""
Args:
patience (int): How long to wait after last time validation loss improved.
Default: 7
verbose (bool): If True, prints a message for each validation loss improvement.
Default: False
delta (float): Minimum change in the monitored quantity to qualify as an improvement.
Default: 0
path (str): Path for the checkpoint to be saved to.
Default: 'checkpoint.pt'
trace_func (function): trace print function.
Default: print
"""
self.patience = patience
self.verbose = verbose
self.counter = 0
self.best_score = None
self.early_stop = False
        self.val_loss_min = np.inf
self.delta = delta
if path is None:
self.path = 'checkpoint.pt'
else:
self.path = path
self.trace_func = trace_func
def __call__(self, val_loss, model):
score = -val_loss
if self.best_score is None:
self.best_score = score
self.save_checkpoint(val_loss, model)
model.best_components = model.model.beta.clone()
elif score < self.best_score + self.delta:
self.counter += 1
self.trace_func(f'EarlyStopping counter: {self.counter} out of {self.patience}')
if self.counter >= self.patience:
self.early_stop = True
else:
self.best_score = score
model.best_components = model.model.beta.clone()
self.save_checkpoint(val_loss, model)
self.counter = 0
def save_checkpoint(self, val_loss, model):
"""Saves model when validation loss decrease."""
if self.verbose:
self.trace_func(f'Validation loss decreased ({self.val_loss_min:.6f} --> {val_loss:.6f}). Saving model ...')
model.save(self.path)
self.val_loss_min = val_loss
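# Hedged usage sketch (`validate` and `num_epochs` are placeholders; the wrapped
# model must expose `model.model.beta` and a `save(path)` method, as assumed above):
#
#   stopper = EarlyStopping(patience=5, verbose=True, path='checkpoints/')
#   for epoch in range(num_epochs):
#       val_loss = validate(model)
#       stopper(val_loss, model)
#       if stopper.early_stop:
#           break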
| 2,353 | 36.967742 | 121 | py |
MRE-ISE | MRE-ISE-main/cores/lamo/inference_network.py | from collections import OrderedDict
from torch import nn
import torch
class ContextualInferenceNetwork(nn.Module):
"""Inference Network."""
def __init__(self, text_input_size, visual_input_size, bert_size, output_size, hidden_sizes,
activation='softplus', dropout=0.2, label_size=0):
"""
# TODO: check dropout in main caller
Initialize InferenceNetwork.
Args
text_input_size : int, dimension of text input
visual_input_size : int, dimension of visual input
output_size : int, dimension of output
hidden_sizes : tuple, length = n_layers
activation : string, 'softplus' or 'relu', default 'softplus'
            dropout : float, default 0.2
"""
super(ContextualInferenceNetwork, self).__init__()
        assert isinstance(text_input_size, int), "text input_size must be type int."
        assert isinstance(visual_input_size, int), "visual input_size must be type int."
assert isinstance(output_size, int), "output_size must be type int."
assert isinstance(hidden_sizes, tuple), \
"hidden_sizes must be type tuple."
assert activation in ['softplus', 'relu'], \
"activation must be 'softplus' or 'relu'."
assert dropout >= 0, "dropout must be >= 0."
self.text_input_size = text_input_size
self.visual_input_size = visual_input_size
self.output_size = output_size
self.hidden_sizes = hidden_sizes
self.dropout = dropout
if activation == 'softplus':
self.activation = nn.Softplus()
elif activation == 'relu':
self.activation = nn.ReLU()
self.input_layer = nn.Linear(bert_size + label_size, hidden_sizes[0])
#self.adapt_bert = nn.Linear(bert_size, hidden_sizes[0])
self.hiddens = nn.Sequential(OrderedDict([
('l_{}'.format(i), nn.Sequential(nn.Linear(h_in, h_out), self.activation))
for i, (h_in, h_out) in enumerate(zip(hidden_sizes[:-1], hidden_sizes[1:]))]))
self.f_mu = nn.Linear(hidden_sizes[-1], output_size)
self.f_mu_batchnorm = nn.BatchNorm1d(output_size, affine=False)
self.f_sigma = nn.Linear(hidden_sizes[-1], output_size)
self.f_sigma_batchnorm = nn.BatchNorm1d(output_size, affine=False)
self.dropout_enc = nn.Dropout(p=self.dropout)
def forward(self, x, x_bert, labels=None):
"""Forward pass."""
x = x_bert
        if labels is not None:
x = torch.cat((x_bert, labels), 1)
x = self.input_layer(x)
x = self.activation(x)
x = self.hiddens(x)
x = self.dropout_enc(x)
mu = self.f_mu_batchnorm(self.f_mu(x))
log_sigma = self.f_sigma_batchnorm(self.f_sigma(x))
return mu, log_sigma
class CombinedInferenceNetwork(nn.Module):
"""Inference Network."""
def __init__(self, text_input_size, visual_input_size, bert_size, output_size, hidden_sizes,
activation='softplus', dropout=0.2, label_size=0):
"""
Initialize InferenceNetwork.
Args
text_input_size : int, dimension of text input
visual_input_size : int, dimension of visual input
output_size : int, dimension of output
hidden_sizes : tuple, length = n_layers
activation : string, 'softplus' or 'relu', default 'softplus'
            dropout : float, default 0.2
"""
super(CombinedInferenceNetwork, self).__init__()
        assert isinstance(text_input_size, int), "text input_size must be type int."
        assert isinstance(visual_input_size, int), "visual input_size must be type int."
assert isinstance(output_size, int), "output_size must be type int."
assert isinstance(hidden_sizes, tuple), \
"hidden_sizes must be type tuple."
assert activation in ['softplus', 'relu'], \
"activation must be 'softplus' or 'relu'."
assert dropout >= 0, "dropout must be >= 0."
self.text_input_size = text_input_size
self.visual_input_size = visual_input_size
self.output_size = output_size
self.hidden_sizes = hidden_sizes
self.dropout = dropout
if activation == 'softplus':
self.activation = nn.Softplus()
elif activation == 'relu':
self.activation = nn.ReLU()
self.adapt_bert = nn.Linear(bert_size, text_input_size)
#self.bert_layer = nn.Linear(hidden_sizes[0], hidden_sizes[0])
self.input_layer = nn.Linear(text_input_size + visual_input_size + label_size, hidden_sizes[0])
self.hiddens = nn.Sequential(OrderedDict([
('l_{}'.format(i), nn.Sequential(nn.Linear(h_in, h_out), self.activation))
for i, (h_in, h_out) in enumerate(zip(hidden_sizes[:-1], hidden_sizes[1:]))]))
self.f_mu = nn.Linear(hidden_sizes[-1], output_size)
self.f_mu_batchnorm = nn.BatchNorm1d(output_size, affine=False)
self.f_sigma = nn.Linear(hidden_sizes[-1], output_size)
self.f_sigma_batchnorm = nn.BatchNorm1d(output_size, affine=False)
self.dropout_enc = nn.Dropout(p=self.dropout)
def forward(self, x, x_bert, labels=None):
"""Forward pass."""
x_bert = self.adapt_bert(x_bert)
x = torch.cat((x, x_bert), 1)
if labels is not None:
x = torch.cat((x, labels), 1)
x = self.input_layer(x)
x = self.activation(x)
x = self.hiddens(x)
x = self.dropout_enc(x)
mu = self.f_mu_batchnorm(self.f_mu(x))
log_sigma = self.f_sigma_batchnorm(self.f_sigma(x))
return mu, log_sigma
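# Hedged usage sketch (shapes are illustrative): the combined network projects the
# contextual embedding down to the textual BoW size, concatenates it with the BoW
# vector, and outputs the parameters of the posterior Gaussian over topics.
#
#   net = CombinedInferenceNetwork(text_input_size=2000, visual_input_size=2000,
#                                  bert_size=512, output_size=20, hidden_sizes=(100, 100))
#   mu, log_sigma = net(x_bow, x_bert)   # x_bow: (B, 2000), x_bert: (B, 512)
#   # mu.shape == log_sigma.shape == (B, 20)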
| 5,742 | 37.033113 | 103 | py |