code stringlengths 17 6.64M |
|---|
def build_vocab(imgs, params):
    """Build the caption vocabulary, mapping rare words to 'UNK'.

    Words occurring at most params['word_count_threshold'] times are dropped
    from the vocabulary. Every image gains a 'final_captions' field holding
    its token lists with rare words replaced by 'UNK'. Returns the vocabulary
    as a list of words.
    """
    threshold = params['word_count_threshold']

    # Word frequency over every token of every sentence.
    freq = {}
    for image in imgs:
        for sentence in image['sentences']:
            for token in sentence['tokens']:
                freq[token] = freq.get(token, 0) + 1

    ranked = sorted([(n, w) for w, n in freq.items()], reverse=True)
    print('top words and their counts:')
    print('\n'.join(map(str, ranked[:20])))

    total_words = sum(freq.values())
    print('total words:', total_words)
    bad_words = [w for w, n in freq.items() if n <= threshold]
    vocab = [w for w, n in freq.items() if n > threshold]
    bad_count = sum(freq[w] for w in bad_words)
    print('number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(freq), len(bad_words) * 100.0 / len(freq)))
    print('number of words in vocab would be %d' % (len(vocab),))
    print('number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count * 100.0 / total_words))

    # Sentence-length histogram (informational only).
    sent_lengths = {}
    for image in imgs:
        for sentence in image['sentences']:
            nw = len(sentence['tokens'])
            sent_lengths[nw] = sent_lengths.get(nw, 0) + 1
    max_len = max(sent_lengths.keys())
    print('max length sentence in raw data: ', max_len)
    print('sentence length distribution (count, number of words):')
    sum_len = sum(sent_lengths.values())
    for length in range(max_len + 1):
        print('%2d: %10d %f%%' % (length, sent_lengths.get(length, 0), sent_lengths.get(length, 0) * 100.0 / sum_len))

    # Reserve a special UNK token whenever any word fell below the threshold.
    if bad_count > 0:
        print('inserting the special UNK token')
        vocab.append('UNK')

    for image in imgs:
        image['final_captions'] = []
        for sentence in image['sentences']:
            caption = [w if freq.get(w, 0) > threshold else 'UNK' for w in sentence['tokens']]
            image['final_captions'].append(caption)
    return vocab
|
def encode_captions(imgs, params, wtoi):
    """Encode all captions into one large uint32 array of 1-indexed vocab ids.

    Also produces label_start_ix and label_end_ix, which hold 1-indexed and
    inclusive (Lua-style) pointers to the first and last caption of each
    image, plus label_length with the (truncated) length of every caption.
    """
    max_length = params['max_length']
    num_images = len(imgs)
    num_captions = sum(len(img['final_captions']) for img in imgs)

    label_arrays = []
    label_start_ix = np.zeros(num_images, dtype='uint32')
    label_end_ix = np.zeros(num_images, dtype='uint32')
    label_length = np.zeros(num_captions, dtype='uint32')

    caption_counter = 0
    counter = 1  # running 1-based caption index
    for i, img in enumerate(imgs):
        n = len(img['final_captions'])
        assert n > 0, 'error: some image has no captions'

        Li = np.zeros((n, max_length), dtype='uint32')
        for j, caption in enumerate(img['final_captions']):
            label_length[caption_counter] = min(max_length, len(caption))
            caption_counter += 1
            # Words beyond max_length are silently truncated.
            for k, word in enumerate(caption):
                if k < max_length:
                    Li[j, k] = wtoi[word]

        label_arrays.append(Li)
        label_start_ix[i] = counter
        label_end_ix[i] = counter + n - 1
        counter += n

    L = np.concatenate(label_arrays, axis=0)
    assert L.shape[0] == num_captions, "lengths don't match? that's weird"
    assert np.all(label_length > 0), 'error: some caption had no words?'
    print('encoded captions to array of size ', L.shape)
    return (L, label_start_ix, label_end_ix, label_length)
|
def main(params):
    """Preprocess captions: build the vocab, write label arrays to HDF5 and
    the vocabulary / per-image metadata to JSON."""
    imgs = json.load(open(params['input_json'], 'r'))['images']
    seed(123)  # deterministic ordering

    vocab = build_vocab(imgs, params)
    # 1-indexed mappings; index 0 is implicitly the padding slot.
    itow = {i + 1: w for i, w in enumerate(vocab)}
    wtoi = {w: i + 1 for i, w in enumerate(vocab)}

    L, label_start_ix, label_end_ix, label_length = encode_captions(imgs, params, wtoi)

    # Write the encoded labels to <output_h5>_label.h5.
    f_lb = h5py.File(params['output_h5'] + '_label.h5', 'w')
    f_lb.create_dataset('labels', dtype='uint32', data=L)
    f_lb.create_dataset('label_start_ix', dtype='uint32', data=label_start_ix)
    f_lb.create_dataset('label_end_ix', dtype='uint32', data=label_end_ix)
    f_lb.create_dataset('label_length', dtype='uint32', data=label_length)
    f_lb.close()

    # Assemble the metadata JSON (vocab + per-image records).
    out = {'ix_to_word': itow, 'images': []}
    for i, img in enumerate(imgs):
        jimg = {}
        jimg['split'] = img['split']
        if 'filename' in img:
            jimg['file_path'] = os.path.join(img.get('filepath', ''), img['filename'])
        if 'cocoid' in img:
            jimg['id'] = img['cocoid']
        elif 'imgid' in img:
            jimg['id'] = img['imgid']
        if params['images_root'] != '':
            # Record actual image dimensions when the images are available.
            with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img:
                jimg['width'], jimg['height'] = _img.size
        out['images'].append(jimg)

    json.dump(out, open(params['output_json'], 'w'))
    print('wrote ', params['output_json'])
|
def get_doc_freq(refs, params):
    """Compute n-gram document frequencies over *refs* with a CiderScorer.

    Returns (document_frequency, number of cooked reference sets). *params*
    is unused but kept for interface compatibility.
    """
    scorer = CiderScorer(df_mode='corpus')
    for ref in refs:
        scorer.cook_append(None, ref)
    scorer.compute_doc_freq()
    return (scorer.document_frequency, len(scorer.crefs))
|
def build_dict(imgs, wtoi, params):
    """Collect reference captions for the requested split — both as word
    strings and as vocab-index strings — and compute their n-gram document
    frequencies. Out-of-vocab tokens become 'UNK'; '<eos>' (id 0) is appended
    to every caption."""
    wtoi['<eos>'] = 0
    count_imgs = 0
    refs_words = []
    refs_idxs = []
    for img in imgs:
        # 'restval' images are treated as part of the training split.
        in_split = ((params['split'] == img['split'])
                    or (params['split'] == 'train' and img['split'] == 'restval')
                    or (params['split'] == 'all'))
        if not in_split:
            continue
        ref_words = []
        ref_idxs = []
        for sent in img['sentences']:
            if hasattr(params, 'bpe'):
                # Re-segment tokens with the loaded BPE model.
                sent['tokens'] = params.bpe.segment(' '.join(sent['tokens'])).strip().split(' ')
            tokens = sent['tokens'] + ['<eos>']
            tokens = [t if t in wtoi else 'UNK' for t in tokens]
            ref_words.append(' '.join(tokens))
            ref_idxs.append(' '.join(str(wtoi[t]) for t in tokens))
        refs_words.append(ref_words)
        refs_idxs.append(ref_idxs)
        count_imgs += 1
    print('total imgs:', count_imgs)
    (ngram_words, count_refs) = get_doc_freq(refs_words, params)
    (ngram_idxs, count_refs) = get_doc_freq(refs_idxs, params)
    print('count_refs:', count_refs)
    return (ngram_words, ngram_idxs, count_refs)
|
def main(params):
    # Build CIDEr document-frequency pickles from a preprocessed dataset.
    # NOTE(review): the open() handles here are never closed explicitly, and
    # the NamedTemporaryFile below (delete=False) is never removed — one temp
    # file leaks per run.
    imgs = json.load(open(params['input_json'], 'r'))
    dict_json = json.load(open(params['dict_json'], 'r'))
    itow = dict_json['ix_to_word']
    # Invert ix->word; indices stay strings, as loaded from JSON.
    wtoi = {w: i for (i, w) in itow.items()}
    if ('bpe' in dict_json):
        import tempfile
        import codecs
        # Persist the BPE codes to a real file so apply_bpe.BPE can read them.
        codes_f = tempfile.NamedTemporaryFile(delete=False)
        codes_f.close()
        with open(codes_f.name, 'w') as f:
            f.write(dict_json['bpe'])
        with codecs.open(codes_f.name, encoding='UTF-8') as codes:
            bpe = apply_bpe.BPE(codes)
        # Picked up by build_dict via hasattr(params, 'bpe').
        # NOTE(review): params is subscripted like a dict above but gets an
        # attribute assigned here — confirm the caller passes an object that
        # supports both.
        params.bpe = bpe
    imgs = imgs['images']
    (ngram_words, ngram_idxs, ref_len) = build_dict(imgs, wtoi, params)
    # Two pickles: document frequencies keyed by words and by vocab indices.
    utils.pickle_dump({'document_frequency': ngram_words, 'ref_len': ref_len}, open((params['output_pkl'] + '-words.p'), 'wb'))
    utils.pickle_dump({'document_frequency': ngram_idxs, 'ref_len': ref_len}, open((params['output_pkl'] + '-idxs.p'), 'wb'))
|
def main(params):
imgs = json.load(open(params['input_json'][0], 'r'))['images']
out = {'info': {'description': 'This is stable 1.0 version of the 2014 MS COCO dataset.', 'url': 'http://mscoco.org', 'version': '1.0', 'year': 2014, 'contributor': 'Microsoft COCO group', 'date_created': '2015-01-27 09:11:52.357475'}, 'licenses': [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}, {'url': 'http://creativecommons.org/licenses/by-nc/2.0/', 'id': 2, 'name': 'Attribution-NonCommercial License'}, {'url': 'http://creativecommons.org/licenses/by-nc-nd/2.0/', 'id': 3, 'name': 'Attribution-NonCommercial-NoDerivs License'}, {'url': 'http://creativecommons.org/licenses/by/2.0/', 'id': 4, 'name': 'Attribution License'}, {'url': 'http://creativecommons.org/licenses/by-sa/2.0/', 'id': 5, 'name': 'Attribution-ShareAlike License'}, {'url': 'http://creativecommons.org/licenses/by-nd/2.0/', 'id': 6, 'name': 'Attribution-NoDerivs License'}, {'url': 'http://flickr.com/commons/usage/', 'id': 7, 'name': 'No known copyright restrictions'}, {'url': 'http://www.usa.gov/copyright.shtml', 'id': 8, 'name': 'United States Government Work'}], 'type': 'captions'}
out.update({'images': [], 'annotations': []})
cnt = 0
empty_cnt = 0
for (i, img) in enumerate(imgs):
if (img['split'] == 'train'):
continue
out['images'].append({'id': img.get('cocoid', img['imgid'])})
for (j, s) in enumerate(img['sentences']):
if (len(s) == 0):
continue
s = ' '.join(s['tokens'])
out['annotations'].append({'image_id': out['images'][(- 1)]['id'], 'caption': s, 'id': cnt})
cnt += 1
json.dump(out, open(params['output_json'], 'w'))
print('wrote ', params['output_json'])
|
def test_folder():
    """Smoke test: build the dataset from saved opts and fetch one train sample
    from folder-based features."""
    infos = pickle_load(open('log_trans/infos_trans.pkl', 'rb'))
    dataset = CaptionDataset(infos['opt'])
    train_split = torch.utils.data.Subset(dataset, dataset.split_ix['train'])
    train_split[0]
|
def test_lmdb():
    """Smoke test: same as test_folder but with attention features read from LMDB."""
    infos = pickle_load(open('log_trans/infos_trans.pkl', 'rb'))
    infos['opt'].input_att_dir = 'data/vilbert_att.lmdb'
    dataset = CaptionDataset(infos['opt'])
    train_split = torch.utils.data.Subset(dataset, dataset.split_ix['train'])
    train_split[0]
|
def add_summary_value(writer, key, value, iteration):
    """Log a scalar to TensorBoard when a writer is available; no-op otherwise."""
    if not writer:
        return
    writer.add_scalar(key, value, iteration)
|
def train(opt):
    """Main training loop for the captioning model.

    Restores model/optimizer/loader state from opt.start_from when present,
    then alternates batch updates with periodic evaluation + checkpointing.
    A RuntimeError or KeyboardInterrupt triggers a final checkpoint save.

    BUG FIX: best_val_score was only bound when opt.load_best_score == 1 but
    read unconditionally at checkpoint time, raising UnboundLocalError; it is
    now always initialized.
    """
    loader = DataLoader(opt)
    opt.vocab_size = loader.vocab_size
    opt.seq_length = loader.seq_length

    # ---- restore bookkeeping (infos / histories) from a previous run ----
    infos = {'iter': 0, 'epoch': 0, 'loader_state_dict': None, 'vocab': loader.get_vocab()}
    if (opt.start_from is not None) and os.path.isfile(os.path.join(opt.start_from, ('infos_' + opt.id) + '.pkl')):
        with open(os.path.join(opt.start_from, ('infos_' + opt.id) + '.pkl'), 'rb') as f:
            infos = utils.pickle_load(f)
            saved_model_opt = infos['opt']
            # These architecture options must match the checkpoint exactly.
            need_be_same = ['caption_model', 'rnn_type', 'rnn_size', 'num_layers']
            for checkme in need_be_same:
                assert getattr(saved_model_opt, checkme) == getattr(opt, checkme), ("Command line argument and saved model disagree on '%s' " % checkme)
    infos['opt'] = opt

    histories = defaultdict(dict)
    if (opt.start_from is not None) and os.path.isfile(os.path.join(opt.start_from, ('histories_' + opt.id) + '.pkl')):
        with open(os.path.join(opt.start_from, ('histories_' + opt.id) + '.pkl'), 'rb') as f:
            histories.update(utils.pickle_load(f))

    tb_summary_writer = SummaryWriter(opt.checkpoint_path)

    # ---- build model (+ loss wrapper) and optionally reload weights ----
    opt.vocab = loader.get_vocab()
    model = models.setup(opt).cuda()
    del opt.vocab  # keep opt small/picklable in checkpoints
    if (opt.start_from is not None) and os.path.isfile(os.path.join(opt.start_from, 'model.pth')):
        model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))
    lw_model = LossWrapper(model, opt)
    dp_model = torch.nn.DataParallel(model)
    dp_model.vocab = getattr(model, 'vocab', None)
    dp_lw_model = torch.nn.DataParallel(lw_model)

    # ---- optimizer ----
    if opt.noamopt:
        assert opt.caption_model in ['transformer', 'bert', 'm2transformer'], 'noamopt can only work with transformer'
        optimizer = utils.get_std_opt(model, optim_func=opt.optim, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)
    elif opt.reduce_on_plateau:
        optimizer = utils.build_optimizer(model.parameters(), opt)
        optimizer = utils.ReduceLROnPlateau(optimizer, factor=opt.reduce_on_plateau_factor, patience=opt.reduce_on_plateau_patience)
    else:
        optimizer = utils.build_optimizer(model.parameters(), opt)
    if (opt.start_from is not None) and os.path.isfile(os.path.join(opt.start_from, 'optimizer.pth')):
        optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer.pth')))

    iteration = infos['iter']
    epoch = infos['epoch']
    if 'iterators' in infos:
        # Backward compatibility with checkpoints that stored raw iterators.
        infos['loader_state_dict'] = {split: {'index_list': infos['split_ix'][split], 'iter_counter': infos['iterators'][split]} for split in ['train', 'val', 'test']}
    loader.load_state_dict(infos['loader_state_dict'])
    best_val_score = None  # always defined (see BUG FIX note above)
    if opt.load_best_score == 1:
        best_val_score = infos.get('best_val_score', None)
    if opt.noamopt:
        optimizer._step = iteration

    epoch_done = True
    dp_lw_model.train()
    try:
        while True:
            if (epoch >= opt.max_epochs) and (opt.max_epochs != -1):
                break
            if epoch_done:
                # Per-epoch schedule updates: LR decay, scheduled sampling,
                # and the self-critical / structure-loss / drop-worst flags.
                if (not opt.noamopt) and (not opt.reduce_on_plateau):
                    if (epoch > opt.learning_rate_decay_start) and (opt.learning_rate_decay_start >= 0):
                        frac = (epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every
                        decay_factor = opt.learning_rate_decay_rate ** frac
                        opt.current_lr = opt.learning_rate * decay_factor
                    else:
                        opt.current_lr = opt.learning_rate
                    utils.set_lr(optimizer, opt.current_lr)
                if (epoch > opt.scheduled_sampling_start) and (opt.scheduled_sampling_start >= 0):
                    frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
                    opt.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)
                    model.ss_prob = opt.ss_prob
                if (opt.self_critical_after != -1) and (epoch >= opt.self_critical_after):
                    sc_flag = True
                    init_scorer(opt.cached_tokens)
                else:
                    sc_flag = False
                if (opt.structure_after != -1) and (epoch >= opt.structure_after):
                    struc_flag = True
                    init_scorer(opt.cached_tokens)
                else:
                    struc_flag = False
                if (opt.drop_worst_after != -1) and (epoch >= opt.drop_worst_after):
                    drop_worst_flag = True
                else:
                    drop_worst_flag = False
                epoch_done = False
            start = time.time()
            if opt.use_warmup and (iteration < opt.noamopt_warmup):
                # Linear LR warmup.
                opt.current_lr = opt.learning_rate * (iteration + 1) / opt.noamopt_warmup
                utils.set_lr(optimizer, opt.current_lr)
            data = loader.get_batch('train')
            print('Read data:', time.time() - start)
            torch.cuda.synchronize()
            start = time.time()
            tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]
            tmp = [_ if _ is None else _.cuda() for _ in tmp]
            (fc_feats, att_feats, labels, masks, att_masks) = tmp
            optimizer.zero_grad()
            model_out = dp_lw_model(fc_feats, att_feats, labels, masks, att_masks, data['gts'], torch.arange(0, len(data['gts'])), sc_flag, struc_flag, drop_worst_flag)
            if not drop_worst_flag:
                loss = model_out['loss'].mean()
            else:
                # Drop the worst (highest-loss) fraction of the batch.
                loss = model_out['loss']
                loss = torch.topk(loss, k=int(loss.shape[0] * (1 - opt.drop_worst_rate)), largest=False)[0].mean()
            loss.backward()
            if opt.grad_clip_value != 0:
                getattr(torch.nn.utils, 'clip_grad_%s_' % opt.grad_clip_mode)(model.parameters(), opt.grad_clip_value)
            optimizer.step()
            train_loss = loss.item()
            torch.cuda.synchronize()
            end = time.time()
            if struc_flag:
                print('iter {} (epoch {}), train_loss = {:.3f}, lm_loss = {:.3f}, struc_loss = {:.3f}, time/batch = {:.3f}'.format(iteration, epoch, train_loss, model_out['lm_loss'].mean().item(), model_out['struc_loss'].mean().item(), end - start))
            elif not sc_flag:
                print('iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}'.format(iteration, epoch, train_loss, end - start))
            else:
                print('iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}'.format(iteration, epoch, model_out['reward'].mean(), end - start))
            iteration += 1
            if data['bounds']['wrapped']:
                epoch += 1
                epoch_done = True
            if (iteration % opt.losses_log_every) == 0:
                # Tensorboard + in-memory history logging.
                tb_summary_writer.add_scalar('train_loss', train_loss, iteration)
                if opt.noamopt:
                    opt.current_lr = optimizer.rate()
                elif opt.reduce_on_plateau:
                    opt.current_lr = optimizer.current_lr
                tb_summary_writer.add_scalar('learning_rate', opt.current_lr, iteration)
                tb_summary_writer.add_scalar('scheduled_sampling_prob', model.ss_prob, iteration)
                if sc_flag:
                    tb_summary_writer.add_scalar('avg_reward', model_out['reward'].mean(), iteration)
                elif struc_flag:
                    tb_summary_writer.add_scalar('lm_loss', model_out['lm_loss'].mean().item(), iteration)
                    tb_summary_writer.add_scalar('struc_loss', model_out['struc_loss'].mean().item(), iteration)
                    tb_summary_writer.add_scalar('reward', model_out['reward'].mean().item(), iteration)
                    tb_summary_writer.add_scalar('reward_var', model_out['reward'].var(1).mean(), iteration)
                histories['loss_history'][iteration] = train_loss if not sc_flag else model_out['reward'].mean()
                histories['lr_history'][iteration] = opt.current_lr
                histories['ss_prob_history'][iteration] = model.ss_prob
            # Keep infos current so an exception-time save is up to date.
            infos['iter'] = iteration
            infos['epoch'] = epoch
            infos['loader_state_dict'] = loader.state_dict()
            if ((iteration % opt.save_checkpoint_every == 0) and (not opt.save_every_epoch)) or (epoch_done and opt.save_every_epoch):
                # ---- evaluate on the val split and checkpoint ----
                eval_kwargs = {'split': 'val', 'dataset': opt.input_json}
                eval_kwargs.update(vars(opt))
                (val_loss, predictions, lang_stats) = eval_utils.eval_split(dp_model, lw_model.crit, loader, eval_kwargs)
                if opt.reduce_on_plateau:
                    if 'CIDEr' in lang_stats:
                        optimizer.scheduler_step(-lang_stats['CIDEr'])
                    else:
                        optimizer.scheduler_step(val_loss)
                tb_summary_writer.add_scalar('validation loss', val_loss, iteration)
                if lang_stats is not None:
                    for (k, v) in lang_stats.items():
                        tb_summary_writer.add_scalar(k, v, iteration)
                histories['val_result_history'][iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
                # CIDEr when language eval is on, otherwise negative val loss.
                if opt.language_eval == 1:
                    current_score = lang_stats['CIDEr']
                else:
                    current_score = -val_loss
                best_flag = False
                if (best_val_score is None) or (current_score > best_val_score):
                    best_val_score = current_score
                    best_flag = True
                infos['best_val_score'] = best_val_score
                utils.save_checkpoint(opt, model, infos, optimizer, histories)
                if opt.save_history_ckpt:
                    utils.save_checkpoint(opt, model, infos, optimizer, append=(str(epoch) if opt.save_every_epoch else str(iteration)))
                if best_flag:
                    utils.save_checkpoint(opt, model, infos, optimizer, append='best')
    except (RuntimeError, KeyboardInterrupt):
        print('Save ckpt on exception ...')
        utils.save_checkpoint(opt, model, infos, optimizer)
        print('Save ckpt done.')
        stack_trace = traceback.format_exc()
        print(stack_trace)
|
class Dataset(torch.utils.data.Dataset):
    """Patient-level dataset pairing confounders with treatment and outcome.

    Treated patients come first (treatment=1), followed by controls
    (treatment=0). Each item is ((diag, med, sex, age), treatment, outcome)
    where diag/med are multi-hot visit matrices.
    """

    def __init__(self, treated_patient_list, control_patient_list, diag_code_vocab=None, med_code_vocab=None):
        self.treated_patient_list = treated_patient_list
        self.control_patient_list = control_patient_list
        self.diagnoses_visits = []
        self.medication_visits = []
        self.sexes = []
        self.ages = []
        self.outcome = []
        self.treatment = []
        # Treated cohort (label 1) followed by the control cohort (label 0).
        self._append_patients(self.treated_patient_list, 1)
        self._append_patients(self.control_patient_list, 0)
        # BUG FIX: the original ignored externally supplied vocabs and left
        # self.diag_code_vocab / self.med_code_vocab unset when they were
        # passed in; reuse them instead.
        if diag_code_vocab is None:
            self.diag_code_vocab = CodeVocab()
            self.diag_code_vocab.add_patients_visits(self.diagnoses_visits)
        else:
            self.diag_code_vocab = diag_code_vocab
        if med_code_vocab is None:
            self.med_code_vocab = CodeVocab()
            self.med_code_vocab.add_patients_visits(self.medication_visits)
        else:
            self.med_code_vocab = med_code_vocab
        logger.info(('Created Diagnoses Vocab: %s' % self.diag_code_vocab))
        logger.info(('Created Medication Vocab: %s' % self.med_code_vocab))
        self.diag_visit_max_length = max([len(patient_visit) for patient_visit in self.diagnoses_visits])
        self.med_visit_max_length = max([len(patient_visit) for patient_visit in self.medication_visits])
        self.diag_vocab_length = len(self.diag_code_vocab)
        self.med_vocab_length = len(self.med_code_vocab)
        logger.info(('Diagnoses Visit Max Length: %d' % self.diag_visit_max_length))
        logger.info(('Medication Visit Max Length: %d' % self.med_visit_max_length))
        # Min-max normalize ages to [0, 1] (becomes a numpy array).
        self.ages = ((self.ages - np.min(self.ages)) / np.ptp(self.ages))

    def _append_patients(self, patient_list, treatment_label):
        """Unpack one cohort's confounders/outcomes into the parallel lists."""
        for (_, patient_confounder, patient_outcome) in tqdm(patient_list):
            self.outcome.append(patient_outcome)
            self.treatment.append(treatment_label)
            (med_visit, diag_visit, age, sex) = patient_confounder
            self.medication_visits.append(med_visit)
            self.diagnoses_visits.append(diag_visit)
            self.sexes.append(sex)
            self.ages.append(age)

    def _process_visits(self, visits, max_len_visit, vocab):
        """Stack per-visit multi-hot vectors, zero-padded to max_len_visit rows."""
        res = np.zeros((max_len_visit, len(vocab)))
        for (i, visit) in enumerate(visits):
            res[i] = self._process_code(vocab, visit)
        return res

    def _process_code(self, vocab, codes):
        """Multi-hot encode one visit's code list over the vocab."""
        multi_hot = np.zeros(len(vocab), dtype='float')
        for code in codes:
            multi_hot[vocab.code2id[code]] = 1
        return multi_hot

    def __getitem__(self, index):
        diag = self._process_visits(self.diagnoses_visits[index], self.diag_visit_max_length, self.diag_code_vocab)
        med = self._process_visits(self.medication_visits[index], self.med_visit_max_length, self.med_code_vocab)
        sex = self.sexes[index]
        age = self.ages[index]
        outcome = self.outcome[index]
        treatment = self.treatment[index]
        confounder = (diag, med, sex, age)
        return (confounder, treatment, outcome)

    def __len__(self):
        return len(self.diagnoses_visits)
|
class LSTMModel(torch.nn.Module):
    """Attention-LSTM encoder over diagnosis and medication visit sequences,
    producing one IPW (treatment propensity) logit per patient."""

    def __init__(self, diag_vocab_size, med_vocab_size, diag_embedding_size, med_embedding_size, diag_hidden_size, med_hidden_size, hidden_size, end_index, pad_index, bidirectional=True):
        super().__init__()
        self.pad_index = pad_index
        self.end_index = end_index
        # Multi-hot code vectors -> dense embeddings (linear map, no bias).
        self.diag_embedding = torch.nn.Linear(diag_vocab_size, diag_embedding_size, bias=False)
        self.med_embedding = torch.nn.Linear(med_vocab_size, med_embedding_size, bias=False)
        self.diag_encoder = torch.nn.LSTM(diag_embedding_size, diag_hidden_size, batch_first=True, bidirectional=bidirectional)
        self.med_encoder = torch.nn.LSTM(med_embedding_size, med_hidden_size, batch_first=True, bidirectional=bidirectional)
        if bidirectional:
            # Bidirectional LSTMs double the output feature size.
            diag_hidden_size = (diag_hidden_size * 2)
            med_hidden_size = (med_hidden_size * 2)
        # Per-timestep scalar attention scores in (-1, 1).
        self.attention_diag_encoder = torch.nn.Sequential(torch.nn.Linear(diag_hidden_size, 1), torch.nn.Tanh())
        self.attention_med_encoder = torch.nn.Sequential(torch.nn.Linear(med_hidden_size, 1), torch.nn.Tanh())
        # +2 accounts for the scalar sex and age features appended in forward().
        self.lstm2hidden_ipw = torch.nn.Sequential(torch.nn.Linear(((med_hidden_size + diag_hidden_size) + 2), hidden_size), torch.nn.ReLU())
        self.hidden2out_ipw = torch.nn.Linear(hidden_size, 1, bias=False)

    def softmax_masked(self, inputs, mask, dim=1, epsilon=1e-07):
        # Softmax over *dim* where masked-out (mask == 0) positions receive
        # zero weight; epsilon guards against an all-masked division by zero.
        inputs_exp = torch.exp(inputs)
        inputs_exp = (inputs_exp * mask.float())
        inputs_exp_sum = inputs_exp.sum(dim=dim)
        inputs_attention = (inputs_exp / (inputs_exp_sum.unsqueeze(dim) + epsilon))
        return inputs_attention

    def diag_encode(self, inputs):
        # All-zero visit rows are padding; mask them out of the attention.
        inputs_mask = (inputs.sum(dim=(- 1)) != 0).long()
        inputs_emb = self.diag_embedding(inputs.float())
        (outputs, (h, c)) = self.diag_encoder(inputs_emb)
        att_enc = self.attention_diag_encoder(outputs).squeeze((- 1))
        att_normalized = self.softmax_masked(att_enc, inputs_mask)
        # Attention-weighted sums of the LSTM outputs and of the raw inputs.
        hidden = torch.sum((outputs * att_normalized.unsqueeze((- 1))), dim=1)
        original = torch.sum((inputs.float() * att_normalized.unsqueeze((- 1))), dim=1)
        return (hidden, original)

    def med_encode(self, inputs):
        # Identical scheme to diag_encode, applied to the medication sequence.
        inputs_mask = (inputs.sum(dim=(- 1)) != 0).long()
        inputs_emb = self.med_embedding(inputs.float())
        (outputs, (h, c)) = self.med_encoder(inputs_emb)
        att_enc = self.attention_med_encoder(outputs).squeeze((- 1))
        att_normalized = self.softmax_masked(att_enc, inputs_mask)
        hidden = torch.sum((outputs * att_normalized.unsqueeze((- 1))), dim=1)
        original = torch.sum((inputs.float() * att_normalized.unsqueeze((- 1))), dim=1)
        return (hidden, original)

    def forward(self, confounder):
        # confounder: (diag multi-hot visits, med multi-hot visits, sex, age).
        (diag_inputs, med_inputs, sexes, ages) = confounder
        (diag_hidden, diag_original) = self.diag_encode(diag_inputs)
        (med_hidden, med_original) = self.med_encode(med_inputs)
        # Concatenate pooled features with the scalar demographics.
        original = torch.cat((diag_original, med_original, sexes.float().view(sexes.size(0), 1), ages.float().view(ages.size(0), 1)), dim=1)
        hidden = torch.cat((diag_hidden, med_hidden, sexes.float().view(sexes.size(0), 1), ages.float().view(ages.size(0), 1)), dim=1)
        hidden = self.lstm2hidden_ipw(hidden)
        outputs_logits_ipw = self.hidden2out_ipw(hidden)
        # Flatten (batch, 1) logits to (batch,).
        return (outputs_logits_ipw.view(outputs_logits_ipw.size(0)), original)
|
class CodeVocab(object):
    """Bidirectional mapping between medical codes and integer ids.

    The three special codes are always registered first, so '<end>' gets
    id 0, '<pad>' id 1 and '<unk>' id 2.
    """
    END_CODE = '<end>'
    PAD_CODE = '<pad>'
    UNK_CODE = '<unk>'

    def __init__(self):
        super().__init__()
        self.special_codes = [CodeVocab.END_CODE, CodeVocab.PAD_CODE, CodeVocab.UNK_CODE]
        self.code2id = {}
        self.id2code = {}
        if self.special_codes is not None:
            self.add_code_list(self.special_codes)

    def add_code_list(self, code_list, rebuild=True):
        """Register unseen codes, assigning ids in insertion order."""
        for code in code_list:
            self.code2id.setdefault(code, len(self.code2id))
        if rebuild:
            self._rebuild_id2code()

    def add_patients_visits(self, patients_visits):
        """Register every code of every visit of every patient."""
        for patient in patients_visits:
            for visit in patient:
                self.add_code_list(visit)
        self._rebuild_id2code()

    def _rebuild_id2code(self):
        # Refresh the inverse id -> code mapping.
        self.id2code = {idx: code for code, idx in self.code2id.items()}

    def get(self, item, default=None):
        return self.code2id.get(item, default)

    def __getitem__(self, item):
        return self.code2id[item]

    def __contains__(self, item):
        return item in self.code2id

    def __len__(self):
        return len(self.code2id)

    def __str__(self):
        return f'{len(self)} codes'
|
def get_patient_cohort(root_file):
    """Read Cohort.csv from both CAD directories and return per-patient
    first-diagnosis and enrollment-start dates (also pickled to ../pickles).

    Returns (patient_1stDX_date, patient_start_date), both keyed by enrolid.
    """
    patient_1stDX_date = {}
    patient_start_date = {}
    for subdir in ('CAD2012', 'CAD2013-2016'):
        path = ((root_file + subdir) + '/Cohort.csv')
        with open(path, 'r') as f:
            next(f)  # skip header line
            for line in f:
                fields = line.split(',')
                enrolid, dx_date, start_date = fields[0], fields[3], fields[4]
                patient_1stDX_date[enrolid] = datetime.strptime(dx_date, '%m/%d/%Y')
                patient_start_date[enrolid] = datetime.strptime(start_date, '%m/%d/%Y')
    my_dump(patient_1stDX_date, '../pickles/patient_1stDX_data.pkl')
    my_dump(patient_start_date, '../pickles/patient_start_date.pkl')
    return (patient_1stDX_date, patient_start_date)
|
def exclude(cad_prescription_taken_by_patient, patient_1stDX_date, patient_start_date, interval, followup, baseline):
    """Apply the three inclusion criteria and return the surviving mappings.

    Returns (drug -> patient -> sorted dates, patient -> drug -> sorted dates).
    Records missing either the date or the supply-days field are ignored.
    """
    cad_prescription_taken_by_patient_exclude = defaultdict(dict)
    cad_patient_take_prescription_exclude = defaultdict(dict)
    for (drug, taken_by_patient) in cad_prescription_taken_by_patient.items():
        for (patient, take_times) in taken_by_patient.items():
            dates = sorted(datetime.strptime(date, '%m/%d/%Y') for (date, days) in take_times if (date and days))
            # ROBUSTNESS FIX: a patient whose records are all incomplete has
            # no index date; the original crashed on dates[0].
            if not dates:
                continue
            dates_days = {datetime.strptime(date, '%m/%d/%Y'): int(days) for (date, days) in take_times if (date and days)}
            DX = patient_1stDX_date.get(patient, datetime.max)
            index_date = dates[0]
            start_date = patient_start_date.get(patient, datetime.max)
            if (criteria_1_is_valid(index_date, DX) and criteria_2_is_valid(dates, interval, followup, dates_days) and criteria_3_is_valid(index_date, start_date, baseline)):
                cad_prescription_taken_by_patient_exclude[drug][patient] = dates
                cad_patient_take_prescription_exclude[patient][drug] = dates
    return (cad_prescription_taken_by_patient_exclude, cad_patient_take_prescription_exclude)
|
def criteria_1_is_valid(index_date, DX):
    """True when the index date falls strictly after the first diagnosis."""
    return (index_date - DX).days > 0
|
def criteria_2_is_valid(dates, interval, followup, dates_days):
    """True when the prescription span exceeds the follow-up window (minus 89
    days) and no refill gap, net of the previous fill's supply days, exceeds
    *interval*."""
    if (dates[-1] - dates[0]).days <= (followup - 89):
        return False
    for previous, current in zip(dates, dates[1:]):
        if ((current - previous).days - dates_days.get(previous)) > interval:
            return False
    return True
|
def criteria_3_is_valid(index_date, start_date, baseline):
    """True when at least *baseline* days of enrollment precede the index date."""
    return (index_date - start_date).days >= baseline
|
def user_cohort_extractor(cad_prescription_taken_by_patient, n_patients, n_prescriptions, time_interval):
    """Return the subset of drugs whose patient records satisfy the minimal
    cohort criteria.

    BUG FIX: the original wrote qualifying entries back into the *input*
    mapping (a no-op) and therefore always returned an empty result dict;
    qualifying drugs now go into the returned mapping.
    """
    cad_prescription_taken_by_patient_small = defaultdict(dict)
    print('number of drugs: {}'.format(len(cad_prescription_taken_by_patient)), flush=True)
    for (drug, patient_take_times) in cad_prescription_taken_by_patient.items():
        if minimal_criteria_is_valid(patient_take_times, n_patients, time_interval, n_prescriptions):
            cad_prescription_taken_by_patient_small[drug] = patient_take_times
    return cad_prescription_taken_by_patient_small
|
def minimal_criteria_is_valid(patient_take_times, n_patients, time_interval, n_prescriptions):
    """True once at least *n_patients* patients show a valid prescription
    pattern (early exit as soon as the quota is met)."""
    if len(patient_take_times) < n_patients:
        return False
    qualified = 0
    for take_times in patient_take_times.values():
        if drug_time_interval_is_valid(take_times, n_prescriptions, time_interval):
            qualified += 1
        if qualified >= n_patients:
            return True
    return False
|
def drug_time_interval_is_valid(take_times, n_prescription, time_interval):
    """True when enough consecutive-prescription gaps reach *time_interval*.

    Records missing a date or supply-days value are ignored.
    NOTE(review): each qualifying gap adds 2 to the counter compared against
    n_prescription — presumably counting both endpoint prescriptions; confirm
    the intent.
    """
    valid_pairs = [pair for pair in take_times if (pair[0] and pair[1])]
    dates = sorted(datetime.strptime(pair[0], '%m/%d/%Y') for pair in valid_pairs)
    count = 0
    for earlier, later in zip(dates, dates[1:]):
        if (later - earlier).days >= time_interval:
            count += 2
        if count >= n_prescription:
            return True
    return False
|
def my_dump(obj, file_name):
    """Pickle *obj* to *file_name*, printing progress markers."""
    print('dumping...', flush=True)
    # Use a context manager so the handle is closed (and the data flushed)
    # even if pickling raises; the original leaked the open file object.
    with open(file_name, 'wb') as f:
        pickle.dump(obj, f)
    print('dumped...', flush=True)
|
def my_load(file_name):
    """Unpickle and return the object stored at *file_name*."""
    print('loading...', flush=True)
    # Context manager closes the handle; the original leaked the open file.
    with open(file_name, 'rb') as f:
        return pickle.load(f)
|
def get_user_dx(indir, patient_list, icd9_to_css, icd10_to_css):
    """Scan all inpatient/outpatient claim files under *indir* and build
    {enrolid: {date: [css_code, ...]}} for patients in *patient_list*.

    ICD-9 vs ICD-10 mapping is chosen per row from the DXVER column when it
    exists, defaulting to ICD-9.
    """
    user_dx = defaultdict(dict)
    inpatient_dir = os.path.join(indir, 'inpatient')
    inpatient_files = os.listdir(inpatient_dir)
    outpatient_dir = os.path.join(indir, 'outpatient')
    outpatient_files = os.listdir(outpatient_dir)
    files = ([os.path.join(inpatient_dir, file) for file in inpatient_files] + [os.path.join(outpatient_dir, file) for file in outpatient_files])
    DXVER_dict = {'9': icd9_to_css, '0': icd10_to_css}
    for file in files:
        inpat = pd.read_csv(file, dtype=str)
        # The first column whose name contains 'DATE' is the service date.
        DATE_NAME = [col for col in inpat.columns if ('DATE' in col)][0]
        inpat = inpat[inpat['ENROLID'].isin(list(patient_list))]
        inpat = inpat[(~ inpat[DATE_NAME].isnull())]
        # NOTE(review): this substring match also picks up the 'DXVER'
        # column itself when present — confirm that is intended.
        DX_col = [col for col in inpat.columns if ('DX' in col)]
        for (index, row) in tqdm(inpat.iterrows(), total=len(inpat)):
            dxs = list(row[DX_col])
            enrolid = row['ENROLID']
            date = row[DATE_NAME]
            DXVER = '9'  # default to ICD-9 when no DXVER column exists
            if dxs:
                # NOTE(review): dxs is non-empty whenever DX columns exist,
                # even if every value is NaN (NaNs are filtered later inside
                # get_css_code_for_icd).
                if ('DXVER' in inpat.columns):
                    DXVER = row['DXVER']
                dxs = get_css_code_for_icd(dxs, DXVER_dict[DXVER])
                if (enrolid not in user_dx):
                    user_dx[enrolid][date] = dxs
                elif (date not in user_dx[enrolid]):
                    user_dx[enrolid][date] = dxs
                else:
                    user_dx[enrolid][date].extend(dxs)
    return user_dx
|
def get_css_code_for_icd(icd_codes, icd_to_css):
    """Map each ICD code to its CSS category via longest-prefix match.

    NaN entries and codes with no matching prefix are dropped from the result.
    """
    css_codes = []
    for icd_code in icd_codes:
        if pd.isnull(icd_code):
            continue
        # Try the longest prefix first, shrinking one character at a time
        # (down to and including the empty prefix).
        for cut in range(len(icd_code), -1, -1):
            prefix = icd_code[:cut]
            if prefix in icd_to_css:
                css_codes.append(icd_to_css.get(prefix))
                break
    return css_codes
|
def pre_user_cohort_dx(user_dx, cad_prescription_taken_by_patient, min_patients):
    """Collect, per drug and patient, the sets of diagnosis codes recorded
    strictly before the patient's index date (first prescription).

    Only drugs taken by at least *min_patients* patients are considered.
    Returns an AutoVivification of drug -> patient -> date -> set(codes).
    """
    user_cohort_dx = AutoVivification()
    for (drug, taken_by_patient) in tqdm(cad_prescription_taken_by_patient.items()):
        if (len(taken_by_patient.keys()) >= min_patients):
            for (patient, taken_times) in taken_by_patient.items():
                index_date = taken_times[0]
                date_codes = user_dx.get(patient)
                # ROBUSTNESS FIX: patients with no diagnosis records crashed
                # the original on None.items().
                if date_codes is None:
                    continue
                for (date, codes) in date_codes.items():
                    date = datetime.strptime(date, '%m/%d/%Y')
                    if (date < index_date):
                        if (drug not in user_cohort_dx):
                            user_cohort_dx[drug][patient][date] = set(codes)
                        elif (patient not in user_cohort_dx[drug]):
                            user_cohort_dx[drug][patient][date] = set(codes)
                        elif (date not in user_cohort_dx[drug][patient]):
                            user_cohort_dx[drug][patient][date] = set(codes)
                        else:
                            # BUG FIX: set.union returns a new set and the
                            # original discarded it; mutate in place instead.
                            user_cohort_dx[drug][patient][date].update(codes)
    return user_cohort_dx
|
class AutoVivification(dict):
    """Dict that transparently creates nested instances of itself on missing
    keys (perl-style autovivification)."""

    def __getitem__(self, item):
        if item in self:
            return dict.__getitem__(self, item)
        child = type(self)()
        self[item] = child
        return child
|
def get_user_cohort_dx(indir, cad_prescription_taken_by_patient, icd9_to_css, icd10_to_css, min_patient, patient_list):
    # Convenience wrapper: read raw diagnosis claims, then restrict them to
    # each drug cohort's pre-index-date baseline window.
    user_dx = get_user_dx(indir, patient_list, icd9_to_css, icd10_to_css)
    return pre_user_cohort_dx(user_dx, cad_prescription_taken_by_patient, min_patient)
|
def pre_user_cohort_rx(cad_prescription_taken_by_patient, cad_patient_take_prescription, min_patients):
    """For each drug with at least *min_patients* patients, list the other
    prescriptions each patient took during baseline (before the drug's index
    date). Returns drug -> patient -> [prescription, ...]."""
    cad_user_cohort_rx = defaultdict(dict)
    for (drug, taken_by_patient) in tqdm(cad_prescription_taken_by_patient.items()):
        if len(taken_by_patient.keys()) < min_patients:
            continue
        for (patient, take_dates) in taken_by_patient.items():
            index_date = take_dates[0]
            # NOTE(review): .get may return None and crash below; presumably
            # every patient here also appears in cad_patient_take_prescription.
            patient_prescription_list = cad_patient_take_prescription.get(patient)
            for (prescription, dates_days) in patient_prescription_list.items():
                parsed_dates = sorted(datetime.strptime(date, '%m/%d/%Y') for (date, days) in dates_days)
                if not drug_is_taken_in_baseline(index_date, parsed_dates):
                    continue
                if drug not in cad_user_cohort_rx:
                    cad_user_cohort_rx[drug][patient] = [prescription]
                elif patient not in cad_user_cohort_rx[drug]:
                    cad_user_cohort_rx[drug][patient] = [prescription]
                else:
                    cad_user_cohort_rx[drug][patient].append(prescription)
    return cad_user_cohort_rx
|
def get_prescription_taken_times(index_date, dates, dates_2_days):
    """Count leading dates whose supply ran out strictly before *index_date*.

    Stops at the first date that does not qualify (dates are assumed ordered).
    """
    taken = 0
    for date in dates:
        if ((index_date - date).days - dates_2_days[date]) <= 0:
            break
        taken += 1
    return taken
|
def drug_is_taken_in_baseline(index_date, dates):
    """True when any date falls strictly before *index_date*."""
    return any((index_date - date).days > 0 for date in dates)
|
def pre_user_cohort_rx_v2(cad_prescription_taken_by_patient, cad_patient_take_prescription, min_patients):
    """Like pre_user_cohort_rx, but keyed additionally by take date.

    Result layout: drug -> patient -> date -> [prescriptions]. Only drugs with
    at least `min_patients` users contribute, and only dates that fall in the
    patient's baseline (strictly before the index date) are kept.
    """
    cohort_rx = AutoVivification()
    for drug, taken_by_patient in tqdm(cad_prescription_taken_by_patient.items()):
        if len(taken_by_patient) < min_patients:
            continue
        for patient, take_dates in taken_by_patient.items():
            index_date = take_dates[0]
            for prescription, dates_days in cad_patient_take_prescription.get(patient).items():
                baseline_dates = drug_is_taken_in_baseline_v2(index_date, sorted(dates_days))
                if not baseline_dates:
                    continue
                for date in baseline_dates:
                    # autovivification creates the drug/patient levels on demand;
                    # setdefault collapses the original create-or-append branches
                    cohort_rx[drug][patient].setdefault(date, []).append(prescription)
    return cohort_rx
|
def drug_is_taken_in_baseline_v2(index_date, dates):
    """Return the dates strictly before the index date, or False if there are none."""
    baseline = [d for d in dates if (index_date - d).days > 0]
    return baseline if baseline else False
|
class AutoVivification(dict):
    """Nested dict that materializes missing keys on access (perl-style).

    NOTE(review): re-declares the AutoVivification defined earlier in this
    module with identical behavior; this later definition shadows the first.
    """

    def __getitem__(self, item):
        # Create (and store) an empty child mapping when the key is absent so
        # chained lookups like d[a][b][c] always succeed.
        if item in self:
            return dict.__getitem__(self, item)
        child = type(self)()
        self[item] = child
        return child
|
def pre_user_cohort_demo(indir, patient_list):
    """Read `<indir>/demo.csv` and return {patient_id: (birth_year, sex)}.

    Only rows whose id is in `patient_list` are kept; the header row is skipped.
    """
    user_cohort_demo = {}
    demo_file = '{}/demo.csv'.format(indir)
    with open(demo_file, 'r') as f:
        next(f)  # skip header
        for row in f:
            # Strip the trailing newline before splitting (consistent with
            # pre_drug_table); otherwise the last field keeps a '\n' when the
            # file has exactly three columns.
            fields = row.strip('\n').split(',')
            # `patient_id` instead of `id` — avoid shadowing the builtin
            (patient_id, birth_year, sex) = (fields[0], fields[1], fields[2])
            if patient_id in patient_list:
                user_cohort_demo[patient_id] = (birth_year, sex)
    return user_cohort_demo
|
def get_user_cohort_demo(indir, patient_list):
    """Return {patient_id: (birth_year, sex)} for the given patients.

    Thin wrapper around pre_user_cohort_demo, kept for naming symmetry with
    the other get_user_cohort_* helpers.
    """
    return pre_user_cohort_demo(indir, patient_list)
|
def parse_args():
    """Build and parse the command-line arguments for the drug-table step."""
    parser = argparse.ArgumentParser(description='process parameters')
    parser.add_argument(
        '--input_data_dir',
        default='../data/synthetic/drug',
        help='input data directory',
    )
    parser.add_argument(
        '--output_data_dir',
        default='pickles/cad_prescription_taken_by_patient.pkl',
        help='output data directory',
    )
    return parser.parse_args()
|
def ndc2rxing():
    """Load the NDC -> RXNORM code mapping from the bundled CSV (columns 1 and 2)."""
    mapping = np.loadtxt(
        fname='../data/NDC_complete_mapping.csv',
        delimiter=',',
        dtype='str',
        skiprows=1,
        usecols=(1, 2),
    )
    # Each row is (rx, ndc); invert it so NDC codes key the lookup.
    return {row[1]: row[0] for row in mapping}
|
def pre_drug_table(args):
    """Build {rx_code: {enroll_id: {(date, day), ...}}} from the raw drug CSVs.

    Every file in args.input_data_dir is scanned; NDC codes are translated to
    RXNORM via ndc2rxing(), rows missing a date or day count are dropped, and
    the resulting table is pickled to args.output_data_dir.
    """
    prescription_taken_by_patient = defaultdict(dict)
    ndc2rx_mapping = ndc2rxing()
    for file in os.listdir(args.input_data_dir):
        print('dir: {}\tfile: {}'.format(args.input_data_dir, file), flush=True)
        with open(os.path.join(args.input_data_dir, file), 'r') as f:
            next(f)  # skip header
            for row in f:
                fields = row.strip('\n').split(',')
                (enroll_id, prescription, date, day) = (fields[0], fields[1], fields[2], fields[3])
                if not (date and day):
                    continue
                if prescription not in ndc2rx_mapping:
                    continue
                rx = ndc2rx_mapping.get(prescription)
                # setdefault collapses the original create-or-add branches
                prescription_taken_by_patient[rx].setdefault(enroll_id, set()).add((date, day))
    try:
        print('dumping...', flush=True)
        pickle.dump(prescription_taken_by_patient, open(args.output_data_dir, 'wb'))
    except Exception as e:
        print(e)
    print('finish dump', flush=True)
    print('# of Drugs: {}\t'.format(len(prescription_taken_by_patient)))
    return prescription_taken_by_patient
|
def drug_time_interval_is_valid(take_times, n_prescription, time_interval):
    """Return True once at least `n_prescription` consecutive-date gaps of
    `time_interval` days or more are found among the (date, day) pairs."""
    valid_dates = sorted(
        datetime.strptime(date_str, '%m/%d/%Y')
        for date_str, day in take_times
        if date_str and day
    )
    qualifying = 0
    for earlier, later in zip(valid_dates, valid_dates[1:]):
        if (later - earlier).days >= time_interval:
            qualifying += 1
        # checked every iteration (not only after an increment), mirroring the
        # original control flow exactly
        if qualifying >= n_prescription:
            return True
    return False
|
def is_valid_outcome_range(dx, code_range):
    """Return True if the diagnosis code starts with any prefix in `code_range`."""
    # str.startswith accepts a tuple of prefixes; empty tuple yields False,
    # matching the original loop's behavior.
    return dx.startswith(tuple(code_range))
|
def pre_user_cohort_outcome(indir, patient_list, codes9, codes0):
    """Scan inpatient/outpatient claim files and collect outcome dates per patient.

    A date is recorded for a patient whenever any diagnosis code on a claim row
    starts with one of the given prefixes (`codes9` for ICD-9, `codes0` for
    ICD-10, selected by the row's DXVER flag).

    Returns a defaultdict mapping ENROLID -> list of raw date strings.
    """
    cad_user_cohort_outcome = defaultdict(list)
    inpatient_dir = os.path.join(indir, 'inpatient')
    inpatient_files = os.listdir(inpatient_dir)
    outpatient_dir = os.path.join(indir, 'outpatient')
    outpatient_files = os.listdir(outpatient_dir)
    files = ([os.path.join(inpatient_dir, file) for file in inpatient_files] + [os.path.join(outpatient_dir, file) for file in outpatient_files])
    DXVER_dict = {'9': codes9, '0': codes0}
    for file in files:
        inpat = pd.read_csv(file, dtype=str)
        # The first column whose name contains 'DATE' is treated as the claim date.
        DATE_NAME = [col for col in inpat.columns if ('DATE' in col)][0]
        inpat = inpat[inpat['ENROLID'].isin(list(patient_list))]
        inpat = inpat[(~ inpat[DATE_NAME].isnull())]
        # NOTE(review): 'DX' in col also matches a 'DXVER' column if present, so
        # the version flag value itself gets scanned as a code — verify intended.
        DX_col = [col for col in inpat.columns if ('DX' in col)]
        for (index, row) in tqdm(inpat.iterrows(), total=len(inpat)):
            dxs = list(row[DX_col])
            enrolid = row['ENROLID']
            date = row[DATE_NAME]
            # Default to ICD-9 when the file carries no DXVER column.
            DXVER = '9'
            if dxs:
                if ('DXVER' in inpat.columns):
                    DXVER = row['DXVER']
                for dx in dxs:
                    if (not pd.isnull(dx)):
                        if is_valid_outcome_range(dx, DXVER_dict[DXVER]):
                            cad_user_cohort_outcome[enrolid].append(date)
    return cad_user_cohort_outcome
|
def parse_args():
    """Build and parse the command-line arguments for cohort construction."""
    parser = argparse.ArgumentParser(description='process parameters')
    parser.add_argument('--min_patients', default=500, type=int,
                        help='minimum number of patients for each cohort.')
    parser.add_argument('--min_prescription', default=2, type=int,
                        help='minimum times of prescriptions of each drug.')
    parser.add_argument('--time_interval', default=30, type=int,
                        help='minimum time interval for every two prescriptions')
    parser.add_argument('--followup', default=730, type=int,
                        help='number of days of followup period')
    parser.add_argument('--baseline', default=365, type=int,
                        help='number of days of baseline period')
    parser.add_argument('--input_data', default='../data/CAD')
    parser.add_argument('--pickles', default='pickles')
    parser.add_argument('--outcome_icd9', default='outcome_icd9.txt',
                        help='outcome definition with ICD-9 codes')
    parser.add_argument('--outcome_icd10', default='outcome_icd10.txt',
                        help='outcome definition with ICD-10 codes')
    parser.add_argument('--save_cohort_all', default='save_cohort_all/')
    return parser.parse_args()
|
def get_patient_list(min_patient, cad_prescription_taken_by_patient):
    """Return the set of patients belonging to any drug with >= min_patient users."""
    return {
        patient
        for patients in cad_prescription_taken_by_patient.values()
        if len(patients) >= min_patient
        for patient in patients
    }
|
def main(args):
    """End-to-end cohort construction driver.

    Loads the pickled prescription table and ICD->CCS mappings, applies the
    exclusion criteria, derives per-drug Rx/Dx/demographic cohorts plus
    heart-failure and stroke outcome dates, and writes the final triples to
    args.save_cohort_all.
    """
    print('Loading prescription data...')
    cad_prescription_taken_by_patient = pickle.load(open(os.path.join(args.pickles, 'cad_prescription_taken_by_patient.pkl'), 'rb'))
    (patient_1stDX_date, patient_start_date) = get_patient_init_date(args.input_data, args.pickles)
    icd9_to_css = pickle.load(open(os.path.join(args.pickles, 'icd9_to_css.pkl'), 'rb'))
    icd10_to_css = pickle.load(open(os.path.join(args.pickles, 'icd10_to_css.pkl'), 'rb'))
    print('Preprocessing patient data...')
    (save_prescription, save_patient) = exclude(cad_prescription_taken_by_patient, patient_1stDX_date, patient_start_date, args.time_interval, args.followup, args.baseline)
    patient_list = get_patient_list(args.min_patients, save_prescription)
    save_cohort_rx = pre_user_cohort_rx_v2(save_prescription, save_patient, args.min_patients)
    save_cohort_dx = get_user_cohort_dx(args.input_data, save_prescription, icd9_to_css, icd10_to_css, args.min_patients, patient_list)
    save_cohort_demo = get_user_cohort_demo(args.input_data, patient_list)
    save_cohort_outcome = {}
    # ICD-9 / ICD-10 prefixes defining the heart-failure outcome.
    # NOTE(review): 'K77' appears in both lists — verify it belongs in the ICD-9 set.
    codes9 = ['425', '428', '40201', '40211', '40291', '40401', '40403', '40411', '40413', '40491', '40493', 'K77']
    codes0 = ['I11', 'I13', 'I50', 'I42', 'K77']
    save_cohort_outcome['heart-failure'] = pre_user_cohort_outcome(args.input_data, patient_list, codes9, codes0)
    # ICD-9 / ICD-10 prefixes defining the stroke outcome.
    codes9 = ['4380', '4381', '4382', '4383', '4384', '4385', '4386', '4387', '4388', '4389', 'V1254']
    codes0 = ['Z8673', 'I60', 'I61', 'I62', 'I63', 'I64', 'I65', 'I66', 'I67', 'I68', 'I69', 'G458', 'G459']
    save_cohort_outcome['stroke'] = pre_user_cohort_outcome(args.input_data, patient_list, codes9, codes0)
    print('Generating patient cohort...')
    pre_user_cohort_triplet(save_prescription, save_cohort_rx, save_cohort_dx, save_cohort_outcome, save_cohort_demo, args.save_cohort_all)
|
def pre_user_cohort_triplet(cad_prescription_taken_by_patient, cad_user_cohort_rx, cad_user_cohort_dx, save_cohort_outcome, cad_user_cohort_demo, out_file_root):
    """Assemble and pickle per-drug training triples.

    For each (drug, patient) pair builds
        (patient, [rx_codes, dx_codes, age, sex], outcome)
    where rx_codes/dx_codes are date-ordered lists and outcome is 1 if any
    tracked outcome occurs within the follow-up window after the index date.
    One pickle per drug is written under `out_file_root`, plus a
    cohorts_size.pkl mapping '<drug>.pkl' -> number of triples.
    """
    cohorts_size = dict()
    for (drug, taken_by_patient) in tqdm(cad_user_cohort_rx.items()):
        file_x = '{}/{}.pkl'.format(out_file_root, drug)
        triples = []
        for (patient, taken_times) in taken_by_patient.items():
            # index date = first recorded take date of this drug for the patient
            index_date = cad_prescription_taken_by_patient.get(drug).get(patient)[0]
            dx = cad_user_cohort_dx.get(drug).get(patient)
            demo = cad_user_cohort_demo.get(patient)
            demo_feature_vector = get_demo_feature_vector(demo, index_date)
            outcome_feature_vector = []
            for (outcome_name, outcome_map) in save_cohort_outcome.items():
                outcome_dates = outcome_map.get(patient, [])
                dates = [datetime.strptime(date.strip('\n'), '%m/%d/%Y') for date in outcome_dates if date]
                dates = sorted(dates)
                outcome_feature_vector.append(get_outcome_feature_vector(dates, index_date))
            # patient is labeled positive if any single tracked outcome fired
            outcome = max(outcome_feature_vector)
            (rx_codes, dx_codes) = ([], [])
            if taken_times:
                # chronological order by take date
                rx_codes = [rx_code for (date, rx_code) in sorted(taken_times.items(), key=(lambda x: x[0]))]
            if dx:
                dx_codes = [list(dx_code) for (date, dx_code) in sorted(dx.items(), key=(lambda x: x[0]))]
            triple = (patient, [rx_codes, dx_codes, demo_feature_vector[0], demo_feature_vector[1]], outcome)
            triples.append(triple)
        cohorts_size['{}.pkl'.format(drug)] = len(triples)
        pickle.dump(triples, open(file_x, 'wb'))
    pickle.dump(cohorts_size, open('{}/cohorts_size.pkl'.format(out_file_root), 'wb'))
|
def get_outcome_feature_vector(dates, index_date, followup_days=730):
    """Return 1 if any outcome date falls within the follow-up window after the
    index date, else 0.

    Args:
        dates: outcome dates (datetime), ideally sorted ascending.
        index_date: cohort entry date.
        followup_days: length of the follow-up window in days. Defaults to 730,
            matching the --followup argument default (previously hard-coded).
    """
    for date in dates:
        if (date > index_date) and ((date - index_date).days <= followup_days):
            return 1
    return 0
|
def get_rx_feature_vector(taken_times, RX2id, size):
    """One-hot encode the prescriptions in `taken_times` into a vector of length `size`."""
    vector = [0] * size
    for rx in taken_times:
        if rx in RX2id:
            vector[RX2id[rx]] = 1
    return vector
|
def get_dx_feature_vector(dx, CCS2id, size):
    """One-hot encode nested diagnosis codes and report codes missing from the map.

    Args:
        dx: iterable of code groups (each an iterable of CCS codes).
        CCS2id: mapping from CCS code to vector index.
        size: length of the output vector.

    Returns:
        (feature_vector, not_find): the one-hot list and the set of codes with
        no CCS2id entry. (The original returned `not_find` but never populated
        it; it is now filled as the name and return value imply.)
    """
    feature_vector = [0] * size
    not_find = set()
    for code_group in dx:
        for code in code_group:
            if code in CCS2id:
                feature_vector[CCS2id[code]] = 1
            else:
                not_find.add(code)
    return (feature_vector, not_find)
|
def get_demo_feature_vector(demo, index_date):
    """Return [age_at_index, sex_code] from a (birth_year, sex) tuple.

    Missing demographics yield [0, 0]; sex is shifted from {1, 2} to {0, 1}.
    """
    if not demo:
        return [0, 0]
    birth_year, sex = demo
    return [index_date.year - int(birth_year), int(sex) - 1]
|
def get_patient_init_date(indir, outdir):
    """Parse `<indir>/Cohort.csv` into first-DX and enrollment-start date maps.

    Both dicts map ENROLID -> datetime and are additionally pickled into
    `outdir` as patient_1stDX_data.pkl / patient_start_date.pkl.
    """
    patient_1stDX_date = {}
    patient_start_date = {}
    file = '{}/Cohort.csv'.format(indir)
    with open(file, 'r') as f:
        next(f)  # skip header
        for row in f:
            # Strip the newline before splitting (consistent with pre_drug_table
            # and the strip('\n') in pre_user_cohort_triplet); otherwise strptime
            # fails with "unconverted data remains" on a 3-column file.
            fields = row.strip('\n').split(',')
            (enrolid, dx_date, start_date) = (fields[0], fields[1], fields[2])
            patient_1stDX_date[enrolid] = datetime.strptime(dx_date, '%m/%d/%Y')
            patient_start_date[enrolid] = datetime.strptime(start_date, '%m/%d/%Y')
    out1 = '{}/patient_1stDX_data.pkl'.format(outdir)
    pickle.dump(patient_1stDX_date, open(out1, 'wb'))
    out2 = '{}/patient_start_date.pkl'.format(outdir)
    pickle.dump(patient_start_date, open(out2, 'wb'))
    return (patient_1stDX_date, patient_start_date)
|
class Dataset(InMemoryDataset):
    """PyG in-memory dataset turning a ratings table into per-interaction graphs.

    Each rating row becomes one graph whose nodes are the user id plus user
    attributes followed by the item id plus item attributes; inner edges
    connect nodes within the user part and within the item part, outer edges
    connect user nodes to item nodes. Collated tensors and split statistics
    are cached under the processed directory.
    """
    def __init__(self, root, dataset, rating_file, sep, args, transform=None, pre_transform=None):
        self.path = root
        self.dataset = dataset
        self.rating_file = rating_file
        self.sep = sep
        self.store_backup = True  # also dump the train/valid/test split as CSVs
        self.args = args
        super(Dataset, self).__init__(root, transform, pre_transform)
        (self.data, self.slices) = torch.load(self.processed_paths[0])
        self.stat_info = torch.load(self.processed_paths[1])
        self.data_num = self.stat_info['data_num']
        self.feature_num = self.stat_info['feature_num']
    @property
    def raw_file_names(self):
        return ['{}{}/user_dict.pkl'.format(self.path, self.dataset), '{}{}/item_dict.pkl'.format(self.path, self.dataset), '{}{}/feature_dict.pkl'.format(self.path, self.dataset), '{}{}/{}'.format(self.path, self.dataset, self.rating_file)]
    @property
    def processed_file_names(self):
        return ['{}/{}.dataset'.format(self.dataset, self.dataset), '{}/{}.statinfo'.format(self.dataset, self.dataset)]
    def download(self):
        pass
    def data_2_graphs(self, ratings_df, dataset='train'):
        """Convert rating rows into graphs; rows with unknown user/item ids are skipped."""
        graphs = []
        processed_graphs = 0
        # BUGFIX: error_num was read below without ever being assigned in this
        # scope, raising UnboundLocalError on the first skipped row.
        error_num = 0
        num_graphs = ratings_df.shape[0]
        # BUGFIX: guard against num_graphs < 1000, which made the progress
        # modulus zero and crashed with ZeroDivisionError.
        one_per = max(1, int(num_graphs / 1000))
        percent = 0.0
        for i in range(len(ratings_df)):
            if ((processed_graphs % one_per) == 0):
                print(f'Processing [{dataset}]: {(percent / 10.0)}%, {processed_graphs}/{num_graphs}', end='\r')
                percent += 1
            processed_graphs += 1
            line = ratings_df.iloc[i]
            user_index = self.user_key_type(line[0])
            item_index = self.item_key_type(line[1])
            rating = int(line[2])
            if ((item_index not in self.item_dict) or (user_index not in self.user_dict)):
                error_num += 1
                continue
            user_id = self.user_dict[user_index]['name']
            item_id = self.item_dict[item_index]['title']
            user_attr_list = self.user_dict[user_index]['attribute']
            item_attr_list = self.item_dict[item_index]['attribute']
            user_list = ([user_id] + user_attr_list)
            item_list = ([item_id] + item_attr_list)
            graph = self.construct_graphs(user_list, item_list, rating)
            graphs.append(graph)
        print()
        return graphs
    def read_data(self):
        """Load the pickled dicts, split ratings 60/20/20, and build all graphs."""
        self.user_dict = pickle.load(open(self.userfile, 'rb'))
        self.item_dict = pickle.load(open(self.itemfile, 'rb'))
        self.user_key_type = type(list(self.user_dict.keys())[0])
        self.item_key_type = type(list(self.item_dict.keys())[0])
        feature_dict = pickle.load(open(self.featurefile, 'rb'))
        data = []
        # NOTE: skips counted inside data_2_graphs are not propagated here, so
        # the summary print below always reports 0.
        error_num = 0
        ratings_df = pd.read_csv(self.ratingfile, sep=self.sep, header=None)
        (train_df, test_df) = train_test_split(ratings_df, test_size=0.4, random_state=self.args.random_seed, stratify=ratings_df[[0, 2]])
        (test_df, valid_df) = train_test_split(test_df, test_size=0.5, random_state=self.args.random_seed, stratify=test_df[[0, 2]])
        if self.store_backup:
            backup_path = f'{self.path}{self.dataset}/split_data_backup/'
            if (not os.path.exists(backup_path)):
                os.mkdir(backup_path)
            train_df.to_csv(f'{backup_path}train_data.csv', index=False)
            valid_df.to_csv(f'{backup_path}valid_data.csv', index=False)
            test_df.to_csv(f'{backup_path}test_data.csv', index=False)
        print('(Only run at the first time training the dataset)')
        train_graphs = self.data_2_graphs(train_df, dataset='train')
        valid_graphs = self.data_2_graphs(valid_df, dataset='valid')
        test_graphs = self.data_2_graphs(test_df, dataset='test')
        graphs = ((train_graphs + valid_graphs) + test_graphs)
        stat_info = {}
        stat_info['data_num'] = len(graphs)
        stat_info['feature_num'] = len(feature_dict)
        stat_info['train_test_split_index'] = [len(train_graphs), (len(train_graphs) + len(valid_graphs))]
        print('error number of data:', error_num)
        return (graphs, stat_info)
    def construct_graphs(self, user_list, item_list, rating):
        """Build inner (within-part, incl. self-loops) and outer (user-item) edge sets."""
        u_n = len(user_list)
        i_n = len(item_list)
        inner_edge_index = [[], []]
        for i in range(u_n):
            for j in range(i, u_n):
                inner_edge_index[0].append(i)
                inner_edge_index[1].append(j)
        for i in range(u_n, (u_n + i_n)):
            for j in range(i, (u_n + i_n)):
                inner_edge_index[0].append(i)
                inner_edge_index[1].append(j)
        outer_edge_index = [[], []]
        for i in range(u_n):
            for j in range(i_n):
                outer_edge_index[0].append(i)
                outer_edge_index[1].append((u_n + j))
        inner_edge_index = torch.LongTensor(inner_edge_index)
        inner_edge_index = to_undirected(inner_edge_index)
        outer_edge_index = torch.LongTensor(outer_edge_index)
        outer_edge_index = to_undirected(outer_edge_index)
        graph = self.construct_graph((user_list + item_list), inner_edge_index, outer_edge_index, rating)
        return graph
    def construct_graph(self, node_list, edge_index_inner, edge_index_outer, rating):
        """Wrap node ids and both edge sets into a Data object (outer edges ride in edge_attr)."""
        x = torch.LongTensor(node_list).unsqueeze(1)
        rating = torch.FloatTensor([rating])
        return Data(x=x, edge_index=edge_index_inner, edge_attr=torch.transpose(edge_index_outer, 0, 1), y=rating)
    def process(self):
        """Read the raw files, build all graphs, and cache collated tensors + stats."""
        self.userfile = self.raw_file_names[0]
        self.itemfile = self.raw_file_names[1]
        self.featurefile = self.raw_file_names[2]
        self.ratingfile = self.raw_file_names[3]
        (graphs, stat_info) = self.read_data()
        if (not os.path.exists(f'{self.path}processed/{self.dataset}')):
            os.mkdir(f'{self.path}processed/{self.dataset}')
        (data, slices) = self.collate(graphs)
        torch.save((data, slices), self.processed_paths[0])
        torch.save(stat_info, self.processed_paths[1])
    def feature_N(self):
        return self.feature_num
    def data_N(self):
        return self.data_num
|
class inner_GNN(MessagePassing):
    """Mean-aggregating message passing over inner (within user / within item) edges.

    Messages are the element-wise product of endpoint embeddings pushed through
    a two-layer MLP with ReLU and dropout, optionally scaled by an edge weight.
    """

    def __init__(self, dim, hidden_layer):
        super(inner_GNN, self).__init__(aggr='mean')
        self.lin1 = nn.Linear(dim, hidden_layer)
        self.lin2 = nn.Linear(hidden_layer, dim)
        self.act = nn.ReLU()
        self.drop = nn.Dropout(p=0.5)

    def forward(self, x, edge_index, edge_weight=None):
        x = x.squeeze()
        return self.propagate(edge_index, x=x, edge_weight=edge_weight)

    def message(self, x_i, x_j, edge_weight):
        # pairwise feature interaction of the two endpoint embeddings
        pairwise_analysis = (x_i * x_j)
        pairwise_analysis = self.lin1(pairwise_analysis)
        pairwise_analysis = self.act(pairwise_analysis)
        pairwise_analysis = self.lin2(pairwise_analysis)
        pairwise_analysis = self.drop(pairwise_analysis)
        # FIX: `is not None` instead of `!= None` — comparing a tensor with !=
        # has elementwise semantics and is non-idiomatic (PEP 8)
        if edge_weight is not None:
            interaction_analysis = pairwise_analysis * edge_weight.view(-1, 1)
        else:
            interaction_analysis = pairwise_analysis
        return interaction_analysis

    def update(self, aggr_out):
        return aggr_out
|
class cross_GNN(MessagePassing):
    """Mean-aggregating message passing over cross (user-item) edges.

    Messages are the plain element-wise product of endpoint embeddings,
    optionally scaled by an edge weight; the layer has no learnable parameters.
    """

    def __init__(self, dim, hidden_layer):
        # dim/hidden_layer are accepted only for signature symmetry with inner_GNN
        super(cross_GNN, self).__init__(aggr='mean')

    def forward(self, x, edge_index, edge_weight=None):
        x = x.squeeze()
        return self.propagate(edge_index, x=x, edge_weight=edge_weight)

    def message(self, x_i, x_j, edge_weight):
        pairwise_analysis = (x_i * x_j)
        # FIX: `is not None` instead of `!= None` (tensor != is elementwise)
        if edge_weight is not None:
            interaction_analysis = pairwise_analysis * edge_weight.view(-1, 1)
        else:
            interaction_analysis = pairwise_analysis
        return interaction_analysis

    def update(self, aggr_out):
        return aggr_out
|
class GMCF(nn.Module):
    """GMCF main model.

    Each interaction graph holds user nodes (id + attributes) followed by item
    nodes. Node embeddings receive one round of inner_GNN (within-part) and
    cross_GNN (user<->item) messages; a GRU fuses [embedding, inner message,
    cross message] per node, and the score is
    sigmoid(dot(user_pool, item_pool) + per-node bias sum).
    """
    def __init__(self, args, n_features, device):
        super(GMCF, self).__init__()
        self.n_features = n_features
        self.dim = args.dim
        self.hidden_layer = args.hidden_layer
        self.device = device
        self.batch_size = args.batch_size
        self.num_user_features = args.num_user_features
        # +1 on the embedding table size — presumably feature id 0 is reserved
        # as padding; TODO confirm against the id assignment in the dataset.
        self.feature_embedding = nn.Embedding((self.n_features + 1), self.dim)
        self.inner_gnn = inner_GNN(self.dim, self.hidden_layer)
        self.outer_gnn = cross_GNN(self.dim, self.hidden_layer)
        # scalar bias per feature id, summed per graph in forward()
        self.node_weight = nn.Embedding((self.n_features + 1), 1)
        self.node_weight.weight.data.normal_(0.0, 0.01)
        # GRU fuses the node's own embedding with its inner and cross messages
        self.update_f = nn.GRU(input_size=self.dim, hidden_size=self.dim, dropout=0.5)
        self.g = nn.Linear(self.dim, 1, bias=False)
    def forward(self, data, is_training=True):
        """Score a batch of interaction graphs; returns sigmoid probabilities."""
        node_id = data.x.to(self.device)
        batch = data.batch
        node_w = torch.squeeze(self.node_weight(node_id))
        sum_weight = global_add_pool(node_w, batch)
        node_emb = self.feature_embedding(node_id)
        inner_edge_index = data.edge_index
        # cross edges are stored transposed in edge_attr (see Dataset.construct_graph)
        outer_edge_index = torch.transpose(data.edge_attr, 0, 1)
        outer_edge_index = self.outer_offset(batch, self.num_user_features, outer_edge_index)
        inner_node_message = self.inner_gnn(node_emb, inner_edge_index)
        outer_node_message = self.outer_gnn(node_emb, outer_edge_index)
        if (len(outer_node_message.size()) < len(node_emb.size())):
            outer_node_message = outer_node_message.unsqueeze(1)
        inner_node_message = inner_node_message.unsqueeze(1)
        # sequence of length 3 per node: [own embedding, inner msg, cross msg]
        updated_node_input = torch.cat((node_emb, inner_node_message, outer_node_message), 1)
        updated_node_input = torch.transpose(updated_node_input, 0, 1)
        gru_h0 = torch.normal(0, 0.01, (1, node_emb.size(0), self.dim)).to(self.device)
        (gru_output, hn) = self.update_f(updated_node_input, gru_h0)
        # last GRU step is taken as the fused node representation
        updated_node = gru_output[(- 1)]
        # re-label nodes so user and item parts of each graph pool separately
        new_batch = self.split_batch(batch, self.num_user_features)
        updated_graph = torch.squeeze(global_mean_pool(updated_node, new_batch))
        (item_graphs, user_graphs) = torch.split(updated_graph, int((updated_graph.size(0) / 2)))
        y = torch.unsqueeze((torch.sum((user_graphs * item_graphs), 1) + sum_weight), 1)
        y = torch.sigmoid(y)
        return y
    def split_batch(self, batch, user_node_num):
        """Split each graph's batch id into separate user-part and item-part ids.

        The first `user_node_num` nodes of every graph (the user part) have
        their batch id shifted by max(batch)+1, so global pooling yields one
        vector per part: the first half of the pooled rows are item parts,
        the second half user parts.
        """
        ones = torch.ones_like(batch)
        nodes_per_graph = global_add_pool(ones, batch)
        # cumulative node offsets = index of each graph's first node
        cum_num = torch.cat((torch.LongTensor([0]).to(self.device), torch.cumsum(nodes_per_graph, dim=0)[:(- 1)]))
        cum_num_list = [(cum_num + i) for i in range(user_node_num)]
        multi_hot = torch.cat(cum_num_list)
        test = (torch.sum(F.one_hot(multi_hot, ones.size(0)), dim=0) * (torch.max(batch) + 1))
        return (batch + test)
    def outer_offset(self, batch, user_node_num, outer_edge_index):
        """Shift per-graph local cross-edge indices to global node indices.

        Each graph contributes inter_per_graph = 2 * user_n * item_n cross
        edges; its edge indices are offset by the cumulative node count of the
        preceding graphs.
        """
        ones = torch.ones_like(batch)
        nodes_per_graph = global_add_pool(ones, batch)
        inter_per_graph = (((nodes_per_graph - user_node_num) * user_node_num) * 2)
        cum_num = torch.cat((torch.LongTensor([0]).to(self.device), torch.cumsum(nodes_per_graph, dim=0)[:(- 1)]))
        offset_list = torch.repeat_interleave(cum_num, inter_per_graph, dim=0).repeat(2, 1)
        outer_edge_index_offset = (outer_edge_index + offset_list)
        return outer_edge_index_offset
|
def train(args, data_info, show_loss):
    """Train GMCF with BCE loss and report val/test metrics after every epoch.

    Args:
        args: parsed hyperparameters (n_epoch, lr, l2_weight, ...).
        data_info: dict with 'train'/'val'/'test' loaders, 'feature_num', and
            'data_num' = (train_num, val_num, test_num).
        show_loss: not referenced in this body; kept for interface compatibility.
    """
    train_loader = data_info['train']
    val_loader = data_info['val']
    test_loader = data_info['test']
    feature_num = data_info['feature_num']
    (train_num, val_num, test_num) = data_info['data_num']
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    model = GMCF(args, feature_num, device)
    model = model.to(device)
    optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), model.parameters()), weight_decay=args.l2_weight, lr=args.lr)
    crit = torch.nn.BCELoss()
    print([i.size() for i in filter((lambda p: p.requires_grad), model.parameters())])
    print('start training...')
    for step in range(args.n_epoch):
        loss_all = 0
        edge_all = 0
        model.train()
        for data in train_loader:
            data = data.to(device)
            output = model(data)
            label = data.y
            label = label.to(device)
            baseloss = crit(torch.squeeze(output), label)
            loss = baseloss
            # weight the batch loss by its graph count so cur_loss below is a
            # proper dataset-level average
            loss_all += (data.num_graphs * loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        cur_loss = (loss_all / train_num)
        (val_auc, val_logloss, val_ndcg5, val_ndcg10) = evaluate(model, val_loader, device)
        (test_auc, test_logloss, test_ndcg5, test_ndcg10) = evaluate(model, test_loader, device)
        print('Epoch: {:03d}, Loss: {:.5f}, AUC: {:.5f}/{:.5f}, Logloss: {:.5f}/{:.5f}, NDCG@5: {:.5f}/{:.5f} NDCG@10: {:.5f}/{:.5f}'.format(step, cur_loss, val_auc, test_auc, val_logloss, test_logloss, val_ndcg5, test_ndcg5, val_ndcg10, test_ndcg10))
|
def evaluate(model, data_loader, device):
    """Run the model over a loader and return (AUC, logloss, NDCG@5, NDCG@10)."""
    model.eval()
    all_preds, all_labels, all_users = [], [], []
    with torch.no_grad():
        for batch_data in data_loader:
            # one representative node per graph: the first node, i.e. the user id
            _, first_idx = np.unique(batch_data.batch.detach().cpu().numpy(), return_index=True)
            all_users.append(batch_data.x.detach().cpu().numpy()[first_idx])
            batch_data = batch_data.to(device)
            scores = model(batch_data).squeeze().detach().cpu().numpy().astype('float64')
            if scores.size == 1:
                # keep 1-D shape for single-graph batches
                scores = np.expand_dims(scores, axis=0)
            all_preds.append(scores)
            all_labels.append(batch_data.y.detach().cpu().numpy())
    predictions = np.concatenate(all_preds, 0)
    labels = np.concatenate(all_labels, 0)
    user_ids = np.concatenate(all_users, 0)
    ndcg5 = cal_ndcg(predictions, labels, user_ids, 5)
    ndcg10 = cal_ndcg(predictions, labels, user_ids, 10)
    return (roc_auc_score(labels, predictions), log_loss(labels, predictions), ndcg5, ndcg10)
|
def cal_ndcg(predicts, labels, user_ids, k):
    """Mean per-user NDCG@k; users with fewer than two scored items are skipped."""
    frame = pd.DataFrame({
        'user': np.squeeze(user_ids),
        'predict': np.squeeze(predicts),
        'label': np.squeeze(labels),
    })
    per_user = []
    for uid in frame.user.unique():
        rows = frame.loc[(frame['user'] == uid)]
        preds = rows['predict'].tolist()
        if len(preds) < 2:
            # NDCG is undefined for a single item
            continue
        per_user.append(ndcg_score([rows['label'].tolist()], [preds], k=k))
    return np.mean(np.array(per_user))
|
class InpaintingTrainDataset(Dataset):
    """Training dataset yielding a stack of augmented views of one image plus masks.

    Each item produces `n_references + 1` views: a shared (replayed)
    augmentation gives all views the same global transform, then a per-view
    individual transform perturbs each frame independently. A random mask is
    generated for every view, and all outputs are rescaled from [0, 1] to
    [-1, 1].
    """
    def __init__(self, paths, n_references, mask_generator, transform_shared, transform_individual):
        self.in_files = paths
        self.n_references = n_references
        self.mask_generator = mask_generator
        self.transform_shared = transform_shared
        self.transform_individual = transform_individual
        # counter forwarded to the mask generator as iter_i, incremented per
        # generated mask (presumably for schedule-dependent masks — confirm)
        self.iter_i = 0
    def __len__(self):
        return len(self.in_files)
    def __getitem__(self, item):
        path = self.in_files[item]
        img = cv2.imread(path)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        # shared transform is recorded once, then replayed for each reference
        data = self.transform_shared(image=img)
        frame = data['image']
        frame = self.transform_individual(image=frame)['image']
        images = [frame]
        for i in range(self.n_references):
            frame = A.ReplayCompose.replay(data['replay'], image=img)['image']
            frame = self.transform_individual(image=frame)['image']
            images.append(frame)
        images = np.stack(images, 0)
        # CHW layout for the mask generator
        img_t = np.transpose(images[0], (2, 0, 1))
        masks = list()
        for _ in range((self.n_references + 1)):
            mask = self.mask_generator(img_t, iter_i=self.iter_i)
            mask = mask.transpose(1, 2, 0)
            self.iter_i += 1
            masks.append(mask)
        masks = np.stack(masks, 0)
        result = dict(images=images, masks=masks, masked_images=(images * (1 - masks)))
        # rescale everything from [0, 1] to [-1, 1]
        for k in ['images', 'masks', 'masked_images']:
            result[k] = ((result[k] * 2.0) - 1.0)
        return result
|
def get_transforms(transform_variant, out_size, easy=False):
    """Build the (shared, individual) augmentation pipelines for training.

    Only the 'distortions' variant is reachable (see the assert below); the
    `easy` flag trades the heavy perspective/affine individual transform for a
    simple pad-and-crop with a smaller shared shift.
    """
    # NOTE(review): this assert makes every branch except 'distortions'
    # unreachable, and those branches assign only `transform` while the return
    # statement references transform_shared/transform_individual — reaching
    # them would raise NameError. Consider deleting the dead branches or
    # restoring multi-variant support.
    assert (transform_variant == 'distortions')
    if (transform_variant == 'default'):
        transform = A.Compose([A.RandomScale(scale_limit=0.2), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat()])
    elif (transform_variant == 'distortions'):
        if easy:
            max_shift = 25
            transform_shared = A.ReplayCompose([A.PadIfNeeded(min_height=(out_size + max_shift), min_width=(out_size + max_shift)), A.RandomCrop(height=(out_size + max_shift), width=(out_size + max_shift)), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5)])
            transform_individual = A.Compose([A.PadIfNeeded(min_height=out_size, min_width=out_size), A.RandomCrop(height=out_size, width=out_size), A.ToFloat()])
        else:
            max_shift = 200
            transform_shared = A.ReplayCompose([A.PadIfNeeded(min_height=(out_size + max_shift), min_width=(out_size + max_shift)), A.RandomCrop(height=(out_size + max_shift), width=(out_size + max_shift)), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5)])
            transform_individual = A.Compose([IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.7, 1.3), rotate=((- 40), 40), shear=((- 0.1), 0.1)), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.RandomBrightnessContrast(brightness_limit=0.01, contrast_limit=0.01), A.HueSaturationValue(hue_shift_limit=0, sat_shift_limit=3, val_shift_limit=1), A.ToFloat()])
    elif (transform_variant == 'distortions_scale05_1'):
        transform = A.Compose([IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.5, 1.0), rotate=((- 40), 40), shear=((- 0.1), 0.1), p=1), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat()])
    elif (transform_variant == 'distortions_scale03_12'):
        transform = A.Compose([IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.3, 1.2), rotate=((- 40), 40), shear=((- 0.1), 0.1), p=1), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat()])
    elif (transform_variant == 'distortions_scale03_07'):
        transform = A.Compose([IAAPerspective2(scale=(0.0, 0.06)), IAAAffine2(scale=(0.3, 0.7), rotate=((- 40), 40), shear=((- 0.1), 0.1), p=1), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.OpticalDistortion(), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat()])
    elif (transform_variant == 'distortions_light'):
        transform = A.Compose([IAAPerspective2(scale=(0.0, 0.02)), IAAAffine2(scale=(0.8, 1.8), rotate=((- 20), 20), shear=((- 0.03), 0.03)), A.PadIfNeeded(min_height=out_size, min_width=out_size), A.RandomCrop(height=out_size, width=out_size), A.HorizontalFlip(), A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat()])
    elif (transform_variant == 'non_space_transform'):
        transform = A.Compose([A.CLAHE(), A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2), A.HueSaturationValue(hue_shift_limit=5, sat_shift_limit=30, val_shift_limit=5), A.ToFloat()])
    elif (transform_variant == 'no_augs'):
        transform = A.Compose([A.ToFloat()])
    else:
        raise ValueError(f'Unexpected transform_variant {transform_variant}')
    return (transform_shared, transform_individual)
|
def make_default_train_dataset(root, filelist, kind='default', out_size=512, mask_gen_kwargs=None, transform_variant='default', mask_generator_kind='mixed', easy=False, **kwargs):
    """Build an InpaintingTrainDataset from a file list rooted at `root`."""
    if kind != 'default':
        raise ValueError(f'Dropped support for other datasets: {kind}')
    LOGGER.info(f'Make train dataloader from {filelist}. Using mask generator={mask_generator_kind}')
    with open(filelist, 'r') as f:
        relative_paths = f.read().splitlines()
    full_paths = [os.path.join(root, rel) for rel in relative_paths]
    transform_shared, transform_individual = get_transforms(transform_variant, out_size, easy=easy)
    return InpaintingTrainDataset(
        paths=full_paths,
        mask_generator=get_mask_generator(kind=mask_generator_kind, kwargs=mask_gen_kwargs),
        transform_shared=transform_shared,
        transform_individual=transform_individual,
        **kwargs,
    )
|
def make_default_val_dataset(indir, kind='default', out_size=512, **kwargs):
    """Build an evaluation dataset (or a ConcatDataset over several indirs).

    Mask-generator settings are accepted for interface symmetry with the train
    factory, but InpaintingEvaluationDataset reads its masks from disk and its
    __init__ does not take these keywords, so they are removed before
    forwarding.
    """
    if kind != 'default':
        raise ValueError(f'Dropped support for other datasets: {kind}')
    if OmegaConf.is_list(indir) or isinstance(indir, (tuple, list)):
        return ConcatDataset([make_default_val_dataset(idir, kind=kind, out_size=out_size, **kwargs) for idir in indir])
    LOGGER.info(f'Make val dataloader {kind} from {indir}')
    # FIX: pop (not get) these keys — leaving them in kwargs made the
    # InpaintingEvaluationDataset(...) call below raise TypeError on
    # unexpected keyword arguments. The mask generator itself was built and
    # then discarded, so the call is dropped entirely.
    kwargs.pop('mask_generator_kind', None)
    kwargs.pop('mask_gen_kwargs', None)
    dataset = InpaintingEvaluationDataset(indir, **kwargs)
    return dataset
|
def load_image(fname, mode='RGB', return_orig=False):
    """Load an image as float32 CHW in [0, 1]; optionally also return the raw array."""
    img = np.array(Image.open(fname).convert(mode))
    if img.ndim == 3:
        img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
    out_img = img.astype('float32') / 255
    return (out_img, img) if return_orig else out_img
|
def ceil_modulo(x, mod):
    """Round `x` up to the nearest multiple of `mod`."""
    remainder = x % mod
    if remainder == 0:
        return x
    return x + (mod - remainder)
|
def pad_img_to_modulo(img, mod):
    """Symmetric-pad a CHW image so height and width become multiples of `mod`."""
    channels, height, width = img.shape
    pad_h = ceil_modulo(height, mod) - height
    pad_w = ceil_modulo(width, mod) - width
    # pad only at the bottom/right edges
    return np.pad(img, ((0, 0), (0, pad_h), (0, pad_w)), mode='symmetric')
|
def pad_tensor_to_modulo(img, mod):
    """Reflect-pad a BCHW tensor so height and width become multiples of `mod`."""
    batch_size, channels, height, width = img.shape
    pad_w = ceil_modulo(width, mod) - width
    pad_h = ceil_modulo(height, mod) - height
    # F.pad order is (left, right, top, bottom); pad only right/bottom
    return F.pad(img, pad=(0, pad_w, 0, pad_h), mode='reflect')
|
def scale_image(img, factor, interpolation=cv2.INTER_AREA):
    """Rescale a CHW (or 1HW) image by `factor`, preserving the channel layout."""
    if img.shape[0] == 1:
        # single-channel: resize the 2-D plane directly
        resized = cv2.resize(img[0], dsize=None, fx=factor, fy=factor, interpolation=interpolation)
    else:
        # cv2 works on HWC
        resized = cv2.resize(np.transpose(img, (1, 2, 0)), dsize=None, fx=factor, fy=factor, interpolation=interpolation)
    if resized.ndim == 2:
        return resized[None, ...]
    return np.transpose(resized, (2, 0, 1))
|
class InpaintingEvaluationDataset(Dataset):
    """Evaluation dataset pairing each ``*mask*.png`` under ``datadir`` with its source image.

    Each sample is a dict with ``image`` and ``mask``, optionally rescaled and
    padded, then mapped to [-1, 1] and channel-last (HWC) layout.
    """

    def __init__(self, datadir, img_suffix='.jpg', pad_out_to_modulo=None, scale_factor=None):
        self.datadir = datadir
        # "<name>_mask*.png" pairs with image "<name><img_suffix>".
        self.mask_filenames = sorted(glob.glob(os.path.join(self.datadir, '**', '*mask*.png'), recursive=True))
        self.img_filenames = [fname.rsplit('_mask', 1)[0] + img_suffix for fname in self.mask_filenames]
        self.pad_out_to_modulo = pad_out_to_modulo
        self.scale_factor = scale_factor

    def __len__(self):
        return len(self.mask_filenames)

    def __getitem__(self, i):
        sample = dict(
            image=load_image(self.img_filenames[i], mode='RGB'),
            mask=load_image(self.mask_filenames[i], mode='L')[None, ...],
        )
        if self.scale_factor is not None:
            sample['image'] = scale_image(sample['image'], self.scale_factor)
            sample['mask'] = scale_image(sample['mask'], self.scale_factor, interpolation=cv2.INTER_NEAREST)
        if self.pad_out_to_modulo is not None and self.pad_out_to_modulo > 1:
            sample['image'] = pad_img_to_modulo(sample['image'], self.pad_out_to_modulo)
            sample['mask'] = pad_img_to_modulo(sample['mask'], self.pad_out_to_modulo)
        # Map to [-1, 1] and CHW -> HWC.
        for key in ['image', 'mask']:
            sample[key] = (sample[key] * 2.0 - 1.0).transpose(1, 2, 0)
        return sample
|
class PRNGMixin(object):
    """Mixin exposing a ``prng`` property (numpy RandomState) that is rebuilt on pid change.

    Re-creating the RandomState whenever os.getpid() differs from the pid that
    last built it avoids synchronized sampling across forked worker processes.
    """

    @property
    def prng(self):
        pid = os.getpid()
        if getattr(self, '_initpid', None) != pid:
            self._initpid = pid
            self._prng = np.random.RandomState()
        return self._prng
|
class LamaPropagation(Dataset, PRNGMixin):
    """Training dataset turning a masked-inpainting clip into key/value propagation pairs.

    Each value frame is either the clean image paired with an all -1 mask channel
    (with probability ``clean_prob``) or the masked image with its real mask.
    """

    def __init__(self, **kwargs):
        # Default clean_prob: one clean frame per n_references on average.
        self.clean_prob = kwargs.pop('clean_prob', 1.0 / kwargs['n_references'])
        # Fill in any mask-generation settings the caller did not override.
        for key in default_mask_config:
            kwargs.setdefault(key, default_mask_config[key])
        self.base_data = make_default_train_dataset(**kwargs)

    def __len__(self):
        return len(self.base_data)

    def __getitem__(self, i):
        example = self.base_data[i]
        n_frames = example['images'].shape[0]
        values = []
        for t in range(n_frames):
            if self.prng.random() < self.clean_prob:
                blank_mask = (-1) * np.ones_like(example['masks'][t])
                values.append(np.concatenate([example['images'][t], blank_mask], axis=-1))
            else:
                values.append(np.concatenate([example['masked_images'][t], example['masks'][t]], axis=-1))
        return {
            'rgbs': np.concatenate([example['images'], example['masks']], axis=-1),
            'keys': np.concatenate([example['masked_images'], example['masks']], axis=-1),
            'values': np.stack(values, axis=0),
            'targets': example['images'],
        }
|
class LamaGI(Dataset, PRNGMixin):
    """Training dataset for guided inpainting: frame 0 is the target, the rest are references.

    Returns the target ``image``/``mask`` plus the remaining frames of the clip
    as ``srcs``/``srcs_masks`` reference pairs.
    """

    def __init__(self, **kwargs):
        # Default clean_prob: one clean frame per n_references on average.
        # (Stored for parity with LamaPropagation; not read in __getitem__.)
        self.clean_prob = kwargs.pop('clean_prob', 1.0 / kwargs['n_references'])
        # Fill in any mask-generation settings the caller did not override.
        for k in default_mask_config:
            if k not in kwargs:
                kwargs[k] = default_mask_config[k]
        self.base_data = make_default_train_dataset(**kwargs)

    def __len__(self):
        return len(self.base_data)

    def __getitem__(self, i):
        # Frame 0 is the inpainting target; remaining frames are guidance sources.
        # (Removed the unused local T = images.shape[0].)
        example = self.base_data[i]
        return {
            'image': example['images'][0],
            'mask': example['masks'][0],
            'srcs': example['images'][1:],
            'srcs_masks': example['masks'][1:],
        }
|
class LamaGIValidation(Dataset):
    """Validation dataset for guided inpainting read from a file list.

    ``filenames`` is a text file with one path per line, containing per sample:
    the target image, its ``*_mask*.png``, and ``n_references`` source images
    ``*_src*.png`` with matching ``*_src*_m.png`` masks.

    Returns dicts with ``image``/``mask`` (HWC, [-1, 1]) and stacked
    ``srcs``/``srcs_masks`` (T x H x W x C, [-1, 1]).
    """

    def __init__(self, filenames, n_references, pad_out_to_modulo=None, scale_factor=None):
        self.n_references = n_references
        with open(filenames, 'r') as f:
            filenames = f.read().splitlines()
        # Target masks and their images.
        self.mask_filenames = [fname for fname in filenames if fnmatch.fnmatch(fname, '*image*mask*.png')]
        self.img_filenames = [(fname.rsplit('_mask', 1)[0] + '.png') for fname in self.mask_filenames]
        # Group reference (source) images and masks under their target image path.
        self.srcs_masks_filenames_by_img = dict()
        self.srcs_filenames_by_img = dict()
        for parent in self.img_filenames:
            self.srcs_masks_filenames_by_img[parent] = list()
            self.srcs_filenames_by_img[parent] = list()
        for fname in filenames:
            if fnmatch.fnmatch(fname, '*image*_src*_m.png'):
                parent = (fname.rsplit('_src', 1)[0] + '.png')
                self.srcs_masks_filenames_by_img[parent].append(fname)
                src = (fname.rsplit('_m.png', 1)[0] + '.png')
                self.srcs_filenames_by_img[parent].append(src)
        self.pad_out_to_modulo = pad_out_to_modulo
        self.scale_factor = scale_factor

    def __len__(self):
        return len(self.mask_filenames)

    def __getitem__(self, i):
        image = load_image(self.img_filenames[i], mode='RGB')
        mask = load_image(self.mask_filenames[i], mode='L')
        # Binarize the target mask at 0.5.
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = mask[None]
        srcs = list()
        srcs_masks = list()
        for t in range(self.n_references):
            srcs.append(load_image(self.srcs_filenames_by_img[self.img_filenames[i]][t], mode='RGB'))
            src_mask = load_image(self.srcs_masks_filenames_by_img[self.img_filenames[i]][t], mode='L')
            src_mask[src_mask < 0.5] = 0
            src_mask[src_mask >= 0.5] = 1
            srcs_masks.append(src_mask[None])
        if self.scale_factor is not None:
            image = scale_image(image, self.scale_factor)
            mask = scale_image(mask, self.scale_factor, interpolation=cv2.INTER_NEAREST)
            # BUG FIX: scale each reference BEFORE stacking. The previous code
            # assigned rescaled (differently-shaped) frames into rows of an
            # already-stacked array, which raises a broadcast error for any
            # scale_factor that changes the image size.
            srcs = [scale_image(src, self.scale_factor) for src in srcs]
            srcs_masks = [scale_image(m, self.scale_factor, interpolation=cv2.INTER_NEAREST) for m in srcs_masks]
        if (self.pad_out_to_modulo is not None) and (self.pad_out_to_modulo > 1):
            image = pad_img_to_modulo(image, self.pad_out_to_modulo)
            mask = pad_img_to_modulo(mask, self.pad_out_to_modulo)
            # Same fix as above: pad per-frame, stack afterwards.
            srcs = [pad_img_to_modulo(src, self.pad_out_to_modulo) for src in srcs]
            srcs_masks = [pad_img_to_modulo(m, self.pad_out_to_modulo) for m in srcs_masks]
        result = dict(image=image, mask=mask, srcs=np.stack(srcs), srcs_masks=np.stack(srcs_masks))
        # Map everything to [-1, 1] and channel-last layout.
        for k in ['image', 'mask']:
            result[k] = ((result[k] * 2.0) - 1.0).transpose(1, 2, 0)
        for k in ['srcs', 'srcs_masks']:
            result[k] = ((result[k] * 2.0) - 1.0).transpose(0, 2, 3, 1)
        return result
|
class CorrBlock():
    """All-pairs correlation pyramid used for RAFT-style flow lookups.

    Builds a 4D correlation volume between two feature maps, average-pools it
    into ``num_levels`` coarser levels, and, when called with pixel coordinates,
    samples a (2r+1)x(2r+1) window around each coordinate at every level.
    """
    def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
        # fmap1/fmap2: (batch, dim, h, w) feature maps — assumed equally sized (see corr()).
        self.num_levels = num_levels
        self.radius = radius
        self.corr_pyramid = []
        corr = CorrBlock.corr(fmap1, fmap2)
        (batch, h1, w1, dim, h2, w2) = corr.shape
        # Fold source pixels into the batch dim so pooling acts on target (h2, w2) only.
        corr = corr.reshape(((batch * h1) * w1), dim, h2, w2)
        self.corr_pyramid.append(corr)
        for i in range((self.num_levels - 1)):
            corr = F.avg_pool2d(corr, 2, stride=2)
            self.corr_pyramid.append(corr)
    def __call__(self, coords):
        # coords: (batch, 2, h1, w1) lookup coordinates in fmap2's pixel space.
        r = self.radius
        coords = coords.permute(0, 2, 3, 1)
        (batch, h1, w1, _) = coords.shape
        out_pyramid = []
        for i in range(self.num_levels):
            corr = self.corr_pyramid[i]
            # (2r+1)^2 sampling offsets around each centroid.
            dx = torch.linspace((- r), r, ((2 * r) + 1))
            dy = torch.linspace((- r), r, ((2 * r) + 1))
            delta = torch.stack(torch.meshgrid(dy, dx), axis=(- 1)).to(coords.device)
            # Centroids are scaled to the current pyramid level's resolution.
            centroid_lvl = (coords.reshape(((batch * h1) * w1), 1, 1, 2) / (2 ** i))
            delta_lvl = delta.view(1, ((2 * r) + 1), ((2 * r) + 1), 2)
            coords_lvl = (centroid_lvl + delta_lvl)
            corr = bilinear_sampler(corr, coords_lvl)
            corr = corr.view(batch, h1, w1, (- 1))
            out_pyramid.append(corr)
        out = torch.cat(out_pyramid, dim=(- 1))
        # Result: (batch, num_levels * (2r+1)^2, h1, w1).
        return out.permute(0, 3, 1, 2).contiguous().float()
    @staticmethod
    def corr(fmap1, fmap2):
        # Dot-product correlation between every pixel pair, scaled by 1/sqrt(dim).
        (batch, dim, ht, wd) = fmap1.shape
        fmap1 = fmap1.view(batch, dim, (ht * wd))
        fmap2 = fmap2.view(batch, dim, (ht * wd))
        corr = torch.matmul(fmap1.transpose(1, 2), fmap2)
        corr = corr.view(batch, ht, wd, 1, ht, wd)
        return (corr / torch.sqrt(torch.tensor(dim).float()))
|
class CorrLayer(torch.autograd.Function):
    """Autograd wrapper around the custom ``correlation_cudaz`` CUDA correlation kernel."""
    @staticmethod
    def forward(ctx, fmap1, fmap2, coords, r):
        # The CUDA kernel requires contiguous inputs.
        fmap1 = fmap1.contiguous()
        fmap2 = fmap2.contiguous()
        coords = coords.contiguous()
        ctx.save_for_backward(fmap1, fmap2, coords)
        ctx.r = r  # lookup radius; a plain int, so kept on ctx rather than save_for_backward
        (corr,) = correlation_cudaz.forward(fmap1, fmap2, coords, ctx.r)
        return corr
    @staticmethod
    def backward(ctx, grad_corr):
        (fmap1, fmap2, coords) = ctx.saved_tensors
        grad_corr = grad_corr.contiguous()
        (fmap1_grad, fmap2_grad, coords_grad) = correlation_cudaz.backward(fmap1, fmap2, coords, grad_corr, ctx.r)
        # No gradient for the radius argument.
        return (fmap1_grad, fmap2_grad, coords_grad, None)
|
class AlternateCorrBlock():
    """Memory-efficient correlation lookup computed on the fly by a CUDA kernel.

    Instead of materializing the full correlation volume (as CorrBlock does),
    it keeps a pyramid of pooled feature maps and calls ``alt_cuda_corr`` per level.
    """
    def __init__(self, fmap1, fmap2, num_levels=4, radius=4):
        self.num_levels = num_levels
        self.radius = radius
        self.pyramid = [(fmap1, fmap2)]
        for i in range(self.num_levels):
            fmap1 = F.avg_pool2d(fmap1, 2, stride=2)
            fmap2 = F.avg_pool2d(fmap2, 2, stride=2)
            self.pyramid.append((fmap1, fmap2))
    def __call__(self, coords):
        # coords: (B, 2, H, W) lookup coordinates.
        coords = coords.permute(0, 2, 3, 1)
        (B, H, W, _) = coords.shape
        corr_list = []
        for i in range(self.num_levels):
            r = self.radius
            # fmap1 always comes from the full-resolution level; fmap2 from level i.
            fmap1_i = self.pyramid[0][0].permute(0, 2, 3, 1)
            fmap2_i = self.pyramid[i][1].permute(0, 2, 3, 1)
            coords_i = (coords / (2 ** i)).reshape(B, 1, H, W, 2).contiguous()
            corr = alt_cuda_corr(fmap1_i, fmap2_i, coords_i, r)
            corr_list.append(corr.squeeze(1))
        corr = torch.stack(corr_list, dim=1)
        corr = corr.reshape(B, (- 1), H, W)
        # NOTE(review): fixed 1/16 scaling here vs CorrBlock's 1/sqrt(dim) —
        # matches only for dim == 256; confirm this is intended.
        return (corr / 16.0)
|
class ResidualBlock(nn.Module):
    """Two 3x3 convs with configurable normalization and an optional strided shortcut."""

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1)
        self.relu = nn.ReLU(inplace=True)
        num_groups = planes // 8
        # Factory for the requested normalization flavour.
        if norm_fn == 'group':
            make_norm = lambda: nn.GroupNorm(num_groups=num_groups, num_channels=planes)
        elif norm_fn == 'batch':
            make_norm = lambda: nn.BatchNorm2d(planes)
        elif norm_fn == 'instance':
            make_norm = lambda: nn.InstanceNorm2d(planes)
        elif norm_fn == 'none':
            make_norm = lambda: nn.Sequential()
        self.norm1 = make_norm()
        self.norm2 = make_norm()
        if stride != 1:
            # Extra norm for the 1x1 downsampling shortcut branch.
            self.norm3 = make_norm()
            self.downsample = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3)
        else:
            self.downsample = None

    def forward(self, x):
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.relu(self.norm2(self.conv2(out)))
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(shortcut + out)
|
class BottleneckBlock(nn.Module):
    """1x1-reduce -> 3x3 -> 1x1-expand bottleneck with configurable normalization."""

    def __init__(self, in_planes, planes, norm_fn='group', stride=1):
        super(BottleneckBlock, self).__init__()
        mid = planes // 4
        self.conv1 = nn.Conv2d(in_planes, mid, kernel_size=1, padding=0)
        self.conv2 = nn.Conv2d(mid, mid, kernel_size=3, padding=1, stride=stride)
        self.conv3 = nn.Conv2d(mid, planes, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        num_groups = planes // 8
        # Factory for the requested normalization flavour, parametrized by width.
        if norm_fn == 'group':
            make_norm = lambda ch: nn.GroupNorm(num_groups=num_groups, num_channels=ch)
        elif norm_fn == 'batch':
            make_norm = lambda ch: nn.BatchNorm2d(ch)
        elif norm_fn == 'instance':
            make_norm = lambda ch: nn.InstanceNorm2d(ch)
        elif norm_fn == 'none':
            make_norm = lambda ch: nn.Sequential()
        self.norm1 = make_norm(mid)
        self.norm2 = make_norm(mid)
        self.norm3 = make_norm(planes)
        if stride != 1:
            # Extra norm for the 1x1 downsampling shortcut branch.
            self.norm4 = make_norm(planes)
            self.downsample = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4)
        else:
            self.downsample = None

    def forward(self, x):
        out = self.relu(self.norm1(self.conv1(x)))
        out = self.relu(self.norm2(self.conv2(out)))
        out = self.relu(self.norm3(self.conv3(out)))
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(shortcut + out)
|
class BasicEncoder(nn.Module):
    """RAFT feature/context encoder producing 1/8-resolution features via residual blocks."""

    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(BasicEncoder, self).__init__()
        self.norm_fn = norm_fn
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=64)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(64)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(64)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        self.in_planes = 64
        self.layer1 = self._make_layer(64, stride=1)
        self.layer2 = self._make_layer(96, stride=2)
        self.layer3 = self._make_layer(128, stride=2)
        self.conv2 = nn.Conv2d(128, output_dim, kernel_size=1)
        self.dropout = nn.Dropout2d(p=dropout) if dropout > 0 else None
        # Kaiming init for convs, unit weight / zero bias for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, dim, stride=1):
        # Two residual blocks; only the first may downsample.
        blocks = (
            ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride),
            ResidualBlock(dim, dim, self.norm_fn, stride=1),
        )
        self.in_planes = dim
        return nn.Sequential(*blocks)

    def forward(self, x):
        # A list/tuple of two batches is concatenated for a single shared pass.
        is_pair = isinstance(x, (tuple, list))
        if is_pair:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        x = self.relu1(self.norm1(self.conv1(x)))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)
        if self.training and (self.dropout is not None):
            x = self.dropout(x)
        if is_pair:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
|
class SmallEncoder(nn.Module):
    """Compact RAFT encoder built from bottleneck blocks (1/8-resolution output)."""

    def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0):
        super(SmallEncoder, self).__init__()
        self.norm_fn = norm_fn
        if self.norm_fn == 'group':
            self.norm1 = nn.GroupNorm(num_groups=8, num_channels=32)
        elif self.norm_fn == 'batch':
            self.norm1 = nn.BatchNorm2d(32)
        elif self.norm_fn == 'instance':
            self.norm1 = nn.InstanceNorm2d(32)
        elif self.norm_fn == 'none':
            self.norm1 = nn.Sequential()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=7, stride=2, padding=3)
        self.relu1 = nn.ReLU(inplace=True)
        self.in_planes = 32
        self.layer1 = self._make_layer(32, stride=1)
        self.layer2 = self._make_layer(64, stride=2)
        self.layer3 = self._make_layer(96, stride=2)
        self.dropout = nn.Dropout2d(p=dropout) if dropout > 0 else None
        self.conv2 = nn.Conv2d(96, output_dim, kernel_size=1)
        # Kaiming init for convs, unit weight / zero bias for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)):
                if m.weight is not None:
                    nn.init.constant_(m.weight, 1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, dim, stride=1):
        # Two bottleneck blocks; only the first may downsample.
        blocks = (
            BottleneckBlock(self.in_planes, dim, self.norm_fn, stride=stride),
            BottleneckBlock(dim, dim, self.norm_fn, stride=1),
        )
        self.in_planes = dim
        return nn.Sequential(*blocks)

    def forward(self, x):
        # A list/tuple of two batches is concatenated for a single shared pass.
        is_pair = isinstance(x, (tuple, list))
        if is_pair:
            batch_dim = x[0].shape[0]
            x = torch.cat(x, dim=0)
        x = self.relu1(self.norm1(self.conv1(x)))
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.conv2(x)
        if self.training and (self.dropout is not None):
            x = self.dropout(x)
        if is_pair:
            x = torch.split(x, [batch_dim, batch_dim], dim=0)
        return x
|
class RAFT(nn.Module):
    """RAFT optical-flow network: feature/context encoders, a correlation
    pyramid, and an iterative GRU update operator (Teed & Deng, 2020)."""

    def __init__(self, args):
        super(RAFT, self).__init__()
        self.args = args
        if args.small:
            self.hidden_dim = hdim = 96
            self.context_dim = cdim = 64
            args.corr_levels = 4
            args.corr_radius = 3
        else:
            self.hidden_dim = hdim = 128
            self.context_dim = cdim = 128
            args.corr_levels = 4
            args.corr_radius = 4
        # Fill in optional argparse flags when absent.
        if ('dropout' not in args._get_kwargs()):
            args.dropout = 0
        if ('alternate_corr' not in args._get_kwargs()):
            args.alternate_corr = False
        if args.small:
            self.fnet = SmallEncoder(output_dim=128, norm_fn='instance', dropout=args.dropout)
            self.cnet = SmallEncoder(output_dim=(hdim + cdim), norm_fn='none', dropout=args.dropout)
            self.update_block = SmallUpdateBlock(self.args, hidden_dim=hdim)
        else:
            self.fnet = BasicEncoder(output_dim=256, norm_fn='instance', dropout=args.dropout)
            self.cnet = BasicEncoder(output_dim=(hdim + cdim), norm_fn='batch', dropout=args.dropout)
            self.update_block = BasicUpdateBlock(self.args, hidden_dim=hdim)

    def freeze_bn(self):
        """Put all BatchNorm layers into eval mode (used when fine-tuning)."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

    def initialize_flow(self, img):
        'Flow is represented as difference between two coordinate grids flow = coords1 - coords0'
        (N, C, H, W) = img.shape
        # Grids live at 1/8 resolution, matching the encoder output.
        coords0 = coords_grid(N, (H // 8), (W // 8)).to(img.device)
        coords1 = coords_grid(N, (H // 8), (W // 8)).to(img.device)
        return (coords0, coords1)

    def upsample_flow(self, flow, mask):
        'Upsample flow field [H/8, W/8, 2] -> [H, W, 2] using convex combination'
        (N, _, H, W) = flow.shape
        mask = mask.view(N, 1, 9, 8, 8, H, W)
        mask = torch.softmax(mask, dim=2)  # convex weights over the 3x3 neighborhood
        up_flow = F.unfold((8 * flow), [3, 3], padding=1)
        up_flow = up_flow.view(N, 2, 9, 1, 1, H, W)
        up_flow = torch.sum((mask * up_flow), dim=2)
        up_flow = up_flow.permute(0, 1, 4, 2, 5, 3)
        return up_flow.reshape(N, 2, (8 * H), (8 * W))

    def forward(self, image1, image2, iters=20, flow_init=None, upsample=True, test_mode=True, is_normalized=False):
        'Estimate optical flow between pair of frames'
        if (not is_normalized):
            # Map [0, 255] images to [-1, 1].
            image1 = ((2 * (image1 / 255.0)) - 1.0)
            image2 = ((2 * (image2 / 255.0)) - 1.0)
        image1 = image1.contiguous()
        image2 = image2.contiguous()
        hdim = self.hidden_dim
        cdim = self.context_dim
        with autocast(enabled=self.args.mixed_precision):
            (fmap1, fmap2) = self.fnet([image1, image2])
        fmap1 = fmap1.float()
        fmap2 = fmap2.float()
        # BUG FIX: was `CorrBlockAlternate`, a name not defined anywhere in this
        # file's visible scope — the memory-efficient variant defined above is
        # `AlternateCorrBlock` (previously a NameError when alternate_corr=True).
        if self.args.alternate_corr:
            corr_fn = AlternateCorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        else:
            corr_fn = CorrBlock(fmap1, fmap2, radius=self.args.corr_radius)
        with autocast(enabled=self.args.mixed_precision):
            cnet = self.cnet(image1)
            (net, inp) = torch.split(cnet, [hdim, cdim], dim=1)
            net = torch.tanh(net)
            inp = torch.relu(inp)
        (coords0, coords1) = self.initialize_flow(image1)
        if (flow_init is not None):
            coords1 = (coords1 + flow_init)
        flow_predictions = []
        for itr in range(iters):
            coords1 = coords1.detach()  # truncate gradients between iterations
            corr = corr_fn(coords1)
            flow = (coords1 - coords0)
            with autocast(enabled=self.args.mixed_precision):
                (net, up_mask, delta_flow) = self.update_block(net, inp, corr, flow)
            coords1 = (coords1 + delta_flow)
            if (up_mask is None):
                flow_up = upflow8((coords1 - coords0))
            else:
                flow_up = self.upsample_flow((coords1 - coords0), up_mask)
            flow_predictions.append(flow_up)
        if test_mode:
            return ((coords1 - coords0), flow_up)
        return flow_predictions
|
class FlowHead(nn.Module):
    """Two-layer conv head mapping a hidden state to a 2-channel flow update."""

    def __init__(self, input_dim=128, hidden_dim=256):
        super(FlowHead, self).__init__()
        self.conv1 = nn.Conv2d(input_dim, hidden_dim, 3, padding=1)
        self.conv2 = nn.Conv2d(hidden_dim, 2, 3, padding=1)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        hidden = self.relu(self.conv1(x))
        return self.conv2(hidden)
|
class ConvGRU(nn.Module):
    """Convolutional GRU cell operating on 2D feature maps."""

    def __init__(self, hidden_dim=128, input_dim=(192 + 128)):
        super(ConvGRU, self).__init__()
        combined = hidden_dim + input_dim
        self.convz = nn.Conv2d(combined, hidden_dim, 3, padding=1)
        self.convr = nn.Conv2d(combined, hidden_dim, 3, padding=1)
        self.convq = nn.Conv2d(combined, hidden_dim, 3, padding=1)

    def forward(self, h, x):
        hx = torch.cat([h, x], dim=1)
        update = torch.sigmoid(self.convz(hx))  # z: update gate
        reset = torch.sigmoid(self.convr(hx))   # r: reset gate
        candidate = torch.tanh(self.convq(torch.cat([reset * h, x], dim=1)))  # q
        return (1 - update) * h + update * candidate
|
class SepConvGRU(nn.Module):
    """ConvGRU with separable gate convolutions: a 1x5 (horizontal) pass, then a 5x1 (vertical) pass."""

    def __init__(self, hidden_dim=128, input_dim=(192 + 128)):
        super(SepConvGRU, self).__init__()
        combined = hidden_dim + input_dim
        self.convz1 = nn.Conv2d(combined, hidden_dim, (1, 5), padding=(0, 2))
        self.convr1 = nn.Conv2d(combined, hidden_dim, (1, 5), padding=(0, 2))
        self.convq1 = nn.Conv2d(combined, hidden_dim, (1, 5), padding=(0, 2))
        self.convz2 = nn.Conv2d(combined, hidden_dim, (5, 1), padding=(2, 0))
        self.convr2 = nn.Conv2d(combined, hidden_dim, (5, 1), padding=(2, 0))
        self.convq2 = nn.Conv2d(combined, hidden_dim, (5, 1), padding=(2, 0))

    @staticmethod
    def _gru_step(h, x, convz, convr, convq):
        # One GRU update using the supplied gate convolutions.
        hx = torch.cat([h, x], dim=1)
        z = torch.sigmoid(convz(hx))
        r = torch.sigmoid(convr(hx))
        q = torch.tanh(convq(torch.cat([r * h, x], dim=1)))
        return (1 - z) * h + z * q

    def forward(self, h, x):
        h = self._gru_step(h, x, self.convz1, self.convr1, self.convq1)  # horizontal
        h = self._gru_step(h, x, self.convz2, self.convr2, self.convq2)  # vertical
        return h
|
class SmallMotionEncoder(nn.Module):
    """Encodes correlation features and current flow into motion features (small model)."""

    def __init__(self, args):
        super(SmallMotionEncoder, self).__init__()
        # Channels of the stacked correlation lookup: levels * (2r+1)^2.
        cor_planes = args.corr_levels * (2 * args.corr_radius + 1) ** 2
        self.convc1 = nn.Conv2d(cor_planes, 96, 1, padding=0)
        self.convf1 = nn.Conv2d(2, 64, 7, padding=3)
        self.convf2 = nn.Conv2d(64, 32, 3, padding=1)
        self.conv = nn.Conv2d(128, 80, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc1(corr))
        flow_feat = F.relu(self.convf2(F.relu(self.convf1(flow))))
        merged = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        # Append the raw flow so downstream layers keep direct access to it.
        return torch.cat([merged, flow], dim=1)
|
class BasicMotionEncoder(nn.Module):
    """Encodes correlation features and current flow into motion features (full model)."""

    def __init__(self, args):
        super(BasicMotionEncoder, self).__init__()
        # Channels of the stacked correlation lookup: levels * (2r+1)^2.
        cor_planes = args.corr_levels * (2 * args.corr_radius + 1) ** 2
        self.convc1 = nn.Conv2d(cor_planes, 256, 1, padding=0)
        self.convc2 = nn.Conv2d(256, 192, 3, padding=1)
        self.convf1 = nn.Conv2d(2, 128, 7, padding=3)
        self.convf2 = nn.Conv2d(128, 64, 3, padding=1)
        # 126 output channels so concatenating the 2-channel flow yields 128.
        self.conv = nn.Conv2d(64 + 192, 128 - 2, 3, padding=1)

    def forward(self, flow, corr):
        corr_feat = F.relu(self.convc2(F.relu(self.convc1(corr))))
        flow_feat = F.relu(self.convf2(F.relu(self.convf1(flow))))
        merged = F.relu(self.conv(torch.cat([corr_feat, flow_feat], dim=1)))
        # Append the raw flow so downstream layers keep direct access to it.
        return torch.cat([merged, flow], dim=1)
|
class SmallUpdateBlock(nn.Module):
    """GRU-based update operator for the small RAFT model (no upsampling mask)."""

    def __init__(self, args, hidden_dim=96):
        super(SmallUpdateBlock, self).__init__()
        self.encoder = SmallMotionEncoder(args)
        self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=(82 + 64))
        self.flow_head = FlowHead(hidden_dim, hidden_dim=128)

    def forward(self, net, inp, corr, flow):
        features = torch.cat([inp, self.encoder(flow, corr)], dim=1)
        net = self.gru(net, features)
        # The small model has no convex-upsampling mask, hence the None slot.
        return (net, None, self.flow_head(net))
|
class BasicUpdateBlock(nn.Module):
    """GRU-based update operator for the full RAFT model, also producing an upsampling mask."""

    def __init__(self, args, hidden_dim=128, input_dim=128):
        super(BasicUpdateBlock, self).__init__()
        self.args = args
        self.encoder = BasicMotionEncoder(args)
        self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=(128 + hidden_dim))
        self.flow_head = FlowHead(hidden_dim, hidden_dim=256)
        self.mask = nn.Sequential(
            nn.Conv2d(128, 256, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, (64 * 9), 1, padding=0),
        )

    def forward(self, net, inp, corr, flow, upsample=True):
        features = torch.cat([inp, self.encoder(flow, corr)], dim=1)
        net = self.gru(net, features)
        delta_flow = self.flow_head(net)
        mask = 0.25 * self.mask(net)  # fixed 0.25 scaling of the mask logits
        return (net, mask, delta_flow)
|
def conv_bn(inp, oup, stride):
    """3x3 conv (no bias) + BatchNorm + ReLU6, the MobileNetV2 stem block."""
    layers = [
        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
        BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
|
def conv_1x1_bn(inp, oup):
    """1x1 conv (no bias) + BatchNorm + ReLU6 projection block."""
    layers = [
        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        BatchNorm2d(oup),
        nn.ReLU6(inplace=True),
    ]
    return nn.Sequential(*layers)
|
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: (optional expand) -> depthwise 3x3 -> linear project."""

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert (stride in [1, 2])
        hidden_dim = round(inp * expand_ratio)
        # Residual connection only when spatial size and channel count are preserved.
        self.use_res_connect = (self.stride == 1) and (inp == oup)
        layers = []
        if expand_ratio != 1:
            # Pointwise expansion.
            layers += [nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False), BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True)]
        # Depthwise 3x3.
        layers += [nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False), BatchNorm2d(hidden_dim), nn.ReLU6(inplace=True)]
        # Linear pointwise projection (no activation).
        layers += [nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), BatchNorm2d(oup)]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        if self.use_res_connect:
            return x + self.conv(x)
        return self.conv(x)
|
class MobileNetV2(nn.Module):
    """MobileNetV2 image classifier.

    Args:
        n_class: number of output classes.
        input_size: input resolution; must be divisible by 32.
        width_mult: channel width multiplier.
    """

    def __init__(self, n_class=1000, input_size=224, width_mult=1.0):
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        # Per stage: (expansion t, output channels c, repeats n, first-block stride s).
        interverted_residual_setting = [
            [1, 16, 1, 1],
            [6, 24, 2, 2],
            [6, 32, 3, 2],
            [6, 64, 4, 2],
            [6, 96, 3, 1],
            [6, 160, 3, 2],
            [6, 320, 1, 1],
        ]
        assert (input_size % 32) == 0
        input_channel = int(input_channel * width_mult)
        self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(3, input_channel, 2)]
        for (t, c, n, s) in interverted_residual_setting:
            output_channel = int(c * width_mult)
            for i in range(n):
                # Only the first block of a stage uses the stage stride.
                stride = s if i == 0 else 1
                self.features.append(block(input_channel, output_channel, stride, expand_ratio=t))
                input_channel = output_channel
        self.features.append(conv_1x1_bn(input_channel, self.last_channel))
        self.features = nn.Sequential(*self.features)
        self.classifier = nn.Sequential(nn.Dropout(0.2), nn.Linear(self.last_channel, n_class))
        self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        # Global average pool over W then H.
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # He-style init scaled by fan-out.
                fan_out = (m.kernel_size[0] * m.kernel_size[1]) * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                # Fixed small-std Gaussian for the classifier.
                # (Removed the unused local `n = m.weight.size(1)`.)
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
|
def mobilenetv2(pretrained=False, **kwargs):
    """Construct a MobileNetV2 model.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights (non-strict).
    """
    model = MobileNetV2(n_class=1000, **kwargs)
    if pretrained:
        state = load_url(model_urls['mobilenetv2'])
        model.load_state_dict(state, strict=False)
    return model
|
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding 1 and no bias."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
|
class BasicBlock(nn.Module):
    """Standard ResNet basic block: two 3x3 convs with an identity (or projected) shortcut."""
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
|
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck: 1x1 reduce, 3x3, then 1x1 expand (x4) with shortcut."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += shortcut
        return self.relu(out)
|
class ResNet(nn.Module):
    """ResNet backbone with a "deep stem": three stacked 3x3 convs instead of
    the usual single 7x7 stem conv (so ``inplanes`` starts at 128).

    The AvgPool2d(7) head implies a 7x7 final feature map — presumably
    224x224 inputs; confirm against callers.
    """
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 128
        super(ResNet, self).__init__()
        # Deep stem: 3x3/s2 (64) -> 3x3 (64) -> 3x3 (128).
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # He init for convs; unit weight / zero bias for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1):
        # Project the shortcut with a 1x1 conv when shape changes (stride or channels).
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x):
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), (- 1))
        x = self.fc(x)
        return x
|
def resnet50(pretrained=False, **kwargs):
    """Construct a ResNet-50 model.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights (non-strict).
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state = load_url(model_urls['resnet50'])
        model.load_state_dict(state, strict=False)
    return model
|
def resnet18(pretrained=False, **kwargs):
    """Construct a ResNet-18 model.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        # NOTE(review): unlike resnet50/mobilenetv2 this loads strictly — confirm intended.
        model.load_state_dict(load_url(model_urls['resnet18']))
    return model
|
def _sum_ft(tensor):
    """Sum over the first and the last dimension of ``tensor``."""
    reduced = tensor.sum(dim=0)
    return reduced.sum(dim=-1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.