repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
bluebert | bluebert-master/mt-bluebert/mt_bluebert/data_utils/utils.py | # Copyright (c) Microsoft. All rights reserved.
import random
import torch
import numpy
import subprocess
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Start from a clean slate: no observations recorded yet.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Fold in ``n`` observations of ``val`` and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def set_environment(seed, set_cuda=False):
    """Seed every RNG in play (python, numpy, torch) for reproducible runs.

    CUDA devices keep their own RNG state, so it is only seeded when
    explicitly requested *and* a GPU is actually available.
    """
    for seeder in (random.seed, numpy.random.seed, torch.manual_seed):
        seeder(seed)
    if set_cuda and torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
def patch_var(v, cuda=True):
    """Move tensor ``v`` to the GPU (non-blocking) when ``cuda`` is True.

    With ``cuda=False`` the tensor is returned untouched.
    """
    return v.cuda(non_blocking=True) if cuda else v
def get_gpu_memory_map():
    """Return ``{gpu_index: used_memory_MiB}`` as reported by ``nvidia-smi``.

    Raises ``CalledProcessError`` if ``nvidia-smi`` is missing or fails.
    """
    raw = subprocess.check_output(
        ['nvidia-smi', '--query-gpu=memory.used',
         '--format=csv,nounits,noheader'],
        encoding='utf-8')
    # nvidia-smi emits one line per GPU, in device-index order.
    return {idx: int(line) for idx, line in enumerate(raw.strip().split('\n'))}
def get_pip_env():
    """Return the installed-package listing produced by ``pip freeze``.

    Bug fix: the previous implementation used ``subprocess.call``, which
    lets the listing print to stdout and returns only the process exit
    code (an int) — callers never received the environment description
    the function name promises.  ``check_output`` captures the text and
    returns it, raising ``CalledProcessError`` if pip fails.
    """
    result = subprocess.check_output(["pip", "freeze"], encoding='utf-8')
    return result
| 1,244 | 24.408163 | 66 | py |
bluebert | bluebert-master/mt-bluebert/scripts/convert_tf_to_pt.py | # This scripts is to convert Google's TF BERT to the pytorch version which is used by mt-dnn.
# It is a supplementary script.
# Note that it relies on tensorflow==1.12.0 which does not support by our released docker.
# If you want to use this, please install tensorflow==1.12.0 by: pip install tensorflow==1.12.0
# Some codes are adapted from https://github.com/huggingface/pytorch-pretrained-BERT
# by: xiaodl
from __future__ import absolute_import
from __future__ import division
import re
import os
import argparse
import tensorflow as tf
import torch
import numpy as np
from pytorch_pretrained_bert.modeling import BertConfig
from sys import path
path.append(os.getcwd())
from mt_bluebert.mt_dnn.matcher import SANBertNetwork
from mt_bluebert.data_utils.log_wrapper import create_logger
logger = create_logger(__name__, to_disk=False)
def model_config(parser):
    """Register model-architecture flags on ``parser`` and return it.

    Covers the SAN answer-module hyper-parameters plus general model
    options (sequence length, encoder type, multi-task mixing knobs).
    """
    add = parser.add_argument
    add('--update_bert_opt', default=0, type=int)
    add('--multi_gpu_on', action='store_true')
    # SAN answer-module options.
    add('--mem_cum_type', type=str, default='simple', help='bilinear/simple/defualt')
    add('--answer_num_turn', type=int, default=5)
    add('--answer_mem_drop_p', type=float, default=0.1)
    add('--answer_att_hidden_size', type=int, default=128)
    add('--answer_att_type', type=str, default='bilinear', help='bilinear/simple/defualt')
    add('--answer_rnn_type', type=str, default='gru', help='rnn/gru/lstm')
    add('--answer_sum_att_type', type=str, default='bilinear', help='bilinear/simple/defualt')
    add('--answer_merge_opt', type=int, default=1)
    add('--answer_mem_type', type=int, default=1)
    add('--answer_dropout_p', type=float, default=0.1)
    add('--answer_weight_norm_on', action='store_true')
    add('--dump_state_on', action='store_true')
    add('--answer_opt', type=int, default=0, help='0,1')
    # Task / multi-task mixing options.
    add('--label_size', type=str, default='3')
    add('--mtl_opt', type=int, default=0)
    add('--ratio', type=float, default=0)
    add('--mix_opt', type=int, default=0)
    add('--max_seq_len', type=int, default=512)
    add('--init_ratio', type=float, default=1)
    add('--encoder_type', type=int, default=1)
    return parser
def train_config(parser):
    """Register optimizer / schedule / runtime flags on ``parser`` and return it."""
    add = parser.add_argument
    # Hardware & logging.
    add('--cuda', type=bool, default=torch.cuda.is_available(),
        help='whether to use GPU acceleration.')
    add('--log_per_updates', type=int, default=500)
    # Core training loop.
    add('--epochs', type=int, default=5)
    add('--batch_size', type=int, default=8)
    add('--batch_size_eval', type=int, default=8)
    add('--optimizer', default='adamax',
        help='supported optimizer: adamax, sgd, adadelta, adam')
    add('--grad_clipping', type=float, default=0)
    add('--global_grad_clipping', type=float, default=1.0)
    add('--weight_decay', type=float, default=0)
    add('--learning_rate', type=float, default=5e-5)
    add('--momentum', type=float, default=0)
    add('--warmup', type=float, default=0.1)
    add('--warmup_schedule', type=str, default='warmup_linear')
    # Dropout family (store_false flags default to True).
    add('--vb_dropout', action='store_false')
    add('--dropout_p', type=float, default=0.1)
    add('--tasks_dropout_p', type=float, default=0.1)
    add('--dropout_w', type=float, default=0.000)
    add('--bert_dropout_p', type=float, default=0.1)
    add('--dump_feature', action='store_false')
    # EMA
    add('--ema_opt', type=int, default=0)
    add('--ema_gamma', type=float, default=0.995)
    # scheduler
    add('--have_lr_scheduler', dest='have_lr_scheduler', action='store_false')
    add('--multi_step_lr', type=str, default='10,20,30')
    add('--freeze_layers', type=int, default=-1)
    add('--embedding_opt', type=int, default=0)
    add('--lr_gamma', type=float, default=0.5)
    add('--bert_l2norm', type=float, default=0.0)
    add('--scheduler_type', type=str, default='ms', help='ms/rop/exp')
    add('--output_dir', default='checkpoint')
    add('--seed', type=int, default=2018,
        help='random seed for data shuffling, embedding init, etc.')
    return parser
# Convert a Google TensorFlow BERT checkpoint into the pytorch SANBertNetwork
# format used by mt-dnn, saving {'state': state_dict, 'config': bert_config}.
# NOTE(review): the source indentation was lost in this dump; the comments
# below follow the apparent control flow, but the exact nesting — especially
# of the `elif flag:` / trailing `pointer = getattr(...)` lines near the end
# of the name-mapping loop — must be confirmed against the original script.
def convert(args):
# Resolve the TF checkpoint directory, its bert_config.json, and the output path.
tf_checkpoint_path = args.tf_checkpoint_root
bert_config_file = os.path.join(tf_checkpoint_path, 'bert_config.json')
pytorch_dump_path = args.pytorch_checkpoint_path
config = BertConfig.from_json_file(bert_config_file)
# Merge the CLI namespace with the BERT config dict to build the model options.
opt = vars(args)
opt.update(config.to_dict())
model = SANBertNetwork(opt)
path = os.path.join(tf_checkpoint_path, 'bert_model.ckpt')
logger.info('Converting TensorFlow checkpoint from {}'.format(path))
# Enumerate every variable stored in the TF checkpoint.
init_vars = tf.train.list_variables(path)
names = []
arrays = []
for name, shape in init_vars:
logger.info('Loading {} with shape {}'.format(name, shape))
array = tf.train.load_variable(path, name)
logger.info('Numpy array shape {}'.format(array.shape))
# new layer norm var name: TF uses beta/gamma, newer huggingface code uses
# bias/weight — rename here.  Make sure you use the latest huggingface
# layernorm implementation; if you still use beta/gamma, remove the rename.
if name.endswith('LayerNorm/beta'):
name = name[:-14] + 'LayerNorm/bias'
if name.endswith('LayerNorm/gamma'):
name = name[:-15] + 'LayerNorm/weight'
# Skip optimizer bookkeeping variables (step counters, Adam slots, loss scale)
# that have no counterpart in the pytorch model.
if name.endswith('bad_steps'):
print('bad_steps')
continue
if name.endswith('steps'):
print('step')
continue
if name.endswith('step'):
print('step')
continue
if name.endswith('adam_m'):
print('adam_m')
continue
if name.endswith('adam_v'):
print('adam_v')
continue
if name.endswith('loss_scale'):
print('loss_scale')
continue
names.append(name)
arrays.append(array)
# Walk each TF variable name and copy its array into the matching pytorch
# parameter, following the slash-separated path through the module tree.
for name, array in zip(names, arrays):
flag = False
# SQuAD head variables map onto the model's out_proj layer directly.
if name == 'cls/squad/output_bias':
name = 'out_proj/bias'
flag = True
if name == 'cls/squad/output_weights':
name = 'out_proj/weight'
flag = True
logger.info('Loading {}'.format(name))
name = name.split('/')
# Pretraining-only heads are not part of SANBertNetwork — skip them.
if name[0] in ['redictions', 'eq_relationship', 'cls', 'output']:
logger.info('Skipping')
continue
pointer = model
for m_name in name:
if flag: continue
# Path components like "layer_11" split into attribute name + index.
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
# TF dense layers call the weight matrix 'kernel'; pytorch calls it 'weight'.
if l[0] == 'kernel':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
# TF stores dense kernels transposed relative to torch.nn.Linear.
array = np.transpose(array)
elif flag:
continue
# For the flagged (squad/out_proj) names, resolve the target parameter directly.
pointer = getattr(getattr(pointer, name[0]), name[1])
try:
assert tuple(pointer.shape) == array.shape
except AssertionError as e:
# Attach both shapes to the error so mismatches are easy to diagnose.
e.args += (pointer.shape, array.shape)
raise
pointer.data = torch.from_numpy(array)
# Persist the populated state dict together with the BERT config.
nstate_dict = model.state_dict()
params = {'state':nstate_dict, 'config': config.to_dict()}
torch.save(params, pytorch_dump_path)
# Script entry point: combine the checkpoint-path flags with the model and
# training option groups defined above, then run the conversion.
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--tf_checkpoint_root', type=str, required=True)
parser.add_argument('--pytorch_checkpoint_path', type=str, required=True)
parser = model_config(parser)
parser = train_config(parser)
args = parser.parse_args()
logger.info(args)
convert(args)
| 8,570 | 42.729592 | 95 | py |
bluebert | bluebert-master/mribert/sequence_classification.py | """
Usage:
my_program.py [options]
Options:
--do_train Whether to run training.
--do_test Whether to run testing.
--do_predict Whether to run predicting.
--do_debug
--log_file=<file> path for log file. [default: log.txt]
--csv_logger=<file> path for training log file. [default: log.csv]
--output_dir=<dir> The output directory where the model checkpoints will be written. [default: checkpoint]
--dataset=<file> dataset to use [default: dataset.csv]
--train_dataset=<str> [default: 1,2,3,4,5,6,7]
--val_dataset=<str> [default: 8]
--test_dataset=<str> [default: 9,10]
--pred_dataset=<file dataset to predict [default: unlabelled_dataset.csv]
--test_predictions=<str> [default: test_predictions.csv]
--pred_predictions=<str> [default: pred_predictions.csv]
--best_model=<str> best trained model. [default: best_model.h5]
--bert_config_file=<str> The config json file corresponding to the pre-trained BERT model.
--init_checkpoint=<str> Initial checkpoint (usually from a pre-trained BERT model).
--vocab_file=<str> The vocabulary file that the BERT model was trained on.
--do_lower_case Whether to lower case the input text.
--epochs=<int> Total number of training epochs to perform. [default: 3]
--batch_size=<int> Total batch size. [default: 8]
--learning_rate=<float> The initial learning rate for Adam. [default: 5e-5]
--workers=<int> number of workers for data processing [default: 3]
--earlystop=<int> [default: 5]
--seed=<int> random seed to use. [default: 123]
--warmup_proportion=<float> Proportion of training to perform linear learning rate warmup for. [default: 0.1]
--max_seq_length=<int> The maximum total input sequence length after WordPiece tokenization. [default: 128]
--text_col=<str> [default: x]
--label_col=<str> [default: y]
--fold_col=<str> [default: fold]
--verbose
"""
import argparse
import json
import logging as log
import math
import os
import sys
from typing import List
import docopt
import keras
import numpy as np
import pandas as pd
from keras import backend as K
from keras import callbacks
from keras.initializers import TruncatedNormal
from keras.layers import Dense, Dropout, Masking, GlobalAveragePooling1D, Lambda
from keras.models import Model, load_model
from keras.utils import Sequence, to_categorical
from keras_bert import load_trained_model_from_checkpoint, AdamWarmup, calc_train_steps, load_vocabulary, Tokenizer, \
get_custom_objects
from tabulate import tabulate
from keras_image_app import pmetrics
from keras_image_app.image_utils import set_logger, load_instances, dump_model, dict_to_namespace, print_model, pick_device, \
get_class_weights
# Keras Sequence that serves (token_ids, segment_ids), one-hot-label batches
# for BERT-style text classification from a pandas DataFrame.
# NOTE(review): indentation was lost in this dump; nesting below follows the
# apparent structure.
class TextDataFrameIterator(Sequence):
def __init__(self,
dataframe: pd.DataFrame,
tokenizer,
classes: List[str] = None,
x_col="text",
y_col="class",
seq_len=128,
batch_size=32,
shuffle=True,
seed=None,
do_lower_case=True):
self.dataframe = dataframe # type: pd.DataFrame
self.shuffle = shuffle
self.batch_size = batch_size
self.x_col = x_col
self.y_col = y_col
self.seq_len = seq_len
self.do_lower_case = do_lower_case
self.tokenizer = tokenizer
self.seed = seed
# If no explicit class list is given, derive a sorted one from the label column
# so the class -> index mapping is deterministic.
if classes is None:
self.classes = list(sorted(set(self.dataframe[self.y_col])))
else:
self.classes = classes
self.class_indices = {cls: i for i, cls in enumerate(self.classes)}
# Reshuffle row order between epochs when shuffle is enabled.
def on_epoch_end(self):
if self.shuffle:
self.dataframe = self.dataframe.sample(frac=1, random_state=self.seed).reset_index(drop=True)
# Return batch `idx`: ([token_ids, zero segment_ids], one-hot labels).
def __getitem__(self, idx):
tokens, labels = [], []
batch = self.dataframe.iloc[idx * self.batch_size: (idx + 1) * self.batch_size]
for text, label in zip(batch[self.x_col], batch[self.y_col]):
if self.do_lower_case:
text = text.lower()
# encode() pads/truncates to seq_len; the second return value (segments) is unused.
token, _ = self.tokenizer.encode(text, max_len=self.seq_len)
tokens.append(token)
labels.append(self.class_indices[label])
tokens = np.array(tokens)
labels = to_categorical(labels, len(self.classes)).astype(int)
# keras-bert expects [token_ids, segment_ids]; all-zero segments = single sentence.
return [tokens, np.zeros_like(tokens)], np.array(labels)
# Number of batches per epoch (last batch may be partial).
def __len__(self):
return math.ceil(len(self.dataframe) / self.batch_size)
# Parse the docopt usage string at the top of this module into an
# argparse-style Namespace.  'val_dataset' is kept as a raw string
# (not type-converted) per the donot_convert set — presumably because it is
# split on ',' later; verify against dict_to_namespace's semantics.
def get_args() -> argparse.Namespace:
args = docopt.docopt(__doc__)
namespace = dict_to_namespace(args, donot_convert={'val_dataset'})
return namespace
# Build the fine-tuning classifier: pretrained BERT body + tanh pooler over
# the [CLS] token + dropout + softmax head with args.n_classes outputs.
# Also dumps the model structure to output_dir/bert_structure.json.
def get_model(args):
with open(args.bert_config_file, 'r') as fp:
config = json.load(fp)
# Load the TF BERT checkpoint as a trainable keras model (top layer only).
bert_model = load_trained_model_from_checkpoint(
config_file=args.bert_config_file,
checkpoint_file=args.init_checkpoint,
training=False,
trainable=True,
output_layer_num=1,
seq_len=args.max_seq_length
)
bert_output = bert_model.outputs[0]
# first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
# x = Masking(mask_value=0.)(bert_output)
# x = GlobalAveragePooling1D()(x)
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
x = Lambda(lambda x: K.squeeze(x[:, 0:1, :], axis=1))(bert_output)
# Pooler dense layer, initialized like the original BERT pooler.
x = Dense(config['hidden_size'], activation='tanh',
kernel_initializer=TruncatedNormal(mean=0., stddev=config['initializer_range']))(x)
x = Dropout(0.1)(x)
predictions = Dense(args.n_classes, activation='softmax',
kernel_initializer=TruncatedNormal(mean=0., stddev=0.02))(x)
final_model = Model(inputs=bert_model.inputs, outputs=predictions)
print_model(final_model)
dump_model(os.path.join(args.output_dir, 'bert_structure.json'), final_model)
return final_model
# Driver: parse docopt args, load the fold-labelled dataset, then run any of
# the four modes (train / test / predict / debug) that were requested.
# NOTE(review): indentation was lost in this dump; nesting below follows the
# apparent structure.
def main():
args = get_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
np.random.seed(args.seed)
# Verbose flag switches root logging to DEBUG; both go to stdout.
if args.verbose:
log.basicConfig(level=log.DEBUG, stream=sys.stdout)
else:
log.basicConfig(level=log.INFO, stream=sys.stdout)
log.info('\n' + tabulate(sorted(vars(args).items())))
set_logger(os.path.join(args.output_dir, args.log_file))
pick_device()
# Load the full dataset; class list is derived from the label column so the
# same class -> index mapping is used by train, test and predict.
data = load_instances(args.dataset, args.label_col)
classes = list(sorted(set(data[args.label_col])))
args.n_classes = len(classes)
token_dict = load_vocabulary(args.vocab_file)
tokenizer = Tokenizer(token_dict)
# ---- Training: fit with warmup Adam, early stopping, best-model checkpoint.
if args.do_train:
folds = [i for i in args.train_dataset.split(',')]
train_df = data[data['fold'].isin(folds)].reset_index(drop=True)
train_generator = TextDataFrameIterator(
dataframe=train_df,
tokenizer=tokenizer,
classes=classes,
x_col=args.text_col,
y_col=args.label_col,
batch_size=args.batch_size,
shuffle=True,
seq_len=args.max_seq_length,
seed=args.seed,
do_lower_case=args.do_lower_case
)
folds = [i for i in args.val_dataset.split(',')]
val_df = data[data['fold'].isin(folds)].reset_index(drop=True)
val_generator = TextDataFrameIterator(
dataframe=val_df,
tokenizer=tokenizer,
classes=classes,
x_col=args.text_col,
y_col=args.label_col,
batch_size=args.batch_size,
shuffle=False,
seq_len=args.max_seq_length,
do_lower_case=args.do_lower_case
)
# BERT-style linear warmup schedule sized from the training set.
total_steps, warmup_steps = calc_train_steps(
num_example=len(train_df),
batch_size=args.batch_size,
epochs=args.epochs,
warmup_proportion=args.warmup_proportion,
)
model = get_model(args)
earlystop = callbacks.EarlyStopping(
monitor='val_loss', min_delta=K.epsilon(), patience=args.earlystop,
verbose=1, mode='auto')
best_checkpoint = callbacks.ModelCheckpoint(
os.path.join(args.output_dir, args.best_model),
save_best_only=True, save_weights_only=False,
monitor='val_loss', mode='min', verbose=1)
csv_logger = callbacks.CSVLogger(os.path.join(args.output_dir, args.csv_logger))
callbacks_list = [earlystop, best_checkpoint, csv_logger]
# AdamWarmup with BERT's usual weight-decay exclusions.
optimizer = AdamWarmup(
decay_steps=total_steps,
warmup_steps=warmup_steps,
lr=args.learning_rate,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
min_lr=1e-5,
weight_decay=0.01,
weight_decay_pattern=['embeddings', 'kernel', 'W1', 'W2', 'Wk', 'Wq', 'Wv', 'Wo']
)
model.compile(optimizer, loss='categorical_crossentropy', metrics=['accuracy'])
# Class weights counteract label imbalance in the training folds.
cw = get_class_weights(data, args.label_col, train_generator.class_indices)
model.fit_generator(
train_generator,
class_weight=cw,
use_multiprocessing=False,
workers=args.workers,
callbacks=callbacks_list,
epochs=args.epochs,
validation_data=val_generator,
verbose=1)
# ---- Evaluation on held-out folds: reload best model, report metrics, dump CSV.
if args.do_test:
folds = [i for i in args.test_dataset.split(',')]
test_df = data[data['fold'].isin(folds)].reset_index(drop=True)
test_generator = TextDataFrameIterator(
dataframe=test_df,
tokenizer=tokenizer,
classes=classes,
x_col=args.text_col,
y_col=args.label_col,
batch_size=args.batch_size,
shuffle=False,
seq_len=args.max_seq_length,
do_lower_case=args.do_lower_case
)
print('Load from %s', os.path.join(args.output_dir, args.best_model))
model = load_model(os.path.join(args.output_dir, args.best_model), custom_objects=get_custom_objects())
# model.summary()
y_score = model.predict_generator(
test_generator,
use_multiprocessing=False,
workers=args.workers,
verbose=1)
y_pred = np.argmax(y_score, axis=1)
pred_df = pd.DataFrame(y_score, columns=classes)
pred_df = pred_df.assign(predictions=[classes[lbl] for lbl in y_pred])
y_true = test_df.loc[:, args.label_col].values
y_pred = pred_df['predictions'].values
report = pmetrics.classification_report(y_true, y_pred, classes=classes)
print(report.summary())
# print('auc', pmetrics.auc(y_true, y_score, y_column=1)[0])
result = pd.concat([test_df, pred_df], axis=1)
result.to_csv(os.path.join(args.output_dir, args.test_predictions), index=False)
# ---- Inference on an unlabelled dataset: scores + predicted class per row.
if args.do_predict:
test_df = load_instances(args.pred_dataset, args.label_col)
test_generator = TextDataFrameIterator(
dataframe=test_df,
tokenizer=tokenizer,
classes=None,
x_col=args.text_col,
y_col=args.label_col,
batch_size=args.batch_size,
shuffle=False,
seq_len=args.max_seq_length,
do_lower_case=args.do_lower_case
)
print('Load from %s', os.path.join(args.output_dir, args.best_model))
model = load_model(os.path.join(args.output_dir, args.best_model), custom_objects=get_custom_objects())
# model.summary()
y_score = model.predict_generator(
test_generator,
use_multiprocessing=False,
workers=args.workers,
verbose=1)
y_pred = np.argmax(y_score, axis=1)
pred_df = pd.DataFrame(y_score, columns=classes)
pred_df = pred_df.assign(predictions=[classes[lbl] for lbl in y_pred])
result = pd.concat([test_df, pred_df], axis=1)
result.to_csv(os.path.join(args.output_dir, args.pred_predictions), index=False)
# ---- Debug: print the shapes of the first few batches of every split.
if args.do_debug:
for dataset in [args.train_dataset, args.val_dataset, args.test_dataset]:
folds = [i for i in dataset.split(',')]
print('folds:', folds)
sub_df = data[data['fold'].isin(folds)]
generator = TextDataFrameIterator(
dataframe=sub_df,
tokenizer=tokenizer,
x_col=args.text_col,
y_col=args.label_col,
batch_size=args.batch_size,
shuffle=False,
seq_len=args.max_seq_length,
)
for i, ([tokens, _], labels) in enumerate(generator):
print(tokens.shape, type(tokens), labels.shape, type(labels))
if i == 2:
break
# Standard script entry point.
if __name__ == '__main__':
main()
| 13,154 | 37.464912 | 126 | py |
bluebert | bluebert-master/elmo/elmoft.py | #!/usr/bin/env python
# -*- coding=utf-8 -*-
###########################################################################
# File Name: elmo_finetuning.py
# Author: Shankai Yan
# E-mail: shankai.yan@nih.gov
# Created Time: 2019-03-29 19:27:12
###########################################################################
#
import os, sys, time, copy, pickle, logging, itertools
from collections import OrderedDict
from optparse import OptionParser
from tqdm import tqdm
import numpy as np
import pandas as pd
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import WeightedRandomSampler
from sklearn import metrics
from allennlp.modules.elmo import Elmo, batch_to_ids
from allennlp.modules.augmented_lstm import AugmentedLstm
from allennlp.modules.stacked_alternating_lstm import StackedAlternatingLstm
from allennlp.modules.conditional_random_field import ConditionalRandomField
from allennlp.modules.seq2seq_encoders import FeedForwardEncoder, PytorchSeq2SeqWrapper, GatedCnnEncoder, IntraSentenceAttentionEncoder, QaNetEncoder, StackedSelfAttentionEncoder
from allennlp.modules.seq2vec_encoders import BagOfEmbeddingsEncoder, PytorchSeq2VecWrapper, Seq2VecEncoder, CnnEncoder, CnnHighwayEncoder
import spacy
nlp = spacy.load('en_core_sci_md')
FILE_DIR = os.path.dirname(os.path.realpath(__file__))
PAR_DIR = os.path.abspath(os.path.join(FILE_DIR, os.path.pardir))
DATA_PATH = os.path.join(os.path.expanduser('~'), 'data', 'elmoft')
SC=';;'
opts, args = {}, []
# Base class for classifier heads sitting on top of a language model.
# Selects single- vs multi-transformer variants of the hidden-state pooling
# and LM-logit helpers at construction time.
# NOTE(review): indentation was lost in this dump; nesting below follows the
# apparent structure.
class BaseClfHead(nn.Module):
""" Classifier Head for the Basic Language Model """
def __init__(self, lm_model, config, task_type, num_lbs=1, pdrop=0.1, mlt_trnsfmr=False, **kwargs):
super(BaseClfHead, self).__init__()
self.lm_model = lm_model
self.task_type = task_type
# nmt (sequence labelling) uses 2-D dropout over the sequence; others plain dropout.
self.dropout = nn.Dropout2d(pdrop) if task_type == 'nmt' else nn.Dropout(pdrop)
self.last_dropout = nn.Dropout(pdrop)
# Pick the multi-transformer or single variants once, up front.
self.lm_logit = self._mlt_lm_logit if mlt_trnsfmr else self._lm_logit
self.clf_h = self._mlt_clf_h if mlt_trnsfmr else self._clf_h
self.num_lbs = num_lbs
self.kwprop = {}
# Stash any extra keyword options directly as attributes.
for k, v in kwargs.items():
setattr(self, k, v)
# Flatten hidden states to (N, n_embd); pool_idx passed through unchanged.
def _clf_h(self, hidden_states, pool_idx, past=None):
return hidden_states.view(-1, self.n_embd), pool_idx
# Multi-transformer variant: sum over the transformer axis before flattening.
def _mlt_clf_h(self, hidden_states, pool_idx, past=None):
return hidden_states.sum(1).view(-1, self.n_embd), pool_idx.max(1)[0]
def transformer(self, input_ids):
return self.lm_model.transformer(input_ids=input_ids)
# LM logits for next-token prediction: hidden states shifted against targets.
def _lm_logit(self, input_ids, hidden_states, past=None):
lm_h = hidden_states[:,:-1]
return self.lm_model.lm_head(lm_h), input_ids[:,1:]
def _mlt_lm_logit(self, input_ids, hidden_states, past=None):
lm_h = hidden_states[:,:,:-1].contiguous().view(-1, self.n_embd)
lm_target = input_ids[:,:,1:].contiguous().view(-1)
return self.lm_model.lm_head(lm_h), lm_target.view(-1)
# ELMo-based classifier head.  The constructor picks exactly one pooling
# strategy over the ELMo representations — seq2seq (nmt), seq2vec encoder,
# 2-D max/avg pooling, or first/last-token gather — and builds a matching
# linear head.  forward() handles paired-sentence tasks ('entlmnt',
# 'sentsim') and single-input tasks, returning logits when labels is None
# or (loss, None) otherwise.
# NOTE(review): indentation was lost in this dump; nesting below follows the
# apparent structure and should be confirmed against the original file.
class ELMoClfHead(BaseClfHead):
def __init__(self, lm_model, config, task_type, iactvtn='relu', oactvtn='sigmoid', fchdim=0, w2v_path=None, num_lbs=1, mlt_trnsfmr=False, pdrop=0.2, pool=None, seq2seq=None, seq2vec=None, do_norm=True, norm_type='batch', do_lastdrop=True, do_crf=False, initln=False, initln_mean=0., initln_std=0.02, task_params={}, **kwargs):
from gensim.models import KeyedVectors
from gensim.models.keyedvectors import Word2VecKeyedVectors
super(ELMoClfHead, self).__init__(lm_model, config, task_type, num_lbs=num_lbs, pdrop=pdrop, mlt_trnsfmr=mlt_trnsfmr, do_norm=do_norm, do_lastdrop=do_lastdrop, do_crf=do_crf, task_params=task_params, **kwargs)
self.vocab_size = 793471
# Paired-sentence tasks concatenate two encodings, doubling the feature dim.
self.dim_mulriple = 2 if task_type == 'entlmnt' or (task_type == 'sentsim' and (self.task_params.setdefault('sentsim_func', None) is None or self.task_params['sentsim_func'] == 'concat')) else 1 # two or one sentence
self.n_embd = config['elmoedim'] * 2 # two ELMo layer * sentence number * ELMo embedding dimensions
# Optional word2vec side-channel embeddings (gensim KeyedVectors).
self.w2v_model = w2v_path if type(w2v_path) is Word2VecKeyedVectors else (KeyedVectors.load(w2v_path, mmap='r') if w2v_path and os.path.isfile(w2v_path) else None)
self._int_actvtn = ACTVTN_MAP[iactvtn]
self._out_actvtn = ACTVTN_MAP[oactvtn]
self.fchdim = fchdim
self.crf = ConditionalRandomField(num_lbs) if do_crf else None
# --- nmt: per-token labelling, optional seq2seq encoder on top of ELMo.
if task_type == 'nmt':
self.pool = None
self.seq2vec = None
if seq2seq:
params = {}
if seq2seq.startswith('pytorch-'):
pth_mdl = '-'.join(seq2seq.split('-')[1:])
_ = [params.update(x) for x in [SEQ2SEQ_MDL_PARAMS.setdefault('pytorch', {}).setdefault('elmo', {}), SEQ2SEQ_TASK_PARAMS.setdefault(seq2seq, {}).setdefault(task_type, {})]]
self.seq2seq = gen_pytorch_wrapper('seq2seq', pth_mdl, **params[pth_mdl])
encoder_odim = SEQ2SEQ_DIM_INFER[seq2seq]([self.n_embd, self.dim_mulriple, params[pth_mdl]])
else:
_ = [params.update(x) for x in [SEQ2SEQ_MDL_PARAMS.setdefault(seq2seq, {}).setdefault('elmo', {}), SEQ2SEQ_TASK_PARAMS.setdefault(seq2seq, {}).setdefault(task_type, {})]]
self.seq2seq = SEQ2SEQ_MAP[seq2seq](**params)
if hasattr(self.seq2seq, 'get_output_dim'):
encoder_odim = self.seq2seq.get_output_dim()
else:
encoder_odim = SEQ2SEQ_DIM_INFER[seq2seq]([self.n_embd, self.dim_mulriple, params])
else:
self.seq2seq = None
encoder_odim = self.n_embd
self.maxlen = self.task_params.setdefault('maxlen', 128)
self.norm = NORM_TYPE_MAP[norm_type](self.maxlen)
self.linear = nn.Sequential(nn.Linear(encoder_odim, fchdim), self._int_actvtn(), nn.Linear(fchdim, fchdim), self._int_actvtn(), nn.Linear(fchdim, num_lbs), self._out_actvtn()) if fchdim else nn.Sequential(nn.Linear(encoder_odim, num_lbs), self._out_actvtn())
# --- seq2vec: encode the whole sequence to one vector (allennlp or pytorch wrapper).
elif seq2vec:
self.pool = None
params = {}
if seq2vec.startswith('pytorch-'):
pth_mdl = '-'.join(seq2vec.split('-')[1:])
_ = [params.update(x) for x in [SEQ2VEC_MDL_PARAMS.setdefault('pytorch', {}).setdefault('elmo', {}), SEQ2VEC_TASK_PARAMS.setdefault('pytorch', {}).setdefault(task_type, {})]]
_ = [params.update({p:kwargs[k]}) for k, p in SEQ2VEC_LM_PARAMS_MAP.setdefault('pytorch', []) if k in kwargs]
self.seq2vec = gen_pytorch_wrapper('seq2vec', pth_mdl, **params[pth_mdl])
encoder_odim = SEQ2VEC_DIM_INFER[seq2vec]([self.n_embd, self.dim_mulriple, params[pth_mdl]])
else:
_ = [params.update(x) for x in [SEQ2VEC_MDL_PARAMS.setdefault(seq2vec, {}).setdefault('elmo', {}), SEQ2VEC_TASK_PARAMS.setdefault(seq2vec, {}).setdefault(task_type, {})]]
_ = [params.update({p:kwargs[k]}) for k, p in SEQ2VEC_LM_PARAMS_MAP.setdefault(seq2vec, []) if k in kwargs]
self.seq2vec = SEQ2VEC_MAP[seq2vec](**params)
if hasattr(self.seq2vec, 'get_output_dim') and seq2vec != 'boe':
encoder_odim = self.seq2vec.get_output_dim()
else:
encoder_odim = SEQ2VEC_DIM_INFER[seq2vec]([self.n_embd, self.dim_mulriple, params])
self.maxlen = self.task_params.setdefault('maxlen', 128)
self.norm = NORM_TYPE_MAP[norm_type](encoder_odim)
# Head layout depends on task type, fchdim, and whether a pairwise
# similarity function (vs plain concat) is used for sentsim.
self.linear = (nn.Sequential(nn.Linear(self.dim_mulriple * encoder_odim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), *([] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.fchdim, num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Sequential(nn.Linear(encoder_odim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, num_lbs))) if self.fchdim else (nn.Sequential(*([nn.Linear(self.dim_mulriple * encoder_odim, self.dim_mulriple * encoder_odim), self._int_actvtn()] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.dim_mulriple * encoder_odim, num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Linear(encoder_odim, num_lbs))
# --- pool: fixed 2-D max/avg pooling over the (reshaped) ELMo output.
elif pool:
self.seq2vec = None
self.pool = nn.MaxPool2d(8, stride=4) if pool == 'max' else nn.AvgPool2d(8, stride=4)
# Magic sizes 32130/16065 appear tied to maxlen/elmoedim via the pool
# geometry — TODO confirm where they come from.
self.norm = NORM_TYPE_MAP[norm_type](32130 if self.task_type == 'sentsim' or self.task_type == 'entlmnt' else 16065)
self.linear = (nn.Sequential(nn.Linear(self.dim_mulriple * encoder_odim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), *([] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.fchdim, num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Sequential(nn.Linear(encoder_odim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, num_lbs))) if self.fchdim else (nn.Sequential(*([nn.Linear(self.dim_mulriple * encoder_odim, self.dim_mulriple * encoder_odim), self._int_actvtn()] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.dim_mulriple * encoder_odim, num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Linear(encoder_odim, num_lbs))
# --- default: gather a single token's representation (see forward()).
else:
self.pool = None
self.seq2vec = None
self.norm = NORM_TYPE_MAP[norm_type](self.n_embd)
self.linear = (nn.Sequential(nn.Linear(self.n_embd, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), *([] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.fchdim, num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Sequential(nn.Linear(self.n_embd, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, self.fchdim), self._int_actvtn(), nn.Linear(self.fchdim, num_lbs))) if fchdim else (nn.Sequential(*([nn.Linear(self.n_embd, self.n_embd), self._int_actvtn()] if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else [nn.Linear(self.n_embd, num_lbs), self._out_actvtn()])) if self.task_type in ['entlmnt', 'sentsim'] else nn.Linear(self.n_embd, num_lbs))
if (initln): self.linear.apply(_weights_init(mean=initln_mean, std=initln_std))
# Returns logits when labels is None, else (per-example loss, None).
def forward(self, input_ids, pool_idx, w2v_ids=None, char_ids=None, labels=None, past=None, weights=None):
use_gpu = next(self.parameters()).is_cuda
# Paired-sentence tasks: input_ids/pool_idx are 2-element sequences.
if self.task_type in ['entlmnt', 'sentsim']:
# Boolean mask of valid positions (<= pool_idx) per sentence.
mask = [torch.arange(input_ids[x].size()[1]).to('cuda').unsqueeze(0).expand(input_ids[x].size()[:2]) <= pool_idx[x].unsqueeze(1).expand(input_ids[x].size()[:2]) if use_gpu else torch.arange(input_ids[x].size()[1]).unsqueeze(0).expand(input_ids[x].size()[:2]) <= pool_idx[x].unsqueeze(1).expand(input_ids[x].size()[:2]) for x in [0,1]]
embeddings = (self.lm_model(input_ids[0]), self.lm_model(input_ids[1]))
# Concatenate the ELMo layer representations along the feature axis.
clf_h = torch.cat(embeddings[0]['elmo_representations'], dim=-1), torch.cat(embeddings[1]['elmo_representations'], dim=-1)
# Optionally append word2vec features per token.
if (w2v_ids is not None and self.w2v_model):
wembd_tnsr = [torch.tensor([self.w2v_model.syn0[s] for s in w2v_ids[x]]) for x in [0,1]]
if use_gpu: wembd_tnsr = [x.to('cuda') for x in wembd_tnsr]
clf_h = [torch.cat([clf_h[x], wembd_tnsr[x]], dim=-1) for x in [0,1]]
if self.seq2vec:
clf_h = [self.seq2vec(clf_h[x], mask=mask[x]) for x in [0,1]]
elif self.pool:
clf_h = [clf_h[x].view(clf_h[x].size(0), 2*clf_h[x].size(1), -1) for x in [0,1]]
clf_h = [self.pool(clf_h[x]).view(clf_h[x].size(0), -1) for x in [0,1]]
else:
# Gather the hidden state at each sentence's pool_idx position.
clf_h = [clf_h[x].gather(1, pool_idx[x].unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h[x].size(2))).squeeze(1) for x in [0,1]]
else:
# Single-input tasks.
mask = torch.arange(input_ids.size()[1]).to('cuda').unsqueeze(0).expand(input_ids.size()[:2]) <= pool_idx.unsqueeze(1).expand(input_ids.size()[:2]) if use_gpu else torch.arange(input_ids.size()[1]).unsqueeze(0).expand(input_ids.size()[:2]) <= pool_idx.unsqueeze(1).expand(input_ids.size()[:2])
embeddings = self.lm_model(input_ids)
clf_h = torch.cat(embeddings['elmo_representations'], dim=-1)
if self.task_type == 'nmt':
clf_h = clf_h
if (self.seq2seq): clf_h = self.seq2seq(clf_h, mask=mask)
elif self.seq2vec:
clf_h = self.seq2vec(clf_h, mask=mask)
elif self.pool:
clf_h = clf_h.view(clf_h.size(0), 2*clf_h.size(1), -1)
clf_h = self.pool(clf_h).view(clf_h.size(0), -1)
else:
clf_h = clf_h.gather(1, pool_idx.unsqueeze(-1).unsqueeze(-1).expand(-1, 1, clf_h.size(2))).squeeze(1)
# Normalize / dropout, then compute logits (or a pairwise similarity).
if self.task_type in ['entlmnt', 'sentsim']:
if self.do_norm: clf_h = [self.norm(clf_h[x]) for x in [0,1]]
clf_h = [self.dropout(clf_h[x]) for x in [0,1]]
if (self.task_type == 'entlmnt' or self.task_params.setdefault('sentsim_func', None) is None or self.task_params['sentsim_func'] == 'concat'):
# clf_h = (torch.cat(clf_h, dim=-1) + torch.cat(clf_h[::-1], dim=-1))
clf_h = torch.cat(clf_h, dim=-1)
clf_logits = self.linear(clf_h) if self.linear else clf_h
else:
# sentsim with a pairwise function: L2 distance or cosine similarity.
clf_logits = clf_h = F.pairwise_distance(self.linear(clf_h[0]), self.linear(clf_h[1]), 2, eps=1e-12) if self.task_params['sentsim_func'] == 'dist' else F.cosine_similarity(self.linear(clf_h[0]), self.linear(clf_h[1]), dim=1, eps=1e-12)
else:
if self.do_norm: clf_h = self.norm(clf_h)
clf_h = self.dropout(clf_h)
clf_logits = self.linear(clf_h)
if self.do_lastdrop: clf_logits = self.last_dropout(clf_logits)
# Inference path: no labels given.
if (labels is None):
if self.crf:
# Decode with Viterbi and return one-hot logits over the tag sequence.
tag_seq, score = zip(*self.crf.viterbi_tags(clf_logits.view(input_ids.size()[0], -1, self.num_lbs), torch.ones(*(input_ids.size()[:2])).int()))
tag_seq = torch.tensor(tag_seq).to('cuda') if use_gpu else torch.tensor(tag_seq)
clf_logits = torch.zeros((*tag_seq.size(), self.num_lbs)).to('cuda') if use_gpu else torch.zeros((*tag_seq.size(), self.num_lbs))
clf_logits = clf_logits.scatter(-1, tag_seq.unsqueeze(-1), 1)
return clf_logits
# Flip distance->similarity (or vice versa) when the similarity function
# disagrees with the label convention ('ymode').
if (self.task_type == 'sentsim' and self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != self.task_params['ymode']): return 1 - clf_logits.view(-1, self.num_lbs)
return clf_logits.view(-1, self.num_lbs)
# Training path: pick the loss matching the task type.
if self.crf:
clf_loss = -self.crf(clf_logits.view(input_ids.size()[0], -1, self.num_lbs), mask.long())
elif self.task_type == 'mltc-clf' or self.task_type == 'entlmnt' or self.task_type == 'nmt':
loss_func = nn.CrossEntropyLoss(weight=weights, reduction='none')
clf_loss = loss_func(clf_logits.view(-1, self.num_lbs), labels.view(-1))
elif self.task_type == 'mltl-clf':
loss_func = nn.MultiLabelSoftMarginLoss(weight=weights, reduction='none')
clf_loss = loss_func(clf_logits.view(-1, self.num_lbs), labels.view(-1, self.num_lbs).float())
elif self.task_type == 'sentsim':
loss_func = ContrastiveLoss(reduction='none', x_mode=SIM_FUNC_MAP.setdefault(self.task_params['sentsim_func'], 'dist'), y_mode=self.task_params.setdefault('ymode', 'sim')) if self.task_params.setdefault('sentsim_func', None) and self.task_params['sentsim_func'] != 'concat' else nn.MSELoss(reduction='none')
clf_loss = loss_func(clf_logits.view(-1), labels.view(-1))
return clf_loss, None
class BaseDataset(Dataset):
	"""Basic dataset class

	Loads a delimited text file (or an existing DataFrame) into ``self.df``, builds
	the label<->index mappings ``binlb``/``binlbr`` (or leaves them ``None`` for
	regression), and applies an optional chain of transforms per sample.
	"""
	def __init__(self, csv_file, text_col, label_col, encode_func, tokenizer, sep='\t', binlb=None, transforms=[], transforms_args={}, transforms_kwargs=[], mltl=False, **kwargs):
		# NOTE(review): the mutable default arguments (transforms / transforms_args /
		# transforms_kwargs) are shared across instances and _transform_chain mutates
		# the kwargs dicts in place -- confirm callers always pass fresh objects.
		# Column identifiers are stringified so they match the stringified df columns below.
		self.text_col = [str(s) for s in text_col] if hasattr(text_col, '__iter__') and type(text_col) is not str else str(text_col)
		self.label_col = [str(s) for s in label_col] if hasattr(label_col, '__iter__') and type(label_col) is not str else str(label_col)
		self.df = self._df = csv_file if type(csv_file) is pd.DataFrame else pd.read_csv(csv_file, sep=sep, encoding='utf-8', engine='python', error_bad_lines=False, dtype={self.label_col:'float' if binlb == 'rgrsn' else str}, **kwargs)
		self.df.columns = self.df.columns.astype(str, copy=False)
		# Drop rows with missing labels.
		self.df = self.df[self.df[self.label_col].notnull()]
		self.mltl = mltl
		if (binlb == 'rgrsn'):
			# Regression task: no label binarization.
			self.binlb = None
			self.binlbr = None
		elif (type(binlb) is str and binlb.startswith('mltl')):
			# Multi-label task: labels are joined by the separator encoded after SC in `binlb`.
			sc = binlb.split(SC)[-1]
			lb_df = self.df[self.df[self.label_col].notnull()][self.label_col]
			labels = sorted(set([lb for lbs in lb_df for lb in lbs.split(sc)])) if type(lb_df.iloc[0]) is not list else sorted(set([lb for lbs in lb_df for lb in lbs]))
			self.binlb = OrderedDict([(lb, i) for i, lb in enumerate(labels)])
			self.binlbr = OrderedDict([(i, lb) for i, lb in enumerate(labels)])
			self.mltl = True
		elif (binlb is None):
			# Infer the label set from the data.
			lb_df = self.df[self.df[self.label_col].notnull()][self.label_col]
			labels = sorted(set(lb_df)) if type(lb_df.iloc[0]) is not list else sorted(set([lb for lbs in lb_df for lb in lbs]))
			self.binlb = OrderedDict([(lb, i) for i, lb in enumerate(labels)])
			self.binlbr = OrderedDict([(i, lb) for i, lb in enumerate(labels)])
		else:
			# Caller-supplied mapping (e.g. reuse the training set's binlb for dev/test).
			self.binlb = binlb
			self.binlbr = OrderedDict([(i, lb) for lb, i in binlb.items()])
		self.encode_func = encode_func
		self.tokenizer = tokenizer
		if hasattr(tokenizer, 'vocab'):
			self.vocab_size = len(tokenizer.vocab)
		elif hasattr(tokenizer, 'vocab_size'):
			self.vocab_size = tokenizer.vocab_size
		self.transforms = transforms
		self.transforms_args = transforms_args
		self.transforms_kwargs = transforms_kwargs
	def __len__(self):
		return len(self.df)
	def __getitem__(self, idx):
		"""Return (row index, encoded text or tensor, label tensor) for sample `idx`."""
		record = self.df.iloc[idx]
		sample = self.encode_func(record[self.text_col], self.tokenizer), record[self.label_col]
		sample = self._transform_chain(sample)
		return self.df.index[idx], (sample[0] if type(sample[0]) is str or type(sample[0][0]) is str else torch.tensor(sample[0])), torch.tensor(sample[1])
	def _transform_chain(self, sample):
		"""Apply each configured transform (callable or method name) to `sample` in order."""
		if self.transforms:
			self.transforms = self.transforms if type(self.transforms) is list else [self.transforms]
			self.transforms_kwargs = self.transforms_kwargs if type(self.transforms_kwargs) is list else [self.transforms_kwargs]
			for transform, transform_kwargs in zip(self.transforms, self.transforms_kwargs):
				# NOTE(review): update() mutates the caller's kwargs dicts (often module-level
				# TASK_TRSFM entries) -- side effects persist across datasets; confirm intended.
				transform_kwargs.update(self.transforms_args)
				sample = transform(sample, **transform_kwargs) if callable(transform) else getattr(self, transform)(sample, **transform_kwargs)
		return sample
	def _nmt_transform(self, sample, options=None, binlb={}):
		# Sequence-labelling: map each token label to its index, growing binlb on the fly.
		if (len(binlb) > 0): self.binlb = binlb
		return sample[0], [self.binlb.setdefault(y, len(self.binlb)) for y in sample[1]]
	def _mltc_transform(self, sample, options=None, binlb={}):
		# Multi-class: map the single label to its index, growing binlb on the fly.
		if (len(binlb) > 0): self.binlb = binlb
		return sample[0], self.binlb.setdefault(sample[1], len(self.binlb))
	def _mltl_transform(self, sample, options=None, binlb={}, get_lb=lambda x: x.split(SC)):
		# Multi-label: produce a 0/1 indicator vector over the label vocabulary.
		if (len(binlb) > 0): self.binlb = binlb
		labels = get_lb(sample[1])
		return sample[0], [1 if lb in labels else 0 for lb in self.binlb.keys()]
	def fill_labels(self, lbs, binlb=True, index=None, saved_path=None, **kwargs):
		"""Write predicted labels back into a copy of the original dataframe (optionally to CSV)."""
		if binlb and self.binlbr is not None:
			# Multi-label: join active label names with ';'; otherwise emit 'idx_value' pairs.
			lbs = [(';'.join([self.binlbr[l] for l in np.where(lb == 1)[0]]) if self.mltl else ','.join(['_'.join([str(i), str(l)]) for i, l in enumerate(lb)])) if hasattr(lb, '__iter__') else self.binlbr[lb] for lb in lbs]
		filled_df = self._df.copy(deep=True)[~self._df.index.duplicated(keep='first')]
		try:
			if index:
				filled_df.loc[index, self.label_col] = lbs
			else:
				filled_df[self.label_col] = lbs
		except Exception as e:
			# Keep the partial state around for post-mortem debugging.
			print(e)
			with open('pred_lbs.tmp', 'wb') as fd:
				pickle.dump((filled_df, index, self.label_col, lbs), fd)
			raise e
		if (saved_path is not None):
			filled_df.to_csv(saved_path, **kwargs)
		return filled_df
	def rebalance(self):
		"""Downsample the majority class towards the mean class size (in place on self.df).

		NOTE(review): relies on module-level `opts` and the TASK_* registries -- only
		usable inside this script's run context.
		"""
		if (self.binlb is None): return
		task_cols, task_trsfm, task_extparms = TASK_COL_MAP[opts.task], TASK_TRSFM[opts.task], TASK_EXT_PARAMS[opts.task]
		lb_trsfm = [x['get_lb'] for x in task_trsfm[1] if 'get_lb' in x]
		self.df = self._df
		if len(lb_trsfm) > 0:
			lb_df = self.df[task_cols['y']].apply(lb_trsfm[0])
		else:
			lb_df = self.df[task_cols['y']]
		if (type(lb_df.iloc[0]) is list):
			# Multi-label: treat the all-zero rows as the "majority" bucket.
			lb_df[:] = [self._mltl_transform((None, SC.join(lbs)))[1] for lbs in lb_df]
			max_lb_df = lb_df.loc[[idx for idx, lbs in lb_df.iteritems() if np.sum(list(map(int, lbs))) == 0]]
			max_num, avg_num = max_lb_df.shape[0], 1.0 * lb_df[~lb_df.index.isin(max_lb_df.index)].shape[0] / len(lb_df.iloc[0])
		else:
			class_count = np.array([[1 if lb in y else 0 for lb in self.binlb.keys()] for y in lb_df if y is not None]).sum(axis=0)
			max_num, max_lb_bin = class_count.max(), class_count.argmax()
			max_lb_df = lb_df[lb_df == self.binlbr[max_lb_bin]]
			avg_num = np.mean([class_count[x] for x in range(len(class_count)) if x != max_lb_bin])
		removed_idx = max_lb_df.sample(n=int(max_num-avg_num), random_state=1).index
		self.df = self.df.loc[list(set(self.df.index)-set(removed_idx))]
	def remove_mostfrqlb(self):
		"""Drop every row carrying the most frequent label (in place on self.df)."""
		if (self.binlb is None or self.binlb == 'rgrsn'): return
		task_cols, task_trsfm, task_extparms = TASK_COL_MAP[opts.task], TASK_TRSFM[opts.task], TASK_EXT_PARAMS[opts.task]
		lb_trsfm = [x['get_lb'] for x in task_trsfm[1] if 'get_lb' in x]
		self.df = self._df
		if len(lb_trsfm) > 0:
			lb_df = self.df[task_cols['y']].apply(lb_trsfm[0])
		else:
			lb_df = self.df[task_cols['y']]
		class_count = np.array([[1 if lb in y else 0 for lb in self.binlb.keys()] for y in lb_df if y]).sum(axis=0)
		max_num, max_lb_bin = class_count.max(), class_count.argmax()
		max_lb_df = lb_df[lb_df == self.binlbr[max_lb_bin]]
		self.df = self.df.loc[list(set(self.df.index)-set(max_lb_df.index))]
class SentSimDataset(BaseDataset):
	"""Sentence-similarity (regression) dataset: a pair of sentences with a 0-5 score."""
	def __getitem__(self, idx):
		row = self.df.iloc[idx]
		encoded = [self.encode_func(row[col], self.tokenizer) for col in self.text_col]
		encoded, score = self._transform_chain((encoded, row[self.label_col]))
		still_text = type(encoded[0]) is str or type(encoded[0][0]) is str
		x = encoded if still_text else torch.tensor(encoded)
		# Gold scores are on a 0-5 scale; normalise to [0, 1]; a NaN score becomes 0.
		y = torch.tensor(0 if score is np.nan else float(score) / 5.0)
		return self.df.index[idx], x, y
	def fill_labels(self, lbs, index=None, saved_path=None, **kwargs):
		"""Write predictions back on the original 0-5 scale (optionally saving to CSV)."""
		lbs = 5.0 * lbs
		filled_df = self._df.copy(deep=True)[~self._df.index.duplicated(keep='first')]
		if index:
			filled_df.loc[index, self.label_col] = lbs
		else:
			filled_df[self.label_col] = lbs
		if (saved_path is not None):
			filled_df.to_csv(saved_path, **kwargs)
		return filled_df
class EntlmntDataset(BaseDataset):
	"""Textual-entailment dataset: a premise/hypothesis pair mapped to one class label."""
	def __getitem__(self, idx):
		row = self.df.iloc[idx]
		encoded = [self.encode_func(row[col], self.tokenizer) for col in self.text_col]
		encoded, label = self._transform_chain((encoded, row[self.label_col]))
		head = encoded[0]
		still_text = type(head) is str or (type(head) is list and type(head[0]) is str)
		x = encoded if still_text else torch.tensor(encoded)
		return self.df.index[idx], x, torch.tensor(label)
class NERDataset(BaseDataset):
	"""NER task dataset class

	Each row of the file is one token with its tag; sentences are delimited by a
	row whose token is '.'. __getitem__ yields whole sentences, not single rows.
	"""
	def __init__(self, csv_file, text_col, label_col, encode_func, tokenizer, sep='\t', binlb=None, transforms=[], transforms_args={}, transforms_kwargs=[], **kwargs):
		super(NERDataset, self).__init__(csv_file, text_col, label_col, encode_func, tokenizer, sep=sep, header=None, skip_blank_lines=False, keep_default_na=False, na_values=[], binlb=binlb, transforms=transforms, transforms_args=transforms_args, transforms_kwargs=transforms_kwargs, **kwargs)
		# Sentence boundaries: every '.' token ends a sentence; force a final boundary.
		sep_selector = self.df[self.text_col].apply(lambda x: True if x=='.' else False)
		sep_selector.iloc[-1] = True
		int_idx = pd.DataFrame(np.arange(self.df.shape[0]), index=self.df.index)
		# boundaries[i]:boundaries[i+1] are the positional row indices of sentence i.
		self.boundaries = [0] + list(itertools.chain.from_iterable((int_idx[sep_selector.values].values+1).tolist()))
	def __len__(self):
		return len(self.boundaries) - 1
	def __getitem__(self, idx):
		"""Return (joined row ids, token ids/text, label ids/text, per-word subtoken offsets)."""
		record = self.df.iloc[self.boundaries[idx]:self.boundaries[idx+1]].dropna()
		sample = self.encode_func(record[self.text_col].values.tolist(), self.tokenizer), record[self.label_col].values.tolist()
		# num_samples[i] = how many subtokens word i was encoded into (1 when unencoded).
		num_samples = [len(x) for x in sample[0]] if (len(sample[0]) > 0 and type(sample[0][0]) is list) else [1] * len(sample[0])
		record_idx = [0] + np.cumsum(num_samples).tolist()
		is_empty = (type(sample[0]) is list and len(sample[0]) == 0) or (type(sample[0]) is list and len(sample[0]) > 0 and all([type(x) is list and len(x) == 0 for x in sample[0]]))
		# Empty sentences still yield a placeholder so batch shapes stay consistent.
		if (is_empty): return SC.join(map(str, record.index.values.tolist())), '' if self.encode_func == _tokenize else torch.LongTensor([-1]*opts.maxlen), '' if self.encode_func == _tokenize else torch.LongTensor([-1]*opts.maxlen), SC.join(map(str, record_idx))
		is_encoded = (type(sample[0]) is list and type(sample[0][0]) is int) or (type(sample[0]) is list and len(sample[0]) > 0 and type(sample[0][0]) is list and len(sample[0][0]) > 0 and type(sample[0][0][0]) is int)
		# Flatten subtokens and replicate each word's label across its subtokens.
		sample = list(itertools.chain.from_iterable(sample[0])) if is_encoded else sample[0], list(itertools.chain.from_iterable([[x] * ns for x, ns in zip(sample[1], num_samples)]))
		sample = self._transform_chain(sample)
		return SC.join(map(str, record.index.values.tolist())), (torch.tensor(sample[0]) if is_encoded else SC.join(sample[0])), (torch.tensor(sample[1]) if is_encoded else SC.join(map(str, sample[1]))), SC.join(map(str, record_idx))
	def fill_labels(self, lbs, saved_path=None, binlb=True, index=None, **kwargs):
		"""Write predicted tags back into a copy of the original token-per-row dataframe."""
		if binlb and self.binlbr is not None:
			lbs = [self.binlbr[lb] for lb in lbs]
		filled_df = self._df.copy(deep=True)[~self._df.index.duplicated(keep='first')]
		if index:
			filled_df[self.label_col] = ''
			filled_df.loc[index, self.label_col] = lbs
		else:
			filled_df[self.label_col] = lbs
		if (saved_path is not None):
			filled_df.to_csv(saved_path, sep='\t', header=None, index=None, **kwargs)
		return filled_df
def _sentclf_transform(sample, options=None, start_tknids=[], clf_tknids=[]):
X, y = sample
X = [start_tknids + x + clf_tknids for x in X] if hasattr(X, '__iter__') and len(X) > 0 and type(X[0]) is not str and hasattr(X[0], '__iter__') else start_tknids + X + clf_tknids
return X, y
def _entlmnt_transform(sample, options=None, start_tknids=[], clf_tknids=[], delim_tknids=[]):
X, y = sample
X = start_tknids + X[0] + delim_tknids + X[1] + clf_tknids
return X, y
def _sentsim_transform(sample, options=None, start_tknids=[], clf_tknids=[], delim_tknids=[]):
X, y = sample
X = [start_tknids + X[0] + delim_tknids + X[1] + clf_tknids, start_tknids + X[1] + delim_tknids + X[0] + clf_tknids]
return X, y
def _padtrim_transform(sample, options=None, seqlen=32, xpad_val=0, ypad_val=None):
X, y = sample
X = [x[:min(seqlen, len(x))] + [xpad_val] * (seqlen - len(x)) for x in X] if hasattr(X, '__iter__') and len(X) > 0 and type(X[0]) is not str and hasattr(X[0], '__iter__') else X[:min(seqlen, len(X))] + [xpad_val] * (seqlen - len(X))
if ypad_val is not None: y = [x[:min(seqlen, len(x))] + [ypad_val] * (seqlen - len(x)) for x in y] if hasattr(y, '__iter__') and len(y) > 0 and type(y[0]) is not str and hasattr(y[0], '__iter__') else y[:min(seqlen, len(y))] + [ypad_val] * (seqlen - len(y))
return X, y
def _trim_transform(sample, options=None, seqlen=32, trimlbs=False, special_tkns={}):
seqlen -= sum([len(v) for v in special_tkns.values()])
X, y = sample
X = [x[:min(seqlen, len(x))] for x in X] if hasattr(X, '__iter__') and len(X) > 0 and type(X[0]) is not str and hasattr(X[0], '__iter__') else X[:min(seqlen, len(X))]
if trimlbs: y = [x[:min(seqlen, len(x))] for x in y] if hasattr(y, '__iter__') and len(y) > 0 and type(y[0]) is not str and hasattr(y[0], '__iter__') else y[:min(seqlen, len(y))]
return X, y
def _pad_transform(sample, options=None, seqlen=32, xpad_val=0, ypad_val=None):
X, y = sample
X = [x + [xpad_val] * (seqlen - len(x)) for x in X] if hasattr(X, '__iter__') and len(X) > 0 and type(X[0]) is not str and hasattr(X[0], '__iter__') else X + [xpad_val] * (seqlen - len(X))
if ypad_val is not None: y = [x + [ypad_val] * (seqlen - len(x)) for x in y] if hasattr(y, '__iter__') and len(y) > 0 and type(y[0]) is not str and hasattr(y[0], '__iter__') else y + [ypad_val] * (seqlen - len(y))
return X, y
def _adjust_encoder(mdl_name, tokenizer, extra_tokens=[], ret_list=False):
return [[tkn] if ret_list else tkn for tkn in extra_tokens]
def _tokenize(text, tokenizer):
	"""Identity encoder used for ELMo: the raw text is returned unchanged
	(tokenization happens later, at batch time)."""
	return text
def _weights_init(mean=0., std=0.02):
def _wi(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(mean, std)
elif classname.find('Linear') != -1 or classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight, mean, std)
nn.init.normal_(m.bias, 0)
return _wi
def elmo_config(options_path, weights_path, elmoedim=1024, dropout=0.5):
	"""Assemble the keyword configuration used to build an allennlp Elmo module."""
	config = dict(options_file=options_path, weight_file=weights_path)
	config['num_output_representations'] = 2
	config['elmoedim'] = elmoedim
	config['dropout'] = dropout
	return config
# --- Task registry: per-task problem type, data folder, dataset class and column schema ---
TASK_TYPE_MAP = {'bc5cdr-chem':'nmt', 'bc5cdr-dz':'nmt', 'shareclefe':'nmt', 'ddi':'mltc-clf', 'chemprot':'mltc-clf', 'i2b2':'mltc-clf', 'hoc':'mltl-clf', 'mednli':'entlmnt', 'biosses':'sentsim', 'clnclsts':'sentsim'}
TASK_PATH_MAP = {'bc5cdr-chem':'BC5CDR-chem', 'bc5cdr-dz':'BC5CDR-disease', 'shareclefe':'ShAReCLEFEHealthCorpus', 'ddi':'ddi2013-type', 'chemprot':'ChemProt', 'i2b2':'i2b2-2010', 'hoc':'hoc', 'mednli':'mednli', 'biosses':'BIOSSES', 'clnclsts':'clinicalSTS'}
TASK_DS_MAP = {'bc5cdr-chem':NERDataset, 'bc5cdr-dz':NERDataset, 'shareclefe':NERDataset, 'ddi':BaseDataset, 'chemprot':BaseDataset, 'i2b2':BaseDataset, 'hoc':BaseDataset, 'mednli':EntlmntDataset, 'biosses':SentSimDataset, 'clnclsts':SentSimDataset}
TASK_COL_MAP = {'bc5cdr-chem':{'index':False, 'X':'0', 'y':'3'}, 'bc5cdr-dz':{'index':False, 'X':'0', 'y':'3'}, 'shareclefe':{'index':False, 'X':'0', 'y':'3'}, 'ddi':{'index':'index', 'X':'sentence', 'y':'label'}, 'chemprot':{'index':'index', 'X':'sentence', 'y':'label'}, 'i2b2':{'index':'index', 'X':'sentence', 'y':'label'}, 'hoc':{'index':'index', 'X':'sentence', 'y':'labels'}, 'mednli':{'index':'id', 'X':['sentence1','sentence2'], 'y':'label'}, 'biosses':{'index':'index', 'X':['sentence1','sentence2'], 'y':'score'}, 'clnclsts':{'index':'index', 'X':['sentence1','sentence2'], 'y':'score'}}
# --- Per-task transform chains: dataset-method transforms, then extra (functional) transforms ---
TASK_TRSFM = {'bc5cdr-chem':(['_nmt_transform'], [{}]), 'bc5cdr-dz':(['_nmt_transform'], [{}]), 'shareclefe':(['_nmt_transform'], [{}]), 'ddi':(['_mltc_transform'], [{}]), 'chemprot':(['_mltc_transform'], [{}]), 'i2b2':(['_mltc_transform'], [{}]), 'hoc':(['_mltl_transform'], [{ 'get_lb':lambda x: [s.split('_')[0] for s in x.split(',') if s.split('_')[1] == '1'], 'binlb': dict([(str(x),x) for x in range(10)])}]), 'mednli':(['_mltc_transform'], [{}]), 'biosses':([], []), 'clnclsts':([], [])}
TASK_EXT_TRSFM = {'bc5cdr-chem':([_padtrim_transform], [{}]), 'bc5cdr-dz':([_padtrim_transform], [{}]), 'shareclefe':([_padtrim_transform], [{}]), 'ddi':([_trim_transform, _sentclf_transform, _pad_transform], [{},{},{}]), 'chemprot':([_trim_transform, _sentclf_transform, _pad_transform], [{},{},{}]), 'i2b2':([_trim_transform, _sentclf_transform, _pad_transform], [{},{},{}]), 'hoc':([_trim_transform, _sentclf_transform, _pad_transform], [{},{},{}]), 'mednli':([_trim_transform, _entlmnt_transform, _pad_transform], [{},{},{}]), 'biosses':([_trim_transform, _sentsim_transform, _pad_transform], [{},{},{}]), 'clnclsts':([_trim_transform, _sentsim_transform, _pad_transform], [{},{},{}])}
TASK_EXT_PARAMS = {'bc5cdr-chem':{'ypad_val':'O', 'trimlbs':True, 'mdlcfg':{'maxlen':128}}, 'bc5cdr-dz':{'ypad_val':'O', 'trimlbs':True, 'mdlcfg':{'maxlen':128}}, 'shareclefe':{'ypad_val':'O', 'trimlbs':True, 'mdlcfg':{'maxlen':128}}, 'ddi':{'mdlcfg':{'maxlen':128}}, 'chemprot':{'mdlcfg':{'maxlen':128}}, 'i2b2':{'mdlcfg':{'maxlen':128}}, 'hoc':{'binlb': OrderedDict([(str(x),x) for x in range(10)]), 'mdlcfg':{'maxlen':128}}, 'mednli':{'mdlcfg':{'maxlen':128}}, 'biosses':{'binlb':'rgrsn', 'mdlcfg':{'maxlen':128}}, 'clnclsts':{'binlb':'rgrsn', 'ymode':'sim', 'mdlcfg':{'sentsim_func':None, 'maxlen':128}}}
# --- Model registry: only ELMo is wired in this script ---
MDL_NAME_MAP = {'elmo':'elmo'}
PARAMS_MAP = {'elmo':'ELMo'}
ENCODE_FUNC_MAP = {'elmo':_tokenize}
MODEL_MAP = {'elmo':Elmo}
CLF_MAP = {'elmo':ELMoClfHead}
CLF_EXT_PARAMS = {'elmo':{'pool':False, 'seq2seq':'isa', 'seq2vec':'boe', 'fchdim':768, 'pdrop':0.2, 'do_norm':True, 'norm_type':'batch', 'do_lastdrop':True, 'do_crf':False, 'initln':False, 'initln_mean':0., 'initln_std':0.02}}
CONFIG_MAP = {'elmo':elmo_config}
TKNZR_MAP = {'elmo':None}
# --- Sequence encoder registries (allennlp seq2seq / seq2vec wrappers) and their defaults ---
PYTORCH_WRAPPER = {'lstm':nn.LSTM, 'rnn':nn.RNN, 'gru':nn.GRU, 'agmnlstm':AugmentedLstm, 'stkaltlstm':StackedAlternatingLstm}
SEQ2SEQ_MAP = {'ff':FeedForwardEncoder, 'pytorch':PytorchSeq2SeqWrapper, 'cnn':GatedCnnEncoder, 'isa':IntraSentenceAttentionEncoder, 'qanet':QaNetEncoder, 'ssae':StackedSelfAttentionEncoder}
SEQ2SEQ_MDL_PARAMS = {'pytorch':{'elmo':{'lstm':{'input_size':2048,'hidden_size':768, 'batch_first':True}, 'rnn':{'input_size':2048,'hidden_size':768, 'batch_first':True}, 'gru':{'input_size':2048,'hidden_size':768, 'batch_first':True},'agmnlstm':{'input_size':2048,'hidden_size':768},'stkaltlstm':{'input_size':2048,'hidden_size':768, 'num_layers':3}}}, 'cnn':{'elmo':{'input_dim':2048, 'dropout':0.5, 'layers':[[[4, 2048]],[[4, 2048],[4, 2048]]]}}, 'isa':{'elmo':{'input_dim':2048}}, 'qanet':{'elmo':{}}, 'ssae':{'elmo':{'input_dim':2048, 'hidden_dim':1024, 'projection_dim':768, 'feedforward_hidden_dim':768, 'num_layers':1, 'num_attention_heads':8}}}
SEQ2SEQ_TASK_PARAMS = {}
SEQ2VEC_MAP = {'boe':BagOfEmbeddingsEncoder, 'pytorch':PytorchSeq2VecWrapper, 'allennlp':Seq2VecEncoder, 'cnn':CnnEncoder, 'cnn_highway':CnnHighwayEncoder}
SEQ2VEC_MDL_PARAMS = { \
	'boe':{ \
		'elmo':{'embedding_dim':768, 'averaged':True} \
	}, \
	'pytorch':{ \
		'elmo':{ \
			'lstm':{'input_size':2048,'hidden_size':768, 'batch_first':True}, \
			'rnn':{'input_size':2048,'hidden_size':768, 'batch_first':True}, \
			'gru':{'input_size':2048,'hidden_size':768, 'batch_first':True}, \
			'agmnlstm':{'input_size':2048,'hidden_size':768}, \
			'stkaltlstm':{'input_size':2048,'hidden_size':768, 'num_layers':3} \
		} \
	}, \
	'cnn':{ \
		'elmo':{'embedding_dim':2048, 'num_filters':768} \
	}, \
	'cnn_highway':{ \
		'elmo':{'embedding_dim':2048, 'filters':[(2, 768),(3, 768),(4, 768),(5, 768)], 'num_highway':5, 'projection_dim':2048} \
	} \
}
SEQ2VEC_TASK_PARAMS = {}
# Mapping of (config key -> encoder kwarg) used to forward language-model dims into encoders.
SEQ2VEC_LM_PARAMS_MAP = {'boe':[('hdim','embedding_dim')], 'pytorch':[('hdim', 'hidden_size')], 'cnn':[], 'cnn_highway':[]}
# Output-dimension inference per encoder type: input is (in_dim, num_directions/layers, kwargs).
SEQ2SEQ_DIM_INFER = {'pytorch-lstm':lambda x: x[1] * x[2]['hidden_size'], 'pytorch-rnn':lambda x: x[1] * x[2]['hidden_size'], 'pytorch-gru':lambda x: x[1] * x[2]['hidden_size'], 'cnn':lambda x: 2 * x[0], 'isa':lambda x: x[0]}
SEQ2VEC_DIM_INFER = {'boe':lambda x: x[0], 'pytorch-lstm':lambda x: x[2]['hidden_size'], 'pytorch-agmnlstm':lambda x: x[2]['hidden_size'], 'pytorch-rnn':lambda x: x[2]['hidden_size'], 'pytorch-stkaltlstm':lambda x: x[2]['hidden_size'], 'pytorch-gru':lambda x: x[2]['hidden_size'], 'cnn':lambda x: int(1.5 * x[2]['embedding_dim']), 'cnn_highway':lambda x: x[0]}
NORM_TYPE_MAP = {'batch':nn.BatchNorm1d, 'layer':nn.LayerNorm}
ACTVTN_MAP = {'relu':nn.ReLU, 'sigmoid':nn.Sigmoid}
SIM_FUNC_MAP = {'sim':'sim', 'dist':'dist'}
# Pretrained language-model file locations and construction parameters.
LM_PARAMS = {
	"ELMo": {
		"options_path": "options.json",
		"weights_path": "weights.hdf5",
		"elmoedim": 1024,
		"dropout": 0.5
	}
}
def gen_pytorch_wrapper(mdl_type, mdl_name, **kwargs):
	"""Build a raw pytorch recurrent module and wrap it in the matching allennlp
	seq2seq or seq2vec adapter."""
	module = PYTORCH_WRAPPER[mdl_name](**kwargs)
	if mdl_type == 'seq2seq':
		return PytorchSeq2SeqWrapper(module=module)
	return PytorchSeq2VecWrapper(module=module)
def gen_mdl(mdl_name, pretrained=True, use_gpu=False, distrb=False, dev_id=None):
	"""Build the (ELMo) language model from local weight files, falling back to the
	online pretrained model when the local files cannot be loaded.

	NOTE(review): `pretrained` is accepted but never consulted -- local files are
	always attempted first regardless of its value; confirm whether that is intended.
	"""
	try:
		params = LM_PARAMS[PARAMS_MAP[mdl_name]]
		config = CONFIG_MAP[mdl_name](**params)
		# Elmo's constructor takes these three positionally; the rest as keywords
		# ('elmoedim' is consumed elsewhere, so it is filtered out here).
		pos_params = [config[k] for k in ['options_file','weight_file', 'num_output_representations']]
		kw_params = dict([(k, config[k]) for k in config.keys() if k not in ['options_file','weight_file', 'num_output_representations', 'elmoedim']])
		model = MODEL_MAP[mdl_name](*pos_params, **kw_params)
	except Exception as e:
		print(e)
		print('Cannot find the pretrained model file, using online model instead.')
		model = MODEL_MAP[mdl_name].from_pretrained(MDL_NAME_MAP[mdl_name])
	if (use_gpu): model = _handle_model(model, dev_id=dev_id, distrb=distrb)
	return model
def gen_clf(mdl_name, use_gpu=False, distrb=False, dev_id=None, **kwargs):
	"""Instantiate the classifier head for `mdl_name`, injecting the language-model
	config, and move it to the GPU when requested."""
	lm_params = LM_PARAMS[PARAMS_MAP[mdl_name]]
	kwargs['config'] = CONFIG_MAP[mdl_name](**lm_params)
	model = CLF_MAP[mdl_name](**kwargs)
	if use_gpu:
		model = model.to('cuda')
	return model
def classify(dev_id=None):
	"""End-to-end entry point for one task: build datasets/loaders, build or resume
	the model, train it and evaluate on dev and test.

	`dev_id` is a GPU id (or list of ids); None means CPU.
	"""
	use_gpu = dev_id is not None
	encode_func = ENCODE_FUNC_MAP[opts.model]
	tokenizer = None
	task_type = TASK_TYPE_MAP[opts.task]
	# Special marker tokens: sentence-pair tasks also need a delimiter token.
	special_tkns = (['start_tknids', 'delim_tknids', 'clf_tknids'], ['_@_', ' _#_', ' _$_']) if task_type == 'sentsim' else (['start_tknids', 'clf_tknids'], ['_@_', ' _$_'])
	special_tknids = _adjust_encoder(opts.model, None, special_tkns[1], ret_list=True)
	special_tknids_args = dict(zip(special_tkns[0], special_tknids))
	# Prepare data
	task_path, task_dstype, task_cols, task_trsfm, task_extrsfm, task_extparms = TASK_PATH_MAP[opts.task], TASK_DS_MAP[opts.task], TASK_COL_MAP[opts.task], TASK_TRSFM[opts.task], TASK_EXT_TRSFM[opts.task], TASK_EXT_PARAMS[opts.task]
	trsfms = task_trsfm[0] if len(task_trsfm) > 0 else []
	trsfms_kwargs = task_trsfm[1] if len(task_trsfm) >= 2 else [{}] * len(task_trsfm)
	train_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'train.tsv'), task_cols['X'], task_cols['y'], ENCODE_FUNC_MAP[opts.model], tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms else None, transforms=trsfms, transforms_kwargs=trsfms_kwargs)
	# Class frequencies drive the (optional) loss weights below.
	lb_trsfm = [x['get_lb'] for x in task_trsfm[1] if 'get_lb' in x]
	if (task_type == 'sentsim'):
		class_count = None
	elif len(lb_trsfm) > 0:
		lb_df = train_ds.df[task_cols['y']].apply(lb_trsfm[0])
		class_count = np.array([[1 if lb in y else 0 for lb in task_extparms.setdefault('binlb', train_ds.binlb).keys()] for y in lb_df]).sum(axis=0)
	else:
		lb_df = train_ds.df[task_cols['y']]
		binlb = task_extparms.setdefault('binlb', train_ds.binlb)
		class_count = lb_df.value_counts()[binlb.keys()].values
	if (class_count is None):
		class_weights = None
		sampler = None
	else:
		class_weights = torch.Tensor(1.0 / class_count)
		class_weights /= class_weights.sum()
		sampler = WeightedRandomSampler(weights=class_weights, num_samples=opts.bsize, replacement=True)
	# NOTE(review): the weighted sampler built above is NOT passed to the loader
	# (sampler=None, shuffle=False) -- it is effectively disabled; confirm intended.
	train_loader = DataLoader(train_ds, batch_size=opts.bsize, shuffle=False, sampler=None, num_workers=opts.np, drop_last=opts.droplast)
	# Dev/test reuse the training label mapping so indices stay consistent.
	dev_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'dev.tsv'), task_cols['X'], task_cols['y'], ENCODE_FUNC_MAP[opts.model], tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms else train_ds.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs)
	dev_loader = DataLoader(dev_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
	test_ds = task_dstype(os.path.join(DATA_PATH, task_path, 'test.tsv'), task_cols['X'], task_cols['y'], ENCODE_FUNC_MAP[opts.model], tokenizer=tokenizer, sep='\t', index_col=task_cols['index'], binlb=task_extparms['binlb'] if 'binlb' in task_extparms else train_ds.binlb, transforms=trsfms, transforms_kwargs=trsfms_kwargs)
	test_loader = DataLoader(test_ds, batch_size=opts.bsize, shuffle=False, num_workers=opts.np)
	# Load model
	mdl_name = opts.model.lower().replace(' ', '_')
	if (opts.resume):
		clf = load_model(opts.resume)
		if (use_gpu): clf = _handle_model(clf, dev_id=dev_id, distrb=opts.distrb)
	else:
		# Build model
		lm_model = gen_mdl(opts.model, pretrained=True if type(opts.pretrained) is str and opts.pretrained.lower() == 'true' else opts.pretrained, use_gpu=use_gpu, distrb=opts.distrb, dev_id=dev_id)
		clf = gen_clf(opts.model, lm_model=lm_model, task_type=task_type, num_lbs=len(train_ds.binlb) if train_ds.binlb else 1, mlt_trnsfmr=True if task_type=='sentsim' else False, use_gpu=use_gpu, distrb=opts.distrb, dev_id=dev_id, **dict([(k, getattr(opts, k)) if hasattr(opts, k) else (k, v) for k, v in CLF_EXT_PARAMS.setdefault(opts.model, {}).items()]))
	# optimizer = torch.optim.SGD(clf.parameters(), lr=opts.lr, momentum=0.9)
	optimizer = torch.optim.Adam(clf.parameters(), lr=opts.lr, weight_decay=opts.wdecay)
	# Training
	train(clf, optimizer, train_loader, special_tknids_args['clf_tknids'], pad_val=train_ds.binlb[task_extparms.setdefault('ypad_val', 0)] if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), weights=class_weights, lmcoef=opts.lmcoef, clipmaxn=opts.clipmaxn, epochs=opts.epochs, task_type=task_type, task_name=opts.task, mdl_name=mdl_name, use_gpu=use_gpu, devq=dev_id)
	# Evaluation (`eval` here is this module's function, which shadows the builtin)
	eval(clf, dev_loader, dev_ds.binlbr, special_tknids_args['clf_tknids'], pad_val=train_ds.binlb[task_extparms.setdefault('ypad_val', 0)] if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=opts.task, ds_name='dev', mdl_name=mdl_name, use_gpu=use_gpu)
	eval(clf, test_loader, test_ds.binlbr, special_tknids_args['clf_tknids'], pad_val=train_ds.binlb[task_extparms.setdefault('ypad_val', 0)] if task_type=='nmt' else task_extparms.setdefault('xpad_val', 0), task_type=task_type, task_name=opts.task, ds_name='test', mdl_name=mdl_name, use_gpu=use_gpu)
def train(clf, optimizer, dataset, clf_tknids, pad_val=0, weights=None, lmcoef=0.5, clipmaxn=0.25, epochs=1, task_type='mltc-clf', task_name='classification', mdl_name='sota', use_gpu=False, devq=None):
	"""Train `clf` on `dataset` for `epochs` epochs and save a checkpoint.

	Fix vs. original: gradient clipping used to run BEFORE `train_loss.backward()`,
	i.e. on nonexistent (or stale) gradients; it now runs after backward() and
	before optimizer.step(), which is the documented usage of
	torch.nn.utils.clip_grad_norm_.
	"""
	clf.train()
	for epoch in range(epochs):
		total_loss = 0
		# Rebalance classes each epoch for classification-style tasks.
		if task_type != 'entlmnt' and task_type != 'sentsim': dataset.dataset.rebalance()
		for step, batch in enumerate(tqdm(dataset, desc='[%i/%i epoch(s)] Training batches' % (epoch + 1, epochs))):
			optimizer.zero_grad()
			if task_type == 'nmt':
				idx, tkns_tnsr, lb_tnsr, record_idx = batch
				record_idx = [list(map(int, x.split(SC))) for x in record_idx]
			else:
				idx, tkns_tnsr, lb_tnsr = batch
			if task_type == 'entlmnt' or task_type == 'sentsim':
				# Tokenize each sentence of the pair with spaCy, pad to maxlen, encode for ELMo.
				tkns_tnsr = [[[w.text for w in nlp(sents)] for sents in tkns_tnsr[x]] for x in [0,1]]
				tkns_tnsr = [[s[:min(len(s), opts.maxlen)] + [''] * (opts.maxlen-len(s)) for s in tkns_tnsr[x]] for x in [0,1]]
				pool_idx = [torch.LongTensor([len(s) - 1 for s in tkns_tnsr[x]]) for x in [0,1]]
				tkns_tnsr = [batch_to_ids(tkns_tnsr[x]) for x in [0,1]]
				if (use_gpu): tkns_tnsr, lb_tnsr, pool_idx, weights = [tkns_tnsr[x].to('cuda') for x in [0,1]] , lb_tnsr.to('cuda'), [pool_idx[x].to('cuda') for x in [0,1]], (weights if weights is None else weights.to('cuda'))
			elif task_type == 'nmt':
				# Sequence labelling: tokens/labels arrive as SC-joined strings per sentence.
				tkns_tnsr, lb_tnsr = [s.split(SC) for s in tkns_tnsr if (type(s) is str and s != '') and len(s) > 0], [list(map(int, s.split(SC))) for s in lb_tnsr if (type(s) is str and s != '') and len(s) > 0]
				if (len(tkns_tnsr) == 0 or len(lb_tnsr) == 0): continue
				tkns_tnsr = [s[:min(len(s), opts.maxlen)] + [''] * (opts.maxlen-len(s)) for s in tkns_tnsr]
				lb_tnsr = torch.LongTensor([s[:min(len(s), opts.maxlen)] + [pad_val] * (opts.maxlen-len(s)) for s in lb_tnsr])
				pool_idx = torch.LongTensor([len(s) - 1 for s in tkns_tnsr])
				tkns_tnsr = batch_to_ids(tkns_tnsr)
				if (use_gpu): tkns_tnsr, lb_tnsr, pool_idx, weights = tkns_tnsr.to('cuda') , lb_tnsr.to('cuda'), pool_idx.to('cuda'), (weights if weights is None else weights.to('cuda'))
			else:
				tkns_tnsr = [[w.text for w in nlp(text)] for text in tkns_tnsr]
				if clf.pool: tkns_tnsr = [s[:min(len(s), opts.maxlen)] + [''] * (opts.maxlen-len(s)) for s in tkns_tnsr]
				pool_idx = torch.LongTensor([len(s) - 1 for s in tkns_tnsr])
				tkns_tnsr = batch_to_ids(tkns_tnsr)
				if (use_gpu): tkns_tnsr, lb_tnsr, pool_idx, weights = tkns_tnsr.to('cuda') , lb_tnsr.to('cuda'), pool_idx.to('cuda'), (weights if weights is None else weights.to('cuda'))
			clf_loss, lm_loss = clf(input_ids=tkns_tnsr, pool_idx=pool_idx, labels=lb_tnsr.view(-1), weights=weights)
			# NOTE(review): `mask_tnsr` is not defined in this function; the lm_loss
			# branch would raise NameError if the model ever returned a non-None
			# lm_loss -- confirm before enabling LM co-training.
			train_loss = clf_loss.mean() if lm_loss is None else (clf_loss.mean() + lmcoef * ((lm_loss.view(tkns_tnsr.size(0), -1) * mask_tnsr).sum(1) / (1e-12 + mask_tnsr.sum(1))).mean())
			total_loss += train_loss.item()
			train_loss.backward()
			# Clip only AFTER backward() has populated gradients, before the step.
			torch.nn.utils.clip_grad_norm_(clf.parameters(), clipmaxn)
			optimizer.step()
		print('Train loss in %i epoch(s): %f' % (epoch + 1, total_loss / (step + 1)))
	save_model(clf, optimizer, '%s_%s.pth' % (task_name, mdl_name), devq=devq)
def eval(clf, dataset, binlbr, clf_tknids, pad_val=0, task_type='mltc-clf', task_name='classification', ds_name='', mdl_name='sota', clipmaxn=0.25, use_gpu=False):
	"""Evaluate `clf` on `dataset`, print/save per-task metrics and dump predictions.

	NOTE(review): this function shadows the builtin `eval`; `clipmaxn` is accepted
	but never used here.
	"""
	clf.eval()
	total_loss, indices, preds, probs, all_logits, trues, ds_name = 0, [], [], [], [], [], ds_name.strip()
	# Drop the dominant label so it does not swamp the report (not for pair/multi-label tasks).
	if task_type not in ['entlmnt', 'sentsim', 'mltl-clf']: dataset.dataset.remove_mostfrqlb()
	for step, batch in enumerate(tqdm(dataset, desc="%s batches" % ds_name.title() if ds_name else 'Evaluation')):
		if task_type == 'nmt':
			idx, tkns_tnsr, lb_tnsr, record_idx = batch
			record_idx = [list(map(int, x.split(SC))) for x in record_idx]
		else:
			idx, tkns_tnsr, lb_tnsr = batch
		indices.extend(idx if type(idx) is list else (idx.tolist() if type(idx) is torch.Tensor else list(idx)))
		# Keep CPU-side copies for metric computation after the forward pass.
		_lb_tnsr = lb_tnsr
		if task_type == 'entlmnt' or task_type == 'sentsim':
			tkns_tnsr = [[[w.text for w in nlp(sents)] for sents in tkns_tnsr[x]] for x in [0,1]]
			tkns_tnsr = [[s[:min(len(s), opts.maxlen)] + [''] * (opts.maxlen-len(s)) for s in tkns_tnsr[x]] for x in [0,1]]
			pool_idx = _pool_idx = [torch.LongTensor([len(s) - 1 for s in tkns_tnsr[x]]) for x in [0,1]]
			tkns_tnsr = [batch_to_ids(tkns_tnsr[x]) for x in [0,1]]
			if (use_gpu): tkns_tnsr, lb_tnsr, pool_idx= [tkns_tnsr[x].to('cuda') for x in [0,1]] , lb_tnsr.to('cuda'), [pool_idx[x].to('cuda') for x in [0,1]]
		elif task_type == 'nmt':
			tkns_tnsr, lb_tnsr = [s.split(SC) for s in tkns_tnsr if (type(s) is str and s != '') or len(s) > 0], [list(map(int, s.split(SC))) for s in lb_tnsr if (type(s) is str and s != '') or len(s) > 0]
			if (len(tkns_tnsr) == 0 or len(lb_tnsr) == 0): continue
			tkns_tnsr = [s[:min(len(s), opts.maxlen)] + [''] * (opts.maxlen-len(s)) for s in tkns_tnsr]
			_lb_tnsr = lb_tnsr = torch.LongTensor([s[:min(len(s), opts.maxlen)] + [pad_val] * (opts.maxlen-len(s)) for s in lb_tnsr])
			pool_idx = _pool_idx = torch.LongTensor([len(s) - 1 for s in tkns_tnsr])
			tkns_tnsr = batch_to_ids(tkns_tnsr)
			if (use_gpu): tkns_tnsr, lb_tnsr, pool_idx = tkns_tnsr.to('cuda') , lb_tnsr.to('cuda'), pool_idx.to('cuda')
		else:
			tkns_tnsr = [[w.text for w in nlp(text)] for text in tkns_tnsr]
			if clf.pool: tkns_tnsr = [s[:min(len(s), opts.maxlen)] + [''] * (opts.maxlen-len(s)) for s in tkns_tnsr]
			pool_idx = _pool_idx = torch.LongTensor([len(s) - 1 for s in tkns_tnsr])
			tkns_tnsr = batch_to_ids(tkns_tnsr)
			if (use_gpu): tkns_tnsr, lb_tnsr, pool_idx= tkns_tnsr.to('cuda') , lb_tnsr.to('cuda'), pool_idx.to('cuda')
		with torch.no_grad():
			logits = clf(tkns_tnsr, pool_idx, labels=None)
			# Per-task loss and (prob, pred) extraction from the raw logits.
			if task_type == 'mltc-clf' or task_type == 'entlmnt':
				loss_func = nn.CrossEntropyLoss(reduction='none')
				loss = loss_func(logits.view(-1, len(binlbr)), lb_tnsr.view(-1))
				prob, pred = torch.softmax(logits, -1).max(-1)
			elif task_type == 'mltl-clf':
				loss_func = nn.BCEWithLogitsLoss(reduction='none')
				loss = loss_func(logits.view(-1, len(binlbr)), lb_tnsr.view(-1, len(binlbr)).float())
				prob = torch.sigmoid(logits).data
				pred = (prob > opts.pthrshld).int()
			elif task_type == 'nmt':
				loss_func = nn.CrossEntropyLoss(reduction='none')
				loss = loss_func(logits.view(-1, len(binlbr)), lb_tnsr.view(-1))
				prob, pred = torch.softmax(logits, -1).max(-1)
			elif task_type == 'sentsim':
				loss_func = nn.MSELoss(reduction='none')
				loss = loss_func(logits.view(-1), lb_tnsr.view(-1))
				prob, pred = logits, logits
			total_loss += loss.mean().item()
		if task_type == 'nmt':
			# Collapse subtoken predictions back to one label per original word by
			# majority vote within each word's subtoken span (record_idx offsets).
			# ('flat_tures' is a long-standing typo for 'flat_trues', kept as-is.)
			last_tkns = torch.arange(_lb_tnsr.size(0)) * _lb_tnsr.size(1) + _pool_idx
			flat_tures, flat_preds, flat_probs = _lb_tnsr.view(-1).tolist(), pred.view(-1).detach().cpu().tolist(), prob.view(-1).detach().cpu().tolist()
			flat_tures_set, flat_preds_set, flat_probs_set = set(flat_tures), set(flat_preds), set(flat_probs)
			trues.append([[max(flat_tures_set, key=flat_tures[a:b][c[idx]:c[idx+1]].count) for idx in range(len(c)-1)] for a, b, c in zip(range(_lb_tnsr.size(0)), last_tkns, record_idx)])
			preds.append([[max(flat_preds_set, key=flat_preds[a:b][c[idx]:c[idx+1]].count) for idx in range(len(c)-1)] for a, b, c in zip(range(_lb_tnsr.size(0)), last_tkns, record_idx)])
			probs.append([[max(flat_probs_set, key=flat_probs[a:b][c[idx]:c[idx+1]].count) for idx in range(len(c)-1)] for a, b, c in zip(range(_lb_tnsr.size(0)), last_tkns, record_idx)])
		else:
			trues.append(_lb_tnsr.view(_lb_tnsr.size(0), -1).numpy() if task_type == 'mltl-clf' else _lb_tnsr.view(-1).detach().cpu().numpy())
			preds.append(pred.detach().cpu().numpy())
			probs.append(prob.detach().cpu().numpy())
		all_logits.append(logits.view(_lb_tnsr.size(0), -1, logits.size(-1)).detach().cpu().numpy())
	total_loss = total_loss / (step + 1)
	print('Evaluation loss on %s dataset: %.2f' % (ds_name, total_loss))
	all_logits = np.concatenate(all_logits, axis=0)
	if task_type == 'nmt':
		trues = list(itertools.chain.from_iterable(list(itertools.chain.from_iterable(trues))))
		preds = list(itertools.chain.from_iterable(list(itertools.chain.from_iterable(preds))))
		probs = list(itertools.chain.from_iterable(list(itertools.chain.from_iterable(probs))))
	else:
		trues = np.concatenate(trues, axis=0)
		preds = np.concatenate(preds, axis=0)
		probs = np.concatenate(probs, axis=0)
	resf_prefix = ds_name.lower().replace(' ', '_')
	# Dump raw predictions for offline analysis.
	with open('%s_preds_trues.pkl' % resf_prefix, 'wb') as fd:
		pickle.dump((trues, preds, probs, all_logits), fd)
	if any(task_type == t for t in ['mltc-clf', 'entlmnt', 'nmt']):
		preds = preds
	elif task_type == 'mltl-clf':
		preds = preds
	elif task_type == 'sentsim':
		preds = np.squeeze(preds)
	# Per-task metric reports.
	if task_type == 'sentsim':
		if (np.isnan(preds).any()):
			print('Predictions contain NaN values! Please try to decrease the learning rate!')
			return
		metric_names, metrics_funcs = ['Mean Absolute Error', 'Mean Squared Error', 'Mean Squared Log Error', 'Median Absolute Error', 'R2', 'Pearson Correlation'], [metrics.mean_absolute_error, metrics.mean_squared_error, metrics.mean_squared_log_error, metrics.median_absolute_error, metrics.r2_score, _prsn_cor]
		perf_df = pd.DataFrame(dict([(k, [f(trues, preds)]) for k, f in zip(metric_names, metrics_funcs)]), index=[mdl_name])[metric_names]
	elif task_type == 'mltl-clf':
		perf_df = pd.DataFrame(metrics.classification_report(trues, preds, target_names=[binlbr[x] for x in binlbr.keys()], output_dict=True)).T[['precision', 'recall', 'f1-score', 'support']]
	else:
		perf_df = pd.DataFrame(metrics.classification_report(trues, preds, target_names=[binlbr[x] for x in binlbr.keys() if x in preds or x in trues], output_dict=True)).T[['precision', 'recall', 'f1-score', 'support']]
	print('Results for %s dataset is:\n%s' % (ds_name.title(), perf_df))
	perf_df.to_excel('perf_%s.xlsx' % resf_prefix)
	# NER-style indices are SC-joined row-id strings; flatten them back to ints.
	if (type(indices[0]) is str and SC in indices[0]):
		indices = list(itertools.chain.from_iterable([list(map(int, idx.split(SC))) for idx in indices if idx]))
	try:
		dataset.dataset.fill_labels(preds, saved_path='pred_%s.csv' % resf_prefix, index=indices)
	except Exception as e:
		print(e)
def _prsn_cor(trues, preds):
return np.corrcoef(trues, preds)[0, 1]
def save_model(model, optimizer, fpath='checkpoint.pth', in_wrapper=False, devq=None, **kwargs):
    """Serialize the model (object + state dict) and optimizer state to *fpath*.

    Extra keyword arguments are merged into the checkpoint dict (and may
    override the default keys). When *in_wrapper* is set, the inner module is
    unwrapped first; when a non-empty device queue is given, the model is
    moved to CPU before saving.
    """
    print('Saving trained model...')
    net = model.module if in_wrapper else model
    if devq and len(devq) > 0:
        net = net.cpu()
    checkpoint = {'model': net,
                  'state_dict': net.state_dict(),
                  'optimizer': optimizer.state_dict(),
                  **kwargs}
    torch.save(checkpoint, fpath)
def load_model(mdl_path):
    """Restore a model saved by ``save_model``: unpickle the module object,
    then overwrite its parameters with the saved state dict (loaded on CPU)."""
    print('Loading previously trained model...')
    ckpt = torch.load(mdl_path, map_location='cpu')
    restored = ckpt['model']
    restored.load_state_dict(ckpt['state_dict'])
    return restored
def _handle_model(model, dev_id=None, distrb=False):
if (distrb):
if (type(dev_id) is list):
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=dev_id)
else:
torch.cuda.set_device(dev_id)
model = model.cuda(dev_id)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[dev_id])
raise NotImplementedError
elif (dev_id is not None):
if (type(dev_id) is list):
model.cuda()
model = torch.nn.DataParallel(model, device_ids=dev_id)
else:
torch.cuda.set_device(dev_id)
model = model.cuda(dev_id)
return model
def main():
    """Entry point: pick the task handler and launch it on the configured device(s)."""
    supported_tasks = ('bc5cdr-chem', 'bc5cdr-dz', 'shareclefe', 'ddi', 'chemprot',
                       'i2b2', 'hoc', 'mednli', 'biosses', 'clnclsts')
    if opts.task not in supported_tasks:
        return
    main_func = classify

    def _device_arg(q):
        # a single device id when exactly one device is queued, else the whole list
        return q if len(q) > 1 else q[0]

    if opts.distrb:
        if opts.np > 1:  # multi-process, multiple GPUs
            import torch.multiprocessing as mp
            mp.spawn(main_func, nprocs=len(opts.devq))
        else:  # single-process, multiple GPUs
            main_func(_device_arg(opts.devq))
    elif opts.devq:  # single process with explicit device(s)
        main_func(_device_arg(opts.devq))
    else:
        main_func(None)  # CPU
if __name__ == '__main__':
    # Logging setting
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
    # Parse commandline arguments
    op = OptionParser()
    op.add_option('-k', '--kfold', default=10, action='store', type='int', dest='kfold', help='indicate the K fold cross validation')
    op.add_option('-p', '--pid', default=0, action='store', type='int', dest='pid', help='indicate the process ID')
    op.add_option('-n', '--np', default=1, action='store', type='int', dest='np', help='indicate the number of processes used for training')
    op.add_option('-f', '--fmt', default='npz', help='data stored format: csv, npz, or h5 [default: %default]')
    op.add_option('-s', '--spfmt', default='csr', help='sparse data stored format: csc or csr [default: %default]')
    op.add_option('-a', '--avg', default='micro', help='averaging strategy for performance metrics: micro or macro [default: %default]')
    op.add_option('-j', '--epochs', default=1, action='store', type='int', dest='epochs', help='indicate the epoch used in deep learning')
    op.add_option('-z', '--bsize', default=64, action='store', type='int', dest='bsize', help='indicate the batch size used in deep learning')
    op.add_option('-o', '--omp', action='store_true', dest='omp', default=False, help='use openmp multi-thread')
    op.add_option('-g', '--gpunum', default=1, action='store', type='int', dest='gpunum', help='indicate the gpu device number')
    op.add_option('-q', '--gpuq', dest='gpuq', help='prefered gpu device queue [template: DEVICE_ID1,DEVICE_ID2,...,DEVICE_IDn]')
    op.add_option('--gpumem', default=0.5, action='store', type='float', dest='gpumem', help='indicate the per process gpu memory fraction')
    op.add_option('--crsdev', action='store_true', dest='crsdev', default=False, help='whether to use heterogeneous devices')
    op.add_option('--distrb', action='store_true', dest='distrb', default=False, help='whether to distribute data over multiple devices')
    op.add_option('--distbknd', default='nccl', action='store', dest='distbknd', help='distribute framework backend')
    op.add_option('--disturl', default='env://', action='store', dest='disturl', help='distribute framework url')
    op.add_option('--earlystop', default=False, action='store_true', dest='earlystop', help='whether to use early stopping')
    op.add_option('--es_patience', default=5, action='store', type='int', dest='es_patience', help='indicate the tolerance time for training metric violation')
    op.add_option('--es_delta', default=float(5e-3), action='store', type='float', dest='es_delta', help='indicate the minimum delta of early stopping')
    op.add_option('--options_path', dest='options_path', help='ELMo option file')
    op.add_option('--weights_path', dest='weights_path', help='ELMo weight file')
    op.add_option('--maxlen', default=128, action='store', type='int', dest='maxlen', help='indicate the maximum sequence length for each samples')
    op.add_option('--maxtrial', default=50, action='store', type='int', dest='maxtrial', help='maximum time to try')
    op.add_option('--initln', default=False, action='store_true', dest='initln', help='whether to initialize the linear layer')
    op.add_option('--initln_mean', default=0., action='store', type='float', dest='initln_mean', help='indicate the mean of the parameters in linear model when Initializing')
    op.add_option('--initln_std', default=0.02, action='store', type='float', dest='initln_std', help='indicate the standard deviation of the parameters in linear model when Initializing')
    # FIX: the help text was a copy-paste of the --droplast description
    op.add_option('--weight_class', default=False, action='store_true', dest='weight_class', help='whether to weight the classes in the loss function')
    op.add_option('--droplast', default=False, action='store_true', dest='droplast', help='whether to drop the last incompleted batch')
    op.add_option('--do_norm', default=False, action='store_true', dest='do_norm', help='whether to do normalization')
    op.add_option('--norm_type', default='batch', action='store', dest='norm_type', help='normalization layer class')
    op.add_option('--do_lastdrop', default=False, action='store_true', dest='do_lastdrop', help='whether to apply dropout to the last layer')
    # FIX: the help text was a copy-paste of the --do_lastdrop description
    op.add_option('--lm_loss', default=False, action='store_true', dest='lm_loss', help='whether to use the language model loss')
    op.add_option('--do_crf', default=False, action='store_true', dest='do_crf', help='whether to apply CRF layer')
    op.add_option('--fchdim', default=0, action='store', type='int', dest='fchdim', help='indicate the dimensions of the hidden layers in the Embedding-based classifier, 0 means using only one linear layer')
    op.add_option('--pool', dest='pool', help='indicate the pooling strategy when selecting features: max or avg')
    op.add_option('--seq2seq', dest='seq2seq', help='indicate the seq2seq strategy when converting sequences of embeddings into a vector')
    op.add_option('--seq2vec', dest='seq2vec', help='indicate the seq2vec strategy when converting sequences of embeddings into a vector: pytorch-lstm, cnn, or cnn_highway')
    op.add_option('--ssfunc', dest='sentsim_func', help='indicate the sentence similarity metric')
    op.add_option('--lr', default=float(1e-3), action='store', type='float', dest='lr', help='indicate the learning rate of the optimizer')
    op.add_option('--wdecay', default=float(1e-5), action='store', type='float', dest='wdecay', help='indicate the weight decay of the optimizer')
    op.add_option('--lmcoef', default=0.5, action='store', type='float', dest='lmcoef', help='indicate the coefficient of the language model loss when fine tuning')
    op.add_option('--pdrop', default=0.2, action='store', type='float', dest='pdrop', help='indicate the dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler')
    op.add_option('--pthrshld', default=0.5, action='store', type='float', dest='pthrshld', help='indicate the threshold for predictive probabilitiy')
    op.add_option('--clipmaxn', default=0.25, action='store', type='float', dest='clipmaxn', help='indicate the max norm of the gradients')
    op.add_option('--resume', action='store', dest='resume', help='resume training model file')
    op.add_option('-i', '--input', help='input dataset')
    op.add_option('-w', '--cache', default='.cache', help='the location of cache files')
    op.add_option('-u', '--task', default='ddi', type='str', dest='task', help='the task name [default: %default]')
    op.add_option('-m', '--model', default='elmo', type='str', dest='model', help='the model to be validated')
    op.add_option('--pretrained', dest='pretrained', help='pretrained model file')
    op.add_option('-v', '--verbose', action='store_true', dest='verbose', default=False, help='display detailed information')
    op.add_option('--data_dir', dest='data_dir', help='indicate the data path')

    (opts, args) = op.parse_args()
    if len(args) > 0:
        op.print_help()
        op.error('Please input options instead of arguments.')
        sys.exit(1)  # unreachable safeguard: op.error() already exits

    # Resolve the GPU queue: explicit list, 'auto'/'all', or first `gpunum` devices.
    if (opts.gpuq is not None and not opts.gpuq.strip().isspace()):
        opts.gpuq = list(range(torch.cuda.device_count())) if (opts.gpuq == 'auto' or opts.gpuq == 'all') else [int(x) for x in opts.gpuq.split(',') if x]
    elif (opts.gpunum > 0):
        opts.gpuq = list(range(opts.gpunum))
    else:
        opts.gpuq = []

    if (opts.gpuq and opts.gpunum > 0):
        os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(map(str, opts.gpuq[:opts.gpunum]))
        setattr(opts, 'devq', list(range(torch.cuda.device_count())))
    else:
        setattr(opts, 'devq', None)

    # Override ELMo file locations and the data directory when supplied.
    if (opts.options_path): LM_PARAMS['ELMo']['options_path'] = opts.options_path
    if (opts.weights_path): LM_PARAMS['ELMo']['weights_path'] = opts.weights_path
    if (opts.data_dir): DATA_PATH = opts.data_dir

    main()
| 65,502 | 71.219405 | 959 | py |
RotationCorrection | RotationCorrection-main/Codes/tf_spatial_transform_local.py | import tensorflow as tf
import numpy as np
import math
import tensorDLT_local
from keras.layers import UpSampling2D
import constant
grid_w = constant.GRID_W
grid_h = constant.GRID_H
def transformer(U, theta, name='SpatialTransformer', **kwargs):
    """Mesh-based spatial transformer layer (TensorFlow 1.x graph mode).

    Warps the image batch ``U`` according to a deformed control-point mesh
    ``theta``: each mesh cell defines a homography (solved per cell via
    ``tensorDLT_local.solve_DLT`` in ``get_Hs``) applied to that cell's pixels.
    Based on the classic STN implementation [2]_ described in [1]_.

    Parameters
    ----------
    U : tensor, shape [num_batch, height, width, num_channels]
        Input images; values are shifted by -1 before sampling and the result
        is shifted back and clipped to [-1, 1] (see the body at the bottom).
    theta : tensor, shape [num_batch, grid_h+1, grid_w+1, 2]
        Target positions of the mesh control points, in pixel coordinates of
        a 512x384 canvas (sizes hard-coded inside ``_transform3``).

    Returns
    -------
    warp_image : tensor with the same shape as ``U``.

    References
    ----------
    .. [1] Spatial Transformer Networks
           Max Jaderberg, Karen Simonyan, Andrew Zisserman, Koray Kavukcuoglu
    .. [2] https://github.com/skaae/transformer_network/blob/master/transformerlayer.py
    """
    # Tile each element of x n_repeats times; used to build per-batch flat
    # pixel-index offsets for gather-based sampling.
    def _repeat(x, n_repeats):
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(
                tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
            return tf.reshape(x, [-1])

    # Bilinear sampling of `im` at the (already pixel-space) coordinates x, y.
    def _interpolate(im, x, y, out_size):
        with tf.variable_scope('_interpolate'):
            # constants
            num_batch = tf.shape(im)[0]
            height = tf.shape(im)[1]
            width = tf.shape(im)[2]
            channels = tf.shape(im)[3]

            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
            max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')

            # scale indices from [-1, 1] to [0, width/height]
            #x = (x + 1.0)*(width_f) / 2.0
            #y = (y + 1.0)*(height_f) / 2.0

            # do sampling: the 4 integer neighbours of each sample point,
            # clamped to the valid image range.
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = x0 + 1
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = y0 + 1

            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            dim2 = width
            dim1 = width*height
            base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)
            base_y0 = base + y0*dim2
            base_y1 = base + y1*dim2
            idx_a = base_y0 + x0
            idx_b = base_y1 + x0
            idx_c = base_y0 + x1
            idx_d = base_y1 + x1

            # use indices to lookup pixels in the flat image and restore
            # channels dim
            im_flat = tf.reshape(im, tf.stack([-1, channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.gather(im_flat, idx_a)
            Ib = tf.gather(im_flat, idx_b)
            Ic = tf.gather(im_flat, idx_c)
            Id = tf.gather(im_flat, idx_d)

            # and finally calculate interpolated values (bilinear weights)
            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)
            wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)
            wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)
            wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)
            output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
            return output

    #input: batch_size*(grid_h+1)*(grid_w+1)*2
    #output: batch_size*grid_h*grid_w*9
    # For every mesh cell, solve a 4-point DLT between the cell's rectangular
    # source corners and the (deformed) target corners taken from `theta`.
    # NOTE: the parameter name `theta` shadows the outer argument on purpose.
    def get_Hs(theta, width, height):
        with tf.variable_scope('get_Hs'):
            num_batch = tf.shape(theta)[0]
            h = height / grid_h
            w = width / grid_w
            Hs = []
            for i in range(grid_h):
                for j in range(grid_w):
                    hh = i * h
                    ww = j * w
                    ori = tf.tile(tf.constant([ww, hh, ww + w, hh, ww, hh + h, ww + w, hh + h], shape=[1, 8], dtype=tf.float32), multiples=[num_batch, 1])
                    #id = i * (grid_w + 1) + grid_w
                    tar = tf.concat([tf.slice(theta, [0, i, j, 0], [-1, 1, 1, -1]), tf.slice(theta, [0, i, j + 1, 0], [-1, 1, 1, -1]),
                                     tf.slice(theta, [0, i + 1, j, 0], [-1, 1, 1, -1]), tf.slice(theta, [0, i + 1, j + 1, 0], [-1, 1, 1, -1])], axis=1)
                    tar = tf.reshape(tar, [num_batch, 8])
                    #tar = tf.Print(tar, [tf.slice(ori, [0, 0], [1, -1])],message="[ori--i:"+str(i)+",j:"+str(j)+"]:", summarize=100,first_n=5)
                    #tar = tf.Print(tar, [tf.slice(tar, [0, 0], [1, -1])],message="[tar--i:"+str(i)+",j:"+str(j)+"]:", summarize=100,first_n=5)
                    Hs.append(tf.reshape(tensorDLT_local.solve_DLT(ori, tar), [num_batch, 1, 9]))

            Hs = tf.reshape(tf.concat(Hs, axis=1), [num_batch, grid_h, grid_w, 9], name='Hs')
            return Hs

    # Homogeneous sampling grid over a sub-window [sh..eh] x [sw..ew] in
    # normalized [-1, 1] coordinates. NOTE(review): not referenced below.
    def _meshgrid2(height, width, sh, eh, sw, ew):
        hn = eh - sh + 1
        wn = ew - sw + 1

        x_t = tf.matmul(tf.ones(shape=tf.stack([hn, 1])),
                        tf.transpose(tf.expand_dims(tf.slice(tf.linspace(-1.0, 1.0, width), [sw], [wn]), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.slice(tf.linspace(-1.0, 1.0, height), [sh], [hn]), 1),
                        tf.ones(shape=tf.stack([1, wn])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        ones = tf.ones_like(x_t_flat)
        grid = tf.concat([x_t_flat, y_t_flat, ones], 0)
        return grid

    # Homogeneous pixel-coordinate grid [3, H*W]: rows are x, y, 1.
    # Coordinates run 0 .. size-1.001 (the -1.001 keeps floor() in range).
    def _meshgrid(height, width):
        #x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
        #                tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        #y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
        #                tf.ones(shape=tf.stack([1, width])))
        x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(0., tf.cast(width, 'float32')-1.001, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(0., tf.cast(height, 'float32')-1.001, height), 1),
                        tf.ones(shape=tf.stack([1, width])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        ones = tf.ones_like(x_t_flat)
        grid = tf.concat([x_t_flat, y_t_flat, ones], 0)
        return grid

    # Apply the per-cell homographies to every pixel and resample the input.
    # NOTE(review): width/height come from the input tensor, but the mesh math
    # and the UpSampling2D factor below hard-code a 384x512 input -- confirm
    # callers always pass that resolution.
    def _transform3(theta, input_dim):
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(input_dim)[0]
            height = tf.shape(input_dim)[1]
            width = tf.shape(input_dim)[2]
            num_channels = tf.shape(input_dim)[3]

            #widthfff = 512.
            #height
            width_float = 512.
            height_float = 384.

            #M = np.array([[width_float / 2.0, 0., width_float / 2.0],
            #              [0., height_float / 2.0, height_float / 2.0],
            #              [0., 0., 1.]]).astype(np.float32)
            #M_tensor = tf.constant(M, tf.float32)
            #M_tile = tf.tile(tf.expand_dims(M_tensor, [0]), [num_batch, 1, 1])
            #M_inv = np.linalg.inv(M)
            #M_tensor_inv = tf.constant(M_inv, tf.float32)
            #M_tile_inv = tf.tile(tf.expand_dims(M_tensor_inv, [0]), [num_batch, 1, 1])

            theta = tf.cast(theta, 'float32')
            Hs = get_Hs(theta, width_float, height_float)
            gh = tf.cast(height / grid_h, 'int32')
            gw =tf.cast(width / grid_w, 'int32')

            ##########################################
            # Broadcast each cell's 3x3 homography to all pixels of that cell.
            print("Hs")
            print(Hs.shape)
            H_array = UpSampling2D(size=(384/grid_h, 512/grid_w))(Hs)
            H_array = tf.reshape(H_array, [-1, 3, 3])
            ##########################################

            out_height = height
            out_width = width
            grid = _meshgrid(out_height, out_width)
            grid = tf.expand_dims(grid, 0)
            grid = tf.reshape(grid, [-1])
            grid = tf.tile(grid, tf.stack([num_batch]))  # stack num_batch grids
            grid = tf.reshape(grid, tf.stack([num_batch, 3, -1]))
            print("grid")
            print(grid.shape)
            ### [bs, 3, N]
            grid = tf.expand_dims(tf.transpose(grid, [0, 2, 1]),3)
            ### [bs, 3, N] -> [bs, N, 3] -> [bs, N, 3, 1]
            grid = tf.reshape(grid, [-1, 3, 1])
            ### [bs*N, 3, 1]
            grid_row = tf.reshape(grid, [-1, 3])
            print("grid_row")
            print(grid_row.shape)
            # Per-pixel projective transform: row-wise dot products with the
            # (per-pixel) homography rows.
            x_s = tf.reduce_sum(tf.multiply(H_array[:,0,:], grid_row), 1)
            y_s = tf.reduce_sum(tf.multiply(H_array[:,1,:], grid_row), 1)
            t_s = tf.reduce_sum(tf.multiply(H_array[:,2,:], grid_row), 1)
            # The problem may be here as a general homo does not preserve the parallelism
            # while an affine transformation preserves it.
            t_s_flat = tf.reshape(t_s, [-1])

            # Guard against division by zero: nudge the homogeneous divisor
            # away from 0 by +/-1e-8 with the matching sign.
            t_1 = tf.ones(shape = tf.shape(t_s_flat))
            t_0 = tf.zeros(shape = tf.shape(t_s_flat))
            sign_t = tf.where(t_s_flat >= 0, t_1, t_0) * 2 - 1
            t_s_flat = t_s_flat + sign_t*1e-8

            x_s_flat = tf.reshape(x_s, [-1]) / t_s_flat
            y_s_flat = tf.reshape(y_s, [-1]) / t_s_flat

            out_size = (height, width)
            input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size)
            #mask_transformed = _interpolate(mask, x_s_flat, y_s_flat, out_size)

            warp_image = tf.reshape(input_transformed, tf.shape(input_dim), name='output_img')
            #warp_mask = tf.reshape(mask_transformed, tf.stack([num_batch, height, width, num_channels]), name='output_mask')

            return warp_image#, warp_mask

    with tf.variable_scope(name):
        # Shift to a 0-centered range for sampling, then undo and clamp.
        U = U - 1.
        warp_image = _transform3(theta, U)
        warp_image = warp_image + 1.
        warp_image = tf.clip_by_value(warp_image, -1, 1)
        return warp_image
| 10,939 | 39.07326 | 154 | py |
RotationCorrection | RotationCorrection-main/Codes/tf_mesh2flow.py | import tensorflow as tf
import numpy as np
import math
import tensorDLT_local
from keras.layers import UpSampling2D
import constant
grid_w = constant.GRID_W
grid_h = constant.GRID_H
def mesh2flow(mesh, name='Mesh2Flow', **kwargs):
    """Convert a deformed control-point mesh into a dense backward flow field.

    For every mesh cell, a homography is solved via
    ``tensorDLT_local.solve_DLT`` (see ``get_Hs``), applied to each pixel of a
    fixed 384x512 canvas, and the per-pixel displacement (warped position
    minus original position) is returned.

    Parameters
    ----------
    mesh : tensor, shape [num_batch, grid_h+1, grid_w+1, 2]
        Target positions of the mesh control points in pixel coordinates.

    Returns
    -------
    flow : tensor, shape [num_batch, 384, 512, 2]
        Per-pixel (dx, dy) displacements.

    Notes
    -----
    The nested ``_repeat``/``_interpolate``/``_meshgrid2`` helpers are kept
    for parity with ``tf_spatial_transform_local`` but are not referenced by
    ``_transform3`` below.
    """
    # Tile each element of x n_repeats times; used to build per-batch flat
    # pixel-index offsets (only needed by _interpolate).
    def _repeat(x, n_repeats):
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(
                tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
            return tf.reshape(x, [-1])

    # Bilinear sampling of `im` at pixel coordinates x, y (unused here).
    def _interpolate(im, x, y, out_size):
        with tf.variable_scope('_interpolate'):
            # constants
            num_batch = tf.shape(im)[0]
            height = tf.shape(im)[1]
            width = tf.shape(im)[2]
            channels = tf.shape(im)[3]

            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
            max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')

            # scale indices from [-1, 1] to [0, width/height]
            #x = (x + 1.0)*(width_f) / 2.0
            #y = (y + 1.0)*(height_f) / 2.0

            # do sampling
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = x0 + 1
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = y0 + 1

            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            dim2 = width
            dim1 = width*height
            base = _repeat(tf.range(num_batch)*dim1, out_height*out_width)
            base_y0 = base + y0*dim2
            base_y1 = base + y1*dim2
            idx_a = base_y0 + x0
            idx_b = base_y1 + x0
            idx_c = base_y0 + x1
            idx_d = base_y1 + x1

            # use indices to lookup pixels in the flat image and restore
            # channels dim
            im_flat = tf.reshape(im, tf.stack([-1, channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.gather(im_flat, idx_a)
            Ib = tf.gather(im_flat, idx_b)
            Ic = tf.gather(im_flat, idx_c)
            Id = tf.gather(im_flat, idx_d)

            # and finally calculate interpolated values (bilinear weights)
            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.expand_dims(((x1_f-x) * (y1_f-y)), 1)
            wb = tf.expand_dims(((x1_f-x) * (y-y0_f)), 1)
            wc = tf.expand_dims(((x-x0_f) * (y1_f-y)), 1)
            wd = tf.expand_dims(((x-x0_f) * (y-y0_f)), 1)
            output = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
            return output

    #input: batch_size*(grid_h+1)*(grid_w+1)*2
    #output: batch_size*grid_h*grid_w*9
    # For every mesh cell, solve a 4-point DLT between the rectangular source
    # corners and the (deformed) target corners taken from `theta`.
    # NOTE: the parameter name `theta` shadows the outer `mesh` on purpose.
    def get_Hs(theta, width, height):
        with tf.variable_scope('get_Hs'):
            num_batch = tf.shape(theta)[0]
            h = height / grid_h
            w = width / grid_w
            Hs = []
            for i in range(grid_h):
                for j in range(grid_w):
                    hh = i * h
                    ww = j * w
                    ori = tf.tile(tf.constant([ww, hh, ww + w, hh, ww, hh + h, ww + w, hh + h], shape=[1, 8], dtype=tf.float32), multiples=[num_batch, 1])
                    #id = i * (grid_w + 1) + grid_w
                    tar = tf.concat([tf.slice(theta, [0, i, j, 0], [-1, 1, 1, -1]), tf.slice(theta, [0, i, j + 1, 0], [-1, 1, 1, -1]),
                                     tf.slice(theta, [0, i + 1, j, 0], [-1, 1, 1, -1]), tf.slice(theta, [0, i + 1, j + 1, 0], [-1, 1, 1, -1])], axis=1)
                    tar = tf.reshape(tar, [num_batch, 8])
                    #tar = tf.Print(tar, [tf.slice(ori, [0, 0], [1, -1])],message="[ori--i:"+str(i)+",j:"+str(j)+"]:", summarize=100,first_n=5)
                    #tar = tf.Print(tar, [tf.slice(tar, [0, 0], [1, -1])],message="[tar--i:"+str(i)+",j:"+str(j)+"]:", summarize=100,first_n=5)
                    Hs.append(tf.reshape(tensorDLT_local.solve_DLT(ori, tar), [num_batch, 1, 9]))

            Hs = tf.reshape(tf.concat(Hs, axis=1), [num_batch, grid_h, grid_w, 9], name='Hs')
            return Hs

    # Homogeneous sampling grid over a sub-window in normalized coordinates.
    # NOTE(review): not referenced below.
    def _meshgrid2(height, width, sh, eh, sw, ew):
        hn = eh - sh + 1
        wn = ew - sw + 1

        x_t = tf.matmul(tf.ones(shape=tf.stack([hn, 1])),
                        tf.transpose(tf.expand_dims(tf.slice(tf.linspace(-1.0, 1.0, width), [sw], [wn]), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.slice(tf.linspace(-1.0, 1.0, height), [sh], [hn]), 1),
                        tf.ones(shape=tf.stack([1, wn])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        ones = tf.ones_like(x_t_flat)
        grid = tf.concat([x_t_flat, y_t_flat, ones], 0)
        return grid

    # Homogeneous pixel-coordinate grid [3, H*W]: rows are x, y, 1.
    # Coordinates run 0 .. size-1.001 (the -1.001 keeps floor() in range).
    def _meshgrid(height, width):
        #x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
        #                tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
        #y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
        #                tf.ones(shape=tf.stack([1, width])))
        x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(0., tf.cast(width, 'float32')-1.001, width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(0., tf.cast(height, 'float32')-1.001, height), 1),
                        tf.ones(shape=tf.stack([1, width])))

        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))

        ones = tf.ones_like(x_t_flat)
        grid = tf.concat([x_t_flat, y_t_flat, ones], 0)
        return grid

    # Apply the per-cell homographies to every pixel of the fixed 384x512
    # canvas and return the displacement of each pixel.
    def _transform3(theta):
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(theta)[0]
            height = 384
            width = 512
            width_float = 512.
            height_float = 384.

            theta = tf.cast(theta, 'float32')
            Hs = get_Hs(theta, width_float, height_float)
            gh = tf.cast(height / grid_h, 'int32')
            gw =tf.cast(width / grid_w, 'int32')

            ##########################################
            # Broadcast each cell's 3x3 homography to all pixels of that cell.
            print("Hs")
            print(Hs.shape)
            H_array = UpSampling2D(size=(384/grid_h, 512/grid_w))(Hs)
            H_array = tf.reshape(H_array, [-1, 3, 3])
            ##########################################

            out_height = height
            out_width = width
            grid = _meshgrid(out_height, out_width)
            grid = tf.expand_dims(grid, 0)
            grid = tf.reshape(grid, [-1])
            grid = tf.tile(grid, tf.stack([num_batch]))  # stack num_batch grids
            grid = tf.reshape(grid, tf.stack([num_batch, 3, -1]))
            print("grid")
            print(grid.shape)
            ### [bs, 3, N]
            grid = tf.expand_dims(tf.transpose(grid, [0, 2, 1]),3)
            ### [bs, 3, N] -> [bs, N, 3] -> [bs, N, 3, 1]
            grid = tf.reshape(grid, [-1, 3, 1])
            ### [bs*N, 3, 1]
            print("grid")
            print(grid.shape)
            grid_row = tf.reshape(grid, [-1, 3])
            print("grid_row")
            print(grid_row.shape)
            # Per-pixel projective transform: row-wise dot products with the
            # (per-pixel) homography rows.
            x_s = tf.reduce_sum(tf.multiply(H_array[:,0,:], grid_row), 1)
            y_s = tf.reduce_sum(tf.multiply(H_array[:,1,:], grid_row), 1)
            t_s = tf.reduce_sum(tf.multiply(H_array[:,2,:], grid_row), 1)

            t_s_flat = tf.reshape(t_s, [-1])

            # Guard against division by zero: nudge the homogeneous divisor
            # away from 0 by +/-1e-8 with the matching sign.
            t_1 = tf.ones(shape = tf.shape(t_s_flat))
            t_0 = tf.zeros(shape = tf.shape(t_s_flat))
            sign_t = tf.where(t_s_flat >= 0, t_1, t_0) * 2 - 1
            t_s_flat = t_s_flat + sign_t*1e-8

            x_s_flat = tf.reshape(x_s, [-1]) / t_s_flat
            y_s_flat = tf.reshape(y_s, [-1]) / t_s_flat
            print("x_s_flat")
            print(x_s_flat.shape)

            # Flow = warped position minus the original grid position.
            flow_x = x_s_flat - grid_row[:,0]
            flow_y = y_s_flat - grid_row[:,1]
            flow = tf.stack([flow_x, flow_y], 1)
            flow = tf.reshape(flow, tf.stack([num_batch, height, width, 2]))

            return flow

    with tf.variable_scope(name):
        flow = _transform3(mesh)
        return flow
| 9,924 | 37.02682 | 154 | py |
graph_attention_pool | graph_attention_pool-master/main.py | import argparse
import random
import datetime
from torchvision import transforms
from graphdata import *
from train_test import *
import warnings
warnings.filterwarnings("once")
def parse_args():
    """Parse command-line arguments and resolve dataset-dependent defaults.

    Returns
    -------
    argparse.Namespace with post-processed fields: ``lr_decay_step`` and
    ``filters`` become int lists, ``img_features`` a str list, ``pool`` /
    ``pool_arch`` become lists (or None), ``scale`` a float (or None), and
    ``torch`` records the installed PyTorch version.

    Raises
    ------
    NotImplementedError
        If ``--epochs`` is unset and the dataset has no known defaults.
    """
    parser = argparse.ArgumentParser(description='Run experiments with Graph Neural Networks')
    # Dataset
    parser.add_argument('-D', '--dataset', type=str, default='colors-3',
                        choices=['colors-3', 'colors-4', 'colors-8', 'colors-16', 'colors-32',
                                 'triangles', 'mnist', 'mnist-75sp', 'TU'],
                        help='colors-n means the colors dataset with n-dimensional features; TU is any dataset from https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets')
    parser.add_argument('-d', '--data_dir', type=str, default='./data', help='path to the dataset')
    # Hyperparameters
    parser.add_argument('--epochs', type=int, default=None, help='# of the epochs')
    parser.add_argument('--batch_size', type=int, default=32, help='batch size for training data')
    parser.add_argument('--lr', type=float, default=0.001, help='Learning Rate')
    parser.add_argument('--lr_decay_step', type=str, default=None, help='number of epochs after which to reduce learning rate')
    parser.add_argument('--wdecay', type=float, default=1e-4, help='weight decay')
    parser.add_argument('--dropout', type=float, default=0, help='dropout rate')
    parser.add_argument('-f', '--filters', type=str, default='64,64,64', help='number of filters in each graph layer')
    parser.add_argument('-K', '--filter_scale', type=int, default=1, help='filter scale (receptive field size), must be > 0; 1 for GCN or GIN')
    parser.add_argument('--n_hidden', type=int, default=0, help='number of hidden units inside the graph layer')
    parser.add_argument('--aggregation', type=str, default='mean', choices=['mean', 'sum'], help='neighbors aggregation inside the graph layer')
    parser.add_argument('--readout', type=str, default=None, choices=['mean', 'sum', 'max'], help='type of global pooling over all nodes')
    parser.add_argument('--kl_weight', type=float, default=100, help='weight of the KL term in the loss')
    parser.add_argument('--pool', type=str, default=None, help='type of pooling between layers, None for global pooling only')
    parser.add_argument('--pool_arch', type=str, default=None, help='pooling layers architecture defining whether to use fully-connected layers or GNN and to which layer to attach (e.g.: fc_prev, gnn_prev, fc_curr, gnn_curr, fc_prev_32)')
    parser.add_argument('--init', type=str, default='normal', choices=['normal', 'uniform'], help='distribution used for initialization for the attention model')
    parser.add_argument('--scale', type=str, default='1', help='initialized weights scale for the attention model, set to None to use PyTorch default init')
    parser.add_argument('--degree_feature', action='store_true', default=False, help='use degree features (only for the Triangles dataset)')
    # TU datasets arguments
    parser.add_argument('--n_nodes', type=int, default=25, help='maximum number of nodes in the training set for collab, proteins and dd (35 for collab, 25 for proteins, 200 or 300 for dd)')
    parser.add_argument('--cv_folds', type=int, default=5, help='number of folds for cross-validating hyperparameters for collab, proteins and dd (5 or 10 shows similar results, 5 is faster)')
    parser.add_argument('--cv_threads', type=int, default=5, help='number of parallel threads for cross-validation')
    parser.add_argument('--tune_init', action='store_true', default=False, help='do not tune initialization hyperparameters')
    parser.add_argument('--ax', action='store_true', default=False, help='use AX for hyperparameter optimization (recommended)')
    parser.add_argument('--ax_trials', type=int, default=30, help='number of AX trials (hyperparameters optimization steps)')
    parser.add_argument('--cv', action='store_true', default=False, help='run in the cross-validation mode')
    parser.add_argument('--seed_data', type=int, default=111, help='random seed for data splits')
    # Image datasets arguments
    parser.add_argument('--img_features', type=str, default='mean,coord', help='image features to use as node features')
    parser.add_argument('--img_noise_levels', type=str, default=None,
                        help='Gaussian noise standard deviations for grayscale and color image features')
    # Auxiliary arguments
    parser.add_argument('--validation', action='store_true', default=False, help='run in the validation mode')
    parser.add_argument('--debug', action='store_true', default=False, help='evaluate on the test set after each epoch (only for visualization purposes)')
    parser.add_argument('--eval_attn_train', action='store_true', default=False, help='evaluate attention and save coefficients on the training set for models without learnable attention')
    parser.add_argument('--eval_attn_test', action='store_true', default=False, help='evaluate attention and save coefficients on the test set for models without learnable attention')
    parser.add_argument('--test_batch_size', type=int, default=100, help='batch size for test data')
    parser.add_argument('--alpha_ws', type=str, default=None, help='attention labels that will be used for (weak)supervision')
    parser.add_argument('--log_interval', type=int, default=400, help='print interval')
    parser.add_argument('--results', type=str, default='./results', help='directory to save model checkpoints and other results, set to None to prevent saving anything')
    parser.add_argument('--resume', type=str, default=None, help='checkpoint to load the model and optimzer states from and continue training')
    parser.add_argument('--device', type=str, default='cuda', choices=['cuda', 'cpu'], help='cuda/cpu')
    parser.add_argument('--seed', type=int, default=111, help='random seed for model parameters')
    parser.add_argument('--threads', type=int, default=0, help='number of threads for data loader')

    args = parser.parse_args()

    # Set default number of epochs and learning rate schedules and other hyperparameters.
    # FIX: the readout default used to be forced to 'max' *before* the
    # dataset-specific branch, which made the 'sum' default for COLORS dead
    # code. The default is now resolved after the branch: 'sum' for COLORS
    # (when epochs are also defaulted), 'max' for everything else.
    readout_unset = args.readout in [None, 'None']
    set_default_lr_decay_step = args.lr_decay_step in [None, 'None']
    if args.epochs in [None, 'None']:
        if args.dataset.find('mnist') >= 0:
            args.epochs = 30
            if set_default_lr_decay_step:
                args.lr_decay_step = '20,25'
        elif args.dataset == 'triangles':
            args.epochs = 100
            if set_default_lr_decay_step:
                args.lr_decay_step = '85,95'
        elif args.dataset == 'TU':
            args.epochs = 50
            if set_default_lr_decay_step:
                args.lr_decay_step = '25,35,45'
        elif args.dataset.find('color') >= 0:
            if readout_unset:
                args.readout = 'sum'
            if args.pool in [None, 'None']:
                args.epochs = 100
                if set_default_lr_decay_step:
                    args.lr_decay_step = '90'
            else:
                args.epochs = 300
                if set_default_lr_decay_step:
                    args.lr_decay_step = '280'
        else:
            raise NotImplementedError(args.dataset)
    if args.readout in [None, 'None']:
        args.readout = 'max'  # global max pooling for all datasets except for COLORS

    # Post-process string arguments into their structured forms.
    args.lr_decay_step = list(map(int, args.lr_decay_step.split(',')))
    args.filters = list(map(int, args.filters.split(',')))
    args.img_features = args.img_features.split(',')
    args.img_noise_levels = None if args.img_noise_levels in [None, 'None'] else list(map(float, args.img_noise_levels.split(',')))
    args.pool = None if args.pool in [None, 'None'] else args.pool.split('_')
    args.pool_arch = None if args.pool_arch in [None, 'None'] else args.pool_arch.split('_')
    try:
        args.scale = float(args.scale)
    except (TypeError, ValueError):  # e.g. --scale None -> use PyTorch default init
        args.scale = None

    args.torch = torch.__version__
    for arg in vars(args):
        print(arg, getattr(args, arg))

    return args
def load_synthetic(args):
    """Load the synthetic COLORS/TRIANGLES datasets plus their loss and collate functions.

    Returns (train_dataset, test_dataset, loss_fn, collate_fn, in_features, out_features);
    the regression target is a single scalar, hence out_features == 1.
    """
    eval_split = 'val' if args.validation else 'test'
    train_dataset = SyntheticGraphs(args.data_dir, args.dataset, 'train',
                                    degree_feature=args.degree_feature,
                                    attn_coef=args.alpha_ws)
    test_dataset = SyntheticGraphs(args.data_dir, args.dataset, eval_split,
                                   degree_feature=args.degree_feature)
    return (train_dataset, test_dataset, mse_loss, collate_batch,
            train_dataset.feature_dim, 1)
def load_mnist(args):
    """Load MNIST (full images) or MNIST-75sp (superpixels) as graph datasets.

    Builds train/test (or train/val) datasets, picks the matching collate function,
    optionally loads fixed noise tensors for robustness evaluation, and computes the
    per-node input feature dimensionality.

    Returns: train_dataset, test_dataset, loss_fn, collate_fn, in_features,
    out_features (=10 classes), noises, color_noises (both None in validation mode).
    """
    use_mean_px = 'mean' in args.img_features
    use_coord = 'coord' in args.img_features
    assert use_mean_px, ('this mode is not well supported', use_mean_px)
    # Ground-truth attention: binarize at 0 only for the 'gt' pooling mode with filter_scale > 1
    gt_attn_threshold = 0 if (args.pool is not None and args.pool[1] in ['gt'] and args.filter_scale > 1) else 0.5
    if args.dataset == 'mnist':
        train_dataset = MNIST(args.data_dir, train=True, download=True, transform=transforms.ToTensor(),
                              attn_coef=args.alpha_ws)
    else:
        train_dataset = MNIST75sp(args.data_dir, split='train', use_mean_px=use_mean_px, use_coord=use_coord,
                                  gt_attn_threshold=gt_attn_threshold, attn_coef=args.alpha_ws)
    noises, color_noises = None, None
    if args.validation:
        # Carve the last 5000 training samples out as a validation set
        n_val = 5000
        if args.dataset == 'mnist':
            train_dataset.train_data = train_dataset.train_data[:-n_val]
            train_dataset.train_labels = train_dataset.train_labels[:-n_val]
            test_dataset = MNIST(args.data_dir, train=True, download=True, transform=transforms.ToTensor())
            test_dataset.train_data = train_dataset.train_data[-n_val:]
            test_dataset.train_labels = train_dataset.train_labels[-n_val:]
        else:
            train_dataset.train_val_split(np.arange(0, train_dataset.n_samples - n_val))
            test_dataset = MNIST75sp(args.data_dir, split='train', use_mean_px=use_mean_px, use_coord=use_coord,
                                     gt_attn_threshold=gt_attn_threshold)
            test_dataset.train_val_split(np.arange(train_dataset.n_samples - n_val, train_dataset.n_samples))
    else:
        # Noise tensors used only when testing on the real test set
        noise_file = pjoin(args.data_dir, '%s_noise.pt' % args.dataset.replace('-', '_'))
        color_noise_file = pjoin(args.data_dir, '%s_color_noise.pt' % args.dataset.replace('-', '_'))
        if args.dataset == 'mnist':
            test_dataset = MNIST(args.data_dir, train=False, download=True, transform=transforms.ToTensor())
            noise_shape = (len(test_dataset.test_labels), 28 * 28)
        else:
            test_dataset = MNIST75sp(args.data_dir, split='test', use_mean_px=use_mean_px, use_coord=use_coord,
                                     gt_attn_threshold=gt_attn_threshold)
            noise_shape = (len(test_dataset.labels), 75)
        # Generate/load noise (save it to make reproducible)
        noises = load_save_noise(noise_file, noise_shape)
        color_noises = load_save_noise(color_noise_file, (noise_shape[0], noise_shape[1], 3))
    if args.dataset == 'mnist':
        # All full-resolution MNIST images share the same grid graph, so precompute it once
        A, coord, mask = precompute_graph_images(train_dataset.train_data.shape[1])
        collate_fn = lambda batch: collate_batch_images(batch, A, mask, use_mean_px=use_mean_px,
                                                        coord=coord if use_coord else None,
                                                        gt_attn_threshold=gt_attn_threshold,
                                                        replicate_features=args.img_noise_levels is not None)
    else:
        train_dataset.precompute_graph_data(replicate_features=args.img_noise_levels is not None, threads=12)
        test_dataset.precompute_graph_data(replicate_features=args.img_noise_levels is not None, threads=12)
        collate_fn = collate_batch
    loss_fn = F.cross_entropy
    # 2 extra channels when features are replicated for noisy color evaluation
    in_features = 0 if args.img_noise_levels is None else 2
    for features in args.img_features:
        if features == 'mean':
            in_features += 1
        elif features == 'coord':
            in_features += 2
        else:
            raise NotImplementedError(features)
    in_features = np.max((in_features, 1)) # in_features=1 if neither mean nor coord are used (dummy features will be used in this case)
    out_features = 10
    return train_dataset, test_dataset, loss_fn, collate_fn, in_features, out_features, noises, color_noises
def load_TU(args, cv_folds=5):
    """Load a TU graph-classification benchmark and (optionally) tune pooling hyperparameters.

    Depending on args.pool either builds plain global-pooling datasets, or runs
    hyperparameter search (Ax-based or grid cross-validation) for attention pooling,
    and for supervised attention additionally pre-trains a global-pooling model to
    produce weakly-supervised attention labels.

    NOTE(review): relies on the module-level `rnd_data` created in __main__ via
    set_seed() — confirm before reusing this function elsewhere.

    Returns: train_dataset, test_dataset, loss_fn, collate_fn, in_features,
    out_features, pool, kl_weight, scale, init, n_hidden_attn.
    """
    loss_fn = F.cross_entropy
    collate_fn = collate_batch
    scale, init = args.scale, args.init
    n_hidden_attn = float(args.pool_arch[2]) if (args.pool_arch is not None and len(args.pool_arch) > 2) else 0
    if args.pool is None:
        # Global pooling models
        datareader = DataReader(data_dir=args.data_dir, N_nodes=args.n_nodes, rnd_state=rnd_data, folds=0)
        train_dataset = GraphData(datareader, None, 'train_val')
        test_dataset = GraphData(datareader, None, 'test')
        in_features = train_dataset.num_features
        out_features = train_dataset.num_classes
        pool = args.pool
        kl_weight = args.kl_weight
    elif args.pool[1] == 'gt':
        raise ValueError('ground truth attention for TU datasets is not available')
    elif args.pool[1] in ['sup', 'unsup']:
        datareader = DataReader(data_dir=args.data_dir, N_nodes=args.n_nodes, rnd_state=rnd_data, folds=cv_folds)
        if args.ax:
            # Cross-validation using Ax (recommended way), Python3 must be used
            best_parameters = ax_optimize(datareader, args, collate_fn, loss_fn, None, folds=cv_folds,
                                          threads=args.cv_threads, n_trials=args.ax_trials)
            pool = args.pool
            kl_weight = best_parameters['kl_weight']
            if args.tune_init:
                scale, init = best_parameters['scale'], best_parameters['init']
            n_hidden_attn, layer = best_parameters['n_hidden_attn'], 1
            if layer == 0:
                # Attach attention to the first layer: drop the per-layer pool spec at index 3
                pool = copy.deepcopy(args.pool)
                del pool[3]
            pool = set_pool(best_parameters['pool'], pool)
        else:
            if not args.cv:
                # Run with some fixed parameters without cross-validation
                pool_thresh_values = np.array([float(args.pool[-1])])
                n_hiddens = [n_hidden_attn]
                layers = [1]
            elif args.debug:
                pool_thresh_values = np.array([1e-4, 1e-1])
                n_hiddens = [n_hidden_attn]
                layers = [1]
            else:
                # Cross-validation using grid search (not recommended, since it's time consuming and not effective
                if args.data_dir.lower().find('proteins') >= 0:
                    pool_thresh_values = np.array([2e-3, 5e-3, 1e-2, 3e-2, 5e-2])
                elif args.data_dir.lower().find('dd') >= 0:
                    pool_thresh_values = np.array([1e-4, 1e-3, 2e-3, 5e-3, 1e-2, 3e-2, 5e-2, 1e-1])
                elif args.data_dir.lower().find('collab') >= 0:
                    pool_thresh_values = np.array([1e-3, 2e-3, 5e-3, 1e-2, 3e-2, 5e-2, 1e-1])
                else:
                    raise NotImplementedError('this dataset is not supported currently')
                n_hiddens = np.array([0, 32]) # hidden units in the atention model
                layers = np.array([0, 1]) # layer where to attach the attention model
            if args.pool[1] == 'sup' and not args.debug and args.cv:
                kl_weight_values = np.array([0.25, 1, 2, 10])
            else:
                kl_weight_values = np.array([args.kl_weight]) # any value (ignored for unsupervised training)
            if len(pool_thresh_values) > 1 or len(kl_weight_values) > 1 or len(n_hiddens) > 1 or len(layers) > 1:
                # Exhaustive grid search over (layer, n_hidden, pool threshold, kl weight)
                val_acc = np.zeros((len(layers), len(n_hiddens), len(pool_thresh_values), len(kl_weight_values)))
                for i_, layer in enumerate(layers):
                    if layer == 0:
                        pool = copy.deepcopy(args.pool)
                        del pool[3]
                    else:
                        pool = args.pool
                    for j_, n_hidden_attn in enumerate(n_hiddens):
                        for k_, pool_thresh in enumerate(pool_thresh_values):
                            for m_, kl_weight in enumerate(kl_weight_values):
                                val_acc[i_, j_, k_, m_] = \
                                    cross_validation(datareader, args, collate_fn, loss_fn, set_pool(pool_thresh, pool),
                                                     kl_weight, None, n_hidden_attn=n_hidden_attn, folds=cv_folds, threads=args.cv_threads)
                ind1, ind2, ind3, ind4 = np.where(val_acc == np.max(val_acc)) # np.argmax returns only first occurrence
                print(val_acc)
                print(ind1, ind2, ind3, ind4, layers[ind1], n_hiddens[ind2], pool_thresh_values[ind3], kl_weight_values[ind4],
                      val_acc[ind1[0], ind2[0], ind3[0], ind4[0]])
                # Re-derive the best configuration from the argmax indices
                layer = layers[ind1[0]]
                if layer == 0:
                    pool = copy.deepcopy(args.pool)
                    del pool[3]
                else:
                    pool = args.pool
                n_hidden_attn = n_hiddens[ind2[0]]
                pool = set_pool(pool_thresh_values[ind3[0]], pool)
                kl_weight = kl_weight_values[ind4[0]]
            else:
                pool = args.pool
                kl_weight = args.kl_weight
        train_dataset = GraphData(datareader, None, 'train_val')
        test_dataset = GraphData(datareader, None, 'test')
        in_features = train_dataset.num_features
        out_features = train_dataset.num_classes
        if args.pool[1] == 'sup':
            # Train a model with global pooling first
            train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.threads,
                                      collate_fn=collate_fn)
            train_loader_test = DataLoader(train_dataset, batch_size=args.test_batch_size, shuffle=False,
                                           num_workers=args.threads, collate_fn=collate_fn)
            # Train global pooling model
            start_epoch, model, optimizer, scheduler = create_model_optimizer(in_features, out_features, None, kl_weight,
                                                                              args, scale=scale, init=init, n_hidden_attn=n_hidden_attn)
            for epoch in range(start_epoch, args.epochs + 1):
                scheduler.step()
                train_loss, acc = train(model, train_loader, optimizer, epoch, args, loss_fn, None)
            # Extract its attention coefficients to use as weakly-supervised labels
            train_loss, train_acc, attn_WS = test(model, train_loader_test, epoch, loss_fn, 'train', args, None,
                                                  eval_attn=True)[:3]
            train_dataset = GraphData(datareader, None, 'train_val', attn_labels=attn_WS)
    else:
        raise NotImplementedError(args.pool)
    return train_dataset, test_dataset, loss_fn, collate_fn, in_features, out_features, pool, kl_weight, scale, init, n_hidden_attn
if __name__ == '__main__':
    # Entry point: parse arguments, build datasets/loaders, create the model,
    # then train and periodically evaluate (including attention evaluation).
    # mp.set_start_method('spawn')
    dt = datetime.datetime.now()
    print('start time:', dt)
    args = parse_args()
    # Unique-ish run identifier derived from the start-time microseconds
    args.experiment_ID = '%06d' % dt.microsecond
    print('experiment_ID: ', args.experiment_ID)
    if args.cv_threads > 1 and args.dataset == 'TU':
        # this requires python3
        torch.multiprocessing.set_start_method('spawn')
        print('gpus: ', torch.cuda.device_count())
    if args.results not in [None, 'None'] and not os.path.isdir(args.results):
        os.mkdir(args.results)
    rnd, rnd_data = set_seed(args.seed, args.seed_data)
    pool = args.pool
    kl_weight = args.kl_weight
    scale = args.scale
    init = args.init
    n_hidden_attn = float(args.pool_arch[2]) if (args.pool_arch is not None and len(args.pool_arch) > 2) else 0
    # Dispatch dataset loading; load_TU may overwrite the pooling hyperparameters above
    if args.dataset.find('colors') >= 0 or args.dataset == 'triangles':
        train_dataset, test_dataset, loss_fn, collate_fn, in_features, out_features = load_synthetic(args)
    elif args.dataset in ['mnist', 'mnist-75sp']:
        train_dataset, test_dataset, loss_fn, collate_fn, in_features, out_features, noises, color_noises = load_mnist(args)
    else:
        train_dataset, test_dataset, loss_fn, collate_fn, in_features, out_features, pool, kl_weight, scale, init, n_hidden_attn = \
            load_TU(args, cv_folds=args.cv_folds)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=args.threads,
                              collate_fn=collate_fn)
    # A loader to test and evaluate attn on the training set (shouldn't be shuffled and have larger batch size multiple of 50)
    train_loader_test = DataLoader(train_dataset, batch_size=args.test_batch_size, shuffle=False, num_workers=args.threads, collate_fn=collate_fn)
    print('test_dataset', test_dataset.split)
    test_loader = DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False,
                             num_workers=args.threads, collate_fn=collate_fn)
    start_epoch, model, optimizer, scheduler = create_model_optimizer(in_features, out_features, pool, kl_weight, args,
                                                                      scale=scale, init=init, n_hidden_attn=n_hidden_attn)
    feature_stats = None
    if args.dataset in ['mnist', 'mnist-75sp']:
        # Feature normalization statistics estimated on the training set
        feature_stats = compute_feature_stats(model, train_loader, args.device, n_batches=1000)
    # Test function wrapper
    def test_fn(loader, epoch, split, eval_attn):
        # Evaluate on clean data, and for MNIST test sets also on noisy/color-noisy variants
        test_loss, acc, _, _ = test(model, loader, epoch, loss_fn, split, args, feature_stats,
                                    noises=None, img_noise_level=None, eval_attn=eval_attn, alpha_WS_name='orig')
        if args.dataset in ['mnist', 'mnist-75sp'] and split == 'test' and args.img_noise_levels is not None:
            test(model, loader, epoch, loss_fn, split, args, feature_stats,
                 noises=noises, img_noise_level=args.img_noise_levels[0], eval_attn=eval_attn, alpha_WS_name='noisy')
            test(model, loader, epoch, loss_fn, split, args, feature_stats,
                 noises=color_noises, img_noise_level=args.img_noise_levels[1], eval_attn=eval_attn, alpha_WS_name='noisy-c')
        return test_loss, acc
    if start_epoch > args.epochs:
        # Resumed from a finished checkpoint: evaluate only
        print('evaluating the model')
        test_fn(test_loader, start_epoch - 1, 'val' if args.validation else 'test', args.eval_attn_test)
    else:
        for epoch in range(start_epoch, args.epochs + 1):
            eval_epoch = epoch <= 1 or epoch == args.epochs # check for epoch == 1 just to make sure that the test function works fine for this test set before training all the way until the last epoch
            scheduler.step()
            train_loss, acc = train(model, train_loader, optimizer, epoch, args, loss_fn, feature_stats)
            if eval_epoch:
                save_checkpoint(model, scheduler, optimizer, args, epoch)
                # Report Training accuracy and other metrics on the training set
                test_fn(train_loader_test, epoch, 'train', (epoch == args.epochs) and args.eval_attn_train)
            if args.validation:
                test_fn(test_loader, epoch, 'val', (epoch == args.epochs) and args.eval_attn_test)
            elif eval_epoch or args.debug:
                test_fn(test_loader, epoch, 'test', (epoch == args.epochs) and args.eval_attn_test)
    print('done in {}'.format(datetime.datetime.now() - dt))
| 23,756 | 59.144304 | 238 | py |
graph_attention_pool | graph_attention_pool-master/extract_superpixels.py | # Compute superpixels for MNIST/CIFAR-10 using SLIC algorithm
# https://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.slic
import numpy as np
import random
import os
import scipy
import pickle
from skimage.segmentation import slic
from torchvision import datasets
import multiprocessing as mp
import scipy.ndimage
import scipy.spatial
import argparse
import datetime
def parse_args():
    """Parse command-line options for superpixel extraction and print them.

    Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser(description='Extract SLIC superpixels from images')
    parser.add_argument('-D', '--dataset', type=str, default='mnist', choices=['mnist', 'cifar10'])
    parser.add_argument('-d', '--data_dir', type=str, default='./data', help='path to the dataset')
    parser.add_argument('-o', '--out_dir', type=str, default='./data', help='path where to save superpixels')
    parser.add_argument('-s', '--split', type=str, default='train', choices=['train', 'val', 'test'])
    parser.add_argument('-t', '--threads', type=int, default=0, help='number of parallel threads')
    parser.add_argument('-n', '--n_sp', type=int, default=75, help='max number of superpixels per image')
    # BUG FIX: compactness was declared type=int while its default and recommended
    # values are fractional (0.25); any value given on the command line was silently
    # truncated to an integer and '0.25' raised an error. It must be a float.
    parser.add_argument('-c', '--compactness', type=float, default=0.25, help='compactness of the SLIC algorithm '
                                                                              '(Balances color proximity and space proximity): '
                                                                              '0.25 is a good value for MNIST '
                                                                              'and 10 for color images like CIFAR-10')
    parser.add_argument('--seed', type=int, default=111, help='seed for shuffling nodes')
    args = parser.parse_args()
    # Echo the full configuration for reproducibility
    for arg in vars(args):
        print(arg, getattr(args, arg))
    return args
def process_image(params):
    """Extract SLIC superpixels and per-superpixel features from one image.

    :param params: tuple (img uint8 array, image index, total image count, args
                   namespace, to_print flag, shuffle flag) — packed for mp.Pool.map
    :return: (sp_intensity [n_sp, n_ch] mean color per superpixel,
              sp_coord [n_sp, 2] center of mass (row, col),
              sp_order [n_sp] superpixel indices (shuffled if requested),
              superpixels [H, W] per-pixel segment map)
    """
    img, index, n_images, args, to_print, shuffle = params
    assert img.dtype == np.uint8, img.dtype
    img = (img / 255.).astype(np.float32)
    n_sp_extracted = args.n_sp + 1 # number of actually extracted superpixels (can be different from requested in SLIC)
    n_sp_query = args.n_sp + (20 if args.dataset == 'mnist' else 50) # number of superpixels we ask to extract (larger to extract more superpixels - closer to the desired n_sp)
    while n_sp_extracted > args.n_sp:
        superpixels = slic(img, n_segments=n_sp_query, compactness=args.compactness, multichannel=len(img.shape) > 2)
        sp_indices = np.unique(superpixels)
        n_sp_extracted = len(sp_indices)
        n_sp_query -= 1 # reducing the number of superpixels until we get <= n superpixels
    assert n_sp_extracted <= args.n_sp and n_sp_extracted > 0, (args.split, index, n_sp_extracted, args.n_sp)
    assert n_sp_extracted == np.max(superpixels) + 1, ('superpixel indices', np.unique(superpixels)) # make sure superpixel indices are numbers from 0 to n-1
    if shuffle:
        ind = np.random.permutation(n_sp_extracted)
    else:
        ind = np.arange(n_sp_extracted)
    sp_order = sp_indices[ind].astype(np.int32)
    if len(img.shape) == 2:
        # Add a channel axis so grayscale and color images share one code path
        img = img[:, :, None]
    n_ch = 1 if img.shape[2] == 1 else 3
    sp_intensity, sp_coord = [], []
    for seg in sp_order:
        mask = (superpixels == seg).squeeze()
        avg_value = np.zeros(n_ch)
        for c in range(n_ch):
            avg_value[c] = np.mean(img[:, :, c][mask])
        cntr = np.array(scipy.ndimage.measurements.center_of_mass(mask)) # row, col
        sp_intensity.append(avg_value)
        sp_coord.append(cntr)
    sp_intensity = np.array(sp_intensity, np.float32)
    sp_coord = np.array(sp_coord, np.float32)
    if to_print:
        print('image={}/{}, shape={}, min={:.2f}, max={:.2f}, n_sp={}'.format(index + 1, n_images, img.shape,
                                                                              img.min(), img.max(), sp_intensity.shape[0]))
    return sp_intensity, sp_coord, sp_order, superpixels
if __name__ == '__main__':
    # Entry point: download the requested dataset split, extract superpixels for
    # every image (optionally in parallel), and pickle the results to out_dir.
    dt = datetime.datetime.now()
    print('start time:', dt)
    args = parse_args()
    if not os.path.isdir(args.out_dir):
        os.mkdir(args.out_dir)
    random.seed(args.seed)
    np.random.seed(args.seed) # to make node random permutation reproducible (not tested)
    # Read image data using torchvision
    is_train = args.split.lower() == 'train'
    if args.dataset == 'mnist':
        data = datasets.MNIST(args.data_dir, train=is_train, download=True)
        assert args.compactness < 10, ('high compactness can result in bad superpixels on MNIST')
        assert args.n_sp > 1 and args.n_sp < 28*28, (
            'the number of superpixels cannot exceed the total number of pixels or be too small')
    elif args.dataset == 'cifar10':
        data = datasets.CIFAR10(args.data_dir, train=is_train, download=True)
        assert args.compactness > 1, ('low compactness can result in bad superpixels on CIFAR-10')
        assert args.n_sp > 1 and args.n_sp < 32*32, (
            'the number of superpixels cannot exceed the total number of pixels or be too small')
    else:
        raise NotImplementedError('unsupported dataset: ' + args.dataset)
    images = data.train_data if is_train else data.test_data
    labels = data.train_labels if is_train else data.test_labels
    # Normalize torchvision's mixed return types (tensor/list) to numpy arrays
    if not isinstance(images, np.ndarray):
        images = images.numpy()
    if isinstance(labels, list):
        labels = np.array(labels)
    if not isinstance(labels, np.ndarray):
        labels = labels.numpy()
    n_images = len(labels)
    if args.threads <= 0:
        # Sequential processing
        sp_data = []
        for i in range(n_images):
            sp_data.append(process_image((images[i], i, n_images, args, True, True)))
    else:
        # Parallel processing across a pool of worker processes
        with mp.Pool(processes=args.threads) as pool:
            sp_data = pool.map(process_image, [(images[i], i, n_images, args, True, True) for i in range(n_images)])
    # Save per-pixel segment maps separately from the compact per-superpixel features
    superpixels = [sp_data[i][3] for i in range(n_images)]
    sp_data = [sp_data[i][:3] for i in range(n_images)]
    with open('%s/%s_%dsp_%s.pkl' % (args.out_dir, args.dataset, args.n_sp, args.split), 'wb') as f:
        pickle.dump((labels.astype(np.int32), sp_data), f, protocol=2)
    with open('%s/%s_%dsp_%s_superpixels.pkl' % (args.out_dir, args.dataset, args.n_sp, args.split), 'wb') as f:
        pickle.dump(superpixels, f, protocol=2)
    print('done in {}'.format(datetime.datetime.now() - dt))
| 6,355 | 44.078014 | 177 | py |
graph_attention_pool | graph_attention_pool-master/chebygin.py | import numpy as np
import torch
import torch.sparse
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from attention_pooling import *
from utils import *
class ChebyGINLayer(nn.Module):
    '''
    General Graph Neural Network layer that depending on arguments can be:
    1. Graph Convolution Layer (T. Kipf and M. Welling, ICLR 2017)
    2. Chebyshev Graph Convolution Layer (M. Defferrard et al., NeurIPS 2017)
    3. GIN Layer (K. Xu et al., ICLR 2019)
    4. ChebyGIN Layer (B. Knyazev et al., ICLR 2019 Workshop on Representation Learning on Graphs and Manifolds)
    The first three types (1-3) of layers are particular cases of the fourth (4) case.
    '''
    def __init__(self,
                 in_features,
                 out_features,
                 K,
                 n_hidden=0,
                 aggregation='mean',
                 activation=nn.ReLU(True),
                 n_relations=1):
        # K: Chebyshev filter order (K=1 reduces to GCN/GIN);
        # n_hidden: hidden units of the node-update MLP (0 = single linear layer);
        # aggregation: 'mean' (GCN-style) or 'sum' (GIN-style) neighbor aggregation;
        # n_relations: number of edge types (each gets its own Chebyshev basis)
        super(ChebyGINLayer, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.n_relations = n_relations
        assert K > 0, 'order is assumed to be > 0'
        self.K = K
        assert n_hidden >= 0, ('invalid n_hidden value', n_hidden)
        self.n_hidden = n_hidden
        assert aggregation in ['mean', 'sum'], ('invalid aggregation', aggregation)
        self.aggregation = aggregation
        self.activation = activation
        # MLP input: features from all K Chebyshev orders of all relations, concatenated
        n_in = self.in_features * self.K * n_relations
        if self.n_hidden == 0:
            fc = [nn.Linear(n_in, self.out_features)]
        else:
            fc = [nn.Linear(n_in, n_hidden),
                  nn.ReLU(True),
                  nn.Linear(n_hidden, self.out_features)]
        if activation is not None:
            fc.append(activation)
        self.fc = nn.Sequential(*fc)
        print('ChebyGINLayer', list(self.fc.children())[0].weight.shape,
              torch.norm(list(self.fc.children())[0].weight, dim=1)[:10])
    def __repr__(self):
        return 'ChebyGINLayer(in_features={}, out_features={}, K={}, n_hidden={}, aggregation={})\nfc={}'.format(
            self.in_features,
            self.out_features,
            self.K,
            self.n_hidden,
            self.aggregation,
            str(self.fc))
    def chebyshev_basis(self, L, X, K):
        '''
        Return T_k X where T_k are the Chebyshev polynomials of order up to K.
        :param L: graph Laplacian, batch (B), nodes (N), nodes (N)
        :param X: input of size batch (B), nodes (N), features (F)
        :param K: Chebyshev polynomial order, i.e. filter size (number of hopes)
        :return: Tensor of size (B,N,K,F) as a result of multiplying T_k(L) by X for each order
        '''
        if K > 1:
            # Chebyshev recurrence: T_0 = X, T_1 = L X, T_k = 2 L T_{k-1} - T_{k-2}
            Xt = [X]
            Xt.append(torch.bmm(L, X)) # B,N,F
            for k in range(2, K):
                Xt.append(2 * torch.bmm(L, Xt[k - 1]) - Xt[k - 2]) # B,N,F
            Xt = torch.stack(Xt, 2) # B,N,K,F
            return Xt
        else:
            # GCN
            assert K == 1, K
            return torch.bmm(L, X).unsqueeze(2) # B,N,1,F
    def laplacian_batch(self, A, add_identity=False):
        '''
        Computes normalized Laplacian transformed so that its eigenvalues are in range [-1, 1].
        Note that sum of all eigenvalues = trace(L) = 0.
        :param A: Tensor of size (B,N,N) containing batch (B) of adjacency matrices of shape N,N
        :param add_identity: add self-loops before normalization (used for K=1, GCN/GIN style)
        :return: tuple (node degrees D of size (B,N), normalized Laplacian L of size (B,N,N))
        '''
        B, N = A.shape[:2]
        if add_identity:
            A = A + torch.eye(N, device=A.get_device() if A.is_cuda else 'cpu').unsqueeze(0)
        D = torch.sum(A, 1) # nodes degree (B,N)
        # Symmetric normalization D^{-1/2} A D^{-1/2}; 1e-5 guards against zero degrees
        D_hat = (D + 1e-5) ** (-0.5)
        L = D_hat.view(B, N, 1) * A * D_hat.view(B, 1, N) # B,N,N
        if not add_identity:
            L = -L # for ChebyNet to make a valid Chebyshev basis
        return D, L
    def forward(self, data):
        # data: [x (B,N,F), A (B,N,N) or (B,N,N,R), mask (B,N) or (B,N,1), ...extras]
        x, A, mask = data[:3]
        B, N, F = x.shape
        assert N == A.shape[1] == A.shape[2], ('invalid shape', N, x.shape, A.shape)
        if len(A.shape) == 3:
            A = A.unsqueeze(3)
        y_out = []
        # Process each relation (edge type) with its own Laplacian/Chebyshev basis
        for rel in range(A.shape[3]):
            D, L = self.laplacian_batch(A[:, :, :, rel], add_identity=self.K == 1) # for the first layer this can be done at the preprocessing stage
            y = self.chebyshev_basis(L, x, self.K) # B,N,K,F
            if self.aggregation == 'sum':
                # Sum features of neighbors
                if self.K == 1:
                    # GIN
                    y = y * D.view(B, N, 1, 1)
                else:
                    # ChebyGIN
                    D_GIN = torch.ones(B, N, self.K, device=x.get_device() if x.is_cuda else 'cpu')
                    D_GIN[:, :, 1:] = D.view(B, N, 1).expand(-1, -1, self.K - 1) # keep self-loop features the same
                    y = y * D_GIN.view(B, N, self.K, 1) # apply summation for other scales
            y_out.append(y)
        y = torch.cat(y_out, dim=2)
        y = self.fc(y.view(B, N, -1)) # B,N,F
        if len(mask.shape) == 2:
            mask = mask.unsqueeze(2)
        # Zero out features of padded (dummy) nodes
        y = y * mask.float()
        output = [y, A, mask]
        output.extend(data[3:] + [x]) # for python2
        return output
class GraphReadout(nn.Module):
    '''
    Global pooling over graph nodes, applied after the last graph layer.
    Supported modes: 'max', 'avg'/'mean' (average over valid nodes only), 'sum'.
    '''

    def __init__(self, pool_type):
        super(GraphReadout, self).__init__()
        self.pool_type = pool_type
        node_dim = 1  # tensors are (batch, nodes, features): pool along the node axis

        if pool_type == 'max':
            def readout(x, mask):
                return torch.max(x, dim=node_dim)[0]
        elif pool_type in ['avg', 'mean']:
            def readout(x, mask):
                # Sum over all nodes, then divide by the count of valid
                # (non-padded) nodes in each sample of the batch.
                return torch.sum(x, dim=node_dim) / torch.sum(mask, dim=node_dim).float()
        elif pool_type in ['sum']:
            def readout(x, mask):
                return torch.sum(x, dim=node_dim)
        else:
            raise NotImplementedError(pool_type)
        self.readout_layer = readout

    def __repr__(self):
        return 'GraphReadout({})'.format(self.pool_type)

    def forward(self, data):
        # data: [x (B,N,F), A, mask (B,N), ...extras]; returns pooled x plus the rest
        x, A, mask = data[:3]
        B, N = x.shape[:2]
        pooled = self.readout_layer(x, mask.view(B, N, 1))
        output = [pooled]
        output.extend(data[1:])  # avoid [x, *data[1:]] for Python2 compatibility
        return output
class ChebyGIN(nn.Module):
    '''
    Graph Neural Network class.
    Stacks ChebyGINLayer graph convolutions, optionally interleaved with
    AttentionPooling layers, followed by a global readout and a classifier head.
    With out_features=0 the network outputs per-node features instead (used as
    the attention sub-network).
    '''
    def __init__(self,
                 in_features,
                 out_features,
                 filters,
                 K=1,
                 n_hidden=0,
                 aggregation='mean',
                 dropout=0,
                 readout='max',
                 pool=None, # Example: 'attn_gt_threshold_0_skip_skip'.split('_'),
                 pool_arch='fc_prev'.split('_'),
                 large_graph=False, # > ~500 graphs
                 kl_weight=None,
                 graph_layer_fn=None,
                 init='normal',
                 scale=None,
                 debug=False):
        super(ChebyGIN, self).__init__()
        self.out_features = out_features
        assert len(filters) > 0, 'filters must be an iterable object with at least one element'
        assert K > 0, 'filter scale must be a positive integer'
        self.pool = pool
        self.pool_arch = pool_arch
        self.debug = debug
        n_prev = None
        attn_gnn = None
        if graph_layer_fn is None:
            # Default graph layer factory; a custom one can be injected (used recursively below)
            graph_layer_fn = lambda n_in, n_out, K_, n_hidden_, activation: ChebyGINLayer(in_features=n_in,
                                                                                         out_features=n_out,
                                                                                         K=K_,
                                                                                         n_hidden=n_hidden_,
                                                                                         aggregation=aggregation,
                                                                                         activation=activation)
        if self.pool_arch is not None and self.pool_arch[0] == 'gnn':
            # Attention model is itself a small ChebyGIN producing one score per node
            attn_gnn = lambda n_in: ChebyGIN(in_features=n_in,
                                             out_features=0,
                                             filters=[32, 32, 1],
                                             K=np.min((K, 2)),
                                             n_hidden=0,
                                             graph_layer_fn=graph_layer_fn)
        graph_layers = []
        for layer, f in enumerate(filters + [None]):
            n_in = in_features if layer == 0 else filters[layer - 1]
            # Pooling layers
            # It's a non-standard way to put pooling before convolution, but it's important for our work
            if self.pool is not None and len(self.pool) > len(filters) + layer and self.pool[layer + 3] != 'skip':
                graph_layers.append(AttentionPooling(in_features=n_in, in_features_prev=n_prev,
                                                     pool_type=self.pool[:3] + [self.pool[layer + 3]],
                                                     pool_arch=self.pool_arch,
                                                     large_graph=large_graph,
                                                     kl_weight=kl_weight,
                                                     attn_gnn=attn_gnn,
                                                     init=init,
                                                     scale=scale,
                                                     debug=debug))
            if f is not None:
                # Graph "convolution" layers
                # no ReLU if the last layer and no fc layer after that
                graph_layers.append(graph_layer_fn(n_in, f, K, n_hidden,
                                                   None if self.out_features == 0 and layer == len(filters) - 1 else nn.ReLU(True)))
            n_prev = n_in
        if self.out_features > 0:
            # Global pooling over nodes
            graph_layers.append(GraphReadout(readout))
        self.graph_layers = nn.Sequential(*graph_layers)
        if self.out_features > 0:
            # Fully connected (classification/regression) layers
            self.fc = nn.Sequential(*(([nn.Dropout(p=dropout)] if dropout > 0 else []) + [nn.Linear(filters[-1], out_features)]))
    def forward(self, data):
        # data: [x, A, mask, labels, params_dict]; returns (predictions, params_dict)
        data = self.graph_layers(data)
        if self.out_features > 0:
            y = self.fc(data[0]) # B,out_features
        else:
            y = data[0] # B,N,out_features
        return y, data[4]
| 10,695 | 40.945098 | 149 | py |
graph_attention_pool | graph_attention_pool-master/graphdata.py | import numpy as np
import os
from os.path import join as pjoin
import pickle
import copy
import torch
import torch.utils
import torch.utils.data
import torch.nn.functional as F
import torchvision
from scipy.spatial.distance import cdist
from utils import *
def compute_adjacency_matrix_images(coord, sigma=0.1):
    """Build a dense graph adjacency matrix from 2D node coordinates.

    Edge weights decay exponentially with pairwise Euclidean distance:
    A_ij = exp(-d_ij / (sigma * pi)^2), with zeros on the diagonal (no self-loops).

    :param coord: array reshapeable to (N, 2) of node coordinates
    :param sigma: scale controlling how fast edge weights decay with distance
    :return: (N, N) numpy array of edge weights
    """
    points = coord.reshape(-1, 2)
    pairwise_dist = cdist(points, points)
    decay_scale = (sigma * np.pi) ** 2
    adjacency = np.exp(-pairwise_dist / decay_scale)
    np.fill_diagonal(adjacency, 0)
    return adjacency
def precompute_graph_images(img_size):
    """Precompute the shared grid-graph tensors for square images of a given size.

    All images of the same resolution share one graph, so this is computed once.

    :param img_size: image side length in pixels (e.g. 28 for MNIST)
    :return: (A, coord, mask) with shapes (1, N, N), (1, N, 2), (1, N),
             where N = img_size * img_size; batch dim 1 is expanded at collate time.
    """
    cols, rows = np.meshgrid(np.arange(img_size), np.arange(img_size))
    grid = np.stack((cols, rows), axis=2) / img_size  # (img_size, img_size, 2) in [0, 1)
    # Inlined compute_adjacency_matrix_images(grid) with its default sigma=0.1
    points = grid.reshape(-1, 2)
    dist = cdist(points, points)
    adj = np.exp(-dist / (0.1 * np.pi) ** 2)
    adj[np.diag_indices_from(adj)] = 0
    A = torch.from_numpy(adj).float().unsqueeze(0)
    coord = torch.from_numpy(grid).float().view(1, -1, 2)
    mask = torch.ones(1, img_size * img_size, dtype=torch.uint8)
    return A, coord, mask
def collate_batch_images(batch, A, mask, use_mean_px=True, coord=None,
                         gt_attn_threshold=0, replicate_features=True):
    """Collate a batch of full-resolution images into graph-batch tensors.

    Every image shares the same precomputed grid graph, so A/mask are just
    expanded along the batch dimension.

    :param batch: list of samples (img, label[, weakly-supervised attention])
    :param A: precomputed adjacency of shape (1, N, N)
    :param mask: precomputed node mask of shape (1, N)
    :param use_mean_px: use pixel intensities as node features (required)
    :param coord: optional (1, N, 2) node coordinates appended to features
    :param gt_attn_threshold: 0 -> binary GT attention (pixel > 0); otherwise
                              intensities below the threshold are zeroed
    :param replicate_features: left-pad features by 2 replicated channels
                               (for testing on colored/noisy images)
    :return: [x, A, mask, labels, params_dict]
    """
    B = len(batch)
    C, H, W = batch[0][0].shape
    N_nodes = H * W
    params_dict = {'N_nodes': torch.zeros(B, dtype=torch.long) + N_nodes, 'node_attn_eval': None}
    has_WS_attn = len(batch[0]) > 2
    if has_WS_attn:
        WS_attn = torch.from_numpy(np.stack([batch[b][2].reshape(N_nodes) for b in range(B)]).astype(np.float32)).view(B, N_nodes)
        WS_attn = normalize_batch(WS_attn)
        params_dict.update({'node_attn': WS_attn})  # use these scores for training
    if use_mean_px:
        x = torch.stack([batch[b][0].view(C, N_nodes).t() for b in range(B)]).float()
        if gt_attn_threshold == 0:
            GT_attn = (x > 0).view(B, N_nodes).float()
        else:
            GT_attn = x.view(B, N_nodes).float().clone()
            GT_attn[GT_attn < gt_attn_threshold] = 0
        GT_attn = normalize_batch(GT_attn)
        params_dict.update({'node_attn_eval': GT_attn})  # use this for evaluation of attention
        if not has_WS_attn:
            params_dict.update({'node_attn': GT_attn})  # use this to train attention
    else:
        raise NotImplementedError('this case is not well supported')
    if coord is not None:
        if use_mean_px:
            x = torch.cat((x, coord.expand(B, -1, -1)), dim=2)
        else:
            x = coord.expand(B, -1, -1)
    if x is None:
        x = torch.ones(B, N_nodes, 1)  # dummy features
    if replicate_features:
        x = F.pad(x, (2, 0), 'replicate')
    try:
        labels = torch.Tensor([batch[b][1] for b in range(B)]).long()
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallows KeyboardInterrupt/
        # SystemExit. Falls back to stacking when labels are already tensors.
        labels = torch.stack([batch[b][1] for b in range(B)]).long()
    return [x, A.expand(B, -1, -1), mask.expand(B, -1), labels, params_dict]
def collate_batch(batch):
    '''
    Zero-pads node features and adjacency matrices of a list of graph samples up to
    the largest graph in this batch (rather than in the entire dataset) and stacks
    them into batched tensors.

    Each sample is [x, A, n_nodes, label] optionally followed by ground-truth (index 4)
    and weakly-supervised (index 5) attention vectors.
    '''
    batch_size = len(batch)
    sizes = [sample[2] for sample in batch]
    n_features = batch[0][0].shape[1]
    max_nodes = int(np.max(sizes))

    mask = torch.zeros(batch_size, max_nodes, dtype=torch.bool)  # use byte for older PyTorch
    A = torch.zeros(batch_size, max_nodes, max_nodes)
    x = torch.zeros(batch_size, max_nodes, n_features)

    first = batch[0]
    has_GT_attn = len(first) > 4 and first[4] is not None
    has_WS_attn = len(first) > 5 and first[5] is not None
    GT_attn = torch.zeros(batch_size, max_nodes) if has_GT_attn else None
    WS_attn = torch.zeros(batch_size, max_nodes) if has_WS_attn else None

    for i, sample in enumerate(batch):
        n = sizes[i]
        x[i, :n] = sample[0]
        A[i, :n, :n] = sample[1]
        mask[i, :n] = 1  # 1 for real nodes, 0 for zero-padded (dummy) nodes
        if has_GT_attn:
            GT_attn[i, :n] = sample[4].squeeze()
        if has_WS_attn:
            WS_attn[i, :n] = sample[5].squeeze()

    params_dict = {'N_nodes': torch.from_numpy(np.array(sizes)).long()}
    if has_WS_attn:
        params_dict['node_attn'] = WS_attn  # train attention on weak supervision
    if has_GT_attn:
        params_dict['node_attn_eval'] = GT_attn  # evaluate attention against ground truth
        if not has_WS_attn:
            params_dict['node_attn'] = GT_attn  # no weak labels: train on ground truth
    elif has_WS_attn:
        params_dict['node_attn_eval'] = WS_attn

    labels = torch.from_numpy(np.array([sample[3] for sample in batch])).long()
    return [x, A, mask, labels, params_dict]
class MNIST(torchvision.datasets.MNIST):
    '''
    Thin wrapper around torchvision's MNIST that can attach precomputed
    (weakly-supervised) attention coefficients to each training sample.
    '''
    def __init__(self, root, train=True, transform=None, target_transform=None, download=False, attn_coef=None):
        super(MNIST, self).__init__(root, train, transform, target_transform, download)
        self.alpha_WS = None
        # Attention coefficients are only meaningful for the training split
        if train and attn_coef is not None:
            print('loading weakly-supervised labels from %s' % attn_coef)
            with open(attn_coef, 'rb') as f:
                self.alpha_WS = pickle.load(f)
            print(train, len(self.alpha_WS))

    def __getitem__(self, index):
        img, target = super(MNIST, self).__getitem__(index)
        if self.alpha_WS is not None:
            # Append the per-sample attention coefficients when available
            return img, target, self.alpha_WS[index]
        return img, target
class MNIST75sp(torch.utils.data.Dataset):
    """MNIST represented as graphs of up to 75 superpixels per image.

    Loads precomputed superpixel data (see extract_superpixels.py), optionally
    attaches weakly-supervised attention labels, and precomputes node features,
    adjacency matrices and ground-truth attention for fast iteration.
    """
    def __init__(self,
                 data_dir,
                 split,
                 use_mean_px=True,
                 use_coord=True,
                 gt_attn_threshold=0,
                 attn_coef=None):
        # split: 'train', 'val' or 'test'; gt_attn_threshold: 0 -> binary GT attention
        self.data_dir = data_dir
        self.split = split
        self.is_test = split.lower() in ['test', 'val']
        with open(pjoin(data_dir, 'mnist_75sp_%s.pkl' % split), 'rb') as f:
            self.labels, self.sp_data = pickle.load(f)
        self.use_mean_px = use_mean_px
        self.use_coord = use_coord
        self.n_samples = len(self.labels)
        self.img_size = 28
        self.gt_attn_threshold = gt_attn_threshold
        # Optional weakly-supervised attention coefficients (training split only)
        self.alpha_WS = None
        if attn_coef is not None and not self.is_test:
            with open(attn_coef, 'rb') as f:
                self.alpha_WS = pickle.load(f)
            print('using weakly-supervised labels from %s (%d samples)' % (attn_coef, len(self.alpha_WS)))
    def train_val_split(self, samples_idx):
        # Keep only the samples with the given indices (used to carve out a val set)
        self.sp_data = [self.sp_data[i] for i in samples_idx]
        self.labels = self.labels[samples_idx]
        self.n_samples = len(self.labels)
    def precompute_graph_data(self, replicate_features, threads=0):
        """Precompute node features, adjacency matrices and attention targets.

        NOTE(review): the `threads` argument is currently unused — the loop runs
        sequentially regardless of its value.
        """
        print('precompute all data for the %s set...' % self.split.upper())
        self.Adj_matrices, self.node_features, self.GT_attn, self.WS_attn = [], [], [], []
        for index, sample in enumerate(self.sp_data):
            mean_px, coord = sample[:2]
            coord = coord / self.img_size
            A = compute_adjacency_matrix_images(coord)
            N_nodes = A.shape[0]
            x = None
            if self.use_mean_px:
                x = mean_px.reshape(N_nodes, -1)
            if self.use_coord:
                coord = coord.reshape(N_nodes, 2)
                if self.use_mean_px:
                    x = np.concatenate((x, coord), axis=1)
                else:
                    x = coord
            if x is None:
                # NOTE(review): np.ones(N_nodes, 1) passes 1 as dtype and would raise;
                # likely intended np.ones((N_nodes, 1)). Unreachable unless both
                # use_mean_px and use_coord are False.
                x = np.ones(N_nodes, 1) # dummy features
            if replicate_features:
                x = np.pad(x, ((0, 0), (2, 0)), 'edge') # replicate features to make it possible to test on colored images
            if self.gt_attn_threshold == 0:
                gt_attn = (mean_px > 0).astype(np.float32)
            else:
                gt_attn = mean_px.copy()
                gt_attn[gt_attn < self.gt_attn_threshold] = 0
            self.GT_attn.append(normalize(gt_attn))
            if self.alpha_WS is not None:
                self.WS_attn.append(normalize(self.alpha_WS[index]))
            self.node_features.append(x)
            self.Adj_matrices.append(A)
    def __len__(self):
        return self.n_samples
    def __getitem__(self, index):
        # Sample layout matches what collate_batch expects:
        # [x, A, n_nodes, label, GT_attn(, WS_attn)]
        data = [self.node_features[index],
                self.Adj_matrices[index],
                self.Adj_matrices[index].shape[0],
                self.labels[index],
                self.GT_attn[index]]
        if self.alpha_WS is not None:
            data.append(self.WS_attn[index])
        data = list_to_torch(data) # convert to torch
        return data
class SyntheticGraphs(torch.utils.data.Dataset):
    '''
    Synthetic COLORS-n / TRIANGLES graph datasets with ground truth attention.

    Loads graphs, labels and attention targets from a pickle file and
    precomputes node features (one-hot degrees for TRIANGLES, padded color
    features for COLORS). Optionally attaches weakly-supervised attention
    coefficients from ``attn_coef`` for training splits.
    '''
    def __init__(self,
                 data_dir,
                 dataset,
                 split,
                 degree_feature=True,
                 attn_coef=None,
                 threads=12):
        self.is_test = split.lower() in ['test', 'val']
        self.split = split
        self.degree_feature = degree_feature
        if dataset.find('colors') >= 0:
            dim = int(dataset.split('-')[1])
            data_file = 'random_graphs_colors_dim%d_%s.pkl' % (dim, split)
            is_triangles = False
            self.feature_dim = dim + 1
        elif dataset.find('triangles') >= 0:
            data_file = 'random_graphs_triangles_%s.pkl' % split
            is_triangles = True
        else:
            # Bug fix: this exception was previously constructed but never raised,
            # and the branch was also reachable for valid 'colors' datasets because
            # the second check was a plain 'if' rather than 'elif'; unknown datasets
            # used to crash later with a NameError on 'data_file'.
            raise NotImplementedError(dataset)
        with open(pjoin(data_dir, data_file), 'rb') as f:
            data = pickle.load(f)
        for key in data:
            if not isinstance(data[key], list) and not isinstance(data[key], np.ndarray):
                print(split, key, data[key])
            else:
                print(split, key, len(data[key]))
        self.Node_degrees = [np.sum(A, 1).astype(np.int32) for A in data['Adj_matrices']]
        if is_triangles:
            # use one-hot degree features as node features
            self.feature_dim = data['Max_degree'] + 1
            self.node_features = []
            for i in range(len(data['Adj_matrices'])):
                N = data['Adj_matrices'][i].shape[0]
                if degree_feature:
                    D_onehot = np.zeros((N, self.feature_dim))
                    D_onehot[np.arange(N), self.Node_degrees[i]] = 1
                else:
                    D_onehot = np.zeros((N, 1))
                self.node_features.append(D_onehot)
            if not degree_feature:
                self.feature_dim = 1
        else:
            # Add 1 feature to support new colors at test time
            self.node_features = []
            for i in range(len(data['node_features'])):
                features = data['node_features'][i]
                if features.shape[1] < self.feature_dim:
                    features = np.pad(features, ((0, 0), (0, 1)), 'constant')
                self.node_features.append(features)
        self.alpha_WS = None
        if attn_coef is not None and not self.is_test:
            with open(attn_coef, 'rb') as f:
                self.alpha_WS = pickle.load(f)
            print('using weakly-supervised labels from %s (%d samples)' % (attn_coef, len(self.alpha_WS)))
            self.WS_attn = []
            for index in range(len(self.alpha_WS)):
                self.WS_attn.append(normalize(self.alpha_WS[index]))
        N_nodes = np.array([A.shape[0] for A in data['Adj_matrices']])
        self.Adj_matrices = data['Adj_matrices']
        self.GT_attn = data['GT_attn']
        # Normalizing ground truth attention so that it sums to 1
        for i in range(len(self.GT_attn)):
            self.GT_attn[i] = normalize(self.GT_attn[i])
        self.labels = data['graph_labels'].astype(np.int32)
        self.classes = np.unique(self.labels)
        self.n_classes = len(self.classes)
        # Correlation of labels with graph size; |R| close to 1 indicates a size-biased dataset
        R = np.corrcoef(self.labels, N_nodes)[0, 1]
        degrees = []
        for i in range(len(self.Node_degrees)):
            degrees.extend(list(self.Node_degrees[i]))
        degrees = np.array(degrees, np.int32)
        print('N nodes avg/std/min/max: \t{:.2f}/{:.2f}/{:d}/{:d}'.format(*stats(N_nodes)))
        print('N edges avg/std/min/max: \t{:.2f}/{:.2f}/{:d}/{:d}'.format(*stats(data['N_edges'])))
        print('Node degree avg/std/min/max: \t{:.2f}/{:.2f}/{:d}/{:d}'.format(*stats(degrees)))
        print('Node features dim: \t\t%d' % self.feature_dim)
        print('N classes: \t\t\t%d' % self.n_classes)
        print('Correlation of labels with graph size: \t%.2f' % R)
        print('Classes: \t\t\t%s' % str(self.classes))
        for lbl in self.classes:
            idx = self.labels == lbl
            print('Class {}: \t\t\t{} samples, N_nodes: avg/std/min/max: \t{:.2f}/{:.2f}/{:d}/{:d}'.format(lbl, np.sum(idx), *stats(N_nodes[idx])))

    def __len__(self):
        return len(self.Adj_matrices)

    def __getitem__(self, index):
        '''Return [x, A, N_nodes, label, GT_attn(, WS_attn)] as torch tensors.'''
        data = [self.node_features[index],
                self.Adj_matrices[index],
                self.Adj_matrices[index].shape[0],
                self.labels[index],
                self.GT_attn[index]]
        if self.alpha_WS is not None:
            data.append(self.WS_attn[index])
        data = list_to_torch(data)  # convert to torch
        return data
class GraphData(torch.utils.data.Dataset):
    '''
    Dataset wrapper over a DataReader for graph-classification benchmarks.

    Selects one split ('train'/'val' for a given fold, or 'train_val'/'test')
    and optionally attaches weakly-supervised attention labels, which can be
    given either as a pickle file path or as an already-loaded list.
    '''
    def __init__(self,
                 datareader,
                 fold_id,
                 split,  # train, val, train_val, test
                 degree_feature=True,
                 attn_labels=None):
        self.fold_id = fold_id
        self.split = split
        self.w_sup_signal_attn = None
        print('''The degree_feature argument is ignored for this dataset.
It will automatically be set to True if nodes do not have any features. Otherwise it will be set to False''')
        if attn_labels is not None:
            # attn_labels may be a file path (pickle) or an in-memory list of arrays
            if isinstance(attn_labels, str) and os.path.isfile(attn_labels):
                with open(attn_labels, 'rb') as f:
                    self.w_sup_signal_attn = pickle.load(f)
            else:
                self.w_sup_signal_attn = attn_labels
            for i in range(len(self.w_sup_signal_attn)):
                alpha = self.w_sup_signal_attn[i]
                alpha[alpha < 1e-3] = 0  # assuming that some nodes should have zero importance
                self.w_sup_signal_attn[i] = normalize(alpha)
            print(('!!!using weakly supervised labels (%d samples)!!!' % len(self.w_sup_signal_attn)).upper())
        self.set_fold(datareader.data, fold_id)

    def set_fold(self, data, fold_id):
        '''Select the samples of this split/fold and deep-copy them into the dataset.'''
        self.total = len(data['targets'])
        self.N_nodes_max = data['N_nodes_max']
        self.num_classes = data['num_classes']
        self.num_features = data['num_features']
        if self.split in ['train', 'val']:
            self.idx = data['splits'][self.split][fold_id]
        else:
            assert self.split in ['train_val', 'test'], ('unexpected split', self.split)
            self.idx = data['splits'][self.split]
        # use deepcopy to make sure we don't alter objects in folds
        self.labels = np.array(copy.deepcopy([data['targets'][i] for i in self.idx]))
        self.adj_list = copy.deepcopy([data['adj_list'][i] for i in self.idx])
        self.features_onehot = copy.deepcopy([data['features_onehot'][i] for i in self.idx])
        self.N_edges = np.array([A.sum() // 2 for A in self.adj_list])  # assume undirected graph with binary edges
        print('%s: %d/%d' % (self.split.upper(), len(self.labels), len(data['targets'])))
        classes = np.unique(self.labels)
        for lbl in classes:
            print('Class %d: \t\t\t%d samples' % (lbl, np.sum(self.labels == lbl)))

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, index):
        '''String index: return a whole field (SyntheticGraphs-compatible API).
        Integer index: return one sample [x, A, N_nodes, label, None(, WS_attn)].'''
        if isinstance(index, str):
            # To make data format consistent with SyntheticGraphs
            if index == 'Adj_matrices':
                return self.adj_list
            elif index == 'GT_attn':
                print('Ground truth attention is unavailable for this dataset: weakly-supervised labels will be returned')
                return self.w_sup_signal_attn
            elif index == 'graph_labels':
                return self.labels
            elif index == 'node_features':
                return self.features_onehot
            elif index == 'N_edges':
                return self.N_edges
            else:
                raise KeyError(index)
        else:
            data = [self.features_onehot[index],
                    self.adj_list[index],
                    self.adj_list[index].shape[0],
                    self.labels[index],
                    None]  # no GT attention
            if self.w_sup_signal_attn is not None:
                data.append(self.w_sup_signal_attn[index])
            data = list_to_torch(data)  # convert to torch
            return data
class DataReader():
    '''
    Class to read the txt files containing all data of the dataset
    Should work for any dataset from https://ls11-www.cs.tu-dortmund.de/staff/morris/graphkerneldatasets

    Parses graph indicator/adjacency/label files, builds one-hot node features
    (from node labels or node degrees), caches everything in data.pkl, and
    creates train/val/test splits (optionally restricted by graph size).
    '''
    def __init__(self,
                 data_dir,  # folder with txt files
                 N_nodes=None,  # maximum number of nodes in the training set
                 rnd_state=None,
                 use_cont_node_attr=False,  # use or not additional float valued node attributes available in some datasets
                 folds=10,
                 fold_id=None):
        self.data_dir = data_dir
        self.rnd_state = np.random.RandomState() if rnd_state is None else rnd_state
        self.use_cont_node_attr = use_cont_node_attr
        self.N_nodes = N_nodes
        # Load the preprocessed cache when available, otherwise parse the raw txt files
        if os.path.isfile('%s/data.pkl' % data_dir):
            print('loading data from %s/data.pkl' % data_dir)
            with open('%s/data.pkl' % data_dir, 'rb') as f:
                data = pickle.load(f)
        else:
            files = os.listdir(self.data_dir)
            data = {}
            nodes, graphs = self.read_graph_nodes_relations(
                list(filter(lambda f: f.find('graph_indicator') >= 0, files))[0])
            lst = list(filter(lambda f: f.find('node_labels') >= 0, files))
            if len(lst) > 0:
                data['features'] = self.read_node_features(lst[0], nodes, graphs, fn=lambda s: int(s.strip()))
            else:
                data['features'] = None
            data['adj_list'] = self.read_graph_adj(list(filter(lambda f: f.find('_A') >= 0, files))[0], nodes, graphs)
            data['targets'] = np.array(
                self.parse_txt_file(list(filter(lambda f: f.find('graph_labels') >= 0, files))[0],
                                    line_parse_fn=lambda s: int(float(s.strip()))))
            if self.use_cont_node_attr:
                data['attr'] = self.read_node_features(list(filter(lambda f: f.find('node_attributes') >= 0, files))[0],
                                                       nodes, graphs,
                                                       fn=lambda s: np.array(list(map(float, s.strip().split(',')))))
            features, n_edges, degrees = [], [], []
            for sample_id, adj in enumerate(data['adj_list']):
                N = len(adj)  # number of nodes
                if data['features'] is not None:
                    assert N == len(data['features'][sample_id]), (N, len(data['features'][sample_id]))
                n = np.sum(adj)  # total sum of edges
                # assert n % 2 == 0, n
                n_edges.append(int(n / 2))  # undirected edges, so need to divide by 2
                if not np.allclose(adj, adj.T):
                    print(sample_id, 'not symmetric')
                degrees.extend(list(np.sum(adj, 1)))
                if data['features'] is not None:
                    features.append(np.array(data['features'][sample_id]))
            # Create features over graphs as one-hot vectors for each node
            if data['features'] is not None:
                features_all = np.concatenate(features)
                features_min = features_all.min()
                num_features = int(features_all.max() - features_min + 1)  # number of possible values
                features_onehot = []
                for i, x in enumerate(features):
                    feature_onehot = np.zeros((len(x), num_features))
                    for node, value in enumerate(x):
                        feature_onehot[node, value - features_min] = 1
                    if self.use_cont_node_attr:
                        feature_onehot = np.concatenate((feature_onehot, np.array(data['attr'][i])), axis=1)
                    features_onehot.append(feature_onehot)
                if self.use_cont_node_attr:
                    num_features = features_onehot[0].shape[1]
            else:
                # No node labels available: use one-hot node degrees as features
                degree_max = int(np.max([np.sum(A, 1).max() for A in data['adj_list']]))
                num_features = degree_max + 1
                features_onehot = []
                for A in data['adj_list']:
                    n = A.shape[0]
                    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
                    # the builtin int is the documented replacement.
                    D = np.sum(A, 1).astype(int)
                    D_onehot = np.zeros((n, num_features))
                    D_onehot[np.arange(n), D] = 1
                    features_onehot.append(D_onehot)
            shapes = [len(adj) for adj in data['adj_list']]
            labels = data['targets']  # graph class labels
            labels -= np.min(labels)  # to start from 0
            classes = np.unique(labels)
            num_classes = len(classes)
            if not np.all(np.diff(classes) == 1):
                print('making labels sequential, otherwise pytorch might crash')
                labels_new = np.zeros(labels.shape, dtype=labels.dtype) - 1
                for lbl in range(num_classes):
                    labels_new[labels == classes[lbl]] = lbl
                labels = labels_new
                classes = np.unique(labels)
                assert len(np.unique(labels)) == num_classes, np.unique(labels)

            def stats(x):
                return (np.mean(x), np.std(x), np.min(x), np.max(x))

            print('N nodes avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(shapes))
            print('N edges avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(n_edges))
            print('Node degree avg/std/min/max: \t%.2f/%.2f/%d/%d' % stats(degrees))
            print('Node features dim: \t\t%d' % num_features)
            print('N classes: \t\t\t%d' % num_classes)
            print('Classes: \t\t\t%s' % str(classes))
            for lbl in classes:
                print('Class %d: \t\t\t%d samples' % (lbl, np.sum(labels == lbl)))
            if data['features'] is not None:
                for u in np.unique(features_all):
                    print('feature {}, count {}/{}'.format(u, np.count_nonzero(features_all == u), len(features_all)))
            N_graphs = len(labels)  # number of samples (graphs) in data
            assert N_graphs == len(data['adj_list']) == len(features_onehot), 'invalid data'
            data['features_onehot'] = features_onehot
            data['targets'] = labels
            data['N_nodes_max'] = np.max(shapes)  # max number of nodes
            data['num_features'] = num_features
            data['num_classes'] = num_classes
            # Save preprocessed data for faster loading
            with open('%s/data.pkl' % data_dir, 'wb') as f:
                pickle.dump(data, f, protocol=2)
        labels = data['targets']
        # Create test sets first
        N_graphs = len(labels)
        shapes = np.array([len(adj) for adj in data['adj_list']])
        train_ids, val_ids, train_val_ids, test_ids = self.split_ids_shape(np.arange(N_graphs), shapes, N_nodes, folds=folds)
        # Create train sets
        splits = {'train': [], 'val': [], 'train_val': train_val_ids, 'test': test_ids}
        for fold in range(folds):
            splits['train'].append(train_ids[fold])
            splits['val'].append(val_ids[fold])
        data['splits'] = splits
        self.data = data

    def split_ids_shape(self, ids_all, shapes, N_nodes, folds=1, fold_id=0):
        '''Split graph ids into train/val/train_val/test.

        If ``N_nodes`` > 0, only graphs with at most N_nodes nodes go into
        train_val (large graphs are always in test); otherwise a random
        fold-based split is used. Returns (train_ids, val_ids, train_val_ids,
        test_ids), the first two being per-fold lists when folds > 0.
        '''
        if N_nodes > 0:
            small_graphs_ind = np.where(shapes <= N_nodes)[0]
            print('{}/{} graphs with at least {} nodes'.format(len(small_graphs_ind), len(shapes), N_nodes))
            idx = self.rnd_state.permutation(len(small_graphs_ind))
            if len(idx) > 1000:
                n = 1000
            else:
                n = 500
            train_val_ids = small_graphs_ind[idx[:n]]
            test_ids = small_graphs_ind[idx[n:]]
            large_graphs_ind = np.where(shapes > N_nodes)[0]
            test_ids = np.concatenate((test_ids, large_graphs_ind))
        else:
            idx = self.rnd_state.permutation(len(ids_all))
            n = len(ids_all) // folds  # number of test samples
            # Bug fix: the upper bound for the last fold was -1, which silently
            # dropped the last sample from the final test fold; None takes the
            # slice to the end of the array.
            test_ids = ids_all[idx[fold_id * n: (fold_id + 1) * n if fold_id < folds - 1 else None]]
            train_val_ids = []
            for i in ids_all:
                if i not in test_ids:
                    train_val_ids.append(i)
            train_val_ids = np.array(train_val_ids)
        assert np.all(
            np.unique(np.concatenate((train_val_ids, test_ids))) == sorted(ids_all)), 'some graphs are missing in the test sets'
        if folds > 0:
            print('generating %d-fold cross-validation splits' % folds)
            train_ids, val_ids = self.split_ids(train_val_ids, folds=folds)
            # Sanity checks
            for fold in range(folds):
                ind = np.concatenate((train_ids[fold], val_ids[fold]))
                print(fold, len(train_ids[fold]), len(val_ids[fold]))
                assert len(train_ids[fold]) + len(val_ids[fold]) == len(np.unique(ind)) == len(ind) == len(train_val_ids), 'invalid splits'
        else:
            train_ids, val_ids = [], []
        return train_ids, val_ids, train_val_ids, test_ids

    def split_ids(self, ids, folds=10):
        '''Partition ``ids`` into ``folds`` disjoint test sets and their complements.'''
        n = len(ids)
        stride = int(np.ceil(n / float(folds)))
        test_ids = [ids[i: i + stride] for i in range(0, n, stride)]
        assert np.all(
            np.unique(np.concatenate(test_ids)) == sorted(ids)), 'some graphs are missing in the test sets'
        assert len(test_ids) == folds, 'invalid test sets'
        train_ids = []
        for fold in range(folds):
            train_ids.append(np.array([e for e in ids if e not in test_ids[fold]]))
            assert len(train_ids[fold]) + len(test_ids[fold]) == len(
                np.unique(list(train_ids[fold]) + list(test_ids[fold]))) == n, 'invalid splits'
        return train_ids, test_ids

    def parse_txt_file(self, fpath, line_parse_fn=None):
        '''Read a txt file and apply ``line_parse_fn`` to each line.'''
        with open(pjoin(self.data_dir, fpath), 'r') as f:
            lines = f.readlines()
        data = [line_parse_fn(s) if line_parse_fn is not None else s for s in lines]
        return data

    def read_graph_adj(self, fpath, nodes, graphs):
        '''Build a per-graph adjacency matrix list from the global edge list file.'''
        edges = self.parse_txt_file(fpath, line_parse_fn=lambda s: s.split(','))
        adj_dict = {}
        for edge in edges:
            node1 = int(edge[0].strip()) - 1  # -1 because of zero-indexing in our code
            node2 = int(edge[1].strip()) - 1
            graph_id = nodes[node1]
            assert graph_id == nodes[node2], ('invalid data', graph_id, nodes[node2])
            if graph_id not in adj_dict:
                n = len(graphs[graph_id])
                adj_dict[graph_id] = np.zeros((n, n))
            ind1 = np.where(graphs[graph_id] == node1)[0]
            ind2 = np.where(graphs[graph_id] == node2)[0]
            assert len(ind1) == len(ind2) == 1, (ind1, ind2)
            adj_dict[graph_id][ind1, ind2] = 1
        adj_list = [adj_dict[graph_id] for graph_id in sorted(list(graphs.keys()))]
        return adj_list

    def read_graph_nodes_relations(self, fpath):
        '''Return (nodes, graphs): node_id -> graph_id map and per-graph node-id arrays.'''
        graph_ids = self.parse_txt_file(fpath, line_parse_fn=lambda s: int(s.rstrip()))
        nodes, graphs = {}, {}
        for node_id, graph_id in enumerate(graph_ids):
            if graph_id not in graphs:
                graphs[graph_id] = []
            graphs[graph_id].append(node_id)
            nodes[node_id] = graph_id
        graph_ids = np.unique(list(graphs.keys()))
        for graph_id in graph_ids:
            graphs[graph_id] = np.array(graphs[graph_id])
        return nodes, graphs

    def read_node_features(self, fpath, nodes, graphs, fn):
        '''Read per-node features (parsed with ``fn``) and group them per graph.'''
        node_features_all = self.parse_txt_file(fpath, line_parse_fn=fn)
        node_features = {}
        for node_id, x in enumerate(node_features_all):
            graph_id = nodes[node_id]
            if graph_id not in node_features:
                node_features[graph_id] = [None] * len(graphs[graph_id])
            ind = np.where(graphs[graph_id] == node_id)[0]
            assert len(ind) == 1, ind
            assert node_features[graph_id][ind[0]] is None, node_features[graph_id][ind[0]]
            node_features[graph_id][ind[0]] = x
        node_features_lst = [node_features[graph_id] for graph_id in sorted(list(graphs.keys()))]
        return node_features_lst
| 29,128 | 42.154074 | 147 | py |
graph_attention_pool | graph_attention_pool-master/utils.py | import numpy as np
import os
import torch
import copy
from graphdata import *
import torch.nn.functional as F
from torchvision import datasets, transforms
from sklearn.metrics import roc_auc_score
import numbers
import random
def load_save_noise(f, noise_shape):
    '''Return a fixed noise tensor, cached on disk at path ``f``.

    If the file exists it is loaded; otherwise standard-normal noise of
    shape ``noise_shape`` is generated, saved to ``f`` and returned.
    '''
    if not os.path.isfile(f):
        noises = torch.randn(noise_shape, dtype=torch.float)
        torch.save(noises, f)
        return noises
    print('loading noise from %s' % f)
    return torch.load(f)
def list_to_torch(data):
    '''Recursively convert numpy arrays in a (possibly nested) list to float torch tensors, in place.

    ``None`` entries are kept as-is. Boolean arrays are cast to float32 first.
    Bug fix: the dtype check used ``np.bool``, an alias deprecated in NumPy
    1.20 and removed in 1.24; the builtin ``bool`` is the replacement.
    '''
    for i in range(len(data)):
        if data[i] is None:
            continue
        elif isinstance(data[i], np.ndarray):
            if data[i].dtype == bool:
                data[i] = data[i].astype(np.float32)
            data[i] = torch.from_numpy(data[i]).float()
        elif isinstance(data[i], list):
            data[i] = list_to_torch(data[i])
    return data
def data_to_device(data, device):
    '''Recursively move every tensor in a nested list/dict structure to ``device`` (in place).'''
    keys = list(data.keys()) if isinstance(data, dict) else range(len(data))
    for k in keys:
        item = data[k]
        if isinstance(item, (list, dict)):
            data[k] = data_to_device(item, device)
        elif isinstance(item, torch.Tensor):
            try:
                data[k] = item.to(device)
            except:
                # re-raised below; the print only aids debugging the offending entry
                print('error', k, item, type(item))
                raise
    return data
def count_correct(output, target, N_nodes=None, N_nodes_min=0, N_nodes_max=25):
    '''Count correctly predicted samples in a batch.

    A single output column is treated as regression (rounded prediction),
    otherwise argmax classification. When ``N_nodes`` is given, accuracy is
    counted (and printed per class) only over graphs whose node count lies
    in [N_nodes_min, N_nodes_max]; returns 0 if no graph is in that range.
    '''
    if output.shape[1] == 1:
        # Regression
        pred = output.round().long()
    else:
        # Classification
        pred = output.max(1, keepdim=True)[1]
    target = target.long().squeeze().cpu()  # for older pytorch
    pred = pred.squeeze().cpu()  # for older pytorch
    if N_nodes is not None:
        # Restrict evaluation to graphs within the requested size range
        idx = (N_nodes >= N_nodes_min) & (N_nodes <= N_nodes_max)
        if idx.sum() > 0:
            correct = pred[idx].eq(target[idx]).sum().item()
            for lbl in torch.unique(target, sorted=True):
                idx_lbl = target[idx] == lbl
                eq = (pred[idx][idx_lbl] == target[idx][idx_lbl]).float()
                print('lbl: {}, avg acc: {:2.2f}% ({}/{})'.format(lbl, 100 * eq.mean(), int(eq.sum()),
                                                                  int(idx_lbl.float().sum())))
            eq = (pred[idx] == target[idx]).float()
            print('{} <= N_nodes <= {} (min={}, max={}), avg acc: {:2.2f}% ({}/{})'.format(N_nodes_min,
                                                                                           N_nodes_max,
                                                                                           N_nodes[idx].min(),
                                                                                           N_nodes[idx].max(),
                                                                                           100 * eq.mean(),
                                                                                           int(eq.sum()), int(idx.sum())))
        else:
            correct = 0
            print('no graphs with nodes >= {} and <= {}'.format(N_nodes_min, N_nodes_max))
    else:
        correct = pred.eq(target).sum().item()
    return correct
def attn_AUC(alpha_GT, alpha):
    '''Compute per-layer ROC-AUC (in percent) of predicted attention vs ground truth.

    ``alpha`` and ``alpha_GT`` map layer keys to lists of per-graph attention
    arrays; ground truth is binarized as alpha_GT > 0. If the ground truth
    contains a single class, AUC is undefined and NaN is appended instead.
    Returns an empty list when either input is empty/None.
    '''
    auc = []
    if len(alpha) > 0 and alpha_GT is not None and len(alpha_GT) > 0:
        for layer in alpha:
            alpha_gt = np.concatenate([a.flatten() for a in alpha_GT[layer]]) > 0
            if len(np.unique(alpha_gt)) <= 1:
                print('Only one class ({}) present in y_true. ROC AUC score is not defined in that case.'.format(np.unique(alpha_gt)))
                auc.append(np.nan)
            else:
                auc.append(100 * roc_auc_score(y_true=alpha_gt,
                                               y_score=np.concatenate([a.flatten() for a in alpha[layer]])))
    return auc
def stats(arr):
    '''Return summary statistics (mean, std, min, max) of ``arr``.'''
    summary = (np.mean(arr), np.std(arr), np.min(arr), np.max(arr))
    return summary
def normalize(x, eps=1e-7):
    '''Scale ``x`` so that its elements sum to 1 (``eps`` guards against division by zero).'''
    total = x.sum() + eps
    return x / total
def normalize_batch(x, dim=1, eps=1e-7):
    '''Normalize ``x`` along dimension ``dim`` so entries along that axis sum to 1.'''
    denom = x.sum(dim=dim, keepdim=True) + eps
    return x / denom
def normalize_zero_one(im, eps=1e-7):
    '''Linearly rescale ``im`` to the [0, 1] range (``eps`` avoids division by zero for constant input).'''
    lo, hi = im.min(), im.max()
    return (im - lo) / (hi - lo + eps)
def mse_loss(target, output, reduction='mean', reduce=None):
    '''Elementwise squared error between ``target`` and ``output`` with optional reduction.

    ``reduction`` is one of 'mean', 'sum', 'none' (used when ``reduce`` is None);
    ``reduce=False`` returns the unreduced loss for backward compatibility.
    Raises NotImplementedError for unsupported combinations.

    Bug fix: both NotImplementedError instances were previously constructed but
    never raised, so invalid modes silently returned None.
    '''
    loss = (target.float().squeeze() - output.float().squeeze()) ** 2
    if reduce is None:
        if reduction == 'mean':
            return torch.mean(loss)
        elif reduction == 'sum':
            return torch.sum(loss)
        elif reduction == 'none':
            return loss
        else:
            raise NotImplementedError(reduction)
    elif not reduce:
        return loss
    else:
        raise NotImplementedError('use reduction if reduce=True')
def shuffle_nodes(batch):
    '''Randomly permute node order in every graph of a batch, in place.

    ``batch`` is [x, A, mask, labels, params_dict] with x of shape (B, N, C)
    and A of shape (B, N, N). The same permutation is applied to features,
    adjacency, mask and (if present) ground truth node attention, so a
    permutation-invariant model should produce identical outputs.
    '''
    x, A, mask, labels, params_dict = batch
    for b in range(x.shape[0]):
        idx = np.random.permutation(x.shape[1])
        x[b] = x[b, idx]
        # Permute both dimensions of the adjacency matrix.
        # NOTE(review): this relies on torch advanced-indexing semantics and
        # appears to assume A is symmetric — confirm if used with directed graphs.
        A[b] = A[b, :, idx][idx, :]
        mask[b] = mask[b, idx]
        if 'node_attn' in params_dict:
            params_dict['node_attn'][b] = params_dict['node_attn'][b, idx]
    return [x, A, mask, labels, params_dict]
def copy_batch(data):
    '''Clone a batch: every tensor, including tensors inside dict entries, is .clone()d.'''
    cloned = []
    for item in data:
        if isinstance(item, dict):
            cloned.append({k: v.clone() for k, v in item.items()})
        else:
            cloned.append(item.clone())
    return cloned
def sanity_check(model, data):
    '''Check that the model output is invariant to node ordering.

    Runs the model on the batch and on a node-shuffled copy (no gradients);
    prints a warning with diagnostic norms if the outputs differ beyond
    tolerance.
    '''
    with torch.no_grad():
        out_ref = model(copy_batch(data))[0]
        out_perm = model(shuffle_nodes(copy_batch(data)))[0]
        if not torch.allclose(out_ref, out_perm, rtol=1e-02, atol=1e-03):
            print('WARNING: model outputs different depending on the nodes order', (torch.norm(out_ref - out_perm),
                                                                                    torch.max(out_ref - out_perm),
                                                                                    torch.max(out_ref),
                                                                                    torch.max(out_perm)))
    print('model is checked for nodes shuffling')
def set_seed(seed, seed_data=None):
    '''Seed python, numpy and torch RNGs.

    Returns (rnd, rnd_data): numpy RandomStates for model and data; when
    ``seed_data`` is None the same RandomState is used for both.
    '''
    random.seed(seed)  # for some libraries
    rnd = np.random.RandomState(seed)
    rnd_data = np.random.RandomState(seed_data) if seed_data is not None else rnd
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    return rnd, rnd_data
def compute_feature_stats(model, train_loader, device, n_batches=100):
    '''Estimate per-feature mean and std of node features over training batches.

    Collects x of shape (B, N, F) from the first batches of ``train_loader``,
    computes mean/std over all nodes, floors tiny stds at 1 to avoid division
    by near-zero, and returns (mn, sd) as (1, 1, F) tensors on ``device``.
    '''
    print('computing mean and std of input features')
    model.eval()
    x = []
    with torch.no_grad():
        for batch_idx, data in enumerate(train_loader):
            x.append(data[0].data.cpu().numpy())  # B,N,F
            # NOTE(review): '>' stops after n_batches + 2 batches, not n_batches —
            # confirm whether '>=' was intended.
            if batch_idx > n_batches:
                break
    # Concatenate along the node axis, then flatten to (total_nodes, F)
    x = np.concatenate(x, axis=1).reshape(-1, x[0].shape[-1])
    print('features shape loaded', x.shape)
    mn = x.mean(axis=0, keepdims=True)
    sd = x.std(axis=0, keepdims=True)
    print('mn', mn)
    print('std', sd)
    sd[sd < 1e-2] = 1  # to prevent dividing by a small number
    print('corrected (non zeros) std', sd)
    mn = torch.from_numpy(mn).float().to(device).unsqueeze(0)
    sd = torch.from_numpy(sd).float().to(device).unsqueeze(0)
    return mn, sd
def copy_data(data, idx):
    '''Build a sub-dataset dict containing only the samples at indices ``idx``.

    The scalar 'Max_degree' entry is copied as-is; list-like entries are
    deep-copied per index, and 'graph_labels'/'N_edges' are converted to
    int32 numpy arrays. Prints each key and its size for inspection.
    '''
    subset = {}
    for key, value in data.items():
        if key == 'Max_degree':
            subset[key] = value
            print(key, subset[key])
            continue
        subset[key] = copy.deepcopy([value[i] for i in idx])
        if key in ['graph_labels', 'N_edges']:
            subset[key] = np.array(subset[key], np.int32)
        print(key, len(subset[key]))
    return subset
def concat_data(data):
    '''Merge a list of dataset dicts into one.

    'Max_degree' becomes the maximum over all parts; 'graph_labels' and
    'N_edges' are concatenated as numpy arrays; all other list-like entries
    are concatenated as plain lists. Prints each key and its size.
    '''
    merged = {}
    for key in data[0]:
        if key == 'Max_degree':
            merged[key] = np.max(np.array([d[key] for d in data]))
            print(key, merged[key])
        else:
            if key in ['graph_labels', 'N_edges']:
                merged[key] = np.concatenate([d[key] for d in data])
            else:
                merged[key] = [item for d in data for item in d[key]]
            print(key, len(merged[key]))
    return merged
| 8,522 | 33.930328 | 134 | py |
graph_attention_pool | graph_attention_pool-master/attention_pooling.py | import numpy as np
import torch
import torch.sparse
import torch.nn as nn
import torch.nn.functional as F
from utils import *
class AttentionPooling(nn.Module):
    '''
    Graph pooling layer implementing top-k and threshold-based pooling.

    Attention coefficients over nodes are either predicted by a trainable
    projection ('unsup'/'sup' modes, optionally supervised with a KL term)
    or taken from ground truth ('gt' mode). Nodes are then kept either by
    top-k ratio or by thresholding the coefficients, and dropped nodes/edges
    can be physically removed from the batch tensors.
    '''
    def __init__(self,
                 in_features,  # feature dimensionality in the current graph layer
                 in_features_prev,  # feature dimensionality in the previous graph layer
                 pool_type,
                 pool_arch,
                 large_graph,
                 attn_gnn=None,
                 kl_weight=None,
                 drop_nodes=True,
                 init='normal',
                 scale=None,
                 debug=False):
        super(AttentionPooling, self).__init__()
        self.pool_type = pool_type
        self.pool_arch = pool_arch
        self.large_graph = large_graph
        self.kl_weight = kl_weight
        self.proj = None
        self.drop_nodes = drop_nodes
        # pool_type[2] selects the keep rule ('topk' or threshold); pool_type[3] its value
        self.is_topk = self.pool_type[2].lower() == 'topk'
        self.scale =scale
        self.init = init
        self.debug = debug
        # attention logits are clamped to +-clamp_value before exp() to avoid overflow
        self.clamp_value = 60
        # torch version string; used in forward() to pick the kl_div keyword API
        self.torch = torch.__version__
        if self.is_topk:
            self.topk_ratio = float(self.pool_type[3])  # r
            assert self.topk_ratio > 0 and self.topk_ratio <= 1, ('invalid top-k ratio', self.topk_ratio, self.pool_type)
        else:
            self.threshold = float(self.pool_type[3])  # \tilde{alpha}
            assert self.threshold >= 0 and self.threshold <= 1, ('invalid pooling threshold', self.threshold, self.pool_type)
        if self.pool_type[1] in ['unsup', 'sup']:
            assert self.pool_arch not in [None, 'None'], self.pool_arch
            # input dimensionality of the attention projection
            n_in = in_features_prev if self.pool_arch[1] == 'prev' else in_features
            if self.pool_arch[0] == 'fc':
                # the "optimal" projection vector used only to report cosine similarity
                p_optimal = torch.from_numpy(np.pad(np.array([0, 1]), (0, n_in - 2), 'constant')).float().view(1, n_in)
                if len(self.pool_arch) == 2:
                    # single layer projection
                    self.proj = nn.Linear(n_in, 1, bias=False)
                    p = self.proj.weight.data
                    if scale is not None:
                        if init == 'normal':
                            p = torch.randn(n_in)  # std=1, seed 9753 for optimal initialization
                        elif init == 'uniform':
                            p = torch.rand(n_in) * 2 - 1  # [-1,1]
                        else:
                            raise NotImplementedError(init)
                        p *= scale  # multiply std for normal or change range for uniform
                    else:
                        print('Default PyTorch init is used for layer %s, std=%.3f' % (str(p.shape), p.std()))
                    self.proj.weight.data = p.view_as(self.proj.weight.data)
                    p = self.proj.weight.data.view(1, n_in)
                else:
                    # multi-layer projection
                    filters = list(map(int, self.pool_arch[2:]))
                    self.proj = []
                    for layer in range(len(filters)):
                        self.proj.append(nn.Linear(in_features=n_in if layer == 0 else filters[layer - 1],
                                                   out_features=filters[layer]))
                        if layer == 0:
                            # custom (re)initialization only for the first layer
                            p = self.proj[0].weight.data
                            if scale is not None:
                                if init == 'normal':
                                    p = torch.randn(filters[layer], n_in)
                                elif init == 'uniform':
                                    p = torch.rand(filters[layer], n_in) * 2 - 1  # [-1,1]
                                else:
                                    raise NotImplementedError(init)
                                p *= scale  # multiply std for normal or change range for uniform
                            else:
                                print('Default PyTorch init is used for layer %s, std=%.3f' % (str(p.shape), p.std()))
                            self.proj[0].weight.data = p.view_as(self.proj[0].weight.data)
                            p = self.proj[0].weight.data.view(-1, n_in)
                        self.proj.append(nn.ReLU(True))
                    self.proj.append(nn.Linear(filters[-1], 1))
                    self.proj = nn.Sequential(*self.proj)
                # Compute cosine similarity with the optimal vector and print values
                # ignore the last dimension, because it does not receive gradients during training
                # n_in=4 for colors-3 because some of our test subsets have 4 dimensional features
                cos_sim = self.cosine_sim(p[:, :-1], p_optimal[:, :-1])
                if p.shape[0] == 1:
                    print('p values', p[0].data.cpu().numpy())
                    print('cos_sim', cos_sim.item())
                else:
                    for fn in [torch.max, torch.min, torch.mean, torch.std]:
                        print('cos_sim', fn(cos_sim).item())
            elif self.pool_arch[0] == 'gnn':
                # attention predicted by a small GNN built by the provided factory
                self.proj = attn_gnn(n_in)
            else:
                raise ValueError('invalid pooling layer architecture', self.pool_arch)
        elif self.pool_type[1] == 'gt':
            if not self.is_topk and self.threshold > 0:
                print('For ground truth attention threshold should be 0, but it is %f' % self.threshold)
        else:
            raise NotImplementedError(self.pool_type[1])

    def __repr__(self):
        return 'AttentionPooling(pool_type={}, pool_arch={}, topk={}, kl_weight={}, init={}, scale={}, proj={})'.format(
            self.pool_type,
            self.pool_arch,
            self.is_topk,
            self.kl_weight,
            self.init,
            self.scale,
            self.proj)

    def cosine_sim(self, a, b):
        '''Pairwise cosine similarity between rows of ``a`` and rows of ``b``.'''
        return torch.mm(a, b.t()) / (torch.norm(a, dim=1, keepdim=True) * torch.norm(b, dim=1, keepdim=True))

    def mask_out(self, x, mask):
        '''Zero out entries of ``x`` where ``mask`` is 0 (dummy/padded nodes).'''
        return x.view_as(mask) * mask

    def drop_nodes_edges(self, x, A, mask):
        '''Physically remove masked-out nodes (and their edges) from the batch.

        Shrinks x/A/mask to the maximum remaining node count in the batch.
        Returns (x, A, mask, N_nodes per graph, gather index or None).
        '''
        N_nodes = torch.sum(mask, dim=1).long()  # B
        N_nodes_max = N_nodes.max()
        idx = None
        if N_nodes_max > 0:
            B, N, C = x.shape
            # Drop nodes
            mask, idx = torch.topk(mask.byte(), N_nodes_max, dim=1, largest=True, sorted=False)
            x = torch.gather(x, dim=1, index=idx.unsqueeze(2).expand(-1, -1, C))
            # Drop edges
            A = torch.gather(A, dim=1, index=idx.unsqueeze(2).expand(-1, -1, N))
            A = torch.gather(A, dim=2, index=idx.unsqueeze(1).expand(-1, N_nodes_max, -1))
        return x, A, mask, N_nodes, idx

    def forward(self, data):
        '''Pool a batch.

        ``data`` is [x(B,N,C), A, mask, labels, params_dict, ...]; the last
        element is used as the attention input when pool_arch[1] == 'prev'.
        Mutates params_dict in place (attention histories, 'N_nodes', KL 'reg'
        terms, per-layer 'alpha'/'mask') and returns [x, A, mask, *data[3:]].
        '''
        KL_loss = None
        x, A, mask, _, params_dict = data[:5]
        mask_float = mask.float()
        N_nodes_float = params_dict['N_nodes'].float()
        B, N, C = x.shape
        A = A.view(B, N, N)
        alpha_gt = None
        # Ground truth / weakly-supervised attention histories are kept as lists;
        # the last entry corresponds to the current (possibly already pooled) graph
        if 'node_attn' in params_dict:
            if not isinstance(params_dict['node_attn'], list):
                params_dict['node_attn'] = [params_dict['node_attn']]
            alpha_gt = params_dict['node_attn'][-1].view(B, N)
        if 'node_attn_eval' in params_dict:
            if not isinstance(params_dict['node_attn_eval'], list):
                params_dict['node_attn_eval'] = [params_dict['node_attn_eval']]
        if (self.pool_type[1] == 'gt' or (self.pool_type[1] == 'sup' and self.training)) and alpha_gt is None:
            raise ValueError('ground truth node attention values node_attn required for %s' % self.pool_type)
        if self.pool_type[1] in ['unsup', 'sup']:
            attn_input = data[-1] if self.pool_arch[1] == 'prev' else x.clone()
            if self.pool_arch[0] == 'fc':
                alpha_pre = self.proj(attn_input).view(B, N)
            else:
                # to support python2
                input = [attn_input]
                input.extend(data[1:])
                alpha_pre = self.proj(input)[0].view(B, N)
            # softmax with masking out dummy nodes
            alpha_pre = torch.clamp(alpha_pre, -self.clamp_value, self.clamp_value)
            alpha = normalize_batch(self.mask_out(torch.exp(alpha_pre), mask_float).view(B, N))
            if self.pool_type[1] == 'sup' and self.training:
                # KL divergence between predicted and ground truth attention;
                # API differs between torch 1.x ('reduction') and older ('reduce')
                if self.torch.find('1.') == 0:
                    KL_loss_per_node = self.mask_out(F.kl_div(torch.log(alpha + 1e-14), alpha_gt, reduction='none'),
                                                     mask_float.view(B,N))
                else:
                    KL_loss_per_node = self.mask_out(F.kl_div(torch.log(alpha + 1e-14), alpha_gt, reduce=False),
                                                     mask_float.view(B, N))
                KL_loss = self.kl_weight * torch.mean(KL_loss_per_node.sum(dim=1) / (N_nodes_float + 1e-7))  # mean over nodes, then mean over batches
        else:
            alpha = alpha_gt
        # Scale node features by attention
        x = x * alpha.view(B, N, 1)
        if self.large_graph:
            # For large graphs during training, all alpha values can be very small hindering training
            x = x * N_nodes_float.view(B, 1, 1)
        if self.is_topk:
            N_remove = torch.round(N_nodes_float * (1 - self.topk_ratio)).long()  # number of nodes to be removed for each graph
            idx = torch.sort(alpha, dim=1, descending=False)[1]  # indices of alpha in ascending order
            mask = mask.clone().view(B, N)
            for b in range(B):
                idx_b = idx[b, mask[b, idx[b]]]  # take indices of non-dummy nodes for current data example
                mask[b, idx_b[:N_remove[b]]] = 0
        else:
            # threshold rule: keep nodes whose attention exceeds the threshold
            mask = (mask & (alpha.view_as(mask) > self.threshold)).view(B, N)
        if self.drop_nodes:
            x, A, mask, N_nodes_pooled, idx = self.drop_nodes_edges(x, A, mask)
            if idx is not None and 'node_attn' in params_dict:
                # update ground truth (or weakly labeled) attention for a reduced graph
                params_dict['node_attn'].append(normalize_batch(self.mask_out(torch.gather(alpha_gt, dim=1, index=idx), mask.float())))
            if idx is not None and 'node_attn_eval' in params_dict:
                # update ground truth (or weakly labeled) attention for a reduced graph
                params_dict['node_attn_eval'].append(normalize_batch(self.mask_out(torch.gather(params_dict['node_attn_eval'][-1], dim=1, index=idx), mask.float())))
        else:
            N_nodes_pooled = torch.sum(mask, dim=1).long()  # B
            if 'node_attn' in params_dict:
                params_dict['node_attn'].append((self.mask_out(params_dict['node_attn'][-1], mask.float())))
            if 'node_attn_eval' in params_dict:
                params_dict['node_attn_eval'].append((self.mask_out(params_dict['node_attn_eval'][-1], mask.float())))
        params_dict['N_nodes'] = N_nodes_pooled
        # Zero out edges touching dropped nodes
        mask_matrix = mask.unsqueeze(2) & mask.unsqueeze(1)
        A = A * mask_matrix.float()  # or A[~mask_matrix] = 0
        # Add additional losses regularizing the model
        if KL_loss is not None:
            if 'reg' not in params_dict:
                params_dict['reg'] = []
            params_dict['reg'].append(KL_loss)
        # Keep attention coefficients for evaluation
        for key, value in zip(['alpha', 'mask'], [alpha, mask]):
            if key not in params_dict:
                params_dict[key] = []
            params_dict[key].append(value.detach())
        if self.debug and alpha_gt is not None:
            # Debug metrics: attention mass assigned to nodes that should be
            # kept (gt > 0) vs dropped (gt == 0), and the average keep ratio
            idx_correct_pool = (alpha_gt > 0)
            idx_correct_drop = (alpha_gt == 0)
            alpha_correct_pool = alpha[idx_correct_pool].sum() / N_nodes_float.sum()
            alpha_correct_drop = alpha[idx_correct_drop].sum() / N_nodes_float.sum()
            ratio_avg = (N_nodes_pooled.float() / N_nodes_float).mean()
            for key, values in zip(['alpha_correct_pool_debug', 'alpha_correct_drop_debug', 'ratio_avg_debug'],
                                   [alpha_correct_pool, alpha_correct_drop, ratio_avg]):
                if key not in params_dict:
                    params_dict[key] = []
                params_dict[key].append(values.detach())
        output = [x, A, mask]
        output.extend(data[3:])
        return output
| 12,420 | 48.486056 | 165 | py |
graph_attention_pool | graph_attention_pool-master/train_test.py | import time
import torch
from torch.utils.data import DataLoader
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from chebygin import *
from utils import *
from graphdata import *
import torch.multiprocessing as mp
import multiprocessing
try:
import ax
from ax.service.managed_loop import optimize
except Exception as e:
print('AX is not available: %s' % str(e))
def set_pool(pool_thresh, args_pool):
    """Return a copy of a pooling specification with its numeric entries replaced.

    ``args_pool`` is a list of strings describing the pooling method (e.g.
    ``['attn', 'sup', '0.001', ...]``) where some entries are numeric
    thresholds. Every entry that parses as a float is replaced by
    ``str(pool_thresh)``; all other entries are kept unchanged.

    Args:
        pool_thresh: new threshold value (stringified before substitution).
        args_pool: list of pooling-spec strings; not modified.

    Returns:
        A new list with thresholds substituted.
    """
    pool = []
    for entry in args_pool:
        try:
            float(entry)  # entry is a numeric threshold slot
        except (TypeError, ValueError):
            pool.append(entry)  # non-numeric: keep as-is
        else:
            pool.append(str(pool_thresh))
    return pool
def train_evaluate(datareader, args, collate_fn, loss_fn, feature_stats, parameterization, folds=10, threads=5):
    """Objective function for Ax hyper-parameter search: cross-validated accuracy.

    Unpacks the trial's ``parameterization`` (pooling threshold, KL weight,
    optionally attention-layer width and init settings), then trains/evaluates
    one model per fold via :func:`single_job`, in parallel processes when
    ``threads > 1``.

    Args:
        datareader: dataset reader shared by all folds.
        args: parsed command-line namespace (pool spec, seeds, epochs, ...).
        collate_fn, loss_fn: batching and loss callables passed through to jobs.
        feature_stats: optional (mean, std) node-feature normalization stats.
        parameterization: dict of trial values supplied by Ax.
        folds: number of cross-validation folds.
        threads: number of concurrent fold processes (must be <= folds).

    Returns:
        Scalar metric = mean validation accuracy minus its std over folds
        (high variance across folds is penalized).
    """
    print('parameterization', parameterization)
    pool_thresh, kl_weight = parameterization['pool'], parameterization['kl_weight']
    pool = args.pool
    if args.tune_init:
        # Initialization hyper-parameters are tuned only when requested.
        scale, init = parameterization['scale'], parameterization['init']
    else:
        scale, init = args.scale, args.init
    # NOTE(review): `layer` is hard-coded to 1, so the `layer == 0` branch
    # below (which drops pool[3]) is currently dead code — confirm intent.
    n_hidden_attn, layer = parameterization['n_hidden_attn'], 1
    if layer == 0:
        pool = copy.deepcopy(args.pool)
        del pool[3]
    # Substitute the trial's threshold into the pooling spec.
    pool = set_pool(pool_thresh, pool)
    # Manager dict lets worker processes report per-fold accuracy back.
    manager = multiprocessing.Manager()
    val_acc = manager.dict()
    assert threads <= folds, (threads, folds)
    n_it = int(np.ceil(float(folds) / threads))
    for i in range(n_it):
        processes = []
        if threads <= 1:
            # Run sequentially in this process.
            single_job(i * threads, datareader, args, collate_fn, loss_fn, pool, kl_weight,
                       feature_stats, val_acc, scale=scale, init=init, n_hidden_attn=n_hidden_attn)
        else:
            # One process per fold in this wave; wait for all before the next wave.
            for fold in range(threads):
                p = mp.Process(target=single_job,
                               args=(i * threads + fold, datareader, args, collate_fn, loss_fn, pool, kl_weight,
                                     feature_stats, val_acc, scale, init, n_hidden_attn))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()
    print(val_acc)
    val_acc = list(val_acc.values())
    print('average and std over {} folds: {} +- {}'.format(folds, np.mean(val_acc), np.std(val_acc)))
    metric = np.mean(val_acc) - np.std(val_acc)  # large std is considered bad
    print('metric: avg acc - std: {}'.format(metric))
    return metric
def ax_optimize(datareader, args, collate_fn, loss_fn, feature_stats, folds=10, threads=5, n_trials=30):
    """Run Bayesian hyper-parameter optimization with Ax's managed loop.

    Searches over the pooling threshold, KL regularization weight and the
    attention hidden-layer width (plus init scale/type when ``args.tune_init``),
    scoring each trial by :func:`train_evaluate` (cross-validated accuracy).

    Returns:
        The best parameter dict found by Ax.
    """
    parameters = [
        {"name": "pool", "type": "range", "bounds": [1e-4, 2e-2], "log_scale": False},
        {"name": "kl_weight", "type": "range", "bounds": [0.1, 10.], "log_scale": False},
        {"name": "n_hidden_attn", "type": "choice", "values": [0, 32]}  # hidden units in the attention layer (0: no hidden layer)
    ]
    if args.tune_init:
        parameters.extend([{"name": "scale", "type": "range", "bounds": [0.1, 2.], "log_scale": False},
                           {"name": "init", "type": "choice", "values": ['normal', 'uniform']}])
    best_parameters, values, experiment, model = optimize(
        parameters=parameters,
        evaluation_function=lambda parameterization: train_evaluate(datareader,
                                                                    args, collate_fn, loss_fn,
                                                                    feature_stats, parameterization, folds=folds,
                                                                    threads=threads),
        total_trials=n_trials,
        objective_name='accuracy',
    )
    print('best_parameters', best_parameters)
    print('values', values)
    return best_parameters
def train(model, train_loader, optimizer, epoch, args, loss_fn, feature_stats=None, log=True):
    """Train ``model`` for one epoch and return (total loss, final accuracy %).

    Args:
        model: graph network returning ``(logits, other_outputs)`` where
            ``other_outputs`` may carry 'reg' (extra losses), 'alpha'
            (attention) and 'mask' entries.
        train_loader: DataLoader yielding batches ``[x, A, mask, targets, meta]``.
        optimizer: optimizer stepped once per batch.
        epoch: current epoch number (used for logging and sanity checks).
        args: namespace providing ``device`` and ``log_interval``.
        loss_fn: classification/regression loss on (output, targets).
        feature_stats: optional (mean, std) tensors for feature normalization.
        log: if False, suppress the periodic progress printout.
    """
    model.train()
    optimizer.zero_grad()
    n_samples, correct, train_loss = 0, 0, 0
    alpha_pred, alpha_GT = {}, {}
    start = time.time()
    # with torch.autograd.set_detect_anomaly(True):
    for batch_idx, data in enumerate(train_loader):
        data = data_to_device(data, args.device)
        if feature_stats is not None:
            # Standardize node features with precomputed dataset statistics.
            data[0] = (data[0] - feature_stats[0]) / feature_stats[1]
        if batch_idx == 0 and epoch <= 1:
            # Run the sanity check in eval mode to disable the effect of dropout
            # or other regularizers that can change behavior from batch to batch.
            sanity_check(model.eval(), data)
            model.train()
        optimizer.zero_grad()
        # Start the per-layer mask list with the input node mask.
        mask = [data[2].view(len(data[2]), -1)]
        output, other_outputs = model(data)
        other_losses = other_outputs['reg'] if 'reg' in other_outputs else []
        alpha = other_outputs['alpha'] if 'alpha' in other_outputs else []
        mask.extend(other_outputs['mask'] if 'mask' in other_outputs else [])
        targets = data[3]
        loss = loss_fn(output, targets)
        # Add auxiliary regularization losses (e.g. KL on attention) if any.
        for l in other_losses:
            loss += l
        loss_item = loss.item()
        train_loss += loss_item
        n_samples += len(targets)
        loss.backward()  # accumulates gradient
        optimizer.step()  # update weights
        time_iter = time.time() - start
        correct += count_correct(output.detach(), targets.detach())
        # Accumulate predicted/ground-truth attention for AUC reporting.
        update_attn(data, alpha, alpha_pred, alpha_GT, mask)
        acc = 100. * correct / n_samples  # average over all examples in the dataset
        train_loss_avg = train_loss / (batch_idx + 1)
        if log and ((batch_idx > 0 and batch_idx % args.log_interval == 0) or batch_idx == len(train_loader) - 1):
            print('Train set (epoch {}): [{}/{} ({:.0f}%)]\tLoss: {:.4f} (avg: {:.4f}), other losses: {}\tAcc metric: {}/{} ({:.2f}%)\t AttnAUC: {}\t avg sec/iter: {:.4f}'.format(
                epoch, n_samples, len(train_loader.dataset), 100. * n_samples / len(train_loader.dataset),
                loss_item, train_loss_avg, ['%.4f' % l.item() for l in other_losses],
                correct, n_samples, acc, ['%.2f' % a for a in attn_AUC(alpha_GT, alpha_pred)],
                time_iter / (batch_idx + 1)))
    assert n_samples == len(train_loader.dataset), (n_samples, len(train_loader.dataset))
    return train_loss, acc
def test(model, test_loader, epoch, loss_fn, split, args, feature_stats=None, noises=None,
         img_noise_level=None, eval_attn=False, alpha_WS_name=''):
    """Evaluate ``model`` on a split; optionally add input noise and/or compute
    occlusion-based ("weakly supervised") attention maps.

    Args:
        model: graph network returning ``(logits, other_outputs)``.
        test_loader: DataLoader for the evaluation split.
        epoch: epoch number (for logging/sanity checks).
        loss_fn: per-sample loss; summed over the split.
        split: split name used in logging and in the saved-attention filename.
        args: namespace (device, dataset, debug, torch version string, results dir).
        feature_stats: optional (mean, std) feature-normalization tensors.
        noises: optional pre-generated noise tensor added to the first 3 feature
            channels (scaled by ``img_noise_level``) for robustness experiments.
        img_noise_level: scalar multiplier for ``noises``.
        eval_attn: if True, compute attention via :func:`attn_heatmaps`
            (slow: one forward pass per node) and optionally save it to disk.
        alpha_WS_name: suffix for the saved attention pickle; '' disables saving.

    Returns:
        Tuple ``(test_loss, acc, alpha_pred, pred)``.
    """
    model.eval()
    n_samples, correct, test_loss = 0, 0, 0
    pred, targets, N_nodes = [], [], []
    start = time.time()
    alpha_pred, alpha_GT = {}, {}
    if eval_attn:
        alpha_pred[0] = []
        print('testing with evaluation of attention: takes longer time')
    if args.debug:
        debug_data = {}
    with torch.no_grad():
        for batch_idx, data in enumerate(test_loader):
            data = data_to_device(data, args.device)
            if feature_stats is not None:
                # Stats and features must agree on the feature dimension.
                assert feature_stats[0].shape[2] == feature_stats[1].shape[2] == data[0].shape[2], \
                    (feature_stats[0].shape, feature_stats[1].shape, data[0].shape)
                data[0] = (data[0] - feature_stats[0]) / feature_stats[1]
            if batch_idx == 0 and epoch <= 1:
                sanity_check(model, data)
            if noises is not None:
                # Inject noise into the first 3 feature channels (e.g. RGB).
                noise = noises[n_samples:n_samples + len(data[0])].to(args.device) * img_noise_level
                if len(noise.shape) == 2:
                    noise = noise.unsqueeze(2)
                data[0][:, :, :3] = data[0][:, :, :3] + noise
            mask = [data[2].view(len(data[2]), -1)]
            N_nodes.append(data[4]['N_nodes'].detach())
            targets.append(data[3].detach())
            output, other_outputs = model(data)
            other_losses = other_outputs['reg'] if 'reg' in other_outputs else []
            alpha = other_outputs['alpha'] if 'alpha' in other_outputs else []
            mask.extend(other_outputs['mask'] if 'mask' in other_outputs else [])
            if args.debug:
                # Collect any '*debug*' outputs for per-layer averaging below.
                for key in other_outputs:
                    if key.find('debug') >= 0:
                        if key not in debug_data:
                            debug_data[key] = []
                        debug_data[key].append([d.data.cpu().numpy() for d in other_outputs[key]])
            # PyTorch >= 1.0 renamed `reduce=False` to `reduction=...`.
            if args.torch.find('1.') == 0:
                loss = loss_fn(output, data[3], reduction='sum')
            else:
                loss = loss_fn(output, data[3], reduce=False).sum()
            for l in other_losses:
                loss += l
            test_loss += loss.item()
            pred.append(output.detach())
            update_attn(data, alpha, alpha_pred, alpha_GT, mask)
            if eval_attn:
                assert len(alpha) == 0, ('invalid mode, eval_attn should be false for this type of pooling')
                alpha_pred[0].extend(attn_heatmaps(model, args.device, data, output.data, test_loader.batch_size, constant_mask=args.dataset=='mnist'))
            n_samples += len(data[0])
            if eval_attn and (n_samples % 100 == 0 or n_samples == len(test_loader.dataset)):
                print('{}/{} samples processed'.format(n_samples, len(test_loader.dataset)))
    assert n_samples == len(test_loader.dataset), (n_samples, len(test_loader.dataset))
    pred = torch.cat(pred)
    targets = torch.cat(targets)
    N_nodes = torch.cat(N_nodes)
    # Accuracy is reported per graph-size bucket for the synthetic datasets;
    # NOTE(review): for 'colors' the [2500:5000] and [5000:] slices use the same
    # node-count bounds — presumably two separate test subsets; confirm.
    if args.dataset.find('colors') >= 0:
        correct = count_correct(pred, targets, N_nodes=N_nodes, N_nodes_min=0, N_nodes_max=25)
        if pred.shape[0] > 2500:
            correct += count_correct(pred[2500:5000], targets[2500:5000], N_nodes=N_nodes[2500:5000], N_nodes_min=26, N_nodes_max=200)
            correct += count_correct(pred[5000:], targets[5000:], N_nodes=N_nodes[5000:], N_nodes_min=26, N_nodes_max=200)
    elif args.dataset == 'triangles':
        correct = count_correct(pred, targets, N_nodes=N_nodes, N_nodes_min=0, N_nodes_max=25)
        if pred.shape[0] > 5000:
            correct += count_correct(pred, targets, N_nodes=N_nodes, N_nodes_min=26, N_nodes_max=100)
    else:
        correct = count_correct(pred, targets, N_nodes=N_nodes, N_nodes_min=0, N_nodes_max=1e5)
    time_iter = time.time() - start
    test_loss_avg = test_loss / n_samples
    acc = 100. * correct / n_samples  # average over all examples in the dataset
    print('{} set (epoch {}): Avg loss: {:.4f}, Acc metric: {}/{} ({:.2f}%)\t AttnAUC: {}\t avg sec/iter: {:.4f}\n'.format(
        split.capitalize(), epoch, test_loss_avg, correct, n_samples, acc,
        ['%.2f' % a for a in attn_AUC(alpha_GT, alpha_pred)], time_iter / (batch_idx + 1)))
    if args.debug:
        for key in debug_data:
            for layer in range(len(debug_data[key][0])):
                print('{} (layer={}): {:.5f}'.format(key, layer, np.mean([d[layer] for d in debug_data[key]])))
    if eval_attn:
        alpha_pred = alpha_pred[0]
        if args.results in [None, 'None', ''] or alpha_WS_name == '':
            print('skip saving alpha values, invalid results dir (%s) or alpha_WS_name (%s)' % (args.results, alpha_WS_name))
        else:
            file_path = pjoin(args.results, '%s_alpha_WS_%s_seed%d_%s.pkl' % (args.dataset, split, args.seed, alpha_WS_name))
            if os.path.isfile(file_path):
                print('WARNING: file %s exists and will be overwritten' % file_path)
            with open(file_path, 'wb') as f:
                pickle.dump(alpha_pred, f, protocol=2)
    return test_loss, acc, alpha_pred, pred
def update_attn(data, alpha, alpha_pred, alpha_GT, mask):
    """Accumulate per-layer attention values for later AUC evaluation.

    Mutates its arguments in place:
      - each entry of ``mask`` is converted from a tensor to a boolean numpy
        array (visible to the caller);
      - masked predicted attention from ``alpha`` is appended to
        ``alpha_pred[layer]``;
      - masked ground-truth attention from ``data[4]['node_attn_eval']`` (if
        present) is appended to ``alpha_GT[layer]``; a single tensor there is
        first wrapped into a one-element list.
    """
    key = 'node_attn_eval'
    for layer in range(len(mask)):
        mask[layer] = mask[layer].data.cpu().numpy() > 0
    if key in data[4]:
        if not isinstance(data[4][key], list):
            data[4][key] = [data[4][key]]
        for layer in range(len(data[4][key])):
            if layer not in alpha_GT:
                alpha_GT[layer] = []
            # print(key, layer, len(data[4][key]), len(mask))
            alpha_GT[layer].extend(masked_alpha(data[4][key][layer].data.cpu().numpy(), mask[layer]))
    for layer in range(len(alpha)):
        if layer not in alpha_pred:
            alpha_pred[layer] = []
        alpha_pred[layer].extend(masked_alpha(alpha[layer].data.cpu().numpy(), mask[layer]))
def masked_alpha(alpha, mask):
    """Keep only the attention values at valid (masked-in) node positions.

    Args:
        alpha: sequence of per-sample attention arrays.
        mask: sequence of boolean masks, one per sample, same length as ``alpha``.

    Returns:
        List of 1-D arrays, one per sample, containing the selected values.
    """
    return [coeffs[valid] for coeffs, valid in zip(alpha, mask)]
def attn_heatmaps(model, device, data, output_org, batch_size=1, constant_mask=False):
    """Compute occlusion-based node attention for each graph in the batch.

    For every node, the graph is re-evaluated with that node removed; the
    node's attention is the absolute change of the predicted-class score.
    Requires N_nodes_max forward passes per graph, so it is slow and is
    disabled for graphs with more than 1000 nodes (uniform attention is
    returned instead, with a warning).

    Args:
        model: graph network; called as ``model([x, A, mask, None, {}])``.
        device: torch device for intermediate tensors.
        data: batch ``[x (B,N,C), A (B,N,N), mask (B,N), ...]``; all graphs in
            the batch are assumed to have the same maximum node count.
        output_org: original logits (B, n_classes) for this batch.
        batch_size: number of node-removal variants evaluated per forward pass.
        constant_mask: if True (e.g. MNIST), the node mask is the same for all
            graphs and is built once.

    Returns:
        List of B numpy arrays of normalized attention over valid nodes.
    """
    labels = torch.argmax(output_org, dim=1)
    B, N_nodes_max, C = data[0].shape  # N_nodes should be the same in the batch
    alpha_WS = []
    if N_nodes_max > 1000:
        print('WARNING: graph is too large (%d nodes) and not supported by this function (evaluation will be incorrect for graphs in this batch).' % N_nodes_max)
        for b in range(B):
            n = data[2][b].sum().item()
            # Fall back to uniform attention over the valid nodes.
            alpha_WS.append(np.zeros((1, n)) + 1. / n)
        return alpha_WS
    if constant_mask:
        mask = torch.ones(N_nodes_max, N_nodes_max - 1).to(device)
    # Indices of nodes such that in each row one index (i.e. one node) is removed
    node_ids = torch.arange(start=0, end=N_nodes_max, device=device).view(1, -1).repeat(N_nodes_max, 1)
    node_ids[np.diag_indices(N_nodes_max, 2)] = -1
    node_ids = node_ids[node_ids >= 0].view(N_nodes_max, N_nodes_max - 1).long()
    with torch.no_grad():
        for b in range(B):
            # Build N_nodes_max "leave-one-node-out" copies of features/mask/adjacency.
            x = torch.gather(data[0][b].unsqueeze(0).expand(N_nodes_max, -1, -1), dim=1, index=node_ids.unsqueeze(2).expand(-1, -1, C))
            if not constant_mask:
                mask = torch.gather(data[2][b].unsqueeze(0).expand(N_nodes_max, -1), dim=1, index=node_ids)
            A = torch.gather(data[1][b].unsqueeze(0).expand(N_nodes_max, -1, -1), dim=1, index=node_ids.unsqueeze(2).expand(-1, -1, N_nodes_max))
            A = torch.gather(A, dim=2, index=node_ids.unsqueeze(1).expand(-1, N_nodes_max - 1, -1))
            output = torch.zeros(N_nodes_max).to(device)
            # Evaluate the occluded copies in chunks of `batch_size`.
            n_chunks = int(np.ceil(N_nodes_max / float(batch_size)))
            for i in range(n_chunks):
                idx = np.arange(i * batch_size, (i + 1) * batch_size) if i < n_chunks - 1 else np.arange(i * batch_size, N_nodes_max)
                output[idx] = model([x[idx], A[idx], mask[idx], None, {}])[0][:, labels[b]].data
            # Attention = |score drop| when the node is removed.
            alpha = torch.abs(output - output_org[b, labels[b]]).view(1, N_nodes_max) #* mask_org[b].view(1, N_nodes_max)
            if not constant_mask:
                alpha = alpha[data[2][b].view(1, N_nodes_max)]
            alpha_WS.append(normalize(alpha).data.cpu().numpy())
    return alpha_WS
def save_checkpoint(model, scheduler, optimizer, args, epoch):
    """Save model/optimizer/scheduler state to ``args.results`` (best effort).

    Does nothing when ``args.results`` is unset; any save error is caught and
    printed rather than raised, so a failed save never aborts training.
    """
    if args.results in [None, 'None']:
        print('skip saving checkpoint, invalid results dir: %s' % args.results)
        return
    file_path = '%s/checkpoint_%s_%s_epoch%d_seed%07d.pth.tar' % (args.results, args.dataset, args.experiment_ID, epoch, args.seed)
    try:
        print('saving the model to %s' % file_path)
        # args is stored too so a run can be fully reproduced from the file.
        state = {
            'epoch': epoch,
            'args': args,
            'state_dict': model.state_dict(),
            'scheduler': scheduler.state_dict(),
            'optimizer': optimizer.state_dict(),
        }
        if os.path.isfile(file_path):
            print('WARNING: file %s exists and will be overwritten' % file_path)
        torch.save(state, file_path)
    except Exception as e:
        print('error saving the model', e)
def load_checkpoint(model, optimizer, scheduler, file_path):
    """Restore model/optimizer/scheduler state from ``file_path``.

    Returns:
        The epoch to resume training from (saved epoch + 1).
    """
    print('loading the model from %s' % file_path)
    state = torch.load(file_path)
    model.load_state_dict(state['state_dict'])
    optimizer.load_state_dict(state['optimizer'])
    scheduler.load_state_dict(state['scheduler'])
    print('loading from epoch %d done' % state['epoch'])
    return state['epoch'] + 1  # +1 because we already finished training for this epoch
def create_model_optimizer(in_features, out_features, pool, kl_weight, args, scale=None, init=None, n_hidden_attn=None):
    """Build a ChebyGIN model with its Adam optimizer and LR scheduler.

    Args:
        in_features, out_features: model input/output dimensionalities.
        pool: pooling specification (list of strings) or None.
        kl_weight: weight of the KL attention regularizer.
        args: namespace with architecture, optimizer and resume settings.
        scale, init: optional overrides of ``args.scale`` / ``args.init``.
        n_hidden_attn: if set and nonzero, overrides the hidden-layer size in
            the attention sub-network (``pool_arch``).

    Returns:
        Tuple ``(epoch, model, optimizer, scheduler)`` where ``epoch`` is 1 or
        the epoch restored from ``args.resume``.
    """
    set_seed(args.seed, seed_data=None)
    model = ChebyGIN(in_features=in_features,
                     out_features=out_features,
                     filters=args.filters,
                     K=args.filter_scale,
                     n_hidden=args.n_hidden,
                     aggregation=args.aggregation,
                     dropout=args.dropout,
                     readout=args.readout,
                     pool=pool,
                     pool_arch=args.pool_arch if n_hidden_attn in [None, 0] else args.pool_arch[:2] + ['%d' % n_hidden_attn],
                     large_graph=args.dataset.lower() == 'mnist',
                     kl_weight=float(kl_weight),
                     init=args.init if init is None else init,
                     scale=args.scale if scale is None else scale,
                     debug=args.debug)
    print(model)
    # Compute the total number of trainable parameters
    print('model capacity: %d' %
          np.sum([np.prod(p.size()) if p.requires_grad else 0 for p in model.parameters()]))
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wdecay, betas=(0.5, 0.999))
    scheduler = lr_scheduler.MultiStepLR(optimizer, args.lr_decay_step, gamma=0.1)
    epoch = 1
    if args.resume not in [None, 'None']:
        # Resume from a checkpoint; load_checkpoint returns the next epoch.
        epoch = load_checkpoint(model, optimizer, scheduler, args.resume)
        if epoch < args.epochs + 1:
            print('resuming training for epoch %d' % epoch)
    model.to(args.device)
    return epoch, model, optimizer, scheduler
def single_job(fold, datareader, args, collate_fn, loss_fn, pool, kl_weight, feature_stats, val_acc,
               scale=None, init=None, n_hidden_attn=None):
    """Train and validate one cross-validation fold; store accuracy in ``val_acc``.

    In the weakly-supervised case (``args.pool[1] == 'sup'``) a first model is
    trained without pooling, its occlusion attention over the training set is
    extracted with ``eval_attn=True``, and a second model is then trained from
    scratch using those attention values as (weak) supervision.

    Args:
        fold: fold index; also the key under which accuracy is written to
            ``val_acc`` (a multiprocessing-safe dict).
    """
    set_seed(args.seed, seed_data=None)
    wsup = args.pool[1] == 'sup'
    train_loader = DataLoader(GraphData(datareader, fold, 'train'), batch_size=args.batch_size, shuffle=True,
                              num_workers=args.threads, collate_fn=collate_fn)
    val_loader = DataLoader(GraphData(datareader, fold, 'val'), batch_size=args.test_batch_size, shuffle=False,
                            num_workers=args.threads, collate_fn=collate_fn)
    # First (or only) model: pooling disabled in the weakly-supervised case.
    start_epoch, model, optimizer, scheduler = create_model_optimizer(train_loader.dataset.num_features,
                                                                      train_loader.dataset.num_classes,
                                                                      None if wsup else pool, kl_weight, args,
                                                                      scale=scale, init=init, n_hidden_attn=n_hidden_attn)
    for epoch in range(start_epoch, args.epochs + 1):
        scheduler.step()
        train(model, train_loader, optimizer, epoch, args, loss_fn, feature_stats, log=False)
    if wsup:
        # Extract occlusion-based attention over the (unshuffled) training set.
        train_loader_test = DataLoader(GraphData(datareader, fold, 'train'), batch_size=args.test_batch_size, shuffle=False,
                                       num_workers=args.threads, collate_fn=collate_fn)
        train_loss, train_acc, attn_WS = test(model, train_loader_test, epoch, loss_fn, 'train', args, feature_stats, eval_attn=True)[:3]  # test_loss, acc, alpha_pred, pred
        # Retrain from scratch with the extracted attention as weak labels.
        train_loader = DataLoader(GraphData(datareader, fold, 'train', attn_labels=attn_WS),
                                  batch_size=args.batch_size, shuffle=True,
                                  num_workers=args.threads, collate_fn=collate_fn)
        val_loader = DataLoader(GraphData(datareader, fold, 'val'), batch_size=args.test_batch_size, shuffle=False,
                                num_workers=args.threads, collate_fn=collate_fn)
        start_epoch, model, optimizer, scheduler = create_model_optimizer(train_loader.dataset.num_features,
                                                                          train_loader.dataset.num_classes,
                                                                          pool, kl_weight, args,
                                                                          scale=scale, init=init, n_hidden_attn=n_hidden_attn)
        for epoch in range(start_epoch, args.epochs + 1):
            scheduler.step()
            train(model, train_loader, optimizer, epoch, args, loss_fn, feature_stats, log=False)
    acc = test(model, val_loader, epoch, loss_fn, 'val', args, feature_stats)[1]
    val_acc[fold] = acc
def cross_validation(datareader, args, collate_fn, loss_fn, pool, kl_weight, feature_stats, n_hidden_attn=None, folds=10, threads=5):
    """Run k-fold cross-validation for a fixed hyper-parameter setting.

    Mirrors :func:`train_evaluate` (the Ax objective) but takes ``pool`` and
    ``kl_weight`` directly instead of from an Ax parameterization. Folds run
    in waves of ``threads`` parallel processes, each via :func:`single_job`.

    Returns:
        Scalar metric = mean validation accuracy minus its std over folds.
    """
    print('%d-fold cross-validation' % folds)
    # Manager dict lets worker processes report per-fold accuracy back.
    manager = multiprocessing.Manager()
    val_acc = manager.dict()
    assert threads <= folds, (threads, folds)
    n_it = int(np.ceil(float(folds) / threads))
    for i in range(n_it):
        processes = []
        if threads <= 1:
            single_job(i * threads, datareader, args, collate_fn, loss_fn, pool, kl_weight,
                       feature_stats, val_acc, scale=args.scale, init=args.init, n_hidden_attn=n_hidden_attn)
        else:
            for fold in range(threads):
                p = mp.Process(target=single_job, args=(i * threads + fold, datareader, args, collate_fn, loss_fn, pool, kl_weight,
                                                        feature_stats, val_acc, args.scale, args.init, n_hidden_attn))
                p.start()
                processes.append(p)
            for p in processes:
                p.join()
    print(val_acc)
    val_acc = list(val_acc.values())
    print('average and std over {} folds: {} +- {}'.format(folds, np.mean(val_acc), np.std(val_acc)))
    # Large std across folds is penalized, same metric as train_evaluate.
    metric = np.mean(val_acc) - np.std(val_acc)
    print('metric: avg acc - std: {}'.format(metric))
    return metric
| 21,879 | 47.087912 | 179 | py |
option-critic-pytorch | option-critic-pytorch-master/main.py | import numpy as np
import argparse
import torch
from copy import deepcopy
from option_critic import OptionCriticFeatures, OptionCriticConv
from option_critic import critic_loss as critic_loss_fn
from option_critic import actor_loss as actor_loss_fn
from experience_replay import ReplayBuffer
from utils import make_env, to_tensor
from logger import Logger
import time
# Command-line interface; defaults correspond to the CartPole configuration.
parser = argparse.ArgumentParser(description="Option Critic PyTorch")
# Environment
parser.add_argument('--env', default='CartPole-v0', help='ROM to run')
parser.add_argument('--optimal-eps', type=float, default=0.05, help='Epsilon when playing optimally')
parser.add_argument('--frame-skip', default=4, type=int, help='Every how many frames to process')
# Optimization / exploration schedule
parser.add_argument('--learning-rate',type=float, default=.0005, help='Learning rate')
parser.add_argument('--gamma', type=float, default=.99, help='Discount rate')
parser.add_argument('--epsilon-start',  type=float, default=1.0, help=('Starting value for epsilon.'))
parser.add_argument('--epsilon-min', type=float, default=.1, help='Minimum epsilon.')
parser.add_argument('--epsilon-decay', type=float, default=20000, help=('Number of steps to minimum epsilon.'))
# Replay buffer / update cadence
parser.add_argument('--max-history', type=int, default=10000, help=('Maximum number of steps stored in replay'))
parser.add_argument('--batch-size', type=int, default=32, help='Batch size.')
parser.add_argument('--freeze-interval', type=int, default=200, help=('Interval between target freezes.'))
parser.add_argument('--update-frequency', type=int, default=4, help=('Number of actions before each SGD update.'))
# Option-critic specific regularizers and sizes
parser.add_argument('--termination-reg', type=float, default=0.01, help=('Regularization to decrease termination prob.'))
parser.add_argument('--entropy-reg', type=float, default=0.01, help=('Regularization to increase policy entropy.'))
parser.add_argument('--num-options', type=int, default=2, help=('Number of options to create.'))
parser.add_argument('--temp', type=float, default=1, help='Action distribution softmax tempurature param.')
# Run length / reproducibility / logging
parser.add_argument('--max_steps_ep', type=int, default=18000, help='number of maximum steps per episode.')
parser.add_argument('--max_steps_total', type=int, default=int(4e6), help='number of maximum steps to take.') # bout 4 million
parser.add_argument('--cuda', type=bool, default=True, help='Enable CUDA training (recommended if possible).')
parser.add_argument('--seed', type=int, default=0, help='Random seed for numpy, torch, random.')
parser.add_argument('--logdir', type=str, default='runs', help='Directory for logging statistics')
parser.add_argument('--exp', type=str, default=None, help='optional experiment name')
parser.add_argument('--switch-goal', type=bool, default=False, help='switch goal after 2k eps')
def run(args):
    """Main option-critic training loop.

    Builds the environment and the option-critic network (convolutional for
    Atari, feature-based otherwise) plus a frozen "prime" copy used as the
    target network for stable Q estimates, then alternates environment steps
    with actor/critic updates until ``args.max_steps_total`` steps.
    """
    env, is_atari = make_env(args.env)
    option_critic = OptionCriticConv if is_atari else OptionCriticFeatures
    device = torch.device('cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
    option_critic = option_critic(
        in_features=env.observation_space.shape[0],
        num_actions=env.action_space.n,
        num_options=args.num_options,
        temperature=args.temp,
        eps_start=args.epsilon_start,
        eps_min=args.epsilon_min,
        eps_decay=args.epsilon_decay,
        eps_test=args.optimal_eps,
        device=device
    )
    # Create a prime network for more stable Q values
    option_critic_prime = deepcopy(option_critic)
    optim = torch.optim.RMSprop(option_critic.parameters(), lr=args.learning_rate)
    # Seed everything for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    env.seed(args.seed)
    buffer = ReplayBuffer(capacity=args.max_history, seed=args.seed)
    logger = Logger(logdir=args.logdir, run_name=f"{OptionCriticFeatures.__name__}-{args.env}-{args.exp}-{time.ctime()}")
    steps = 0 ;
    if args.switch_goal: print(f"Current goal {env.goal}")
    while steps < args.max_steps_total:
        rewards = 0 ; option_lengths = {opt:[] for opt in range(args.num_options)}
        obs = env.reset()
        state = option_critic.get_state(to_tensor(obs))
        greedy_option = option_critic.greedy_option(state)
        current_option = 0
        # Goal switching experiment: run for 1k episodes in fourrooms, switch goals and run for another
        # 2k episodes. In option-critic, if the options have some meaning, only the policy-over-options
        # should be finedtuned (this is what we would hope).
        if args.switch_goal and logger.n_eps == 1000:
            torch.save({'model_params': option_critic.state_dict(),
                        'goal_state': env.goal},
                        f'models/option_critic_seed={args.seed}_1k')
            env.switch_goal()
            print(f"New goal {env.goal}")
        if args.switch_goal and logger.n_eps > 2000:
            torch.save({'model_params': option_critic.state_dict(),
                        'goal_state': env.goal},
                        f'models/option_critic_seed={args.seed}_2k')
            break
        done = False ; ep_steps = 0 ; option_termination = True ; curr_op_len = 0
        while not done and ep_steps < args.max_steps_ep:
            # Reading .epsilon also advances the decay schedule (side effect).
            epsilon = option_critic.epsilon
            if option_termination:
                # Current option ended: record its length and pick a new one
                # (epsilon-greedy over the policy-over-options).
                option_lengths[current_option].append(curr_op_len)
                current_option = np.random.choice(args.num_options) if np.random.rand() < epsilon else greedy_option
                curr_op_len = 0
            action, logp, entropy = option_critic.get_action(state, current_option)
            next_obs, reward, done, _ = env.step(action)
            buffer.push(obs, current_option, reward, next_obs, done)
            rewards += reward
            actor_loss, critic_loss = None, None
            if len(buffer) > args.batch_size:
                # Actor update every step; critic update every `update_frequency` steps.
                actor_loss = actor_loss_fn(obs, current_option, logp, entropy, \
                    reward, done, next_obs, option_critic, option_critic_prime, args)
                loss = actor_loss
                if steps % args.update_frequency == 0:
                    data_batch = buffer.sample(args.batch_size)
                    critic_loss = critic_loss_fn(option_critic, option_critic_prime, data_batch, args)
                    loss += critic_loss
                optim.zero_grad()
                loss.backward()
                optim.step()
                # Periodically sync the target ("prime") network.
                if steps % args.freeze_interval == 0:
                    option_critic_prime.load_state_dict(option_critic.state_dict())
            state = option_critic.get_state(to_tensor(next_obs))
            option_termination, greedy_option = option_critic.predict_option_termination(state, current_option)
            # update global steps etc
            steps += 1
            ep_steps += 1
            curr_op_len += 1
            obs = next_obs
            logger.log_data(steps, actor_loss, critic_loss, entropy.item(), epsilon)
        logger.log_episode(steps, rewards, option_lengths, ep_steps, epsilon)
# Script entry point: parse CLI arguments and launch training.
if __name__=="__main__":
    args = parser.parse_args()
    run(args)
| 7,063 | 47.383562 | 126 | py |
option-critic-pytorch | option-critic-pytorch-master/option_critic.py | import torch
import torch.nn as nn
from torch.distributions import Categorical, Bernoulli
from math import exp
import numpy as np
from utils import to_tensor
class OptionCriticConv(nn.Module):
    """Option-critic network with a convolutional encoder (image observations).

    A shared conv feature extractor feeds three heads: Q over options
    (policy-over-options), per-option termination probabilities, and
    per-option linear softmax policies over primitive actions
    (``options_W``/``options_b``).
    """

    def __init__(self,
                in_features,
                num_actions,
                num_options,
                temperature=1.0,
                eps_start=1.0,
                eps_min=0.1,
                eps_decay=int(1e6),
                eps_test=0.05,
                device='cpu',
                testing=False):
        super(OptionCriticConv, self).__init__()

        self.in_channels = in_features
        self.num_actions = num_actions
        self.num_options = num_options
        # Flattened conv output size; presumably 7x7x64 from an 84x84 Atari
        # frame through the conv stack below — confirm if inputs change.
        self.magic_number = 7 * 7 * 64
        self.device = device
        self.testing = testing

        # Epsilon-greedy exploration schedule parameters.
        self.temperature = temperature
        self.eps_min   = eps_min
        self.eps_start = eps_start
        self.eps_decay = eps_decay
        self.eps_test  = eps_test
        self.num_steps = 0
        
        self.features = nn.Sequential(
            nn.Conv2d(self.in_channels, 32, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 64, kernel_size=3, stride=1),
            nn.ReLU(),
            nn.modules.Flatten(),
            nn.Linear(self.magic_number, 512),
            nn.ReLU()
        )

        self.Q            = nn.Linear(512, num_options)                 # Policy-Over-Options
        self.terminations = nn.Linear(512, num_options)                 # Option-Termination
        # One linear softmax policy per option.
        self.options_W = nn.Parameter(torch.zeros(num_options, 512, num_actions))
        self.options_b = nn.Parameter(torch.zeros(num_options, num_actions))

        self.to(device)
        self.train(not testing)

    def get_state(self, obs):
        """Encode an observation (adds a batch dim if missing) into features."""
        if obs.ndim < 4:
            obs = obs.unsqueeze(0)
        obs = obs.to(self.device)
        state = self.features(obs)
        return state

    def get_Q(self, state):
        """Q-values over options for the given encoded state."""
        return self.Q(state)
    
    def predict_option_termination(self, state, current_option):
        """Sample whether the current option terminates; also return greedy next option."""
        termination = self.terminations(state)[:, current_option].sigmoid()
        option_termination = Bernoulli(termination).sample()
        Q = self.get_Q(state)
        next_option = Q.argmax(dim=-1)
        return bool(option_termination.item()), next_option.item()
    
    def get_terminations(self, state):
        """Termination probabilities for all options."""
        return self.terminations(state).sigmoid() 

    def get_action(self, state, option):
        """Sample an action from the given option's softmax policy.

        Returns (action index, log-prob, entropy); ``state.data`` detaches the
        encoder so the actor gradient flows only through options_W/options_b.
        """
        logits = state.data @ self.options_W[option] + self.options_b[option]
        action_dist = (logits / self.temperature).softmax(dim=-1)
        action_dist = Categorical(action_dist)

        action = action_dist.sample()
        logp = action_dist.log_prob(action)
        entropy = action_dist.entropy()

        return action.item(), logp, entropy
    
    def greedy_option(self, state):
        """Index of the option with the highest Q-value."""
        Q = self.get_Q(state)
        return Q.argmax(dim=-1).item()

    @property
    def epsilon(self):
        """Current exploration rate; NOTE: reading it advances the decay counter."""
        if not self.testing:
            eps = self.eps_min + (self.eps_start - self.eps_min) * exp(-self.num_steps / self.eps_decay)
            self.num_steps += 1
        else:
            eps = self.eps_test
        return eps
class OptionCriticFeatures(nn.Module):
    """Option-critic network with an MLP encoder (low-dimensional observations).

    Same head structure as :class:`OptionCriticConv` — Q over options,
    per-option terminations, and per-option linear softmax action policies —
    but with a small fully-connected feature extractor.
    """

    def __init__(self,
                in_features,
                num_actions,
                num_options,
                temperature=1.0,
                eps_start=1.0,
                eps_min=0.1,
                eps_decay=int(1e6),
                eps_test=0.05,
                device='cpu',
                testing=False):
        super(OptionCriticFeatures, self).__init__()

        self.in_features = in_features
        self.num_actions = num_actions
        self.num_options = num_options
        self.device = device
        self.testing = testing

        # Epsilon-greedy exploration schedule parameters.
        self.temperature = temperature
        self.eps_min   = eps_min
        self.eps_start = eps_start
        self.eps_decay = eps_decay
        self.eps_test  = eps_test
        self.num_steps = 0
        
        self.features = nn.Sequential(
            nn.Linear(in_features, 32),
            nn.ReLU(),
            nn.Linear(32, 64),
            nn.ReLU()
        )

        self.Q            = nn.Linear(64, num_options)                 # Policy-Over-Options
        self.terminations = nn.Linear(64, num_options)                 # Option-Termination
        # One linear softmax policy per option.
        self.options_W = nn.Parameter(torch.zeros(num_options, 64, num_actions))
        self.options_b = nn.Parameter(torch.zeros(num_options, num_actions))

        self.to(device)
        self.train(not testing)

    def get_state(self, obs):
        """Encode an observation (adds a batch dim if missing) into features."""
        if obs.ndim < 4:
            obs = obs.unsqueeze(0)
        obs = obs.to(self.device)
        state = self.features(obs)
        return state

    def get_Q(self, state):
        """Q-values over options for the given encoded state."""
        return self.Q(state)
    
    def predict_option_termination(self, state, current_option):
        """Sample whether the current option terminates; also return greedy next option."""
        termination = self.terminations(state)[:, current_option].sigmoid()
        option_termination = Bernoulli(termination).sample()
        Q = self.get_Q(state)
        next_option = Q.argmax(dim=-1)
        return bool(option_termination.item()), next_option.item()
    
    def get_terminations(self, state):
        """Termination probabilities for all options."""
        return self.terminations(state).sigmoid() 

    def get_action(self, state, option):
        """Sample an action from the given option's softmax policy.

        Returns (action index, log-prob, entropy); ``state.data`` detaches the
        encoder so the actor gradient flows only through options_W/options_b.
        """
        logits = state.data @ self.options_W[option] + self.options_b[option]
        action_dist = (logits / self.temperature).softmax(dim=-1)
        action_dist = Categorical(action_dist)

        action = action_dist.sample()
        logp = action_dist.log_prob(action)
        entropy = action_dist.entropy()

        return action.item(), logp, entropy
    
    def greedy_option(self, state):
        """Index of the option with the highest Q-value."""
        Q = self.get_Q(state)
        return Q.argmax(dim=-1).item()

    @property
    def epsilon(self):
        """Current exploration rate; NOTE: reading it advances the decay counter."""
        if not self.testing:
            eps = self.eps_min + (self.eps_start - self.eps_min) * exp(-self.num_steps / self.eps_decay)
            self.num_steps += 1
        else:
            eps = self.eps_test
        return eps
def critic_loss(model, model_prime, data_batch, args):
    """TD(0) loss for the option-value function Q(s, w) over a replay batch.

    The bootstrap target uses the frozen ``model_prime`` network: with
    probability (1 - beta) the current option continues (its Q is used), with
    probability beta it terminates and the max over options is used.

    Args:
        model: live option-critic network (gradients flow through its Q).
        model_prime: frozen target copy used for the bootstrap values.
        data_batch: tuple (obs, options, rewards, next_obs, dones) from replay.
        args: namespace providing ``gamma``.

    Returns:
        Scalar 0.5 * mean squared TD error.
    """
    obs, options, rewards, next_obs, dones = data_batch
    batch_idx = torch.arange(len(options)).long()
    options   = torch.LongTensor(options).to(model.device)
    rewards   = torch.FloatTensor(rewards).to(model.device)
    masks     = 1 - torch.FloatTensor(dones).to(model.device)

    # The loss is the TD loss of Q and the update target, so we need to calculate Q
    states = model.get_state(to_tensor(obs)).squeeze(0)
    Q      = model.get_Q(states)
    
    # the update target contains Q_next, but for stable learning we use prime network for this
    next_states_prime = model_prime.get_state(to_tensor(next_obs)).squeeze(0)
    next_Q_prime      = model_prime.get_Q(next_states_prime) # detach?

    # Additionally, we need the beta probabilities of the next state
    next_states            = model.get_state(to_tensor(next_obs)).squeeze(0)
    next_termination_probs = model.get_terminations(next_states).detach()
    next_options_term_prob = next_termination_probs[batch_idx, options]

    # Now we can calculate the update target gt
    gt = rewards + masks * args.gamma * \
        ((1 - next_options_term_prob) * next_Q_prime[batch_idx, options] + next_options_term_prob  * next_Q_prime.max(dim=-1)[0])

    # to update Q we want to use the actual network, not the prime
    td_err = (Q[batch_idx, options] - gt.detach()).pow(2).mul(0.5).mean()
    return td_err
def actor_loss(obs, option, logp, entropy, reward, done, next_obs, model, model_prime, args):
    """Combined termination + intra-option policy-gradient loss for one transition.

    The termination head is pushed down when the current option's Q exceeds the
    best option (advantage + ``termination_reg`` margin); the intra-option
    policy gets a one-step actor-critic gradient with entropy regularization.

    Args:
        obs, next_obs: raw observations for this transition.
        option: index of the option that produced the action.
        logp, entropy: log-prob and entropy from ``get_action`` (carry grads).
        reward, done: environment feedback for the step.
        model: live network; model_prime: frozen target for bootstrap values.
        args: namespace with ``gamma``, ``termination_reg``, ``entropy_reg``.

    Returns:
        Scalar loss (termination loss + policy loss).
    """
    state = model.get_state(to_tensor(obs))
    next_state = model.get_state(to_tensor(next_obs))
    next_state_prime = model_prime.get_state(to_tensor(next_obs))

    option_term_prob = model.get_terminations(state)[:, option]
    next_option_term_prob = model.get_terminations(next_state)[:, option].detach()

    Q = model.get_Q(state).detach().squeeze()
    next_Q_prime = model_prime.get_Q(next_state_prime).detach().squeeze()

    # Target update gt
    gt = reward + (1 - done) * args.gamma * \
        ((1 - next_option_term_prob) * next_Q_prime[option] + next_option_term_prob  * next_Q_prime.max(dim=-1)[0])

    # The termination loss
    termination_loss = option_term_prob * (Q[option].detach() - Q.max(dim=-1)[0].detach() + args.termination_reg) * (1 - done)
    
    # actor-critic policy gradient with entropy regularization
    policy_loss = -logp * (gt.detach() - Q[option]) - args.entropy_reg * entropy
    actor_loss = termination_loss + policy_loss
    return actor_loss
| 8,652 | 34.463115 | 129 | py |
option-critic-pytorch | option-critic-pytorch-master/utils.py | import gym
import numpy as np
import torch
from gym.wrappers import AtariPreprocessing, TransformReward
from gym.wrappers import FrameStack as FrameStack_
from fourrooms import Fourrooms
class LazyFrames(object):
    """Memory-saving view over a list of stacked observation frames.

    Consecutive replay-buffer transitions share the underlying per-frame
    arrays; the concatenation along axis 0 is deferred until the object is
    actually consumed (e.g. via ``np.asarray``), instead of being stored once
    per transition.
    """

    def __init__(self, frames):
        # Keep references only; concatenation happens lazily on access.
        self._frames = frames

    def __array__(self, dtype=None):
        """Materialize the frames as one array stacked along axis 0."""
        out = np.concatenate(self._frames, axis=0)
        if dtype is not None:
            out = out.astype(dtype)
        return out

    def __len__(self):
        # Length of the would-be concatenation along axis 0, computed from the
        # individual frame lengths — the original materialized the full array
        # just to call len() on it.
        return sum(len(frame) for frame in self._frames)

    def __getitem__(self, i):
        return self.__array__()[i]
class FrameStack(FrameStack_):
    """gym FrameStack variant whose observations are LazyFrames.

    Overrides observation assembly so stacked frames are wrapped lazily
    instead of concatenated eagerly, saving replay-buffer memory.
    """

    def __init__(self, env, k):
        FrameStack_.__init__(self, env, k)

    def _get_ob(self):
        # The deque must already hold exactly k frames.
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
def make_env(env_name):
    """Create an environment by name and report whether it is an Atari game.

    'fourrooms' maps to the custom Fourrooms environment; any other name is
    passed to ``gym.make``. Atari environments additionally get the standard
    preprocessing: grayscale 84x84 scaled frames, terminal-on-life-loss,
    reward clipping to [-1, 1], and a 4-frame stack.

    Returns:
        Tuple ``(env, is_atari)``.
    """
    if env_name == 'fourrooms':
        return Fourrooms(), False

    env = gym.make(env_name)
    # Detect Atari by the unwrapped environment's class (guarded because the
    # atari extra may not be installed).
    is_atari = hasattr(gym.envs, 'atari') and isinstance(env.unwrapped, gym.envs.atari.atari_env.AtariEnv)
    if is_atari:
        env = AtariPreprocessing(env, grayscale_obs=True, scale_obs=True, terminal_on_life_loss=True)
        env = TransformReward(env, lambda r: np.clip(r, -1, 1))
        env = FrameStack(env, 4)
    return env, is_atari
def to_tensor(obs):
    """Convert an observation (any array-like) to a float32 torch tensor."""
    return torch.from_numpy(np.asarray(obs)).float()
| 1,371 | 24.886792 | 106 | py |
option-critic-pytorch | option-critic-pytorch-master/logger.py | import logging
import os
import time
import numpy as np
from torch.utils.tensorboard import SummaryWriter
class Logger():
    """Tensorboard + console/file logger for option-critic training runs."""

    def __init__(self, logdir, run_name):
        # All artifacts (tensorboard event files + logger.log) live under
        # logdir/run_name; the directory is created on demand.
        self.log_name = logdir + '/' + run_name
        self.tf_writer = None
        self.start_time = time.time()
        self.n_eps = 0  # episodes logged so far; doubles as the tensorboard step

        if not os.path.exists(self.log_name):
            os.makedirs(self.log_name)

        self.writer = SummaryWriter(self.log_name)

        # Mirror log records to both stdout/stderr and a file in the run dir.
        logging.basicConfig(
            level=logging.DEBUG,
            format='%(asctime)s %(message)s',
            handlers=[
                logging.StreamHandler(),
                logging.FileHandler(self.log_name + '/logger.log'),
            ],
            datefmt='%Y/%m/%d %I:%M:%S %p'
        )

    def log_episode(self, steps, reward, option_lengths, ep_steps, epsilon):
        """Record one finished episode: reward, length, and per-option stats.

        `option_lengths` maps option id -> list of run lengths in this episode.
        """
        self.n_eps += 1
        logging.info(f"> ep {self.n_eps} done. total_steps={steps} | reward={reward} | episode_steps={ep_steps} "\
            f"| hours={(time.time()-self.start_time) / 60 / 60:.3f} | epsilon={epsilon:.3f}")
        self.writer.add_scalar(tag="episodic_rewards", scalar_value=reward, global_step=self.n_eps)
        self.writer.add_scalar(tag='episode_lengths', scalar_value=ep_steps, global_step=self.n_eps)

        # Keep track of options statistics
        for option, lens in option_lengths.items():
            # Need better statistics for this one, point average is terrible in this case
            self.writer.add_scalar(tag=f"option_{option}_avg_length", scalar_value=np.mean(lens) if len(lens)>0 else 0, global_step=self.n_eps)
            self.writer.add_scalar(tag=f"option_{option}_active", scalar_value=sum(lens)/ep_steps, global_step=self.n_eps)

    def log_data(self, step, actor_loss, critic_loss, entropy, epsilon):
        """Record per-update training diagnostics.

        `actor_loss`/`critic_loss` may be None (no update performed this step);
        when present they are expected to be scalar tensors (`.item()` is used).
        """
        if actor_loss:
            self.writer.add_scalar(tag="actor_loss", scalar_value=actor_loss.item(), global_step=step)
        if critic_loss:
            self.writer.add_scalar(tag="critic_loss", scalar_value=critic_loss.item(), global_step=step)
        self.writer.add_scalar(tag="policy_entropy", scalar_value=entropy, global_step=step)
        self.writer.add_scalar(tag="epsilon",scalar_value=epsilon, global_step=step)
if __name__=="__main__":
    # Smoke test: log one synthetic episode with fake option statistics.
    logger = Logger(logdir='runs/', run_name='test_model-test_env')
    steps = 200 ; reward = 5 ; option_lengths = {opt: np.random.randint(0,5,size=(5)) for opt in range(5)} ; ep_steps = 50
    # Fix: log_episode requires a fifth `epsilon` argument; the old call
    # omitted it and raised TypeError whenever this smoke test was run.
    logger.log_episode(steps, reward, option_lengths, ep_steps, 0.9)
| 2,511 | 46.396226 | 143 | py |
fastracer-pmam-2022 | fastracer-pmam-2022-main/fastracer/tdebug-llvm/clang/docs/conf.py | # -*- coding: utf-8 -*-
#
# Clang documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 9 20:01:55 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clang'
copyright = u'2007-2015, The Clang Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.7'
# The full version, including alpha/beta/rc tags.
release = '3.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'analyzer']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Clangdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Clang.tex', u'Clang Documentation',
u'The Clang Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = []
# Automatically derive the list of man pages from the contents of the command
# guide subdirectory. This was copied from llvm/docs/conf.py.
# Automatically derive the man_pages entries from the CommandGuide .rst
# files. Each page's first line must read "<name> - <description>" and the
# second line must be an underline of exactly the same length.
basedir = os.path.dirname(__file__)
man_page_authors = u'Maintained by the Clang / LLVM Team (<http://clang.llvm.org>)'
command_guide_subpath = 'CommandGuide'
command_guide_path = os.path.join(basedir, command_guide_subpath)

def _report_error(message):
    # Fix: the original used the Python 2 "print >>sys.stderr" statement,
    # which is a SyntaxError under the Python 3 interpreters modern Sphinx
    # requires; sys.stderr.write works identically on Python 2 and 3.
    sys.stderr.write(message + '\n')

for name in os.listdir(command_guide_path):
    # Ignore non-ReST files and the index page.
    if not name.endswith('.rst') or name in ('index.rst',):
        continue

    # Otherwise, automatically extract the description.
    file_subpath = os.path.join(command_guide_subpath, name)
    with open(os.path.join(command_guide_path, name)) as f:
        title = f.readline().rstrip('\n')
        header = f.readline().rstrip('\n')

        if len(header) != len(title):
            _report_error(
                "error: invalid header in %r (does not match title)" % (
                    file_subpath,))
        if ' - ' not in title:
            _report_error(
                ("error: invalid title in %r "
                 "(expected '<name> - <description>')") % (
                    file_subpath,))

        # Split the name out of the title.
        name, description = title.split(' - ', 1)
        man_pages.append((file_subpath.replace('.rst', ''), name,
                          description, man_page_authors, 1))
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Clang', u'Clang Documentation',
u'The Clang Team', 'Clang', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 9,088 | 32.29304 | 83 | py |
fastracer-pmam-2022 | fastracer-pmam-2022-main/fastracer/tdebug-llvm/clang/docs/analyzer/conf.py | # -*- coding: utf-8 -*-
#
# Clang Static Analyzer documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 2 15:54:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Clang Static Analyzer'
copyright = u'2013-2014, Analyzer Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '3.4'
# The full version, including alpha/beta/rc tags.
release = '3.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'haiku'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'ClangStaticAnalyzerdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ClangStaticAnalyzer.tex', u'Clang Static Analyzer Documentation',
u'Analyzer Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'clangstaticanalyzer', u'Clang Static Analyzer Documentation',
[u'Analyzer Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'ClangStaticAnalyzer', u'Clang Static Analyzer Documentation',
u'Analyzer Team', 'ClangStaticAnalyzer', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 8,026 | 31.497976 | 80 | py |
awpy | awpy-main/docs/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
# -- Imports -----------------------------------------------------------------
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = "awpy"
copyright = "2022, Peter Xenopoulos"
author = "Peter Xenopoulos"
# The short X.Y version
version = ""
# The full version, including alpha/beta/rc tags
release = "1.2.3"
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinx.ext.githubpages",
"sphinx.ext.napoleon",
"sphinx.ext.autodoc",
"sphinx_rtd_theme",
"sphinx.ext.autosectionlabel",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "awpydoc"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "awpy.tex", "awpy Documentation", "Peter Xenopoulos", "manual"),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "awpy", "awpy Documentation", [author], 1)]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"awpy",
"awpy Documentation",
author,
"awpy",
"One line description of project.",
"Miscellaneous",
),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ["search.html"]
# -- Extension configuration -------------------------------------------------
| 5,562 | 28.748663 | 81 | py |
parsing-as-pretraining | parsing-as-pretraining-master/run_token_classifier.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "tree2labels"))
import csv
import logging
import argparse
import random
import tempfile
import subprocess
from tqdm import tqdm, trange
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert import BertForTokenClassification, BertModel
from sklearn.metrics import accuracy_score
from tree2labels.utils import sequence_to_parenthesis
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
class MyBertForTokenClassification(BertForTokenClassification):
    """BERT token-level classifier with an optional BiLSTM head.

    Wraps ``BertModel`` with dropout and a linear projection to
    ``num_labels`` classes per token. When ``use_bilstms`` is set, a
    2-layer bidirectional LSTM (400 units per direction, so 800-dim
    features) is inserted between BERT and the classifier. When
    ``finetune`` is False, BERT's output is detached so gradients only
    flow through the task head.

    forward() returns the token-level cross-entropy loss when ``labels``
    is given (restricted to positions where ``attention_mask == 1`` if a
    mask is provided), otherwise the per-token logits of shape
    [batch_size, sequence_length, num_labels].
    """

    def __init__(self, config, num_labels, finetune, use_bilstms=False):
        super(MyBertForTokenClassification, self).__init__(config)
        self.num_labels = num_labels
        self.bert = BertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.use_bilstms = use_bilstms
        if self.use_bilstms:
            # BiLSTM head: 400 hidden units per direction -> 800-dim output.
            self.lstm = nn.LSTM(config.hidden_size, 400, num_layers=2, batch_first=True,
                                bidirectional=True)
            self.classifier = nn.Linear(800, num_labels)
        else:
            self.classifier = nn.Linear(config.hidden_size, num_labels)
        self.apply(self.init_bert_weights)
        self.finetune = finetune

    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
        encoded, _ = self.bert(input_ids, token_type_ids, attention_mask,
                               output_all_encoded_layers=False)
        if not self.finetune:
            # Frozen-encoder mode: block gradient flow into BERT.
            encoded = encoded.detach()
        if self.use_bilstms:
            encoded, _ = self.lstm(encoded, None)
        logits = self.classifier(self.dropout(encoded))

        if labels is None:
            return logits

        loss_fct = CrossEntropyLoss()
        if attention_mask is not None:
            # Score only real (non-padding) token positions.
            keep = attention_mask.view(-1) == 1
            return loss_fct(logits.view(-1, self.num_labels)[keep],
                            labels.view(-1)[keep])
        return loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
class InputSLExample(object):
    """One sequence-labeling example: a sentence plus per-token labels.

    Attributes:
        guid: Unique identifier for the example.
        text_a: The untokenized sentence text.
        text_b: Always None; kept for API compatibility with pair tasks.
        text_a_list: The sentence split into tokens.
        text_a_postags: POS tag per token.
        labels: Per-token gold labels, or None for unlabeled test data.
    """

    def __init__(self, guid, text_a,
                 text_a_list,
                 text_a_postags, labels=None):
        self.guid = guid
        self.text_a = text_a
        self.text_b = None  # single-sentence task: there is no second segment
        self.text_a_list = text_a_list
        self.text_a_postags = text_a_postags
        self.labels = labels
class InputFeatures(object):
    """Model-ready features for one example.

    Plain record of the wordpiece ids, attention mask, segment ids and
    per-token label ids produced by feature conversion.
    """

    def __init__(self, input_ids, input_mask, segment_ids, labels_ids):
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.labels_ids = labels_ids
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file into a list of row lists (UTF-8)."""
        with open(input_file, "r", encoding='utf-8') as f:
            return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
class SLProcessor(DataProcessor):
    """Processor for PTB formatted as a sequence-labeling seq_lu file."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "dev")

    def get_labels(self, data_dir):
        """Collect the label inventory from the train and dev splits.

        The first three entries are reserved special labels so that their
        ids are stable across runs: [MASK_LABEL], -EOS- and -BOS-.
        """
        train_samples = self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
        dev_samples = self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
        train_labels = [label for sample in train_samples
                        for label in sample.labels]
        dev_labels = [label for sample in dev_samples
                      for label in sample.labels]
        labels = []
        labels.append("[MASK_LABEL]")
        labels.append("-EOS-")
        labels.append("-BOS-")
        train_labels.extend(dev_labels)
        for label in train_labels:
            if label not in labels:
                labels.append(label)
        return labels

    def _preprocess(self, word):
        """Map PTB bracket escapes back to literal parentheses."""
        if word == "-LRB-":
            word = "("
        elif word == "-RRB-":
            word = ")"
        return word

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets.

        `lines` holds one token per row; sentences are separated by empty
        rows. -BOS-/-EOS- boundary rows contribute a token and a PoS tag
        (second-to-last column) but no output label (last column).
        """
        examples = []
        sentences_texts = []
        sentences_postags = []
        sentences_labels = []
        sentences_tokens = []
        sentence, sentence_postags, sentence_labels = [], [], []
        tokens = []
        for l in lines:
            if l != []:
                if l[0] in ["-EOS-", "-BOS-"]:
                    tokens.append(l[0])
                    sentence_postags.append(l[-2])
                else:
                    tokens.append(l[0])
                    sentence.append(self._preprocess(l[0]))
                    sentence_labels.append(l[-1].strip())
                    sentence_postags.append(l[-2])
            else:
                # Empty row: flush the per-sentence buffers.
                sentences_texts.append(" ".join(sentence))
                sentences_labels.append(sentence_labels)
                sentences_postags.append(sentence_postags)
                sentences_tokens.append(tokens)
                sentence, sentence_postags, sentence_labels = [], [], []
                tokens = []
        # Bug fix: the original `assert(len(a), len(b))` asserted a non-empty
        # tuple, which is always true. Compare the lengths explicitly. The
        # second check also referenced the (reset) per-sentence buffer
        # `sentence_postags` instead of the accumulated `sentences_postags`.
        assert len(sentences_labels) == len(sentences_texts)
        assert len(sentences_postags) == len(sentences_texts)
        for guid, (sent, labels) in enumerate(zip(sentences_texts, sentences_labels)):
            examples.append(
                InputSLExample(guid=guid, text_a=sent,
                               text_a_list=sentences_tokens[guid],
                               text_a_postags=sentences_postags[guid],
                               labels=labels))
        return examples
def _valid_wordpiece_indexes(sent, wp_sent):
valid_idxs = []
missing_chars = ""
idx = 0
for wp_idx, wp in enumerate(wp_sent,0):
if sent[idx].startswith(wp) and missing_chars == "":
valid_idxs.append(wp_idx)
if missing_chars == "":
missing_chars = sent[idx][len(wp.replace("##","")):]
else:
missing_chars = missing_chars[len(wp.replace("##","")):]
if missing_chars == "":
idx+=1
return valid_idxs
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer, args):
    """Loads a data file into a list of `InputBatch`s.

    Each example is wordpiece-tokenized and aligned back to its original
    whitespace tokens: `input_mask` is set to 1 only on the wordpiece that
    *starts* an original token (it is NOT the usual padding mask), and every
    continuation wordpiece repeats its token's label so the label sequence
    stays aligned with the wordpiece sequence.
    """
    label_map = {label : i for i, label in enumerate(label_list)}
    label_map_reverse = {i:label for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        # Keep the original whitespace tokenization to align wordpieces later.
        ori_tokens_a = example.text_a.split(" ") if not args.do_lower_case else example.text_a.lower().split(" ")
        tokens_a = tokenizer.tokenize(example.text_a)
        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[:(max_seq_length - 2)]
        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids: 0   0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambigiously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        ori_tokens_a = ["[CLS]"] + ori_tokens_a + ["[SEP]"]
        tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
        segment_ids = [0] * len(tokens)
        if tokens_b:
            tokens += tokens_b + ["[SEP]"]
            segment_ids += [1] * (len(tokens_b) + 1)
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Positions of the wordpieces that begin an original token.
        valid_indexes = _valid_wordpiece_indexes(ori_tokens_a, tokens)
        input_mask = [1 if idtoken in valid_indexes else 0
                      for idtoken, _ in enumerate(tokens)]
        labels_ids = []
        # `i` indexes the original (token-level) labels while iterating
        # over wordpieces.
        i=0
        for idtoken, token in enumerate(tokens):
            if idtoken in valid_indexes:
                if token == "[CLS]":
                    labels_ids.append(label_map["-BOS-"])
                elif token == "[SEP]":
                    labels_ids.append(label_map["-EOS-"])
                else:
                    try:
                        labels_ids.append(label_map[example.labels[i]])
                    except KeyError:
                        # Label unseen in the training inventory: fall back to id 0.
                        labels_ids.append(0)
                    i+=1
            else:
                # Continuation wordpiece: repeat the current token's label
                # (clamped so the index never runs past the label list).
                try:
                    labels_ids.append(label_map[example.labels[min(i, len(example.labels)-1)]])
                except KeyError:
                    labels_ids.append(0)
        # Zero-pad every sequence up to `max_seq_length`.
        padding = [0] * (max_seq_length - len(input_ids))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        labels_ids += padding
        # # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # # tokens are attended to.
        # input_mask = [1] * len(input_ids)
        # # Zero-pad up to the sequence length.
        # padding = [0] * (max_seq_length - len(input_ids))
        # input_ids += padding
        # input_mask += padding
        # segment_ids += padding
        # labels_ids = [label_map[label] for label in example.labels]# label_map[example.labels]
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(labels_ids) == max_seq_length
        # if ex_index < 5:
        #     logger.info("*** Example ***")
        #     logger.info("guid: %s" % (example.guid))
        #     logger.info("tokens: %s" % " ".join(
        #             [str(x) for x in tokens]))
        #     logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
        #     logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        #     logger.info(
        #             "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        #logger.info("label: %s (id = %d)" % (example.labels, labels_ids))
        features.append(
                InputFeatures(input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              labels_ids=labels_ids))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def accuracy(out, labels, mask):
    """Token-level accuracy over the non-padding positions of one sequence.

    Args:
        out: predicted label ids, array with the same shape as `labels`.
        labels: gold label ids; id 0 doubles as padding/ignored.
        mask: binary array (1 = scored position, 0 = ignored).

    Returns:
        Accuracy (via accuracy_score) over positions whose masked gold
        label is non-zero.
    """
    output = out*mask
    gold = labels*mask
    o_filtered, g_filtered = [], []
    # Keep only positions with a real (non-zero) gold label.
    for o, g in zip(output.flatten(), gold.flatten()):
        if g != 0:
            g_filtered.append(g)
            o_filtered.append(o)
    # Bug fix: the original `assert(len(a), len(b))` asserted a two-element
    # tuple, which is always truthy; compare the lengths instead. (The dead
    # `mask = list()` reassignment was removed as well.)
    assert len(o_filtered) == len(g_filtered)
    return accuracy_score(o_filtered, g_filtered)
def posprocess_labels(preds):
    """Repair ill-formed predicted label sequences in place.

    This situation barely happens with LSTM models: interior positions must
    never hold boundary (-BOS-/-EOS-) or NONE labels, the second-to-last
    label of a multi-token sentence must be NONE, and the sequence must end
    with -EOS-. Returns the (mutated) list.
    """
    upper = len(preds) - 2
    for pos in range(1, upper):
        label = preds[pos]
        if label in ("-BOS-", "-EOS-") or label.startswith("NONE"):
            preds[pos] = "1ROOT@S"
    if len(preds) != 3 and not preds[-2].startswith("NONE"):
        preds[-2] = "NONE"
    if preds[-1] != "-EOS-":
        preds[-1] = "-EOS-"
    # Single-token sentence (BOS + token + EOS) predicted as ROOT.
    if len(preds) == 3 and preds[1] == "ROOT":
        preds[1] = "NONE"
    return preds
def evaluate(model, device, logger, processor,data_dir, max_seq_length, tokenizer, label_list,
             args,
             eval_batch_size, output_dir,
             #path_evaluation_script,
             path_gold,
             #path_x2labels,
             test, parsing_paradigm, evaluation_params = False):
    """Run the tagger over the dev (or test) split and score the decoded trees.

    Predictions are written to a ``.seq_lu`` file, converted to trees, and
    scored by shelling out to the external evaluation scripts
    (``conll17_ud_eval.py`` for dependencies, EVALB for constituency).

    Args:
        model: token-classification model; called once with labels (loss)
            and once without (logits) per batch.
        device: torch device batches are moved to.
        logger: logger for progress/result messages.
        processor: data processor providing get_dev/test_examples.
        data_dir: directory holding the dev/test tsv files.
        max_seq_length: maximum wordpiece sequence length.
        tokenizer: BERT tokenizer.
        label_list: label inventory (index == label id).
        args: parsed command-line arguments (forwarded to feature conversion).
        eval_batch_size: batch size for the evaluation DataLoader.
        output_dir: path prefix used to name the emitted output files.
        path_gold: gold file the external scorer compares against.
        test: True to evaluate the test split, False for dev.
        parsing_paradigm: "dependencies" or "constituency".
        evaluation_params: constituency only; pass the COLLINS.prm file to
            the EVALB wrapper when True.

    Returns:
        (eval_loss, eval_accuracy, score, out): score is LAS (dependencies)
        or bracketing F-measure (constituency); out is the raw scorer output.
    """
    if test:
        eval_examples = processor.get_test_examples(data_dir)
    else:
        eval_examples = processor.get_dev_examples(data_dir)
    eval_features = convert_examples_to_features(
        eval_examples, label_list, max_seq_length, tokenizer, args)
    logger.info("***** Running evaluation *****")
    logger.info("  Num examples = %d", len(eval_examples))
    logger.info("  Batch size = %d", eval_batch_size)
    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_label_ids = torch.tensor([f.labels_ids for f in eval_features], dtype=torch.long)
    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
    # Run prediction for full data
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=eval_batch_size)
    label_map_reverse = {i:l for i,l in enumerate(label_list)}
    examples_texts = [example.text_a_list for example in eval_examples]
    examples_postags = [example.text_a_postags for example in eval_examples]
    examples_preds = []
    model.eval()
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        label_ids = label_ids.to(device)
        with torch.no_grad():
            # NOTE(review): tmp_eval_loss is computed but never accumulated
            # into eval_loss, so the returned eval_loss is always 0 — confirm
            # whether loss tracking was dropped intentionally.
            tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
            logits = model(input_ids, segment_ids, input_mask)
        logits = logits.detach().cpu().numpy()
        label_ids = label_ids.to('cpu').numpy()
        masks = input_mask.cpu().numpy()
        outputs = np.argmax(logits, axis=2)
        # Keep only predictions at token-start positions (mask == 1) and map
        # the ids back to label strings.
        for prediction, mask in zip(outputs, masks):
            examples_preds.append([label_map_reverse[element] for element, m in zip(prediction, mask)
                                   if m != 0])
        for idx_out, (o, l) in enumerate(zip(outputs,label_ids)):
            eval_accuracy += accuracy(o, l, masks[idx_out])
        nb_eval_examples += input_ids.size(0)
        nb_eval_steps += 1
    # Dump predictions as token \t postag \t label blocks, one per sentence.
    output_file_name = output_dir+".dev.outputs.txt.seq_lu" if not test else output_dir+".test.outputs.txt.seq_lu"
    with open(output_file_name,"w") as temp_out:
    #with tempfile.NamedTemporaryFile("w", delete=False) as temp_out:
        content = []
        for tokens, postags, preds in zip(examples_texts, examples_postags, examples_preds):
            content.append("\n".join(["\t".join(element) for element in zip(tokens, postags, preds)]))
        temp_out.write("\n\n".join(content))
        temp_out.write("\n\n")
    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_examples
    result = {'eval_loss': eval_loss,
              'eval_accuracy': eval_accuracy}
    output_eval_file = os.path.join(output_dir.rsplit("/",1)[0], "eval_results.txt")
    with open(output_eval_file, "w") as writer:
        logger.info("***** Eval results *****")
        for key in sorted(result.keys()):
            logger.info("  %s = %s", key, str(result[key]))
            writer.write("%s = %s\n" % (key, str(result[key])))
    if parsing_paradigm.lower() == "dependencies":
        # Decode the label file into CoNLL-U, then score it with the shared
        # task evaluation script; LAS is read from its "LAS" output row.
        command = [#"PYTHONPATH="+abspath(join(dirname(__file__), data.dep2labels)),
                   "python",
                   "./dep2labels/decode_output_file.py",
                   #path_x2labels+os.sep+"decode_output_file.py",
                   "--input",
                   temp_out.name,
                   "--output",
                   temp_out.name.replace(".seq_lu","")+".conllu"
                   ]
        p = subprocess.Popen(" ".join(command),stdout=subprocess.PIPE, shell=True)
        out, err = p.communicate()
        #options = "--verbose" if test else ""
        options = "--verbose"
        command = ["python",
                   "./dep2labels/conll17_ud_eval.py",
                   #path_evaluation_script,#
                   path_gold,
                   temp_out.name.replace(".seq_lu","")+".conllu",
                   #temp_out.name+".out"
                   options]
        p = subprocess.Popen(" ".join(command),stdout=subprocess.PIPE, shell=True)
        out, err = p.communicate()
        score = [l for l in out.decode("utf-8").split("\n")
                 if l.startswith("LAS")]
        out = out.decode("utf-8")
        score = float(score[0].strip().split("|")[3])
        # if test:
        #     print (out.decode("utf-8"))
    elif parsing_paradigm.lower() == "constituency":
        # Rebuild (token, postag) pairs and repaired label sequences from the
        # dumped content, convert them to parenthesized trees, then score
        # with the EVALB wrapper; bracketing F-measure is parsed from stdout.
        sentences = [[(line.split("\t")[0],line.split("\t")[1]) for line in sentence.split("\n")]
                     for sentence in content#.split("\n\n")
                     if sentence != ""]
        preds = [posprocess_labels([line.split("\t")[-1] for line in sentence.split("\n")])
                 for sentence in content#.split("\n\n")
                 if sentence != ""]
        parenthesized_trees = sequence_to_parenthesis(sentences,preds)#,None,None,None)
        output_file_name = output_dir+".dev.outputs.txt" if not test else output_dir+".test.outputs.txt"
        with open(output_file_name,"w") as f_out:
            f_out.write("\n".join(parenthesized_trees))
        #with open(output_dir+os.se,"w") as temp_evalb:
        with tempfile.NamedTemporaryFile("w",delete=False) as temp_evalb:
            command = [#"PYTHONPATH="+path_x2labels,
                       "python",
                       #path_x2labels+"/evaluate.py",
                       "./tree2labels/evaluate.py",
                       " --input ",
                       temp_out.name," --gold ",
                       path_gold,
                       " --evalb ", './tree2labels/EVALB/evalb']
                       #path_evaluation_script]
            if evaluation_params:
            #if path_evaluation_params is not None:
                #command.extend(["--evalb_param", path_evaluation_params])#,">",temp_evalb.name]
                command.extend(["--evalb_param", './tree2labels/EVALB/COLLINS.prm'])
            p = subprocess.Popen(" ".join(command),stdout=subprocess.PIPE, shell=True)
            out, err = p.communicate()
            out = out.decode("utf-8")
            score = float([l for l in out.split("\n")
                           if l.startswith("Bracketing FMeasure")][0].split("=")[1])
    else:
        raise NotImplementedError("Unknown parsing paradigm")
    return eval_loss, eval_accuracy, score, out
def main():
    """CLI entry point: fine-tune and/or evaluate a BERT sequence tagger
    whose output labels encode constituency or dependency parse trees.
    """
    parser = argparse.ArgumentParser()
    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--model_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory model will be written.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        # required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_test",
                        action="store_true",
                        help="Whether to run eval on the test set")
    parser.add_argument("--do_lower_case",
                        action='store_true',
                        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--not_finetune", dest="not_finetune", default=False, action="store_true",
                        help="Determine where to finetune BERT (flag to True) or just the output layer (flag set to False)")
    parser.add_argument("--use_bilstms",
                        default=False,
                        action="store_true",
                        help="Further contextualized BERT outputs with BILSTMs")
    parser.add_argument("--evalb_param",
                        default=None,
                        help="[True|False] to indicate whether use the COLLINS.prm parameter file")
    parser.add_argument("--parsing_paradigm",
                        type=str,
                        help="[constituency|dependencies]")
    parser.add_argument("--path_gold_parenthesized",
                        type=str,
                        help="Path to the constituency parenthesized files against which to compute the EVALB script")
    parser.add_argument("--path_gold_conllu",
                        type=str,
                        help="Path to the gold file in conllu formart")
    args = parser.parse_args()
    processors = {"sl_tsv": SLProcessor}
    # Device setup: single/multi-GPU (DataParallel) vs distributed (one GPU
    # per process, selected by --local_rank).
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))
    # The effective per-step batch is split across accumulation steps.
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
    if not args.do_train and not args.do_eval and not args.do_test:
        raise ValueError("At least one of `do_train` or `do_eval` or `do_test` must be True.")
    # if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
    #     raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
    # os.makedirs(args.output_dir, exist_ok=True)
    task_name = args.task_name.lower()
    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))
    processor = processors[task_name]()
    num_labels = len(processor.get_labels(args.data_dir)) #num_labels_task[task_name]
    label_list = processor.get_labels(args.data_dir)
    label_reverse_map = {i:label for i, label in enumerate(label_list)}
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    train_examples = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
    model = MyBertForTokenClassification.from_pretrained(args.bert_model,
              cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
              num_labels=num_labels,
              finetune=not args.not_finetune,
              use_bilstms=args.use_bilstms)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    # Weight decay is applied to all parameters except biases and LayerNorm.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)
    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    if args.do_train:
        train_features = convert_examples_to_features(
            train_examples, label_list, args.max_seq_length, tokenizer, args)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.labels_ids for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        model.train()
        best_dev_evalb = 0
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, segment_ids, input_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear(global_step/num_train_optimization_steps, args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1
            # Evaluate on dev after every epoch and keep the best checkpoint.
            if args.parsing_paradigm.lower() == "constituency":
                path_gold = args.path_gold_parenthesized
                evaluation_params = True if args.evalb_param is not None and args.evalb_param.lower() == "true" else False
            elif args.parsing_paradigm.lower() == "dependencies":
                path_gold = args.path_gold_conllu
                evaluation_params = False
            else:
                raise NotImplementedError("Unknown parsing paradigm")
            dev_loss, dev_acc, dev_eval_score, _ = evaluate(model, device, logger, processor, args.data_dir,
                                                            args.max_seq_length, tokenizer, label_list,
                                                            args,
                                                            args.eval_batch_size, args.model_dir,
                                                            #path_evaluation,
                                                            path_gold,
                                                            #path_x2labels,
                                                            False,
                                                            parsing_paradigm=args.parsing_paradigm.lower(),
                                                            evaluation_params=evaluation_params)
            print ("Current best on the dev set: ", best_dev_evalb)
            if args.parsing_paradigm.lower() == "constituency":
                print ("Using evaluation params file:", evaluation_params)
            if best_dev_evalb < dev_eval_score:
                print ("New best on the dev set: ", dev_eval_score)
                best_dev_evalb = dev_eval_score
                # Save a trained model
                model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
                output_model_file = os.path.join(args.model_dir)
                # output_model_file = os.path.join(args.model_dir, "pytorch_model.bin")
                if args.do_train:
                    print ("Saving the best new model...")
                    torch.save(model_to_save.state_dict(), output_model_file)
            model.train() #If not, following error: cudnn RNN backward can only be called in training mode
    # Load a trained model that you have fine-tuned
    output_model_file = os.path.join(args.model_dir)
    model_state_dict = torch.load(output_model_file)
    model = MyBertForTokenClassification.from_pretrained(args.bert_model,
                                                         state_dict=model_state_dict,
                                                         # cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
                                                         num_labels=num_labels,
                                                         finetune=not args.not_finetune,
                                                         use_bilstms=args.use_bilstms)
    model.to(device)
    # Final evaluation (dev or test) on the reloaded best checkpoint; only
    # rank 0 evaluates in the distributed setting.
    if (args.do_eval or args.do_test) and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        if args.parsing_paradigm.lower() == "constituency":
            evaluation_params = True if args.evalb_param is not None and args.evalb_param.lower() == "true" else False
            path_gold = args.path_gold_parenthesized
        elif args.parsing_paradigm.lower() == "dependencies":
            path_gold = args.path_gold_conllu
            evaluation_params=False
        else:
            raise NotImplementedError("Unknown parsing paradigm")
        loss, acc, eval_score, detailed_score = evaluate(model, device, logger, processor, args.data_dir,
                                                         args.max_seq_length, tokenizer, label_list,
                                                         args,
                                                         args.eval_batch_size,
                                                         args.output_dir,
                                                         path_gold,
                                                         args.do_test,
                                                         parsing_paradigm=args.parsing_paradigm.lower(),
                                                         evaluation_params=evaluation_params)
        print (detailed_score)
# Standard CLI entry point: parse arguments and run training/evaluation.
if __name__ == "__main__":
    main()
| 44,196 | 42.629812 | 139 | py |
parsing-as-pretraining | parsing-as-pretraining-master/tree2labels/baselines.py | '''
It evaluates some traditional baselines using for regular PoS-tagging or chunking
It uses the implementations from the NLTK
TRAINING
PYTHONPATH=. python baselines/baselines.py \
--train /home/david.vilares/Escritorio/Papers/seq2constree/dataset/gold-tags-ptb-train.seqtrees \
--test /home/david.vilares/Escritorio/Papers/seq2constree/dataset/gold-tags-ptb-dev.seqtrees \
--out /home/david.vilares/Escritorio/Papers/seq2constree/baselines/gold-tags-ptb \
--status train
TEST
@author: david.vilares
'''
from argparse import ArgumentParser
from baseline_utils import *
from utils import sequence_to_parenthesis, flat_list, get_enriched_labels_for_retagger
from sklearn_crfsuite import CRF
from sklearn_crfsuite.metrics import flat_f1_score
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction import DictVectorizer
from keras.models import load_model
from keras.utils import np_utils
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Embedding, Input, Flatten
from keras.wrappers.scikit_learn import KerasClassifier
# Fit LabelEncoder with our list of classes
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
# Convert integers to dummy variables (one hot encoded)
import keras
import codecs
import functools
import os
import nltk
import pickle
import tempfile
import time
import os
import numpy as np
import sys
import tensorflow as tf
import random as rn
import uuid
#Uncomment/Comment these lines to determine when and which GPU(s) to use
#os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
#os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Python 2 only: reload(sys) re-exposes the setdefaultencoding hook so every
# implicit str<->unicode conversion defaults to UTF-8 (removed in Python 3).
reload(sys) # Reload does the trick!
sys.setdefaultencoding('UTF8')
# Values accepted by the --status command-line flag.
STATUS_TEST = "test"
STATUS_TRAIN = "train"
# Separator used when composing/splitting combined label fields.
SPLIT_SYMBOL = "~"
if __name__ == '__main__':
arg_parser = ArgumentParser()
arg_parser.add_argument("--train", dest="train", help="Path to the training file", default=None)
arg_parser.add_argument("--test", dest="test", help ="Path to the development/test file", default=None)
# arg_parser.add_argument("--dir", dest="dir", help="Path to the output directory where to store the models", default=None)
arg_parser.add_argument("--model", dest="model", help="Path to the model")
# arg_parser.add_argument("--name", dest="name", help="Path to the name of the file")
arg_parser.add_argument("--baseline", dest="baseline", help="Path to the baseline directory. Options: [emlp|mlp|crf]", default=None)
arg_parser.add_argument("--gold", dest="gold", help="Path to the gold file", default=None)
arg_parser.add_argument("--status", dest="status", help="")
arg_parser.add_argument("--prev_context",dest="prev_context",type=int, default=1)
arg_parser.add_argument("--next_context", dest="next_context",type=int,default=1)
arg_parser.add_argument("--retagger", dest="retagger", default=False, action="store_true")
arg_parser.add_argument("--unary", dest="unary",default=False, action="store_true")
arg_parser.add_argument("--output_unary", dest="output_unary", help="Use together with unary to store the output in the desired file")
arg_parser.add_argument("--output_decode", dest="output_decode", help="Path to store the predicted trees", default="/tmp/trees.txt")
arg_parser.add_argument("--evalb",dest="evalb",help="Path to the script EVALB")
arg_parser.add_argument("--gpu",dest="gpu",default="False")
args = arg_parser.parse_args()
if args.status.lower() == STATUS_TEST:
if args.gpu.lower() == "true":
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
else:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""
#TODO: Change for a temporaryfile, but getting problems with the Chinese encoding
tmpfile = codecs.open(args.output_decode,"w")
with codecs.open(args.test, encoding="utf-8") as f_dev:
content = f_dev.read()
gold_samples = [[ tuple(l.split("\t")) for l in sentence.split("\n")] for sentence in content.split("\n\n")
if sentence != ""]
sentences =[[(word,postag) for word, postag, label in sample] for sample in gold_samples]
#######################################################################
# EVALUATING A PERCEPTRON WITH EMBEDDINGS
#######################################################################
if args.baseline.lower() == "emlp":
batch= 128
new_sentences = sentences
unary_preds = None
init_time = None
with codecs.open(args.model+".emlp.labels") as f:
label_encoder = pickle.load(f)
with codecs.open(args.model+".emlp.features") as f:
vocab,postags,all_labels, hidden_size, prev_context, next_context = pickle.load(f)
emlp_parser = EmbeddedPerceptronTagger(hidden_size, vocab, postags, len(all_labels))
emlp_parser.model = load_model(args.model+".emlp.hdf5")
#Loading and running the retagger, if needed
if args.retagger:
with codecs.open(args.model+"-unary.emlp.labels") as f:
unary_label_encoder = pickle.load(f)
with codecs.open(args.model+"-unary.emlp.features") as f:
re_vocab,re_postags,re_all_labels, re_hidden_size, re_prev_context, re_next_context = pickle.load(f)
emlp_retagger = EmbeddedPerceptronTagger(re_hidden_size, re_vocab, re_postags, len(re_all_labels))
emlp_retagger.model = load_model(args.model+"-unary.emlp.hdf5")
#The time starts here, applying the retagging, if needed
init_time = time.time()
X_test_unary,X_tags_test_unary = emlp_retagger.transform_test(sentences, re_prev_context, re_next_context)
X_test_unary = np.array(X_test_unary)
X_tags_test_unary = np.array(X_tags_test_unary)
unary_preds = emlp_retagger.model.predict_generator(emlp_retagger.samples_test(X_test_unary,X_tags_test_unary,batch),
steps= (X_test_unary.shape[0]/batch)+1)
unary_preds = list(unary_label_encoder.inverse_transform ( unary_preds.argmax(axis=-1) ))
new_sentences, unary_preds = get_samples_retagged(sentences, unary_preds)
#If we are not applying the retagging strategy, we start here to measure the time
if init_time is None:
init_time = time.time()
X_test, X_tags_test = emlp_parser.transform_test(new_sentences, prev_context, next_context)
X_test = np.array(X_test)
X_tags_test = np.array(X_tags_test)
preds = emlp_parser.model.predict_generator(emlp_parser.samples_test(X_test,X_tags_test,batch),
steps= (X_test.shape[0]/batch)+1)
preds = process_labels(new_sentences, preds, label_encoder, args.unary)
preds, unary_preds = format_output(new_sentences, preds, unary_preds, args.retagger)
#######################################################################
# EVALUATING A ONE-HOT VECTOR PERCEPTRON
#######################################################################
elif args.baseline.lower() == "mlp":
new_sentences = sentences
unary_preds = None
batch= 128
init_time = None
# loading_parsing_time = time.time()
with codecs.open(args.model+".mlp.features") as f:
dict_vectorizer, hidden_size, prev_context, next_context = pickle.load(f)
with codecs.open(args.model+".mlp.labels") as f:
label_encoder = pickle.load(f)
mlp_parser = PerceptronTagger.builder()
mlp_parser.model = load_model(args.model+".mlp.hdf5")
# end_loading_parsing_time = time.time() - loading_parsing_time
#Running the retagger, if needed
if args.retagger:
with codecs.open(args.model+"-unary.mlp.features") as f:
dict_unary_vectorizer,re_hidden_size, re_prev_context, re_next_context = pickle.load(f)
with codecs.open(args.model+"-unary.mlp.labels") as f:
unary_label_encoder = pickle.load(f)
mlp_retagger = PerceptronTagger.builder()
mlp_retagger.model = load_model(args.model+"-unary.mlp.hdf5")
init_time = time.time()
X_test_unary = mlp_retagger.transform_test(sentences, re_prev_context, re_next_context)
unary_preds = mlp_retagger.model.predict_generator(mlp_retagger.samples_test(X_test_unary,batch,
dict_unary_vectorizer),
steps= (len(X_test_unary)/batch)+1)
unary_preds = list(unary_label_encoder.inverse_transform ( unary_preds.argmax(axis=-1) ))
new_sentences, unary_preds = get_samples_retagged(sentences, unary_preds)
#If we are not applying the retagging strategy, we start here to measure the time
if init_time is None:
init_time = time.time()
X_test = mlp_parser.transform_test(new_sentences, prev_context,next_context)
preds = mlp_parser.model.predict_generator(mlp_parser.samples_test(X_test,batch, dict_vectorizer),
steps= (len(X_test)/batch)+1)
preds = process_labels(sentences, preds, label_encoder, args.unary)
preds, unary_preds = format_output(new_sentences, preds, unary_preds, args.retagger)
#######################################################################
# EVALUATING A CONDITIONAL RANDOM FIELDS
#######################################################################
elif args.baseline.lower() == "crf":
new_sentences = sentences
unary_preds = None
init_time = None
with codecs.open(args.model+".crf.pickle","rb") as f:
crf_parser, prev_context, next_context = pickle.load(f)
#Running the retagger
if args.retagger:
with codecs.open(args.model+"-unary.crf.pickle","rb") as f:
crf_retagger, re_prev_context, re_next_context= pickle.load(f)
init_time = time.time()
X_test = [sent2features_test(s,re_prev_context, re_next_context) for s in new_sentences]
unary_preds = crf_retagger.predict([x for x in X_test])
unary_preds_aux =[]
for unary_pred in unary_preds:
for element in unary_pred:
unary_preds_aux.append(element)
unary_preds = unary_preds_aux
new_sentences, unary_preds = get_samples_retagged(new_sentences, unary_preds)
if init_time is None:
init_time = time.time()
X_test = [sent2features_test(s,prev_context, next_context) for s in new_sentences]
preds = crf_parser.predict(X_test)
preds_aux =[]
for pred in preds:
for element in pred:
preds_aux.append(element)
preds = preds_aux
preds, unary_preds = format_output(new_sentences, preds, unary_preds, args.retagger)
#Postprocessing the labels for the CRF
for j,pred in enumerate(preds):
for k,p in enumerate(pred):
if (p in ["-EOS-","-BOS-"] or p.startswith("NONE")) and k != 0 and k < len(pred)-1:
pred[k] = "ROOT_S"
else:
raise NotImplementedError
#########################################################################
# DECODING AND POSPROCESS
#########################################################################
if args.unary:
if not os.path.exists(args.output_unary):
with codecs.open(args.output_unary,"w") as f:
for j,sentence in enumerate(sentences):
for (word,postag), retag in zip(sentence,preds[j]):
f.write("\t".join([word,postag,retag])+"\n")
f.write("\n")
else:
raise ValueError("File already exist:", args.output_unary)
exit()
parenthesized_trees = sequence_to_parenthesis(new_sentences,preds)
final_time = time.time()
tmpfile.write("\n".join(parenthesized_trees)+"\n")
os.system(" ".join([args.evalb,args.gold, tmpfile.name]))
gold_labels = [e[2] for e in flat_list(gold_samples)]
if args.retagger:
enriched_preds = get_enriched_labels_for_retagger(preds, unary_preds)
flat_preds = flat_list(enriched_preds)
else:
flat_preds = flat_list(preds)
print "Accuracy",round(accuracy_score(gold_labels, flat_preds),4)
total_time = final_time - init_time
print "Total time:", round(total_time,4)
print "Sents/s",round(len(gold_samples) / (total_time),2)
#########################################################
#
# TRAINING PHASE #
#
#########################################################
elif args.status.lower() == STATUS_TRAIN:
# For reproducibility, if wanted
os.environ['PYTHONHASHSEED'] = '17'
np.random.seed(17)
rn.seed(17)
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
tf.set_random_seed(17)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
###################################################################
# TRAINING AN EMBEDDED PERCEPTRON
###################################################################
if args.baseline.lower() == "emlp":
hidden_size = 100
batch = 8
context_len = 1+args.prev_context+args.next_context
with codecs.open(args.test, encoding="utf-8") as f_dev:
dev_samples = [[ tuple(l.split("\t")) for l in sentence.split("\n")] for sentence in f_dev.read().split("\n\n")
if sentence != ""]
with codecs.open(args.train, encoding="utf-8") as f_train:
train_samples = [[ tuple(l.split("\t")) for l in sentence.split("\n")] for sentence in f_train.read().split("\n\n")
if sentence != ""]
vocab = set([])
postags = set([])
labels = set([])
for g in train_samples:
for word,postag,label in g:
vocab.add(word)
postags.add(postag)
labels.add(label)
all_labels = labels
for g in dev_samples:
for _,_,label in g:
all_labels.add(label)
emlp_tagger = EmbeddedPerceptronTagger(hidden_size, vocab, postags, len(all_labels), context_len = context_len)
X_train, X_tags_train, y_train = emlp_tagger.transform(train_samples, args.prev_context, args.next_context)
X_dev, X_tags_dev, y_dev = emlp_tagger.transform(dev_samples, args.prev_context, args.next_context)
label_encoder = LabelEncoder()
label_encoder.fit(y_train + y_dev)
y_train = label_encoder.transform(y_train)
y_dev = label_encoder.transform(y_dev)
X_train = np.array(X_train)
X_tags_train = np.array(X_tags_train)
X_dev = np.array(X_dev)
X_tags_dev = np.array(X_tags_dev)
with codecs.open(args.model+".emlp.features","wb") as f:
pickle.dump((vocab,postags, all_labels, hidden_size, args.prev_context, args.next_context),f)
with codecs.open(args.model+".emlp.labels","wb") as f:
pickle.dump(label_encoder,f)
checkpoint = keras.callbacks.ModelCheckpoint(args.model+".emlp.hdf5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=4, verbose=0, mode='auto')
emlp_tagger.model.fit_generator(emlp_tagger.samples(X_train,X_tags_train,y_train, batch, label_encoder),
validation_data=emlp_tagger.samples(X_dev,X_tags_dev,y_dev,batch, label_encoder),
steps_per_epoch=(X_train.shape[0]/batch)+1, epochs=30, verbose=1,
validation_steps=(X_dev.shape[0]/batch)+1,
callbacks=[checkpoint,early_stopping])
print emlp_tagger.model.evaluate_generator(emlp_tagger.samples(X_dev,X_tags_dev,y_dev,batch, label_encoder),
steps= (X_dev.shape[0]/batch)+1)
###################################################################
# TRAINING A DISCRETE MLP
###################################################################
elif args.baseline.lower() == "mlp":
hidden_size = 100
batch = 8
with codecs.open(args.test, encoding="utf-8") as f_dev:
dev_samples = [[ tuple(l.split("\t")) for l in sentence.split("\n")] for sentence in f_dev.read().split("\n\n")
if sentence != ""]
with codecs.open(args.train, encoding="utf-8") as f_train:
train_samples = [[ tuple(l.split("\t")) for l in sentence.split("\n")] for sentence in f_train.read().split("\n\n")
if sentence != ""]
print "Len dev samples", len(dev_samples)
print "Len train amples", len(train_samples)
X_train, y_train = PerceptronTagger.builder().transform(train_samples, args.prev_context, args.next_context)
X_dev, y_dev = PerceptronTagger.builder().transform(dev_samples, args.prev_context, args.next_context)
# Fit our DictVectorizer with our set of features
dict_vectorizer = DictVectorizer(sparse=True)
dict_vectorizer.fit(X_train + X_dev)
X_train = dict_vectorizer.transform(X_train)
X_dev = dict_vectorizer.transform(X_dev)
label_encoder = LabelEncoder()
label_encoder.fit(y_train + y_dev)
y_train = label_encoder.transform(y_train)
y_dev = label_encoder.transform(y_dev)
y_train = np_utils.to_categorical(y_train,num_classes=len(label_encoder.classes_))
y_dev = np_utils.to_categorical(y_dev,num_classes=len(label_encoder.classes_))
with codecs.open(args.model+".mlp.features","wb") as f:
pickle.dump((dict_vectorizer, hidden_size, args.prev_context, args.next_context),f)
with codecs.open(args.model+".mlp.labels","wb") as f:
pickle.dump(label_encoder,f)
checkpoint = keras.callbacks.ModelCheckpoint(args.model+".mlp.hdf5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=4, verbose=0, mode='auto')
mlp_tagger = PerceptronTagger(X_train.shape[1],hidden_size, y_train.shape[1])
mlp_tagger.model.fit_generator(mlp_tagger.samples(X_train,y_train, batch),
validation_data=mlp_tagger.samples(X_dev,y_dev,batch),
steps_per_epoch=(X_train.shape[0]/batch)+1, epochs=30, verbose=1,
validation_steps=(X_dev.shape[0]/batch)+1,
callbacks=[checkpoint,early_stopping])
print mlp_tagger.model.evaluate_generator(mlp_tagger.samples(X_dev,y_dev,batch), steps= (X_dev.shape[0]/batch)+1)
###################################################################
# TRAINING A CONDITIONAL RANDOM FIELDS MODEL
###################################################################
elif args.baseline.lower() == "crf":
crf = CRF(
algorithm='lbfgs',
c1=0.1,
c2=0.1,
max_iterations=20,
all_possible_transitions=False,
model_filename=args.model+".crf",
)
with codecs.open(args.test, encoding="utf-8") as f_dev:
dev_samples = [[l.split("\t") for l in sentence.split("\n")] for sentence in f_dev.read().split("\n\n")
if sentence != ""]
with codecs.open(args.train, encoding="utf-8") as f_train:
train_samples = [[l.split("\t") for l in sentence.split("\n")] for sentence in f_train.read().split("\n\n")
if sentence != ""]
X_train = [sent2features(s,args.prev_context, args.next_context) for s in train_samples]
y_train = [sent2labels(s) for s in train_samples]
X_dev = [sent2features(s,args.prev_context, args.next_context) for s in dev_samples]
y_dev = [sent2labels(s) for s in dev_samples]
crf.fit(X_train, y_train)
y_pred = crf.predict(X_dev)
print "F-score",flat_f1_score(y_dev, y_pred, average='weighted')
print "Accuracy:", crf.score(X_dev, y_dev)
with codecs.open(args.model+".crf.pickle","wb") as f:
pickle.dump((crf, args.prev_context, args.next_context), f)
else:
raise NotImplementedError
else:
raise NotImplementedError
| 23,149 | 48.360341 | 138 | py |
parsing-as-pretraining | parsing-as-pretraining-master/tree2labels/baseline_utils.py | from keras.utils import np_utils
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Activation, Embedding, Input, Flatten
from keras.wrappers.scikit_learn import KerasClassifier
# Fit LabelEncoder with our list of classes
from sklearn.preprocessing import LabelEncoder
import keras
import numpy as np
"""
Class that implements a simple feed-forward network with one hidden layer that is used
for sequential labeling using a window both of previous and upcoming context.
It is fed with word and postag embeddings
"""
class EmbeddedPerceptronTagger(object):
    """Feed-forward tagger over a window of word and POS-tag embeddings.

    A single hidden layer maps the concatenated word/POS embeddings of the
    current token plus its context window to a softmax over the labels.
    """

    EMPTY = "-EMPTY-"

    def __init__(self, hidden_neurons, vocab, postags, n_labels, context_len=3):
        """Build the index maps and the Keras model.

        hidden_neurons: size of the hidden dense layer.
        vocab / postags: sets of known word forms / POS tags (mutated in
            place: an "-UNKNOWN-" entry is added to each).
        n_labels: number of output classes.
        context_len: total window size (current token + context tokens).
        """
        # BUG FIX: set.add() returns None, so the original
        # `self.vocab = vocab.add("-UNKNOWN-")` stored None instead of the set.
        vocab.add("-UNKNOWN-")
        postags.add("-UNKNOWN-")
        self.vocab = vocab
        self.postags = postags
        # Index 0 is reserved for the padding/empty slot in both maps.
        self.iforms = {self.EMPTY: 0}
        self.iforms.update({w: i for i, w in enumerate(sorted(vocab), 1)})
        self.ipostags = {self.EMPTY: 0}
        self.ipostags.update({p: i for i, p in enumerate(sorted(postags), 1)})
        self.iforms_reverse = {self.iforms[w]: w for w in self.iforms}

        # Two inputs: word-id window and POS-id window, each embedded and
        # concatenated before the hidden layer. (Renamed to avoid shadowing
        # the `input` builtin.)
        word_input = Input(shape=(context_len,), dtype='float32')
        tag_input = Input(shape=(context_len,), dtype='float32')
        embedding_layer = Embedding(len(self.iforms),
                                    100,
                                    embeddings_initializer="glorot_uniform",
                                    input_length=context_len,
                                    name="e_IW",
                                    trainable=True)(word_input)
        pos_embedding_layer = Embedding(len(self.ipostags),
                                        20,
                                        embeddings_initializer="glorot_uniform",
                                        input_length=context_len,
                                        name="e_IP",
                                        trainable=True)(tag_input)
        x = keras.layers.concatenate([embedding_layer, pos_embedding_layer], axis=-1)
        # Single hidden block (the original looped `for l in range(0, 1)`,
        # i.e. exactly once, and left `dr` unused).
        x = Dense(hidden_neurons)(x)
        x = Dropout(0.5)(x)
        x = Flatten()(x)
        x = Activation('relu')(x)
        preds = Dense(n_labels, activation='softmax')(x)
        self.model = Model(inputs=[word_input, tag_input], outputs=[preds])
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='sgd',
                           metrics=['accuracy'])

    def add_basic_features(self, sent, i, prev_words, next_words):
        """Return (word ids, POS ids) for token *i* plus its context window.

        Out-of-range context slots are filled with the "-EMPTY-" index; tokens
        missing from the index maps fall back to "-UNKNOWN-".
        """
        words = [self.iforms[sent[i][0]] if sent[i][0] in self.iforms else self.iforms["-UNKNOWN-"]]
        # BUG FIX: unknown POS tags were looked up in self.iforms instead of
        # self.ipostags in the original code.
        postags = [self.ipostags[sent[i][1]] if sent[i][1] in self.ipostags else self.ipostags["-UNKNOWN-"]]
        for j in range(1, prev_words + 1):
            iaux = i - j
            # BUG FIX: the original guard was `if i > 0`, which for j > i let
            # iaux go negative and silently wrapped to the end of the sentence.
            if iaux >= 0:
                words.append(self.iforms[sent[iaux][0]] if sent[iaux][0] in self.iforms else self.iforms["-UNKNOWN-"])
                postags.append(self.ipostags[sent[iaux][1]] if sent[iaux][1] in self.ipostags else self.ipostags["-UNKNOWN-"])
            else:
                words.append(self.iforms["-EMPTY-"])
                postags.append(self.ipostags["-EMPTY-"])
        for j in range(1, next_words + 1):
            iaux = i + j
            if iaux < len(sent):
                words.append(self.iforms[sent[iaux][0]] if sent[iaux][0] in self.iforms else self.iforms["-UNKNOWN-"])
                postags.append(self.ipostags[sent[iaux][1]] if sent[iaux][1] in self.ipostags else self.ipostags["-UNKNOWN-"])
            else:
                words.append(self.iforms["-EMPTY-"])
                postags.append(self.ipostags["-EMPTY-"])
        return words, postags

    def transform(self, sentences, previous, next):
        """Featurize labeled sentences.

        sentences: list of sentences, each a list of (word, postag, label).
        Returns (word-id windows, POS-id windows, labels), one entry per token.
        """
        X, X_tags, y = [], [], []
        for sentence in sentences:
            for index, (word, postag, label) in enumerate(sentence):
                word_ids, pos_ids = self.add_basic_features(sentence, index, previous, next)
                X.append(np.array(word_ids))
                X_tags.append(np.array(pos_ids))
                y.append(label)
        return X, X_tags, y

    def transform_test(self, sentences, previous, next):
        """Featurize unlabeled sentences (lists of (word, postag) tuples)."""
        X, X_tags = [], []
        for sentence in sentences:
            for index, (word, postag) in enumerate(sentence):
                word_ids, pos_ids = self.add_basic_features(sentence, index, previous, next)
                X.append(np.array(word_ids))
                X_tags.append(np.array(pos_ids))
        return X, X_tags

    def samples(self, x_source, x_tags_source, y_source, size, label_encoder):
        """Infinite batch generator of ([words, tags], one-hot labels)."""
        while True:
            for i in range(0, x_source.shape[0], size):
                j = min(i + size, x_source.shape[0])
                yield [x_source[i:j], x_tags_source[i:j]], np_utils.to_categorical(
                    y_source[i:j], num_classes=len(label_encoder.classes_))

    def samples_test(self, x_source, x_tags_source, size):
        """Infinite batch generator of [words, tags] inputs (no labels)."""
        while True:
            for i in range(0, x_source.shape[0], size):
                j = min(i + size, x_source.shape[0])
                yield [x_source[i:j], x_tags_source[i:j]]
"""
Class that implements a simple feed-forward network with one hidden layer that is used
for sequential labeling using a window both of previous and upcoming context.
It is fed with word and postag embeddings
"""
class PerceptronTagger(object):
    """Feed-forward tagger over one-hot (DictVectorizer) features.

    Based on the tutorial
    https://techblog.cdiscount.com/part-speech-tagging-tutorial-keras-deep-learning-library/
    """

    def __init__(self, input_dim, hidden_neurons, output_dim):
        """Construct, compile and store the Keras model used to fit/predict."""
        self.model = Sequential([
            Dense(hidden_neurons, input_dim=input_dim),
            Activation('relu'),
            Dropout(0.5),
            Dense(output_dim, activation='softmax')
        ])
        self.model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

    @classmethod
    def builder(cls):
        """Return a placeholder instance whose model is expected to be
        overwritten (e.g. with keras.models.load_model) before use."""
        return PerceptronTagger(1, 1, 1)

    def add_basic_features(self, sent, i, prev_words, next_words):
        """Return the feature dict for token *i* of *sent*.

        sent: list of (word, postag[, label]) tuples.
        prev_words/next_words: context window sizes; out-of-range context
        positions contribute no features.
        """
        word = sent[i][0]
        postag = sent[i][1]
        features = {
            'is_first': i == 0,
            'is_last': i == len(sent) - 1,
            'word.lower=': word.lower(),
            'word[-3:]=': word[-3:],
            'word[-2:]=': word[-2:],
            'word.isupper=': word.isupper(),
            'word.istitle=': word.istitle(),
            'word.isdigit=': word.isdigit(),
            'postag=': postag,
            'postag[:2]=': postag[:2]
        }
        for j in range(1, prev_words + 1):
            iaux = i - j
            # BUG FIX: the original guard `if i > 0` let iaux go negative for
            # j > i, silently taking context features from the *end* of the
            # sentence (Python negative indexing).
            if iaux >= 0:
                word1 = sent[iaux][0]
                postag1 = sent[iaux][1]
                features.update({
                    '-' + str(j) + ':word.lower=': word1.lower(),
                    '-' + str(j) + ':word.istitle=': word1.istitle(),
                    '-' + str(j) + ':word.isupper=': word1.isupper(),
                    '-' + str(j) + ':word.isdigit=': word1.isdigit(),
                    '-' + str(j) + ':postag=': postag1,
                    '-' + str(j) + ':postag[:2]=': postag1[:2]
                })
        for j in range(1, next_words + 1):
            iaux = i + j
            if iaux < len(sent):
                word1 = sent[iaux][0]
                postag1 = sent[iaux][1]
                features.update({
                    '+' + str(j) + ':word.lower=': word1.lower(),
                    '+' + str(j) + ':word.istitle=': word1.istitle(),
                    '+' + str(j) + ':word.isupper=': word1.isupper(),
                    '+' + str(j) + ':word.isdigit=': word1.isdigit(),
                    '+' + str(j) + ':postag=': postag1,
                    '+' + str(j) + ':postag[:2]=': postag1[:2]})
        return features

    def samples(self, x_source, y_source, size):
        """Infinite batch generator over a sparse matrix and dense labels."""
        while True:
            for i in range(0, x_source.shape[0], size):
                j = min(i + size, x_source.shape[0])
                yield x_source[i:j].toarray(), y_source[i:j]

    def samples_test(self, x_source, size, dict_vectorizer):
        """Infinite batch generator that vectorizes raw feature dicts on the fly."""
        while True:
            for i in range(0, len(x_source), size):
                j = min(i + size, len(x_source))
                yield dict_vectorizer.transform(x_source[i:j]).toarray()

    def transform(self, sentences, previous, next):
        """Featurize labeled sentences: returns (feature dicts, labels),
        one entry per token."""
        X, y = [], []
        for sentence in sentences:
            for index, (word, postag, label) in enumerate(sentence):
                X.append(self.add_basic_features(sentence, index, previous, next))
                y.append(label)
        return X, y

    def transform_test(self, sentences, previous, next):
        """Featurize unlabeled ((word, postag)) sentences: returns the list of
        per-token feature dicts. (Unused `y` accumulator removed.)"""
        X = []
        for sentence in sentences:
            for index, (word, postag) in enumerate(sentence):
                X.append(self.add_basic_features(sentence, index, previous, next))
        return X
def word2features(sent, i, prev_words, next_words):
    """Return the CRF feature list for token *i* of *sent*.

    sent: list of (word, postag[, label]) tuples.
    prev_words/next_words: how many context tokens to featurize on each side;
    out-of-range slots yield '-j:BOS' / '+j:EOS' marker features instead.
    """
    word = sent[i][0]
    postag = sent[i][1]
    features = [
        'bias',
        'is_first=%s' % str(i == 0),
        'is_last=%s' % str(i == len(sent) - 1),
        'word.lower=' + word.lower(),
        'word[-3:]=' + word[-3:],
        'word[-2:]=' + word[-2:],
        'word.isupper=%s' % word.isupper(),
        'word.istitle=%s' % word.istitle(),
        'word.isdigit=%s' % word.isdigit(),
        'postag=' + postag,
        'postag[:2]=' + postag[:2],
    ]
    for j in range(1, prev_words + 1):
        iaux = i - j
        # BUG FIX: the original guard `if i > 0` let iaux go negative for
        # j > i, pulling features from the end of the sentence instead of
        # emitting the BOS marker.
        if iaux >= 0:
            word1 = sent[iaux][0]
            postag1 = sent[iaux][1]
            features.extend([
                '-' + str(j) + ':word.lower=' + word1.lower(),
                '-' + str(j) + ':word.istitle=%s' % word1.istitle(),
                '-' + str(j) + ':word.isupper=%s' % word1.isupper(),
                '-' + str(j) + ':word.isdigit=%s' % word1.isdigit(),
                '-' + str(j) + ':postag=' + postag1,
                '-' + str(j) + ':postag[:2]=' + postag1[:2],
            ])
        else:
            features.append('-' + str(j) + ':BOS')
    for j in range(1, next_words + 1):
        iaux = i + j
        if iaux < len(sent):
            word1 = sent[iaux][0]
            postag1 = sent[iaux][1]
            features.extend([
                '+' + str(j) + ':word.lower=' + word1.lower(),
                '+' + str(j) + ':word.istitle=%s' % word1.istitle(),
                '+' + str(j) + ':word.isupper=%s' % word1.isupper(),
                '+' + str(j) + ':word.isdigit=%s' % word1.isdigit(),
                '+' + str(j) + ':postag=' + postag1,
                '+' + str(j) + ':postag[:2]=' + postag1[:2],
            ])
        else:
            features.append('+' + str(j) + ':EOS')
    return features
def sent2features(sent, ngram_prev, ngram_next):
    """Featurize every token of *sent* for CRF training."""
    return [word2features(sent, position, ngram_prev, ngram_next)
            for position in range(len(sent))]


def sent2features_test(sent, ngram_prev, ngram_next):
    """Featurize every token of *sent* at prediction time (same features)."""
    return [word2features(sent, position, ngram_prev, ngram_next)
            for position in range(len(sent))]
def sent2labels(sent):
    """Extract the gold label column from a (word, postag, label) sentence."""
    labels = []
    for _token, _postag, label in sent:
        labels.append(label)
    return labels
def backoff_tagger(train_sents, tagger_classes, backoff=None):
    """Chain taggers so each new one backs off to the previously built one."""
    current = backoff
    for tagger_cls in tagger_classes:
        current = tagger_cls(train_sents, backoff=current)
    return current
"""
Prepares the sentences, previously processed by an leaf unary chain tagger, for
the sequence labeling parser
@param sentences: A list of list of tuples (word,postag) for each sentence
@param unary_preds: A list of unary predictions
"""
def get_samples_retagged(sentences, unary_preds):
    """Merge leaf-unary-chain predictions into the POS tags of each sentence.

    sentences: list of sentences, each a list of (word, postag) tuples.
    unary_preds: flat list with one prediction per token (same order).
    Returns (retagged sentences, flat list of effective tags): a token keeps
    its POS tag when its prediction is "-EMPTY-" (or it is a -BOS-/-EOS-
    dummy); otherwise the tag becomes "<prediction>+<postag>".
    """
    retagged_sentences = []
    flat_tags = []
    token_idx = 0
    for sentence in sentences:
        retagged = []
        for word, postag in sentence:
            if unary_preds[token_idx] == "-EMPTY-" or word in ["-BOS-", "-EOS-"]:
                flat_tags.append(postag)
                retagged.append((word, postag))
            else:
                merged_tag = unary_preds[token_idx] + "+" + postag
                flat_tags.append(merged_tag)
                retagged.append((word, merged_tag))
            token_idx += 1
        retagged_sentences.append(retagged)
    return retagged_sentences, flat_tags
"""
It changes to a predefined valid label missclassifications into the -BOS-, -EOS-
and NONE_X classes that occur in the middle on the sentence (and therefore they are not valid).
This happens marginally, but anyway we need to deal with it.
@returns A list of valid predictions
"""
def process_labels(sentences, preds, label_encoder, unaries):
# Convert raw classifier scores into a flat list of valid label strings.
# sentences: list of token lists (used only for lengths/positions);
# preds: 2-D score array (tokens x classes); label_encoder: fitted sklearn
# LabelEncoder; unaries: True when decoding leaf unary chains (no cleanup).
if not unaries:
# Encoded ids of the special labels that are only legal at sentence edges.
dummy_eos_labels = label_encoder.transform(["-EOS-"])
dummy_bos_labels = label_encoder.transform(["-BOS-"])
dummy_none_labels = set(label_encoder.transform([e for e in list(label_encoder.classes_)
if e.startswith("NONE")]))
#TODO: Workaround. This can be avoided if for sentences of length one we generate NONE and not ROOT,
#which should make perfectly sense
root_retagger = label_encoder.transform(["ROOT"]) if "ROOT" in label_encoder.classes_ else -1
none_label = label_encoder.transform(["NONE"])
# Default replacement label; the treebank determines whether the root
# nonterminal is ROOT_S or ROOT_IP.
try:
root_label = label_encoder.transform(["ROOT_S"])[0]
except ValueError:
root_label = label_encoder.transform(["ROOT_IP"])[0]
# Flat-token positions where -BOS-/-EOS-/NONE* may legally occur.
valid_none_indexes = set([])
valid_eos_indexes = set([])
valid_bos_indexes = set([])
i = 0
for s in sentences:
valid_bos_indexes.add(i)
valid_eos_indexes.add(i+len(s)-1)
valid_none_indexes.add(i+len(s)-2)
i+=len(s)
preds = preds.argmax(axis=-1)
# Replace special labels predicted at positions where they cannot occur.
for j,pred in enumerate(preds):
if pred in dummy_eos_labels and j not in valid_eos_indexes:
preds[j] = root_label
elif pred in dummy_bos_labels and j not in valid_bos_indexes:
preds[j] = root_label
elif pred in dummy_none_labels and j not in valid_none_indexes:
preds[j] = root_label
#TODO: This is currently needed as a workaround for the retagging strategy and sentences of length one
elif preds[j] == root_retagger:
preds[j] = none_label
else:
# Unary decoding: take the argmax with no position-based cleanup.
preds = preds.argmax(axis=-1)
preds = list(label_encoder.inverse_transform(preds))
return preds
def format_output(sentences, preds, unary_preds, retagger):
    """Regroup flat per-token predictions into per-sentence lists.

    When *retagger* is true the flat unary predictions are regrouped the same
    way; otherwise the second return value is None.
    """
    grouped_preds = []
    offset = 0
    for sentence in sentences:
        grouped_preds.append(preds[offset:offset + len(sentence)])
        offset += len(sentence)
    if retagger:
        grouped_unary = []
        offset = 0
        for sentence in sentences:
            grouped_unary.append(unary_preds[offset:offset + len(sentence)])
            offset += len(sentence)
        return grouped_preds, grouped_unary
    return grouped_preds, None
| 17,773 | 35.052738 | 140 | py |
parsing-as-pretraining | parsing-as-pretraining-master/NCRFpp/main.py | # -*- coding: utf-8 -*-
# @Author: Jie
# @Date: 2017-06-15 14:11:08
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2019-01-14 16:09:16
from __future__ import print_function
import time
import sys
import argparse
import random
import torch
import gc
import torch.nn as nn
import torch.optim as optim
import numpy as np
import os
import tempfile
import subprocess
from subprocess import Popen, PIPE
from utils.metric import get_ner_fmeasure
from model.seqlabel import SeqLabel
from model.sentclassifier import SentClassifier
from utils.data import Data
from os.path import abspath, dirname, join
try:
import cPickle as pickle
except ImportError:
import pickle
# Fix all RNG seeds (Python, PyTorch, NumPy) so runs are reproducible.
seed_num = 17
random.seed(seed_num)
torch.manual_seed(seed_num)
np.random.seed(seed_num)
def data_initialization(data):
    """Build the data alphabets from the train/dev/test corpora and freeze them."""
    data.initial_feature_alphabets()
    for corpus_dir in (data.train_dir, data.dev_dir, data.test_dir):
        data.build_alphabet(corpus_dir)
    data.fix_alphabet()
def predict_check(pred_variable, gold_variable, mask_variable, sentence_classification=False):
    """Count correctly predicted tokens (or sentences) in a batch.

    pred_variable / gold_variable: (batch_size, sent_len) tensors with the
    predicted and gold tag ids; mask_variable masks padding positions.
    Returns (right_token, total_token).
    """
    pred = pred_variable.cpu().data.numpy()
    gold = gold_variable.cpu().data.numpy()
    mask = mask_variable.cpu().data.numpy()
    correct = (pred == gold)
    if sentence_classification:
        # One prediction per sentence; first dimension is the batch size.
        return np.sum(correct), correct.shape[0]
    # Only count positions where the mask is on (real tokens, not padding).
    return np.sum(correct * mask), mask.sum()
def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover, sentence_classification=False):
    """Map predicted/gold tag-id tensors back to label strings.

    pred_variable / gold_variable: (batch_size, sent_len) tensors of tag ids;
    mask_variable masks padding; word_recover restores the original
    (pre-sorting) instance order. Returns (pred_label, gold_label).
    """
    # Undo the length-sort so labels line up with the original instances.
    pred_variable = pred_variable[word_recover]
    gold_variable = gold_variable[word_recover]
    mask_variable = mask_variable[word_recover]
    if sentence_classification:
        pred_ids = pred_variable.cpu().data.numpy().tolist()
        gold_ids = gold_variable.cpu().data.numpy().tolist()
        pred_label = [label_alphabet.get_instance(idx) for idx in pred_ids]
        gold_label = [label_alphabet.get_instance(idx) for idx in gold_ids]
        return pred_label, gold_label
    seq_len = gold_variable.size(1)
    mask = mask_variable.cpu().data.numpy()
    pred_ids = pred_variable.cpu().data.numpy()
    gold_ids = gold_variable.cpu().data.numpy()
    pred_label, gold_label = [], []
    for row in range(mask.shape[0]):
        # Keep only unmasked (real) token positions.
        pred_seq = [label_alphabet.get_instance(pred_ids[row][col]) for col in range(seq_len) if mask[row][col] != 0]
        gold_seq = [label_alphabet.get_instance(gold_ids[row][col]) for col in range(seq_len) if mask[row][col] != 0]
        assert (len(pred_seq) == len(gold_seq))
        pred_label.append(pred_seq)
        gold_label.append(gold_seq)
    return pred_label, gold_label
def recover_nbest_label(pred_variable, mask_variable, label_alphabet, word_recover):
    """Map n-best tag-id predictions back to label strings.

    pred_variable: (batch_size, sent_len, nbest) tensor of tag ids;
    mask_variable masks padding; word_recover restores the original
    instance order.
    Returns a nested list: [batch_size][nbest][sequence length].
    """
    # Undo the length-sort so rows line up with the original instances.
    pred_variable = pred_variable[word_recover]
    mask_variable = mask_variable[word_recover]
    seq_len = pred_variable.size(1)
    nbest = pred_variable.size(2)
    mask = mask_variable.cpu().data.numpy()
    pred_tag = pred_variable.cpu().data.numpy()
    pred_label = []
    for row in range(mask.shape[0]):
        candidates = []
        for rank in range(nbest):
            candidates.append([label_alphabet.get_instance(pred_tag[row][col][rank])
                               for col in range(seq_len) if mask[row][col] != 0])
        pred_label.append(candidates)
    return pred_label
def lr_decay(optimizer, epoch, decay_rate, init_lr):
    """Inverse-time decay: set every param group's lr to init_lr/(1+decay_rate*epoch)."""
    decayed_lr = init_lr / (1 + decay_rate * epoch)
    print(" Learning rate is set as:", decayed_lr)
    for param_group in optimizer.param_groups:
        param_group['lr'] = decayed_lr
    return optimizer
def evaluate(data, model, name, nbest=None):
"""Run the model over one data split and score the predictions.
data: project Data object with instances and hyper-parameters;
model: trained SeqLabel/SentClassifier; name: 'train'/'dev'/'test'/'raw';
nbest: if set (sequence labeling only), also decode the n best tag paths.
Returns (speed, acc, p, r, f, pred_results, pred_scores).
"""
if name == "train":
instances = data.train_Ids
instances_texts = data.train_texts
elif name == "dev":
instances = data.dev_Ids
instances_texts = data.dev_texts
elif name == 'test':
instances = data.test_Ids
instances_texts = data.test_texts
elif name == 'raw':
instances = data.raw_Ids
instances_texts = data.raw_texts
else:
print("Error: wrong evaluate name,", name)
exit(1)
right_token = 0
whole_token = 0
nbest_pred_results = []
pred_scores = []
pred_results = []
gold_results = []
## set model in eval model
model.eval()
batch_size = data.HP_batch_size
start_time = time.time()
train_num = len(instances)
total_batch = train_num//batch_size+1
for batch_id in range(total_batch):
start = batch_id*batch_size
end = (batch_id+1)*batch_size
# Clamp the final batch to the number of instances.
if end > train_num:
end = train_num
instance = instances[start:end]
instance_texts = instances_texts[start:end]
if not instance:
continue
# Build the padded batch tensors (if_train=False for evaluation).
batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask, batch_word_text = batchify_with_label(instance, instance_texts, data.HP_gpu, False, data.sentence_classification)
if nbest and not data.sentence_classification:
scores, nbest_tag_seq = model.decode_nbest(batch_word,batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask, nbest)
nbest_pred_result = recover_nbest_label(nbest_tag_seq, mask, data.label_alphabet, batch_wordrecover)
nbest_pred_results += nbest_pred_result
pred_scores += scores[batch_wordrecover].cpu().data.numpy().tolist()
## select the best sequence to evalurate
tag_seq = nbest_tag_seq[:,:,0]
else:
tag_seq = model(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask, batch_word_text)
pred_label, gold_label = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_wordrecover, data.sentence_classification)
pred_results += pred_label
gold_results += gold_label
decode_time = time.time() - start_time
speed = len(instances)/decode_time
acc, p, r, f = get_ner_fmeasure(gold_results, pred_results, data.tagScheme)
if nbest and not data.sentence_classification:
return speed, acc, p, r, f, nbest_pred_results, pred_scores
return speed, acc, p, r, f, pred_results, pred_scores
def batchify_with_label(input_batch_list, input_text_batch_list, gpu, if_train=True, sentence_classification=False):
    """Dispatch batch construction to the sentence-classification or
    sequence-labeling variant."""
    if sentence_classification:
        batchify = batchify_sentence_classification_with_label
    else:
        batchify = batchify_sequence_labeling_with_label
    return batchify(input_batch_list, input_text_batch_list, gpu, if_train)
def batchify_sequence_labeling_with_label(input_batch_list, input_text_batch_list, gpu, if_train=True):
    """
    input: list of words, chars and labels, various length. [[words, features, chars, labels],[words, features, chars,labels],...]
        words: word ids for one sentence. (batch_size, sent_len)
        features: features ids for one sentence. (batch_size, sent_len, feature_num)
        chars: char ids for one sentence, various length. (batch_size, sent_len, each_word_length)
        labels: label ids for one sentence. (batch_size, sent_len)
    output:
        zero padding for word and char, with their batch length
        word_seq_tensor: (batch_size, max_sent_len) Variable
        feature_seq_tensors: [(batch_size, max_sent_len),...] list of Variable
        word_seq_lengths: (batch_size,1) Tensor
        char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable
        char_seq_lengths: (batch_size*max_sent_len,1) Tensor
        char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order
        label_seq_tensor: (batch_size, max_sent_len)
        mask: (batch_size, max_sent_len)
        tokens: raw token-string lists, reordered consistently with the tensors
    """
    batch_size = len(input_batch_list)
    words = [sent[0] for sent in input_batch_list]
    tokens = [sent[0] for sent in input_text_batch_list]
    features = [np.asarray(sent[1]) for sent in input_batch_list]
    feature_num = len(features[0][0])
    chars = [sent[2] for sent in input_batch_list]
    labels = [sent[3] for sent in input_batch_list]
    word_seq_lengths = torch.LongTensor(list(map(len, words)))
    max_seq_len = word_seq_lengths.max().item()
    word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
    label_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
    feature_seq_tensors = []
    for idx in range(feature_num):
        feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())
    mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).byte()
    # Copy each (still unsorted) sentence into the left-aligned padded tensors.
    for idx, (seq, label, seqlen) in enumerate(zip(words, labels, word_seq_lengths)):
        seqlen = seqlen.item()
        word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
        label_seq_tensor[idx, :seqlen] = torch.LongTensor(label)
        mask[idx, :seqlen] = torch.Tensor([1]*seqlen)
        for idy in range(feature_num):
            feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])
    # Sort sentences by length (descending), as required by
    # pack_padded_sequence downstream.
    word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
    word_seq_tensor = word_seq_tensor[word_perm_idx]
    # BUG FIX: permute the raw token lists with the *same* permutation that
    # reordered the tensors.  The previous ``sorted(tokens, key=len,
    # reverse=True)`` only coincides with torch.sort's permutation when all
    # sentence lengths are distinct (torch.sort makes no stability
    # guarantee), so equal-length sentences could end up paired with the
    # wrong token strings.
    tokens = [tokens[i] for i in word_perm_idx.tolist()]
    for idx in range(feature_num):
        feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]
    label_seq_tensor = label_seq_tensor[word_perm_idx]
    mask = mask[word_perm_idx]
    ### deal with char
    # pad_chars (batch_size, max_seq_len)
    pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]
    length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
    max_word_len = max(map(max, length_list))
    char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()
    char_seq_lengths = torch.LongTensor(length_list)
    for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):
        for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):
            char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
    # Flatten chars to (batch*max_seq_len, max_word_len) and sort the words
    # by length so the character encoder can pack its input as well.
    char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)
    char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)
    char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)
    char_seq_tensor = char_seq_tensor[char_perm_idx]
    # Inverse permutations let downstream code restore the original orders.
    _, char_seq_recover = char_perm_idx.sort(0, descending=False)
    _, word_seq_recover = word_perm_idx.sort(0, descending=False)
    if gpu:
        word_seq_tensor = word_seq_tensor.cuda()
        for idx in range(feature_num):
            feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()
        word_seq_lengths = word_seq_lengths.cuda()
        word_seq_recover = word_seq_recover.cuda()
        label_seq_tensor = label_seq_tensor.cuda()
        char_seq_tensor = char_seq_tensor.cuda()
        char_seq_recover = char_seq_recover.cuda()
        mask = mask.cuda()
    return word_seq_tensor,feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask, tokens
def batchify_sentence_classification_with_label(input_batch_list, input_text_batch_list, gpu, if_train=True):
    """
    input: list of words, chars and labels, various length. [[words, features, chars, labels],[words, features, chars,labels],...]
        words: word ids for one sentence. (batch_size, sent_len)
        features: features ids for one sentence. (batch_size, feature_num), each sentence has one set of feature
        chars: char ids for on sentences, various length. (batch_size, sent_len, each_word_length)
        labels: label ids for one sentence. (batch_size,), each sentence has one set of feature
    output:
        zero padding for word and char, with their batch length
        word_seq_tensor: (batch_size, max_sent_len) Variable
        feature_seq_tensors: [(batch_size,), ... ] list of Variable
        word_seq_lengths: (batch_size,1) Tensor
        char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable
        char_seq_lengths: (batch_size*max_sent_len,1) Tensor
        char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order
        label_seq_tensor: (batch_size, )
        mask: (batch_size, max_sent_len)

    NOTE(review): ``input_text_batch_list`` is accepted but never used here
    (unlike the sequence-labeling variant, no raw tokens are returned).
    """
    batch_size = len(input_batch_list)
    words = [sent[0] for sent in input_batch_list]
    features = [np.asarray(sent[1]) for sent in input_batch_list]
    # One feature set per sentence, so feature_num is the length of the
    # per-sentence feature vector (per-token in the labeling variant).
    feature_num = len(features[0])
    chars = [sent[2] for sent in input_batch_list]
    labels = [sent[3] for sent in input_batch_list]
    word_seq_lengths = torch.LongTensor(list(map(len, words)))
    max_seq_len = word_seq_lengths.max().item()
    word_seq_tensor = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).long()
    label_seq_tensor = torch.zeros((batch_size, ), requires_grad = if_train).long()
    feature_seq_tensors = []
    for idx in range(feature_num):
        feature_seq_tensors.append(torch.zeros((batch_size, max_seq_len),requires_grad = if_train).long())
    mask = torch.zeros((batch_size, max_seq_len), requires_grad = if_train).byte()
    # The zero label tensor created above is immediately replaced here.
    label_seq_tensor = torch.LongTensor(labels)
    # exit(0)
    # Copy each (still unsorted) sentence into the left-aligned padded tensors.
    for idx, (seq, seqlen) in enumerate(zip(words, word_seq_lengths)):
        seqlen = seqlen.item()
        word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq)
        mask[idx, :seqlen] = torch.Tensor([1]*seqlen)
        # NOTE(review): ``features[idx][:,idy]`` indexes a 2-D array, but the
        # docstring above says each sentence carries a flat (feature_num,)
        # vector -- confirm the expected feature layout before relying on
        # this branch.
        for idy in range(feature_num):
            feature_seq_tensors[idy][idx,:seqlen] = torch.LongTensor(features[idx][:,idy])
    # Sort sentences by length (descending) so downstream packing works.
    word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True)
    word_seq_tensor = word_seq_tensor[word_perm_idx]
    for idx in range(feature_num):
        feature_seq_tensors[idx] = feature_seq_tensors[idx][word_perm_idx]
    label_seq_tensor = label_seq_tensor[word_perm_idx]
    mask = mask[word_perm_idx]
    ### deal with char
    # pad_chars (batch_size, max_seq_len)
    pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))]
    length_list = [list(map(len, pad_char)) for pad_char in pad_chars]
    max_word_len = max(map(max, length_list))
    char_seq_tensor = torch.zeros((batch_size, max_seq_len, max_word_len), requires_grad = if_train).long()
    char_seq_lengths = torch.LongTensor(length_list)
    for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)):
        for idy, (word, wordlen) in enumerate(zip(seq, seqlen)):
            # print len(word), wordlen
            char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word)
    # Flatten chars to (batch*max_seq_len, max_word_len) and sort the words
    # by length so the character encoder can pack its input as well.
    char_seq_tensor = char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1)
    char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,)
    char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True)
    char_seq_tensor = char_seq_tensor[char_perm_idx]
    # Inverse permutations let downstream code restore the original orders.
    _, char_seq_recover = char_perm_idx.sort(0, descending=False)
    _, word_seq_recover = word_perm_idx.sort(0, descending=False)
    if gpu:
        word_seq_tensor = word_seq_tensor.cuda()
        for idx in range(feature_num):
            feature_seq_tensors[idx] = feature_seq_tensors[idx].cuda()
        word_seq_lengths = word_seq_lengths.cuda()
        word_seq_recover = word_seq_recover.cuda()
        label_seq_tensor = label_seq_tensor.cuda()
        char_seq_tensor = char_seq_tensor.cuda()
        char_seq_recover = char_seq_recover.cuda()
        mask = mask.cuda()
    return word_seq_tensor,feature_seq_tensors, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask
def train(data):
    """Train a SeqLabel / SentClassifier model driven by the ``data`` config.

    Saves the data settings next to the model, runs ``data.HP_iteration``
    epochs of shuffled mini-batch training, evaluates on dev after every
    epoch (directly, or via the external EVALB / CoNLL-UD scripts when
    ``data.optimize_with_evalb`` / ``data.optimize_with_las`` is set),
    checkpoints the model whenever the dev score improves, and reports test
    scores each epoch.
    """
    print("Training model...")
    data.show_data_summary()
    save_data_name = data.model_dir +".dset"
    data.save(save_data_name)
    if data.sentence_classification:
        model = SentClassifier(data)
    else:
        model = SeqLabel(data)
    print (model)
    # loss_function = nn.NLLLoss()
    # Optimizer is chosen by name from the config.
    if data.optimizer.lower() == "sgd":
        optimizer = optim.SGD(model.parameters(), lr=data.HP_lr, momentum=data.HP_momentum,weight_decay=data.HP_l2)
    elif data.optimizer.lower() == "adagrad":
        optimizer = optim.Adagrad(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
    elif data.optimizer.lower() == "adadelta":
        optimizer = optim.Adadelta(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
    elif data.optimizer.lower() == "rmsprop":
        optimizer = optim.RMSprop(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
    elif data.optimizer.lower() == "adam":
        optimizer = optim.Adam(model.parameters(), lr=data.HP_lr, weight_decay=data.HP_l2)
    else:
        print("Optimizer illegal: %s"%(data.optimizer))
        exit(1)
    best_dev = -10
    # data.HP_iteration = 1
    ## start training
    for idx in range(data.HP_iteration):
        epoch_start = time.time()
        temp_start = epoch_start
        print("Epoch: %s/%s" %(idx,data.HP_iteration))
        # NOTE: exact-case check, so decay only applies when the config says
        # "SGD" (not "sgd").
        if data.optimizer == "SGD":
            optimizer = lr_decay(optimizer, idx, data.HP_lr_decay, data.HP_lr)
        instance_count = 0
        sample_id = 0
        sample_loss = 0
        total_loss = 0
        right_token = 0
        whole_token = 0
        # Shuffle ids and raw texts together so they stay aligned.
        train_data = list(zip(data.train_Ids, data.train_texts))
        random.shuffle(train_data)
        data.train_Ids, data.train_texts = zip(*train_data)
        model.train()
        model.zero_grad()
        batch_size = data.HP_batch_size
        batch_id = 0
        train_num = len(data.train_Ids)
        total_batch = train_num//batch_size+1
        for batch_id in range(total_batch):
            start = batch_id*batch_size
            end = (batch_id+1)*batch_size
            if end >train_num:
                end = train_num
            instance = data.train_Ids[start:end]
            instance_texts = data.train_texts[start:end]
            if not instance:
                continue
            batch_word, batch_features, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask, batch_word_text = batchify_with_label(instance, instance_texts , data.HP_gpu, True, data.sentence_classification)
            instance_count += 1
            loss, tag_seq = model.neg_log_likelihood_loss(batch_word, batch_features, batch_wordlen, batch_char, batch_charlen, batch_charrecover, batch_label, mask, batch_word_text)
            right, whole = predict_check(tag_seq, batch_label, mask, data.sentence_classification)
            right_token += right
            whole_token += whole
            # print("loss:",loss.item())
            sample_loss += loss.item()
            total_loss += loss.item()
            # Progress log roughly every 500 instances; sample_loss resets.
            if end%500 == 0:
                temp_time = time.time()
                temp_cost = temp_time - temp_start
                temp_start = temp_time
                print(" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))
                if sample_loss > 1e8 or str(sample_loss) == "nan":
                    print("ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....")
                    # exit(1)
                sys.stdout.flush()
                sample_loss = 0
            loss.backward()
            optimizer.step()
            model.zero_grad()
        temp_time = time.time()
        temp_cost = temp_time - temp_start
        print(" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f"%(end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))
        epoch_finish = time.time()
        epoch_cost = epoch_finish - epoch_start
        print("Epoch: %s training finished. Time: %.2fs, speed: %.2fst/s, total loss: %s"%(idx, epoch_cost, train_num/epoch_cost, total_loss))
        print("totalloss:", total_loss)
        if total_loss > 1e8 or str(total_loss) == "nan":
            print("ERROR: LOSS EXPLOSION (>1e8) ! PLEASE SET PROPER PARAMETERS AND STRUCTURE! EXIT....")
            #exit(1)
        # continue
        speed, acc, p, r, f, pred_results, pred_scores = evaluate(data, model, "dev")
        dev_finish = time.time()
        dev_cost = dev_finish - epoch_finish
        # Dev scoring: either shell out to EVALB (constituency), shell out to
        # the CoNLL-UD evaluator (dependency), or use the built-in f/acc.
        if data.optimize_with_evalb:
            with tempfile.NamedTemporaryFile("w",delete=False) as f_decode:
                if data.decode_dir is None:
                    data.decode_dir = f_decode.name
                decoded_st_dir = f_decode.name
            data.write_decoded_results(pred_results, 'dev')
            command = ["PYTHONPATH="+data.tree2labels,"python",
                       data.evaluate," --input ",decoded_st_dir," --gold ",data.gold_dev_trees," --evalb ",data.evalb,">",f_decode.name+".out"]
            os.system(" ".join(command))
            f_decode = open(f_decode.name+".out","r")
            current_score = float([l for l in f_decode.read().split("\n")
                                   if l.startswith("Bracketing FMeasure")][0].split("=")[1])
            print ("Current Score (from EVALB)", current_score, "Previous best dev (from EVALB)", best_dev)
        elif data.optimize_with_las:
            with tempfile.NamedTemporaryFile("w",delete=False) as f_decode:
                if data.decode_dir is None:
                    data.decode_dir = f_decode.name
                decoded_st_dir = f_decode.name
            data.write_decoded_results(pred_results, 'dev')
            #Transforming the output file into a CoNLL file
            # NOTE(review): ``p`` (dev precision from evaluate) is clobbered
            # here by the Popen handle; it is re-assigned by the test
            # evaluation below, but beware if this code is reordered.
            command = [#"PYTHONPATH="+abspath(join(dirname(__file__), data.dep2labels)),
                       "python",
                       data.dep2labels+os.sep+"decode_output_file.py",
                       "--input",
                       decoded_st_dir,
                       "--output",
                       f_decode.name+".out"
                       ]
            p = Popen(" ".join(command),stdout=subprocess.PIPE, shell=True)
            out, err = p.communicate()
            command = ["python",
                       data.conll_ud, f_decode.name+".out",
                       data.gold_dev_trees]#,">",f_decode.name+".out"]
            p = Popen(" ".join(command),stdout=subprocess.PIPE, shell=True)
            out, err = p.communicate()
            out = out.decode("utf-8")
            current_score = float(out.strip().split(":")[1])
            print ("Current Score (from conll_ud)", current_score, "Previous best dev (from conll_ud)", best_dev)
        else:
            if data.seg:
                current_score = f
                print("Dev: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(dev_cost, speed, acc, p, r, f))
            else:
                current_score = acc
                print("Dev: time: %.2fs speed: %.2fst/s; acc: %.4f"%(dev_cost, speed, acc))
        # Checkpoint whenever the dev score improves.
        if current_score > best_dev:
            if data.seg:
                print("Exceed previous best f score:", best_dev)
            else:
                print("Exceed previous best acc score:", best_dev)
            model_name = data.model_dir + ".model"
            #model_name = data.model_dir +'.'+ str(idx) + ".model"
            print("Save current best model in file:", model_name)
            torch.save(model.state_dict(), model_name)
            best_dev = current_score
        # ## decode test
        speed, acc, p, r, f, _,_ = evaluate(data, model, "test")
        test_finish = time.time()
        test_cost = test_finish - dev_finish
        if data.seg:
            print("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(test_cost, speed, acc, p, r, f))
        else:
            print("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f"%(test_cost, speed, acc))
        gc.collect()
def load_model_decode(data, name):
    """Restore the saved model and decode the split called ``name``.

    Builds the model type indicated by ``data.sentence_classification``,
    loads its weights from ``data.load_model_dir``, runs evaluation with
    ``data.nbest`` decoding, prints timing/score information, and returns
    the predicted labels together with their scores.
    """
    print("Load Model from file: ", data.model_dir)
    model = SentClassifier(data) if data.sentence_classification else SeqLabel(data)
    model.load_state_dict(torch.load(data.load_model_dir))
    print("Decode %s data, nbest: %s ..."%(name, data.nbest))
    start_time = time.time()
    speed, acc, p, r, f, pred_results, pred_scores = evaluate(data, model, name, data.nbest)
    time_cost = time.time() - start_time
    if data.seg:
        print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(name, time_cost, speed, acc, p, r, f))
    else:
        print("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f"%(name, time_cost, speed, acc))
    return pred_results, pred_scores
if __name__ == '__main__':
    # The command line only carries the configuration file; everything else
    # (status, paths, hyper-parameters) lives inside that config.
    cli_parser = argparse.ArgumentParser(description='Tuning with NCRF++')
    cli_parser.add_argument('--config', help='Configuration File')
    cli_args = cli_parser.parse_args()

    data = Data()
    data.HP_gpu = torch.cuda.is_available()
    data.read_config(cli_args.config)
    data.show_data_summary()
    status = data.status.lower()
    print("Seed num:",seed_num)

    if status == 'train':
        print("MODEL: train")
        data_initialization(data)
        # Build id sequences for every split before training starts.
        for split in ('train', 'dev', 'test'):
            data.generate_instance(split)
        data.build_pretrain_emb()
        train(data)
    elif status == 'decode':
        print("MODEL: decode")
        # Restore alphabets/settings saved at training time, then let the
        # current config override them for decoding.
        data.load(data.dset_dir)
        data.read_config(cli_args.config)
        print(data.raw_dir)
        data.show_data_summary()
        data.generate_instance('raw')
        print("nbest: %s"%(data.nbest))
        decode_results, pred_scores = load_model_decode(data, 'raw')
        if data.nbest and not data.sentence_classification:
            data.write_nbest_decoded_results(decode_results, pred_scores, 'raw')
        else:
            data.write_decoded_results(decode_results, 'raw')
    else:
        print("Invalid argument! Please use valid arguments! (train/test/decode)")
| 27,670 | 43.558776 | 253 | py |
parsing-as-pretraining | parsing-as-pretraining-master/NCRFpp/model/wordsequence.py | # -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2017-10-17 16:47:32
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2019-01-11 13:55:41
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from .wordrep import WordRep
import torch.nn.functional as F
class WordSequence(nn.Module):
    """Token-level feature extractor plus label projection.

    Wraps :class:`WordRep` to obtain per-token representations (word/ELMo
    embeddings, optional char encoder and handcrafted features) and, when
    ``data.contextualize`` is set, contextualizes them with an LSTM/GRU/CNN
    before the final linear projection to label-space scores.  Without
    contextualization the projection is applied to the raw token
    representations directly.

    Three large commented-out alternative implementations of ``__init__``
    and ``forward`` were removed from this class; the executable code is
    unchanged.
    """

    def __init__(self, data):
        """Build sub-modules from the ``data`` configuration object."""
        super(WordSequence, self).__init__()
        print("build word sequence feature extractor: %s..."%(data.word_feature_extractor))
        self.gpu = data.HP_gpu
        self.use_char = data.use_char
        self.droplstm = nn.Dropout(data.HP_dropout)
        self.bilstm_flag = data.HP_bilstm
        self.lstm_layer = data.HP_lstm_layer
        self.wordrep = WordRep(data)
        self.input_size = data.word_emb_dim
        self.contextualize = data.contextualize
        # ELMo replaces (not augments) the plain word embedding size.
        if data.use_elmo:
            self.input_size = data.elmo_size
        else:
            self.input_size = data.word_emb_dim
        self.feature_num = data.feature_num
        if self.use_char:
            self.input_size += data.HP_char_hidden_dim
            if data.char_feature_extractor == "ALL":
                self.input_size += data.HP_char_hidden_dim
        if data.use_features:
            for idx in range(self.feature_num):
                self.input_size += data.feature_emb_dims[idx]
        if self.contextualize:
            # A bidirectional encoder splits the hidden size across the two
            # directions so the concatenated output is HP_hidden_dim.
            if self.bilstm_flag:
                lstm_hidden = data.HP_hidden_dim // 2
            else:
                lstm_hidden = data.HP_hidden_dim
            self.word_feature_extractor = data.word_feature_extractor
            if self.word_feature_extractor == "GRU":
                self.lstm = nn.GRU(self.input_size, lstm_hidden, num_layers=self.lstm_layer, batch_first=True, bidirectional=self.bilstm_flag)
            elif self.word_feature_extractor == "LSTM":
                self.lstm = nn.LSTM(self.input_size, lstm_hidden, num_layers=self.lstm_layer, batch_first=True, bidirectional=self.bilstm_flag)
            elif self.word_feature_extractor == "CNN":
                self.word2cnn = nn.Linear(self.input_size, data.HP_hidden_dim)
                self.cnn_layer = data.HP_cnn_layer
                print("CNN layer: ", self.cnn_layer)
                self.cnn_list = nn.ModuleList()
                self.cnn_drop_list = nn.ModuleList()
                self.cnn_batchnorm_list = nn.ModuleList()
                kernel = 3
                pad_size = int((kernel-1)/2)
                for idx in range(self.cnn_layer):
                    self.cnn_list.append(nn.Conv1d(data.HP_hidden_dim, data.HP_hidden_dim, kernel_size=kernel, padding=pad_size))
                    self.cnn_drop_list.append(nn.Dropout(data.HP_dropout))
                    self.cnn_batchnorm_list.append(nn.BatchNorm1d(data.HP_hidden_dim))
            # The linear layer that maps from hidden state space to tag space.
            # NOTE(review): ``data.contextualize`` is necessarily true inside
            # this branch (self.contextualize was assigned from it above), so
            # the else-arm below is dead code here.
            if data.contextualize:
                self.hidden2tag = nn.Linear(data.HP_hidden_dim, data.label_alphabet_size)
            else:
                self.hidden2tag = nn.Linear(self.input_size, data.label_alphabet_size)
            if self.gpu:
                self.droplstm = self.droplstm.cuda()
                self.hidden2tag = self.hidden2tag.cuda()
                if self.word_feature_extractor == "CNN":
                    self.word2cnn = self.word2cnn.cuda()
                    for idx in range(self.cnn_layer):
                        self.cnn_list[idx] = self.cnn_list[idx].cuda()
                        self.cnn_drop_list[idx] = self.cnn_drop_list[idx].cuda()
                        self.cnn_batchnorm_list[idx] = self.cnn_batchnorm_list[idx].cuda()
                else:
                    self.lstm = self.lstm.cuda()
        else:
            # No contextual encoder: project token representations directly.
            self.hidden2tag = nn.Linear(self.input_size, data.label_alphabet_size)
            if self.gpu:
                self.hidden2tag = self.hidden2tag.cuda()

    def forward(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, word_text_input):
        """
        input:
            word_inputs: (batch_size, sent_len)
            feature_inputs: [(batch_size, sent_len), ...] list of variables
            word_seq_lengths: list of batch_size, (batch_size,1)
            char_inputs: (batch_size*sent_len, word_length)
            char_seq_lengths: list of whole batch_size for char, (batch_size*sent_len, 1)
            char_seq_recover: variable which records the char order information, used to recover char order
            word_text_input: raw token strings forwarded to WordRep (used e.g. for ELMo)
        output:
            Variable(batch_size, sent_len, label_alphabet_size)
        """
        word_represent = self.wordrep(word_inputs,feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, word_text_input)
        if not self.contextualize:
            outputs = self.hidden2tag(word_represent)
            return outputs
        else:
            ## word_represent (batch_size, seq_len, embed_size)
            if self.word_feature_extractor == "CNN":
                batch_size = word_inputs.size(0)
                word_in = torch.tanh(self.word2cnn(word_represent)).transpose(2,1).contiguous()
                for idx in range(self.cnn_layer):
                    if idx == 0:
                        cnn_feature = F.relu(self.cnn_list[idx](word_in))
                    else:
                        cnn_feature = F.relu(self.cnn_list[idx](cnn_feature))
                    cnn_feature = self.cnn_drop_list[idx](cnn_feature)
                    # BatchNorm1d requires more than one sample per batch.
                    if batch_size > 1:
                        cnn_feature = self.cnn_batchnorm_list[idx](cnn_feature)
                feature_out = cnn_feature.transpose(2,1).contiguous()
            else:
                # Inputs arrive sorted by decreasing length (see the
                # batchify_* helpers), as pack_padded_sequence requires.
                packed_words = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True)
                hidden = None
                lstm_out, hidden = self.lstm(packed_words, hidden)
                lstm_out, _ = pad_packed_sequence(lstm_out)
                ## lstm_out (seq_len, batch_size, hidden_size)
                feature_out = self.droplstm(lstm_out.transpose(1,0))
            ## feature_out (batch_size, seq_len, hidden_size)
            outputs = self.hidden2tag(feature_out)
            return outputs

    def sentence_representation(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover):
        """
        input:
            word_inputs: (batch_size, sent_len)
            feature_inputs: [(batch_size, ), ...] list of variables
            word_seq_lengths: list of batch_size, (batch_size,1)
            char_inputs: (batch_size*sent_len, word_length)
            char_seq_lengths: list of whole batch_size for char, (batch_size*sent_len, 1)
            char_seq_recover: variable which records the char order information, used to recover char order
        output:
            Variable(batch_size, label_alphabet_size)
        """
        # NOTE(review): this path calls self.wordrep with six arguments while
        # forward() passes seven (word_text_input), and it references
        # self.feature_embeddings, which this class never defines -- confirm
        # this method is actually exercised before relying on it.
        word_represent = self.wordrep(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover)
        ## word_represent (batch_size, seq_len, embed_size)
        batch_size = word_inputs.size(0)
        if self.word_feature_extractor == "CNN":
            word_in = torch.tanh(self.word2cnn(word_represent)).transpose(2,1).contiguous()
            for idx in range(self.cnn_layer):
                if idx == 0:
                    cnn_feature = F.relu(self.cnn_list[idx](word_in))
                else:
                    cnn_feature = F.relu(self.cnn_list[idx](cnn_feature))
                cnn_feature = self.cnn_drop_list[idx](cnn_feature)
                if batch_size > 1:
                    cnn_feature = self.cnn_batchnorm_list[idx](cnn_feature)
            # Max-pool over time to get one vector per sentence.
            feature_out = F.max_pool1d(cnn_feature, cnn_feature.size(2)).view(batch_size, -1)
        else:
            packed_words = pack_padded_sequence(word_represent, word_seq_lengths.cpu().numpy(), True)
            hidden = None
            lstm_out, hidden = self.lstm(packed_words, hidden)
            ## use the final hidden state as the sentence representation
            ## feature_out (batch_size, hidden_size)
            feature_out = hidden[0].transpose(1,0).contiguous().view(batch_size,-1)
        feature_list = [feature_out]
        for idx in range(self.feature_num):
            feature_list.append(self.feature_embeddings[idx](feature_inputs[idx]))
        final_feature = torch.cat(feature_list, 1)
        outputs = self.hidden2tag(self.droplstm(final_feature))
        ## outputs: (batch_size, label_alphabet_size)
        return outputs
| 17,182 | 47.677054 | 159 | py |
parsing-as-pretraining | parsing-as-pretraining-master/NCRFpp/model/wordrep.py | # -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2017-10-17 16:47:32
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2019-01-10 16:41:16
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
import numpy as np
from .charbilstm import CharBiLSTM
from .charbigru import CharBiGRU
from .charcnn import CharCNN
from allennlp.modules.elmo import Elmo, batch_to_ids
from torch.autograd import Variable
from allennlp.commands.elmo import ElmoEmbedder
import time
class WordRep(nn.Module):
def __init__(self, data):
super(WordRep, self).__init__()
print("build word representation...")
self.gpu = data.HP_gpu
self.use_char = data.use_char
self.batch_size = data.HP_batch_size
self.char_hidden_dim = 0
self.char_all_feature = False
self.sentence_classification = data.sentence_classification
self.use_features = data.use_features
if self.use_char:
self.char_hidden_dim = data.HP_char_hidden_dim
self.char_embedding_dim = data.char_emb_dim
if data.char_feature_extractor == "CNN":
self.char_feature = CharCNN(data.char_alphabet.size(), data.pretrain_char_embedding, self.char_embedding_dim, self.char_hidden_dim, data.HP_dropout, self.gpu)
elif data.char_feature_extractor == "LSTM":
self.char_feature = CharBiLSTM(data.char_alphabet.size(), data.pretrain_char_embedding, self.char_embedding_dim, self.char_hidden_dim, data.HP_dropout, self.gpu)
elif data.char_feature_extractor == "GRU":
self.char_feature = CharBiGRU(data.char_alphabet.size(), data.pretrain_char_embedding, self.char_embedding_dim, self.char_hidden_dim, data.HP_dropout, self.gpu)
elif data.char_feature_extractor == "ALL":
self.char_all_feature = True
self.char_feature = CharCNN(data.char_alphabet.size(), data.pretrain_char_embedding, self.char_embedding_dim, self.char_hidden_dim, data.HP_dropout, self.gpu)
self.char_feature_extra = CharBiLSTM(data.char_alphabet.size(), data.pretrain_char_embedding, self.char_embedding_dim, self.char_hidden_dim, data.HP_dropout, self.gpu)
else:
print("Error char feature selection, please check parameter data.char_feature_extractor (CNN/LSTM/GRU/ALL).")
exit(0)
self.embedding_dim = data.word_emb_dim
self.drop = nn.Dropout(data.HP_dropout)
self.use_elmo = data.use_elmo
self.fine_tune_emb = data.fine_tune_emb
if not self.use_elmo:
self.word_embedding = nn.Embedding(data.word_alphabet.size(), self.embedding_dim)
self.word_embedding.weight.requires_grad = self.fine_tune_emb
if data.pretrain_word_embedding is not None:
self.word_embedding.weight.data.copy_(torch.from_numpy(data.pretrain_word_embedding))
else:
self.word_embedding.weight.data.copy_(torch.from_numpy(self.random_embedding(data.word_alphabet.size(), self.embedding_dim)))
else:
options_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_options.json"
weight_file = "https://s3-us-west-2.amazonaws.com/allennlp/models/elmo/2x4096_512_2048cnn_2xhighway/elmo_2x4096_512_2048cnn_2xhighway_weights.hdf5"
# Compute two different representation for each token.
# Each representation is a linear weighted combination for the
# 3 layers in ELMo (i.e., charcnn, the outputs of the two BiLSTM))
if self.fine_tune_emb:
#self.elmo = Elmo(options_file, weight_file, 1, dropout=0, scalar_mix_parameters=[1.,1.,1.])#, requires_grad=self.fine_tune_emb)
self.elmo = Elmo(options_file, weight_file, 1, dropout=0, requires_grad=True)
else:
self.elmo = Elmo(options_file, weight_file, 1, dropout=0, scalar_mix_parameters=[0.,0.,0.])
if self.gpu:
self.elmo = self.elmo.cuda()
# self.feature_num = data.feature_num
# self.feature_embedding_dims = data.feature_emb_dims
# self.feature_embeddings = nn.ModuleList()
# for idx in range(self.feature_num):
# self.feature_embeddings.append(nn.Embedding(data.feature_alphabets[idx].size(), self.feature_embedding_dims[idx]))
# for idx in range(self.feature_num):
# if data.pretrain_feature_embeddings[idx] is not None:
# self.feature_embeddings[idx].weight.data.copy_(torch.from_numpy(data.pretrain_feature_embeddings[idx]))
# else:
# self.feature_embeddings[idx].weight.data.copy_(torch.from_numpy(self.random_embedding(data.feature_alphabets[idx].size(), self.feature_embedding_dims[idx])))
if data.use_features:
self.feature_num = data.feature_num
self.feature_embedding_dims = data.feature_emb_dims
self.feature_embeddings = nn.ModuleList()
for idx in range(self.feature_num):
self.feature_embeddings.append(nn.Embedding(data.feature_alphabets[idx].size(), self.feature_embedding_dims[idx]))
for idx in range(self.feature_num):
if data.pretrain_feature_embeddings[idx] is not None:
self.feature_embeddings[idx].weight.data.copy_(torch.from_numpy(data.pretrain_feature_embeddings[idx]))
else:
self.feature_embeddings[idx].weight.data.copy_(torch.from_numpy(self.random_embedding(data.feature_alphabets[idx].size(), self.feature_embedding_dims[idx])))
if self.gpu:
self.drop = self.drop.cuda()
if not self.use_elmo:
self.word_embedding = self.word_embedding.cuda()
# for idx in range(self.feature_num):
# self.feature_embeddings[idx] = self.feature_embeddings[idx].cuda()
if data.use_features:
for idx in range(self.feature_num):
self.feature_embeddings[idx] = self.feature_embeddings[idx].cuda()
def random_embedding(self, vocab_size, embedding_dim):
pretrain_emb = np.empty([vocab_size, embedding_dim])
scale = np.sqrt(3.0 / embedding_dim)
for index in range(vocab_size):
pretrain_emb[index,:] = np.random.uniform(-scale, scale, [1, embedding_dim])
return pretrain_emb
def forward(self, word_inputs,feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, word_text_input):
"""
input:
word_inputs: (batch_size, sent_len)
features: list [(batch_size, sent_len), (batch_len, sent_len),...]
word_seq_lengths: list of batch_size, (batch_size,1)
char_inputs: (batch_size*sent_len, word_length)
char_seq_lengths: list of whole batch_size for char, (batch_size*sent_len, 1)
char_seq_recover: variable which records the char order information, used to recover char order
output:
Variable(batch_size, sent_len, hidden_dim)
"""
batch_size = word_inputs.size(0)
sent_len = word_inputs.size(1)
if self.use_elmo:
character_ids = batch_to_ids(word_text_input)
if self.gpu:
character_ids = character_ids.cuda()
elmo_output = self.elmo(character_ids)["elmo_representations"][0]
if not self.fine_tune_emb:
elmo_output = elmo_output.detach()
word_list = [elmo_output]
else:
word_embs = self.word_embedding(word_inputs)
word_list = [word_embs]
if not self.sentence_classification and self.use_features:
for idx in range(self.feature_num):
word_list.append(self.feature_embeddings[idx](feature_inputs[idx]))
if self.use_char:
## calculate char lstm last hidden
# print("charinput:", char_inputs)
# exit(0)
char_features = self.char_feature.get_last_hiddens(char_inputs, char_seq_lengths.cpu().numpy())
char_features = char_features[char_seq_recover]
char_features = char_features.view(batch_size,sent_len,-1)
## concat word and char together
word_list.append(char_features)
word_embs = torch.cat([word_embs, char_features], 2)
if self.char_all_feature:
char_features_extra = self.char_feature_extra.get_last_hiddens(char_inputs, char_seq_lengths.cpu().numpy())
char_features_extra = char_features_extra[char_seq_recover]
char_features_extra = char_features_extra.view(batch_size,sent_len,-1)
## concat word and char together
word_list.append(char_features_extra)
word_embs = torch.cat(word_list, 2)
word_represent = self.drop(word_embs)
return word_represent
| 9,348 | 49.809783 | 183 | py |
parsing-as-pretraining | parsing-as-pretraining-master/NCRFpp/model/sentclassifier.py | # -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2019-01-01 21:11:50
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2019-01-14 14:56:28
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
from .wordsequence import WordSequence
class SentClassifier(nn.Module):
    """Sentence-level classifier.

    Wraps a WordSequence encoder that produces one logit vector per
    sentence; prediction is the argmax label and training uses plain
    cross-entropy over the batch.
    """

    def __init__(self, data):
        super(SentClassifier, self).__init__()
        print("build sentence classification network...")
        print("use_char: ", data.use_char)
        if data.use_char:
            print("char feature extractor: ", data.char_feature_extractor)
        print("word feature extractor: ", data.word_feature_extractor)
        self.gpu = data.HP_gpu
        self.average_batch = data.average_batch_loss
        label_size = data.label_alphabet_size  # kept for parity with sibling models
        self.word_hidden = WordSequence(data)

    def neg_log_likelihood_loss(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, batch_label, mask):
        """Return (cross-entropy loss, predicted label ids) for one batch."""
        logits = self.word_hidden.sentence_representation(
            word_inputs, feature_inputs, word_seq_lengths,
            char_inputs, char_seq_lengths, char_seq_recover)
        n_sent = word_inputs.size(0)
        logits = logits.view(n_sent, -1)
        loss = F.cross_entropy(logits, batch_label.view(n_sent))
        _, predictions = torch.max(logits, 1)
        if self.average_batch:
            # Optionally normalise the (already mean-reduced) loss by batch size.
            loss = loss / n_sent
        return loss, predictions

    def forward(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, mask):
        """Predict one label id per sentence in the batch."""
        logits = self.word_hidden.sentence_representation(
            word_inputs, feature_inputs, word_seq_lengths,
            char_inputs, char_seq_lengths, char_seq_recover)
        n_sent = word_inputs.size(0)
        predictions = torch.max(logits.view(n_sent, -1), 1)[1]
        return predictions
| 2,329 | 39.877193 | 153 | py |
parsing-as-pretraining | parsing-as-pretraining-master/NCRFpp/model/charbigru.py | # -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2017-10-17 16:47:32
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2018-10-18 11:12:13
from __future__ import print_function
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import numpy as np
class CharBiGRU(nn.Module):
    """Character-level (bi)GRU feature extractor.

    Embeds character ids, runs a GRU over each word's characters, and
    exposes either per-character hidden states or the final hidden state
    of each word.
    """

    def __init__(self, alphabet_size, pretrain_char_embedding, embedding_dim, hidden_dim, dropout, gpu, bidirect_flag = True):
        super(CharBiGRU, self).__init__()
        print("build char sequence feature extractor: GRU ...")
        self.gpu = gpu
        # With a bidirectional GRU each direction gets half the hidden size,
        # so the concatenated output still has hidden_dim units.
        self.hidden_dim = hidden_dim // 2 if bidirect_flag else hidden_dim
        self.char_drop = nn.Dropout(dropout)
        self.char_embeddings = nn.Embedding(alphabet_size, embedding_dim)
        if pretrain_char_embedding is None:
            init_weights = self.random_embedding(alphabet_size, embedding_dim)
        else:
            init_weights = pretrain_char_embedding
        self.char_embeddings.weight.data.copy_(torch.from_numpy(init_weights))
        self.char_lstm = nn.GRU(embedding_dim, self.hidden_dim, num_layers=1, batch_first=True, bidirectional=bidirect_flag)
        if self.gpu:
            self.char_drop = self.char_drop.cuda()
            self.char_embeddings = self.char_embeddings.cuda()
            self.char_lstm = self.char_lstm.cuda()

    def random_embedding(self, vocab_size, embedding_dim):
        """Uniform U(-scale, scale) rows with scale = sqrt(3 / embedding_dim)."""
        scale = np.sqrt(3.0 / embedding_dim)
        emb = np.empty([vocab_size, embedding_dim])
        for row in range(vocab_size):
            emb[row] = np.random.uniform(-scale, scale, [1, embedding_dim])
        return emb

    def get_last_hiddens(self, input, seq_lengths):
        """Final GRU hidden state per word.

        input: (batch_size, word_length) char ids, sorted by decreasing length
        seq_lengths: numpy array (batch_size, 1)
        returns: (batch_size, char_hidden_dim)
        """
        batch_size = input.size(0)
        embedded = self.char_drop(self.char_embeddings(input))
        packed = pack_padded_sequence(embedded, seq_lengths, True)
        _, final_hidden = self.char_lstm(packed, None)
        # final_hidden: (num_directions, batch, hidden) -> (batch, char_hidden_dim)
        return final_hidden.transpose(1, 0).contiguous().view(batch_size, -1)

    def get_all_hiddens(self, input, seq_lengths):
        """Per-character GRU outputs.

        input: (batch_size, word_length) char ids, sorted by decreasing length
        seq_lengths: numpy array (batch_size, 1)
        returns: (batch_size, word_length, char_hidden_dim)
        """
        embedded = self.char_drop(self.char_embeddings(input))
        packed = pack_padded_sequence(embedded, seq_lengths, True)
        rnn_out, _ = self.char_lstm(packed, None)
        unpacked, _ = pad_packed_sequence(rnn_out)
        # pad_packed_sequence yields (word_length, batch, hidden); batch first.
        return unpacked.transpose(1, 0)

    def forward(self, input, seq_lengths):
        return self.get_all_hiddens(input, seq_lengths)
| 3,452 | 43.269231 | 126 | py |
parsing-as-pretraining | parsing-as-pretraining-master/NCRFpp/model/charcnn.py | # -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2017-10-17 16:47:32
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2019-01-18 21:06:06
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
class CharCNN(nn.Module):
    """Character-level CNN feature extractor.

    Embeds character ids and applies a width-3 1-D convolution; the
    word-level feature is the max over the character axis.
    """

    def __init__(self, alphabet_size, pretrain_char_embedding, embedding_dim, hidden_dim, dropout, gpu):
        super(CharCNN, self).__init__()
        print("build char sequence feature extractor: CNN ...")
        self.gpu = gpu
        self.hidden_dim = hidden_dim
        self.char_drop = nn.Dropout(dropout)
        self.char_embeddings = nn.Embedding(alphabet_size, embedding_dim)
        if pretrain_char_embedding is None:
            init_weights = self.random_embedding(alphabet_size, embedding_dim)
        else:
            init_weights = pretrain_char_embedding
        self.char_embeddings.weight.data.copy_(torch.from_numpy(init_weights))
        # kernel_size=3 with padding=1 preserves the character-length dimension.
        self.char_cnn = nn.Conv1d(embedding_dim, self.hidden_dim, kernel_size=3, padding=1)
        if self.gpu:
            self.char_drop = self.char_drop.cuda()
            self.char_embeddings = self.char_embeddings.cuda()
            self.char_cnn = self.char_cnn.cuda()

    def random_embedding(self, vocab_size, embedding_dim):
        """Uniform U(-scale, scale) rows with scale = sqrt(3 / embedding_dim)."""
        scale = np.sqrt(3.0 / embedding_dim)
        emb = np.empty([vocab_size, embedding_dim])
        for row in range(vocab_size):
            emb[row] = np.random.uniform(-scale, scale, [1, embedding_dim])
        return emb

    def get_last_hiddens(self, input, seq_lengths):
        """Max-pooled CNN feature per word.

        input: (batch_size, word_length) char ids
        seq_lengths: unused; kept for interface parity with the RNN extractors
        returns: (batch_size, char_hidden_dim)
        """
        batch_size = input.size(0)
        embedded = self.char_drop(self.char_embeddings(input))
        embedded = embedded.transpose(2, 1).contiguous()  # (batch, emb_dim, word_length)
        conv_out = self.char_cnn(embedded)
        pooled = F.max_pool1d(conv_out, conv_out.size(2)).view(batch_size, -1)
        return pooled

    def get_all_hiddens(self, input, seq_lengths):
        """Per-character CNN features: (batch_size, word_length, char_hidden_dim)."""
        embedded = self.char_drop(self.char_embeddings(input))
        embedded = embedded.transpose(2, 1).contiguous()
        return self.char_cnn(embedded).transpose(2, 1).contiguous()

    def forward(self, input, seq_lengths):
        return self.get_all_hiddens(input, seq_lengths)
| 3,086 | 40.716216 | 121 | py |
parsing-as-pretraining | parsing-as-pretraining-master/NCRFpp/model/charbilstm.py | # -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2017-10-17 16:47:32
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2018-10-18 11:19:37
from __future__ import print_function
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
import numpy as np
class CharBiLSTM(nn.Module):
    """Character-level (bi)LSTM feature extractor.

    Embeds character ids, runs an LSTM over each word's characters, and
    exposes either per-character hidden states or the final hidden state
    of each word.
    """

    def __init__(self, alphabet_size, pretrain_char_embedding, embedding_dim, hidden_dim, dropout, gpu, bidirect_flag = True):
        super(CharBiLSTM, self).__init__()
        print("build char sequence feature extractor: LSTM ...")
        self.gpu = gpu
        # With a bidirectional LSTM each direction gets half the hidden size,
        # so the concatenated output still has hidden_dim units.
        self.hidden_dim = hidden_dim // 2 if bidirect_flag else hidden_dim
        self.char_drop = nn.Dropout(dropout)
        self.char_embeddings = nn.Embedding(alphabet_size, embedding_dim)
        if pretrain_char_embedding is None:
            init_weights = self.random_embedding(alphabet_size, embedding_dim)
        else:
            init_weights = pretrain_char_embedding
        self.char_embeddings.weight.data.copy_(torch.from_numpy(init_weights))
        self.char_lstm = nn.LSTM(embedding_dim, self.hidden_dim, num_layers=1, batch_first=True, bidirectional=bidirect_flag)
        if self.gpu:
            self.char_drop = self.char_drop.cuda()
            self.char_embeddings = self.char_embeddings.cuda()
            self.char_lstm = self.char_lstm.cuda()

    def random_embedding(self, vocab_size, embedding_dim):
        """Uniform U(-scale, scale) rows with scale = sqrt(3 / embedding_dim)."""
        scale = np.sqrt(3.0 / embedding_dim)
        emb = np.empty([vocab_size, embedding_dim])
        for row in range(vocab_size):
            emb[row] = np.random.uniform(-scale, scale, [1, embedding_dim])
        return emb

    def get_last_hiddens(self, input, seq_lengths):
        """Final LSTM hidden state per word.

        input: (batch_size, word_length) char ids, sorted by decreasing length
        seq_lengths: numpy array (batch_size, 1)
        returns: (batch_size, char_hidden_dim)
        """
        batch_size = input.size(0)
        embedded = self.char_drop(self.char_embeddings(input))
        packed = pack_padded_sequence(embedded, seq_lengths, True)
        # LSTM hidden is a (h_t, c_t) tuple; only h_t is used here.
        _, (h_t, _) = self.char_lstm(packed, None)
        # h_t: (num_directions, batch, hidden) -> (batch, char_hidden_dim)
        return h_t.transpose(1, 0).contiguous().view(batch_size, -1)

    def get_all_hiddens(self, input, seq_lengths):
        """Per-character LSTM outputs.

        input: (batch_size, word_length) char ids, sorted by decreasing length
        seq_lengths: numpy array (batch_size, 1)
        returns: (batch_size, word_length, char_hidden_dim)
        """
        embedded = self.char_drop(self.char_embeddings(input))
        packed = pack_padded_sequence(embedded, seq_lengths, True)
        rnn_out, _ = self.char_lstm(packed, None)
        unpacked, _ = pad_packed_sequence(rnn_out)
        # pad_packed_sequence yields (word_length, batch, hidden); batch first.
        return unpacked.transpose(1, 0)

    def forward(self, input, seq_lengths):
        return self.get_all_hiddens(input, seq_lengths)
| 3,561 | 43.525 | 126 | py |
parsing-as-pretraining | parsing-as-pretraining-master/NCRFpp/model/crf.py | # -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2017-12-04 23:19:38
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2018-12-16 22:15:56
from __future__ import print_function
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
START_TAG = -2
STOP_TAG = -1
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec, m_size):
    """Numerically stable log-sum-exp over the middle ("vanishing") axis.

    args:
        vec (batch_size, vanishing_dim, hidden_dim) : input tensor
        m_size : hidden_dim
    return:
        (batch_size, hidden_dim) tensor = log(sum(exp(vec), dim=1))
    """
    # torch.logsumexp implements the same max-shift trick the original
    # hand-rolled (gather the per-column max, subtract, exp, sum, log),
    # so this is a drop-in, numerically stable replacement.
    return torch.logsumexp(vec, 1).view(-1, m_size)
class CRF(nn.Module):
def __init__(self, tagset_size, gpu):
super(CRF, self).__init__()
print("build CRF...")
self.gpu = gpu
# Matrix of transition parameters. Entry i,j is the score of transitioning from i to j.
self.tagset_size = tagset_size
# # We add 2 here, because of START_TAG and STOP_TAG
# # transitions (f_tag_size, t_tag_size), transition value from f_tag to t_tag
init_transitions = torch.zeros(self.tagset_size+2, self.tagset_size+2)
init_transitions[:,START_TAG] = -10000.0
init_transitions[STOP_TAG,:] = -10000.0
init_transitions[:,0] = -10000.0
init_transitions[0,:] = -10000.0
if self.gpu:
init_transitions = init_transitions.cuda()
self.transitions = nn.Parameter(init_transitions)
# self.transitions = nn.Parameter(torch.Tensor(self.tagset_size+2, self.tagset_size+2))
# self.transitions.data.zero_()
    def _calculate_PZ(self, feats, mask):
        """Forward algorithm: log-partition function log(Z) per sentence.

        input:
            feats: (batch, seq_len, self.tag_size+2) emission scores
            mask: (batch, seq_len) byte mask, 1 for real tokens
        output:
            (sum over the batch of per-sentence log(Z),
             scores: (seq_len, batch, tag_size, tag_size) combined
             emission+transition scores, reused by _score_sentence)
        """
        batch_size = feats.size(0)
        seq_len = feats.size(1)
        tag_size = feats.size(2)
        assert(tag_size == self.tagset_size+2)
        # work in (seq_len, batch) order so we can iterate over time steps
        mask = mask.transpose(1,0).contiguous()
        ins_num = seq_len * batch_size
        ## be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1)
        feats = feats.transpose(1,0).contiguous().view(ins_num,1, tag_size).expand(ins_num, tag_size, tag_size)
        ## need to consider start
        # scores[t, b, from_tag, to_tag] = emission(b, t, to_tag) + transition(from_tag, to_tag)
        scores = feats + self.transitions.view(1,tag_size,tag_size).expand(ins_num, tag_size, tag_size)
        scores = scores.view(seq_len, batch_size, tag_size, tag_size)
        # build iter over time steps
        seq_iter = enumerate(scores)
        _, inivalues = next(seq_iter)  # bat_size * from_target_size * to_target_size
        # only need start from start_tag
        partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size, 1)  # bat_size * to_target_size
        # iter over remaining time steps, folding each into the running partition
        for idx, cur_values in seq_iter:
            # previous to_target is current from_target
            # partition: previous results log(exp(from_target)), #(batch_size * from_target)
            # cur_values: bat_size * from_target * to_target
            cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
            cur_partition = log_sum_exp(cur_values, tag_size)
            # (bat_size * from_target * to_target) -> (bat_size * to_target)
            mask_idx = mask[idx, :].view(batch_size, 1).expand(batch_size, tag_size)
            ## effective updated partition part, only keep the partition value of mask value = 1
            masked_cur_partition = cur_partition.masked_select(mask_idx)
            ## let mask_idx broadcastable, to disable warning
            mask_idx = mask_idx.contiguous().view(batch_size, tag_size, 1)
            ## replace the partition where the mask value = 1; padded positions
            ## keep their previous partition value unchanged
            partition.masked_scatter_(mask_idx, masked_cur_partition)
        # until the last state, add transition score for all partition (and do log_sum_exp) then select the value in STOP_TAG
        cur_values = self.transitions.view(1,tag_size, tag_size).expand(batch_size, tag_size, tag_size) + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
        cur_partition = log_sum_exp(cur_values, tag_size)
        final_partition = cur_partition[:, STOP_TAG]
        return final_partition.sum(), scores
    def _viterbi_decode(self, feats, mask):
        """Batched Viterbi decoding of the single best tag sequence.

        input:
            feats: (batch, seq_len, self.tag_size+2) emission scores
            mask: (batch, seq_len) byte mask, 1 for real tokens
        output:
            decode_idx: (batch, seq_len) decoded sequence; padded positions are 0
            path_score: (batch, 1) corresponding score for each sequence (to be implemented)
        """
        batch_size = feats.size(0)
        seq_len = feats.size(1)
        tag_size = feats.size(2)
        assert(tag_size == self.tagset_size+2)
        ## calculate sentence length for each sentence
        length_mask = torch.sum(mask.long(), dim = 1).view(batch_size,1).long()
        ## mask to (seq_len, batch_size)
        mask = mask.transpose(1,0).contiguous()
        ins_num = seq_len * batch_size
        ## be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1)
        feats = feats.transpose(1,0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
        ## need to consider start
        # scores[t, b, from_tag, to_tag] = emission + transition, as in _calculate_PZ
        scores = feats + self.transitions.view(1,tag_size,tag_size).expand(ins_num, tag_size, tag_size)
        scores = scores.view(seq_len, batch_size, tag_size, tag_size)
        # build iter over time steps
        seq_iter = enumerate(scores)
        ## record the position of best score
        back_points = list()
        partition_history = list()
        ## reverse mask (bug for mask = 1- mask, use this as alternative choice)
        ## after this, 1 marks padding and is used to zero out back-pointers
        mask = (1 - mask.long()).byte()
        _, inivalues = next(seq_iter)  # bat_size * from_target_size * to_target_size
        # only need start from start_tag
        partition = inivalues[:, START_TAG, :].clone().view(batch_size, tag_size)  # bat_size * to_target_size
        partition_history.append(partition)
        # iter over remaining time steps
        for idx, cur_values in seq_iter:
            # previous to_target is current from_target
            # partition: previous results log(exp(from_target)), #(batch_size * from_target)
            # cur_values: batch_size * from_target * to_target
            cur_values = cur_values + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
            # best previous tag (and its score) for every current tag
            partition, cur_bp = torch.max(cur_values, 1)
            partition_history.append(partition)
            ## cur_bp: (batch_size, tag_size) max source score position in current tag
            ## set padded label as 0, which will be filtered in post processing
            cur_bp.masked_fill_(mask[idx].view(batch_size, 1).expand(batch_size, tag_size), 0)
            back_points.append(cur_bp)
        ### add score to final STOP_TAG
        partition_history = torch.cat(partition_history, 0).view(seq_len, batch_size, -1).transpose(1,0).contiguous()  ## (batch_size, seq_len, tag_size)
        ### get the last position for each sentence, and select the last partitions using gather()
        last_position = length_mask.view(batch_size,1,1).expand(batch_size, 1, tag_size) -1
        last_partition = torch.gather(partition_history, 1, last_position).view(batch_size,tag_size,1)
        ### calculate the score from last partition to end state (and then select the STOP_TAG from it)
        last_values = last_partition.expand(batch_size, tag_size, tag_size) + self.transitions.view(1,tag_size, tag_size).expand(batch_size, tag_size, tag_size)
        _, last_bp = torch.max(last_values, 1)
        # dummy back-pointer frame appended so back_points has seq_len entries
        pad_zero = autograd.Variable(torch.zeros(batch_size, tag_size)).long()
        if self.gpu:
            pad_zero = pad_zero.cuda()
        back_points.append(pad_zero)
        back_points = torch.cat(back_points).view(seq_len, batch_size, tag_size)
        ## select end ids in STOP_TAG
        pointer = last_bp[:, STOP_TAG]
        insert_last = pointer.contiguous().view(batch_size,1,1).expand(batch_size,1, tag_size)
        back_points = back_points.transpose(1,0).contiguous()
        ## move the end ids (expanded to tag_size) to the corresponding position of back_points to replace the 0 values
        back_points.scatter_(1, last_position, insert_last)
        back_points = back_points.transpose(1,0).contiguous()
        ## decode from the end, padded position ids are 0, which will be filtered in following evaluation
        decode_idx = autograd.Variable(torch.LongTensor(seq_len, batch_size))
        if self.gpu:
            decode_idx = decode_idx.cuda()
        decode_idx[-1] = pointer.detach()
        # follow back-pointers from the last step back to the first
        for idx in range(len(back_points)-2, -1, -1):
            pointer = torch.gather(back_points[idx], 1, pointer.contiguous().view(batch_size, 1))
            decode_idx[idx] = pointer.detach().view(batch_size)
        path_score = None  # best-path score reconstruction not implemented
        decode_idx = decode_idx.transpose(1,0)
        return path_score, decode_idx
def forward(self, feats):
path_score, best_path = self._viterbi_decode(feats)
return path_score, best_path
    def _score_sentence(self, scores, mask, tags):
        """Score the gold tag sequences.

        input:
            scores: variable (seq_len, batch, tag_size, tag_size) combined
                emission+transition scores produced by _calculate_PZ
            mask: (batch, seq_len) byte mask, 1 for real tokens
            tags: tensor (batch, seq_len) gold tag ids
        output:
            score: sum of score for gold sequences within whole batch
        """
        # Gives the score of a provided tag sequence
        batch_size = scores.size(1)
        seq_len = scores.size(0)
        tag_size = scores.size(2)
        ## convert tag value into a new format, recording label bigram information as an index:
        ## new_tags[b, t] = prev_tag * tag_size + cur_tag, so a single gather
        ## can pick the matching (from, to) entry out of the flattened scores
        new_tags = autograd.Variable(torch.LongTensor(batch_size, seq_len))
        if self.gpu:
            new_tags = new_tags.cuda()
        for idx in range(seq_len):
            if idx == 0:
                ## start -> first score; tag_size-2 is START_TAG's row index
                new_tags[:,0] = (tag_size - 2)*tag_size + tags[:,0]
            else:
                new_tags[:,idx] = tags[:,idx-1]*tag_size + tags[:,idx]
        ## transition for label to STOP_TAG
        end_transition = self.transitions[:,STOP_TAG].contiguous().view(1, tag_size).expand(batch_size, tag_size)
        ## length for batch, last word position = length - 1
        length_mask = torch.sum(mask.long(), dim = 1).view(batch_size,1).long()
        ## index the label id of last word
        end_ids = torch.gather(tags, 1, length_mask - 1)
        ## index the transition score for end_id to STOP_TAG
        end_energy = torch.gather(end_transition, 1, end_ids)
        ## convert tag as (seq_len, batch_size, 1)
        new_tags = new_tags.transpose(1,0).contiguous().view(seq_len, batch_size, 1)
        ### need convert tags id to search from tag_size*tag_size positions of scores
        tg_energy = torch.gather(scores.view(seq_len, batch_size, -1), 2, new_tags).view(seq_len, batch_size)  # seq_len * bat_size
        ## mask transpose to (seq_len, batch_size); drop padded positions
        tg_energy = tg_energy.masked_select(mask.transpose(1,0))
        ## add all score together
        gold_score = tg_energy.sum() + end_energy.sum()
        return gold_score
def neg_log_likelihood_loss(self, feats, mask, tags):
# nonegative log likelihood
batch_size = feats.size(0)
forward_score, scores = self._calculate_PZ(feats, mask)
gold_score = self._score_sentence(scores, mask, tags)
# print "batch, f:", forward_score.data[0], " g:", gold_score.data[0], " dis:", forward_score.data[0] - gold_score.data[0]
# exit(0)
return forward_score - gold_score
def _viterbi_decode_nbest(self, feats, mask, nbest):
"""
input:
feats: (batch, seq_len, self.tag_size+2)
mask: (batch, seq_len)
output:
decode_idx: (batch, nbest, seq_len) decoded sequence
path_score: (batch, nbest) corresponding score for each sequence (to be implementated)
nbest decode for sentence with one token is not well supported, to be optimized
"""
batch_size = feats.size(0)
seq_len = feats.size(1)
tag_size = feats.size(2)
assert(tag_size == self.tagset_size+2)
## calculate sentence length for each sentence
length_mask = torch.sum(mask.long(), dim = 1).view(batch_size,1).long()
## mask to (seq_len, batch_size)
mask = mask.transpose(1,0).contiguous()
ins_num = seq_len * batch_size
## be careful the view shape, it is .view(ins_num, 1, tag_size) but not .view(ins_num, tag_size, 1)
feats = feats.transpose(1,0).contiguous().view(ins_num, 1, tag_size).expand(ins_num, tag_size, tag_size)
## need to consider start
scores = feats + self.transitions.view(1,tag_size,tag_size).expand(ins_num, tag_size, tag_size)
scores = scores.view(seq_len, batch_size, tag_size, tag_size)
# build iter
seq_iter = enumerate(scores)
## record the position of best score
back_points = list()
partition_history = list()
## reverse mask (bug for mask = 1- mask, use this as alternative choice)
# mask = 1 + (-1)*mask
mask = (1 - mask.long()).byte()
_, inivalues = next(seq_iter) # bat_size * from_target_size * to_target_size
# only need start from start_tag
partition = inivalues[:, START_TAG, :].clone() # bat_size * to_target_size
## initial partition [batch_size, tag_size]
partition_history.append(partition.view(batch_size, tag_size, 1).expand(batch_size, tag_size, nbest))
# iter over last scores
for idx, cur_values in seq_iter:
if idx == 1:
cur_values = cur_values.view(batch_size, tag_size, tag_size) + partition.contiguous().view(batch_size, tag_size, 1).expand(batch_size, tag_size, tag_size)
else:
# previous to_target is current from_target
# partition: previous results log(exp(from_target)), #(batch_size * nbest * from_target)
# cur_values: batch_size * from_target * to_target
cur_values = cur_values.view(batch_size, tag_size, 1, tag_size).expand(batch_size, tag_size, nbest, tag_size) + partition.contiguous().view(batch_size, tag_size, nbest, 1).expand(batch_size, tag_size, nbest, tag_size)
## compare all nbest and all from target
cur_values = cur_values.view(batch_size, tag_size*nbest, tag_size)
# print "cur size:",cur_values.size()
partition, cur_bp = torch.topk(cur_values, nbest, 1)
## cur_bp/partition: [batch_size, nbest, tag_size], id should be normize through nbest in following backtrace step
# print partition[:,0,:]
# print cur_bp[:,0,:]
# print "nbest, ",idx
if idx == 1:
cur_bp = cur_bp*nbest
partition = partition.transpose(2,1)
cur_bp = cur_bp.transpose(2,1)
# print partition
# exit(0)
#partition: (batch_size * to_target * nbest)
#cur_bp: (batch_size * to_target * nbest) Notice the cur_bp number is the whole position of tag_size*nbest, need to convert when decode
partition_history.append(partition)
## cur_bp: (batch_size,nbest, tag_size) topn source score position in current tag
## set padded label as 0, which will be filtered in post processing
## mask[idx] ? mask[idx-1]
cur_bp.masked_fill_(mask[idx].view(batch_size, 1, 1).expand(batch_size, tag_size, nbest), 0)
# print cur_bp[0]
back_points.append(cur_bp)
### add score to final STOP_TAG
partition_history = torch.cat(partition_history,0).view(seq_len, batch_size, tag_size, nbest).transpose(1,0).contiguous() ## (batch_size, seq_len, nbest, tag_size)
### get the last position for each setences, and select the last partitions using gather()
last_position = length_mask.view(batch_size,1,1,1).expand(batch_size, 1, tag_size, nbest) - 1
last_partition = torch.gather(partition_history, 1, last_position).view(batch_size, tag_size, nbest, 1)
### calculate the score from last partition to end state (and then select the STOP_TAG from it)
last_values = last_partition.expand(batch_size, tag_size, nbest, tag_size) + self.transitions.view(1, tag_size, 1, tag_size).expand(batch_size, tag_size, nbest, tag_size)
last_values = last_values.view(batch_size, tag_size*nbest, tag_size)
end_partition, end_bp = torch.topk(last_values, nbest, 1)
## end_partition: (batch, nbest, tag_size)
end_bp = end_bp.transpose(2,1)
# end_bp: (batch, tag_size, nbest)
pad_zero = autograd.Variable(torch.zeros(batch_size, tag_size, nbest)).long()
if self.gpu:
pad_zero = pad_zero.cuda()
back_points.append(pad_zero)
back_points = torch.cat(back_points).view(seq_len, batch_size, tag_size, nbest)
## select end ids in STOP_TAG
pointer = end_bp[:, STOP_TAG, :] ## (batch_size, nbest)
insert_last = pointer.contiguous().view(batch_size, 1, 1, nbest).expand(batch_size, 1, tag_size, nbest)
back_points = back_points.transpose(1,0).contiguous()
## move the end ids(expand to tag_size) to the corresponding position of back_points to replace the 0 values
# print "lp:",last_position
# print "il:",insert_last[0]
# exit(0)
## copy the ids of last position:insert_last to back_points, though the last_position index
## last_position includes the length of batch sentences
# print "old:", back_points[9,0,:,:]
back_points.scatter_(1, last_position, insert_last)
## back_points: [batch_size, seq_length, tag_size, nbest]
# print "new:", back_points[9,0,:,:]
# exit(0)
# print pointer[2]
'''
back_points: in simple demonstratration
x,x,x,x,x,x,x,x,x,7
x,x,x,x,x,4,0,0,0,0
x,x,6,0,0,0,0,0,0,0
'''
back_points = back_points.transpose(1,0).contiguous()
# print back_points[0]
## back_points: (seq_len, batch, tag_size, nbest)
## decode from the end, padded position ids are 0, which will be filtered in following evaluation
decode_idx = autograd.Variable(torch.LongTensor(seq_len, batch_size, nbest))
if self.gpu:
decode_idx = decode_idx.cuda()
decode_idx[-1] = pointer.data/nbest
# print "pointer-1:",pointer[2]
# exit(0)
# use old mask, let 0 means has token
for idx in range(len(back_points)-2, -1, -1):
# print "pointer: ",idx, pointer[3]
# print "back:",back_points[idx][3]
# print "mask:",mask[idx+1,3]
new_pointer = torch.gather(back_points[idx].view(batch_size, tag_size*nbest), 1, pointer.contiguous().view(batch_size,nbest))
decode_idx[idx] = new_pointer.data/nbest
# # use new pointer to remember the last end nbest ids for non longest
pointer = new_pointer + pointer.contiguous().view(batch_size,nbest)*mask[idx].view(batch_size,1).expand(batch_size, nbest).long()
# exit(0)
path_score = None
decode_idx = decode_idx.transpose(1,0)
## decode_idx: [batch, seq_len, nbest]
# print decode_idx[:,:,0]
# print "nbest:",nbest
# print "diff:", decode_idx[:,:,0]- decode_idx[:,:,4]
# print decode_idx[:,0,:]
# exit(0)
### calculate probability for each sequence
scores = end_partition[:, :, STOP_TAG]
## scores: [batch_size, nbest]
max_scores,_ = torch.max(scores, 1)
minus_scores = scores - max_scores.view(batch_size,1).expand(batch_size, nbest)
path_score = F.softmax(minus_scores, 1)
## path_score: [batch_size, nbest]
# exit(0)
return path_score, decode_idx
| 21,777 | 48.6082 | 233 | py |
parsing-as-pretraining | parsing-as-pretraining-master/NCRFpp/model/seqlabel.py | # -*- coding: utf-8 -*-
# @Author: Jie Yang
# @Date: 2017-10-17 16:47:32
# @Last Modified by: Jie Yang, Contact: jieynlp@gmail.com
# @Last Modified time: 2019-01-01 21:10:00
from __future__ import print_function
from __future__ import absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
from .wordsequence import WordSequence
from .crf import CRF
class SeqLabel(nn.Module):
    """Sequence labeling model: a WordSequence encoder topped either by a CRF
    layer or by per-token softmax classification."""

    def __init__(self, data):
        """Build the network from a `data` configuration/alphabet object.

        Reads `use_crf`, `use_char`, feature-extractor names, `HP_gpu`,
        `average_batch_loss` and `label_alphabet_size` from `data`.
        """
        super(SeqLabel, self).__init__()
        self.use_crf = data.use_crf
        print("build sequence labeling network...")
        print("use_char: ", data.use_char)
        if data.use_char:
            print("char feature extractor: ", data.char_feature_extractor)
        print("word feature extractor: ", data.word_feature_extractor)
        print("use crf: ", self.use_crf)
        self.gpu = data.HP_gpu
        self.average_batch = data.average_batch_loss
        ## add two more label for downlayer lstm, use original label size for CRF
        label_size = data.label_alphabet_size
        data.label_alphabet_size += 2
        self.word_hidden = WordSequence(data)
        if self.use_crf:
            self.crf = CRF(label_size, self.gpu)

    def neg_log_likelihood_loss(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, batch_label, mask,
                                word_text_input):
        """Compute the training loss and the current best tag sequence.

        Returns:
            (total_loss, tag_seq): scalar loss and (batch, seq_len) predictions.
        """
        outs = self.word_hidden(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, word_text_input)
        batch_size = word_inputs.size(0)
        seq_len = word_inputs.size(1)
        if self.use_crf:
            total_loss = self.crf.neg_log_likelihood_loss(outs, mask, batch_label)
            scores, tag_seq = self.crf._viterbi_decode(outs, mask)
        else:
            # `reduction='sum'` replaces the deprecated `size_average=False`
            # (same behavior); label index 0 is padding and is ignored.
            loss_function = nn.NLLLoss(ignore_index=0, reduction='sum')
            outs = outs.view(batch_size * seq_len, -1)
            score = F.log_softmax(outs, 1)
            total_loss = loss_function(score, batch_label.view(batch_size * seq_len))
            _, tag_seq = torch.max(score, 1)
            tag_seq = tag_seq.view(batch_size, seq_len)
        if self.average_batch:
            total_loss = total_loss / batch_size
        return total_loss, tag_seq

    def forward(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, mask, word_text_input):
        """Decode the best tag sequence; padded positions are zeroed out."""
        outs = self.word_hidden(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, word_text_input)
        batch_size = word_inputs.size(0)
        seq_len = word_inputs.size(1)
        if self.use_crf:
            scores, tag_seq = self.crf._viterbi_decode(outs, mask)
        else:
            outs = outs.view(batch_size * seq_len, -1)
            _, tag_seq = torch.max(outs, 1)
            tag_seq = tag_seq.view(batch_size, seq_len)
            ## filter padded position with zero
            tag_seq = mask.long() * tag_seq
        return tag_seq

    def decode_nbest(self, word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, mask, nbest, word_text_input=None):
        """N-best Viterbi decoding (CRF mode only).

        Bug fix: the encoder call previously omitted `word_text_input`, which
        every other entry point passes, so this method raised a TypeError. It
        is now accepted as an optional trailing argument so existing callers
        keep working. NOTE(review): confirm WordSequence tolerates None here.
        """
        if not self.use_crf:
            print("Nbest output is currently supported only for CRF! Exit...")
            exit(0)
        outs = self.word_hidden(word_inputs, feature_inputs, word_seq_lengths, char_inputs, char_seq_lengths, char_seq_recover, word_text_input)
        batch_size = word_inputs.size(0)
        seq_len = word_inputs.size(1)
        scores, tag_seq = self.crf._viterbi_decode_nbest(outs, mask, nbest)
        return scores, tag_seq
| 3,970 | 42.163043 | 152 | py |
biokge | biokge-main/src/trainer.py | import torch
import tqdm
from torch.nn import functional as F
from torch.utils.data import DataLoader
import wandb
from .evaluators.classification_evaluator import calculate_metrics
from .models.classifier import Classifier
class Trainer:
    """Trains and evaluates a relation `Classifier` over entity pairs,
    optionally warm-starting the entity embeddings from a pretrained
    knowledge-graph-embedding checkpoint."""

    def __init__(
        self,
        configs,
        entity_size,
        num_relations,
        rank,
        pretrained_model_path,
        freeze_embedding,
        device,
    ):
        """
        Args:
            configs: experiment configuration (reads `model_configs.grad_clip`,
                `model_configs.learning_rate`, `model_configs.batch_size`,
                `testing_configs.epochs`).
            entity_size: number of entities (embedding rows).
            num_relations: number of relation classes (>1 => multi-class).
            rank: embedding dimensionality.
            pretrained_model_path: optional checkpoint holding pretrained
                entity embeddings; random init when None.
            freeze_embedding: freeze the pretrained embedding table.
            device: torch device to train on.
        """
        self.configs = configs
        self.entity_size = entity_size
        self.num_relations = num_relations
        self.grad_clip = configs.model_configs.grad_clip
        self.model = Classifier(entity_size, num_relations, rank).to(device)
        self.device = device
        if pretrained_model_path is not None:
            self.load_embedding_weights(pretrained_model_path, freeze_embedding)
        else:
            self.init_embedding_weights()
        # Multi-class task: NLL over log-softmax; binary task: BCE over sigmoid.
        if self.num_relations > 1:
            self.loss_fn = torch.nn.NLLLoss()
        else:
            self.loss_fn = torch.nn.BCELoss()
        self.optimizer = torch.optim.Adam(
            [param for param in self.model.parameters() if param.requires_grad],
            lr=self.configs.model_configs.learning_rate,
        )

    def load_embedding_weights(self, pretrained_model_path, freeze_embedding=True):
        """Copy pretrained entity embeddings into the classifier.

        NOTE(review): `num_oov_tokens` is pretrained rows minus model rows,
        and zero rows are appended to the *pretrained* matrix when it is
        already at least as large — this looks inverted (one would expect to
        pad the pretrained matrix up to the model size). Kept as-is; confirm
        against the checkpoint layout before changing.
        """
        model_state_dict = torch.load(pretrained_model_path, map_location=self.device)
        pretrained_weight = model_state_dict["model"][0][
            "_entity_embedder._embeddings.weight"
        ]
        num_oov_tokens = pretrained_weight.size(
            0
        ) - self.model.entity_embeddings.weight.data.size(0)
        if num_oov_tokens >= 0:
            num_oov_vectors = torch.zeros((num_oov_tokens, pretrained_weight.size(1)))
            pretrained_weight = torch.cat((pretrained_weight, num_oov_vectors), dim=0)
        self.model.entity_embeddings.weight.data = pretrained_weight
        if freeze_embedding:
            self.model.entity_embeddings.weight.requires_grad = False

    def init_embedding_weights(self, init_range=1, init_size=1e-3):
        """Uniform(-init_range, init_range) init, scaled down by init_size."""
        self.model.entity_embeddings.weight.data.uniform_(-init_range, init_range)
        self.model.entity_embeddings.weight.data *= init_size

    def training_epoch(self, inputs, labels, epoch):
        """Run one optimization epoch; returns the mean training loss."""
        # Set model mode to train
        self.model.train()
        inputs = torch.from_numpy(inputs)
        labels = torch.from_numpy(labels)
        if self.num_relations > 1:
            # One-hot labels -> class indices, as expected by NLLLoss.
            labels = labels.argmax(dim=1)
        ## Create DataLoader for sampling mini-batches of row indices
        data_loader = DataLoader(
            range(inputs.size(0)),
            self.configs.model_configs.batch_size,
            shuffle=True,
        )
        total_loss = 0
        total_examples = 0
        for iteration, perm in tqdm.tqdm(
            enumerate(data_loader),
            desc=f"EPOCH {epoch}, batch ",
            unit="",
            total=len(data_loader),
        ):
            self.optimizer.zero_grad()
            batch_inputs = inputs[perm].to(self.device)
            batch_labels = labels[perm].to(self.device)
            predictions = self.model(batch_inputs)
            if self.num_relations > 1:
                loss = self.loss_fn(F.log_softmax(predictions, dim=-1), batch_labels)
            else:
                loss = self.loss_fn(torch.sigmoid(predictions).squeeze(), batch_labels)
            loss.backward()
            # Clip gradients to keep the loss from blowing up on noisy batches.
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)
            self.optimizer.step()
            num_examples = predictions.size(0)
            total_loss += loss.item() * num_examples
            total_examples += num_examples
        return total_loss / total_examples

    @torch.no_grad()
    def test(self, inputs, labels, split) -> dict:
        """Score `inputs` and return classification metrics for `split`."""
        # Set model mode to evaluation
        self.model.eval()
        inputs = torch.from_numpy(inputs).to(self.device)
        if self.num_relations > 1:
            preds = F.softmax(self.model(inputs), dim=1)
        else:
            # torch.sigmoid replaces the deprecated F.sigmoid and matches the
            # activation used in training_epoch.
            preds = torch.sigmoid(self.model(inputs))
        results = calculate_metrics(
            labels, preds.cpu().numpy(), self.num_relations, split
        )
        return results

    def train(
        self,
        train_inputs,
        valid_inputs,
        test_inputs,
        train_labels,
        valid_labels,
        test_labels,
    ):
        """Full training loop: evaluates every split each epoch, logs to wandb."""
        for epoch in range(1, 1 + self.configs.testing_configs.epochs):
            train_loss = self.training_epoch(train_inputs, train_labels, epoch)
            train_results = self.test(train_inputs, train_labels, "train")
            valid_results = self.test(valid_inputs, valid_labels, "valid")
            test_results = self.test(test_inputs, test_labels, "test")
            wandb_logs = {
                "epoch": epoch,
                "train_loss": train_loss,
            }
            wandb_logs.update(train_results)
            wandb_logs.update(valid_results)
            wandb_logs.update(test_results)
            # Echo only the headline metrics to stdout.
            for metrics_name, metrics_value in wandb_logs.items():
                if metrics_name.endswith(
                    (
                        "loss",
                        "mean_average_precision",
                        "averaged_auc_precision_recall",
                        "averaged_auc_roc",
                    )
                ):
                    print(f" {metrics_name}: {metrics_value}")
            wandb.log(wandb_logs)
| 5,954 | 33.622093 | 88 | py |
biokge | biokge-main/src/models/classifier.py | import torch
class Classifier(torch.nn.Module):
    """Relation classifier over (subject, object) entity pairs.

    Embeds both entities, concatenates the two embeddings, and applies a
    single linear layer producing one logit per relation.
    """

    def __init__(self, entity_size: int, num_relations: int, rank: int):
        """
        Args:
            entity_size: number of entities in the vocabulary.
            num_relations: number of output relation classes.
            rank: entity embedding dimensionality.
        """
        super().__init__()
        self.entity_size = entity_size
        self.num_relations = num_relations
        self.rank = rank
        self.entity_embeddings = torch.nn.Embedding(entity_size, rank)
        # Input is the concatenation of the two entity embeddings.
        self.classifier = torch.nn.Linear(rank * 2, num_relations)
        self.init_weight()

    def init_weight(self, init_range=1):
        """Uniform(-init_range, init_range) init, scaled down to ~1e-3."""
        self.entity_embeddings.weight.data.uniform_(-init_range, init_range)
        self.classifier.weight.data.uniform_(-init_range, init_range)
        self.entity_embeddings.weight.data *= 1e-3
        self.classifier.weight.data *= 1e-3

    def forward(self, input_pairs):
        """Score entity pairs.

        Args:
            input_pairs: LongTensor of shape (batch, 2) holding
                (subject_id, object_id) per row.

        Returns:
            Tensor of shape (batch, num_relations) with unnormalized logits.
        """
        # `head`/`tail` avoid shadowing the built-in name `object`.
        head = self.entity_embeddings(input_pairs[:, 0])
        tail = self.entity_embeddings(input_pairs[:, 1])
        representation = torch.cat((head, tail), dim=-1)
        preds = self.classifier(representation)
        return preds
| 1,329 | 34 | 76 | py |
biokge | biokge-main/src/models/regularizers.py | from abc import ABC, abstractmethod
from typing import Tuple
import torch
from torch import nn
class Regularizer(ABC):
    """Base class of regularizers.

    Args:
        lmbda (float): Penalty coefficient of a regularizer
    """

    def __init__(self, lmbda: float):
        self.lmbda = lmbda

    @abstractmethod
    def penalty(self, factors: Tuple[torch.Tensor]):
        """Return the regularization loss for the given model factors."""
        pass

    def checkpoint(self, regularizer_cache_path: str, epoch_id: int):
        """Persist the regularizer state when a cache path is configured.

        NOTE(review): this calls `self.state_dict()`, which is not defined
        anywhere in this hierarchy (the class is not an nn.Module) — confirm
        subclasses provide it before relying on checkpointing.
        """
        if regularizer_cache_path is None:
            return
        print(f"Save the regularizer at epoch {epoch_id}")
        path = regularizer_cache_path + f"{epoch_id}.reg"
        torch.save(self.state_dict(), path)
        print(f"Regularizer Checkpoint: {path}")


class F2(Regularizer):
    """F2/L2 regularizer.

    Args:
        lmbda (float): Penalty coefficient
    """

    def __init__(self, lmbda: float):
        super().__init__(lmbda)

    def penalty(self, factors: Tuple[torch.Tensor]) -> float:
        """Squared-norm penalty, averaged over the batch dimension.

        Args:
            factors (Tuple[torch.Tensor]): Model factors

        Returns:
            float: regularization loss
        """
        batch_size = factors[0].shape[0]
        total = sum(self.lmbda * (factor ** 2).sum() for factor in factors)
        return total / batch_size


class N3(Regularizer):
    """N3 regularizer http://arxiv.org/abs/1806.07297

    Args:
        lmbda (float): Penalty coefficient
    """

    def __init__(self, lmbda: float):
        super().__init__(lmbda)

    def penalty(self, factors: Tuple[torch.Tensor]):
        """Cubed-absolute-value penalty, averaged over the batch dimension.

        Args:
            factors (Tuple[torch.Tensor]): Model factors

        Returns:
            float: regularization loss
        """
        batch_size = factors[0].shape[0]
        total = sum(self.lmbda * factor.abs().pow(3).sum() for factor in factors)
        return total / batch_size
| 1,921 | 24.289474 | 69 | py |
biokge | biokge-main/src/utils/logger.py | from typing import List
import torch
class Logger(object):
    """Collects per-step (train, valid, test) results and writes them to a
    text log file."""

    def __init__(self, output_dir: str, filename: str) -> None:
        """
        Args:
            output_dir (str): Output directory of the experiment.
            filename (str): Base name of the log file.
                Likely to be based on the metrics name.
        """
        # Bug fix: `filename` was accepted but never used, so every logger
        # instance wrote to the same hard-coded file.
        self.output_file = f"{output_dir}/{filename}_log.txt"
        self.results = []

    def add_result(self, result: List[float]) -> None:
        """Append one evaluation step's (train, validation, test) results."""
        assert len(result) == 3
        self.results.append(result)

    def save_statistics(self) -> None:
        """Write all logged results (as percentages) to the log file."""
        result = 100 * torch.tensor(self.results)
        with open(self.output_file, "w") as txt_file:
            # Header fixed to match the three columns actually written.
            txt_file.write("Train, Validation, Test\n")
            for r in result:
                train1 = r[0].item()
                valid = r[1].item()
                test = r[2].item()
                txt_file.write("{},{},{}\n".format(train1, valid, test))
| 1,268 | 29.214286 | 72 | py |
biokge | biokge-main/src/utils/common_utils.py | import os
import random
from datetime import datetime
from typing import Optional, Tuple
import numpy as np
import torch
import yaml
from torch import nn
def setup_experiment_folder(outputs_dir: str) -> Tuple[str, str]:
    """Create a timestamped experiment directory with a checkpoint subfolder.

    Args:
        outputs_dir (str): Parent directory holding all experiment outputs.

    Returns:
        Tuple[str, str]:
            experiment output directory (checkpoint dir and logs live here),
            checkpoint directory for training checkpoints.
    """
    timestamp = datetime.now().strftime("%Y_%m_%d__%H_%M_%S")
    experiment_dir = os.path.join(outputs_dir, timestamp)
    checkpoint_dir = os.path.join(experiment_dir, "checkpoint")
    os.makedirs(checkpoint_dir, exist_ok=True)
    return experiment_dir, checkpoint_dir
def setup_device(device: Optional[str] = None) -> torch.device:
    """Pick the training device.

    Uses `cuda:<device>` when CUDA is available, Apple "mps" when that
    backend exists and is usable, and "cpu" otherwise.

    Args:
        device (Optional[str], optional): GPU device id (int or str), used
            only when CUDA is available. Defaults to None.

    Returns:
        torch.device: The chosen device for training.
    """
    if torch.cuda.is_available():
        device = f"cuda:{device}"
    elif getattr(torch.backends, "mps", None) is not None and torch.backends.mps.is_available():
        device = "mps"
    else:
        # Bug fix: the old code left `device` unset (e.g. None) when the mps
        # backend existed but was unavailable, crashing torch.device(); the
        # bare `except` also masked unrelated errors.
        device = "cpu"
    return torch.device(device)
def setup_random_seed(seed: int, is_deterministic: bool = True) -> None:
    """Seed every RNG used during training (python, numpy, torch, CUDA).

    Apply this function early on the training script.

    Args:
        seed (int): Integer indicating the desired seed.
        is_deterministic (bool, optional): Set deterministic flag of CUDNN.
            Defaults to True.
    """
    # Seed the three framework RNGs in the same order as before.
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
    if is_deterministic is True:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def count_parameters(model: nn.Module) -> int:
    """Count the trainable parameters of a model.

    Args:
        model (nn.Module): Model in question.

    Returns:
        int: Number of parameters with requires_grad set.
    """
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
def load_yaml(filepath: str) -> dict:
    """Load a yaml file, mainly for config files.

    Args:
        filepath (str): Path to the config file.

    Raises:
        yaml.YAMLError: If the file cannot be parsed.

    Returns:
        dict: Training configs.
    """
    with open(filepath, "r") as stream:
        try:
            parsed = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            raise exc
        return parsed
| 3,144 | 27.590909 | 93 | py |
biokge | biokge-main/scripts/benchmarking/triple_classification.py | import argparse
import random
import numpy
import sklearn
import torch
import kge
import kge.model
def evaluate(model_path):
    """Benchmark a trained LibKGE model on per-relation triple classification.

    For each relation, uniform negative (subject, object) pairs that are
    non-edges in every split are sampled, the model scores positives and
    negatives on the validation/test splits, and AUROC / AUPRC / MAP are
    printed averaged over relations.
    """
    # Bug fix: `import sklearn` alone does not bring in the `metrics`
    # submodule on modern scikit-learn; import it explicitly so the
    # sklearn.metrics.* calls below resolve.
    import sklearn.metrics

    # Load model
    model = kge.model.KgeModel.create_from(kge.util.io.load_checkpoint(model_path))

    # Load data
    dataset = model.dataset
    train_triples = dataset.load_triples("train")
    valid_triples = dataset.load_triples("valid")
    test_triples = dataset.load_triples("test")

    # Set seed so negative sampling is reproducible
    random.seed(42)

    def get_metrics_per_relation(relation):
        valid_metrics = {}
        test_metrics = {}

        def get_adjacency_matrix(triples):
            # Boolean entity x entity edge matrix restricted to `relation`.
            matrix = numpy.zeros(
                (dataset._num_entities, dataset._num_entities),
                dtype=bool,
            )
            matrix[
                triples[triples[:, 1] == relation, 0],
                triples[triples[:, 1] == relation, 2],
            ] = 1
            return matrix

        # Create negative samples
        def get_negatives(triples, others_triples, others_negatives, ratio=1):
            adjacency_matrix = get_adjacency_matrix(triples)
            adjacency_others = [
                get_adjacency_matrix(other_triples) for other_triples in others_triples
            ]
            count = 0
            negatives = set()
            # NOTE(review): `len(triples[:, 1] == relation)` is the length of
            # the boolean mask, i.e. the TOTAL number of triples in `triples`,
            # not the count of positives for this relation. Since the
            # positives used below are also the full split, the sets stay
            # balanced, but confirm this sampling budget is intentional.
            while count != int(len(triples[:, 1] == relation) * ratio):
                subject = random.randrange(dataset._num_entities)
                object = random.randrange(dataset._num_entities)
                other_relation = random.randrange(dataset._num_relations)
                # Accept only pairs that are non-edges for `relation` in every
                # split and not already sampled anywhere.
                if (
                    not adjacency_matrix[subject, object]
                    and all(
                        [
                            not adjacency_other[subject, object]
                            for adjacency_other in adjacency_others
                        ]
                    )
                    and (subject, other_relation, object) not in negatives
                    and all(
                        [
                            (subject, other_relation, object) not in other_negatives
                            for other_negatives in others_negatives
                        ]
                    )
                ):
                    negatives.add((subject, other_relation, object))
                    count += 1
            return negatives

        valid_negatives = get_negatives(
            valid_triples, [train_triples, test_triples], []
        )
        test_negatives = get_negatives(
            test_triples,
            [train_triples, valid_triples],
            [valid_negatives],
        )

        # Stack
        valid_negatives = torch.stack(
            [
                torch.tensor([subject, relation, object])
                for subject, relation, object in valid_negatives
            ]
        )
        test_negatives = torch.stack(
            [
                torch.tensor([subject, relation, object])
                for subject, relation, object in test_negatives
            ]
        )

        # Save the sampled negatives for inspection/reuse
        numpy.savetxt(
            "valid_negatives.del",
            valid_negatives,
            fmt="%i",
            delimiter="\t",
        )
        numpy.savetxt(
            "test_negatives.del",
            test_negatives,
            fmt="%i",
            delimiter="\t",
        )

        # Combine positives and negatives
        relation_valid_triples = torch.vstack([valid_triples, valid_negatives])
        relation_test_triples = torch.vstack([test_triples, test_negatives])
        # torch.cat replaces torch.concatenate (that alias only exists in
        # newer torch releases); behavior is identical.
        relation_valid_trues = torch.cat(
            [
                torch.full((len(valid_triples),), 1),
                torch.full((len(valid_negatives),), 0),
            ]
        )
        relation_test_trues = torch.cat(
            [
                torch.full((len(test_triples),), 1),
                torch.full((len(test_negatives),), 0),
            ]
        )

        # Get scores
        relation_valid_scores = model.score_spo(
            relation_valid_triples[:, 0],
            relation_valid_triples[:, 1],
            relation_valid_triples[:, 2],
            "o",
        ).detach()
        relation_test_scores = model.score_spo(
            relation_test_triples[:, 0],
            relation_test_triples[:, 1],
            relation_test_triples[:, 2],
            "o",
        ).detach()

        ## Calculate metrics
        # AUROC
        valid_metrics["auc_roc"] = sklearn.metrics.roc_auc_score(
            relation_valid_trues, relation_valid_scores
        )
        test_metrics["auc_roc"] = sklearn.metrics.roc_auc_score(
            relation_test_trues, relation_test_scores
        )
        # AUPRC
        relation_valid_prc = sklearn.metrics.precision_recall_curve(
            relation_valid_trues, relation_valid_scores
        )
        relation_test_prc = sklearn.metrics.precision_recall_curve(
            relation_test_trues, relation_test_scores
        )
        valid_metrics["auc_prc"] = sklearn.metrics.auc(
            relation_valid_prc[1], relation_valid_prc[0]
        )
        test_metrics["auc_prc"] = sklearn.metrics.auc(
            relation_test_prc[1], relation_test_prc[0]
        )
        # MAP
        valid_metrics["map"] = sklearn.metrics.average_precision_score(
            relation_valid_trues, relation_valid_scores
        )
        test_metrics["map"] = sklearn.metrics.average_precision_score(
            relation_test_trues, relation_test_scores
        )

        return valid_metrics, test_metrics

    # Get metrics per relation
    metrics = [
        get_metrics_per_relation(relation) for relation in range(dataset._num_relations)
    ]

    # Print relation average
    print(
        f"Validation AUROC = {sum([valid_metrics['auc_roc'] for valid_metrics, _ in metrics]) / dataset._num_relations}"
    )
    print(
        f"Testing AUROC = {sum([test_metrics['auc_roc'] for _, test_metrics in metrics]) / dataset._num_relations}"
    )
    print(
        f"Validation AUPRC = {sum([valid_metrics['auc_prc'] for valid_metrics, _ in metrics]) / dataset._num_relations}"
    )
    print(
        f"Testing AUPRC = {sum([test_metrics['auc_prc'] for _, test_metrics in metrics]) / dataset._num_relations}"
    )
    print(
        f"Validation MAP = {sum([valid_metrics['map'] for valid_metrics, _ in metrics]) / dataset._num_relations}"
    )
    print(
        f"Testing MAP = {sum([test_metrics['map'] for _, test_metrics in metrics]) / dataset._num_relations}"
    )
if __name__ == "__main__":
    # CLI entry point: score a saved model checkpoint.
    cli = argparse.ArgumentParser(
        description="Protein Knowledge Graph Embedding Project: benchmark triple classification"
    )
    cli.add_argument("--model", type=str)
    cli_args = cli.parse_args()
    evaluate(cli_args.model)
| 6,796 | 31.061321 | 121 | py |
BIFI | BIFI-main/utils/fairseq/setup.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from setuptools import setup, find_packages, Extension
import sys
# fairseq relies on Python 3.6+ features (f-strings etc.); fail fast otherwise.
if sys.version_info < (3, 6):
    sys.exit('Sorry, Python >= 3.6 is required for fairseq.')
# The README doubles as the PyPI long description.
with open('README.md') as f:
    readme = f.read()
# macOS (clang) needs libc++ selected explicitly; elsewhere plain C++11 is fine.
if sys.platform == 'darwin':
    extra_compile_args = ['-stdlib=libc++', '-O3']
else:
    extra_compile_args = ['-std=c++11', '-O3']
class NumpyExtension(Extension):
    """setuptools Extension that appends NumPy's include directory lazily,
    so numpy only needs to be importable at build time.
    Source: https://stackoverflow.com/a/54128391"""

    def __init__(self, *args, **kwargs):
        # Holds caller-supplied include dirs; NumPy's dir is added on read.
        self.__user_include_dirs = []
        super().__init__(*args, **kwargs)

    @property
    def include_dirs(self):
        # Import numpy here (not at module import time) so plain
        # `python setup.py --help` works before numpy is installed.
        import numpy
        return self.__user_include_dirs + [numpy.get_include()]

    @include_dirs.setter
    def include_dirs(self, dirs):
        self.__user_include_dirs = dirs
# Compiled extension modules bundled with fairseq.
extensions = [
    Extension(
        'fairseq.libbleu',
        sources=[
            'fairseq/clib/libbleu/libbleu.cpp',
            'fairseq/clib/libbleu/module.cpp',
        ],
        extra_compile_args=extra_compile_args,
    ),
    # The Cython extensions need NumPy headers at build time, hence
    # NumpyExtension (which resolves numpy.get_include() lazily).
    NumpyExtension(
        'fairseq.data.data_utils_fast',
        sources=['fairseq/data/data_utils_fast.pyx'],
        language='c++',
        extra_compile_args=extra_compile_args,
    ),
    NumpyExtension(
        'fairseq.data.token_block_utils_fast',
        sources=['fairseq/data/token_block_utils_fast.pyx'],
        language='c++',
        extra_compile_args=extra_compile_args,
    ),
]
cmdclass = {}
try:
    # torch is not available when generating docs
    from torch.utils import cpp_extension
    extensions.extend([
        cpp_extension.CppExtension(
            'fairseq.libnat',
            sources=[
                'fairseq/clib/libnat/edit_dist.cpp',
            ],
        )
    ])
    # Build the CUDA variant only when a CUDA toolkit is installed.
    if 'CUDA_HOME' in os.environ:
        extensions.extend([
            cpp_extension.CppExtension(
                'fairseq.libnat_cuda',
                sources=[
                    'fairseq/clib/libnat_cuda/edit_dist.cu',
                    'fairseq/clib/libnat_cuda/binding.cpp'
                ],
            )])
    cmdclass['build_ext'] = cpp_extension.BuildExtension
except ImportError:
    pass
if 'READTHEDOCS' in os.environ:
    # don't build extensions when generating docs
    extensions = []
    if 'build_ext' in cmdclass:
        del cmdclass['build_ext']
    # use CPU build of PyTorch
    dependency_links = [
        'https://download.pytorch.org/whl/cpu/torch-1.3.0%2Bcpu-cp36-cp36m-linux_x86_64.whl'
    ]
else:
    dependency_links = []
# `python setup.py clean` also removes the generated Cython artifacts.
if 'clean' in sys.argv[1:]:
    # Source: https://bit.ly/2NLVsgE
    print("deleting Cython files...")
    import subprocess
    subprocess.run(['rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd'], shell=True)
setup(
    name='fairseq',
    version='0.9.0',
    description='Facebook AI Research Sequence-to-Sequence Toolkit',
    url='https://github.com/pytorch/fairseq',
    classifiers=[
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
    ],
    long_description=readme,
    long_description_content_type='text/markdown',
    # Build-time requirements (Cython/NumPy are needed to compile the .pyx extensions).
    setup_requires=[
        'cython',
        'numpy',
        'setuptools>=18.0',
    ],
    install_requires=[
        'cffi',
        'cython',
        'numpy',
        'regex',
        'sacrebleu',
        'torch',
        'tqdm',
    ],
    dependency_links=dependency_links,
    packages=find_packages(exclude=['scripts', 'tests']),
    ext_modules=extensions,
    test_suite='tests',
    # Console entry points for the fairseq command-line tools.
    entry_points={
        'console_scripts': [
            'fairseq-eval-lm = fairseq_cli.eval_lm:cli_main',
            'fairseq-generate = fairseq_cli.generate:cli_main',
            'fairseq-interactive = fairseq_cli.interactive:cli_main',
            'fairseq-preprocess = fairseq_cli.preprocess:cli_main',
            'fairseq-score = fairseq_cli.score:cli_main',
            'fairseq-train = fairseq_cli.train:cli_main',
            'fairseq-validate = fairseq_cli.validate:cli_main',
        ],
    },
    cmdclass=cmdclass,
    zip_safe=False,
)
| 4,365 | 25.785276 | 101 | py |
BIFI | BIFI-main/utils/fairseq/hubconf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from fairseq.hub_utils import BPEHubInterface as bpe # noqa
from fairseq.hub_utils import TokenizerHubInterface as tokenizer # noqa
from fairseq.models import MODEL_REGISTRY
# Packages torch.hub will verify/install before loading fairseq models.
dependencies = [
    'numpy',
    'regex',
    'requests',
    'torch',
]
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
    import fairseq.data.token_block_utils_fast
except (ImportError, ModuleNotFoundError):
    try:
        import cython
        import os
        from setuptools import sandbox
        sandbox.run_setup(
            os.path.join(os.path.dirname(__file__), 'setup.py'),
            ['build_ext', '--inplace'],
        )
    except (ImportError, ModuleNotFoundError):
        print(
            'Unable to build Cython components. Please make sure Cython is '
            'installed if the torch.hub model you are loading depends on it.'
        )
# Expose every registered model's pretrained checkpoints as torch.hub entry
# points, e.g. torch.hub.load('pytorch/fairseq', '<model_name>').
for _model_type, _cls in MODEL_REGISTRY.items():
    for model_name in _cls.hub_models().keys():
        globals()[model_name] = functools.partial(
            _cls.from_pretrained,
            model_name,
        )
    # to simplify the interface we only expose named models
    # globals()[_model_type] = _cls.from_pretrained
| 1,432 | 28.244898 | 78 | py |
BIFI | BIFI-main/utils/fairseq/examples/wav2vec/vq-wav2vec_featurize.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import pprint
import glob, os, argparse
import torch
from torch import nn
try:
import tqdm
except:
print("Install tqdm to use --log-format=tqdm")
from fairseq.models.wav2vec import Wav2VecModel
import tqdm
import soundfile as sf
from torch.utils.data import DataLoader
import os.path as osp
class FilesDataset:
    """Map-style dataset over 16 kHz audio file paths.

    `labels` may be: a path to a label file (one label per line, aligned
    with `files`), a file extension used to find a per-utterance label file
    next to each audio file, a pre-loaded sequence of labels, or a falsy
    value for no labels.
    """

    def __init__(self, files, labels):
        self.files = files
        if labels and osp.exists(labels):
            # Label file: one label per line, aligned with `files`.
            with open(labels, 'r') as handle:
                self.labels = [row.rstrip() for row in handle]
        else:
            self.labels = labels

    def __len__(self):
        return len(self.files)

    def __getitem__(self, index):
        fname = self.files[index]
        wav, sr = sf.read(fname)
        assert sr == 16000
        samples = torch.from_numpy(wav).float()
        if not self.labels:
            return samples, None
        if isinstance(self.labels, str):
            # `labels` is an extension: read the sibling label file.
            lbl_file = osp.splitext(fname)[0] + "." + self.labels
            with open(lbl_file, 'r') as lblf:
                lbls = lblf.readline()
            assert lbls is not None
            return samples, lbls
        return samples, self.labels[index]

    def collate(self, batch):
        # No padding/stacking: the consumer handles variable lengths itself.
        return batch
class ArgTypes:
    """argparse `type=` helpers with path validation side effects."""

    @staticmethod
    def existing_path(arg):
        """Return `arg` as str after asserting the path exists."""
        path = str(arg)
        assert osp.exists(path), f"File {path} does not exist"
        return path

    @staticmethod
    def mkdir(arg):
        """Return `arg` as str, creating the directory (and parents) first."""
        path = str(arg)
        os.makedirs(path, exist_ok=True)
        return path
class DatasetWriter:
    def __init__(self):
        # Parse CLI args first; the checkpoint is loaded eagerly so model
        # problems surface before any data processing starts.
        self.args = self.load_config()
        pprint.pprint(self.args.__dict__)
        self.model = self.load_model()
    def __getattr__(self, attr):
        # Delegate unknown attributes to the parsed argparse namespace, so
        # e.g. `self.checkpoint` resolves to the --checkpoint CLI option.
        # Only invoked for attributes not found on the instance itself.
        return getattr(self.args, attr)
    def read_manifest(self, fname):
        # Fairseq-style manifest: the first line is the root directory, each
        # following non-empty line is "<relative-path>\t..."; returns the
        # joined absolute-ish paths.
        with open(fname, "r") as fp:
            lines = fp.read().split("\n")
            root = lines.pop(0).strip()
            fnames = [
                osp.join(root, line.split("\t")[0]) for line in lines if len(line) > 0
            ]
        return fnames
    def process_splits(self):
        # Sharding options must be supplied together (both or neither).
        if self.args.shard is not None or self.args.num_shards is not None:
            assert self.args.shard is not None and self.args.num_shards is not None
        for split in self.splits:
            print(split)
            if self.extension == "tsv":
                # A TSV manifest lists the audio files explicitly.
                datadir = osp.join(self.data_dir, f"{split}.{self.extension}")
                print("Reading manifest file: ", datadir)
                files = self.read_manifest(datadir)
            else:
                # Otherwise recursively glob audio files under the split dir.
                datadir = osp.join(self.data_dir, split, f"**/*.{self.extension}")
                files = glob.glob(datadir, recursive=True)
            assert len(files) > 0
            if self.args.shard is not None:
                # Round-robin sharding over the file list.
                files = files[self.args.shard::self.args.num_shards]
            lbls = []
            # One token line per utterance goes to the .src file; labels are
            # collected and written in the same order afterwards.
            with open(self.data_file(split), 'w') as srcf:
                for line, lbl in self.iterate(files):
                    print(line, file=srcf)
                    if self.args.labels:
                        lbls.append(lbl + '\n')
            if self.args.labels:
                assert all(a is not None for a in lbls)
                with open(self.lbl_file(split), 'w') as lblf:
                    lblf.writelines(lbls)
    def iterate(self, files):
        # Yields one (token-string, label) pair per utterance: raw wav ->
        # conv features -> vector-quantizer indices rendered as text.
        data = self.load_data(files)
        for samples in tqdm.tqdm(data, total=len(files)//32):
            for wav, lbl in samples:
                x = wav.unsqueeze(0).float().cuda()
                # Split inputs longer than max_size into roughly equal chunks
                # so the feature extractor fits in GPU memory.
                div = 1
                while x.size(-1) // div > self.args.max_size:
                    div += 1
                xs = x.chunk(div, dim=-1)
                result = []
                for x in xs:
                    torch.cuda.empty_cache()
                    x = self.model.feature_extractor(x)
                    if self.quantize_location == "encoder":
                        # Quantize the encoder (feature-extractor) output.
                        with torch.no_grad():
                            _, idx = self.model.vector_quantizer.forward_idx(x)
                            idx = idx.squeeze(0).cpu()
                    else:
                        # Quantize after the feature aggregator instead.
                        with torch.no_grad():
                            z = self.model.feature_aggregator(x)
                            _, idx = self.model.vector_quantizer.forward_idx(z)
                            idx = idx.squeeze(0).cpu()
                    result.append(idx)
                idx = torch.cat(result, dim=0)
                # One token per timestep; codebook group ids joined by "-".
                yield " ".join("-".join(map(str, a.tolist())) for a in idx), lbl
def lbl_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.lbl{shard_part}")
def data_file(self, name):
shard_part = "" if self.args.shard is None else f".{self.args.shard}"
return osp.join(self.output_dir, f"{name}.src{shard_part}")
def var_file(self):
return osp.join(self.output_dir, f"vars.pt")
    def load_config(self):
        """Define and parse the command-line arguments for this script.

        Returns the parsed :class:`argparse.Namespace`; attribute access on
        the writer falls through to it via ``__getattr__``.
        """
        parser = argparse.ArgumentParser("Vector Quantized wav2vec features")
        # Model Arguments
        parser.add_argument("--checkpoint", type=ArgTypes.existing_path, required=True)
        parser.add_argument("--data-parallel", action="store_true")

        # Output Arguments
        parser.add_argument("--output-dir", type=ArgTypes.mkdir, required=True)

        # Data Arguments
        parser.add_argument("--data-dir", type=ArgTypes.existing_path, required=True)
        parser.add_argument("--splits", type=str, nargs="+", required=True)
        parser.add_argument("--extension", type=str, required=True)
        parser.add_argument("--labels", type=str, required=False)

        # Sharding (both or neither; validated in process_splits).
        parser.add_argument("--shard", type=int, default=None)
        parser.add_argument("--num-shards", type=int, default=None)
        # Max samples per GPU forward pass; longer clips are chunked.
        parser.add_argument("--max-size", type=int, default=1300000)

        # Logger Arguments
        parser.add_argument(
            "--log-format", type=str, choices=["none", "simple", "tqdm"]
        )

        return parser.parse_args()
def load_data(self, fnames):
dataset = FilesDataset(fnames, self.args.labels)
loader = DataLoader(
dataset, batch_size=32, collate_fn=dataset.collate, num_workers=8
)
return loader
    def load_model(self):
        """Load the vq-wav2vec checkpoint and prepare it for inference."""
        # map_location keeps tensors on their stored device mapping.
        cp = torch.load(self.checkpoint, map_location=lambda x, _: x)

        model = Wav2VecModel.build_model(cp["args"], None)

        # Where the quantizer sits in the pipeline; "encoder" is the default
        # for older checkpoints that predate the "vq" argument.
        self.quantize_location = getattr(cp["args"], "vq", "encoder")

        model.load_state_dict(cp["model"])
        model.eval().float()
        model.cuda()

        if self.data_parallel:
            model = nn.DataParallel(model)
        return model
    def __call__(self):
        """Run the full extraction pass and, on shard 0, dump the codebook."""
        self.process_splits()

        # Only one shard needs to write the (shared) codebook embeddings.
        if hasattr(self.model.feature_extractor, "vars") and (self.args.shard is None or self.args.shard == 0):
            vars = (
                self.model.feature_extractor.vars.view(
                    self.model.feature_extractor.banks,
                    self.model.feature_extractor.num_vars,
                    -1,
                )
                .cpu()
                .detach()
            )
            print("writing learned latent variable embeddings: ", vars.shape)
            torch.save(vars, self.var_file())
if __name__ == "__main__":
    # Parse CLI arguments, build the writer, and run the extraction
    # end to end (DatasetWriter.__call__ drives everything).
    write_data = DatasetWriter()
    write_data()
print("Done.") | 7,706 | 29.705179 | 111 | py |
BIFI | BIFI-main/utils/fairseq/examples/wav2vec/wav2vec_featurize.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import soundfile as sf
import numpy as np
import torch
from torch import nn
import tqdm
from fairseq.models.wav2vec import Wav2VecModel
def read_audio(fname):
    """Load an audio file and return its PCM samples and the sample rate.

    Only 16 kHz input is supported; other rates trip the assertion below.
    """
    wav, sr = sf.read(fname)
    assert sr == 16e3, "expected 16 kHz audio, got {} Hz in {}".format(sr, fname)
    # Return the rate that was actually read (always 16e3 here) instead of
    # re-hardcoding the constant, so callers see the true value if the
    # assertion is ever relaxed.
    return wav, sr
class PretrainedWav2VecModel(nn.Module):
    """Thin wrapper that restores a wav2vec checkpoint for inference."""

    def __init__(self, fname):
        super().__init__()

        # Rebuild the model from hyper-parameters stored in the checkpoint.
        checkpoint = torch.load(fname)
        self.args = checkpoint["args"]
        model = Wav2VecModel.build_model(self.args, None)
        model.load_state_dict(checkpoint["model"])
        model.eval()

        self.model = model

    def forward(self, x):
        """Return (feature, context) activations for a batch of waveforms."""
        with torch.no_grad():
            z = self.model.feature_extractor(x)
            if isinstance(z, tuple):
                # Some model variants return a tuple; the first element is
                # used as the features here — presumably (features, extra);
                # TODO confirm against Wav2VecModel.
                z = z[0]
            c = self.model.feature_aggregator(z)
        return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
    """Argument parser for the embedding pre-computation script."""

    def __init__(self):
        super().__init__("Pre-compute embeddings for wav2letter++ datasets")

        # Shared settings for the mandatory string-valued options.
        required_str = {"action": "store", "type": str, "required": True}

        self.add_argument("--input", "-i", help="Input Directory", **required_str)
        self.add_argument("--output", "-o", help="Output Directory", **required_str)
        self.add_argument("--model", help="Path to model checkpoint", **required_str)
        self.add_argument("--split", help="Dataset Splits", nargs="+", **required_str)
        self.add_argument(
            "--ext", default="wav", required=False, help="Audio file extension"
        )
        self.add_argument(
            "--no-copy-labels",
            action="store_true",
            help="Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.",
        )
        self.add_argument(
            "--use-feat",
            action="store_true",
            help="Use the feature vector ('z') instead of context vector ('c') for features",
        )
        self.add_argument("--gpu", help="GPU to use", default=0, type=int)
class Prediction():
    """ Lightweight wrapper around a fairspeech embedding model """

    def __init__(self, fname, gpu=0):
        # fname: checkpoint path; gpu: CUDA device index to run on.
        self.gpu = gpu
        self.model = PretrainedWav2VecModel(fname).cuda(gpu)

    def __call__(self, x):
        """Embed a single 1-D numpy waveform; return numpy (z, c) pairs."""
        x = torch.from_numpy(x).float().cuda(self.gpu)
        with torch.no_grad():
            # Add a batch dimension for the model, strip it again on return.
            z, c = self.model(x.unsqueeze(0))

        return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
class H5Writer():
    """ Write features as hdf5 file in wav2letter++ compatible format """

    def __init__(self, fname):
        self.fname = fname
        # Make sure the destination directory exists before writing.
        os.makedirs(os.path.dirname(self.fname), exist_ok=True)

    def write(self, data):
        """Store a (channel, T) feature matrix in wav2letter++ layout."""
        channel, T = data.shape

        with h5py.File(self.fname, "w") as out_ds:
            # wav2letter++ expects time-major, flattened features plus an
            # info record of (frame rate, timesteps, channels).
            data = data.T.flatten()
            out_ds["features"] = data
            out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
    """Given a model and a wav2letter++ dataset, pre-compute and store embeddings

    Args:
        input_root, str :
            Path to the wav2letter++ dataset
        output_root, str :
            Desired output directory. Will be created if non-existent
        split, str :
            Dataset split
    """

    def __init__(self, input_root, output_root, split,
                 model_fname,
                 extension="wav",
                 gpu=0,
                 verbose=False,
                 use_feat=False,
                 ):

        assert os.path.exists(model_fname)

        self.model_fname = model_fname
        self.model = Prediction(self.model_fname, gpu)

        self.input_root = input_root
        self.output_root = output_root
        self.split = split
        self.verbose = verbose
        self.extension = extension
        # use_feat: write the feature vector 'z' instead of context 'c'.
        self.use_feat = use_feat

        assert os.path.exists(self.input_path), \
            "Input path '{}' does not exist".format(self.input_path)

    def _progress(self, iterable, **kwargs):
        """Wrap *iterable* in a tqdm progress bar when verbose."""
        if self.verbose:
            return tqdm.tqdm(iterable, **kwargs)
        return iterable

    def require_output_path(self, fname=None):
        """Create the output directory (for *fname*) if it is missing."""
        path = self.get_output_path(fname)
        os.makedirs(path, exist_ok=True)

    @property
    def input_path(self):
        # <input_root>/<split>
        return self.get_input_path()

    @property
    def output_path(self):
        # <output_root>/<split>
        return self.get_output_path()

    def get_input_path(self, fname=None):
        """Input split dir, or a file inside it when *fname* is given."""
        if fname is None:
            return os.path.join(self.input_root, self.split)
        return os.path.join(self.get_input_path(), fname)

    def get_output_path(self, fname=None):
        """Output split dir, or a file inside it when *fname* is given."""
        if fname is None:
            return os.path.join(self.output_root, self.split)
        return os.path.join(self.get_output_path(), fname)

    def copy_labels(self):
        """Copy every non-audio (label) file from input to output split dir."""
        self.require_output_path()

        # Anything whose name does not contain the audio extension is
        # treated as a label/metadata file.
        labels = list(filter(lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))))
        for fname in tqdm.tqdm(labels):
            copy(fname, self.output_path)

    @property
    def input_fnames(self):
        # Sorted for a deterministic processing order.
        return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))

    def __len__(self):
        return len(self.input_fnames)

    def write_features(self):
        """Embed every audio file and write one .h5context file per input."""

        paths = self.input_fnames

        # Mirror each input file name into the output dir with the audio
        # extension replaced by ".h5context".
        fnames_context = map(lambda x: os.path.join(self.output_path, x.replace("." + self.extension, ".h5context")), \
            map(os.path.basename, paths))

        for name, target_fname in self._progress(zip(paths, fnames_context), total=len(self)):
            wav, sr = read_audio(name)
            z, c = self.model(wav)
            feat = z if self.use_feat else c
            writer = H5Writer(target_fname)
            writer.write(feat)

    def __repr__(self):

        return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
            n_files=len(self), **self.__dict__)
if __name__ == "__main__":

    args = EmbeddingWriterConfig().parse_args()

    # Process each requested split independently with a fresh writer.
    for split in args.split:

        writer = EmbeddingDatasetWriter(
            input_root=args.input,
            output_root=args.output,
            split=split,
            model_fname=args.model,
            gpu=args.gpu,
            extension=args.ext,
            use_feat=args.use_feat,
        )

        print(writer)
        writer.require_output_path()

        print("Writing Features...")
        writer.write_features()
        print("Done.")

        # Labels are copied alongside the features unless suppressed.
        if not args.no_copy_labels:
            print("Copying label data...")
            writer.copy_labels()
            print("Done.")
| 7,102 | 28.970464 | 135 | py |
BIFI | BIFI-main/utils/fairseq/examples/translation_moe/src/mean_pool_gating_network.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
class MeanPoolGatingNetwork(torch.nn.Module):
    """A simple mean-pooling gating network for selecting experts.

    This module applies mean pooling over an encoder's output and returns
    reponsibilities for each expert. The encoder format is expected to match
    :class:`fairseq.models.transformer.TransformerEncoder`.
    """

    def __init__(self, embed_dim, num_experts, dropout=None):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_experts = num_experts

        # Two-layer MLP head: embed_dim -> embed_dim -> num_experts.
        self.fc1 = torch.nn.Linear(embed_dim, embed_dim)
        self.dropout = torch.nn.Dropout(dropout) if dropout is not None else None
        self.fc2 = torch.nn.Linear(embed_dim, num_experts)

    def forward(self, encoder_out):
        """Return per-expert log-responsibilities of shape (batch, num_experts)."""
        well_formed = (
            hasattr(encoder_out, 'encoder_out')
            and hasattr(encoder_out, 'encoder_padding_mask')
            and encoder_out.encoder_out.size(2) == self.embed_dim
        )
        if not well_formed:
            raise ValueError('Unexpected format for encoder_out')

        # mean pooling over time
        padding_mask = encoder_out.encoder_padding_mask  # B x T
        hidden = encoder_out.encoder_out.transpose(0, 1)  # B x T x C
        if padding_mask is None:
            pooled = hidden.mean(dim=1)
        else:
            # Zero padded positions, then average over real tokens only.
            hidden = hidden.clone()  # required because of transpose above
            hidden[padding_mask] = 0
            ntokens = (~padding_mask).sum(dim=1, keepdim=True)
            pooled = hidden.sum(dim=1) / ntokens.type_as(hidden)

        pooled = torch.tanh(self.fc1(pooled))
        if self.dropout is not None:
            pooled = self.dropout(pooled)
        logits = self.fc2(pooled)
        return F.log_softmax(logits, dim=-1, dtype=torch.float32).type_as(logits)
| 2,007 | 38.372549 | 84 | py |
BIFI | BIFI-main/utils/fairseq/examples/translation_moe/src/logsumexp_moe.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class LogSumExpMoE(torch.autograd.Function):
    """Standard LogSumExp forward pass, but use *posterior* for the backward.

    See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
    (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.
    """

    @staticmethod
    def forward(ctx, logp, posterior, dim=-1):
        # Stash the externally-computed posterior; it replaces the true
        # softmax(logp) term in the gradient.
        ctx.save_for_backward(posterior)
        ctx.dim = dim
        return torch.logsumexp(logp, dim=dim)

    @staticmethod
    def backward(ctx, grad_output):
        (posterior,) = ctx.saved_tensors
        # d logsumexp / d logp would normally be softmax(logp); the stored
        # posterior is used instead.  No gradients flow to *posterior*
        # or *dim*, hence the two None slots.
        return grad_output.unsqueeze(ctx.dim) * posterior, None, None
| 835 | 29.962963 | 78 | py |
BIFI | BIFI-main/utils/fairseq/examples/translation_moe/src/translation_moe.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import metrics, utils
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask
from .logsumexp_moe import LogSumExpMoE
from .mean_pool_gating_network import MeanPoolGatingNetwork
@register_task('translation_moe')
class TranslationMoETask(TranslationTask):
    """
    Translation task for Mixture of Experts (MoE) models.

    See `"Mixture Models for Diverse Machine Translation: Tricks of the Trade"
    (Shen et al., 2019) <https://arxiv.org/abs/1902.07816>`_.

    Args:
        src_dict (~fairseq.data.Dictionary): dictionary for the source language
        tgt_dict (~fairseq.data.Dictionary): dictionary for the target language

    .. note::

        The translation task is compatible with :mod:`fairseq-train`,
        :mod:`fairseq-generate` and :mod:`fairseq-interactive`.

    The translation task provides the following additional command-line
    arguments:

    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        # fmt: off
        TranslationTask.add_args(parser)
        # --method naming: s/h = soft/hard selection, lp/up = learned/uniform prior.
        parser.add_argument('--method', default='hMoEup',
                            choices=['sMoElp', 'sMoEup', 'hMoElp', 'hMoEup'])
        parser.add_argument('--num-experts', default=3, type=int, metavar='N',
                            help='number of experts')
        parser.add_argument('--mean-pool-gating-network', action='store_true',
                            help='use a simple mean-pooling gating network')
        parser.add_argument('--mean-pool-gating-network-dropout', type=float,
                            help='dropout for mean-pooling gating network')
        parser.add_argument('--mean-pool-gating-network-encoder-dim', type=float,
                            help='encoder output dim for mean-pooling gating network')
        parser.add_argument('--gen-expert', type=int, default=0,
                            help='which expert to use for generation')
        # fmt: on

    def __init__(self, args, src_dict, tgt_dict):
        """Decode --method into flags and reserve one indicator token per expert."""
        if args.method == 'sMoElp':
            # soft MoE with learned prior
            self.uniform_prior = False
            self.hard_selection = False
        elif args.method == 'sMoEup':
            # soft MoE with uniform prior
            self.uniform_prior = True
            self.hard_selection = False
        elif args.method == 'hMoElp':
            # hard MoE with learned prior
            self.uniform_prior = False
            self.hard_selection = True
        elif args.method == 'hMoEup':
            # hard MoE with uniform prior
            self.uniform_prior = True
            self.hard_selection = True

        # add indicator tokens for each expert
        for i in range(args.num_experts):
            # add to both dictionaries in case we're sharing embeddings
            src_dict.add_symbol('<expert_{}>'.format(i))
            tgt_dict.add_symbol('<expert_{}>'.format(i))

        super().__init__(args, src_dict, tgt_dict)

    def build_model(self, args):
        """Build the model; attach a gating network when the prior is learned."""
        from fairseq import models
        model = models.build_model(args, self)
        if not self.uniform_prior and not hasattr(model, 'gating_network'):
            if self.args.mean_pool_gating_network:
                if getattr(args, 'mean_pool_gating_network_encoder_dim', None):
                    encoder_dim = args.mean_pool_gating_network_encoder_dim
                elif getattr(args, 'encoder_embed_dim', None):
                    # assume that encoder_embed_dim is the encoder's output dimension
                    encoder_dim = args.encoder_embed_dim
                else:
                    raise ValueError('Must specify --mean-pool-gating-network-encoder-dim')

                if getattr(args, 'mean_pool_gating_network_dropout', None):
                    dropout = args.mean_pool_gating_network_dropout
                elif getattr(args, 'dropout', None):
                    dropout = args.dropout
                else:
                    raise ValueError('Must specify --mean-pool-gating-network-dropout')

                model.gating_network = MeanPoolGatingNetwork(
                    encoder_dim, args.num_experts, dropout,
                )
            else:
                raise ValueError(
                    'translation_moe task with learned prior requires the model to '
                    'have a gating network; try using --mean-pool-gating-network'
                )
        return model

    def expert_index(self, i):
        """Map expert id *i* to its indicator token id in the target dictionary."""
        return i + self.tgt_dict.index('<expert_0>')

    def _get_loss(self, sample, model, criterion):
        """Compute the MoE loss: posterior (no dropout) then expected loss."""
        assert hasattr(criterion, 'compute_loss'), \
            'translation_moe task requires the criterion to implement the compute_loss() method'

        k = self.args.num_experts
        bsz = sample['target'].size(0)

        def get_lprob_y(encoder_out, prev_output_tokens_k):
            # Per-sentence log-likelihood of the target under one expert
            # (expert identity is carried by the first decoder input token).
            net_output = model.decoder(
                prev_output_tokens=prev_output_tokens_k,
                encoder_out=encoder_out,
            )
            loss, _ = criterion.compute_loss(model, net_output, sample, reduce=False)
            loss = loss.view(bsz, -1)
            return -loss.sum(dim=1, keepdim=True)  # -> B x 1

        def get_lprob_yz(winners=None):
            # Joint log p(y, z); all experts when winners is None, otherwise
            # only the selected (winning) expert per sentence.
            encoder_out = model.encoder(
                src_tokens=sample['net_input']['src_tokens'],
                src_lengths=sample['net_input']['src_lengths'],
            )

            if winners is None:
                lprob_y = []
                for i in range(k):
                    prev_output_tokens_k = sample['net_input']['prev_output_tokens'].clone()
                    assert not prev_output_tokens_k.requires_grad
                    prev_output_tokens_k[:, 0] = self.expert_index(i)
                    lprob_y.append(get_lprob_y(encoder_out, prev_output_tokens_k))
                lprob_y = torch.cat(lprob_y, dim=1)  # -> B x K
            else:
                prev_output_tokens_k = sample['net_input']['prev_output_tokens'].clone()
                prev_output_tokens_k[:, 0] = self.expert_index(winners)
                lprob_y = get_lprob_y(encoder_out, prev_output_tokens_k)  # -> B

            if self.uniform_prior:
                lprob_yz = lprob_y
            else:
                lprob_z = model.gating_network(encoder_out)  # B x K
                if winners is not None:
                    lprob_z = lprob_z.gather(dim=1, index=winners.unsqueeze(-1))
                lprob_yz = lprob_y + lprob_z.type_as(lprob_y)  # B x K

            return lprob_yz

        # compute responsibilities without dropout
        with utils.eval(model):  # disable dropout
            with torch.no_grad():  # disable autograd
                lprob_yz = get_lprob_yz()  # B x K
                prob_z_xy = torch.nn.functional.softmax(lprob_yz, dim=1)
        assert not prob_z_xy.requires_grad

        # compute loss with dropout
        if self.hard_selection:
            winners = prob_z_xy.max(dim=1)[1]
            loss = -get_lprob_yz(winners)
        else:
            lprob_yz = get_lprob_yz()  # B x K
            loss = -LogSumExpMoE.apply(lprob_yz, prob_z_xy, 1)

        loss = loss.sum()
        sample_size = sample['target'].size(0) if self.args.sentence_avg else sample['ntokens']
        logging_output = {
            'loss': utils.item(loss.data),
            'ntokens': sample['ntokens'],
            'nsentences': bsz,
            'sample_size': sample_size,
            # Per-expert posterior mass, aggregated in reduce_metrics.
            'posterior': prob_z_xy.float().sum(dim=0).cpu(),
        }
        return loss, sample_size, logging_output

    def train_step(self, sample, model, criterion, optimizer, update_num, ignore_grad=False):
        """Forward/backward one batch; *ignore_grad* zeroes the loss (dummy batch)."""
        model.train()
        loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
        if ignore_grad:
            loss *= 0
        optimizer.backward(loss)
        return loss, sample_size, logging_output

    def valid_step(self, sample, model, criterion):
        """Evaluate one batch without gradients."""
        model.eval()
        with torch.no_grad():
            loss, sample_size, logging_output = self._get_loss(sample, model, criterion)
        return loss, sample_size, logging_output

    def inference_step(self, generator, models, sample, prefix_tokens=None, expert=None):
        """Generate with a fixed expert (defaults to --gen-expert)."""
        expert = expert or self.args.gen_expert
        with torch.no_grad():
            return generator.generate(
                models,
                sample,
                prefix_tokens=prefix_tokens,
                bos_token=self.expert_index(expert),
            )

    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate logging outputs; also sum the per-expert posterior mass."""
        super().reduce_metrics(logging_outputs, criterion)
        metrics.log_scalar(
            'posterior',
            sum(log['posterior'] for log in logging_outputs if 'posterior' in log)
        )
| 9,078 | 40.268182 | 96 | py |
BIFI | BIFI-main/utils/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import numpy as np
import torch
from fairseq.data import (
data_utils,
Dictionary,
encoders,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
)
from fairseq.tasks import FairseqTask, register_task
@register_task('commonsense_qa')
class CommonsenseQATask(FairseqTask):
    """Task to finetune RoBERTa for Commonsense QA."""

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        parser.add_argument('data', metavar='DIR',
                            help='path to data directory; we load <split>.jsonl')
        parser.add_argument('--init-token', type=int, default=None,
                            help='add token at the beginning of each batch item')
        parser.add_argument('--num-classes', type=int, default=5)

    def __init__(self, args, vocab):
        """Store the vocabulary, register <mask>, and build the BPE encoder."""
        super().__init__(args)
        self.vocab = vocab
        self.mask = vocab.add_symbol('<mask>')

        self.bpe = encoders.build_bpe(args)

    @classmethod
    def load_dictionary(cls, filename):
        """Load the dictionary from the filename

        Args:
            filename (str): the filename
        """
        dictionary = Dictionary.load(filename)
        dictionary.add_symbol('<mask>')
        return dictionary

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Validate the criterion and build the task from dict.txt."""
        assert args.criterion == 'sentence_ranking', 'Must set --criterion=sentence_ranking'

        # load data and label dictionaries
        vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(vocab)))

        return cls(args, vocab)

    def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """

        def binarize(s, append_bos=False):
            # BPE-encode then map to dictionary ids; optionally prepend
            # --init-token (e.g. RoBERTa's <s>).
            if self.bpe is not None:
                s = self.bpe.encode(s)
            tokens = self.vocab.encode_line(
                s, append_eos=True, add_if_not_exist=False,
            ).long()
            if append_bos and self.args.init_token is not None:
                tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
            return tokens

        if data_path is None:
            data_path = os.path.join(self.args.data, split + '.jsonl')
        if not os.path.exists(data_path):
            raise FileNotFoundError('Cannot find data: {}'.format(data_path))

        # One token/length list per answer choice.
        src_tokens = [[] for i in range(self.args.num_classes)]
        src_lengths = [[] for i in range(self.args.num_classes)]
        labels = []

        with open(data_path) as h:
            for line in h:
                example = json.loads(line.strip())
                if 'answerKey' in example:
                    # 'A'..'E' -> 0..4; absent for unlabeled (test) data.
                    label = ord(example['answerKey']) - ord('A')
                    labels.append(label)
                question = example['question']['stem']
                assert len(example['question']['choices']) == self.args.num_classes
                # format: `<s> Q: Where would I not want a fox? </s> A: hen house </s>`
                question = 'Q: ' + question
                question_toks = binarize(question, append_bos=True)
                for i, choice in enumerate(example['question']['choices']):
                    src = 'A: ' + choice['text']
                    src_bin = torch.cat([question_toks, binarize(src)])
                    src_tokens[i].append(src_bin)
                    src_lengths[i].append(len(src_bin))
        assert all(len(src_tokens[0]) == len(src_tokens[i]) for i in range(self.args.num_classes))
        assert len(src_tokens[0]) == len(src_lengths[0])
        assert len(labels) == 0 or len(labels) == len(src_tokens[0])

        for i in range(self.args.num_classes):
            src_lengths[i] = np.array(src_lengths[i])
            src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i])
            src_lengths[i] = ListDataset(src_lengths[i])

        dataset = {
            'id': IdDataset(),
            'nsentences': NumSamplesDataset(),
            'ntokens': NumelDataset(src_tokens[0], reduce=True),
        }

        # One padded net_input per answer choice, as expected by the
        # sentence_ranking criterion.
        for i in range(self.args.num_classes):
            dataset.update({
                'net_input{}'.format(i + 1): {
                    'src_tokens': RightPadDataset(
                        src_tokens[i],
                        pad_idx=self.source_dictionary.pad(),
                    ),
                    'src_lengths': src_lengths[i],
                }
            })

        if len(labels) > 0:
            dataset.update({'target': RawLabelDataset(labels)})

        dataset = NestedDictionaryDataset(
            dataset,
            sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
        )

        with data_utils.numpy_seed(self.args.seed):
            dataset = SortDataset(
                dataset,
                # shuffle
                sort_order=[np.random.permutation(len(dataset))],
            )

        print('| Loaded {} with {} samples'.format(split, len(dataset)))

        self.datasets[split] = dataset
        return self.datasets[split]

    def build_model(self, args):
        """Build the model with a single-logit head used for ranking choices."""
        from fairseq import models
        model = models.build_model(args, self)

        model.register_classification_head(
            'sentence_classification_head',
            num_classes=1,
        )

        return model

    @property
    def source_dictionary(self):
        return self.vocab

    @property
    def target_dictionary(self):
        return self.vocab
| 5,921 | 32.84 | 103 | py |
BIFI | BIFI-main/utils/fairseq/examples/roberta/wsc/wsc_task.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import tempfile
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import (
data_utils,
Dictionary,
encoders,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
PadDataset,
SortDataset,
)
from fairseq.tasks import FairseqTask, register_task
from . import wsc_utils
@register_task('wsc')
class WSCTask(FairseqTask):
    """Task to finetune RoBERTa for Winograd Schemas."""

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        parser.add_argument('data', metavar='DIR',
                            help='path to data directory; we load <split>.jsonl')
        parser.add_argument('--init-token', type=int, default=None,
                            help='add token at the beginning of each batch item')

    def __init__(self, args, vocab):
        """Store the vocab, register <mask>, and build BPE/tokenizer helpers."""
        super().__init__(args)
        self.vocab = vocab
        self.mask = vocab.add_symbol('<mask>')

        self.bpe = encoders.build_bpe(args)
        self.tokenizer = encoders.build_tokenizer(args)

        # hack to handle GPT-2 BPE, which includes leading spaces
        if args.bpe == 'gpt2':
            self.leading_space = True
            self.trailing_space = False
        else:
            self.leading_space = False
            self.trailing_space = True

    @classmethod
    def load_dictionary(cls, filename):
        """Load the dictionary from the filename

        Args:
            filename (str): the filename
        """
        dictionary = Dictionary.load(filename)
        dictionary.add_symbol('<mask>')
        return dictionary

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Validate the criterion and build the task from dict.txt."""
        assert args.criterion == 'wsc', 'Must set --criterion=wsc'

        # load data and label dictionaries
        vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(vocab)))

        return cls(args, vocab)

    def binarize(self, s: str, append_eos: bool = False):
        """Tokenize + BPE-encode *s* into dictionary ids, with optional BOS/EOS."""
        if self.tokenizer is not None:
            s = self.tokenizer.encode(s)
        if self.bpe is not None:
            s = self.bpe.encode(s)
        tokens = self.vocab.encode_line(
            s, append_eos=append_eos, add_if_not_exist=False,
        ).long()
        if self.args.init_token is not None:
            tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
        return tokens

    def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
        """Binarize prefix+txt+suffix; return (tokens, bool mask over txt's span)."""
        toks = self.binarize(
            prefix + leading_space + txt + trailing_space + suffix,
            append_eos=True,
        )
        mask = torch.zeros_like(toks, dtype=torch.bool)
        # The candidate span starts right after the binarized prefix.
        mask_start = len(self.binarize(prefix))
        mask_size = len(self.binarize(leading_space + txt))
        mask[mask_start:mask_start + mask_size] = 1
        return toks, mask

    def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        if data_path is None:
            data_path = os.path.join(self.args.data, split + '.jsonl')
        if not os.path.exists(data_path):
            raise FileNotFoundError('Cannot find data: {}'.format(data_path))

        query_tokens = []
        query_masks = []
        query_lengths = []
        candidate_tokens = []
        candidate_masks = []
        candidate_lengths = []
        labels = []

        for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path):
            prefix = sentence[:pronoun_span.start].text
            suffix = sentence[pronoun_span.end:].text_with_ws

            # spaCy spans include trailing spaces, but we need to know about
            # leading spaces for the GPT-2 BPE
            leading_space = ' ' if sentence[:pronoun_span.start].text_with_ws.endswith(' ') else ''
            trailing_space = ' ' if pronoun_span.text_with_ws.endswith(' ') else ''

            # get noun phrases, excluding pronouns and anything overlapping with the query
            cand_spans = wsc_utils.filter_noun_chunks(
                wsc_utils.extended_noun_chunks(sentence),
                exclude_pronouns=True,
                exclude_query=query,
                exact_match=False,
            )

            if query is not None:
                query_toks, query_mask = self.binarize_with_mask(
                    query, prefix, suffix, leading_space, trailing_space
                )
                query_len = len(query_toks)
            else:
                query_toks, query_mask, query_len = None, None, 0

            query_tokens.append(query_toks)
            query_masks.append(query_mask)
            query_lengths.append(query_len)

            # Each candidate gets its own copy of the sentence with the
            # pronoun replaced by the candidate text.
            cand_toks, cand_masks = [], []
            for cand_span in cand_spans:
                toks, mask = self.binarize_with_mask(
                    cand_span.text, prefix, suffix, leading_space, trailing_space,
                )
                cand_toks.append(toks)
                cand_masks.append(mask)

            # collate candidates
            cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
            cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
            assert cand_toks.size() == cand_masks.size()

            candidate_tokens.append(cand_toks)
            candidate_masks.append(cand_masks)
            candidate_lengths.append(cand_toks.size(1))

            labels.append(label)

        query_lengths = np.array(query_lengths)
        query_tokens = ListDataset(query_tokens, query_lengths)
        query_masks = ListDataset(query_masks, query_lengths)

        candidate_lengths = np.array(candidate_lengths)
        candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
        candidate_masks = ListDataset(candidate_masks, candidate_lengths)

        labels = ListDataset(labels, [1]*len(labels))

        dataset = {
            'id': IdDataset(),
            'query_tokens': query_tokens,
            'query_masks': query_masks,
            'candidate_tokens': candidate_tokens,
            'candidate_masks': candidate_masks,
            'labels': labels,
            'nsentences': NumSamplesDataset(),
            'ntokens': NumelDataset(query_tokens, reduce=True),
        }

        nested_dataset = NestedDictionaryDataset(
            dataset,
            sizes=[query_lengths],
        )

        with data_utils.numpy_seed(self.args.seed):
            shuffle = np.random.permutation(len(query_tokens))
        dataset = SortDataset(
            nested_dataset,
            # shuffle
            sort_order=[shuffle],
        )

        if return_only:
            return dataset

        self.datasets[split] = dataset
        return self.datasets[split]

    def build_dataset_for_inference(self, sample_json):
        """Build a one-example dataset for ad-hoc pronoun disambiguation."""
        with tempfile.NamedTemporaryFile(buffering=0) as h:
            h.write((json.dumps(sample_json) + '\n').encode('utf-8'))
            dataset = self.load_dataset(
                'disambiguate_pronoun',
                data_path=h.name,
                return_only=True,
            )
        return dataset

    def disambiguate_pronoun(self, model, sentence, use_cuda=False):
        """Resolve the marked pronoun in *sentence*.

        Returns a bool (query beats all candidates) when the sentence carries
        a query, otherwise the text of the best-scoring candidate.
        """
        sample_json = wsc_utils.convert_sentence_to_json(sentence)
        dataset = self.build_dataset_for_inference(sample_json)
        sample = dataset.collater([dataset[0]])
        if use_cuda:
            sample = utils.move_to_cuda(sample)

        def get_masked_input(tokens, mask):
            # Replace the span under evaluation with <mask> tokens.
            masked_tokens = tokens.clone()
            masked_tokens[mask.bool()] = self.mask
            return masked_tokens

        def get_lprobs(tokens, mask):
            # Average per-token log-prob of the original span under the MLM.
            logits, _ = model(src_tokens=get_masked_input(tokens, mask))
            lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
            scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
            mask = mask.type_as(scores)
            scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
            return scores

        cand_lprobs = get_lprobs(
            sample['candidate_tokens'][0],
            sample['candidate_masks'][0],
        )
        if sample['query_tokens'][0] is not None:
            query_lprobs = get_lprobs(
                sample['query_tokens'][0].unsqueeze(0),
                sample['query_masks'][0].unsqueeze(0),
            )
            return (query_lprobs >= cand_lprobs).all().item() == 1
        else:
            best_idx = cand_lprobs.argmax().item()
            full_cand = sample['candidate_tokens'][0][best_idx]
            mask = sample['candidate_masks'][0][best_idx]
            toks = full_cand[mask.bool()]
            return self.bpe.decode(self.source_dictionary.string(toks)).strip()

    @property
    def source_dictionary(self):
        return self.vocab

    @property
    def target_dictionary(self):
        return self.vocab
@register_task('winogrande')
class WinograndeTask(WSCTask):
    """
    Task for WinoGrande dataset. Efficient implementation for Winograd schema
    tasks with exactly two candidates, one of which is correct.
    """

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Validate the criterion and build the task from dict.txt."""
        assert args.criterion == 'winogrande', 'Must set --criterion=winogrande'

        # load data and label dictionaries
        vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(vocab)))

        return cls(args, vocab)

    def load_dataset(self, split, epoch=1, combine=False, data_path=None, return_only=False, **kwargs):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        if data_path is None:
            data_path = os.path.join(self.args.data, split + '.jsonl')
        if not os.path.exists(data_path):
            raise FileNotFoundError('Cannot find data: {}'.format(data_path))

        query_tokens = []
        query_masks = []
        query_lengths = []
        candidate_tokens = []
        candidate_masks = []
        candidate_lengths = []

        # In eval mode (test split) the iterator yields unlabeled pairs.
        itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == 'test'))

        for sample in itr:
            sentence, pronoun_span, query, cand_text = sample
            prefix = sentence[:pronoun_span[0]].rstrip()
            suffix = sentence[pronoun_span[1]:]

            leading_space = ' ' if sentence[:pronoun_span[0]].endswith(' ') else ''
            trailing_space = ''

            if query is not None:
                query_toks, query_mask = self.binarize_with_mask(
                    query, prefix, suffix, leading_space, trailing_space,
                )
                query_len = len(query_toks)
            else:
                query_toks, query_mask, query_len = None, None, 0

            query_tokens.append(query_toks)
            query_masks.append(query_mask)
            query_lengths.append(query_len)

            cand_toks, cand_mask = self.binarize_with_mask(
                cand_text, prefix, suffix, leading_space, trailing_space,
            )

            candidate_tokens.append(cand_toks)
            candidate_masks.append(cand_mask)
            candidate_lengths.append(cand_toks.size(0))

        query_lengths = np.array(query_lengths)

        def get_pad_dataset_fn(tokens, length, pad_idx):
            # Right-pad variable-length items so they batch cleanly.
            return PadDataset(
                ListDataset(tokens, length),
                pad_idx=pad_idx,
                left_pad=False,
            )

        query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad())
        query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0)

        candidate_lengths = np.array(candidate_lengths)
        candidate_tokens = get_pad_dataset_fn(candidate_tokens, candidate_lengths, self.vocab.pad())
        candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0)

        dataset = {
            'id': IdDataset(),
            'query_tokens': query_tokens,
            'query_masks': query_masks,
            'candidate_tokens': candidate_tokens,
            'candidate_masks': candidate_masks,
            'nsentences': NumSamplesDataset(),
            'ntokens': NumelDataset(query_tokens, reduce=True),
        }

        nested_dataset = NestedDictionaryDataset(
            dataset,
            sizes=[query_lengths],
        )

        with data_utils.numpy_seed(self.args.seed):
            shuffle = np.random.permutation(len(query_tokens))
        dataset = SortDataset(
            nested_dataset,
            # shuffle
            sort_order=[shuffle],
        )

        if return_only:
            return dataset

        self.datasets[split] = dataset
        return self.datasets[split]
| 13,148 | 33.970745 | 103 | py |
BIFI | BIFI-main/utils/fairseq/examples/roberta/wsc/wsc_criterion.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import encoders
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
@register_criterion('wsc')
class WSCCriterion(LegacyFairseqCriterion):
    """Margin (or cross-entropy) ranking loss for Winograd Schema Challenge.

    Scores the gold query span against candidate spans with a masked-LM and
    trains the query to outscore every candidate. Optionally dumps per-example
    predictions to ``--save-predictions``.
    """

    def __init__(self, args, task):
        super().__init__(args, task)
        if self.args.save_predictions is not None:
            self.prediction_h = open(self.args.save_predictions, 'w')
        else:
            self.prediction_h = None
        self.bpe = encoders.build_bpe(args)
        self.tokenizer = encoders.build_tokenizer(args)

    def __del__(self):
        # best-effort close of the predictions file on garbage collection
        if self.prediction_h is not None:
            self.prediction_h.close()

    @staticmethod
    def add_args(parser):
        """Add criterion-specific arguments to the parser."""
        parser.add_argument('--wsc-margin-alpha', type=float, metavar='A', default=1.0)
        parser.add_argument('--wsc-margin-beta', type=float, metavar='B', default=0.0)
        parser.add_argument('--wsc-cross-entropy', action='store_true',
                            help='use cross entropy formulation instead of margin loss')
        parser.add_argument('--save-predictions', metavar='FILE',
                            help='file to save predictions to')

    def get_masked_input(self, tokens, mask):
        """Return a copy of ``tokens`` with masked positions replaced by the task's mask index."""
        masked_tokens = tokens.clone()
        masked_tokens[mask] = self.task.mask
        return masked_tokens

    def get_lprobs(self, model, tokens, mask):
        """Average log-probability of the masked span's gold tokens under the model."""
        logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
        lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
        # pick out each gold token's log-prob, then mean over the masked span
        scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
        mask = mask.type_as(scores)
        scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
        return scores

    def get_loss(self, query_lprobs, cand_lprobs):
        """Loss for one example: cross-entropy over [query, candidates] or hinge margin."""
        if self.args.wsc_cross_entropy:
            # query is class 0 among the concatenated scores
            return F.cross_entropy(
                torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
                query_lprobs.new([0]).long(),
            )
        else:
            # hinge: penalize candidates within `beta` margin of the query
            return (
                - query_lprobs
                + self.args.wsc_margin_alpha * (
                    cand_lprobs - query_lprobs + self.args.wsc_margin_beta
                ).clamp(min=0)
            ).sum()

    def forward(self, model, sample, reduce=True):
        # compute loss and accuracy
        loss, nloss = 0., 0
        ncorrect, nqueries = 0, 0

        for i, label in enumerate(sample['labels']):
            query_lprobs = self.get_lprobs(
                model,
                sample['query_tokens'][i].unsqueeze(0),
                sample['query_masks'][i].unsqueeze(0),
            )
            cand_lprobs = self.get_lprobs(
                model,
                sample['candidate_tokens'][i],
                sample['candidate_masks'][i],
            )

            # predict positive iff the query outscores every candidate
            pred = (query_lprobs >= cand_lprobs).all().item()

            if label is not None:
                label = 1 if label else 0
                ncorrect += 1 if pred == label else 0
                nqueries += 1

                if label:
                    # only compute a loss for positive instances
                    nloss += 1
                    loss += self.get_loss(query_lprobs, cand_lprobs)

            id = sample['id'][i].item()
            if self.prediction_h is not None:
                print('{}\t{}\t{}'.format(id, pred, label), file=self.prediction_h)

        if nloss == 0:
            # keep the graph valid when the batch had no positive examples
            loss = torch.tensor(0.0, requires_grad=True)

        sample_size = nqueries if nqueries > 0 else 1
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['nsentences'],
            'sample_size': sample_size,
            'ncorrect': ncorrect,
            'nqueries': nqueries,
        }
        return loss, sample_size, logging_output

    @staticmethod
    def aggregate_logging_outputs(logging_outputs):
        """Aggregate logging outputs from data parallel training."""
        loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
        ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
        nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
        sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
        agg_output = {
            'loss': loss_sum / sample_size / math.log(2),
            'ntokens': ntokens,
            'nsentences': nsentences,
            'sample_size': sample_size,
        }
        ncorrect = sum(log.get('ncorrect', 0) for log in logging_outputs)
        nqueries = sum(log.get('nqueries', 0) for log in logging_outputs)
        if nqueries > 0:
            agg_output['accuracy'] = ncorrect / float(nqueries)
        return agg_output
@register_criterion('winogrande')
class WinograndeCriterion(WSCCriterion):
    """WSC criterion specialized for WinoGrande's two-candidate setup.

    Scores the whole batch of (query, candidate) pairs in two model calls
    instead of looping per example, and counts a prediction correct when the
    query outscores its single candidate.
    """

    def forward(self, model, sample, reduce=True):
        # compute loss and accuracy
        query_lprobs = self.get_lprobs(
            model,
            sample['query_tokens'],
            sample['query_masks'],
        )
        cand_lprobs = self.get_lprobs(
            model,
            sample['candidate_tokens'],
            sample['candidate_masks'],
        )
        pred = query_lprobs >= cand_lprobs
        loss = self.get_loss(query_lprobs, cand_lprobs)

        sample_size = sample['query_tokens'].size(0)
        ncorrect = pred.sum().item()
        logging_output = {
            'loss': utils.item(loss.data) if reduce else loss.data,
            'ntokens': sample['ntokens'],
            'nsentences': sample['nsentences'],
            'sample_size': sample_size,
            'ncorrect': ncorrect,
            'nqueries': sample_size,
        }
        return loss, sample_size, logging_output
| 6,034 | 35.137725 | 88 | py |
BIFI | BIFI-main/utils/fairseq/examples/speech_recognition/infer.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Run inference for pre-processed data with a trained model.
"""
import logging
import math
import os
import sentencepiece as spm
import torch
from fairseq import checkpoint_utils, options, utils, tasks
from fairseq.logging import meters, progress_bar
from fairseq.utils import import_user_module
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def add_asr_eval_argument(parser):
    """Register ASR-specific evaluation options on ``parser``.

    Adds sentencepiece / language-model decoding knobs used by the speech
    recognition inference script and returns the same parser for chaining.
    """
    parser.add_argument("--kspmodel", default=None, help="sentence piece model")
    parser.add_argument(
        "--wfstlm", default=None, help="wfstlm on dictonary output units"
    )
    parser.add_argument(
        "--rnnt_decoding_type",
        default="greedy",
        help="wfstlm on dictonary\
        output units",
    )
    parser.add_argument(
        "--lm-weight",
        "--lm_weight",
        type=float,
        default=0.2,
        help="weight for lm while interpolating with neural score",
    )
    parser.add_argument(
        "--rnnt_len_penalty",
        # fix: without type=float a CLI-supplied value stayed a string,
        # breaking any arithmetic done with the penalty downstream
        type=float,
        default=-0.5,
        help="rnnt length penalty on word level",
    )
    parser.add_argument(
        "--w2l-decoder", choices=["viterbi", "kenlm"], help="use a w2l decoder"
    )
    parser.add_argument("--lexicon", help="lexicon for w2l decoder")
    parser.add_argument("--kenlm-model", help="kenlm model for w2l decoder")
    parser.add_argument("--beam-threshold", type=float, default=25.0)
    parser.add_argument("--word-score", type=float, default=1.0)
    parser.add_argument("--unk-weight", type=float, default=-math.inf)
    parser.add_argument("--sil-weight", type=float, default=0.0)
    return parser
def check_args(args):
    """Validate the parsed CLI options before generation starts.

    Raises AssertionError with a user-facing message on the first violated
    requirement; returns None when the configuration is usable.
    """
    assert args.path is not None, "--path required for generation!"
    assert args.results_path is not None, "--results_path required for generation!"
    if args.sampling:
        assert args.nbest == args.beam, \
            "--sampling requires --nbest to be equal to --beam"
    if args.replace_unk is not None:
        assert args.dataset_impl == "raw", \
            "--replace-unk requires a raw text dataset (--dataset-impl=raw)"
def get_dataset_itr(args, task):
    """Build a fresh, unshuffled epoch iterator over ``args.gen_subset``."""
    batch_iterator = task.get_batch_iterator(
        dataset=task.dataset(args.gen_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=(1000000.0, 1000000.0),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=args.required_batch_size_multiple,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
    )
    # generation iterates the corpus in order, so no shuffling
    return batch_iterator.next_epoch_itr(shuffle=False)
def process_predictions(
    args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
    """Write the n-best hypotheses and the reference for one utterance.

    Each line goes to the matching stream in ``res_files`` in the form
    ``"<text> (<speaker>-<id>)"``: subword units to *.units files, detokenized
    words (via the sentencepiece processor ``sp``) to *.words files.
    """
    for hypo in hypos[: min(len(hypos), args.nbest)]:
        hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
        hyp_words = sp.DecodePieces(hyp_pieces.split())
        print(
            "{} ({}-{})".format(hyp_pieces, speaker, id), file=res_files["hypo.units"]
        )
        print("{} ({}-{})".format(hyp_words, speaker, id), file=res_files["hypo.words"])

    tgt_pieces = tgt_dict.string(target_tokens)
    tgt_words = sp.DecodePieces(tgt_pieces.split())
    print("{} ({}-{})".format(tgt_pieces, speaker, id), file=res_files["ref.units"])
    print("{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"])
    # only score top hypothesis
    # NOTE(review): hyp_words here is whatever the loop left behind, i.e. the
    # LAST of the nbest hypotheses, not the top one — only equivalent when
    # args.nbest == 1. Confirm intent before relying on this debug output.
    if not args.quiet:
        logger.debug("HYPO:" + hyp_words)
        logger.debug("TARGET:" + tgt_words)
        logger.debug("___________________")
def prepare_result_files(args):
    """Open one line-buffered text file per output stream.

    Returns a dict mapping stream names ("hypo.words", "hypo.units",
    "ref.words", "ref.units") to writable file objects created under
    ``args.results_path``, named after the model checkpoint and subset.
    """
    def _open_stream(file_prefix):
        filename = "{}-{}-{}.txt".format(
            file_prefix, os.path.basename(args.path), args.gen_subset
        )
        # buffering=1 flushes per line so partial results survive a crash
        return open(os.path.join(args.results_path, filename), "w", buffering=1)

    return {
        "hypo.words": _open_stream("hypo.word"),
        "hypo.units": _open_stream("hypo.units"),
        "ref.words": _open_stream("ref.word"),
        "ref.units": _open_stream("ref.units"),
    }
def load_models_and_criterions(filenames, arg_overrides=None, task=None):
    """Load an ensemble of checkpoints plus their criterions.

    Args:
        filenames: list of checkpoint paths; each must exist.
        arg_overrides: optional dict of arg values overriding the saved ones.
        task: optional pre-built task; if None, it is constructed from the
            FIRST checkpoint's saved args and reused for the rest.

    Returns:
        (models, criterions, args) — note ``args`` is the saved args of the
        LAST checkpoint processed.
    """
    models = []
    criterions = []
    for filename in filenames:
        if not os.path.exists(filename):
            raise IOError("Model file not found: {}".format(filename))
        state = checkpoint_utils.load_checkpoint_to_cpu(filename, arg_overrides)

        args = state["args"]
        if task is None:
            task = tasks.setup_task(args)

        # build model for ensemble
        model = task.build_model(args)
        model.load_state_dict(state["model"], strict=True)
        models.append(model)

        criterion = task.build_criterion(args)
        # older checkpoints may not carry criterion state; skip silently then
        if "criterion" in state:
            criterion.load_state_dict(state["criterion"], strict=True)
        criterions.append(criterion)
    return models, criterions, args
def optimize_models(args, use_cuda, models):
    """Prepare every ensemble member for fast generation.

    Enables the generation fast path on each model, optionally casts to
    fp16, and moves it to GPU when ``use_cuda`` is set. Mutates the models
    in place; returns None.
    """
    # invariant across the ensemble, so compute the beam size once
    beam_size = None if args.no_beamable_mm else args.beam
    for member in models:
        member.make_generation_fast_(
            beamable_mm_beam_size=beam_size,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            member.half()
        if use_cuda:
            member.cuda()
def main(args):
    """Run ASR inference: load task, data and models, decode, write results.

    Decodes ``args.gen_subset`` with the checkpoint ensemble in ``args.path``,
    writes hypothesis/reference files under ``args.results_path`` and logs
    throughput statistics.
    """
    check_args(args)
    import_user_module(args)

    # default batch budget when neither limit was given
    if args.max_tokens is None and args.max_sentences is None:
        args.max_tokens = 30000
    logger.info(args)

    use_cuda = torch.cuda.is_available() and not args.cpu

    # Load dataset splits
    task = tasks.setup_task(args)
    task.load_dataset(args.gen_subset)
    logger.info(
        "| {} {} {} examples".format(
            args.data, args.gen_subset, len(task.dataset(args.gen_subset))
        )
    )

    # Set dictionary
    tgt_dict = task.target_dictionary

    logger.info("| decoding with criterion {}".format(args.criterion))

    # Load ensemble
    logger.info("| loading model(s) from {}".format(args.path))
    models, criterions, _model_args = load_models_and_criterions(
        args.path.split(os.pathsep),
        arg_overrides=eval(args.model_overrides),  # noqa
        task=task,
    )
    optimize_models(args, use_cuda, models)

    # hack to pass transitions to W2lDecoder
    if args.criterion == "asg_loss":
        trans = criterions[0].asg.trans.data
        args.asg_transitions = torch.flatten(trans).tolist()

    # Load dataset (possibly sharded)
    itr = get_dataset_itr(args, task)

    progress = progress_bar.progress_bar(
        itr,
        log_format=args.log_format,
        log_interval=args.log_interval,
        default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
    )

    # Initialize generator
    gen_timer = meters.StopwatchMeter()
    generator = task.build_generator(args)

    num_sentences = 0

    if not os.path.exists(args.results_path):
        os.makedirs(args.results_path)

    # sentencepiece model for detokenizing subword units into words
    sp = spm.SentencePieceProcessor()
    sp.Load(os.path.join(args.data, "spm.model"))

    res_files = prepare_result_files(args)
    wps_meter = meters.TimeMeter()
    for sample in progress:
        sample = utils.move_to_cuda(sample) if use_cuda else sample
        if "net_input" not in sample:
            # dummy batch (e.g. padding for sharded iteration); skip
            continue

        prefix_tokens = None
        if args.prefix_size > 0:
            prefix_tokens = sample["target"][:, : args.prefix_size]

        gen_timer.start()
        hypos = task.inference_step(generator, models, sample, prefix_tokens)
        num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
        gen_timer.stop(num_generated_tokens)

        for i, sample_id in enumerate(sample["id"].tolist()):
            speaker = task.dataset(args.gen_subset).speakers[int(sample_id)]
            id = task.dataset(args.gen_subset).ids[int(sample_id)]
            target_tokens = (
                utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu()
            )
            # Process top predictions
            process_predictions(
                args, hypos[i], sp, tgt_dict, target_tokens, res_files, speaker, id
            )

        wps_meter.update(num_generated_tokens)
        progress.log({"wps": round(wps_meter.avg)})
        num_sentences += sample["nsentences"]

    logger.info(
        "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
        "sentences/s, {:.2f} tokens/s)".format(
            num_sentences,
            gen_timer.n,
            gen_timer.sum,
            num_sentences / gen_timer.sum,
            1.0 / gen_timer.avg,
        )
    )
    logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
def cli_main():
    """Command-line entry point: parse generation + ASR args, then run inference."""
    parser = add_asr_eval_argument(options.get_generation_parser())
    parsed_args = options.parse_args_and_arch(parser)
    main(parsed_args)
# Run inference only when executed as a script, not on import.
if __name__ == "__main__":
    cli_main()
| 9,010 | 31.886861 | 88 | py |
BIFI | BIFI-main/utils/fairseq/examples/speech_recognition/w2l_decoder.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wav2letter decoders.
"""
import math
import itertools as it
import torch
from fairseq import utils
from examples.speech_recognition.data.replabels import unpack_replabels
try:
from wav2letter.common import create_word_dict, load_words
from wav2letter.criterion import CpuViterbiPath, get_data_ptr_as_bytes
from wav2letter.decoder import (
CriterionType,
DecoderOptions,
KenLM,
SmearingMode,
Trie,
WordLMDecoder,
)
except ImportError:
# wav2letter is a required dependency for the speech_recognition
# example, but don't break on import
pass
class W2lDecoder(object):
    """Base class for wav2letter-style decoders.

    Handles criterion-specific setup (CTC vs ASG), running the encoder to get
    per-frame emissions, and normalizing decoded index sequences. Subclasses
    implement ``decode(emissions)``.
    """

    def __init__(self, args, tgt_dict):
        self.tgt_dict = tgt_dict
        self.vocab_size = len(tgt_dict)
        self.nbest = args.nbest

        # criterion-specific init
        if args.criterion == "ctc_loss":
            self.criterion_type = CriterionType.CTC
            self.blank = tgt_dict.index("<ctc_blank>")
            self.asg_transitions = None
        elif args.criterion == "asg_loss":
            self.criterion_type = CriterionType.ASG
            self.blank = -1
            self.asg_transitions = args.asg_transitions
            self.max_replabel = args.max_replabel
            # ASG transitions form a full vocab x vocab matrix, flattened
            assert len(self.asg_transitions) == self.vocab_size ** 2
        else:
            raise RuntimeError(f"unknown criterion: {args.criterion}")

    def generate(self, models, sample, prefix_tokens=None):
        """Generate a batch of inferences."""
        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        emissions = self.get_emissions(models, encoder_input)
        return self.decode(emissions)

    def get_emissions(self, models, encoder_input):
        """Run encoder and normalize emissions"""
        # only the first ensemble member's encoder is used here
        encoder_out = models[0].encoder(**encoder_input)
        if self.criterion_type == CriterionType.CTC:
            emissions = models[0].get_normalized_probs(encoder_out, log_probs=True)
        elif self.criterion_type == CriterionType.ASG:
            emissions = encoder_out["encoder_out"]
        # T x B x N -> B x T x N, float32 on CPU for the native decoder
        return emissions.transpose(0, 1).float().cpu().contiguous()

    def get_tokens(self, idxs):
        """Normalize tokens by handling CTC blank, ASG replabels, etc."""
        # collapse consecutive repeats, then drop negative indices
        idxs = (g[0] for g in it.groupby(idxs))
        idxs = filter(lambda x: x >= 0, idxs)
        if self.criterion_type == CriterionType.CTC:
            idxs = filter(lambda x: x != self.blank, idxs)
        elif self.criterion_type == CriterionType.ASG:
            idxs = unpack_replabels(list(idxs), self.tgt_dict, self.max_replabel)
        return torch.LongTensor(list(idxs))
class W2lViterbiDecoder(W2lDecoder):
    """LM-free Viterbi decoder over per-frame emissions.

    Runs wav2letter's CPU Viterbi path search with either zero transitions
    (CTC) or the criterion's learned ASG transition matrix, and returns one
    hypothesis per batch element with a fixed score of 0.
    """

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)

    def decode(self, emissions):
        """Return [[{tokens, score}]] — a single best path per batch element.

        Args:
            emissions: B x T x N float tensor of per-frame scores.
        """
        B, T, N = emissions.size()
        # fix: removed unused local `hypos = []` that was never populated
        if self.asg_transitions is None:
            transitions = torch.FloatTensor(N, N).zero_()
        else:
            transitions = torch.FloatTensor(self.asg_transitions).view(N, N)
        viterbi_path = torch.IntTensor(B, T)
        workspace = torch.ByteTensor(CpuViterbiPath.get_workspace_size(B, T, N))
        # fills viterbi_path in place via raw data pointers
        CpuViterbiPath.compute(
            B,
            T,
            N,
            get_data_ptr_as_bytes(emissions),
            get_data_ptr_as_bytes(transitions),
            get_data_ptr_as_bytes(viterbi_path),
            get_data_ptr_as_bytes(workspace),
        )
        return [
            [{"tokens": self.get_tokens(viterbi_path[b].tolist()), "score": 0}]
            for b in range(B)
        ]
class W2lKenLMDecoder(W2lDecoder):
    """Lexicon-constrained beam-search decoder with a KenLM language model.

    Builds a trie of word spellings scored by the LM, then uses wav2letter's
    WordLMDecoder to search the emissions. ``decode`` returns up to
    ``self.nbest`` hypotheses per batch element.
    """

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)

        self.silence = tgt_dict.index(args.silence_token)
        self.lexicon = load_words(args.lexicon)
        self.word_dict = create_word_dict(self.lexicon)
        self.unk_word = self.word_dict.get_index("<unk>")
        self.lm = KenLM(args.kenlm_model, self.word_dict)
        self.trie = Trie(self.vocab_size, self.silence)

        # insert every spelling of every lexicon word, scored by the LM
        # from its start state
        start_state = self.lm.start(False)
        for word, spellings in self.lexicon.items():
            word_idx = self.word_dict.get_index(word)
            _, score = self.lm.score(start_state, word_idx)
            for spelling in spellings:
                spelling_idxs = [tgt_dict.index(token) for token in spelling]
                self.trie.insert(spelling_idxs, word_idx, score)
        # propagate max child scores up the trie for pruning
        self.trie.smear(SmearingMode.MAX)

        self.decoder_opts = DecoderOptions(
            args.beam,
            args.beam_threshold,
            args.lm_weight,
            args.word_score,
            args.unk_weight,
            False,
            args.sil_weight,
            self.criterion_type,
        )
        self.decoder = WordLMDecoder(
            self.decoder_opts,
            self.trie,
            self.lm,
            self.silence,
            self.blank,
            self.unk_word,
            self.asg_transitions,
        )

    def decode(self, emissions):
        """Beam-search each batch element; returns nbest {tokens, score} lists."""
        B, T, N = emissions.size()
        hypos = []
        for b in range(B):
            # raw pointer into row b; the 4-byte stride multiplier assumes
            # float32 emissions (guaranteed by W2lDecoder.get_emissions)
            emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)
            nbest_results = self.decoder.decode(emissions_ptr, T, N)[: self.nbest]
            hypos.append(
                [
                    {"tokens": self.get_tokens(result.tokens), "score": result.score}
                    for result in nbest_results
                ]
            )
        return hypos
| 5,896 | 34.10119 | 85 | py |
BIFI | BIFI-main/utils/fairseq/examples/speech_recognition/criterions/CTC_loss.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from itertools import groupby
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
from examples.speech_recognition.data.data_utils import encoder_padding_mask_to_lengths
from examples.speech_recognition.utils.wer_utils import Code, EditDistance, Token
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def arr_to_toks(arr):
    """Wrap each element of ``arr`` in a zero-scored Token (stringified)."""
    return [Token(str(item), 0.0, 0.0) for item in arr]
def compute_ctc_uer(logprobs, targets, input_lengths, target_lengths, blank_idx):
    """
    Computes utterance error rate for CTC outputs

    Args:
        logprobs: (Torch.tensor) N, T1, D tensor of log probabilities out
            of the encoder
        targets: (Torch.tensor) N, T2 tensor of targets
        input_lengths: (Torch.tensor) lengths of inputs for each sample
        target_lengths: (Torch.tensor) lengths of targets for each sample
        blank_idx: (integer) id of blank symbol in target dictionary

    Returns:
        batch_errors: (float) errors in the batch
        batch_total: (float) total number of valid samples in batch
    """
    batch_errors = 0.0
    batch_total = 0.0
    for sample_idx in range(logprobs.shape[0]):
        valid_len = input_lengths[sample_idx]
        greedy = logprobs[sample_idx][:valid_len].argmax(1).tolist()
        reference = targets[sample_idx][: target_lengths[sample_idx]].tolist()
        # collapse consecutive repeats (CTC dedup) ...
        collapsed = [sym for sym, _grp in groupby(greedy)]
        # ... then drop blank symbols
        predicted = [sym for sym in collapsed if sym != blank_idx]
        # compute the alignment based on EditDistance
        alignment = EditDistance(False).align(
            arr_to_toks(predicted), arr_to_toks(reference)
        )
        # count non-matches; alignment.codes could also give del/ins/sub
        # breakdowns in the future
        batch_errors += sum(1 for code in alignment.codes if code != Code.match)
        batch_total += len(reference)
    return batch_errors, batch_total
@register_criterion("ctc_loss")
class CTCCriterion(LegacyFairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.blank_idx = task.target_dictionary.index("<ctc_blank>")
self.pad_idx = task.target_dictionary.pad()
self.task = task
@staticmethod
def add_args(parser):
parser.add_argument(
"--use-source-side-sample-size",
action="store_true",
default=False,
help=(
"when compute average loss, using number of source tokens "
+ "as denominator. "
+ "This argument will be no-op if sentence-avg is used."
),
)
def forward(self, model, sample, reduce=True, log_probs=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the encoder output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
max_seq_len = lprobs.size(0)
bsz = lprobs.size(1)
else:
max_seq_len = lprobs.size(1)
bsz = lprobs.size(0)
device = net_output["encoder_out"].device
input_lengths = encoder_padding_mask_to_lengths(
net_output["encoder_padding_mask"], max_seq_len, bsz, device
)
target_lengths = sample["target_lengths"]
targets = sample["target"]
if batch_first:
# N T D -> T N D (F.ctc_loss expects this)
lprobs = lprobs.transpose(0, 1)
pad_mask = sample["target"] != self.pad_idx
targets_flat = targets.masked_select(pad_mask)
loss = F.ctc_loss(
lprobs,
targets_flat,
input_lengths,
target_lengths,
blank=self.blank_idx,
reduction="sum",
zero_infinity=True,
)
lprobs = lprobs.transpose(0, 1) # T N D -> N T D
errors, total = compute_ctc_uer(
lprobs, targets, input_lengths, target_lengths, self.blank_idx
)
if self.args.sentence_avg:
sample_size = sample["target"].size(0)
else:
if self.args.use_source_side_sample_size:
sample_size = torch.sum(input_lengths).item()
else:
sample_size = sample["ntokens"]
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"errors": errors,
"total": total,
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
errors = sum(log.get("errors", 0) for log in logging_outputs)
total = sum(log.get("total", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": 100.0 - min(errors * 100.0 / total, 100.0),
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
return agg_output
| 7,008 | 34.94359 | 87 | py |
BIFI | BIFI-main/utils/fairseq/examples/speech_recognition/criterions/cross_entropy_acc.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
def __init__(self, task, sentence_avg):
super().__init__(task)
self.sentence_avg = sentence_avg
def compute_loss(self, model, net_output, target, reduction, log_probs):
# N, T -> N * T
target = target.view(-1)
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the net output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
lprobs = lprobs.transpose(0, 1)
# N, T, D -> N * T, D
lprobs = lprobs.view(-1, lprobs.size(-1))
loss = F.nll_loss(
lprobs, target, ignore_index=self.padding_idx, reduction=reduction
)
return lprobs, loss
def get_logging_output(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output
def forward(self, model, sample, reduction="sum", log_probs=True):
"""Computes the cross entropy with accuracy metric for the given sample.
This is similar to CrossEntropyCriterion in fairseq, but also
computes accuracy metrics as part of logging
Args:
logprobs (Torch.tensor) of shape N, T, D i.e.
batchsize, timesteps, dimensions
targets (Torch.tensor) of shape N, T i.e batchsize, timesteps
Returns:
tuple: With three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
TODO:
* Currently this Criterion will only work with LSTMEncoderModels or
FairseqModels which have decoder, or Models which return TorchTensor
as net_output.
We need to make a change to support all FairseqEncoder models.
"""
net_output = model(**sample["net_input"])
target = model.get_targets(sample, net_output)
lprobs, loss = self.compute_loss(
model, net_output, target, reduction, log_probs
)
sample_size, logging_output = self.get_logging_output(
sample, target, lprobs, loss
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, then loss
# is per-sentence loss; else sample_size is ntokens, the loss
# becomes per-output token loss
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
# total is the number of validate tokens
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
# loss: per output token loss
# nll_loss: per sentence loss
return agg_output
| 5,372 | 40.015267 | 85 | py |
BIFI | BIFI-main/utils/fairseq/examples/speech_recognition/criterions/ASG_loss.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from fairseq import utils
from fairseq.criterions import LegacyFairseqCriterion, register_criterion
from examples.speech_recognition.data.replabels import pack_replabels
@register_criterion("asg_loss")
class ASGCriterion(LegacyFairseqCriterion):
@staticmethod
def add_args(parser):
group = parser.add_argument_group("ASG Loss")
group.add_argument(
"--asg-transitions-init",
help="initial diagonal value of transition matrix",
type=float,
default=0.0,
)
group.add_argument(
"--max-replabel", help="maximum # of replabels", type=int, default=2
)
group.add_argument(
"--linseg-updates",
help="# of training updates to use LinSeg initialization",
type=int,
default=0,
)
group.add_argument(
"--hide-linseg-messages",
help="hide messages about LinSeg initialization",
action="store_true",
)
def __init__(self, args, task):
from wav2letter.criterion import ASGLoss, CriterionScaleMode
super().__init__(args, task)
self.tgt_dict = task.target_dictionary
self.eos = self.tgt_dict.eos()
self.silence = (
self.tgt_dict.index(args.silence_token)
if args.silence_token in self.tgt_dict
else None
)
self.max_replabel = args.max_replabel
num_labels = len(self.tgt_dict)
self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)
self.asg.trans = torch.nn.Parameter(
args.asg_transitions_init * torch.eye(num_labels), requires_grad=True
)
self.linseg_progress = torch.nn.Parameter(
torch.tensor([0], dtype=torch.int), requires_grad=False
)
self.linseg_maximum = args.linseg_updates
self.linseg_message_state = "none" if args.hide_linseg_messages else "start"
def linseg_step(self):
if not self.training:
return False
if self.linseg_progress.item() < self.linseg_maximum:
if self.linseg_message_state == "start":
print("| using LinSeg to initialize ASG")
self.linseg_message_state = "finish"
self.linseg_progress.add_(1)
return True
elif self.linseg_message_state == "finish":
print("| finished LinSeg initialization")
self.linseg_message_state = "none"
return False
def replace_eos_with_silence(self, tgt):
if tgt[-1] != self.eos:
return tgt
elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):
return tgt[:-1]
else:
return tgt[:-1] + [self.silence]
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
B = emissions.size(0)
T = emissions.size(1)
device = emissions.device
target = torch.IntTensor(B, T)
target_size = torch.IntTensor(B)
using_linseg = self.linseg_step()
for b in range(B):
initial_target_size = sample["target_lengths"][b].item()
if initial_target_size == 0:
raise ValueError("target size cannot be zero")
tgt = sample["target"][b, :initial_target_size].tolist()
tgt = self.replace_eos_with_silence(tgt)
tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
tgt = tgt[:T]
if using_linseg:
tgt = [tgt[t * len(tgt) // T] for t in range(T)]
target[b][: len(tgt)] = torch.IntTensor(tgt)
target_size[b] = len(tgt)
loss = self.asg.forward(emissions, target.to(device), target_size.to(device))
if reduce:
loss = torch.sum(loss)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / nsentences,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return agg_output
| 5,509 | 34.548387 | 85 | py |
BIFI | BIFI-main/utils/fairseq/examples/speech_recognition/models/vggtransformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
from collections.abc import Iterable
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
FairseqIncrementalDecoder,
FairseqEncoderDecoderModel,
register_model,
register_model_architecture,
)
from fairseq.modules import LinearizedConvolution
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock
@register_model("asr_vggtransformer")
class VGGTransformerModel(FairseqEncoderDecoderModel):
    """
    Transformers with convolutional context for ASR
    https://arxiv.org/abs/1904.11660

    Encoder-decoder model: a VGG+Transformer encoder paired with a
    conv+Transformer decoder. Layer configurations are passed on the command
    line as Python-literal strings and evaluated in the ``build_*`` methods.
    """

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument(
            "--input-feat-per-channel",
            type=int,
            metavar="N",
            help="encoder input dimension per input channel",
        )
        parser.add_argument(
            "--vggblock-enc-config",
            type=str,
            metavar="EXPR",
            help="""
    an array of tuples each containing the configuration of one vggblock:
    [(out_channels,
      conv_kernel_size,
      pooling_kernel_size,
      num_conv_layers,
      use_layer_norm), ...])
            """,
        )
        parser.add_argument(
            "--transformer-enc-config",
            type=str,
            metavar="EXPR",
            help=""""
    a tuple containing the configuration of the encoder transformer layers
    configurations:
    [(input_dim,
      num_heads,
      ffn_dim,
      normalize_before,
      dropout,
      attention_dropout,
      relu_dropout), ...]')
            """,
        )
        parser.add_argument(
            "--enc-output-dim",
            type=int,
            metavar="N",
            help="""
    encoder output dimension, can be None. If specified, projecting the
    transformer output to the specified dimension""",
        )
        parser.add_argument(
            "--in-channels",
            type=int,
            metavar="N",
            help="number of encoder input channels",
        )
        parser.add_argument(
            "--tgt-embed-dim",
            type=int,
            metavar="N",
            help="embedding dimension of the decoder target tokens",
        )
        parser.add_argument(
            "--transformer-dec-config",
            type=str,
            metavar="EXPR",
            help="""
    a tuple containing the configuration of the decoder transformer layers
    configurations:
    [(input_dim,
      num_heads,
      ffn_dim,
      normalize_before,
      dropout,
      attention_dropout,
      relu_dropout), ...]
            """,
        )
        parser.add_argument(
            "--conv-dec-config",
            type=str,
            metavar="EXPR",
            help="""
    an array of tuples for the decoder 1-D convolution config
        [(out_channels, conv_kernel_size, use_layer_norm), ...]""",
        )

    @classmethod
    def build_encoder(cls, args, task):
        # NOTE(review): eval() of command-line config strings — trusted input only.
        return VGGTransformerEncoder(
            input_feat_per_channel=args.input_feat_per_channel,
            vggblock_config=eval(args.vggblock_enc_config),
            transformer_config=eval(args.transformer_enc_config),
            encoder_output_dim=args.enc_output_dim,
            in_channels=args.in_channels,
        )

    @classmethod
    def build_decoder(cls, args, task):
        # NOTE(review): eval() of command-line config strings — trusted input only.
        return TransformerDecoder(
            dictionary=task.target_dictionary,
            embed_dim=args.tgt_embed_dim,
            transformer_config=eval(args.transformer_dec_config),
            conv_config=eval(args.conv_dec_config),
            encoder_output_dim=args.enc_output_dim,
        )

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        # make sure that all args are properly defaulted
        # (in case there are any new ones)
        base_architecture(args)

        encoder = cls.build_encoder(args, task)
        decoder = cls.build_decoder(args, task)
        return cls(encoder, decoder)

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        # net_output['encoder_out'] is a (B, T, D) tensor
        lprobs = super().get_normalized_probs(net_output, log_probs, sample)
        # Mark the layout for downstream consumers (e.g. the sequence generator).
        lprobs.batch_first = True
        return lprobs
# Default layer configurations; field layout of each transformer-layer tuple
# is documented below using the encoder defaults as the example.
DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2
DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2
# 256: embedding dimension
# 4: number of heads
# 1024: FFN hidden dimension
# True: apply LayerNorm before (dropout + residual) instead of after
# 0.2 (dropout): dropout after MultiheadAttention and second FC
# 0.2 (attention_dropout): dropout in MultiheadAttention
# 0.2 (relu_dropout): dropout after ReLu
DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2
DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2
# TODO: repace transformer encoder config from one liner
# to explicit args to get rid of this transformation
def prepare_transformer_encoder_params(
    input_dim,
    num_heads,
    ffn_dim,
    normalize_before,
    dropout,
    attention_dropout,
    relu_dropout,
):
    """Pack one encoder-layer config tuple into the argparse.Namespace that
    :class:`TransformerEncoderLayer` expects."""
    return argparse.Namespace(
        encoder_embed_dim=input_dim,
        encoder_attention_heads=num_heads,
        attention_dropout=attention_dropout,
        dropout=dropout,
        activation_dropout=relu_dropout,
        encoder_normalize_before=normalize_before,
        encoder_ffn_embed_dim=ffn_dim,
    )
def prepare_transformer_decoder_params(
    input_dim,
    num_heads,
    ffn_dim,
    normalize_before,
    dropout,
    attention_dropout,
    relu_dropout,
):
    """Pack one decoder-layer config tuple into the argparse.Namespace that
    :class:`TransformerDecoderLayer` expects."""
    return argparse.Namespace(
        decoder_embed_dim=input_dim,
        decoder_attention_heads=num_heads,
        attention_dropout=attention_dropout,
        dropout=dropout,
        activation_dropout=relu_dropout,
        decoder_normalize_before=normalize_before,
        decoder_ffn_embed_dim=ffn_dim,
    )
class VGGTransformerEncoder(FairseqEncoder):
    """VGG + Transformer encoder.

    A stack of VGG-style 2-D convolution blocks followed by a stack of
    Transformer encoder layers, with Linear adapters inserted wherever two
    consecutive stages disagree on embedding dimension.
    """

    def __init__(
        self,
        input_feat_per_channel,
        vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
        transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
        encoder_output_dim=512,
        in_channels=1,
        transformer_context=None,
        transformer_sampling=None,
    ):
        """constructor for VGGTransformerEncoder

        Args:
            - input_feat_per_channel: feature dim (not including stacked,
              just base feature)
            - in_channels: # input channels (e.g., if stack 8 feature vector
              together, this is 8)
            - vggblock_config: configuration of vggblock, see comments on
              DEFAULT_ENC_VGGBLOCK_CONFIG
            - transformer_config: configuration of transformer layer, see
              comments on DEFAULT_ENC_TRANSFORMER_CONFIG
            - encoder_output_dim: final transformer output embedding dimension
            - transformer_context: (left, right) if set, self-attention will
              be focused on (t-left, t+right)
            - transformer_sampling: an iterable of int, must match with
              len(transformer_config), transformer_sampling[i] indicates
              sampling factor for i-th transformer layer, after multihead att
              and feedforward part
        """
        super().__init__(None)

        self.num_vggblocks = 0
        if vggblock_config is not None:
            if not isinstance(vggblock_config, Iterable):
                raise ValueError("vggblock_config is not iterable")
            self.num_vggblocks = len(vggblock_config)

        self.conv_layers = nn.ModuleList()
        self.in_channels = in_channels
        self.input_dim = input_feat_per_channel

        if vggblock_config is not None:
            for _, config in enumerate(vggblock_config):
                (
                    out_channels,
                    conv_kernel_size,
                    pooling_kernel_size,
                    num_conv_layers,
                    layer_norm,
                ) = config
                self.conv_layers.append(
                    VGGBlock(
                        in_channels,
                        out_channels,
                        conv_kernel_size,
                        pooling_kernel_size,
                        num_conv_layers,
                        input_dim=input_feat_per_channel,
                        layer_norm=layer_norm,
                    )
                )
                in_channels = out_channels
                input_feat_per_channel = self.conv_layers[-1].output_dim

        transformer_input_dim = self.infer_conv_output_dim(
            self.in_channels, self.input_dim
        )
        # transformer_input_dim is the output dimension of VGG part

        self.validate_transformer_config(transformer_config)
        self.transformer_context = self.parse_transformer_context(transformer_context)
        self.transformer_sampling = self.parse_transformer_sampling(
            transformer_sampling, len(transformer_config)
        )

        self.transformer_layers = nn.ModuleList()
        # Linear adapter whenever consecutive stages disagree on width.
        if transformer_input_dim != transformer_config[0][0]:
            self.transformer_layers.append(
                Linear(transformer_input_dim, transformer_config[0][0])
            )
        self.transformer_layers.append(
            TransformerEncoderLayer(
                prepare_transformer_encoder_params(*transformer_config[0])
            )
        )

        for i in range(1, len(transformer_config)):
            if transformer_config[i - 1][0] != transformer_config[i][0]:
                self.transformer_layers.append(
                    Linear(transformer_config[i - 1][0], transformer_config[i][0])
                )
            self.transformer_layers.append(
                TransformerEncoderLayer(
                    prepare_transformer_encoder_params(*transformer_config[i])
                )
            )

        # Final projection + LayerNorm to the requested output dimension.
        self.encoder_output_dim = encoder_output_dim
        self.transformer_layers.extend(
            [
                Linear(transformer_config[-1][0], encoder_output_dim),
                LayerNorm(encoder_output_dim),
            ]
        )

    def forward(self, src_tokens, src_lengths, **kwargs):
        """
        src_tokens: padded tensor (B, T, C * feat)
        src_lengths: tensor of original lengths of input utterances (B,)
        """
        bsz, max_seq_len, _ = src_tokens.size()
        x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim)
        x = x.transpose(1, 2).contiguous()
        # (B, C, T, feat)

        for layer_idx in range(len(self.conv_layers)):
            x = self.conv_layers[layer_idx](x)

        bsz, _, output_seq_len, _ = x.size()

        # (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat)
        x = x.transpose(1, 2).transpose(0, 1)
        x = x.contiguous().view(output_seq_len, bsz, -1)

        # Empirical subsampling factor of the conv stack (rounded to nearest).
        subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5)
        # TODO: shouldn't subsampling_factor be determined in advance ?
        input_lengths = (src_lengths.float() / subsampling_factor).ceil().long()

        encoder_padding_mask, _ = lengths_to_encoder_padding_mask(
            input_lengths, batch_first=True
        )
        if not encoder_padding_mask.any():
            encoder_padding_mask = None

        attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor)

        transformer_layer_idx = 0

        for layer_idx in range(len(self.transformer_layers)):

            if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer):
                x = self.transformer_layers[layer_idx](
                    x, encoder_padding_mask, attn_mask
                )

                if self.transformer_sampling[transformer_layer_idx] != 1:
                    sampling_factor = self.transformer_sampling[transformer_layer_idx]
                    # Subsample time dimension (and masks) after this layer.
                    x, encoder_padding_mask, attn_mask = self.slice(
                        x, encoder_padding_mask, attn_mask, sampling_factor
                    )

                transformer_layer_idx += 1

            else:
                # Linear adapter / final projection / LayerNorm.
                x = self.transformer_layers[layer_idx](x)

        # encoder_padding_mask is a (T x B) tensor, its [t, b] elements indicate
        # whether encoder_output[t, b] is valid or not (valid=0, invalid=1)

        return {
            "encoder_out": x,  # (T, B, C)
            "encoder_padding_mask": encoder_padding_mask.t()
            if encoder_padding_mask is not None
            else None,
            # (B, T) --> (T, B)
        }

    def infer_conv_output_dim(self, in_channels, input_dim):
        """Run a dummy batch through the conv stack to measure its per-frame
        output dimension (flattened channels * features)."""
        sample_seq_len = 200
        sample_bsz = 10
        x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim)
        for i, _ in enumerate(self.conv_layers):
            x = self.conv_layers[i](x)
        x = x.transpose(1, 2)
        mb, seq = x.size()[:2]
        return x.contiguous().view(mb, seq, -1).size(-1)

    def validate_transformer_config(self, transformer_config):
        """Raise ValueError if any layer's embed dim is not divisible by its
        number of attention heads."""
        for config in transformer_config:
            input_dim, num_heads = config[:2]
            if input_dim % num_heads != 0:
                # FIX: original message called .format(num_heads) on a string
                # with no placeholder, silently dropping the head count.
                msg = (
                    "ERROR in transformer config {}:".format(config)
                    + "input dimension {} ".format(input_dim)
                    + "not divisible by number of heads {}".format(num_heads)
                )
                raise ValueError(msg)

    def parse_transformer_context(self, transformer_context):
        """
        transformer_context can be the following:
        -   None; indicates no context is used, i.e.,
            transformer can access full context
        -   a tuple/list of two int; indicates left and right context,
            any number <0 indicates infinite context
                * e.g., (5, 6) indicates that for query at x_t, transformer can
                access [t-5, t+6] (inclusive)
                * e.g., (-1, 6) indicates that for query at x_t, transformer can
                access [0, t+6] (inclusive)
        """
        if transformer_context is None:
            return None

        if not isinstance(transformer_context, Iterable):
            raise ValueError("transformer context must be Iterable if it is not None")

        if len(transformer_context) != 2:
            raise ValueError("transformer context must have length 2")

        left_context = transformer_context[0]
        if left_context < 0:
            left_context = None

        right_context = transformer_context[1]
        if right_context < 0:
            right_context = None

        if left_context is None and right_context is None:
            return None

        return (left_context, right_context)

    def parse_transformer_sampling(self, transformer_sampling, num_layers):
        """
        parsing transformer sampling configuration

        Args:
            - transformer_sampling, accepted input:
                * None, indicating no sampling
                * an Iterable with int (>0) as element
            - num_layers, expected number of transformer layers, must match with
              the length of transformer_sampling if it is not None

        Returns:
            - A tuple with length num_layers
        """
        if transformer_sampling is None:
            return (1,) * num_layers

        if not isinstance(transformer_sampling, Iterable):
            raise ValueError(
                "transformer_sampling must be an iterable if it is not None"
            )

        if len(transformer_sampling) != num_layers:
            # FIX: original bound .format() to the second literal only, so the
            # first "{}" was never substituted.
            raise ValueError(
                "transformer_sampling {} does not match with the number "
                "of layers {}".format(transformer_sampling, num_layers)
            )

        for layer, value in enumerate(transformer_sampling):
            if not isinstance(value, int):
                # FIX: original message dropped the offending value.
                raise ValueError(
                    "Invalid value in transformer_sampling: {}".format(value)
                )

            if value < 1:
                raise ValueError(
                    "{} layer's subsampling is {}.".format(layer, value)
                    + " This is not allowed! "
                )
        return transformer_sampling

    def slice(self, embedding, padding_mask, attn_mask, sampling_factor):
        """
        embedding is a (T, B, D) tensor
        padding_mask is a (B, T) tensor or None
        attn_mask is a (T, T) tensor or None

        Returns the three inputs subsampled along the time axis by
        ``sampling_factor``.
        """
        embedding = embedding[::sampling_factor, :, :]
        if padding_mask is not None:
            padding_mask = padding_mask[:, ::sampling_factor]
        if attn_mask is not None:
            attn_mask = attn_mask[::sampling_factor, ::sampling_factor]

        return embedding, padding_mask, attn_mask

    def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1):
        """
        create attention mask according to sequence lengths and transformer
        context

        Args:
            - input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is
              the length of b-th sequence
            - subsampling_factor: int
                * Note that the left_context and right_context is specified in
                  the input frame-level while input to transformer may already
                  go through subsampling (e.g., the use of striding in vggblock)
                  we use subsampling_factor to scale the left/right context

        Return:
            - a (T, T) binary tensor or None, where T is max(input_lengths)
                * if self.transformer_context is None, None
                * if left_context is None,
                    * attn_mask[t, t + right_context + 1:] = 1
                    * others = 0
                * if right_context is None,
                    * attn_mask[t, 0:t - left_context] = 1
                    * others = 0
                * elsif
                    * attn_mask[t, t - left_context: t + right_context + 1] = 0
                    * others = 1
        """
        if self.transformer_context is None:
            return None

        maxT = torch.max(input_lengths).item()
        attn_mask = torch.zeros(maxT, maxT)

        left_context = self.transformer_context[0]
        right_context = self.transformer_context[1]
        if left_context is not None:
            left_context = math.ceil(self.transformer_context[0] / subsampling_factor)
        if right_context is not None:
            right_context = math.ceil(self.transformer_context[1] / subsampling_factor)

        for t in range(maxT):
            if left_context is not None:
                st = 0
                en = max(st, t - left_context)
                attn_mask[t, st:en] = 1
            if right_context is not None:
                st = t + right_context + 1
                # NOTE(review): the clamp below masks the last column even for
                # positions whose window extends past maxT — kept as-is to
                # preserve behavior; confirm against upstream intent.
                st = min(st, maxT - 1)
                attn_mask[t, st:] = 1
        return attn_mask.to(input_lengths.device)

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder batch dimension (dim 1 of the T-first tensors) for beam
        search."""
        encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
            1, new_order
        )
        if encoder_out["encoder_padding_mask"] is not None:
            encoder_out["encoder_padding_mask"] = encoder_out[
                "encoder_padding_mask"
            ].index_select(1, new_order)
        return encoder_out
class TransformerDecoder(FairseqIncrementalDecoder):
    """
    Conv + Transformer decoder: a stack of 1-D linearized convolutions followed
    by *len(transformer_config)* :class:`TransformerDecoderLayer` layers, with
    Linear adapters wherever two stages disagree on embedding dimension.

    Args:
        dictionary (~fairseq.data.Dictionary): decoding dictionary
        embed_dim (int): embedding dimension of the target tokens
        transformer_config (tuple): per-layer transformer settings; see
            DEFAULT_ENC_TRANSFORMER_CONFIG for the field layout
        conv_config (tuple): decoder 1-D convolution config,
            [(out_channels, kernel_size, use_layer_norm), ...]
        encoder_output_dim (int): dimension of the encoder outputs this decoder
            attends over (NOTE(review): currently unused in __init__)
    """

    def __init__(
        self,
        dictionary,
        embed_dim=512,
        transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
        conv_config=DEFAULT_DEC_CONV_CONFIG,
        encoder_output_dim=512,
    ):

        super().__init__(dictionary)
        vocab_size = len(dictionary)
        self.padding_idx = dictionary.pad()
        self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx)

        # Causal conv front-end; padding=kernel_size-1 keeps it left-padded.
        self.conv_layers = nn.ModuleList()
        for i in range(len(conv_config)):
            out_channels, kernel_size, layer_norm = conv_config[i]
            if i == 0:
                conv_layer = LinearizedConv1d(
                    embed_dim, out_channels, kernel_size, padding=kernel_size - 1
                )
            else:
                conv_layer = LinearizedConv1d(
                    conv_config[i - 1][0],
                    out_channels,
                    kernel_size,
                    padding=kernel_size - 1,
                )
            self.conv_layers.append(conv_layer)
            if layer_norm:
                self.conv_layers.append(nn.LayerNorm(out_channels))
            self.conv_layers.append(nn.ReLU())

        # Transformer stack with Linear adapters on dimension changes.
        self.layers = nn.ModuleList()
        if conv_config[-1][0] != transformer_config[0][0]:
            self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0]))
        self.layers.append(TransformerDecoderLayer(
            prepare_transformer_decoder_params(*transformer_config[0])
        ))

        for i in range(1, len(transformer_config)):
            if transformer_config[i - 1][0] != transformer_config[i][0]:
                self.layers.append(
                    Linear(transformer_config[i - 1][0], transformer_config[i][0])
                )
            self.layers.append(TransformerDecoderLayer(
                prepare_transformer_decoder_params(*transformer_config[i])
            ))
        # Output projection onto the vocabulary.
        self.fc_out = Linear(transformer_config[-1][0], vocab_size)

    def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
        """
        Args:
            prev_output_tokens (LongTensor): previous decoder outputs of shape
                `(batch, tgt_len)`, for input feeding/teacher forcing
            encoder_out (Tensor, optional): output from the encoder, used for
                encoder-side attention
            incremental_state (dict): dictionary used for storing state during
                :ref:`Incremental decoding`
        Returns:
            tuple:
                - the last decoder layer's output of shape `(batch, tgt_len,
                  vocab)`
                - the last decoder layer's attention weights of shape `(batch,
                  tgt_len, src_len)`
        """
        target_padding_mask = (
            (prev_output_tokens == self.padding_idx).to(prev_output_tokens.device)
            if incremental_state is None
            else None
        )

        if incremental_state is not None:
            # Incremental decoding: only the newest token is fed in.
            prev_output_tokens = prev_output_tokens[:, -1:]

        # embed tokens
        x = self.embed_tokens(prev_output_tokens)

        # B x T x C -> T x B x C
        x = self._transpose_if_training(x, incremental_state)

        for layer in self.conv_layers:
            if isinstance(layer, LinearizedConvolution):
                x = layer(x, incremental_state)
            else:
                x = layer(x)

        # B x T x C -> T x B x C
        x = self._transpose_if_inference(x, incremental_state)

        # decoder layers
        for layer in self.layers:
            if isinstance(layer, TransformerDecoderLayer):
                # NOTE(review): encoder_out["encoder_padding_mask"] is accessed
                # without guarding encoder_out is None, unlike the line above —
                # confirm callers never pass encoder_out=None here.
                x, *_ = layer(
                    x,
                    (encoder_out["encoder_out"] if encoder_out is not None else None),
                    (
                        encoder_out["encoder_padding_mask"].t()
                        if encoder_out["encoder_padding_mask"] is not None
                        else None
                    ),
                    incremental_state,
                    self_attn_mask=(
                        self.buffered_future_mask(x)
                        if incremental_state is None
                        else None
                    ),
                    self_attn_padding_mask=(
                        target_padding_mask if incremental_state is None else None
                    ),
                )
            else:
                x = layer(x)

        # T x B x C -> B x T x C
        x = x.transpose(0, 1)

        x = self.fc_out(x)

        return x, None

    def buffered_future_mask(self, tensor):
        """Return a cached (dim, dim) upper-triangular -inf mask that blocks
        attention to future positions; regrown lazily as needed."""
        dim = tensor.size(0)
        if (
            not hasattr(self, "_future_mask")
            or self._future_mask is None
            or self._future_mask.device != tensor.device
        ):
            self._future_mask = torch.triu(
                utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
            )
        if self._future_mask.size(0) < dim:
            self._future_mask = torch.triu(
                utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1
            )
        return self._future_mask[:dim, :dim]

    def _transpose_if_training(self, x, incremental_state):
        # During training the conv/transformer stack runs time-first.
        if incremental_state is None:
            x = x.transpose(0, 1)
        return x

    def _transpose_if_inference(self, x, incremental_state):
        # During incremental inference the convs ran batch-first; switch to
        # time-first for the transformer layers.
        if incremental_state:
            x = x.transpose(0, 1)
        return x
@register_model("asr_vggtransformer_encoder")
class VGGTransformerEncoderModel(FairseqEncoderModel):
    """Encoder-only VGG+Transformer model (for CTC-style training)."""

    def __init__(self, encoder):
        super().__init__(encoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument(
            "--input-feat-per-channel",
            type=int,
            metavar="N",
            help="encoder input dimension per input channel",
        )
        parser.add_argument(
            "--vggblock-enc-config",
            type=str,
            metavar="EXPR",
            help="""
    an array of tuples each containing the configuration of one vggblock
    [(out_channels, conv_kernel_size, pooling_kernel_size,num_conv_layers), ...]
            """,
        )
        parser.add_argument(
            "--transformer-enc-config",
            type=str,
            metavar="EXPR",
            help="""
    a tuple containing the configuration of the Transformer layers
    configurations:
    [(input_dim,
      num_heads,
      ffn_dim,
      normalize_before,
      dropout,
      attention_dropout,
      relu_dropout), ]""",
        )
        parser.add_argument(
            "--enc-output-dim",
            type=int,
            metavar="N",
            help="encoder output dimension, projecting the LSTM output",
        )
        parser.add_argument(
            "--in-channels",
            type=int,
            metavar="N",
            help="number of encoder input channels",
        )
        parser.add_argument(
            "--transformer-context",
            type=str,
            metavar="EXPR",
            help="""
    either None or a tuple of two ints, indicating left/right context a
    transformer can have access to""",
        )
        parser.add_argument(
            "--transformer-sampling",
            type=str,
            metavar="EXPR",
            help="""
    either None or a tuple of ints, indicating sampling factor in each layer""",
        )

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        base_architecture_enconly(args)
        # NOTE(review): eval() of command-line config strings — trusted input only.
        encoder = VGGTransformerEncoderOnly(
            vocab_size=len(task.target_dictionary),
            input_feat_per_channel=args.input_feat_per_channel,
            vggblock_config=eval(args.vggblock_enc_config),
            transformer_config=eval(args.transformer_enc_config),
            encoder_output_dim=args.enc_output_dim,
            in_channels=args.in_channels,
            transformer_context=eval(args.transformer_context),
            transformer_sampling=eval(args.transformer_sampling),
        )
        return cls(encoder)

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        # net_output['encoder_out'] is a (T, B, D) tensor
        lprobs = super().get_normalized_probs(net_output, log_probs, sample)
        # lprobs is a (T, B, D) tensor
        # we need to transpose to get (B, T, D) tensor
        lprobs = lprobs.transpose(0, 1).contiguous()
        lprobs.batch_first = True
        return lprobs
class VGGTransformerEncoderOnly(VGGTransformerEncoder):
    """VGGTransformerEncoder plus a per-frame projection onto the vocabulary,
    so it can be trained stand-alone (e.g. with CTC)."""

    def __init__(
        self,
        vocab_size,
        input_feat_per_channel,
        vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG,
        transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG,
        encoder_output_dim=512,
        in_channels=1,
        transformer_context=None,
        transformer_sampling=None,
    ):
        super().__init__(
            input_feat_per_channel=input_feat_per_channel,
            vggblock_config=vggblock_config,
            transformer_config=transformer_config,
            encoder_output_dim=encoder_output_dim,
            in_channels=in_channels,
            transformer_context=transformer_context,
            transformer_sampling=transformer_sampling,
        )
        # Frame-level output projection onto the vocabulary.
        self.fc_out = Linear(self.encoder_output_dim, vocab_size)

    def forward(self, src_tokens, src_lengths, **kwargs):
        """
        src_tokens: padded tensor (B, T, C * feat)
        src_lengths: tensor of original lengths of input utterances (B,)
        """

        enc_out = super().forward(src_tokens, src_lengths)
        x = self.fc_out(enc_out["encoder_out"])
        # x = F.log_softmax(x, dim=-1)
        # Note: no need this line, because model.get_normalized_prob will call
        # log_softmax
        return {
            "encoder_out": x,  # (T, B, C)
            "encoder_padding_mask": enc_out["encoder_padding_mask"],  # (T, B)
        }

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return (1e6, 1e6)  # an arbitrary large number
def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Token embedding table with default PyTorch initialization."""
    return nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
def Linear(in_features, out_features, bias=True, dropout=0):
    """Linear layer (input: N x T x C); the ``dropout`` argument is accepted
    for signature compatibility but unused."""
    return nn.Linear(in_features, out_features, bias=bias)
def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
    """Weight-normalized Conv1d layer optimized for incremental decoding."""
    conv = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
    # Variance scaled for the dropout rate, matching conv-seq2seq init.
    std = math.sqrt((4 * (1.0 - dropout)) / (conv.kernel_size[0] * in_channels))
    nn.init.normal_(conv.weight, mean=0, std=std)
    nn.init.constant_(conv.bias, 0)
    return nn.utils.weight_norm(conv, dim=2)
def LayerNorm(embedding_dim):
    """LayerNorm over the last (embedding) dimension."""
    return nn.LayerNorm(embedding_dim)
# seq2seq models
def base_architecture(args):
    """Fill in any missing seq2seq hyper-parameters with library defaults;
    attributes already present on ``args`` are left untouched."""
    defaults = [
        ("input_feat_per_channel", 40),
        ("vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG),
        ("transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG),
        ("enc_output_dim", 512),
        ("in_channels", 1),
        ("tgt_embed_dim", 128),
        ("transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG),
        ("conv_dec_config", DEFAULT_DEC_CONV_CONFIG),
        ("transformer_context", "None"),
    ]
    for name, value in defaults:
        setattr(args, name, getattr(args, name, value))
@register_model_architecture("asr_vggtransformer", "vggtransformer_1")
def vggtransformer_1(args):
    """Preset: 14-layer 1024-dim encoder, 4-layer decoder, 128-dim target
    embeddings. Existing attributes on ``args`` win over these defaults."""
    defaults = [
        ("input_feat_per_channel", 80),
        ("vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"),
        (
            "transformer_enc_config",
            "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14",
        ),
        ("enc_output_dim", 1024),
        ("tgt_embed_dim", 128),
        ("conv_dec_config", "((256, 3, True),) * 4"),
        (
            "transformer_dec_config",
            "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4",
        ),
    ]
    for name, value in defaults:
        setattr(args, name, getattr(args, name, value))
@register_model_architecture("asr_vggtransformer", "vggtransformer_2")
def vggtransformer_2(args):
    """Preset: 16-layer 1024-dim encoder, 6-layer decoder, 512-dim target
    embeddings. Existing attributes on ``args`` win over these defaults."""
    defaults = [
        ("input_feat_per_channel", 80),
        ("vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"),
        (
            "transformer_enc_config",
            "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
        ),
        ("enc_output_dim", 1024),
        ("tgt_embed_dim", 512),
        ("conv_dec_config", "((256, 3, True),) * 4"),
        (
            "transformer_dec_config",
            "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6",
        ),
    ]
    for name, value in defaults:
        setattr(args, name, getattr(args, name, value))
@register_model_architecture("asr_vggtransformer", "vggtransformer_base")
def vggtransformer_base(args):
    """Preset: 12-layer 512-dim encoder, 6-layer decoder (~65M params; see the
    size breakdown in the comments below this function). Existing attributes
    on ``args`` win over these defaults."""
    defaults = [
        ("input_feat_per_channel", 80),
        ("vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"),
        (
            "transformer_enc_config",
            "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12",
        ),
        ("enc_output_dim", 512),
        ("tgt_embed_dim", 512),
        ("conv_dec_config", "((256, 3, True),) * 4"),
        (
            "transformer_dec_config",
            "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6",
        ),
    ]
    for name, value in defaults:
        setattr(args, name, getattr(args, name, value))
# Size estimations:
# Encoder:
# - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3 = 258K
# Transformer:
# - input dimension adapter: 2560 x 512 -> 1.31M
# - transformer_layers (x12) --> 37.74M
# * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M
# * FFN weight: 512*2048*2 = 2.097M
# - output dimension adapter: 512 x 512 -> 0.26 M
# Decoder:
# - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3
# - transformer_layer: (x6) --> 25.16M
# * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M
# * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M
# * FFN: 512*2048*2 = 2.097M
# Final FC:
# - FC: 512*5000 = 256K (assuming vocab size 5K)
# In total:
# ~65 M
# CTC models
def base_architecture_enconly(args):
    """Fill in missing encoder-only (CTC) hyper-parameters with defaults;
    attributes already present on ``args`` are left untouched."""
    defaults = [
        ("input_feat_per_channel", 40),
        ("vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2"),
        (
            "transformer_enc_config",
            "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2",
        ),
        ("enc_output_dim", 512),
        ("in_channels", 1),
        ("transformer_context", "None"),
        ("transformer_sampling", "None"),
    ]
    for name, value in defaults:
        setattr(args, name, getattr(args, name, value))
@register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1")
def vggtransformer_enc_1(args):
    """Encoder-only preset with 16 large (1024-dim, 16-head) transformer
    layers. Existing attributes on ``args`` win over these defaults."""
    # vggtransformer_enc_1 matches vggtransformer_enc_big except the number
    # of layers is increased to 16; kept for backward compatibility.
    defaults = [
        ("input_feat_per_channel", 80),
        ("vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"),
        (
            "transformer_enc_config",
            "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16",
        ),
        ("enc_output_dim", 1024),
    ]
    for name, value in defaults:
        setattr(args, name, getattr(args, name, value))
| 37,045 | 35.788481 | 88 | py |
BIFI | BIFI-main/utils/fairseq/examples/speech_recognition/models/w2l_conv_glu_enc.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import (
FairseqEncoder,
FairseqEncoderModel,
register_model,
register_model_architecture,
)
default_conv_enc_config = """[
(400, 13, 170, 0.2),
(440, 14, 0, 0.214),
(484, 15, 0, 0.22898),
(532, 16, 0, 0.2450086),
(584, 17, 0, 0.262159202),
(642, 18, 0, 0.28051034614),
(706, 19, 0, 0.30014607037),
(776, 20, 0, 0.321156295296),
(852, 21, 0, 0.343637235966),
(936, 22, 0, 0.367691842484),
(1028, 23, 0, 0.393430271458),
(1130, 24, 0, 0.42097039046),
(1242, 25, 0, 0.450438317792),
(1366, 26, 0, 0.481969000038),
(1502, 27, 0, 0.51570683004),
(1652, 28, 0, 0.551806308143),
(1816, 29, 0, 0.590432749713),
]"""
@register_model("asr_w2l_conv_glu_encoder")
class W2lConvGluEncoderModel(FairseqEncoderModel):
    """wav2letter-style encoder-only model: a stack of 1-D convolutions with
    GLU activations producing per-frame vocabulary logits."""

    def __init__(self, encoder):
        super().__init__(encoder)

    @staticmethod
    def add_args(parser):
        """Add model-specific arguments to the parser."""
        parser.add_argument(
            "--input-feat-per-channel",
            type=int,
            metavar="N",
            help="encoder input dimension per input channel",
        )
        parser.add_argument(
            "--in-channels",
            type=int,
            metavar="N",
            help="number of encoder input channels",
        )
        parser.add_argument(
            "--conv-enc-config",
            type=str,
            metavar="EXPR",
            help="""
    an array of tuples each containing the configuration of one conv layer
    [(out_channels, kernel_size, padding, dropout), ...]
            """,
        )

    @classmethod
    def build_model(cls, args, task):
        """Build a new model instance."""
        conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
        # NOTE(review): eval() of a config string — trusted input only.
        encoder = W2lConvGluEncoder(
            vocab_size=len(task.target_dictionary),
            input_feat_per_channel=args.input_feat_per_channel,
            in_channels=args.in_channels,
            conv_enc_config=eval(conv_enc_config),
        )
        return cls(encoder)

    def get_normalized_probs(self, net_output, log_probs, sample=None):
        lprobs = super().get_normalized_probs(net_output, log_probs, sample)
        # Encoder output is time-first (T, B, vocab).
        lprobs.batch_first = False
        return lprobs
class W2lConvGluEncoder(FairseqEncoder):
    """wav2letter-style convolutional encoder with GLU activations.

    Each weight-normalized Conv1d layer is followed by a GLU (which halves the
    channel count) and dropout; two weight-normalized linear layers then
    project to the vocabulary size.

    Args:
        vocab_size: size of the output vocabulary.
        input_feat_per_channel: encoder input dimension per input channel.
        in_channels: number of input channels (only 1 is supported).
        conv_enc_config: iterable of (out_channels, kernel_size, padding,
            dropout) tuples, one per conv layer.
    """

    def __init__(
        self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config
    ):
        super().__init__(None)
        self.input_dim = input_feat_per_channel
        if in_channels != 1:
            raise ValueError("only 1 input channel is currently supported")
        self.conv_layers = nn.ModuleList()
        self.linear_layers = nn.ModuleList()
        self.dropouts = []
        cur_channels = input_feat_per_channel
        for out_channels, kernel_size, padding, dropout in conv_enc_config:
            layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding)
            layer.weight.data.mul_(math.sqrt(3))  # match wav2letter init
            self.conv_layers.append(nn.utils.weight_norm(layer))
            self.dropouts.append(dropout)
            if out_channels % 2 != 0:
                raise ValueError("odd # of out_channels is incompatible with GLU")
            cur_channels = out_channels // 2  # halved by GLU
        for out_channels in [2 * cur_channels, vocab_size]:
            layer = nn.Linear(cur_channels, out_channels)
            layer.weight.data.mul_(math.sqrt(3))
            self.linear_layers.append(nn.utils.weight_norm(layer))
            cur_channels = out_channels // 2

    def forward(self, src_tokens, src_lengths, **kwargs):
        """
        src_tokens: padded tensor (B, T, C * feat)
        src_lengths: tensor of original lengths of input utterances (B,)
        """
        B, T, _ = src_tokens.size()
        x = src_tokens.transpose(1, 2).contiguous()  # (B, feat, T) assuming C == 1
        for layer_idx in range(len(self.conv_layers)):
            x = self.conv_layers[layer_idx](x)
            x = F.glu(x, dim=1)
            x = F.dropout(x, p=self.dropouts[layer_idx], training=self.training)
        x = x.transpose(1, 2).contiguous()  # (B, T, 908)
        x = self.linear_layers[0](x)
        x = F.glu(x, dim=2)
        # Bug fix: pass training=self.training so dropout is disabled during
        # evaluation (it was previously always applied, F.dropout defaults to
        # training=True).
        x = F.dropout(x, p=self.dropouts[-1], training=self.training)
        x = self.linear_layers[1](x)
        assert x.size(0) == B
        assert x.size(1) == T
        encoder_out = x.transpose(0, 1)  # (T, B, vocab_size)
        # need to debug this -- find a simpler/elegant way in pytorch APIs
        # mask[t, b] is True for positions past the true utterance length
        encoder_padding_mask = (
            torch.arange(T).view(1, T).expand(B, -1).to(x.device)
            >= src_lengths.view(B, 1).expand(-1, T)
        ).t()  # (B x T) -> (T x B)
        return {
            "encoder_out": encoder_out,  # (T, B, vocab_size)
            "encoder_padding_mask": encoder_padding_mask,  # (T, B)
        }

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder the batch dimension (dim 1) of encoder output for beam search."""
        encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select(
            1, new_order
        )
        encoder_out["encoder_padding_mask"] = encoder_out[
            "encoder_padding_mask"
        ].index_select(1, new_order)
        return encoder_out

    def max_positions(self):
        """Maximum input length supported by the encoder."""
        return (1e6, 1e6)  # an arbitrary large number
@register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc")
def w2l_conv_glu_enc(args):
    """Install defaults for any architecture options the user left unset."""
    defaults = (
        ("input_feat_per_channel", 80),
        ("in_channels", 1),
        ("conv_enc_config", default_conv_enc_config),
    )
    for option, value in defaults:
        setattr(args, option, getattr(args, option, value))
| 5,987 | 33.217143 | 87 | py |
BIFI | BIFI-main/utils/fairseq/examples/speech_recognition/tasks/speech_recognition.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import re
import torch
from fairseq.data import Dictionary
from fairseq.tasks import FairseqTask, register_task
from examples.speech_recognition.data import AsrDataset
from examples.speech_recognition.data.replabels import replabel_symbol
def get_asr_dataset_from_json(data_json_path, tgt_dict):
    """
    Parse data json and create dataset.
    See scripts/asr_prep_json.py which pack json from raw files

    The json maps utterance ids under "utts" to an "input" record (with
    "length_ms" and "path") and an "output" record (with "text", "token"
    and comma-separated "tokenid").  Utterances are ordered longest-first
    and an eos symbol is appended to every target.
    """
    if not os.path.isfile(data_json_path):
        raise FileNotFoundError("Dataset not found: {}".format(data_json_path))
    with open(data_json_path, "rb") as fin:
        utts = json.load(fin)["utts"]
    assert len(utts) != 0

    # longest utterances first
    ordered = sorted(
        utts.items(),
        key=lambda kv: int(kv[1]["input"]["length_ms"]),
        reverse=True,
    )

    aud_paths = []
    ids = []
    speakers = []
    frame_sizes = []
    tgt = []
    for utt_id, info in ordered:
        aud_paths.append(info["input"]["path"])
        ids.append(utt_id)
        # speaker id is the first two dash-separated fields of the utterance id
        # NOTE(review): assumes every id matches "<spk>-<chapter>-<utt>";
        # a malformed id would make m None and raise here -- confirm upstream.
        m = re.search("(.+?)-(.+?)-(.+?)", utt_id)
        speakers.append(m.group(1) + "_" + m.group(2))
        frame_sizes.append(info["input"]["length_ms"])
        tokens = [int(t) for t in info["output"]["tokenid"].split(", ")]
        tokens.append(tgt_dict.eos())  # append eos
        tgt.append(tokens)
    return AsrDataset(aud_paths, frame_sizes, tgt, tgt_dict, ids, speakers)
@register_task("speech_recognition")
class SpeechRecognitionTask(FairseqTask):
    """
    Task for training speech recognition model.
    """
    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        parser.add_argument("data", help="path to data directory")
        parser.add_argument(
            "--silence-token", default="\u2581", help="token for silence (used by w2l)"
        )
    def __init__(self, args, tgt_dict):
        super().__init__(args)
        # target-side dictionary; this task has no source dictionary
        self.tgt_dict = tgt_dict
    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries)."""
        dict_path = os.path.join(args.data, "dict.txt")
        if not os.path.isfile(dict_path):
            raise FileNotFoundError("Dict not found: {}".format(dict_path))
        tgt_dict = Dictionary.load(dict_path)
        # reserve the extra output symbols required by the chosen criterion
        if args.criterion == "ctc_loss":
            tgt_dict.add_symbol("<ctc_blank>")
        elif args.criterion == "asg_loss":
            for i in range(1, args.max_replabel + 1):
                tgt_dict.add_symbol(replabel_symbol(i))
        print("| dictionary: {} types".format(len(tgt_dict)))
        return cls(args, tgt_dict)
    def load_dataset(self, split, combine=False, **kwargs):
        """Load a given dataset split.
        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        data_json_path = os.path.join(self.args.data, "{}.json".format(split))
        self.datasets[split] = get_asr_dataset_from_json(data_json_path, self.tgt_dict)
    def build_generator(self, args):
        # decoders are imported lazily so the w2l decoder dependency is only
        # required when actually requested via --w2l-decoder
        w2l_decoder = getattr(args, "w2l_decoder", None)
        if w2l_decoder == "viterbi":
            from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
            return W2lViterbiDecoder(args, self.target_dictionary)
        elif w2l_decoder == "kenlm":
            from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
            return W2lKenLMDecoder(args, self.target_dictionary)
        else:
            return super().build_generator(args)
    @property
    def target_dictionary(self):
        """Return the :class:`~fairseq.data.Dictionary` for the language
        model."""
        return self.tgt_dict
    @property
    def source_dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary` (if applicable
        for this task)."""
        return None
| 4,531 | 32.57037 | 87 | py |
BIFI | BIFI-main/utils/fairseq/examples/byte_level_bpe/gru_transformer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
import torch.nn.functional as F
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerModel, TransformerEncoder
@register_model("gru_transformer")
class GRUTransformerModel(TransformerModel):
    """Transformer whose encoder contextualizes embeddings with a BiGRU."""

    @classmethod
    def build_encoder(cls, args, src_dict, embed_tokens):
        """Construct the GRU-augmented encoder for this model."""
        encoder = GRUTransformerEncoder(args, src_dict, embed_tokens)
        return encoder
class GRUTransformerEncoder(TransformerEncoder):
    """TransformerEncoder whose token embeddings pass through a bidirectional GRU."""

    def __init__(self, args, dictionary, embed_tokens):
        super().__init__(args, dictionary, embed_tokens)
        emb_dim = embed_tokens.embedding_dim
        # each GRU direction outputs emb_dim // 2, so the concatenated
        # bidirectional output has the original embedding width
        self.emb_ctx = nn.GRU(
            input_size=emb_dim,
            hidden_size=emb_dim // 2,
            num_layers=1,
            bidirectional=True,
        )

    def forward_embedding(self, src_tokens):
        """Embed tokens (plus positions), then contextualize with the GRU."""
        token_emb = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is None:
            x = token_emb
        else:
            x = token_emb + self.embed_positions(src_tokens)

        # the GRU expects time-major input
        x = x.transpose(0, 1)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x, _ = self.emb_ctx.forward(x)
        x = x.transpose(0, 1)

        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        return x, token_emb
@register_model_architecture("gru_transformer", "gru_transformer")
def gru_transformer_base_architecture(args):
    """Fill in base-architecture defaults for every unset hyperparameter."""

    def fallback(name, value):
        # keep any value the user already set; otherwise install the default
        setattr(args, name, getattr(args, name, value))

    fallback("encoder_embed_path", None)
    fallback("encoder_embed_dim", 512)
    fallback("encoder_ffn_embed_dim", 2048)
    fallback("encoder_layers", 6)
    fallback("encoder_attention_heads", 8)
    fallback("encoder_normalize_before", False)
    fallback("encoder_learned_pos", False)
    fallback("decoder_embed_path", None)
    # decoder dims default to the (possibly user-set) encoder dims, so these
    # must run after the encoder defaults above
    fallback("decoder_embed_dim", args.encoder_embed_dim)
    fallback("decoder_ffn_embed_dim", args.encoder_ffn_embed_dim)
    fallback("decoder_layers", 6)
    fallback("decoder_attention_heads", 8)
    fallback("decoder_normalize_before", False)
    fallback("decoder_learned_pos", False)
    fallback("attention_dropout", 0.0)
    fallback("activation_dropout", 0.0)
    fallback("activation_fn", "relu")
    fallback("dropout", 0.1)
    fallback("adaptive_softmax_cutoff", None)
    fallback("adaptive_softmax_dropout", 0)
    fallback("share_decoder_input_output_embed", False)
    fallback("share_all_embeddings", False)
    fallback("no_token_positional_embeddings", False)
    fallback("adaptive_input", False)
    fallback("no_cross_attention", False)
    fallback("cross_self_attention", False)
    fallback("layer_wise_attention", False)
    fallback("decoder_output_dim", args.decoder_embed_dim)
    fallback("decoder_input_dim", args.decoder_embed_dim)
    fallback("no_scale_embedding", False)
    fallback("layernorm_embedding", False)
@register_model_architecture("gru_transformer", "gru_transformer_big")
def gru_transformer_big(args):
    """Big variant: wider embeddings/FFNs, more heads, higher dropout."""

    def fallback(name, value):
        setattr(args, name, getattr(args, name, value))

    fallback("encoder_embed_dim", 1024)
    fallback("encoder_ffn_embed_dim", 4096)
    fallback("encoder_attention_heads", 16)
    fallback("encoder_normalize_before", False)
    fallback("decoder_embed_dim", 1024)
    fallback("decoder_ffn_embed_dim", 4096)
    fallback("decoder_attention_heads", 16)
    fallback("dropout", 0.3)
    gru_transformer_base_architecture(args)
| 5,088 | 47.466667 | 87 | py |
BIFI | BIFI-main/utils/fairseq/scripts/average_checkpoints.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
import torch
import os
import re
def average_checkpoints(inputs):
    """Loads checkpoints from inputs and returns a model with averaged weights.
    Args:
        inputs: An iterable of string paths of checkpoints to load from.
    Returns:
        A dict of string keys mapping to various values. The 'model' key
        from the returned dict should correspond to an OrderedDict mapping
        string parameter names to torch Tensors.
    """
    params_dict = collections.OrderedDict()
    params_keys = None
    new_state = None
    num_models = len(inputs)
    for f in inputs:
        # always restore to CPU so checkpoints saved on GPU load anywhere
        state = torch.load(
            f,
            map_location=(
                lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
            ),
        )
        # Copies over the settings from the first checkpoint
        if new_state is None:
            new_state = state
        model_params = state['model']
        model_params_keys = list(model_params.keys())
        # all checkpoints must share exactly the same parameter names/order
        if params_keys is None:
            params_keys = model_params_keys
        elif params_keys != model_params_keys:
            raise KeyError(
                'For checkpoint {}, expected list of params: {}, '
                'but found: {}'.format(f, params_keys, model_params_keys)
            )
        for k in params_keys:
            p = model_params[k]
            # upcast half-precision params so the running sum keeps precision
            if isinstance(p, torch.HalfTensor):
                p = p.float()
            if k not in params_dict:
                params_dict[k] = p.clone()
                # NOTE: clone() is needed in case of p is a shared parameter
            else:
                params_dict[k] += p
    averaged_params = collections.OrderedDict()
    for k, v in params_dict.items():
        averaged_params[k] = v
        # in-place divide; v aliases the tensor in params_dict, so that entry
        # is mutated too.  For integer tensors this truncates (older torch
        # in-place division semantics) -- TODO confirm on newer torch, where
        # in-place true division of integer tensors raises.
        averaged_params[k].div_(num_models)
    new_state['model'] = averaged_params
    return new_state
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
    """Return the ``n`` most recent checkpoint files under ``paths[0]``.

    Args:
        paths: single-element list containing the checkpoint directory.
        n: number of checkpoints to return.
        update_based: if True, match ``checkpoint_<epoch>_<update>.pt`` and
            sort by update number; otherwise match ``checkpoint<epoch>.pt``
            and sort by epoch number.
        upper_bound: if given, ignore checkpoints whose sort key exceeds it.

    Returns:
        Full paths of the ``n`` matching checkpoints, newest first.

    Raises:
        Exception: if fewer than ``n`` matching checkpoints are found.
    """
    assert len(paths) == 1
    path = paths[0]
    if update_based:
        pt_regexp = re.compile(r'checkpoint_\d+_(\d+)\.pt')
    else:
        pt_regexp = re.compile(r'checkpoint(\d+)\.pt')
    files = os.listdir(path)

    entries = []
    for f in files:
        m = pt_regexp.fullmatch(f)
        if m is not None:
            sort_key = int(m.group(1))
            if upper_bound is None or sort_key <= upper_bound:
                entries.append((sort_key, m.group(0)))
    if len(entries) < n:
        # Bug fix: the message was previously raised unformatted, with the
        # values passed as extra Exception args instead of being substituted
        # into the '{}' placeholders.
        raise Exception(
            'Found {} checkpoint files but need at least {}'.format(len(entries), n)
        )
    return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
    """Command-line entry point: average checkpoints and save the result."""
    parser = argparse.ArgumentParser(
        description='Tool to average the params of input checkpoints to '
                    'produce a new checkpoint',
    )
    # fmt: off
    parser.add_argument('--inputs', required=True, nargs='+',
                        help='Input checkpoint file paths.')
    parser.add_argument('--output', required=True, metavar='FILE',
                        help='Write the new checkpoint containing the averaged weights to this path.')
    # --num-epoch-checkpoints and --num-update-checkpoints are mutually exclusive
    num_group = parser.add_mutually_exclusive_group()
    num_group.add_argument('--num-epoch-checkpoints', type=int,
                           help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, '
                           'and average last this many of them.')
    num_group.add_argument('--num-update-checkpoints', type=int,
                           help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, '
                           'and average last this many of them.')
    parser.add_argument('--checkpoint-upper-bound', type=int,
                        help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, '
                        'when using --num-update-checkpoints, this will set an upper bound on which update to use'
                        'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.'
                        'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would be averaged assuming --save-interval-updates 500'
                        )
    # fmt: on
    args = parser.parse_args()
    print(args)
    num = None
    is_update_based = False
    if args.num_update_checkpoints is not None:
        num = args.num_update_checkpoints
        is_update_based = True
    elif args.num_epoch_checkpoints is not None:
        num = args.num_epoch_checkpoints
    assert args.checkpoint_upper_bound is None or (args.num_epoch_checkpoints is not None or args.num_update_checkpoints is not None), \
        '--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints'
    assert args.num_epoch_checkpoints is None or args.num_update_checkpoints is None, \
        'Cannot combine --num-epoch-checkpoints and --num-update-checkpoints'
    # when a count was given, replace --inputs (a directory) with the resolved
    # list of the most recent checkpoint files
    if num is not None:
        args.inputs = last_n_checkpoints(
            args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound,
        )
        print('averaging checkpoints: ', args.inputs)
    new_state = average_checkpoints(args.inputs)
    torch.save(new_state, args.output)
    print('Finished writing averaged checkpoint to {}.'.format(args.output))
if __name__ == '__main__':
    main()
| 5,676 | 38.423611 | 175 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_train.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from io import StringIO
import unittest
from unittest.mock import MagicMock, patch
import torch
from fairseq import data, checkpoint_utils
def mock_trainer(epoch, num_updates, iterations_in_epoch):
    """Build a MagicMock trainer whose checkpoint state matches the arguments."""
    checkpoint_state = {
        'train_iterator': {
            'epoch': epoch,
            'iterations_in_epoch': iterations_in_epoch,
            'shuffle': False,
        },
    }
    fake_trainer = MagicMock()
    fake_trainer.load_checkpoint.return_value = checkpoint_state
    fake_trainer.get_num_updates.return_value = num_updates
    return fake_trainer
def mock_dict():
    """Return a MagicMock dictionary with fixed pad/eos/unk indices."""
    fake_dict = MagicMock()
    for symbol, index in (('pad', 1), ('eos', 2), ('unk', 3)):
        getattr(fake_dict, symbol).return_value = index
    return fake_dict
def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
    """Create a (mock trainer, EpochBatchIterator) pair over a dummy dataset."""
    token_row = torch.LongTensor(list(range(epoch_size))).view(1, -1)
    block_ds = data.TokenBlockDataset(
        token_row,
        sizes=[token_row.size(-1)],
        block_size=1,
        pad=0,
        eos=1,
        include_targets=False,
    )
    pair_ds = data.LanguagePairDataset(
        block_ds, block_ds.sizes, mock_dict(), shuffle=False
    )
    # one single-sample batch per token so iteration counts are predictable
    epoch_itr = data.EpochBatchIterator(
        dataset=pair_ds,
        collate_fn=pair_ds.collater,
        batch_sampler=[[i] for i in range(epoch_size)],
    )
    return mock_trainer(epoch, num_updates, iterations_in_epoch), epoch_itr
class TestLoadCheckpoint(unittest.TestCase):
    """Tests for checkpoint_utils.load_checkpoint resume behaviour."""
    def setUp(self):
        self.args_mock = MagicMock()
        self.args_mock.optimizer_overrides = '{}'
        self.args_mock.reset_dataloader = False
        self.args_mock.reset_meters = False
        self.args_mock.reset_optimizer = False
        # stub out filesystem access so no real checkpoint files are needed
        self.patches = {
            'os.makedirs': MagicMock(),
            'os.path.join': MagicMock(),
            'os.path.isfile': MagicMock(return_value=True),
            'os.path.isabs': MagicMock(return_value=False),
        }
        self.applied_patches = [patch(p, d) for p, d in self.patches.items()]
        [p.start() for p in self.applied_patches]
    def test_load_partial_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            # epoch 2 was half finished: 50 of 150 iterations done
            trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)
            # resumption continues with sample 50, the first unseen one
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 50)
            self.assertEqual(epoch_itr.iterations_in_epoch, 51)
            for _ in range(150 - 52):
                next(itr)
            self.assertEqual(epoch_itr.iterations_in_epoch, 149)
            self.assertTrue(itr.has_next())
            next(itr)
            self.assertFalse(itr.has_next())
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertTrue(itr.has_next())
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
    def test_load_full_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            # epoch 2 fully finished, so loading should advance to epoch 3
            trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)
    def test_load_no_checkpoint(self):
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
            trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
            # pretend no checkpoint file exists on disk
            self.patches['os.path.isfile'].return_value = False
            _, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 1)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)
    def tearDown(self):
        patch.stopall()
if __name__ == '__main__':
unittest.main()
| 4,690 | 35.364341 | 94 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_average_checkpoints.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import os
import tempfile
import unittest
import shutil
import numpy as np
import torch
from torch import nn
from scripts.average_checkpoints import average_checkpoints
class ModelWithSharedParameter(nn.Module):
    """Small model whose FC2 weight/bias are tied to FC1.

    Used to verify that checkpoint averaging handles shared parameters.
    """

    def __init__(self):
        super(ModelWithSharedParameter, self).__init__()
        self.embedding = nn.Embedding(1000, 200)
        self.FC1 = nn.Linear(200, 200)
        self.FC2 = nn.Linear(200, 200)
        # tie weight in FC2 to FC1 (both names reference the same data)
        self.FC2.weight = nn.Parameter(self.FC1.weight)
        self.FC2.bias = nn.Parameter(self.FC1.bias)
        self.relu = nn.ReLU()

    def forward(self, input):
        # Bug fix: the attribute is ``self.relu``; ``self.ReLU`` does not
        # exist and raised AttributeError on every forward pass.
        return self.FC2(self.relu(self.FC1(input))) + self.FC1(input)
class TestAverageCheckpoints(unittest.TestCase):
    """Tests for scripts.average_checkpoints.average_checkpoints."""
    def test_average_checkpoints(self):
        # two fake checkpoints with double, float and int parameters
        params_0 = collections.OrderedDict(
            [
                ('a', torch.DoubleTensor([100.0])),
                ('b', torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])),
                ('c', torch.IntTensor([7, 8, 9])),
            ]
        )
        params_1 = collections.OrderedDict(
            [
                ('a', torch.DoubleTensor([1.0])),
                ('b', torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])),
                ('c', torch.IntTensor([2, 2, 2])),
            ]
        )
        params_avg = collections.OrderedDict(
            [
                ('a', torch.DoubleTensor([50.5])),
                ('b', torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])),
                # We expect truncation for integer division
                ('c', torch.IntTensor([4, 5, 5])),
            ]
        )
        fd_0, path_0 = tempfile.mkstemp()
        fd_1, path_1 = tempfile.mkstemp()
        torch.save(collections.OrderedDict([('model', params_0)]), path_0)
        torch.save(collections.OrderedDict([('model', params_1)]), path_1)
        output = average_checkpoints([path_0, path_1])['model']
        os.close(fd_0)
        os.remove(path_0)
        os.close(fd_1)
        os.remove(path_1)
        # keys must come back in the same order with element-wise averages
        for (k_expected, v_expected), (k_out, v_out) in zip(
                params_avg.items(), output.items()):
            self.assertEqual(
                k_expected, k_out, 'Key mismatch - expected {} but found {}. '
                '(Expected list of keys: {} vs actual list of keys: {})'.format(
                    k_expected, k_out, params_avg.keys(), output.keys()
                )
            )
            np.testing.assert_allclose(
                v_expected.numpy(),
                v_out.numpy(),
                err_msg='Tensor value mismatch for key {}'.format(k_expected)
            )
    def test_average_checkpoints_with_shared_parameters(self):
        # helper: save a model whose FC1 weight is a constant `value`
        def _construct_model_with_shared_parameters(path, value):
            m = ModelWithSharedParameter()
            nn.init.constant_(m.FC1.weight, value)
            torch.save(
                {'model': m.state_dict()},
                path
            )
            return m
        tmpdir = tempfile.mkdtemp()
        paths = []
        path = os.path.join(tmpdir, "m1.pt")
        m1 = _construct_model_with_shared_parameters(path, 1.0)
        paths.append(path)
        path = os.path.join(tmpdir, "m2.pt")
        m2 = _construct_model_with_shared_parameters(path, 2.0)
        paths.append(path)
        path = os.path.join(tmpdir, "m3.pt")
        m3 = _construct_model_with_shared_parameters(path, 3.0)
        paths.append(path)
        new_model = average_checkpoints(paths)
        # every (possibly shared) parameter must be the mean of the three models
        self.assertTrue(
            torch.equal(
                new_model['model']['embedding.weight'],
                (m1.embedding.weight +
                 m2.embedding.weight +
                 m3.embedding.weight) / 3.0
            )
        )
        self.assertTrue(
            torch.equal(
                new_model['model']['FC1.weight'],
                (m1.FC1.weight +
                 m2.FC1.weight +
                 m3.FC1.weight) / 3.0
            )
        )
        self.assertTrue(
            torch.equal(
                new_model['model']['FC2.weight'],
                (m1.FC2.weight +
                 m2.FC2.weight +
                 m3.FC2.weight) / 3.0
            )
        )
        shutil.rmtree(tmpdir)
if __name__ == '__main__':
unittest.main()
| 4,494 | 30.215278 | 80 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_reproducibility.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from io import StringIO
import json
import os
import tempfile
import unittest
import torch
from . import test_binaries
class TestReproducibility(unittest.TestCase):
    """Checks that resuming training from a checkpoint reproduces the same
    losses/metrics as an uninterrupted run."""
    def _test_reproducibility(
        self,
        name,
        extra_flags=None,
        delta=0.0001,
        resume_checkpoint='checkpoint1.pt',
        max_epoch=3,
    ):
        if extra_flags is None:
            extra_flags = []
        with tempfile.TemporaryDirectory(name) as data_dir:
            with self.assertLogs() as logs:
                test_binaries.create_dummy_data(data_dir)
                test_binaries.preprocess_translation_data(data_dir)
            # train epochs 1 and 2 together
            with self.assertLogs() as logs:
                test_binaries.train_translation_model(
                    data_dir, 'fconv_iwslt_de_en', [
                        '--dropout', '0.0',
                        '--log-format', 'json',
                        '--log-interval', '1',
                        '--max-epoch', str(max_epoch),
                    ] + extra_flags,
                )
            # last four records hold the final train/valid stats as json
            train_log, valid_log = map(lambda rec: json.loads(rec.msg), logs.records[-4:-2])
            # train epoch 2, resuming from previous checkpoint 1
            os.rename(
                os.path.join(data_dir, resume_checkpoint),
                os.path.join(data_dir, 'checkpoint_last.pt'),
            )
            with self.assertLogs() as logs:
                test_binaries.train_translation_model(
                    data_dir, 'fconv_iwslt_de_en', [
                        '--dropout', '0.0',
                        '--log-format', 'json',
                        '--log-interval', '1',
                        '--max-epoch', str(max_epoch),
                    ] + extra_flags,
                )
            train_res_log, valid_res_log = map(lambda rec: json.loads(rec.msg), logs.records[-4:-2])
            # resumed run must match the uninterrupted run within `delta`
            for k in ['train_loss', 'train_ppl', 'train_num_updates', 'train_gnorm']:
                self.assertAlmostEqual(float(train_log[k]), float(train_res_log[k]), delta=delta)
            for k in ['valid_loss', 'valid_ppl', 'valid_num_updates', 'valid_best_loss']:
                self.assertAlmostEqual(float(valid_log[k]), float(valid_res_log[k]), delta=delta)
    def test_reproducibility(self):
        self._test_reproducibility('test_reproducibility')
    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_reproducibility_fp16(self):
        # fp16 accumulates more numeric noise, hence the looser delta
        self._test_reproducibility('test_reproducibility_fp16', [
            '--fp16',
            '--fp16-init-scale', '4096',
        ], delta=0.011)
    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_reproducibility_memory_efficient_fp16(self):
        self._test_reproducibility('test_reproducibility_memory_efficient_fp16', [
            '--memory-efficient-fp16',
            '--fp16-init-scale', '4096',
        ])
    def test_mid_epoch_reproducibility(self):
        # resume from a mid-epoch checkpoint (epoch 1, update 3)
        self._test_reproducibility(
            'test_mid_epoch_reproducibility',
            ['--save-interval-updates', '3'],
            resume_checkpoint='checkpoint_1_3.pt',
            max_epoch=1,
        )
if __name__ == '__main__':
unittest.main()
| 3,459 | 34.670103 | 100 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_sequence_scorer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import unittest
import torch
from fairseq.sequence_scorer import SequenceScorer
import tests.utils as test_utils
class TestSequenceScorer(unittest.TestCase):
    """Tests SequenceScorer against hand-specified step probabilities."""
    def test_sequence_scorer(self):
        # construct dummy dictionary
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        eos = d.eos()
        w1 = 4
        w2 = 5
        # construct dataloader
        data = [
            {
                'source': torch.LongTensor([w1, w2, eos]),
                'target': torch.LongTensor([w1, w2, w1, eos]),
            },
            {
                'source': torch.LongTensor([w2, eos]),
                'target': torch.LongTensor([w2, w1, eos]),
            },
            {
                'source': torch.LongTensor([w2, eos]),
                'target': torch.LongTensor([w2, eos]),
            },
        ]
        data_itr = test_utils.dummy_dataloader(data)
        # specify expected output probabilities
        args = argparse.Namespace()
        unk = 0.
        args.beam_probs = [
            # step 0:
            torch.FloatTensor([
                # eos      w1   w2
                [0.0, unk, 0.6, 0.4],  # sentence 1
                [0.0, unk, 0.4, 0.6],  # sentence 2
                [0.0, unk, 0.7, 0.3],  # sentence 3
            ]),
            # step 1:
            torch.FloatTensor([
                # eos      w1   w2
                [0.0, unk, 0.2, 0.7],  # sentence 1
                [0.0, unk, 0.8, 0.2],  # sentence 2
                [0.7, unk, 0.1, 0.2],  # sentence 3
            ]),
            # step 2:
            torch.FloatTensor([
                # eos       w1    w2
                [0.10, unk, 0.50, 0.4],  # sentence 1
                [0.15, unk, 0.15, 0.7],  # sentence 2
                [0.00, unk, 0.00, 0.0],  # sentence 3
            ]),
            # step 3:
            torch.FloatTensor([
                # eos      w1    w2
                [0.9, unk, 0.05, 0.05],  # sentence 1
                [0.0, unk, 0.00, 0.0],  # sentence 2
                [0.0, unk, 0.00, 0.0],  # sentence 3
            ]),
        ]
        # per-token probabilities of each target sequence under the model above
        expected_scores = [
            [0.6, 0.7, 0.5, 0.9],  # sentence 1
            [0.6, 0.8, 0.15],  # sentence 2
            [0.3, 0.7],  # sentence 3
        ]
        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        model = task.build_model(args)
        scorer = SequenceScorer(task.target_dictionary)
        for sample in data_itr:
            hypos = task.inference_step(scorer, [model], sample)
            for id, hypos_id in zip(sample['id'].tolist(), hypos):
                self.assertHypoTokens(hypos_id[0], data[id]['target'])
                self.assertHypoScore(hypos_id[0], expected_scores[id])
    def assertHypoTokens(self, hypo, tokens):
        # hypothesis tokens must exactly match the reference target
        self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))
    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
        # positional scores are log-probs of the expected probabilities
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel()**lenpen
        self.assertLess(abs(score - hypo['score']), 1e-6)
    def assertAlmostEqual(self, t1, t2):
        # element-wise tensor comparison with tolerance
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)
    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == '__main__':
unittest.main()
| 3,949 | 33.051724 | 75 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_memory_efficient_fp16.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
import unittest
import torch
from fairseq.optim.adam import FairseqAdam
from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer
@unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
class TestMemoryEfficientFP16(unittest.TestCase):
    """Tests state_dict round-tripping for MemoryEfficientFP16Optimizer."""
    def setUp(self):
        # silence fairseq logging noise during the test
        logging.disable(logging.CRITICAL)
    def tearDown(self):
        logging.disable(logging.NOTSET)
    def test_load_state_dict(self):
        # define simple FP16 model
        model = torch.nn.Linear(5, 5).cuda().half()
        params = list(model.parameters())
        # initialize memory efficient FP16 optimizer
        optimizer = FairseqAdam(
            argparse.Namespace(
                lr=[0.00001],
                adam_betas='(0.9, 0.999)',
                adam_eps=1e-8,
                weight_decay=0.0,
            ),
            params,
        )
        me_optimizer = MemoryEfficientFP16Optimizer(
            argparse.Namespace(
                fp16_init_scale=1,
                fp16_scale_window=1,
                fp16_scale_tolerance=1,
                threshold_loss_scale=1,
                min_loss_scale=1e-4,
            ),
            params,
            optimizer,
        )
        # optimizer state is created in the first step
        loss = model(torch.rand(5).cuda().half()).sum()
        me_optimizer.backward(loss)
        me_optimizer.step()
        # reload state
        state = me_optimizer.state_dict()
        me_optimizer.load_state_dict(state)
        # after the round-trip, state keys (the fp16 params) must stay fp16
        # while the stored state tensors remain fp32
        for k, v in me_optimizer.optimizer.state.items():
            self.assertTrue(k.dtype == torch.float16)
            for v_i in v.values():
                if torch.is_tensor(v_i):
                    self.assertTrue(v_i.dtype == torch.float32)
if __name__ == '__main__':
unittest.main()
| 2,002 | 28.028986 | 70 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_multihead_attention.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import unittest
from fairseq.modules.multihead_attention import MultiheadAttention
class TestMultiheadAttention(unittest.TestCase):
    """Tests MultiheadAttention._append_prev_key_padding_mask combinations."""
    def test_append_prev_key_padding_mask(self):
        bsz = 1
        src_len = 4
        # each case is (current_mask, previous_mask, expected_combined_mask);
        # the combined mask is right-padded/concatenated out to src_len
        cases = [
            # no padding mask
            (None, None, None),
            # current padding mask only
            (
                torch.tensor([[1]]).bool(),
                None,
                torch.tensor([[0, 0, 0, 1]]).bool(),
            ),
            # previous padding mask only
            (
                None,
                torch.tensor([[0, 1, 0]]).bool(),
                torch.tensor([[0, 1, 0, 0]]).bool(),
            ),
            # both padding masks
            (
                torch.tensor([[1]]).bool(),
                torch.tensor([[0, 1, 0]]).bool(),
                torch.tensor([[0, 1, 0, 1]]).bool(),
            ),
        ]
        for c in cases:
            key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
                c[0],
                c[1],
                batch_size=bsz,
                src_len=src_len,
                static_kv=False,
            )
            if key_padding_mask is not None:
                self.assertTrue(
                    torch.all(torch.eq(key_padding_mask, c[2])),
                    f'Unexpected resultant key padding mask: {key_padding_mask}'
                    f' given current: {c[0]} and previous: {c[1]}',
                )
                self.assertEqual(key_padding_mask.size(0), bsz)
                self.assertEqual(key_padding_mask.size(1), src_len)
            else:
                self.assertIsNone(c[2])
if __name__ == '__main__':
unittest.main()
| 1,904 | 30.229508 | 80 | py |
BIFI | BIFI-main/utils/fairseq/tests/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.data.language_pair_dataset import collate
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
)
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.tasks import FairseqTask
def dummy_dictionary(vocab_size, prefix='token_'):
    """Build a small Dictionary containing *vocab_size* synthetic symbols."""
    dictionary = Dictionary()
    for idx in range(vocab_size):
        dictionary.add_symbol('{}{}'.format(prefix, idx))
    # padding_factor=1 keeps the vocab exactly as added (no extra padding symbols)
    dictionary.finalize(padding_factor=1)
    return dictionary
def dummy_dataloader(
    samples,
    padding_idx=1,
    eos_idx=2,
    batch_size=None,
):
    """Wrap *samples* in a DataLoader iterator using fairseq's pair collater.

    Samples missing an ``'id'`` key get their list position as id.  With
    ``batch_size=None`` all samples form a single batch.
    """
    if batch_size is None:
        batch_size = len(samples)

    # make sure every sample carries an 'id' field
    for idx, sample in enumerate(samples):
        sample.setdefault('id', idx)

    loader = torch.utils.data.DataLoader(
        TestDataset(samples),
        batch_size=batch_size,
        collate_fn=(lambda items: collate(items, padding_idx, eos_idx)),
    )
    return iter(loader)
def sequence_generator_setup():
    """Build a deterministic toy task for sequence-generator tests.

    Returns ``(tgt_dict, w1, w2, src_tokens, src_lengths, model)`` where the
    model's per-step output distributions are fixed by ``args.beam_probs``
    (one 4x4 tensor per decoding step: 2 sentences x 2 beams over the
    vocabulary ``[eos, unk, w1, w2]``), so expected hypotheses and scores
    can be computed by hand.
    """
    # construct dummy dictionary
    d = dummy_dictionary(vocab_size=2)
    eos = d.eos()
    w1 = 4
    w2 = 5
    # construct source data
    src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
    src_lengths = torch.LongTensor([2, 2])
    args = argparse.Namespace()
    unk = 0.
    args.beam_probs = [
        # step 0:
        torch.FloatTensor([
            # eos w1 w2
            # sentence 1:
            [0.0, unk, 0.9, 0.1], # beam 1
            [0.0, unk, 0.9, 0.1], # beam 2
            # sentence 2:
            [0.0, unk, 0.7, 0.3],
            [0.0, unk, 0.7, 0.3],
        ]),
        # step 1:
        torch.FloatTensor([
            # eos w1 w2 prefix
            # sentence 1:
            [1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0)
            [0.0, unk, 0.9, 0.1], # w2: 0.1
            # sentence 2:
            [0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25)
            [0.00, unk, 0.10, 0.9], # w2: 0.3
        ]),
        # step 2:
        torch.FloatTensor([
            # eos w1 w2 prefix
            # sentence 1:
            [0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9
            [0.6, unk, 0.2, 0.2], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6)
            # sentence 2:
            [0.60, unk, 0.4, 0.00], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6)
            [0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9
        ]),
        # step 3:
        torch.FloatTensor([
            # eos w1 w2 prefix
            # sentence 1:
            [1.0, unk, 0.0, 0.0], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0)
            [1.0, unk, 0.0, 0.0], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0)
            # sentence 2:
            [0.1, unk, 0.5, 0.4], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1)
            [1.0, unk, 0.0, 0.0], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0)
        ]),
    ]
    task = TestTranslationTask.setup_task(args, d, d)
    model = task.build_model(args)
    tgt_dict = task.target_dictionary
    return tgt_dict, w1, w2, src_tokens, src_lengths, model
class TestDataset(torch.utils.data.Dataset):
    """Minimal map-style dataset backed by an in-memory sequence."""

    def __init__(self, data):
        super().__init__()
        self.data = data
        # fairseq datasets expose a ``sizes`` attribute; unused for this stub
        self.sizes = None

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index]
class TestTranslationTask(FairseqTask):
    """Translation task stub holding fixed dictionaries and a canned model."""

    def __init__(self, args, src_dict, tgt_dict, model):
        super().__init__(args)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
        self.model = model

    @classmethod
    def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None):
        """Alternate constructor matching the fairseq task interface."""
        return cls(args, src_dict, tgt_dict, model)

    def build_model(self, args):
        """Delegate model construction to :class:`TestModel`."""
        return TestModel.build_model(args, self)

    @property
    def source_dictionary(self):
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.tgt_dict
class TestModel(FairseqEncoderDecoderModel):
    """Encoder/decoder pair wiring TestEncoder to TestIncrementalDecoder."""

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @classmethod
    def build_model(cls, args, task):
        """Standard fairseq ``build_model`` hook for this stub model."""
        return cls(
            TestEncoder(args, task.source_dictionary),
            TestIncrementalDecoder(args, task.target_dictionary),
        )
class TestEncoder(FairseqEncoder):
    """Pass-through encoder: its output is simply the raw source tokens."""

    def __init__(self, args, dictionary):
        super().__init__(dictionary)
        self.args = args

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        # no computation performed; downstream code consumes the tokens directly
        return EncoderOut(
            encoder_out=src_tokens,
            encoder_padding_mask=None,
            encoder_embedding=None,
            encoder_states=None,
        )

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder batch dimension of the cached output for beam search."""
        reordered = encoder_out.encoder_out.index_select(0, new_order)
        return EncoderOut(
            encoder_out=reordered,
            encoder_padding_mask=None,
            encoder_embedding=None,
            encoder_states=None,
        )
class TestIncrementalDecoder(FairseqIncrementalDecoder):
    """Decoder stub that replays scripted probabilities instead of computing them.

    ``args`` must carry either ``probs`` (a bsz x steps x vocab tensor) or
    ``beam_probs`` (a list of per-step tensors over ``[eos, unk, ...vocab]``).
    """

    def __init__(self, args, dictionary):
        super().__init__(dictionary)
        assert hasattr(args, 'beam_probs') or hasattr(args, 'probs')
        # default positional limit if the test did not set one
        args.max_decoder_positions = getattr(args, 'max_decoder_positions', 100)
        self.args = args

    def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
        """Return (probs, extra) for the requested decoding step(s).

        In incremental mode only the last generated token is consumed and the
        current step index is tracked in ``incremental_state``.
        """
        if incremental_state is not None:
            prev_output_tokens = prev_output_tokens[:, -1:]
        bbsz = prev_output_tokens.size(0)
        vocab = len(self.dictionary)
        src_len = encoder_out.encoder_out.size(1)
        tgt_len = prev_output_tokens.size(1)
        # determine number of steps
        if incremental_state is not None:
            # cache step number
            step = utils.get_incremental_state(self, incremental_state, 'step')
            if step is None:
                step = 0
            utils.set_incremental_state(self, incremental_state, 'step', step + 1)
            steps = [step]
        else:
            steps = list(range(tgt_len))
        # define output in terms of raw probs
        if hasattr(self.args, 'probs'):
            assert self.args.probs.dim() == 3, \
                'expected probs to have size bsz*steps*vocab'
            probs = self.args.probs.index_select(1, torch.LongTensor(steps))
        else:
            probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_()
            for i, step in enumerate(steps):
                # args.beam_probs gives the probability for every vocab element,
                # starting with eos, then unknown, and then the rest of the vocab
                if step < len(self.args.beam_probs):
                    probs[:, i, self.dictionary.eos():] = self.args.beam_probs[step]
                else:
                    # past the scripted steps: force eos with probability 1
                    probs[:, i, self.dictionary.eos()] = 1.0
        # random attention
        attn = torch.rand(bbsz, tgt_len, src_len)
        dev = prev_output_tokens.device
        return probs.to(dev), {"attn": [attn.to(dev)]}

    def get_normalized_probs(self, net_output, log_probs, _):
        # the decoder returns probabilities directly
        probs = net_output[0]
        if log_probs:
            return probs.log()
        else:
            return probs

    def max_positions(self):
        """Maximum target length this decoder supports."""
        return self.args.max_decoder_positions
class TestReshapingEncoder(FairseqEncoder):
    """Encoder stub that reshapes tokens to (batch, time/2, 2), padding time to even."""

    def __init__(self, args, dictionary):
        super().__init__(dictionary)
        self.args = args

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        batch, length = src_tokens.shape
        out = src_tokens
        remainder = length % 2
        if remainder:
            # right-pad with zeros so the time dimension divides evenly by 2
            out = F.pad(out, (0, 2 - remainder))
        return EncoderOut(
            encoder_out=out.view(batch, -1, 2),
            encoder_padding_mask=None,
            encoder_embedding=None,
            encoder_states=None,
        )

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder batch dimension of the cached output for beam search."""
        reordered = encoder_out.encoder_out.index_select(0, new_order)
        return EncoderOut(
            encoder_out=reordered,
            encoder_padding_mask=None,
            encoder_embedding=None,
            encoder_states=None,
        )
class TestReshapingModel(FairseqEncoderDecoderModel):
    """Model stub pairing TestReshapingEncoder with TestIncrementalDecoder."""

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @classmethod
    def build_model(cls, args, task):
        """Standard fairseq ``build_model`` hook for this stub model."""
        return cls(
            TestReshapingEncoder(args, task.source_dictionary),
            TestIncrementalDecoder(args, task.target_dictionary),
        )
| 9,154 | 30.898955 | 101 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_binaries.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from io import StringIO
import logging
import os
import random
import sys
import tempfile
import unittest
import torch
from fairseq import options
from fairseq_cli import preprocess
from fairseq_cli import train
from fairseq_cli import generate
from fairseq_cli import interactive
from fairseq_cli import eval_lm
from fairseq_cli import validate
class TestTranslation(unittest.TestCase):
    """End-to-end CLI tests: preprocess, train and generate on dummy data
    for each translation architecture.  Logging is silenced for the duration
    of each test.

    Fix: ``test_transformer_cross_self_attention`` previously passed
    ``'--decoder-embed-dim', '8'`` twice; the redundant duplicate (argparse
    keeps only the last occurrence anyway) has been removed.
    """

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_fconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en')
                generate_main(data_dir)

    def test_raw(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--dataset-impl', 'raw'])
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--dataset-impl', 'raw'])
                generate_main(data_dir, ['--dataset-impl', 'raw'])

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fp16') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--fp16'])
                generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_memory_efficient_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_memory_efficient_fp16') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--memory-efficient-fp16'])
                generate_main(data_dir)

    def test_update_freq(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_update_freq') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3'])
                generate_main(data_dir)

    def test_max_positions(self):
        """Training/generation must reject over-long examples unless the
        --skip-invalid-size-inputs-valid-test flag is supplied."""
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_max_positions') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                with self.assertRaises(Exception) as context:
                    train_translation_model(
                        data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'],
                    )
                self.assertTrue(
                    'skip this example with --skip-invalid-size-inputs-valid-test' in str(context.exception)
                )
                train_translation_model(
                    data_dir, 'fconv_iwslt_de_en',
                    ['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'],
                )
                with self.assertRaises(Exception) as context:
                    generate_main(data_dir)
                generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test'])

    def test_generation(self):
        """Exercise the main decoding strategies (sampling, diverse beam, prefix)."""
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_sampling') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en')
                generate_main(data_dir, [
                    '--sampling',
                    '--temperature', '2',
                    '--beam', '2',
                    '--nbest', '2',
                ])
                generate_main(data_dir, [
                    '--sampling',
                    '--sampling-topk', '3',
                    '--beam', '2',
                    '--nbest', '2',
                ])
                generate_main(data_dir, [
                    '--sampling',
                    '--sampling-topp', '0.2',
                    '--beam', '2',
                    '--nbest', '2',
                ])
                generate_main(data_dir, [
                    '--diversity-rate', '0.5',
                    '--beam', '6',
                ])
                # incompatible option combination must raise
                with self.assertRaises(ValueError):
                    generate_main(data_dir, [
                        '--diverse-beam-groups', '4',
                        '--match-source-len',
                    ])
                generate_main(data_dir, ['--prefix-size', '2'])

    def test_eval_bleu(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_eval_bleu') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', [
                    '--eval-bleu',
                    '--eval-bleu-print-samples',
                    '--eval-bleu-remove-bpe',
                    '--eval-bleu-detok', 'space',
                    '--eval-bleu-args', '{"beam": 4, "min_len": 10}',
                ])

    def test_lstm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', [
                    '--encoder-layers', '2',
                    '--decoder-layers', '2',
                    '--encoder-embed-dim', '8',
                    '--decoder-embed-dim', '8',
                    '--decoder-out-embed-dim', '8',
                ])
                generate_main(data_dir)

    def test_lstm_bidirectional(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lstm', [
                    '--encoder-layers', '2',
                    '--encoder-bidirectional',
                    '--encoder-hidden-size', '16',
                    '--encoder-embed-dim', '8',
                    '--decoder-embed-dim', '8',
                    '--decoder-out-embed-dim', '8',
                    '--decoder-layers', '2',
                ])
                generate_main(data_dir)

    def test_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', [
                    '--encoder-layers', '2',
                    '--decoder-layers', '2',
                    '--encoder-embed-dim', '8',
                    '--decoder-embed-dim', '8',
                ], run_validation=True)
                generate_main(data_dir)

    def test_multilingual_transformer(self):
        # test with all combinations of encoder/decoder lang tokens
        encoder_langtok_flags = [[], ['--encoder-langtok', 'src'], ['--encoder-langtok', 'tgt']]
        decoder_langtok_flags = [[], ['--decoder-langtok']]
        with contextlib.redirect_stdout(StringIO()):
            for i in range(len(encoder_langtok_flags)):
                for j in range(len(decoder_langtok_flags)):
                    enc_ltok_flag = encoder_langtok_flags[i]
                    dec_ltok_flag = decoder_langtok_flags[j]
                    with tempfile.TemporaryDirectory(f'test_multilingual_transformer_{i}_{j}') as data_dir:
                        create_dummy_data(data_dir)
                        preprocess_translation_data(data_dir)
                        train_translation_model(
                            data_dir,
                            arch='multilingual_transformer',
                            task='multilingual_translation',
                            extra_flags=[
                                '--encoder-layers', '2',
                                '--decoder-layers', '2',
                                '--encoder-embed-dim', '8',
                                '--decoder-embed-dim', '8',
                            ] + enc_ltok_flag + dec_ltok_flag,
                            lang_flags=['--lang-pairs', 'in-out,out-in'],
                            run_validation=True,
                            extra_valid_flags=enc_ltok_flag + dec_ltok_flag,
                        )
                        generate_main(
                            data_dir,
                            extra_flags=[
                                '--task', 'multilingual_translation',
                                '--lang-pairs', 'in-out,out-in',
                                '--source-lang', 'in',
                                '--target-lang', 'out',
                            ] + enc_ltok_flag + dec_ltok_flag,
                        )

    def test_transformer_cross_self_attention(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer_cross_self_attention') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                # NOTE: '--decoder-embed-dim' used to appear twice with the same
                # value; the redundant duplicate flag has been dropped.
                train_translation_model(data_dir, 'transformer_iwslt_de_en', [
                    '--encoder-layers', '2',
                    '--decoder-layers', '2',
                    '--encoder-embed-dim', '8',
                    '--decoder-embed-dim', '8',
                    '--no-cross-attention',
                    '--cross-self-attention',
                    '--layer-wise-attention',
                ], run_validation=True)
                generate_main(data_dir, extra_flags=[])

    def test_lightconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lightconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lightconv_iwslt_de_en', [
                    '--encoder-conv-type', 'lightweight',
                    '--decoder-conv-type', 'lightweight',
                    '--encoder-embed-dim', '8',
                    '--decoder-embed-dim', '8',
                ])
                generate_main(data_dir)

    def test_dynamicconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_dynamicconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lightconv_iwslt_de_en', [
                    '--encoder-conv-type', 'dynamic',
                    '--decoder-conv-type', 'dynamic',
                    '--encoder-embed-dim', '8',
                    '--decoder-embed-dim', '8',
                ])
                generate_main(data_dir)

    def test_cmlm_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_cmlm_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'cmlm_transformer', [
                    '--apply-bert-init',
                    '--criterion', 'nat_loss',
                    '--noise', 'full_mask',
                    '--pred-length-offset',
                    '--length-loss-factor', '0.1'
                ], task='translation_lev')
                generate_main(data_dir, [
                    '--task', 'translation_lev',
                    '--iter-decode-max-iter', '9',
                    '--iter-decode-eos-penalty', '0',
                    '--print-step',
                ])

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_levenshtein_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_levenshtein_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'levenshtein_transformer', [
                    '--apply-bert-init', '--early-exit', '6,6,6',
                    '--criterion', 'nat_loss'
                ], task='translation_lev')
                generate_main(data_dir, [
                    '--task', 'translation_lev',
                    '--iter-decode-max-iter', '9',
                    '--iter-decode-eos-penalty', '0',
                    '--print-step',
                ])

    def test_nonautoregressive_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_nonautoregressive_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'nonautoregressive_transformer', [
                    '--apply-bert-init', '--src-embedding-copy', '--criterion',
                    'nat_loss', '--noise', 'full_mask', '--pred-length-offset',
                    '--length-loss-factor', '0.1'
                ], task='translation_lev')
                generate_main(data_dir, [
                    '--task', 'translation_lev',
                    '--iter-decode-max-iter', '0',
                    '--iter-decode-eos-penalty', '0',
                    '--print-step',
                ])

    # def test_nat_crf_transformer(self):
    #     with contextlib.redirect_stdout(StringIO()):
    #         with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir:
    #             create_dummy_data(data_dir)
    #             preprocess_translation_data(data_dir, ['--joined-dictionary'])
    #             train_translation_model(data_dir, 'nacrf_transformer', [
    #                 '--apply-bert-init', '--criterion',
    #                 'nat_loss', '--noise', 'full_mask', '--pred-length-offset',
    #                 '--length-loss-factor', '0.1',
    #                 '--word-ins-loss-factor', '0.5',
    #                 '--crf-lowrank-approx', '1',
    #                 '--crf-beam-approx', '1'
    #             ], task='translation_lev')
    #             generate_main(data_dir, [
    #                 '--task', 'translation_lev',
    #                 '--iter-decode-max-iter', '0',
    #                 '--iter-decode-eos-penalty', '0',
    #                 '--print-step',
    #             ])

    def test_iterative_nonautoregressive_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_iterative_nonautoregressive_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'iterative_nonautoregressive_transformer', [
                    '--apply-bert-init', '--src-embedding-copy', '--criterion',
                    'nat_loss', '--noise', 'full_mask', '--stochastic-approx',
                    '--dae-ratio', '0.5', '--train-step', '3'
                ], task='translation_lev')
                generate_main(data_dir, [
                    '--task', 'translation_lev',
                    '--iter-decode-max-iter', '9',
                    '--iter-decode-eos-penalty', '0',
                    '--print-step',
                ])

    def test_insertion_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_insertion_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--joined-dictionary'])
                train_translation_model(data_dir, 'insertion_transformer', [
                    '--apply-bert-init', '--criterion', 'nat_loss', '--noise',
                    'random_mask'
                ], task='translation_lev')
                generate_main(data_dir, [
                    '--task', 'translation_lev',
                    '--iter-decode-max-iter', '9',
                    '--iter-decode-eos-penalty', '0',
                    '--print-step',
                ])

    def test_mixture_of_experts(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_moe') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en', [
                    '--task', 'translation_moe',
                    '--user-dir', 'examples/translation_moe/src',
                    '--method', 'hMoElp',
                    '--mean-pool-gating-network',
                    '--num-experts', '3',
                    '--encoder-layers', '2',
                    '--decoder-layers', '2',
                    '--encoder-embed-dim', '8',
                    '--decoder-embed-dim', '8',
                ])
                generate_main(data_dir, [
                    '--task', 'translation_moe',
                    '--user-dir', 'examples/translation_moe/src',
                    '--method', 'hMoElp',
                    '--mean-pool-gating-network',
                    '--num-experts', '3',
                    '--gen-expert', '0'
                ])

    def test_alignment(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_alignment') as data_dir:
                create_dummy_data(data_dir, alignment=True)
                preprocess_translation_data(data_dir, ['--align-suffix', 'align'])
                train_translation_model(
                    data_dir,
                    'transformer_align',
                    [
                        '--encoder-layers', '2',
                        '--decoder-layers', '2',
                        '--encoder-embed-dim', '8',
                        '--decoder-embed-dim', '8',
                        '--load-alignments',
                        '--alignment-layer', '1',
                        '--criterion', 'label_smoothed_cross_entropy_with_alignment'
                    ],
                    run_validation=True,
                )
                generate_main(data_dir)
class TestStories(unittest.TestCase):
    """End-to-end test for the self-attentive fconv story-generation model,
    including the two-stage pretrained/fusion training flow."""

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_fconv_self_att_wp(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_self_att_wp') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                config = [
                    '--encoder-layers', '[(128, 3)] * 2',
                    '--decoder-layers', '[(128, 3)] * 2',
                    '--decoder-attention', 'True',
                    '--encoder-attention', 'False',
                    '--gated-attention', 'True',
                    '--self-attention', 'True',
                    '--project-input', 'True',
                    '--encoder-embed-dim', '8',
                    '--decoder-embed-dim', '8',
                    '--decoder-out-embed-dim', '8',
                    '--multihead-self-attention-nheads', '2'
                ]
                train_translation_model(data_dir, 'fconv_self_att_wp', config)
                generate_main(data_dir)
                # fusion model: reuse the first run's checkpoint as a
                # pretrained model and train a second, fused model on top
                os.rename(os.path.join(data_dir, 'checkpoint_last.pt'), os.path.join(data_dir, 'pretrained.pt'))
                config.extend([
                    '--pretrained', 'True',
                    '--pretrained-checkpoint', os.path.join(data_dir, 'pretrained.pt'),
                    '--save-dir', os.path.join(data_dir, 'fusion_model'),
                ])
                train_translation_model(data_dir, 'fconv_self_att_wp', config)
class TestLanguageModeling(unittest.TestCase):
    """End-to-end language-modeling tests: train, eval-lm and (where
    supported) generate for each LM architecture on dummy data."""

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_fconv_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'fconv_lm', [
                    '--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]',
                    '--decoder-embed-dim', '280',
                    '--optimizer', 'nag',
                    '--lr', '0.1',
                ])
                eval_lm_main(data_dir)

    def test_transformer_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(
                    data_dir, 'transformer_lm', ['--add-bos-token'], run_validation=True,
                )
                eval_lm_main(data_dir)
                generate_main(data_dir, [
                    '--task', 'language_modeling',
                    '--sample-break-mode', 'eos',
                    '--tokens-per-sample', '500',
                ])

    def test_lightconv_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lightconv_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(
                    data_dir, 'lightconv_lm', ['--add-bos-token'], run_validation=True,
                )
                eval_lm_main(data_dir)
                generate_main(data_dir, [
                    '--task', 'language_modeling',
                    '--sample-break-mode', 'eos',
                    '--tokens-per-sample', '500',
                ])

    def test_lstm_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(
                    data_dir, 'lstm_lm', ['--add-bos-token'], run_validation=True,
                )
                eval_lm_main(data_dir)
                generate_main(data_dir, [
                    '--task', 'language_modeling',
                    '--sample-break-mode', 'eos',
                    '--tokens-per-sample', '500',
                ])
class TestMaskedLanguageModel(unittest.TestCase):
    """End-to-end tests for masked-LM pretraining (legacy XLM-style and
    RoBERTa), RoBERTa classification/regression heads, and fine-tuning a
    translation model from a pretrained masked LM checkpoint."""

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_legacy_masked_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_legacy_masked_language_model(data_dir, "masked_lm")

    def test_roberta_masked_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_masked_lm(data_dir, "roberta_base")

    def test_roberta_sentence_prediction(self):
        num_classes = 3
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_roberta_head") as data_dir:
                create_dummy_roberta_head_data(data_dir, num_classes=num_classes)
                preprocess_lm_data(os.path.join(data_dir, 'input0'))
                preprocess_lm_data(os.path.join(data_dir, 'label'))
                train_roberta_head(data_dir, "roberta_base", num_classes=num_classes)

    def test_roberta_regression_single(self):
        num_classes = 1
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_roberta_regression_single") as data_dir:
                create_dummy_roberta_head_data(data_dir, num_classes=num_classes, regression=True)
                preprocess_lm_data(os.path.join(data_dir, 'input0'))
                train_roberta_head(data_dir, "roberta_base", num_classes=num_classes, extra_flags=['--regression-target'])

    def test_roberta_regression_multiple(self):
        num_classes = 3
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_roberta_regression_multiple") as data_dir:
                create_dummy_roberta_head_data(data_dir, num_classes=num_classes, regression=True)
                preprocess_lm_data(os.path.join(data_dir, 'input0'))
                train_roberta_head(data_dir, "roberta_base", num_classes=num_classes, extra_flags=['--regression-target'])

    def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only):
        """Pretrain a legacy masked LM, then fine-tune a tiny transformer from
        its checkpoint via the ``translation_from_pretrained_xlm`` task.

        ``learned_pos_emb`` toggles learned vs. sinusoidal positions;
        ``encoder_only`` initializes only the encoder from the checkpoint.
        """
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory("test_mlm") as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_legacy_masked_language_model(
                    data_dir,
                    arch="masked_lm",
                    extra_args=('--encoder-learned-pos',) if learned_pos_emb else ()
                )
                with tempfile.TemporaryDirectory(
                    "test_mlm_translation"
                ) as translation_dir:
                    create_dummy_data(translation_dir)
                    preprocess_translation_data(
                        translation_dir, extra_flags=["--joined-dictionary"]
                    )
                    # Train transformer with data_dir/checkpoint_last.pt
                    train_translation_model(
                        translation_dir,
                        arch="transformer_from_pretrained_xlm",
                        extra_flags=[
                            "--decoder-layers",
                            "1",
                            "--decoder-embed-dim",
                            "32",
                            "--decoder-attention-heads",
                            "1",
                            "--decoder-ffn-embed-dim",
                            "32",
                            "--encoder-layers",
                            "1",
                            "--encoder-embed-dim",
                            "32",
                            "--encoder-attention-heads",
                            "1",
                            "--encoder-ffn-embed-dim",
                            "32",
                            "--pretrained-xlm-checkpoint",
                            "{}/checkpoint_last.pt".format(data_dir),
                            "--activation-fn",
                            "gelu",
                            "--max-source-positions",
                            "500",
                            "--max-target-positions",
                            "500",
                        ] + (
                            ["--encoder-learned-pos", "--decoder-learned-pos"]
                            if learned_pos_emb else []
                        ) + (['--init-encoder-only'] if encoder_only else []),
                        task="translation_from_pretrained_xlm",
                    )

    def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):
        self._test_pretrained_masked_lm_for_translation(True, False)

    def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):
        self._test_pretrained_masked_lm_for_translation(False, False)

    def test_pretrained_masked_lm_for_translation_encoder_only(self):
        self._test_pretrained_masked_lm_for_translation(True, True)
def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
    """Train a legacy (XLM-style) masked LM for one epoch on the dummy data.

    Uses a deliberately tiny transformer so the test runs quickly; any
    ``extra_args`` are appended to the command line.
    """
    train_parser = options.get_training_parser()
    # TODO: langs should be in and out right?
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            "--task",
            "cross_lingual_lm",
            data_dir,
            "--arch",
            arch,
            # Optimizer args
            "--optimizer",
            "adam",
            "--lr-scheduler",
            "reduce_lr_on_plateau",
            "--lr-shrink",
            "0.5",
            "--lr",
            "0.0001",
            "--min-lr",
            "1e-09",
            # dropout, attention args
            "--dropout",
            "0.1",
            "--attention-dropout",
            "0.1",
            # MLM args
            "--criterion",
            "legacy_masked_lm_loss",
            "--masked-lm-only",
            "--monolingual-langs",
            "in,out",
            "--num-segment",
            "5",
            # Transformer args: use a small transformer model for fast training
            "--encoder-layers",
            "1",
            "--encoder-embed-dim",
            "32",
            "--encoder-attention-heads",
            "1",
            "--encoder-ffn-embed-dim",
            "32",
            # Other training args
            "--max-tokens",
            "500",
            "--tokens-per-sample",
            "500",
            "--save-dir",
            data_dir,
            "--max-epoch",
            "1",
            "--no-progress-bar",
            "--distributed-world-size",
            "1",
            "--dataset-impl",
            "raw",
        ] + list(extra_args),
    )
    train.main(train_args)
class TestOptimizers(unittest.TestCase):
    """Smoke-test each supported optimizer, plus fp16 flat-grad handling."""

    def setUp(self):
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    def test_optimizers(self):
        """Train the same tiny LSTM once per optimizer and decode."""
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_optimizers') as data_dir:
                # Use just a bit of data and tiny model to keep this test runtime reasonable
                create_dummy_data(data_dir, num_examples=10, maxlen=5)
                preprocess_translation_data(data_dir)
                optimizers = ['adafactor', 'adam', 'nag', 'adagrad', 'sgd', 'adadelta']
                last_checkpoint = os.path.join(data_dir, 'checkpoint_last.pt')
                for optimizer in optimizers:
                    # start each optimizer from scratch, not the previous checkpoint
                    if os.path.exists(last_checkpoint):
                        os.remove(last_checkpoint)
                    train_translation_model(data_dir, 'lstm', [
                        '--required-batch-size-multiple', '1',
                        '--encoder-layers', '1',
                        '--encoder-hidden-size', '32',
                        '--decoder-layers', '1',
                        '--optimizer', optimizer,
                    ])
                    generate_main(data_dir)

    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_flat_grads(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_flat_grads') as data_dir:
                # Use just a bit of data and tiny model to keep this test runtime reasonable
                create_dummy_data(data_dir, num_examples=10, maxlen=5)
                preprocess_translation_data(data_dir)
                with self.assertRaises(RuntimeError):
                    # adafactor isn't compatible with flat grads, which
                    # are used by default with --fp16
                    train_translation_model(data_dir, 'lstm', [
                        '--required-batch-size-multiple', '1',
                        '--encoder-layers', '1',
                        '--encoder-hidden-size', '32',
                        '--decoder-layers', '1',
                        '--optimizer', 'adafactor',
                        '--fp16',
                    ])
                # but it should pass once we set --fp16-no-flatten-grads
                train_translation_model(data_dir, 'lstm', [
                    '--required-batch-size-multiple', '1',
                    '--encoder-layers', '1',
                    '--encoder-hidden-size', '32',
                    '--decoder-layers', '1',
                    '--optimizer', 'adafactor',
                    '--fp16',
                    '--fp16-no-flatten-grads',
                ])
def create_dummy_data(data_dir, num_examples=100, maxlen=20, alignment=False):
    """Write random parallel data (train/valid/test x in/out) into *data_dir*.

    Each line is 1..maxlen space-separated random lowercase letters
    (codepoints 97..122).  With ``alignment=True`` also writes ``*.align``
    files of random "src-tgt" index pairs for each split.
    """
    def _create_dummy_data(filename):
        data = torch.rand(num_examples * maxlen)
        # map uniform [0,1) floats to ASCII 'a'..'z'
        data = 97 + torch.floor(26 * data).int()
        with open(os.path.join(data_dir, filename), 'w') as h:
            offset = 0
            for _ in range(num_examples):
                ex_len = random.randint(1, maxlen)
                ex_str = ' '.join(map(chr, data[offset:offset+ex_len]))
                print(ex_str, file=h)
                offset += ex_len

    def _create_dummy_alignment_data(filename_src, filename_tgt, filename):
        # emit a random number of random src-tgt index pairs per sentence pair
        with open(os.path.join(data_dir, filename_src), 'r') as src_f, \
                open(os.path.join(data_dir, filename_tgt), 'r') as tgt_f, \
                open(os.path.join(data_dir, filename), 'w') as h:
            for src, tgt in zip(src_f, tgt_f):
                src_len = len(src.split())
                tgt_len = len(tgt.split())
                avg_len = (src_len + tgt_len) // 2
                num_alignments = random.randint(avg_len // 2, 2 * avg_len)
                src_indices = torch.floor(torch.rand(num_alignments) * src_len).int()
                tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int()
                ex_str = ' '.join(["{}-{}".format(src, tgt) for src, tgt in zip(src_indices, tgt_indices)])
                print(ex_str, file=h)

    _create_dummy_data('train.in')
    _create_dummy_data('train.out')
    _create_dummy_data('valid.in')
    _create_dummy_data('valid.out')
    _create_dummy_data('test.in')
    _create_dummy_data('test.out')

    if alignment:
        _create_dummy_alignment_data('train.in', 'train.out', 'train.align')
        _create_dummy_alignment_data('valid.in', 'valid.out', 'valid.align')
        _create_dummy_alignment_data('test.in', 'test.out', 'test.align')
def create_dummy_roberta_head_data(data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False):
    """Write dummy input/label splits for RoBERTa head fine-tuning tests.

    Creates ``input0/`` with random-letter sentences and ``label/`` with
    either ``classN`` labels (classification) or space-separated floats
    (``regression=True``, one row of *num_classes* values per example).
    """
    input_dir = 'input0'

    def _create_dummy_data(filename):
        random_data = torch.rand(num_examples * maxlen)
        # map uniform [0,1) floats to ASCII 'a'..'z'
        input_data = 97 + torch.floor(26 * random_data).int()
        if regression:
            output_data = torch.rand((num_examples, num_classes))
        else:
            output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int()
        with open(os.path.join(data_dir, input_dir, filename+'.out'), 'w') as f_in:
            label_filename = filename+'.label' if regression else filename+'.out'
            with open(os.path.join(data_dir, 'label', label_filename), 'w') as f_out:
                offset = 0
                for i in range(num_examples):
                    # write example input
                    ex_len = random.randint(1, maxlen)
                    ex_str = ' '.join(map(chr, input_data[offset:offset+ex_len]))
                    print(ex_str, file=f_in)
                    # write example label
                    if regression:
                        class_str = ' '.join(map(str, output_data[i].numpy()))
                        print(class_str, file=f_out)
                    else:
                        class_str = 'class{}'.format(output_data[i])
                        print(class_str, file=f_out)
                    offset += ex_len

    os.mkdir(os.path.join(data_dir, input_dir))
    os.mkdir(os.path.join(data_dir, 'label'))
    _create_dummy_data('train')
    _create_dummy_data('valid')
    _create_dummy_data('test')
def preprocess_translation_data(data_dir, extra_flags=None):
    """Binarize the dummy 'in'->'out' translation data in *data_dir* with
    fairseq-preprocess; *extra_flags* is appended to the argument list."""
    argv = [
        '--source-lang', 'in',
        '--target-lang', 'out',
        '--trainpref', os.path.join(data_dir, 'train'),
        '--validpref', os.path.join(data_dir, 'valid'),
        '--testpref', os.path.join(data_dir, 'test'),
        '--thresholdtgt', '0',
        '--thresholdsrc', '0',
        '--destdir', data_dir,
    ]
    if extra_flags:
        argv += extra_flags
    parser = options.get_preprocessing_parser()
    preprocess.main(parser.parse_args(argv))
def train_translation_model(data_dir, arch, extra_flags=None, task='translation', run_validation=False,
                            lang_flags=None, extra_valid_flags=None):
    """Train a small *arch* model for one epoch on the binarized data in
    *data_dir*, saving checkpoints back into *data_dir*.

    Args:
        data_dir: directory holding the binarized data; also used as save-dir.
        arch: model architecture name registered with fairseq.
        extra_flags: extra CLI flags appended to the training argument list.
        task: fairseq task name (default: 'translation').
        run_validation: if True, run fairseq-validate on the last checkpoint.
        lang_flags: source/target language flags; defaults to in->out.
        extra_valid_flags: extra CLI flags for the validation run.
    """
    if lang_flags is None:
        lang_flags = [
            '--source-lang', 'in',
            '--target-lang', 'out',
        ]
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', task,
            data_dir,
            '--save-dir', data_dir,
            '--arch', arch,
            '--lr', '0.05',
            '--max-tokens', '500',
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            # NOTE: argv entries must be strings; the previous int 0 only
            # parsed by accident (0 is falsy so argparse treated it as a
            # positional) and any other int would crash argument parsing.
            '--num-workers', '0',
        ] + lang_flags + (extra_flags or []),
    )
    train.main(train_args)

    if run_validation:
        # validate on the 'valid' subset using the checkpoint we just wrote
        validate_parser = options.get_validation_parser()
        validate_args = options.parse_args_and_arch(
            validate_parser,
            [
                '--task', task,
                data_dir,
                '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
                '--valid-subset', 'valid',
                '--max-tokens', '500',
                '--no-progress-bar',
            ] + lang_flags + (extra_valid_flags or [])
        )
        validate.main(validate_args)
def generate_main(data_dir, extra_flags=None):
    """Generate from the last checkpoint in *data_dir*, first in batch mode
    and then interactively (feeding 'h e l l o' through a fake stdin).

    Args:
        data_dir: directory containing 'checkpoint_last.pt' and the data.
        extra_flags: extra CLI flags; defaults to ['--print-alignment'].
    """
    if extra_flags is None:
        extra_flags = [
            '--print-alignment',
        ]
    generate_parser = options.get_generation_parser()
    generate_args = options.parse_args_and_arch(
        generate_parser,
        [
            data_dir,
            '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
            '--beam', '3',
            '--batch-size', '64',
            '--max-len-b', '5',
            '--gen-subset', 'valid',
            '--no-progress-bar',
        ] + (extra_flags or []),
    )

    # evaluate model in batch mode
    generate.main(generate_args)

    # evaluate model interactively
    generate_args.buffer_size = 0
    generate_args.input = '-'
    generate_args.max_sentences = None
    orig_stdin = sys.stdin
    sys.stdin = StringIO('h e l l o\n')
    try:
        interactive.main(generate_args)
    finally:
        # always restore stdin, even if interactive generation raises;
        # otherwise every subsequent test sees a consumed StringIO as stdin
        sys.stdin = orig_stdin
def preprocess_lm_data(data_dir):
    """Binarize the monolingual '.out' files in *data_dir* for LM training."""
    argv = [
        '--only-source',
        '--trainpref', os.path.join(data_dir, 'train.out'),
        '--validpref', os.path.join(data_dir, 'valid.out'),
        '--testpref', os.path.join(data_dir, 'test.out'),
        '--destdir', data_dir,
    ]
    parser = options.get_preprocessing_parser()
    preprocess.main(parser.parse_args(argv))
def train_masked_lm(data_dir, arch, extra_flags=None):
    """Train a masked LM of architecture *arch* for one epoch on the
    binarized data in *data_dir*.

    Args:
        data_dir: directory with the binarized data; also used as save-dir.
        arch: model architecture name registered with fairseq.
        extra_flags: extra CLI flags appended to the training argument list.
    """
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', 'masked_lm',
            data_dir,
            '--arch', arch,
            '--optimizer', 'adam',
            '--lr', '0.0001',
            '--criterion', 'masked_lm',
            '--max-sentences', '500',
            '--save-dir', data_dir,
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            '--ddp-backend', 'no_c10d',
            # NOTE: argv entries must be strings; the previous int 0 only
            # parsed by accident because 0 is falsy in argparse's prefix check.
            '--num-workers', '0',
        ] + (extra_flags or []),
    )
    train.main(train_args)
def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None):
    """Train a sentence-prediction (classification) head of architecture
    *arch* for one epoch on the data in *data_dir*.

    Args:
        data_dir: directory with the binarized data; also used as save-dir.
        arch: model architecture name registered with fairseq.
        num_classes: number of target classes for the prediction head.
        extra_flags: extra CLI flags appended to the training argument list.
    """
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', 'sentence_prediction',
            data_dir,
            '--arch', arch,
            '--num-classes', str(num_classes),
            '--optimizer', 'adam',
            '--lr', '0.0001',
            '--criterion', 'sentence_prediction',
            '--max-tokens', '500',
            '--max-positions', '500',
            '--max-sentences', '500',
            '--save-dir', data_dir,
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            '--ddp-backend', 'no_c10d',
            # NOTE: argv entries must be strings; the previous int 0 only
            # parsed by accident because 0 is falsy in argparse's prefix check.
            '--num-workers', '0',
        ] + (extra_flags or []),
    )
    train.main(train_args)
def train_language_model(data_dir, arch, extra_flags=None, run_validation=False):
    """Train a small language model for one epoch on the data in *data_dir*;
    optionally validate the resulting last checkpoint."""
    train_argv = [
        '--task', 'language_modeling',
        data_dir,
        '--arch', arch,
        '--optimizer', 'adam',
        '--lr', '0.0001',
        '--criterion', 'adaptive_loss',
        '--adaptive-softmax-cutoff', '5,10,15',
        '--max-tokens', '500',
        '--tokens-per-sample', '500',
        '--save-dir', data_dir,
        '--max-epoch', '1',
        '--no-progress-bar',
        '--distributed-world-size', '1',
        '--ddp-backend', 'no_c10d',
    ]
    if extra_flags:
        train_argv += extra_flags
    train_parser = options.get_training_parser()
    train.main(options.parse_args_and_arch(train_parser, train_argv))

    if not run_validation:
        return
    # validate on the 'valid' subset using the checkpoint we just wrote
    valid_argv = [
        '--task', 'language_modeling',
        data_dir,
        '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
        '--valid-subset', 'valid',
        '--max-tokens', '500',
        '--no-progress-bar',
    ]
    validate_parser = options.get_validation_parser()
    validate.main(options.parse_args_and_arch(validate_parser, valid_argv))
def eval_lm_main(data_dir):
    """Score the data in *data_dir* with its last checkpoint via fairseq-eval-lm."""
    argv = [
        data_dir,
        '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
        '--no-progress-bar',
    ]
    parser = options.get_eval_lm_parser()
    eval_lm.main(options.parse_args_and_arch(parser, argv))
def train_masked_language_model(data_dir, arch, extra_args=()):
    """Train a tiny cross-lingual masked LM for one epoch on the raw dummy
    data in *data_dir* (monolingual languages 'in' and 'out')."""
    # TODO: langs should be in and out right?
    argv = [
        '--task', 'cross_lingual_lm',
        data_dir,
        '--arch', arch,
        # Optimizer args
        '--optimizer', 'adam',
        '--lr-scheduler', 'reduce_lr_on_plateau',
        '--lr-shrink', '0.5',
        '--lr', '0.0001',
        '--min-lr', '1e-09',
        # dropout, attention args
        '--dropout', '0.1',
        '--attention-dropout', '0.1',
        # MLM args
        '--criterion', 'masked_lm_loss',
        '--masked-lm-only',
        '--monolingual-langs', 'in,out',
        '--num-segment', '5',
        # Transformer args: use a small transformer model for fast training
        '--encoder-layers', '1',
        '--encoder-embed-dim', '32',
        '--encoder-attention-heads', '1',
        '--encoder-ffn-embed-dim', '32',
        # Other training args
        '--max-tokens', '500',
        '--tokens-per-sample', '500',
        '--save-dir', data_dir,
        '--max-epoch', '1',
        '--no-progress-bar',
        '--distributed-world-size', '1',
        '--dataset-impl', 'raw',
    ]
    argv.extend(extra_args)
    parser = options.get_training_parser()
    train.main(options.parse_args_and_arch(parser, argv))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 45,457 | 40.363057 | 122 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_concat_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
from tests.test_train import mock_dict
class TestConcatDataset(unittest.TestCase):
    """Tests for ConcatDataset built from two single-sentence
    LanguagePairDatasets (token 1 and token 2 respectively)."""

    def _make_dataset(self, token):
        """Build a one-example LanguagePairDataset whose only source token is *token*."""
        tokens = torch.LongTensor([token]).view(1, -1)
        tokens_ds = TokenBlockDataset(
            tokens,
            sizes=[tokens.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        return LanguagePairDataset(tokens_ds, tokens_ds.sizes, self.d, shuffle=False)

    def setUp(self):
        # previously the two datasets were built with duplicated inline code;
        # the shared construction now lives in _make_dataset
        self.d = mock_dict()
        self.dataset_1 = self._make_dataset(1)
        self.dataset_2 = self._make_dataset(2)

    def test_concat_dataset_basics(self):
        # plain concatenation: one example from each dataset
        d = ConcatDataset(
            [self.dataset_1, self.dataset_2]
        )
        self.assertEqual(len(d), 2)
        self.assertEqual(d[0]['source'][0], 1)
        self.assertEqual(d[1]['source'][0], 2)

        # upsample the second dataset 2x
        d = ConcatDataset(
            [self.dataset_1, self.dataset_2], sample_ratios=[1, 2]
        )
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0]['source'][0], 1)
        self.assertEqual(d[1]['source'][0], 2)
        self.assertEqual(d[2]['source'][0], 2)

        # upsample the first dataset 2x
        d = ConcatDataset(
            [self.dataset_1, self.dataset_2], sample_ratios=[2, 1]
        )
        self.assertEqual(len(d), 3)
        self.assertEqual(d[0]['source'][0], 1)
        self.assertEqual(d[1]['source'][0], 1)
        self.assertEqual(d[2]['source'][0], 2)
| 1,943 | 28.907692 | 66 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_noising.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Dict, List
import tests.utils as test_utils
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
TransformEosDataset,
data_utils,
noising,
)
class TestDataNoising(unittest.TestCase):
    """Tests for fairseq data noising: word dropout, word blanking and word
    shuffling, over BPE (continuation- and end-marker styles) and plain word
    vocabularies, plus end-to-end NoisingDataset batching."""

    def _get_test_data_with_bpe_cont_marker(self, append_eos=True):
        """
        Args:
            append_eos: if True, each input sentence in the source tokens tensor
                will have an EOS appended to the end.

        Returns:
            vocabs: BPE vocab with continuation markers as suffixes to denote
                non-end of word tokens. This is the standard BPE format used in
                fairseq's preprocessing.
            x: input tensor containing numberized source tokens, with EOS at the
                end if append_eos is true
            src_lengths: and source lengths.
        """
        vocab = Dictionary()
        vocab.add_symbol("he@@")
        vocab.add_symbol("llo")
        vocab.add_symbol("how")
        vocab.add_symbol("are")
        vocab.add_symbol("y@@")
        vocab.add_symbol("ou")
        vocab.add_symbol("n@@")
        vocab.add_symbol("ew")
        vocab.add_symbol("or@@")
        vocab.add_symbol("k")

        src_tokens = [
            ["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"],
            ["how", "are", "y@@", "ou"],
        ]
        # NOTE: was `x, src_lengths = x, src_lengths = ...` — a redundant
        # chained-assignment typo; a single assignment is equivalent.
        x, src_lengths = self._convert_src_tokens_to_tensor(
            vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
        )
        return vocab, x, src_lengths

    def _get_test_data_with_bpe_end_marker(self, append_eos=True):
        """
        Args:
            append_eos: if True, each input sentence in the source tokens tensor
                will have an EOS appended to the end.

        Returns:
            vocabs: BPE vocab with end-of-word markers as suffixes to denote
                tokens at the end of a word. This is an alternative to fairseq's
                standard preprocessing framework and is not generally supported
                within fairseq.
            x: input tensor containing numberized source tokens, with EOS at the
                end if append_eos is true
            src_lengths: and source lengths.
        """
        vocab = Dictionary()
        vocab.add_symbol("he")
        vocab.add_symbol("llo_EOW")
        vocab.add_symbol("how_EOW")
        vocab.add_symbol("are_EOW")
        vocab.add_symbol("y")
        vocab.add_symbol("ou_EOW")
        vocab.add_symbol("n")
        vocab.add_symbol("ew_EOW")
        vocab.add_symbol("or")
        vocab.add_symbol("k_EOW")

        src_tokens = [
            ["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"],
            ["how_EOW", "are_EOW", "y", "ou_EOW"],
        ]
        # NOTE: was `x, src_lengths = x, src_lengths = ...` — same typo as in
        # _get_test_data_with_bpe_cont_marker; fixed to a single assignment.
        x, src_lengths = self._convert_src_tokens_to_tensor(
            vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
        )
        return vocab, x, src_lengths

    def _get_test_data_with_word_vocab(self, append_eos=True):
        """
        Args:
            append_eos: if True, each input sentence in the source tokens tensor
                will have an EOS appended to the end.

        Returns:
            vocabs: word vocab
            x: input tensor containing numberized source tokens, with EOS at the
                end if append_eos is true
            src_lengths: and source lengths.
        """
        vocab = Dictionary()

        vocab.add_symbol("hello")
        vocab.add_symbol("how")
        vocab.add_symbol("are")
        vocab.add_symbol("you")
        vocab.add_symbol("new")
        vocab.add_symbol("york")
        src_tokens = [
            ["hello", "new", "york", "you"],
            ["how", "are", "you", "new", "york"],
        ]
        x, src_lengths = self._convert_src_tokens_to_tensor(
            vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
        )
        return vocab, x, src_lengths

    def _convert_src_tokens_to_tensor(
        self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool
    ):
        src_len = [len(x) for x in src_tokens]
        # If we have to append EOS, we include EOS in counting src length
        if append_eos:
            src_len = [length + 1 for length in src_len]

        x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad())
        for i in range(len(src_tokens)):
            for j in range(len(src_tokens[i])):
                x[i][j] = vocab.index(src_tokens[i][j])
            if append_eos:
                x[i][j + 1] = vocab.eos()

        # transpose to (T x B), the layout the noising modules operate on
        x = x.transpose(1, 0)
        return x, torch.LongTensor(src_len)

    def assert_eos_at_end(self, x, x_len, eos):
        """Asserts last token of every sentence in x is EOS """
        for i in range(len(x_len)):
            self.assertEqual(
                x[x_len[i] - 1][i],
                eos,
                (
                    "Expected eos (token id {eos}) at the end of sentence {i} "
                    "but got {other} instead"
                ).format(i=i, eos=eos, other=x[i][-1]),
            )

    def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised):
        # Expect only the first word (2 bpe tokens) of the first example
        # was dropped out
        self.assertEqual(x_len[0] - 2, l_noised[0])
        for i in range(l_noised[0]):
            self.assertEqual(x_noised[i][0], x[i + 2][0])

    def test_word_dropout_with_eos(self):
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)

        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
            self.assert_word_dropout_correct(
                x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
            )
            self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk):
        # Expect only the first word (2 bpe tokens) of the first example
        # was blanked out
        self.assertEqual(x_len[0], l_noised[0])
        for i in range(l_noised[0]):
            if i < 2:
                self.assertEqual(x_noised[i][0], unk)
            else:
                self.assertEqual(x_noised[i][0], x[i][0])

    def test_word_blank_with_eos(self):
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)

        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
            self.assert_word_blanking_correct(
                x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
            )
            self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def generate_unchanged_shuffle_map(self, length):
        return {i: i for i in range(length)}

    def assert_word_shuffle_matches_expected(
        self,
        x,
        x_len,
        max_shuffle_distance: int,
        vocab: Dictionary,
        expected_shufle_maps: List[Dict[int, int]],
        expect_eos_at_end: bool,
        bpe_end_marker=None,
    ):
        """
        This verifies that with a given x, x_len, max_shuffle_distance, and
        vocab, we get the expected shuffle result.

        Args:
            x: Tensor of shape (T x B) = (sequence_length, batch_size)
            x_len: Tensor of length B = batch_size
            max_shuffle_distance: arg to pass to noising
            expected_shuffle_maps: List[mapping] where mapping is a
                Dict[old_index, new_index], mapping x's elements from their
                old positions in x to their new positions in x.
            expect_eos_at_end: if True, check the output to make sure there is
                an EOS at the end.
            bpe_end_marker: str denoting the BPE end token. If this is not None, we
                set the BPE cont token to None in the noising classes.
        """
        bpe_cont_marker = None
        if bpe_end_marker is None:
            bpe_cont_marker = "@@"

        with data_utils.numpy_seed(1234):
            word_shuffle = noising.WordShuffle(
                vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker
            )
            x_noised, l_noised = word_shuffle.noising(
                x, x_len, max_shuffle_distance=max_shuffle_distance
            )

        # For every example, we have a different expected shuffle map. We check
        # that each example is shuffled as expected according to each
        # corresponding shuffle map.
        for i in range(len(expected_shufle_maps)):
            shuffle_map = expected_shufle_maps[i]
            for k, v in shuffle_map.items():
                self.assertEqual(x[k][i], x_noised[v][i])

        # Shuffling should not affect the length of each example
        for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised):
            self.assertEqual(pre_shuffle_length, post_shuffle_length)
        if expect_eos_at_end:
            self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def test_word_shuffle_with_eos(self):
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)

        # Assert word shuffle with max shuffle distance 0 causes input to be
        # unchanged
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len)
                for example_len in x_len
            ],
            expect_eos_at_end=True,
        )

        # Assert word shuffle with max shuffle distance 3 matches our expected
        # shuffle order
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(x_len[0]),
                {0: 0, 1: 3, 2: 1, 3: 2},
            ],
            expect_eos_at_end=True,
        )

    def test_word_shuffle_with_eos_nonbpe(self):
        """The purpose of this is to test shuffling logic with word vocabs"""
        vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True)

        # Assert word shuffle with max shuffle distance 0 causes input to be
        # unchanged
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len)
                for example_len in x_len
            ],
            expect_eos_at_end=True,
        )

        # Assert word shuffle with max shuffle distance 3 matches our expected
        # shuffle order
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                {0: 0, 1: 1, 2: 3, 3: 2},
                {0: 0, 1: 2, 2: 1, 3: 3, 4: 4},
            ],
            expect_eos_at_end=True,
        )

    def test_word_shuffle_without_eos(self):
        """Same result as word shuffle with eos except no EOS at end"""
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)

        # Assert word shuffle with max shuffle distance 0 causes input to be
        # unchanged
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len)
                for example_len in x_len
            ],
            expect_eos_at_end=False,
        )

        # Assert word shuffle with max shuffle distance 3 matches our expected
        # shuffle order
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(x_len[0]),
                {0: 0, 1: 3, 2: 1, 3: 2},
            ],
            expect_eos_at_end=False,
        )

    def test_word_shuffle_without_eos_with_bpe_end_marker(self):
        """Same result as word shuffle without eos except using BPE end token"""
        vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False)

        # Assert word shuffle with max shuffle distance 0 causes input to be
        # unchanged
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            max_shuffle_distance=0,
            vocab=vocab,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(example_len)
                for example_len in x_len
            ],
            expect_eos_at_end=False,
            bpe_end_marker="_EOW",
        )

        # Assert word shuffle with max shuffle distance 3 matches our expected
        # shuffle order
        self.assert_word_shuffle_matches_expected(
            x=x,
            x_len=x_len,
            vocab=vocab,
            max_shuffle_distance=3,
            expected_shufle_maps=[
                self.generate_unchanged_shuffle_map(x_len[0]),
                {0: 0, 1: 3, 2: 1, 3: 2},
            ],
            expect_eos_at_end=False,
            bpe_end_marker="_EOW",
        )

    def assert_no_eos_at_end(self, x, x_len, eos):
        """Asserts that the last token of each sentence in x is not EOS """
        for i in range(len(x_len)):
            self.assertNotEqual(
                x[x_len[i] - 1][i],
                eos,
                "Expected no eos (token id {eos}) at the end of sentence {i}.".format(
                    eos=eos, i=i
                ),
            )

    def test_word_dropout_without_eos(self):
        """Same result as word dropout with eos except no EOS at end"""
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)

        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
            self.assert_word_dropout_correct(
                x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
            )
            self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def test_word_blank_without_eos(self):
        """Same result as word blank with eos except no EOS at end"""
        vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)

        with data_utils.numpy_seed(1234):
            noising_gen = noising.WordDropout(vocab)
            x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
            self.assert_word_blanking_correct(
                x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
            )
            self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())

    def _get_noising_dataset_batch(
        self, src_tokens_no_pad, src_dict, append_eos_to_tgt=False,
    ):
        """
        Constructs a NoisingDataset and the corresponding
        ``LanguagePairDataset(NoisingDataset(src), src)``. If
        *append_eos_to_tgt* is True, wrap the source dataset in
        :class:`TransformEosDataset` to append EOS to the clean source when
        using it as the target.
        """
        src_dataset = test_utils.TestDataset(data=src_tokens_no_pad)

        noising_dataset = noising.NoisingDataset(
            src_dataset=src_dataset,
            src_dict=src_dict,
            seed=1234,
            max_word_shuffle_distance=3,
            word_dropout_prob=0.2,
            word_blanking_prob=0.2,
            noising_class=noising.UnsupervisedMTNoising,
        )
        tgt = src_dataset
        language_pair_dataset = LanguagePairDataset(
            src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict
        )
        language_pair_dataset = TransformEosDataset(
            language_pair_dataset, src_dict.eos(),
            append_eos_to_tgt=append_eos_to_tgt,
        )

        dataloader = torch.utils.data.DataLoader(
            dataset=language_pair_dataset,
            batch_size=2,
            collate_fn=language_pair_dataset.collater,
        )
        denoising_batch_result = next(iter(dataloader))
        return denoising_batch_result

    def test_noising_dataset_with_eos(self):
        src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
            append_eos=True
        )

        # Format data for src_dataset
        src_tokens = torch.t(src_tokens)
        src_tokens_no_pad = []
        for src_sentence in src_tokens:
            src_tokens_no_pad.append(
                utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
            )
        denoising_batch_result = self._get_noising_dataset_batch(
            src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict
        )

        eos, pad = src_dict.eos(), src_dict.pad()

        # Generated noisy source as source
        expected_src = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]]
        )
        # Original clean source as target (right-padded)
        expected_tgt = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
        )
        generated_src = denoising_batch_result["net_input"]["src_tokens"]
        tgt_tokens = denoising_batch_result["target"]

        self.assertTensorEqual(expected_src, generated_src)
        self.assertTensorEqual(expected_tgt, tgt_tokens)

    def test_noising_dataset_without_eos(self):
        """
        Similar to test noising dataset with eos except that we have to set
        *append_eos_to_tgt* to ``True``.
        """

        src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
            append_eos=False
        )

        # Format data for src_dataset
        src_tokens = torch.t(src_tokens)
        src_tokens_no_pad = []
        for src_sentence in src_tokens:
            src_tokens_no_pad.append(
                utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
            )
        denoising_batch_result = self._get_noising_dataset_batch(
            src_tokens_no_pad=src_tokens_no_pad,
            src_dict=src_dict,
            append_eos_to_tgt=True,
        )

        eos, pad = src_dict.eos(), src_dict.pad()

        # Generated noisy source as source
        expected_src = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]
        )
        # Original clean source as target (right-padded)
        expected_tgt = torch.LongTensor(
            [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
        )

        generated_src = denoising_batch_result["net_input"]["src_tokens"]
        tgt_tokens = denoising_batch_result["target"]

        self.assertTensorEqual(expected_src, generated_src)
        self.assertTensorEqual(expected_tgt, tgt_tokens)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 19,779 | 36.533207 | 87 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_sparse_multihead_attention.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import unittest
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
class TestSparseMultiheadAttention(unittest.TestCase):
    """Checks the buffered sparse attention masks produced by
    SparseMultiheadAttention against hand-computed expected masks."""

    def test_sparse_multihead_attention(self):
        attn_weights = torch.randn(1, 8, 8)
        # expected mask for the bidirectional variant (stride=4, expressivity=1)
        bidirectional_sparse_mask = torch.tensor([
            [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
            [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
            [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
            [0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0]
        ])

        bidirectional_attention = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=True)
        bidirectional_attention_sparse_mask = bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8)
        # BUG FIX: the torch.all(torch.eq(...)) result was previously computed
        # but never asserted, so this test could never fail.
        self.assertTrue(
            torch.all(torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask))
        )

        # expected mask for the unidirectional (causal) variant
        sparse_mask = torch.tensor([
            [0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'),
             float('-inf'), float('-inf')],
            [0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')],
            [0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')],
            [0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf')],
            [0, 0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf')],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, float('-inf'), float('-inf')],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, float('-inf')],
            [float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
        ])

        attention = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=False)
        attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8)
        self.assertTrue(torch.all(torch.eq(attention_sparse_mask, sparse_mask)))
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 2,545 | 50.959184 | 114 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_export.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import tempfile
import unittest
import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.transformer import TransformerModel
from fairseq.modules import multihead_attention, sinusoidal_positional_embedding
from fairseq.tasks.fairseq_task import FairseqTask
DEFAULT_TEST_VOCAB_SIZE = 100
class DummyTask(FairseqTask):
    # Minimal FairseqTask stub for export tests: a single dummy dictionary is
    # used as both the source and the target vocabulary, so models can be
    # built without any real data.
    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        if getattr(self.args, "ctc", False):
            # reserve a blank symbol when a CTC-style model is requested
            self.dictionary.add_symbol("<ctc_blank>")
        self.src_dict = self.dictionary
        self.tgt_dict = self.dictionary

    @property
    def source_dictionary(self):
        # same dictionary object as target_dictionary (aliased in __init__)
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.dictionary
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    """Return a Dictionary filled with *vocab_size* dummy symbols
    ("0", "1", ...), each with a fixed count of 1000."""
    dummy_dict = Dictionary()
    # add dummy symbols to satisfy the requested vocab size
    # (was `for id, _ in enumerate(range(vocab_size))`, which shadowed the
    # builtin `id` and enumerated a range redundantly)
    for i in range(vocab_size):
        dummy_dict.add_symbol("{}".format(i), 1000)
    return dummy_dict
def get_dummy_task_and_parser():
    """Build a DummyTask plus the argparse parser used to configure it; the
    pair can be handed to model/criterion constructors in tests."""
    parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task",
        argument_default=argparse.SUPPRESS,
    )
    DummyTask.add_args(parser)
    args = parser.parse_args([])
    return DummyTask.setup_task(args), parser
class TestExportModels(unittest.TestCase):
    # TorchScript export coverage: script each module, round-trip it through
    # save/load, and verify incremental state is kept per module instance.

    def _test_save_and_load(self, scripted_module):
        # round-trip through a temp file; torch.jit.load raises if the
        # serialized module is malformed
        with tempfile.NamedTemporaryFile() as f:
            scripted_module.save(f.name)
            torch.jit.load(f.name)

    def test_export_multihead_attention(self):
        module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
        scripted = torch.jit.script(module)
        self._test_save_and_load(scripted)

    def test_incremental_state_multihead_attention(self):
        # two scripted instances writing to the same state dict must not
        # clobber each other's entries under the same logical key
        module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
        module1 = torch.jit.script(module1)
        module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2)
        module2 = torch.jit.script(module2)

        state = {}
        state = module1.set_incremental_state(state, "key", {"a": torch.tensor([1])})
        state = module2.set_incremental_state(state, "key", {"a": torch.tensor([2])})
        v1 = module1.get_incremental_state(state, "key")["a"]
        v2 = module2.get_incremental_state(state, "key")["a"]

        self.assertEqual(v1, 1)
        self.assertEqual(v2, 2)

    def test_positional_embedding(self):
        module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding(
            embedding_dim=8, padding_idx=1
        )
        scripted = torch.jit.script(module)
        self._test_save_and_load(scripted)

    @unittest.skipIf(
        torch.__version__ < "1.5.0", "Targeting OSS scriptability for the 1.5 release"
    )
    def test_export_transformer(self):
        # full transformer built from the dummy task must be scriptable
        task, parser = get_dummy_task_and_parser()
        TransformerModel.add_args(parser)
        args = parser.parse_args([])
        model = TransformerModel.build_model(args, task)
        scripted = torch.jit.script(model)
        self._test_save_and_load(scripted)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 3,535 | 31.740741 | 86 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_backtranslation_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import (
BacktranslationDataset,
LanguagePairDataset,
TransformEosDataset,
)
from fairseq.sequence_generator import SequenceGenerator
import tests.utils as test_utils
class TestBacktranslationDataset(unittest.TestCase):
    # End-to-end checks for BacktranslationDataset: a SequenceGenerator
    # back-translates the "target" data into a noisy source, and we compare
    # the collated batch against hand-computed expected tensors.

    def setUp(self):
        # sequence_generator_setup() supplies a tiny dictionary, two word ids,
        # fixed source batches and a pre-built model for generation
        self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model = (
            test_utils.sequence_generator_setup()
        )

        dummy_src_samples = self.src_tokens

        self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples)
        self.cuda = torch.cuda.is_available()

    def _backtranslation_dataset_helper(
        self, remove_eos_from_input_src, remove_eos_from_output_src,
    ):
        # Shared body for the three tests below: wire up the dataset with the
        # given EOS handling and compare one collated batch against the
        # expected source/target tensors.
        tgt_dataset = LanguagePairDataset(
            src=self.tgt_dataset,
            src_sizes=self.tgt_dataset.sizes,
            src_dict=self.tgt_dict,
            tgt=None,
            tgt_sizes=None,
            tgt_dict=None,
        )

        generator = SequenceGenerator(
            tgt_dict=self.tgt_dict,
            max_len_a=0,
            max_len_b=200,
            beam_size=2,
            unk_penalty=0,
        )

        backtranslation_dataset = BacktranslationDataset(
            tgt_dataset=TransformEosDataset(
                dataset=tgt_dataset,
                eos=self.tgt_dict.eos(),
                # remove eos from the input src
                remove_eos_from_src=remove_eos_from_input_src,
            ),
            src_dict=self.tgt_dict,
            backtranslation_fn=(
                lambda sample: generator.generate([self.model], sample)
            ),
            output_collater=TransformEosDataset(
                dataset=tgt_dataset,
                eos=self.tgt_dict.eos(),
                # if we remove eos from the input src, then we need to add it
                # back to the output tgt
                append_eos_to_tgt=remove_eos_from_input_src,
                remove_eos_from_src=remove_eos_from_output_src,
            ).collater,
            cuda=self.cuda,
        )
        dataloader = torch.utils.data.DataLoader(
            backtranslation_dataset,
            batch_size=2,
            collate_fn=backtranslation_dataset.collater,
        )
        backtranslation_batch_result = next(iter(dataloader))

        eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2

        # Note that we sort by src_lengths and add left padding, so actually
        # ids will look like: [1, 0]
        expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]])
        if remove_eos_from_output_src:
            expected_src = expected_src[:, :-1]
        expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
        generated_src = backtranslation_batch_result["net_input"]["src_tokens"]
        tgt_tokens = backtranslation_batch_result["target"]

        self.assertTensorEqual(expected_src, generated_src)
        self.assertTensorEqual(expected_tgt, tgt_tokens)

    def test_backtranslation_dataset_no_eos_in_output_src(self):
        self._backtranslation_dataset_helper(
            remove_eos_from_input_src=False, remove_eos_from_output_src=True,
        )

    def test_backtranslation_dataset_with_eos_in_output_src(self):
        self._backtranslation_dataset_helper(
            remove_eos_from_input_src=False, remove_eos_from_output_src=False,
        )

    def test_backtranslation_dataset_no_eos_in_input_src(self):
        self._backtranslation_dataset_helper(
            remove_eos_from_input_src=True, remove_eos_from_output_src=False,
        )

    def assertTensorEqual(self, t1, t2):
        # exact element-wise equality with matching shapes
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)
# Support running this test module directly via `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 4,004 | 33.525862 | 90 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_sequence_generator.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import unittest
import torch
from fairseq import search
from fairseq.sequence_generator import SequenceGenerator
import tests.utils as test_utils
class TestSequenceGeneratorBase(unittest.TestCase):
    """Shared tensor/hypothesis assertion helpers for the generator tests below."""
    def assertHypoTokens(self, hypo, tokens):
        """Assert that a hypothesis produced exactly `tokens`."""
        expected = torch.LongTensor(tokens)
        self.assertTensorEqual(hypo['tokens'], expected)
    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
        """Check per-position scores and the (optionally length-normalized) total score."""
        expected_scores = torch.FloatTensor(pos_probs).log()
        self.assertEqual(expected_scores.numel(), hypo['tokens'].numel())
        self.assertAlmostEqual(hypo['positional_scores'], expected_scores)
        total = expected_scores.sum()
        if normalized:
            total = total / expected_scores.numel() ** lenpen
        self.assertLess(abs(total - hypo['score']), 1e-6)
    def assertAlmostEqual(self, t1, t2):
        """Element-wise closeness (tolerance 1e-4) with a shape check."""
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        max_diff = (t1 - t2).abs().max()
        self.assertLess(max_diff, 1e-4)
    def assertTensorEqual(self, t1, t2):
        """Exact element-wise equality with a shape check."""
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        num_mismatch = t1.ne(t2).long().sum()
        self.assertEqual(num_mismatch, 0)
class TestSequenceGenerator(TestSequenceGeneratorBase):
    """End-to-end beam-search tests against a small fixed test model."""
    def setUp(self):
        # sequence_generator_setup() provides a fixed dictionary, two known
        # tokens (w1, w2), a two-sentence batch and a deterministic model.
        self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model = (
            test_utils.sequence_generator_setup()
        )
        self.sample = {
            'net_input': {
                'src_tokens': src_tokens, 'src_lengths': src_lengths,
            },
        }
    def test_with_normalization(self):
        """Default settings: scores are normalized by hypothesis length."""
        generator = SequenceGenerator(self.tgt_dict, beam_size=2)
        hypos = generator.generate([self.model], self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])
    def test_without_normalization(self):
        """Raw (unnormalized) scores change the beam ordering of sentence 2."""
        # Sentence 1: unchanged from the normalized case
        # Sentence 2: beams swap order
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, normalize_scores=False)
        hypos = generator.generate([self.model], self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)
    def test_with_lenpen_favoring_short_hypos(self):
        """A length penalty < 1 rewards shorter hypotheses."""
        lenpen = 0.6
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.generate([self.model], self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
    def test_with_lenpen_favoring_long_hypos(self):
        """A length penalty > 1 rewards longer hypotheses (beam orders flip)."""
        lenpen = 5.0
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, len_penalty=lenpen)
        hypos = generator.generate([self.model], self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)
    def test_maxlen(self):
        """max_len_b=2 caps generation length; longer candidates are cut off."""
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, max_len_b=2)
        hypos = generator.generate([self.model], self.sample)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])
    def test_encoder_with_different_output_len(self):
        """Generation still produces attention when the encoder's output
        length differs from its input length (TestReshapingModel)."""
        generator = SequenceGenerator(self.tgt_dict, beam_size=2, max_len_b=2)
        args = self.model.encoder.args
        task = test_utils.TestTranslationTask.setup_task(args, self.tgt_dict, self.tgt_dict)
        reshaping_model = test_utils.TestReshapingModel.build_model(args, task)
        hypos = generator.generate([reshaping_model], self.sample)
        for sent in [0, 1]:
            for beam in [0, 1]:
                assert hypos[sent][beam]['attention'] is not None
class TestDiverseBeamSearch(TestSequenceGeneratorBase):
    """Diverse beam search over a hand-scripted probability table."""
    def setUp(self):
        # construct dummy dictionary
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5
        # construct source data
        self.src_tokens = torch.LongTensor([
            [self.w1, self.w2, self.eos],
            [self.w1, self.w2, self.eos],
        ])
        self.src_lengths = torch.LongTensor([2, 2])
        args = argparse.Namespace()
        unk = 0.
        # beam_probs scripts the model's output distribution per decoding
        # step; rows are (sentence, beam) pairs, columns are
        # (eos, unk, w1, w2) probabilities.
        args.beam_probs = [
            # step 0:
            torch.FloatTensor([
                # eos w1 w2
                # sentence 1:
                [0.0, unk, 0.9, 0.1], # beam 1
                [0.0, unk, 0.9, 0.1], # beam 2
                # sentence 2:
                [0.0, unk, 0.7, 0.3],
                [0.0, unk, 0.7, 0.3],
            ]),
            # step 1:
            torch.FloatTensor([
                # eos w1 w2
                # sentence 1:
                [0.0, unk, 0.6, 0.4],
                [0.0, unk, 0.6, 0.4],
                # sentence 2:
                [0.25, unk, 0.35, 0.4],
                [0.25, unk, 0.35, 0.4],
            ]),
            # step 2:
            torch.FloatTensor([
                # eos w1 w2
                # sentence 1:
                [1.0, unk, 0.0, 0.0],
                [1.0, unk, 0.0, 0.0],
                # sentence 2:
                [0.9, unk, 0.1, 0.0],
                [0.9, unk, 0.1, 0.0],
            ]),
        ]
        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary
    def test_diverse_beam_search(self):
        """Two groups with zero diversity strength: both beams agree per sentence."""
        search_strategy = search.DiverseBeamSearch(self.tgt_dict, num_groups=2, diversity_strength=0.)
        generator = SequenceGenerator(
            self.tgt_dict, beam_size=2, search_strategy=search_strategy,
        )
        sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
        hypos = generator.generate([self.model], sample)
        eos, w1, w2 = self.eos, self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])
class TestDiverseSiblingsSearch(TestDiverseBeamSearch):
    """Diverse siblings search; reuses TestDiverseBeamSearch's setUp/model."""
    def assertHypoScore(
        self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0
    ):
        """Like the base check, but subtracts the per-position sibling-rank
        penalty (rank * diversity_rate) from the expected scores first."""
        pos_scores = torch.FloatTensor(pos_probs).log()
        pos_scores.sub_(torch.Tensor(sibling_rank) * diversity_rate)
        self.assertAlmostEqual(hypo["positional_scores"], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo["tokens"].numel())
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel() ** lenpen
        self.assertLess(abs(score - hypo["score"]), 1e-6)
    def test_diverse_beam_search(self):
        """Overrides the parent test to use DiverseSiblingsSearch scoring."""
        search_strategy = search.DiverseSiblingsSearch(
            self.tgt_dict, diversity_rate=0.5
        )
        generator = SequenceGenerator(
            self.tgt_dict, beam_size=2, search_strategy=search_strategy
        )
        sample = {
            "net_input": {
                "src_tokens": self.src_tokens,
                "src_lengths": self.src_lengths,
            }
        }
        hypos = generator.generate([self.model], sample)
        eos, w1, w2 = self.eos, self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5)
class TestTopPSamplingSearch(TestSequenceGeneratorBase):
    """Top-p (nucleus) sampling over a scripted two-token distribution."""
    def setUp(self):
        # construct dummy dictionary
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5
        # construct source data
        self.src_tokens = torch.LongTensor([
            [self.w1, self.w2, self.eos],
            [self.w1, self.w2, self.eos],
        ])
        self.src_lengths = torch.LongTensor([2, 2])
        args = argparse.Namespace()
        unk = 0.
        # The minimal probability of top 2 tokens.
        self.min_top2_prob = 0.75
        # The minimal probability of the top 1 token.
        self.min_top1_prob = 0.4
        w1_prob = self.min_top1_prob
        w2_prob = self.min_top2_prob - self.min_top1_prob
        eos_prob = 1 - self.min_top2_prob
        # Scripted per-step distributions: step 0 forces w1, step 1 splits
        # mass between w1/w2/eos, step 2 forces eos.
        args.beam_probs = [
            # step 0:
            torch.FloatTensor([
                # eos w1 w2
                [0.0, unk, 1.0, 0.0],
                [0.0, unk, 1.0, 0.0],
                [0.0, unk, 1.0, 0.0],
                [0.0, unk, 1.0, 0.0],
            ]),
            # step 1:
            torch.FloatTensor([
                # eos w1 w2
                [eos_prob, unk, w1_prob, w2_prob],
                [eos_prob, unk, w1_prob, w2_prob],
                [eos_prob, unk, w1_prob, w2_prob],
                [eos_prob, unk, w1_prob, w2_prob],
            ]),
            # step 2:
            torch.FloatTensor([
                # eos w1 w2
                [1.0, unk, 0.0, 0.0],
                [1.0, unk, 0.0, 0.0],
                [1.0, unk, 0.0, 0.0],
                [1.0, unk, 0.0, 0.0],
            ]),
        ]
        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary
    def test_topp_sampling_search_low_prob(self):
        # Given a prob low enough to top-P sampling, we expect only the top
        # 1 token to be sampled, which always results in the same output.
        low_sampling_topp = self.min_top1_prob/2.0
        search_strategy = search.Sampling(self.tgt_dict, sampling_topp=low_sampling_topp)
        generator = SequenceGenerator(
            self.tgt_dict, beam_size=2, search_strategy=search_strategy)
        sample = {
            'net_input': {
                'src_tokens': self.src_tokens,
                'src_lengths': self.src_lengths
            }
        }
        hypos = generator.generate([self.model], sample)
        eos, w1 = self.eos, self.w1
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w1, eos])
        self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
        self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0])
    def test_topp_sampling_search_high_prob(self):
        # Given a prob high enough to top-P sampling, any of the top 2
        # tokens could be sampled. This can cause different outputs.
        high_sampling_topp = (self.min_top1_prob+self.min_top2_prob)/2.0
        search_strategy = search.Sampling(self.tgt_dict, sampling_topp=high_sampling_topp)
        generator = SequenceGenerator(
            self.tgt_dict, beam_size=2, search_strategy=search_strategy)
        sample = {
            'net_input': {
                'src_tokens': self.src_tokens,
                'src_lengths': self.src_lengths
            }
        }
        hypos = generator.generate([self.model], sample)
        eos, w1, w2 = self.eos, self.w1, self.w2
        # Sampling is stochastic, so accept either of the two valid outcomes
        # per hypothesis (hence the non-asserting hypo*/tensor* helpers).
        # sentence 1, beam 1
        self.assertTrue(self.hypoTokens(hypos[0][0], [w1, w1, eos]) or
                        self.hypoTokens(hypos[0][0], [w1, w2, eos]))
        self.assertTrue(self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0]) or
                        self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0]))
        # sentence 1, beam 2
        self.assertTrue(self.hypoTokens(hypos[0][1], [w1, w1, eos]) or
                        self.hypoTokens(hypos[0][1], [w1, w2, eos]))
        self.assertTrue(self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0]) or
                        self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0]))
        # sentence 2, beam 1
        self.assertTrue(self.hypoTokens(hypos[1][0], [w1, w1, eos]) or
                        self.hypoTokens(hypos[1][0], [w1, w2, eos]))
        self.assertTrue(self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0]) or
                        self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0]))
        # sentence 2, beam 2
        self.assertTrue(self.hypoTokens(hypos[1][1], [w1, w1, eos]) or
                        self.hypoTokens(hypos[1][1], [w1, w2, eos]))
        self.assertTrue(self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0]) or
                        self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0]))
    def hypoTokens(self, hypo, tokens):
        """Boolean (non-asserting) variant of assertHypoTokens."""
        return self.tensorEqual(hypo['tokens'], torch.LongTensor(tokens))
    def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
        """Boolean (non-asserting) variant of assertHypoScore."""
        pos_scores = torch.FloatTensor(pos_probs).log()
        if not self.almostEqual(hypo['positional_scores'], pos_scores):
            return False
        if pos_scores.numel() != hypo['tokens'].numel():
            return False
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel() ** lenpen
        return abs(score - hypo['score']) < 1e-6
    def almostEqual(self, t1, t2):
        """True if shapes match and values agree within 1e-4."""
        return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4
    def tensorEqual(self, t1, t2):
        """True if shapes match and values are exactly equal."""
        return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0
# Support running this test module directly via `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 17,439 | 40.035294 | 102 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_label_smoothing.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import unittest
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion
import tests.utils as test_utils
class TestLabelSmoothing(unittest.TestCase):
    """Label-smoothed cross entropy must reduce to plain NLL for eps=0 and
    report consistent losses under padding and reduction."""
    def setUp(self):
        # build dictionary
        self.d = test_utils.dummy_dictionary(3)
        vocab = len(self.d)
        self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens
        self.assertEqual(self.d.pad(), 1)
        self.assertEqual(self.d.eos(), 2)
        self.assertEqual(self.d.unk(), 3)
        pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841
        # build dataset
        self.data = [
            # the first batch item has padding
            {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, eos])},
            {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, w1, eos])},
        ]
        self.sample = next(test_utils.dummy_dataloader(self.data))
        # build model
        self.args = argparse.Namespace()
        self.args.sentence_avg = False
        # Fixed per-position output distributions for the dummy model.
        self.args.probs = torch.FloatTensor([
            # pad eos unk w1 w2 w3
            [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
            [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
            [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
        ]).unsqueeze(0).expand(2, 3, 7) # add batch dimension
        self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
        self.model = self.task.build_model(self.args)
    def test_nll_loss(self):
        """The smoothed criterion reports the same nll_loss as CrossEntropy."""
        self.args.label_smoothing = 0.1
        nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
        self.assertLess(abs(nll_loss - nll_logging_output['loss']), 1e-6)
        self.assertLess(abs(nll_loss - smooth_logging_output['nll_loss']), 1e-6)
    def test_padding(self):
        """Batched loss equals the sum of the two items' unpadded losses."""
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample)
        def get_one_no_padding(idx):
            # create a new sample with just a single batch item so that there's
            # no padding
            sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
            args1 = copy.copy(self.args)
            args1.probs = args1.probs[idx, :, :].unsqueeze(0)
            model1 = self.task.build_model(args1)
            loss1, _, _ = crit(model1, sample1)
            return loss1
        loss1 = get_one_no_padding(0)
        loss2 = get_one_no_padding(1)
        self.assertAlmostEqual(loss, loss1 + loss2)
    def test_reduction(self):
        """Reduced loss equals the sum of the unreduced per-token losses."""
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample, reduce=True)
        unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
        self.assertAlmostEqual(loss, unreduced_loss.sum())
    def test_zero_eps(self):
        """With label_smoothing=0 the smoothed loss equals plain NLL."""
        self.args.label_smoothing = 0.0
        nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
        self.assertAlmostEqual(nll_loss, smooth_loss)
    def assertAlmostEqual(self, t1, t2):
        """Tensors equal in shape and element-wise within 1e-6."""
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)
# Support running this test module directly via `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 4,235 | 41.36 | 101 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_convtbc.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import unittest
from fairseq.modules import ConvTBC
import torch.nn as nn
class TestConvTBC(unittest.TestCase):
    """ConvTBC (time-batch-channel layout, per the module name) must match
    nn.Conv1d on both the forward pass and all gradients."""
    def test_convtbc(self):
        # ksz, in_channels, out_channels
        conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # out_channels, in_channels, ksz
        conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
        # Copy conv1d's weights into ConvTBC's layout (dims 0 and 2 swapped).
        conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
        conv_tbc.bias.data.copy_(conv1d.bias.data)
        # Same random input in both layouts: TBC is (7, 2, 4), conv1d wants
        # (batch, channels, time).
        input_tbc = torch.randn(7, 2, 4, requires_grad=True)
        input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
        input1d.requires_grad = True
        output_tbc = conv_tbc(input_tbc)
        output1d = conv1d(input1d)
        # Forward outputs must agree (up to layout transposition).
        self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)
        grad_tbc = torch.randn(output_tbc.size())
        grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()
        output_tbc.backward(grad_tbc)
        output1d.backward(grad1d)
        # Gradients w.r.t. weight, bias and input must also agree.
        self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
        self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
        self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)
    def assertAlmostEqual(self, t1, t2):
        """Tensors equal in shape and element-wise within 1e-4."""
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)
# Support running this test module directly via `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 1,679 | 33.285714 | 102 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_token_block_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import TokenBlockDataset
import tests.utils as test_utils
class TestTokenBlockDataset(unittest.TestCase):
    """Verify how TokenBlockDataset re-chunks token streams under each break_mode."""
    def _build_dataset(self, data, **kwargs):
        """Wrap raw tensors in a TestDataset and build a TokenBlockDataset over it."""
        sizes = [len(x) for x in data]
        underlying_ds = test_utils.TestDataset(data)
        return TokenBlockDataset(underlying_ds, sizes, **kwargs)
    def test_eos_break_mode(self):
        """break_mode='eos': one block per input sequence, split at eos (1)."""
        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode='eos')
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [1])
        self.assertEqual(ds[2].tolist(), [8, 7, 6, 1])
        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
            torch.tensor([1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode='eos')
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [8, 7, 6, 1])
        self.assertEqual(ds[2].tolist(), [1])
    def test_block_break_mode(self):
        """break_mode='none': fixed-size blocks that ignore sequence boundaries."""
        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
            torch.tensor([9, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode='none')
        self.assertEqual(ds[0].tolist(), [5, 4, 3])
        self.assertEqual(ds[1].tolist(), [2, 1, 8])
        self.assertEqual(ds[2].tolist(), [7, 6, 1])
        self.assertEqual(ds[3].tolist(), [9, 1])
    def test_complete_break_mode(self):
        """break_mode='complete': pack whole sequences up to block_size tokens."""
        data = [
            torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
            torch.tensor([8, 7, 6, 1], dtype=torch.long),
            torch.tensor([9, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=6, pad=0, eos=1, break_mode='complete')
        self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1])
        data = [
            torch.tensor([4, 3, 2, 1], dtype=torch.long),
            torch.tensor([5, 1], dtype=torch.long),
            torch.tensor([1], dtype=torch.long),
            torch.tensor([6, 1], dtype=torch.long),
        ]
        ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode='complete')
        self.assertEqual(ds[0].tolist(), [4, 3, 2, 1])
        self.assertEqual(ds[1].tolist(), [5, 1, 1])
        self.assertEqual(ds[2].tolist(), [6, 1])
# Support running this test module directly via `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 2,970 | 36.607595 | 89 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_multi_corpus_sampled_dataset.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from collections import OrderedDict
import numpy as np
import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset
from tests.test_train import mock_dict
class TestMultiCorpusSampledDataset(unittest.TestCase):
    """Statistically check MultiCorpusSampledDataset's corpus-sampling behavior
    using two single-item corpora whose tokens (1 vs 2) identify the source."""
    def setUp(self):
        d = mock_dict()
        # Corpus 1: a single one-token sequence [1].
        tokens_1 = torch.LongTensor([1]).view(1, -1)
        tokens_ds1 = TokenBlockDataset(
            tokens_1,
            sizes=[tokens_1.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_1 = LanguagePairDataset(
            tokens_ds1, tokens_ds1.sizes, d, shuffle=False
        )
        # Corpus 2: a single one-token sequence [2].
        tokens_2 = torch.LongTensor([2]).view(1, -1)
        tokens_ds2 = TokenBlockDataset(
            tokens_2,
            sizes=[tokens_2.size(-1)],
            block_size=1,
            pad=0,
            eos=1,
            include_targets=False,
        )
        self.dataset_2 = LanguagePairDataset(
            tokens_ds2, tokens_ds2.sizes, d, shuffle=False
        )
    def _test_sample_helper(
        self,
        expected_sample_from_first_ds_percentage,
        num_samples=1000,
        sampling_func=None,
    ):
        """Draw num_samples batches and assert the observed fraction coming
        from corpus 1 is within 0.01 of the expected fraction."""
        # To make sure test is not flaky
        np.random.seed(0)
        if sampling_func is None:
            m = MultiCorpusSampledDataset(
                OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
            )
        else:
            m = MultiCorpusSampledDataset(
                OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
                sampling_func=sampling_func,
            )
        # Return value unused; presumably initializes internal ordering/sampling
        # state before indexing -- TODO confirm against MultiCorpusSampledDataset.
        m.ordered_indices()
        count_sample_from_first_dataset = 0
        for _ in range(num_samples):
            # Token value 1 means the batch came from dataset_1.
            if m.collater([m[0], m[1]])["net_input"]["src_tokens"][0] == 1:
                count_sample_from_first_dataset += 1
        sample_from_first_ds_percentage = (
            1.0 * count_sample_from_first_dataset / num_samples
        )
        self.assertLess(
            abs(
                sample_from_first_ds_percentage
                - expected_sample_from_first_ds_percentage
            ),
            0.01,
        )
    def test_multi_corpus_sampled_dataset_uniform_sample(self):
        """Default sampling: each corpus is picked ~50% of the time."""
        self._test_sample_helper(expected_sample_from_first_ds_percentage=0.5)
    def test_multi_corpus_sampled_dataset_weighted_sample(self):
        """Custom 0.9/0.1 weighted sampler: corpus 1 picked ~90% of the time."""
        def naive_weighted_sample(weights):
            def f(l):
                # NOTE(review): returns the index whose cumulative weight first
                # exceeds a uniform draw; falls through to an implicit None if
                # the weights sum to less than the draw. The argument `l` is
                # unused (the sampler ignores the candidate list).
                v = np.random.random()
                agg = 0
                for i, weight in enumerate(weights):
                    agg += weight
                    if agg > v:
                        return i
            return f
        self._test_sample_helper(
            expected_sample_from_first_ds_percentage=0.9,
            sampling_func=naive_weighted_sample(weights=[0.9, 0.1]),
        )
| 3,105 | 31.354167 | 79 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_bmuf.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from multiprocessing import Manager
import random
import unittest
import torch
import torch.nn as nn
from fairseq import distributed_utils, optim
class Model(nn.Module):
    """Minimal one-layer linear model used to exercise BMUF training."""
    def __init__(self, input_size, output_size):
        super().__init__()
        # Single fully-connected projection: input_size -> output_size.
        self.fc = nn.Linear(input_size, output_size)
    def forward(self, input):
        return self.fc(input)
def setup_model_loss_criterion(args, rank, is_cuda):
    """
    setup model, criterion and optimizer based on input args
    """
    args.distributed_rank = rank
    distributed_utils.distributed_init(args)
    # Fixed seed so every worker starts from identical parameters.
    torch.manual_seed(1)
    model = Model(args.input_size, args.nb_classes)
    loss_fn = nn.CrossEntropyLoss()
    if is_cuda:
        model = model.cuda()
        loss_fn = loss_fn.cuda()
    # Plain SGD wrapped in the FairseqBMUF optimizer (block momentum sync).
    optimizer = optim.sgd.SGD(args, model.parameters())
    optimizer = optim.FairseqBMUF(args, optimizer)
    return model, loss_fn, optimizer
def train_step(input, target, model, loss_fn, optimizer, **unused):
    """Run one optimization step: forward pass, backward pass, update.

    Extra keyword arguments are accepted and discarded so callers can pass
    through unused training state.
    """
    model.train()  # ensure training-mode behavior for the forward pass
    prediction = model(input)
    objective = loss_fn(prediction, target)
    # The optimizer wrapper owns the backward call (fairseq optimizer API).
    optimizer.backward(objective)
    optimizer.step()
def single_gpu_training(args, rank, iterations, shared_results):
    """Worker entry point: train a small model for `iterations` steps on one
    device, then publish its flattened parameters for cross-worker comparison.

    Args:
        args: namespace from setup_args() (model sizes, BMUF/distributed cfg).
        rank: this worker's distributed rank (also used as the CUDA device id).
        iterations: number of train_step() calls to run.
        shared_results: multiprocessing dict; receives under key `rank` a 1-D
            CPU tensor containing all model parameters concatenated.
    """
    is_cuda = torch.cuda.is_available()
    if is_cuda:
        torch.cuda.set_device(rank)
    model, loss_fn, optimizer = setup_model_loss_criterion(args, rank, is_cuda)
    for _ in range(iterations):
        input = torch.randn(1, args.input_size)
        target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes)
        if is_cuda:
            input = input.cuda()
            target = target.cuda()
        train_step(input, target, model, loss_fn, optimizer)
    # Flatten every parameter into one 1-D CPU tensor with a single torch.cat
    # call, instead of the previous pattern that started `results` as a list,
    # re-bound it to a tensor, and grew it via repeated cat (quadratic).
    shared_results[rank] = torch.cat(
        [param.flatten().cpu().data for param in model.parameters()]
    )
def setup_args():
    """Build the argparse.Namespace of BMUF/optimizer/distributed settings
    shared by every test in this module.

    Returns:
        argparse.Namespace with model dimensions, SGD/BMUF hyper-parameters
        and a two-process gloo setup bound to a random localhost port.
    """
    args = argparse.Namespace()
    # BMUF hyper-parameters. (The original assigned global_sync_iter twice,
    # 20 and then 1; the first assignment was dead code and has been removed.)
    args.global_sync_iter = 1  # sync every iteration unless a test overrides
    args.block_momentum = 0.875
    args.block_lr = 0.5
    # Toy model/data dimensions.
    args.input_size = 5
    args.nb_classes = 2
    args.batch_size = 1
    # Plain-SGD settings.
    args.lr = [1e-3]
    args.momentum = 0
    args.weight_decay = 0
    args.warmup_iterations = 0
    args.use_nbm = True
    args.average_sync = True
    # Two-process gloo setup on a random localhost port.
    args.distributed_backend = "gloo"
    args.distributed_world_size = 2
    port = random.randint(10000, 20000)
    args.distributed_init_method = "tcp://localhost:{port}".format(port=port)
    args.distributed_init_host = "localhost"
    args.distributed_port = port + 1
    args.local_world_size = args.distributed_world_size
    return args
@unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs")
class TestBMUF(unittest.TestCase):
    """Spawn two single-GPU workers and verify BMUF keeps their parameters
    identical after training."""
    def bmuf_process(self, args, iterations):
        """Run `iterations` of training in two spawned processes, then compare
        the flattened parameters each worker published."""
        processes = []
        results = Manager().dict()
        ctx = torch.multiprocessing.get_context("spawn")
        for rank in range(args.distributed_world_size):
            p = ctx.Process(
                target=single_gpu_training, args=(args, rank, iterations, results)
            )
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        # Make sure params in both machines are same
        assert len(results) == 2
        self.assertAlmostEqual(results[0], results[1])
    def test_bmuf_sync(self):
        # Train model for 1 iteration and do bmuf sync without doing warmup
        args = setup_args()
        iterations = 1
        self.bmuf_process(args, iterations)
    def test_warmup_sync(self):
        # Train model for 20 iteration and do warmup sync without doing bmuf sync
        args = setup_args()
        args.warmup_iterations = 20
        iterations = 20
        self.bmuf_process(args, iterations)
    def test_warmup_sync_bmuf_sync(self):
        # Train model for 25 iteration and do warmup sync after 20 iteration
        # and bmuf sync after 25 iteration
        args = setup_args()
        args.warmup_iterations = 20
        args.global_sync_iter = 5
        iterations = 25
        self.bmuf_process(args, iterations)
    def assertAlmostEqual(self, t1, t2):
        """Tensors equal in shape and element-wise within 1e-4."""
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)
# Support running this test module directly via `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 4,636 | 28.535032 | 88 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_dictionary.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import tempfile
import unittest
import torch
from fairseq.data import Dictionary
class TestDictionary(unittest.TestCase):
    """Behavioral tests for fairseq's Dictionary: finalize/reload round-trips
    and the #fairseq:overwrite handling when loading from file."""
    def test_finalize(self):
        """finalize() re-sorts ids by frequency; encodings must survive a
        save/load round-trip with the same ids."""
        txt = [
            'A B C D',
            'B C D',
            'C D',
            'D',
        ]
        # Ids before finalize: insertion order (A=4, B=5, C=6, D=7; eos=2).
        ref_ids1 = list(map(torch.IntTensor, [
            [4, 5, 6, 7, 2],
            [5, 6, 7, 2],
            [6, 7, 2],
            [7, 2],
        ]))
        # Ids after finalize: sorted by descending count (D most frequent).
        ref_ids2 = list(map(torch.IntTensor, [
            [7, 6, 5, 4, 2],
            [6, 5, 4, 2],
            [5, 4, 2],
            [4, 2],
        ]))
        # build dictionary
        d = Dictionary()
        for line in txt:
            d.encode_line(line, add_if_not_exist=True)
        def get_ids(dictionary):
            # Encode all lines without mutating the dictionary.
            ids = []
            for line in txt:
                ids.append(dictionary.encode_line(line, add_if_not_exist=False))
            return ids
        def assertMatch(ids, ref_ids):
            for toks, ref_toks in zip(ids, ref_ids):
                self.assertEqual(toks.size(), ref_toks.size())
                self.assertEqual(0, (toks != ref_toks).sum().item())
        ids = get_ids(d)
        assertMatch(ids, ref_ids1)
        # check finalized dictionary
        d.finalize()
        finalized_ids = get_ids(d)
        assertMatch(finalized_ids, ref_ids2)
        # write to disk and reload
        with tempfile.NamedTemporaryFile(mode='w') as tmp_dict:
            d.save(tmp_dict.name)
            d = Dictionary.load(tmp_dict.name)
            reload_ids = get_ids(d)
            assertMatch(reload_ids, ref_ids2)
            assertMatch(finalized_ids, reload_ids)
    def test_overwrite(self):
        """Entries tagged #fairseq:overwrite may duplicate the special symbols
        and get fresh ids after them."""
        # for example, Camembert overwrites <unk>, <s> and </s>
        dict_file = io.StringIO(
            "<unk> 999 #fairseq:overwrite\n"
            "<s> 999 #fairseq:overwrite\n"
            "</s> 999 #fairseq:overwrite\n"
            ", 999\n"
            "▁de 999\n"
        )
        d = Dictionary()
        d.add_from_file(dict_file)
        self.assertEqual(d.index('<pad>'), 1)
        self.assertEqual(d.index('foo'), 3)
        self.assertEqual(d.index('<unk>'), 4)
        self.assertEqual(d.index('<s>'), 5)
        self.assertEqual(d.index('</s>'), 6)
        self.assertEqual(d.index(','), 7)
        self.assertEqual(d.index('▁de'), 8)
    def test_no_overwrite(self):
        """Duplicating special symbols without the overwrite tag must raise."""
        # for example, Camembert overwrites <unk>, <s> and </s>
        dict_file = io.StringIO(
            "<unk> 999\n"
            "<s> 999\n"
            "</s> 999\n"
            ", 999\n"
            "▁de 999\n"
        )
        d = Dictionary()
        with self.assertRaisesRegex(RuntimeError, 'Duplicate'):
            d.add_from_file(dict_file)
    def test_space(self):
        """A literal space can be a symbol (used by character-level models)."""
        # for example, character models treat space as a symbol
        dict_file = io.StringIO(
            " 999\n"
            "a 999\n"
            "b 999\n"
        )
        d = Dictionary()
        d.add_from_file(dict_file)
        self.assertEqual(d.index(' '), 4)
        self.assertEqual(d.index('a'), 5)
        self.assertEqual(d.index('b'), 6)
# Support running this test module directly via `python <file>`.
if __name__ == '__main__':
    unittest.main()
| 3,336 | 27.521368 | 80 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq import utils
class TestUtils(unittest.TestCase):
    """Tests for fairseq.utils padding helpers and gradient clipping."""
    def test_convert_padding_direction(self):
        """Left-padded and right-padded batches convert into each other."""
        pad = 1
        left_pad = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [1, 7, 8, 9, 10],
            [1, 1, 1, 11, 12],
        ])
        right_pad = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [7, 8, 9, 10, 1],
            [11, 12, 1, 1, 1],
        ])
        self.assertAlmostEqual(
            right_pad,
            utils.convert_padding_direction(
                left_pad,
                pad,
                left_to_right=True,
            ),
        )
        self.assertAlmostEqual(
            left_pad,
            utils.convert_padding_direction(
                right_pad,
                pad,
                right_to_left=True,
            ),
        )
    def test_make_positions(self):
        """Position ids start at pad+1 and padding positions keep the pad id."""
        pad = 1
        left_pad_input = torch.LongTensor([
            [9, 9, 9, 9, 9],
            [1, 9, 9, 9, 9],
            [1, 1, 1, 9, 9],
        ])
        left_pad_output = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [1, 2, 3, 4, 5],
            [1, 1, 1, 2, 3],
        ])
        right_pad_input = torch.LongTensor([
            [9, 9, 9, 9, 9],
            [9, 9, 9, 9, 1],
            [9, 9, 1, 1, 1],
        ])
        right_pad_output = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [2, 3, 4, 5, 1],
            [2, 3, 1, 1, 1],
        ])
        self.assertAlmostEqual(
            left_pad_output,
            utils.make_positions(left_pad_input, pad),
        )
        self.assertAlmostEqual(
            right_pad_output,
            utils.make_positions(right_pad_input, pad),
        )
    def test_clip_grad_norm_(self):
        """clip_grad_norm_ returns a tensor norm; 0 without grads, clipped to
        max_norm when the accumulated norm exceeds it."""
        # No gradients: the reported norm is 0.
        params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False)
        grad_norm = utils.clip_grad_norm_(params, 1.0)
        self.assertTrue(torch.is_tensor(grad_norm))
        self.assertEqual(grad_norm, 0.0)
        # Three params with constant grads: norm equals the flat concat's norm.
        params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)]
        for p in params:
            p.grad = torch.full((5,), fill_value=2)
        grad_norm = utils.clip_grad_norm_(params, 1.0)
        exp_grad_norm = torch.full((15,), fill_value=2).norm()
        self.assertTrue(torch.is_tensor(grad_norm))
        self.assertEqual(grad_norm, exp_grad_norm)
        # A second call sees the already-clipped grads, so the norm is 1.0.
        grad_norm = utils.clip_grad_norm_(params, 1.0)
        self.assertAlmostEqual(grad_norm, torch.tensor(1.0))
    def assertAlmostEqual(self, t1, t2):
        """Tensors equal in shape and element-wise within 1e-4."""
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)
if __name__ == '__main__':
unittest.main()
| 2,878 | 27.50495 | 73 | py |
BIFI | BIFI-main/utils/fairseq/tests/test_character_token_embedder.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import unittest
from fairseq.data import Dictionary
from fairseq.modules import CharacterTokenEmbedder
class TestCharacterTokenEmbedder(unittest.TestCase):
    """Tests for fairseq.modules.CharacterTokenEmbedder."""
    def test_character_token_embedder(self):
        """Same token embeds identically wherever it appears, and grads flow."""
        vocab = Dictionary()
        vocab.add_symbol('hello')
        vocab.add_symbol('there')
        embedder = CharacterTokenEmbedder(vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2)
        test_sents = [['hello', 'unk', 'there'], ['there'], ['hello', 'there']]
        max_len = max(len(s) for s in test_sents)
        # batch padded to max_len plus a leading and a trailing eos
        input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad())
        for i in range(len(test_sents)):
            input[i][0] = vocab.eos()
            for j in range(len(test_sents[i])):
                input[i][j + 1] = vocab.index(test_sents[i][j])
            # j holds the last token's offset, so j + 2 is the slot after it
            input[i][j + 2] = vocab.eos()
        embs = embedder(input)
        # output embedding dim is 5 (last constructor argument)
        assert embs.size() == (len(test_sents), max_len + 2, 5)
        # eos vs eos, eos vs trailing eos, 'hello' vs 'hello', 'there' vs 'there'
        self.assertAlmostEqual(embs[0][0], embs[1][0])
        self.assertAlmostEqual(embs[0][0], embs[0][-1])
        self.assertAlmostEqual(embs[0][1], embs[2][1])
        self.assertAlmostEqual(embs[0][3], embs[1][1])
        embs.sum().backward()
        assert embedder.char_embeddings.weight.grad is not None
    def assertAlmostEqual(self, t1, t2):
        """Assert equal shape and max abs difference below 1e-6."""
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == '__main__':
unittest.main()
| 1,656 | 34.255319 | 96 | py |
BIFI | BIFI-main/utils/fairseq/tests/speech_recognition/asr_test_base.py | #!/usr/bin/env python3
import argparse
import os
import unittest
from inspect import currentframe, getframeinfo
import numpy as np
import torch
from fairseq.data import data_utils as fairseq_data_utils
from fairseq.data.dictionary import Dictionary
from fairseq.models import (
BaseFairseqModel,
FairseqDecoder,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqModel,
)
from fairseq.tasks.fairseq_task import FairseqTask
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
DEFAULT_TEST_VOCAB_SIZE = 100
# ///////////////////////////////////////////////////////////////////////////
# utility function to setup dummy dict/task/input
# ///////////////////////////////////////////////////////////////////////////
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    """Build a Dictionary pre-filled with *vocab_size* placeholder symbols.

    Args:
        vocab_size: number of dummy symbols ("0", "1", ...) to add.

    Returns:
        a fairseq Dictionary containing the dummy symbols.
    """
    dummy_dict = Dictionary()
    # idiom fix: the original iterated `enumerate(range(...))`, which is
    # redundant and shadowed the builtin `id`; a plain range suffices.
    # Each symbol gets a fixed count of 1000 so none is pruned by frequency.
    for i in range(vocab_size):
        dummy_dict.add_symbol("{}".format(i), 1000)
    return dummy_dict
class DummyTask(FairseqTask):
    """Minimal FairseqTask backed by a dummy vocabulary."""
    def __init__(self, args):
        super().__init__(args)
        dictionary = get_dummy_dictionary()
        # CTC models need an explicit blank symbol in the vocabulary
        if getattr(self.args, "ctc", False):
            dictionary.add_symbol("<ctc_blank>")
        self.dictionary = dictionary
        self.tgt_dict = dictionary
    @property
    def target_dictionary(self):
        """The (dummy) target-side dictionary."""
        return self.dictionary
def get_dummy_task_and_parser():
    """Create a dummy task plus the argument parser used to configure it.

    Building a fairseq model or criterion requires a task and parsed args;
    this helper fabricates both from empty command-line input. DummyTask is
    used here; provide another helper to test against a different task.
    """
    arg_parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(arg_parser)
    dummy_task = DummyTask.setup_task(arg_parser.parse_args([]))
    return dummy_task, arg_parser
def get_dummy_input(T=100, D=80, B=5, K=100):
    """Build a random forward-input dict for a speech seq2seq model.

    Args:
        T: max source sequence length (frames).
        D: feature vector dimension.
        B: batch size.
        K: target vocabulary size.

    Returns:
        dict with "src_tokens" (B, T, D), "src_lengths" (B,) sorted
        descending, and "prev_output_tokens" (B, max_target_len).
    """
    forward_input = {}
    # T max sequence length
    # D feature vector dimension
    # B batch size
    # K target dimension size
    feature = torch.randn(B, T, D)
    # this (B, T, D) layout is just a convention, you can override it by
    # write your own _prepare_forward_input function
    src_lengths = torch.from_numpy(
        np.random.randint(low=1, high=T, size=B, dtype=np.int64)
    )
    src_lengths[0] = T  # make sure the maximum length matches
    prev_output_tokens = []
    for b in range(B):
        # each target prefix is at most as long as its source sequence
        token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1)
        tokens = np.random.randint(low=0, high=K, size=token_length, dtype=np.int64)
        prev_output_tokens.append(torch.from_numpy(tokens))
    # pad the variable-length target prefixes into one (B, max_len) batch
    prev_output_tokens = fairseq_data_utils.collate_tokens(
        prev_output_tokens,
        pad_idx=1,
        eos_idx=2,
        left_pad=False,
        move_eos_to_beginning=False,
    )
    src_lengths, sorted_order = src_lengths.sort(descending=True)
    forward_input["src_tokens"] = feature.index_select(0, sorted_order)
    forward_input["src_lengths"] = src_lengths
    # NOTE(review): prev_output_tokens is NOT reordered by sorted_order, so
    # targets no longer align with the sorted features; harmless for the
    # shape/dtype checks these dummies feed, but confirm before reusing.
    forward_input["prev_output_tokens"] = prev_output_tokens
    return forward_input
def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)):
    """Fabricate a random encoder-output dict in (T, B, D) layout.

    Returns a dict with:
      - "encoder_out": float32 tensor of shape ``encoder_out_shape``
      - "encoder_padding_mask": (T, B) tensor where element (t, b) is
        truthy iff timestep t of sequence b is padding (invalid)
    """
    max_len, bsz, _ = encoder_out_shape
    features = np.random.randn(*encoder_out_shape).astype(np.float32)
    lengths = torch.from_numpy(np.random.randint(low=1, high=max_len, size=bsz))
    # positions at or beyond a sequence's length count as padding
    positions = torch.arange(max_len).view(1, max_len).expand(bsz, -1)
    padding_mask = positions >= lengths.view(bsz, 1).expand(-1, max_len)
    return {
        "encoder_out": torch.from_numpy(features),
        "encoder_padding_mask": padding_mask.t(),
    }
def _current_postion_info():
cf = currentframe()
frameinfo = " (at {}:{})".format(
os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno
)
return frameinfo
def check_encoder_output(encoder_output, batch_size=None):
    """we expect encoder_output to be a dict with the following
    key/value pairs:
    - encoder_out: a Torch.Tensor
    - encoder_padding_mask: a binary Torch.Tensor

    Returns (True, None) when the output conforms, otherwise
    (False, error_message) where the message embeds the caller's source
    location. When *batch_size* is given, the mask's second dimension
    must match it.
    """
    if not isinstance(encoder_output, dict):
        msg = (
            "FairseqEncoderModel.forward(...) must be a dict" + _current_postion_info()
        )
        return False, msg
    if "encoder_out" not in encoder_output:
        msg = (
            "FairseqEncoderModel.forward(...) must contain encoder_out"
            + _current_postion_info()
        )
        return False, msg
    if "encoder_padding_mask" not in encoder_output:
        msg = (
            "FairseqEncoderModel.forward(...) must contain encoder_padding_mask"
            + _current_postion_info()
        )
        return False, msg
    if not isinstance(encoder_output["encoder_out"], torch.Tensor):
        msg = "encoder_out must be a torch.Tensor" + _current_postion_info()
        return False, msg
    if encoder_output["encoder_out"].dtype != torch.float32:
        msg = "encoder_out must have float32 dtype" + _current_postion_info()
        return False, msg
    # the padding mask may be None; when present it must be a 2-d (T, B)
    # uint8 (or bool, on torch versions that have it) tensor
    mask = encoder_output["encoder_padding_mask"]
    if mask is not None:
        if not isinstance(mask, torch.Tensor):
            msg = (
                "encoder_padding_mask must be a torch.Tensor" + _current_postion_info()
            )
            return False, msg
        if (
            mask.dtype != torch.uint8
            and (not hasattr(torch, 'bool') or mask.dtype != torch.bool)
        ):
            msg = (
                "encoder_padding_mask must have dtype of uint8"
                + _current_postion_info()
            )
            return False, msg
        if mask.dim() != 2:
            msg = (
                "we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)"
                + _current_postion_info()
            )
            return False, msg
        if batch_size is not None and mask.size(1) != batch_size:
            msg = (
                "we expect encoder_padding_mask to be a 2-d tensor, with size(1)"
                + " being the batch size"
                + _current_postion_info()
            )
            return False, msg
    return True, None
def check_decoder_output(decoder_output):
    """Validate that a decoder's output is a 2-element tuple whose first
    element is a torch.Tensor (the second element is reserved for future use).

    Returns (True, None) on success, otherwise (False, error_message) with
    the caller's source location appended to the message.
    """
    if not isinstance(decoder_output, tuple):
        return False, "FariseqDecoder output must be a tuple" + _current_postion_info()
    if len(decoder_output) != 2:
        return False, "FairseqDecoder output must be 2-elem tuple" + _current_postion_info()
    if not isinstance(decoder_output[0], torch.Tensor):
        return (
            False,
            "FariseqDecoder output[0] must be a torch.Tensor" + _current_postion_info(),
        )
    return True, None
# ///////////////////////////////////////////////////////////////////////////
# Base Test class
# ///////////////////////////////////////////////////////////////////////////
class TestBaseFairseqModelBase(unittest.TestCase):
    """Shared scaffolding for unittests of any `BaseFairseqModel` subclass.

    The base skips itself when collected directly; only concrete
    subclasses actually run.
    """
    @classmethod
    def setUpClass(cls):
        # running the abstract base itself makes no sense -- skip it
        if cls is TestBaseFairseqModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()
    def setUpModel(self, model):
        """Store the model under test (must be a BaseFairseqModel)."""
        self.assertTrue(isinstance(model, BaseFairseqModel))
        self.model = model
    def setupInput(self):
        """Hook for subclasses; no default input."""
        pass
    def setUp(self):
        self.model = None
        self.forward_input = None
class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase):
    """
    base code to test FairseqEncoderDecoderModel (formerly known as
    `FairseqModel`); concrete tests must be derived from this base class
    """
    @classmethod
    def setUpClass(cls):
        # skip when collected as the (abstract) base itself
        if cls is TestFairseqEncoderDecoderModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()
    def setUpModel(self, model_cls, extra_args_setters=None):
        """Build an instance of *model_cls* from default parsed args.

        extra_args_setters: optional list of callables that mutate the
        parsed args before the model is built.
        """
        self.assertTrue(
            issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)),
            msg="This class only tests for FairseqModel subclasses",
        )
        task, parser = get_dummy_task_and_parser()
        model_cls.add_args(parser)
        args = parser.parse_args([])
        if extra_args_setters is not None:
            for args_setter in extra_args_setters:
                args_setter(args)
        model = model_cls.build_model(args, task)
        self.model = model
    def setUpInput(self, input=None):
        """Store a forward-input dict (random dummy input by default)."""
        self.forward_input = get_dummy_input() if input is None else input
    def setUp(self):
        super().setUp()
    def test_forward(self):
        """forward() must return a (Tensor, extra) tuple."""
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            # for FairseqEncoderDecoderModel, forward returns a tuple of two
            # elements, the first one is a Torch.Tensor
            succ, msg = check_decoder_output(forward_output)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output
    def test_get_normalized_probs(self):
        """(log-)probs must be tensors tagged with a batch_first attribute."""
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)
            # in order for different models/criterion to play with each other
            # we need to know whether the logprob or prob output is batch_first
            # or not. We assume an additional attribute will be attached to logprob
            # or prob. If you find your code failed here, simply override
            # FairseqModel.get_normalized_probs, see example at
            # https://fburl.com/batch_first_example
            self.assertTrue(hasattr(logprob, "batch_first"))
            self.assertTrue(hasattr(prob, "batch_first"))
            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))
class TestFairseqEncoderModelBase(TestBaseFairseqModelBase):
    """
    base class to test FairseqEncoderModel
    """
    @classmethod
    def setUpClass(cls):
        # skip when collected as the (abstract) base itself
        if cls is TestFairseqEncoderModelBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()
    def setUpModel(self, model_cls, extra_args_setters=None):
        """Build an instance of *model_cls* from default parsed args.

        extra_args_setters: optional list of callables that mutate the
        parsed args before the model is built.
        """
        self.assertTrue(
            issubclass(model_cls, FairseqEncoderModel),
            msg="This class is only used for testing FairseqEncoderModel",
        )
        task, parser = get_dummy_task_and_parser()
        model_cls.add_args(parser)
        args = parser.parse_args([])
        if extra_args_setters is not None:
            for args_setter in extra_args_setters:
                args_setter(args)
        model = model_cls.build_model(args, task)
        self.model = model
    def setUpInput(self, input=None):
        """Store a forward-input dict suitable for an encoder-only model."""
        self.forward_input = get_dummy_input() if input is None else input
        # get_dummy_input() is originally for s2s, here we delete extra dict
        # items, so it can be used for EncoderModel / Encoder as well
        self.forward_input.pop("prev_output_tokens", None)
    def setUp(self):
        super().setUp()
    def test_forward(self):
        """forward() must return a valid encoder-output dict."""
        if self.forward_input and self.model:
            bsz = self.forward_input["src_tokens"].size(0)
            forward_output = self.model.forward(**self.forward_input)
            # we expect forward_output to be a dict with the following
            # key/value pairs:
            # - encoder_out: a Torch.Tensor
            # - encoder_padding_mask: a binary Torch.Tensor
            succ, msg = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output
    def test_get_normalized_probs(self):
        """(log-)probs must be tensors tagged with a batch_first attribute."""
        if self.model and self.forward_input:
            forward_output = self.model.forward(**self.forward_input)
            logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
            prob = self.model.get_normalized_probs(forward_output, log_probs=False)
            # in order for different models/criterion to play with each other
            # we need to know whether the logprob or prob output is batch_first
            # or not. We assume an additional attribute will be attached to logprob
            # or prob. If you find your code failed here, simply override
            # FairseqModel.get_normalized_probs, see example at
            # https://fburl.com/batch_first_example
            self.assertTrue(hasattr(logprob, "batch_first"))
            self.assertTrue(hasattr(prob, "batch_first"))
            self.assertTrue(torch.is_tensor(logprob))
            self.assertTrue(torch.is_tensor(prob))
class TestFairseqEncoderBase(unittest.TestCase):
    """
    base class to test FairseqEncoder
    """
    @classmethod
    def setUpClass(cls):
        # skip when collected as the (abstract) base itself
        if cls is TestFairseqEncoderBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()
    def setUpEncoder(self, encoder):
        """Store the encoder under test (must be a FairseqEncoder)."""
        self.assertTrue(
            isinstance(encoder, FairseqEncoder),
            msg="This class is only used for test FairseqEncoder",
        )
        self.encoder = encoder
    def setUpInput(self, input=None):
        """Store a forward-input dict suitable for a bare encoder."""
        self.forward_input = get_dummy_input() if input is None else input
        # get_dummy_input() is originally for s2s, here we delete extra dict
        # items, so it can be used for EncoderModel / Encoder as well
        self.forward_input.pop("prev_output_tokens", None)
    def setUp(self):
        self.encoder = None
        self.forward_input = None
    def test_forward(self):
        """forward() must return a valid encoder-output dict."""
        if self.encoder and self.forward_input:
            bsz = self.forward_input["src_tokens"].size(0)
            forward_output = self.encoder.forward(**self.forward_input)
            succ, msg = check_encoder_output(forward_output, batch_size=bsz)
            if not succ:
                self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output
class TestFairseqDecoderBase(unittest.TestCase):
    """
    base class to test FairseqDecoder
    """
    @classmethod
    def setUpClass(cls):
        # skip when collected as the (abstract) base itself
        if cls is TestFairseqDecoderBase:
            raise unittest.SkipTest("Skipping test case in base")
        super().setUpClass()
    def setUpDecoder(self, decoder):
        """Store the decoder under test (must be a FairseqDecoder)."""
        self.assertTrue(
            isinstance(decoder, FairseqDecoder),
            msg="This class is only used for test FairseqDecoder",
        )
        self.decoder = decoder
    def setUpInput(self, input=None):
        """Store an encoder-output dict (random dummy output by default)."""
        self.forward_input = get_dummy_encoder_output() if input is None else input
    def setUpPrevOutputTokens(self, tokens=None):
        """Store the teacher-forcing target prefix tokens."""
        if tokens is None:
            self.encoder_input = get_dummy_input()
            self.prev_output_tokens = self.encoder_input["prev_output_tokens"]
        else:
            self.prev_output_tokens = tokens
    def setUp(self):
        self.decoder = None
        self.forward_input = None
        self.prev_output_tokens = None
    def test_forward(self):
        """forward() must return a (Tensor, extra) tuple."""
        if (
            self.decoder is not None
            and self.forward_input is not None
            and self.prev_output_tokens is not None
        ):
            forward_output = self.decoder.forward(
                prev_output_tokens=self.prev_output_tokens,
                encoder_out=self.forward_input,
            )
            succ, msg = check_decoder_output(forward_output)
            if not succ:
                self.assertTrue(succ, msg=msg)
            # bug fix: store the result in forward_output (previously it was
            # assigned to forward_input, clobbering the input fixture);
            # this matches the other Test*Base classes in this file
            self.forward_output = forward_output
class DummyEncoderModel(FairseqEncoderModel):
    """Encoder-only model wrapper used by the criterion tests."""
    def __init__(self, encoder):
        super().__init__(encoder)
    @classmethod
    def build_model(cls, args, task):
        """Alternate constructor matching the fairseq build_model contract."""
        return cls(DummyEncoder())
    def get_logits(self, net_output):
        """Map probabilities back to logits (inverse of the sigmoid).

        F.binary_cross_entropy_with_logits fuses sigmoid and CE, so
        criteria built on it need logits rather than probabilities.
        """
        probs = net_output["encoder_out"]
        return torch.log(torch.div(probs, 1 - probs))
    def get_normalized_probs(self, net_output, log_probs, sample=None):
        """Delegate to the base class but tag the result as batch-first."""
        out = super().get_normalized_probs(net_output, log_probs, sample=sample)
        out.batch_first = True
        return out
class DummyEncoder(FairseqEncoder):
    """Pass-through encoder: features in, features out, plus a padding mask."""
    def __init__(self):
        super().__init__(None)
    def forward(self, src_tokens, src_lengths):
        """Return the inputs unchanged together with their padding mask."""
        padding_mask, _ = lengths_to_encoder_padding_mask(src_lengths)
        return {
            "encoder_out": src_tokens,
            "encoder_padding_mask": padding_mask,
        }
class CrossEntropyCriterionTestBase(unittest.TestCase):
    """Shared fixture for testing cross-entropy-style criteria against a
    DummyEncoderModel; concrete subclasses must set ``criterion_cls``."""
    @classmethod
    def setUpClass(cls):
        # skip when collected as the (abstract) base itself
        if cls is CrossEntropyCriterionTestBase:
            raise unittest.SkipTest("Skipping base class test case")
        super().setUpClass()
    def setUpArgs(self):
        """Default criterion args; subclasses may override."""
        args = argparse.Namespace()
        args.sentence_avg = False
        args.threshold = 0.1  # to use with BinaryCrossEntropyWithLogitsCriterion
        return args
    def setUp(self):
        args = self.setUpArgs()
        self.model = DummyEncoderModel(encoder=DummyEncoder())
        self.criterion = self.criterion_cls.build_criterion(args=args, task=DummyTask(args))
    def get_src_tokens(self, correct_prediction, aggregate):
        """
        correct_prediction: True if the net_output (src_tokens) should
        predict the correct target
        aggregate: True if the criterion expects net_output (src_tokens)
        aggregated across time axis
        """
        # the "correct" class is index 0, the wrong one index 1
        predicted_idx = 0 if correct_prediction else 1
        if aggregate:
            src_tokens = torch.zeros((2, 2), dtype=torch.float)
            for b in range(2):
                src_tokens[b][predicted_idx] = 1.0
        else:
            src_tokens = torch.zeros((2, 10, 2), dtype=torch.float)
            for b in range(2):
                for t in range(10):
                    src_tokens[b][t][predicted_idx] = 1.0
        return src_tokens
    def get_target(self, soft_target):
        """Build a (2, 2) soft-label target or a (2, 10) hard-label target
        (hard labels are all class 0)."""
        if soft_target:
            target = torch.zeros((2, 2), dtype=torch.float)
            for b in range(2):
                target[b][0] = 1.0
        else:
            target = torch.zeros((2, 10), dtype=torch.long)
        return target
    def get_test_sample(self, correct, soft_target, aggregate):
        """Assemble a criterion sample dict from the helpers above."""
        src_tokens = self.get_src_tokens(correct, aggregate)
        target = self.get_target(soft_target)
        L = src_tokens.size(1)
        # NOTE(review): src_lengths holds a single entry for a batch of two;
        # apparently sufficient for these criteria -- confirm before reuse
        return {
            "net_input": {"src_tokens": src_tokens, "src_lengths": torch.tensor([L])},
            "target": target,
            "ntokens": src_tokens.size(0) * src_tokens.size(1),
        }
| 19,491 | 33.9319 | 92 | py |
BIFI | BIFI-main/utils/fairseq/tests/speech_recognition/test_collaters.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from examples.speech_recognition.data.collaters import Seq2SeqCollater
class TestSeq2SeqCollator(unittest.TestCase):
    """Tests for the speech_recognition Seq2SeqCollater."""
    def test_collate(self):
        """Batching pads, sorts by source length and builds shifted targets."""
        eos_idx = 1
        pad_idx = 0
        collater = Seq2SeqCollater(
            feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx
        )
        # 2 frames in the first sample and 3 frames in the second one
        frames1 = np.array([[7, 8], [9, 10]])
        frames2 = np.array([[1, 2], [3, 4], [5, 6]])
        target1 = np.array([4, 2, 3, eos_idx])
        target2 = np.array([3, 2, eos_idx])
        sample1 = {"id": 0, "data": [frames1, target1]}
        sample2 = {"id": 1, "data": [frames2, target2]}
        batch = collater.collate([sample1, sample2])
        # collate sort inputs by frame's length before creating the batch
        self.assertTensorEqual(batch["id"], torch.tensor([1, 0]))
        self.assertEqual(batch["ntokens"], 7)
        self.assertTensorEqual(
            batch["net_input"]["src_tokens"],
            torch.tensor(
                [[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]]
            ),
        )
        # prev_output_tokens is the target shifted right with a leading eos
        self.assertTensorEqual(
            batch["net_input"]["prev_output_tokens"],
            torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]),
        )
        self.assertTensorEqual(batch["net_input"]["src_lengths"], torch.tensor([3, 2]))
        self.assertTensorEqual(
            batch["target"],
            torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]),
        )
        self.assertEqual(batch["nsentences"], 2)
    def assertTensorEqual(self, t1, t2):
        """Assert equal shape and element-wise equality."""
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
| 2,048 | 33.728814 | 87 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/checkpoint_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import logging
import os
import re
import traceback
from collections import OrderedDict
from typing import Union
import torch
from fairseq.file_io import PathManager
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch.serialization import default_restore_location
logger = logging.getLogger(__name__)
def save_checkpoint(args, trainer, epoch_itr, val_loss):
    """Write checkpoint files for the current training state.

    Depending on *args*, writes per-epoch, per-update, best and last
    checkpoints (the first is written once, the rest are copies), then
    prunes old checkpoints per the keep_* options. Only the distributed
    master saves; no-op when args.no_save is set. The best validation
    score so far is tracked on the function object itself
    (``save_checkpoint.best``).
    """
    from fairseq import distributed_utils, meters
    prev_best = getattr(save_checkpoint, "best", val_loss)
    if val_loss is not None:
        best_function = max if args.maximize_best_checkpoint_metric else min
        save_checkpoint.best = best_function(val_loss, prev_best)
    if args.no_save or not distributed_utils.is_master(args):
        return
    def is_better(a, b):
        # ties count as "better" so equal scores still refresh checkpoint_best
        return a >= b if args.maximize_best_checkpoint_metric else a <= b
    write_timer = meters.StopwatchMeter()
    write_timer.start()
    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()
    # map candidate checkpoint filename -> whether it should be written now
    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds["checkpoint{}.pt".format(epoch)] = (
        end_of_epoch
        and not args.no_epoch_checkpoints
        and epoch % args.save_interval == 0
    )
    checkpoint_conds["checkpoint_{}_{}.pt".format(epoch, updates)] = (
        not end_of_epoch
        and args.save_interval_updates > 0
        and updates % args.save_interval_updates == 0
    )
    checkpoint_conds["checkpoint_best.pt"] = val_loss is not None and (
        not hasattr(save_checkpoint, "best")
        or is_better(val_loss, save_checkpoint.best)
    )
    if val_loss is not None and args.keep_best_checkpoints > 0:
        checkpoint_conds["checkpoint.best_{}_{:.2f}.pt".format(
            args.best_checkpoint_metric, val_loss)] = (
            not hasattr(save_checkpoint, "best")
            or is_better(val_loss, save_checkpoint.best)
        )
    checkpoint_conds["checkpoint_last.pt"] = not args.no_last_checkpoints
    extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
    if hasattr(save_checkpoint, "best"):
        extra_state.update({"best": save_checkpoint.best})
    checkpoints = [
        os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
    ]
    if len(checkpoints) > 0:
        # write once, then copy to the remaining destinations
        trainer.save_checkpoint(checkpoints[0], extra_state)
        for cp in checkpoints[1:]:
            PathManager.copy(checkpoints[0], cp, overwrite=True)
        write_timer.stop()
        logger.info(
            "saved checkpoint {} (epoch {} @ {} updates, score {}) (writing took {} seconds)".format(
                checkpoints[0], epoch, updates, val_loss, write_timer.sum
            )
        )
    if not end_of_epoch and args.keep_interval_updates > 0:
        # remove old checkpoints; checkpoints are sorted in descending order
        checkpoints = checkpoint_paths(
            args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt"
        )
        for old_chk in checkpoints[args.keep_interval_updates :]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
    if args.keep_last_epochs > 0:
        # remove old epoch checkpoints; checkpoints are sorted in descending order
        checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt")
        for old_chk in checkpoints[args.keep_last_epochs :]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
    if args.keep_best_checkpoints > 0:
        # only keep the best N checkpoints according to validation metric
        checkpoints = checkpoint_paths(
            args.save_dir, pattern=r"checkpoint\.best_{}_(\d+\.?\d*)\.pt".format(args.best_checkpoint_metric))
        if not args.maximize_best_checkpoint_metric:
            checkpoints = checkpoints[::-1]
        for old_chk in checkpoints[args.keep_best_checkpoints:]:
            if os.path.lexists(old_chk):
                os.remove(old_chk)
def load_checkpoint(args, trainer, **passthrough_args):
    """
    Load a checkpoint and restore the training iterator.

    *passthrough_args* will be passed through to
    ``trainer.get_train_iterator``.

    Returns:
        (extra_state, epoch_itr): the checkpoint's extra state (or None
        when no checkpoint was found) and the restored epoch iterator.
    """
    # only one worker should attempt to create the required dir
    if args.distributed_rank == 0:
        os.makedirs(args.save_dir, exist_ok=True)
    if args.restore_file == "checkpoint_last.pt":
        checkpoint_path = os.path.join(args.save_dir, "checkpoint_last.pt")
    else:
        checkpoint_path = args.restore_file
    extra_state = trainer.load_checkpoint(
        checkpoint_path,
        args.reset_optimizer,
        args.reset_lr_scheduler,
        # NOTE(review): eval() of a command-line string; acceptable for
        # trusted CLI input, but never feed untrusted data through
        # --optimizer-overrides
        eval(args.optimizer_overrides),
        reset_meters=args.reset_meters,
    )
    if (
        extra_state is not None
        and "best" in extra_state
        and not args.reset_optimizer
        and not args.reset_meters
    ):
        # resume best-score tracking unless the optimizer/meters were reset
        save_checkpoint.best = extra_state["best"]
    if extra_state is not None and not args.reset_dataloader:
        # restore iterator from checkpoint
        itr_state = extra_state["train_iterator"]
        epoch_itr = trainer.get_train_iterator(
            epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
        )
        epoch_itr.load_state_dict(itr_state)
    else:
        epoch_itr = trainer.get_train_iterator(
            epoch=1, load_dataset=True, **passthrough_args
        )
    trainer.lr_step(epoch_itr.epoch)
    return extra_state, epoch_itr
def load_checkpoint_to_cpu(path, arg_overrides=None):
    """Load a checkpoint onto the CPU, upgrading old formats for
    backward compatibility.

    Args:
        path: checkpoint file path (opened through PathManager).
        arg_overrides: optional dict of attribute -> value applied on top
            of the args object stored in the checkpoint.

    Returns:
        the (upgraded) checkpoint state dict.
    """
    def _to_cpu(storage, location):
        # remap every storage to CPU regardless of where it was saved
        return default_restore_location(storage, "cpu")
    with PathManager.open(path, "rb") as f:
        state = torch.load(f, map_location=_to_cpu)
    args = state["args"]
    if arg_overrides is not None:
        for name, value in arg_overrides.items():
            setattr(args, name, value)
    return _upgrade_state_dict(state)
def load_model_ensemble(filenames, arg_overrides=None, task=None, strict=True):
    """Loads an ensemble of models.

    Args:
        filenames (List[str]): checkpoint files to load
        arg_overrides (Dict[str,Any], optional): override model args that
            were used during model training
        task (fairseq.tasks.FairseqTask, optional): task to use for loading
    """
    models, args, _ = load_model_ensemble_and_task(
        filenames, arg_overrides, task, strict
    )
    return models, args
def load_model_ensemble_and_task(filenames, arg_overrides=None, task=None, strict=True):
    """Load each checkpoint in *filenames* and return (models, args, task).

    When no *task* is supplied, one is created from the first checkpoint's
    stored args; the returned args come from the last checkpoint loaded.
    """
    from fairseq import tasks
    ensemble = []
    for ckpt_path in filenames:
        if not PathManager.exists(ckpt_path):
            raise IOError("Model file not found: {}".format(ckpt_path))
        state = load_checkpoint_to_cpu(ckpt_path, arg_overrides)
        args = state["args"]
        if task is None:
            task = tasks.setup_task(args)
        # build a fresh model for this ensemble member and load its weights
        model = task.build_model(args)
        model.load_state_dict(state["model"], strict=strict, args=args)
        ensemble.append(model)
    return ensemble, args, task
def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"):
    """Retrieve all checkpoint files found in directory *path*.

    A file counts as a checkpoint when its name fully matches *pattern*.
    When the pattern captures a group, results are ordered by that group's
    numeric value in descending order; otherwise by directory-listing
    position, descending.
    """
    regex = re.compile(pattern)
    matched = []
    for position, fname in enumerate(os.listdir(path)):
        match = regex.fullmatch(fname)
        if match is None:
            continue
        sort_key = float(match.group(1)) if match.groups() else position
        matched.append((sort_key, match.group(0)))
    matched.sort(reverse=True)
    return [os.path.join(path, name) for _, name in matched]
def torch_persistent_save(*args, **kwargs):
    """torch.save with up to three attempts.

    Transient failures are retried twice; if the final attempt also fails,
    the traceback is logged and None is returned (the failure is
    deliberately swallowed rather than raised).
    """
    last_attempt = 2
    for attempt in range(3):
        try:
            return torch.save(*args, **kwargs)
        except Exception:
            if attempt == last_attempt:
                logger.error(traceback.format_exc())
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
    """Recursively convert every tensor in *state_dict* to type *ttype*.

    Dicts and lists are rebuilt (the input structure is not modified);
    tensors are converted with ``Tensor.type(ttype)``; any other value is
    returned unchanged.

    Args:
        state_dict: arbitrarily nested dict/list structure holding tensors.
        ttype: target tensor type, e.g. torch.FloatTensor or torch.HalfTensor.

    Returns:
        a new structure mirroring *state_dict* with converted tensors.
    """
    if isinstance(state_dict, dict):
        cpu_dict = OrderedDict()
        for k, v in state_dict.items():
            # bug fix: propagate ttype into the recursion; it was previously
            # dropped, silently converting nested tensors to FloatTensor
            # regardless of the requested type
            cpu_dict[k] = convert_state_dict_type(v, ttype)
        return cpu_dict
    elif isinstance(state_dict, list):
        return [convert_state_dict_type(v, ttype) for v in state_dict]
    elif torch.is_tensor(state_dict):
        return state_dict.type(ttype)
    else:
        return state_dict
def save_state(
    filename,
    args,
    model_state_dict,
    criterion,
    optimizer,
    lr_scheduler,
    num_updates,
    optim_history=None,
    extra_state=None,
):
    """Serialize the full training state to *filename*.

    The saved dict contains the args, model weights, the optimizer history
    (criterion/optimizer names, lr-scheduler state and update count
    appended to *optim_history*), optionally the criterion's own
    parameters and the optimizer state, and any caller-provided
    *extra_state*.
    """
    from fairseq import utils
    if optim_history is None:
        optim_history = []
    if extra_state is None:
        extra_state = {}
    state_dict = {
        "args": args,
        "model": model_state_dict if model_state_dict else {},
        "optimizer_history": optim_history
        + [
            {
                "criterion_name": criterion.__class__.__name__,
                "optimizer_name": optimizer.__class__.__name__,
                "lr_scheduler_state": lr_scheduler.state_dict(),
                "num_updates": num_updates,
            }
        ],
        "extra_state": extra_state,
    }
    if utils.has_parameters(criterion):
        # some criteria (e.g. with learnable parameters) carry their own state
        state_dict["criterion"] = criterion.state_dict()
    if not args.no_save_optimizer_state:
        # optimizer state is converted to FloatTensors before serialization
        state_dict["last_optimizer_state"] = convert_state_dict_type(
            optimizer.state_dict()
        )
    with PathManager.open(filename, "wb") as f:
        torch_persistent_save(state_dict, f)
def _upgrade_state_dict(state):
    """Helper for upgrading old model checkpoints.

    Applies a sequence of in-place migrations so checkpoints written by
    older fairseq versions load with the current code, then fills in any
    registry defaults that were missing. Returns the (mutated) state dict.
    """
    from fairseq import models, registry, tasks
    # add optimizer_history
    if "optimizer_history" not in state:
        state["optimizer_history"] = [
            {"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]}
        ]
        state["last_optimizer_state"] = state["optimizer"]
        del state["optimizer"]
        del state["best_loss"]
    # move extra_state into sub-dictionary
    if "epoch" in state and "extra_state" not in state:
        state["extra_state"] = {
            "epoch": state["epoch"],
            "batch_offset": state["batch_offset"],
            "val_loss": state["val_loss"],
        }
        del state["epoch"]
        del state["batch_offset"]
        del state["val_loss"]
    # reduce optimizer history's memory usage (only keep the last state)
    if "optimizer" in state["optimizer_history"][-1]:
        state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"]
        for optim_hist in state["optimizer_history"]:
            del optim_hist["optimizer"]
    # record the optimizer class name
    if "optimizer_name" not in state["optimizer_history"][-1]:
        state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG"
    # move best_loss into lr_scheduler_state
    if "lr_scheduler_state" not in state["optimizer_history"][-1]:
        state["optimizer_history"][-1]["lr_scheduler_state"] = {
            "best": state["optimizer_history"][-1]["best_loss"]
        }
        del state["optimizer_history"][-1]["best_loss"]
    # keep track of number of updates
    if "num_updates" not in state["optimizer_history"][-1]:
        state["optimizer_history"][-1]["num_updates"] = 0
    # old model checkpoints may not have separate source/target positions
    if hasattr(state["args"], "max_positions") and not hasattr(
        state["args"], "max_source_positions"
    ):
        state["args"].max_source_positions = state["args"].max_positions
        state["args"].max_target_positions = state["args"].max_positions
    # use stateful training data iterator
    if "train_iterator" not in state["extra_state"]:
        state["extra_state"]["train_iterator"] = {
            "epoch": state["extra_state"]["epoch"],
            "iterations_in_epoch": state["extra_state"].get("batch_offset", 0),
        }
    # default to translation task
    if not hasattr(state["args"], "task"):
        state["args"].task = "translation"
    # --raw-text and --lazy-load are deprecated
    if getattr(state["args"], "raw_text", False):
        state["args"].dataset_impl = "raw"
    elif getattr(state["args"], "lazy_load", False):
        state["args"].dataset_impl = "lazy"
    # epochs start at 1
    if state["extra_state"]["train_iterator"] is not None:
        state["extra_state"]["train_iterator"]["epoch"] = max(
            state["extra_state"]["train_iterator"].get("epoch", 1),
            1,
        )
    # set any missing default values in the task, model or other registries
    registry.set_defaults(state["args"], tasks.TASK_REGISTRY[state["args"].task])
    registry.set_defaults(state["args"], models.ARCH_MODEL_REGISTRY[state["args"].arch])
    for registry_name, REGISTRY in registry.REGISTRIES.items():
        choice = getattr(state["args"], registry_name, None)
        if choice is not None:
            cls = REGISTRY["registry"][choice]
            registry.set_defaults(state["args"], cls)
    return state
def prune_state_dict(state_dict, args):
    """Prune the given state_dict if desired for LayerDrop
    (https://arxiv.org/abs/1909.11556).

    Training with LayerDrop allows models to be robust to pruning at inference
    time. This function prunes state_dict to allow smaller models to be loaded
    from a larger model and re-maps the existing state_dict for this to occur.

    It's called by functions that load models from checkpoints and does not
    need to be called directly.
    """
    if not args or args.arch == "ptt_transformer":
        # args should not be none, but don't crash if it is.
        return state_dict

    encoder_layers_to_keep = (
        args.encoder_layers_to_keep if "encoder_layers_to_keep" in vars(args) else None
    )
    decoder_layers_to_keep = (
        args.decoder_layers_to_keep if "decoder_layers_to_keep" in vars(args) else None
    )

    if not encoder_layers_to_keep and not decoder_layers_to_keep:
        return state_dict

    # apply pruning
    logger.info(
        "Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
    )

    def create_pruning_pass(layers_to_keep, layer_name):
        # Build an (old layer number -> new layer number) mapping plus a regex
        # that matches keys belonging to this side of the model.
        keep_layers = sorted(
            [int(layer_string) for layer_string in layers_to_keep.split(",")]
        )
        mapping_dict = {}
        for i in range(len(keep_layers)):
            mapping_dict[str(keep_layers[i])] = str(i)

        # raw string: the original "\." / "\d" escapes are invalid escape
        # sequences in plain str literals (SyntaxWarning on Python 3.12+)
        regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
        return {"substitution_regex": regex, "mapping_dict": mapping_dict}

    pruning_passes = []
    if encoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
    if decoder_layers_to_keep:
        pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))

    new_state_dict = {}
    for layer_name in state_dict.keys():
        match = re.search(r"\.layers\.(\d+)\.", layer_name)
        # if layer has no number in it, it is a supporting layer, such as an
        # embedding
        if not match:
            new_state_dict[layer_name] = state_dict[layer_name]
            continue

        # otherwise, layer should be pruned.
        original_layer_number = match.group(1)
        # figure out which mapping dict to replace from
        for pruning_pass in pruning_passes:
            if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
                "substitution_regex"
            ].search(layer_name):
                new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
                substitution_match = pruning_pass["substitution_regex"].search(
                    layer_name
                )
                # splice the renumbered layer index into the key
                new_state_key = (
                    layer_name[: substitution_match.start(1)]
                    + new_layer_number
                    + layer_name[substitution_match.end(1) :]
                )
                new_state_dict[new_state_key] = state_dict[layer_name]

    # Since layers are now pruned, *_layers_to_keep are no longer needed.
    # This is more of "It would make it work fix" rather than a proper fix.
    if "encoder_layers_to_keep" in vars(args):
        args.encoder_layers_to_keep = None
    if "decoder_layers_to_keep" in vars(args):
        args.decoder_layers_to_keep = None

    return new_state_dict
def load_pretrained_component_from_model(
    component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str
):
    """
    Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
    provided `component` object. If state_dict fails to load, there may be a
    mismatch in the architecture of the corresponding `component` found in the
    `checkpoint` file.
    """
    if not PathManager.exists(checkpoint):
        raise IOError("Model file not found: {}".format(checkpoint))
    state = load_checkpoint_to_cpu(checkpoint)
    if isinstance(component, FairseqEncoder):
        component_type = "encoder"
    elif isinstance(component, FairseqDecoder):
        component_type = "decoder"
    else:
        raise ValueError(
            "component to load must be either a FairseqEncoder or "
            "FairseqDecoder. Loading other component types are not supported."
        )
    # encoder.input_layers.0.0.weight --> input_layers.0.0.weight
    strip_len = len(component_type) + 1
    component_state_dict = OrderedDict(
        (key[strip_len:], value)
        for key, value in state["model"].items()
        if key.startswith(component_type)
    )
    component.load_state_dict(component_state_dict, strict=True)
    return component
def verify_checkpoint_directory(save_dir: str) -> None:
    """Create *save_dir* if needed and verify it is writable by creating and
    deleting a probe file; re-raises the OSError if the probe fails."""
    if not os.path.exists(save_dir):
        os.makedirs(save_dir, exist_ok=True)
    probe_path = os.path.join(save_dir, "dummy")
    try:
        open(probe_path, "w").close()
    except OSError as e:
        logger.warning("Unable to access checkpoint save directory: {}".format(save_dir))
        raise e
    else:
        os.remove(probe_path)
| 18,554 | 36.035928 | 114 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import importlib.util
import logging
import math
import os
import sys
import warnings
from collections import defaultdict
from itertools import accumulate
from typing import Callable, Dict, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.logging.meters import safe_round
from fairseq.modules import gelu, gelu_accurate
from fairseq.modules.multihead_attention import MultiheadAttention
from torch import Tensor
logger = logging.getLogger(__name__)
def split_paths(paths: str) -> List[str]:
    """Split a composite path string: on "|" when it contains a URL scheme
    ("://"), otherwise on the platform path separator."""
    separator = "|" if "://" in paths else os.pathsep
    return paths.split(separator)
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
    """Deprecated shim; forwards to checkpoint_utils.load_model_ensemble."""
    from fairseq import checkpoint_utils

    deprecation_warning(
        "utils.load_ensemble_for_inference is deprecated. "
        "Please use checkpoint_utils.load_model_ensemble instead."
    )
    return checkpoint_utils.load_model_ensemble(
        filenames, task=task, arg_overrides=model_arg_overrides
    )
def apply_to_sample(f, sample):
    """Recursively apply *f* to every tensor inside *sample* (which may be a
    tensor, dict, or list); non-tensor leaves pass through unchanged.
    Empty sized samples map to an empty dict."""
    if hasattr(sample, '__len__') and len(sample) == 0:
        return {}

    def _map(value):
        if torch.is_tensor(value):
            return f(value)
        if isinstance(value, dict):
            return {key: _map(item) for key, item in value.items()}
        if isinstance(value, list):
            return [_map(item) for item in value]
        return value

    return _map(sample)
def move_to_cuda(sample):
    """Return *sample* with every contained tensor moved to the CUDA device."""
    return apply_to_sample(lambda tensor: tensor.cuda(), sample)
def move_to_cpu(sample):
    """Return *sample* with every contained tensor moved to CPU memory."""
    return apply_to_sample(lambda tensor: tensor.cpu(), sample)
def get_incremental_state(
    module: MultiheadAttention,
    incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
    key: str,
) -> Optional[Dict[str, Optional[Tensor]]]:
    """Helper for getting incremental state for an nn.Module."""
    # Pure delegation to the module's own get_incremental_state implementation.
    return module.get_incremental_state(incremental_state, key)
def set_incremental_state(
    module: MultiheadAttention,
    incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
    key: str,
    value: Dict[str, Optional[Tensor]],
) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
    """Helper for setting incremental state for an nn.Module."""
    if incremental_state is None:
        # nothing to update when no incremental state is being tracked
        return incremental_state
    result = module.set_incremental_state(incremental_state, key, value)
    # the module may return a replacement state dict; fall back to the input
    return result if result is not None else incremental_state
def load_align_dict(replace_unk):
    """Build the unknown-word replacement dictionary.

    Returns None when replacement is disabled, a mapping loaded from the given
    file path (first column -> second column) when a non-empty path string is
    provided, and an empty dict when replacement should just copy the source
    token.
    """
    if replace_unk is None:
        return None
    align_dict = {}
    if isinstance(replace_unk, str) and len(replace_unk) > 0:
        with open(replace_unk, "r") as f:
            for line in f:
                cols = line.split()
                align_dict[cols[0]] = cols[1]
    return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
    """Log how many vocabulary symbols also appear in the embedding file."""
    overlap = len(set(embed_dict.keys()) & set(vocab_dict.symbols))
    logger.info("found {}/{} types in embedding file".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
    """Parse embedding text file into a dictionary of word and embedding tensors.

    The first line can have vocabulary size and dimension. The following lines
    should contain word and embedding separated by spaces.

    Example:
        2 5
        the -0.0230 -0.0264  0.0287  0.0171  0.1403
        at -0.0395 -0.1286  0.0275  0.0254 -0.0932
    """
    embed_dict = {}
    with open(embed_path) as f_embed:
        next(f_embed)  # skip header
        for line in f_embed:
            word, *weights = line.rstrip().split(" ")
            embed_dict[word] = torch.Tensor(list(map(float, weights)))
    return embed_dict
def load_embedding(embed_dict, vocab, embedding):
    """Copy pretrained vectors from *embed_dict* into the rows of *embedding*
    for every vocab symbol that has one; other rows are left untouched.
    Indexing by position deliberately (fairseq Dictionary lookups never raise
    IndexError, so plain iteration would not terminate)."""
    for idx in range(len(vocab)):
        vector = embed_dict.get(vocab[idx])
        if vector is not None:
            embedding.weight.data[idx] = vector
    return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
    """Replace each *unk* token in the hypothesis with its aligned source
    token, or with that token's entry in *align_dict* when present."""
    from fairseq import tokenizer

    # Tokens are strings here
    hypo_tokens = tokenizer.tokenize_line(hypo_str)
    # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
    src_tokens = tokenizer.tokenize_line(src_str) + ["<eos>"]
    result = []
    for i, token in enumerate(hypo_tokens):
        if token == unk:
            src_token = src_tokens[alignment[i]]
            # Either take the corresponding value in the aligned dictionary or just copy the original value.
            token = align_dict.get(src_token, src_token)
        result.append(token)
    return " ".join(result)
def post_process_prediction(
    hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None, extra_symbols_to_ignore=None
):
    """Convert hypothesis token ids to a string, optionally replacing unknown
    words via *align_dict* and stripping BPE markers; returns the (possibly
    re-encoded) tokens, the string, and the alignment."""
    hypo_str = tgt_dict.string(hypo_tokens, remove_bpe, extra_symbols_to_ignore=extra_symbols_to_ignore)
    if align_dict is not None:
        hypo_str = replace_unk(hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string())
    if align_dict is not None or remove_bpe is not None:
        # Convert back to tokens for evaluating with unk replacement or without BPE
        # Note that the dictionary can be modified inside the method.
        hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
    return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx: int, onnx_trace: bool = False):
    """Replace non-padding symbols with their position numbers.

    Position numbers begin at padding_idx+1. Padding symbols are ignored.
    """
    # The series of casts and type-conversions here are carefully
    # balanced to both work with ONNX export and XLA. In particular XLA
    # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
    # how to handle the dtype kwarg in cumsum.
    mask = tensor.ne(padding_idx).int()
    positions = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return positions.long() + padding_idx
def strip_pad(tensor, pad):
    """Return the entries of *tensor* that are not equal to *pad*."""
    keep_mask = tensor.ne(pad)
    return tensor[keep_mask]
def buffered_arange(max):
    """arange(max) backed by a shared buffer attached to the function, so
    repeated calls avoid reallocating the tensor."""
    if not hasattr(buffered_arange, "buf"):
        buffered_arange.buf = torch.LongTensor()
    buf = buffered_arange.buf
    if max > buf.numel():
        buf.resize_(max)
        torch.arange(max, out=buf)
    return buf[:max]
def convert_padding_direction(
    src_tokens, padding_idx, right_to_left=False, left_to_right=False
):
    """Move the padding in each row from one side to the other; exactly one of
    *right_to_left* / *left_to_right* must be set. No-op shortcuts when there
    is no padding or the rows are already padded on the requested side."""
    assert right_to_left ^ left_to_right
    pad_mask = src_tokens.eq(padding_idx)
    if not pad_mask.any():
        # no padding, return early
        return src_tokens
    if left_to_right and not pad_mask[:, 0].any():
        # already right padded
        return src_tokens
    if right_to_left and not pad_mask[:, -1].any():
        # already left padded
        return src_tokens
    max_len = src_tokens.size(1)
    positions = buffered_arange(max_len).type_as(src_tokens).expand_as(src_tokens)
    num_pads = pad_mask.long().sum(dim=1, keepdim=True)
    # rotate each row by its pad count (sign selects the direction)
    shift = -num_pads if right_to_left else num_pads
    index = torch.remainder(positions + shift, max_len)
    return src_tokens.gather(1, index)
def item(tensor):
    """Best-effort conversion of *tensor* to a Python scalar: use .item() if
    available, else the first element, else the value itself."""
    if hasattr(tensor, "item"):
        return tensor.item()
    elif hasattr(tensor, "__getitem__"):
        return tensor[0]
    return tensor
def clip_grad_norm_(params, max_norm, aggregate_norm_fn=None) -> torch.Tensor:
    """Scale the gradients of *params* in place so their total L2 norm does
    not exceed *max_norm* (no clipping when max_norm <= 0); returns the
    pre-clip total norm. *aggregate_norm_fn* may post-process the norm
    (e.g. for cross-worker aggregation)."""
    if isinstance(params, torch.Tensor):
        params = [params]
    params = list(params)
    grads = [p.grad.detach() for p in params if p.grad is not None]
    if not grads:
        # no gradients at all: return a zero on a matching device if possible
        return params[0].new_tensor(0.) if params else torch.tensor(0.)
    total_norm = torch.norm(torch.stack([torch.norm(g) for g in grads]))
    if aggregate_norm_fn is not None:
        total_norm = aggregate_norm_fn(total_norm)
    if max_norm > 0:
        clip_coef = (float(max_norm) / (total_norm + 1e-6)).clamp_(max=1)
        for g in grads:
            g.mul_(clip_coef)
    return total_norm
def fill_with_neg_inf(t):
    """FP16-compatible function that fills a tensor with -inf."""
    # Fill in fp32 and cast back to the input dtype rather than filling the
    # (possibly half-precision) tensor directly.
    return t.float().fill_(float("-inf")).type_as(t)
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return (arg_number, arg_number)
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
def resolve_max_positions(*args):
    """Resolve max position constraints from multiple sources."""

    def map_value_update(d1, d2):
        # element-wise min over dict values; keys present in only one side
        # pass through unchanged
        updated_value = copy.deepcopy(d1)
        for key, value in d2.items():
            updated_value[key] = min(d1[key], value) if key in updated_value else value
        return updated_value

    def nullsafe_min(l):
        # min over the non-None entries; None when everything is None
        non_null = [item for item in l if item is not None]
        return min(non_null) if non_null else None

    max_positions = None
    for arg in args:
        if max_positions is None:
            max_positions = arg
        elif arg is not None:
            max_positions, arg = _match_types(max_positions, arg)
            if isinstance(arg, (float, int)):
                max_positions = min(max_positions, arg)
            elif isinstance(arg, dict):
                max_positions = map_value_update(max_positions, arg)
            else:
                max_positions = tuple(map(nullsafe_min, zip(max_positions, arg)))
    return max_positions
def import_user_module(args):
    """Import the module located at ``args.user_dir`` (if set), temporarily
    prepending its parent directory to sys.path. Already-imported modules are
    left alone."""
    module_path = getattr(args, "user_dir", None)
    if module_path is None:
        return
    module_path = os.path.abspath(args.user_dir)
    if not os.path.exists(module_path):
        # fall back to a path relative to the fairseq package itself
        fairseq_rel_path = os.path.join(
            os.path.dirname(__file__), "..", args.user_dir
        )
        if os.path.exists(fairseq_rel_path):
            module_path = fairseq_rel_path
    module_parent, module_name = os.path.split(module_path)
    if module_name not in sys.modules:
        sys.path.insert(0, module_parent)
        importlib.import_module(module_name)
        sys.path.pop(0)
def softmax(x, dim: int, onnx_trace: bool = False):
    """Softmax over *dim*, computed in fp32; the ONNX path casts the input
    instead of passing a dtype kwarg."""
    if onnx_trace:
        return F.softmax(x.float(), dim=dim)
    return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim: int, onnx_trace: bool = False):
    """Log-softmax over *dim*, computed in fp32; the ONNX path casts the input
    instead of passing a dtype kwarg."""
    if onnx_trace:
        return F.log_softmax(x.float(), dim=dim)
    return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss, round=2, base=2):
    """Convert a log-base-``base`` loss to perplexity, rounded to ``round``
    digits; returns 0 when loss is missing and inf on numeric overflow."""
    if loss is None:
        return 0.
    try:
        return safe_round(base ** loss, round)
    except OverflowError:
        return float('inf')
def deprecation_warning(message, stacklevel=3):
    """Emit *message* as a UserWarning (shown by default, unlike DeprecationWarning)."""
    # don't use DeprecationWarning, since it's ignored by default
    warnings.warn(message, stacklevel=stacklevel)
def get_activation_fn(activation: str) -> Callable:
    """ Returns the activation function corresponding to `activation` """
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return gelu
    if activation == "gelu_fast":
        # legacy alias kept for old configs
        deprecation_warning(
            "--activation-fn=gelu_fast has been renamed to gelu_accurate"
        )
        return gelu_accurate
    if activation == "gelu_accurate":
        return gelu_accurate
    if activation == "tanh":
        return torch.tanh
    if activation == "linear":
        return lambda x: x
    raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
    """Return the activation names accepted by get_activation_fn()."""
    return [
        "relu",
        "gelu",
        "gelu_fast",  # deprecated
        "gelu_accurate",
        "tanh",
        "linear",
    ]
@contextlib.contextmanager
def eval(model):
    """Context manager that puts *model* into eval mode and restores its
    previous training flag on exit.

    Fix: the restore now runs in a ``finally`` block, so the training mode is
    restored even when the managed block raises (previously an exception left
    the model stuck in eval mode).
    """
    is_training = model.training
    model.eval()
    try:
        yield
    finally:
        model.train(is_training)
def has_parameters(module):
    """Return True iff *module* exposes at least one parameter."""
    return any(True for _ in module.parameters())
def set_torch_seed(seed):
    """Seed both the CPU and CUDA torch random number generators."""
    # Set seed based on args.seed and the update number so that we get
    # reproducible results when resuming from checkpoints
    assert isinstance(seed, int)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
@contextlib.contextmanager
def with_torch_seed(seed):
    """Run the wrapped block under *seed*, then restore the previous CPU and
    CUDA RNG states (on normal exit)."""
    assert isinstance(seed, int)
    saved_cpu_state = torch.get_rng_state()
    saved_cuda_state = torch.cuda.get_rng_state()
    set_torch_seed(seed)
    yield
    torch.set_rng_state(saved_cpu_state)
    torch.cuda.set_rng_state(saved_cuda_state)
def parse_alignment(line):
    """
    Parses a single line from the alignment file.

    Args:
        line (str): String containing the alignment of the format:
            <src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
            <src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.

    Returns:
        torch.IntTensor: packed alignments of shape (2 * m).
    """
    flat = []
    for pair in line.strip().split():
        src_idx, tgt_idx = pair.split("-")
        flat.extend((int(src_idx), int(tgt_idx)))
    return torch.IntTensor(flat)
def get_token_to_word_mapping(tokens, exclude_list):
    """Map each token position to a 1-based word index, where tokens from
    *exclude_list* do not start a new word."""
    word_start = [int(token not in exclude_list) for token in tokens]
    word_idx = list(accumulate(word_start))
    return dict(enumerate(word_idx))
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
    """Extract a hard word alignment from an attention matrix by taking, for
    each valid target token, the argmax source position (pad/eos positions
    are masked out), then mapping token positions to 0-based word indices.

    Returns a list of (src_word_idx, tgt_word_idx) pairs.
    """
    # positions of real target tokens / of masked-out source tokens
    tgt_valid = ((tgt_sent != pad) & (tgt_sent != eos)).nonzero().squeeze(dim=-1)
    src_invalid = ((src_sent == pad) | (src_sent == eos)).nonzero().squeeze(dim=-1)
    src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
    tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
    alignment = []
    if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
        # restrict attention to valid rows, forbid invalid source columns
        attn_valid = attn[tgt_valid]
        attn_valid[:, src_invalid] = float("-inf")
        _, src_indices = attn_valid.max(dim=1)
        for tgt_idx, src_idx in zip(tgt_valid, src_indices):
            # word mappings are 1-based, so shift to 0-based here
            alignment.append(
                (
                    src_token_to_word[src_idx.item()] - 1,
                    tgt_token_to_word[tgt_idx.item()] - 1,
                )
            )
    return alignment
def new_arange(x, *size):
    """
    Return a Tensor of `size` filled with a range function on the device of x.
    If size is empty, using the size of the variable x.
    """
    shape = size if len(size) > 0 else x.size()
    return torch.arange(shape[-1], device=x.device).expand(*shape).contiguous()
| 15,998 | 31.062124 | 111 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/hub_utils.py | #!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import logging
import os
from typing import List, Dict, Iterator, Tuple, Any
import torch
from torch import nn
from fairseq import utils
from fairseq.data import encoders
logger = logging.getLogger(__name__)
def from_pretrained(
    model_name_or_path,
    checkpoint_file='model.pt',
    data_name_or_path='.',
    archive_map=None,
    **kwargs
):
    """Load a pretrained model ensemble plus its task and args.

    *model_name_or_path* may be a local path, a URL, or (via *archive_map*) a
    shortcut name; extra **kwargs become checkpoint arg overrides. Returns a
    dict with keys 'args', 'task' and 'models'.
    """
    from fairseq import checkpoint_utils, file_utils
    if archive_map is not None:
        # resolve shortcut names for model and data through the archive map
        if model_name_or_path in archive_map:
            model_name_or_path = archive_map[model_name_or_path]
        if data_name_or_path is not None and data_name_or_path in archive_map:
            data_name_or_path = archive_map[data_name_or_path]
        # allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
        # for each model
        if isinstance(model_name_or_path, dict):
            for k, v in model_name_or_path.items():
                if k == 'checkpoint_file':
                    checkpoint_file = v
                elif (
                    k != 'path'
                    # only set kwargs that don't already have overrides
                    and k not in kwargs
                ):
                    kwargs[k] = v
            model_name_or_path = model_name_or_path['path']
    model_path = file_utils.load_archive_file(model_name_or_path)
    # convenience hack for loading data and BPE codes from model archive
    if data_name_or_path.startswith('.'):
        kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
    else:
        kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
    # probe the archive for known BPE/sentencepiece artifacts
    for file, arg in {
        'code': 'bpe_codes',
        'bpecodes': 'bpe_codes',
        'sentencepiece.bpe.model': 'sentencepiece_vocab',
    }.items():
        path = os.path.join(model_path, file)
        if os.path.exists(path):
            kwargs[arg] = path
    if 'user_dir' in kwargs:
        utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))
    models, args, task = checkpoint_utils.load_model_ensemble_and_task(
        [os.path.join(model_path, cpt) for cpt in checkpoint_file.split(os.pathsep)],
        arg_overrides=kwargs,
    )
    return {
        'args': args,
        'task': task,
        'models': models,
    }
class GeneratorHubInterface(nn.Module):
    """
    PyTorch Hub interface for generating sequences from a pre-trained
    translation or language model.
    """
    def __init__(self, args, task, models):
        super().__init__()
        self.args = args
        self.task = task
        self.models = nn.ModuleList(models)
        self.src_dict = task.source_dictionary
        self.tgt_dict = task.target_dictionary
        # optimize model for generation
        for model in self.models:
            model.make_generation_fast_(
                beamable_mm_beam_size=(
                    None if getattr(args, 'no_beamable_mm', False)
                    else getattr(args, 'beam', 5)
                ),
                need_attn=getattr(args, 'print_alignment', False),
            )
        # Load alignment dictionary for unknown word replacement
        # (None if no unknown word replacement, empty if no path to align dictionary)
        self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None))
        self.tokenizer = encoders.build_tokenizer(args)
        self.bpe = encoders.build_bpe(args)
        self.max_positions = utils.resolve_max_positions(
            self.task.max_positions(), *[model.max_positions() for model in models]
        )
        # this is useful for determining the device
        self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))
    @property
    def device(self):
        # the dummy buffer follows the module across .to()/.cuda() moves,
        # so its device is the module's device
        return self._float_tensor.device
    def translate(self, sentences: List[str], beam: int = 5, verbose: bool = False, **kwargs) -> List[str]:
        """Translate *sentences*; alias for :meth:`sample` with beam search."""
        return self.sample(sentences, beam, verbose, **kwargs)
    def sample(self, sentences: List[str], beam: int = 1, verbose: bool = False, **kwargs) -> List[str]:
        """Generate one output string per input (best hypothesis only).
        A single string input returns a single string."""
        if isinstance(sentences, str):
            return self.sample([sentences], beam=beam, verbose=verbose, **kwargs)[0]
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
        return [self.decode(hypos[0]['tokens']) for hypos in batched_hypos]
    def score(self, sentences: List[str], **kwargs):
        """Score *sentences* as references; returns the best hypothesis dicts."""
        if isinstance(sentences, str):
            return self.score([sentences], **kwargs)[0]
        # NOTE: this doesn't support translation tasks currently
        tokenized_sentences = [self.encode(sentence) for sentence in sentences]
        return [hypos[0] for hypos in self.generate(tokenized_sentences, score_reference=True, **kwargs)]
    def generate(
        self,
        tokenized_sentences: List[torch.LongTensor],
        beam: int = 5,
        verbose: bool = False,
        skip_invalid_size_inputs=False,
        **kwargs
    ) -> List[List[Dict[str, torch.Tensor]]]:
        """Run generation on pre-binarized inputs; returns the hypotheses for
        each input, restored to the original input order."""
        if torch.is_tensor(tokenized_sentences) and tokenized_sentences.dim() == 1:
            # single-sentence convenience path
            return self.generate(
                tokenized_sentences.unsqueeze(0), beam=beam, verbose=verbose, **kwargs
            )[0]
        # build generator using current args as well as any kwargs
        gen_args = copy.copy(self.args)
        gen_args.beam = beam
        for k, v in kwargs.items():
            setattr(gen_args, k, v)
        generator = self.task.build_generator(gen_args)
        results = []
        for batch in self._build_batches(tokenized_sentences, skip_invalid_size_inputs):
            batch = utils.apply_to_sample(lambda t: t.to(self.device), batch)
            translations = self.task.inference_step(generator, self.models, batch)
            for id, hypos in zip(batch["id"].tolist(), translations):
                results.append((id, hypos))
        # sort output to match input order
        outputs = [hypos for _, hypos in sorted(results, key=lambda x: x[0])]
        if verbose:
            # log source, hypotheses, positional scores and (optionally) alignments
            def getarg(name, default):
                return getattr(gen_args, name, getattr(self.args, name, default))
            for source_tokens, target_hypotheses in zip(tokenized_sentences, outputs):
                src_str_with_unk = self.string(source_tokens)
                logger.info('S\t{}'.format(src_str_with_unk))
                for hypo in target_hypotheses:
                    hypo_str = self.decode(hypo['tokens'])
                    logger.info('H\t{}\t{}'.format(hypo['score'], hypo_str))
                    logger.info('P\t{}'.format(
                        ' '.join(map(lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist()))
                    ))
                    if hypo['alignment'] is not None and getarg('print_alignment', False):
                        logger.info('A\t{}'.format(
                            ' '.join(map(lambda x: str(utils.item(x)), hypo['alignment'].int().cpu()))
                        ))
        return outputs
    def encode(self, sentence: str) -> torch.LongTensor:
        """String -> token ids: tokenize, apply BPE, then binarize."""
        sentence = self.tokenize(sentence)
        sentence = self.apply_bpe(sentence)
        return self.binarize(sentence)
    def decode(self, tokens: torch.LongTensor) -> str:
        """Token ids -> string: stringify, remove BPE, then detokenize."""
        sentence = self.string(tokens)
        sentence = self.remove_bpe(sentence)
        return self.detokenize(sentence)
    def tokenize(self, sentence: str) -> str:
        # no-op when no tokenizer is configured
        if self.tokenizer is not None:
            sentence = self.tokenizer.encode(sentence)
        return sentence
    def detokenize(self, sentence: str) -> str:
        if self.tokenizer is not None:
            sentence = self.tokenizer.decode(sentence)
        return sentence
    def apply_bpe(self, sentence: str) -> str:
        # no-op when no BPE model is configured
        if self.bpe is not None:
            sentence = self.bpe.encode(sentence)
        return sentence
    def remove_bpe(self, sentence: str) -> str:
        if self.bpe is not None:
            sentence = self.bpe.decode(sentence)
        return sentence
    def binarize(self, sentence: str) -> torch.LongTensor:
        return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
    def string(self, tokens: torch.LongTensor) -> str:
        return self.tgt_dict.string(tokens)
    def _build_batches(
        self, tokens: List[List[int]], skip_invalid_size_inputs: bool
    ) -> Iterator[Dict[str, Any]]:
        """Wrap *tokens* in an inference dataset and yield unshuffled batches."""
        lengths = torch.LongTensor([t.numel() for t in tokens])
        batch_iterator = self.task.get_batch_iterator(
            dataset=self.task.build_dataset_for_inference(tokens, lengths),
            max_tokens=self.args.max_tokens,
            max_sentences=self.args.max_sentences,
            max_positions=self.max_positions,
            ignore_invalid_inputs=skip_invalid_size_inputs,
        ).next_epoch_itr(shuffle=False)
        return batch_iterator
class BPEHubInterface(object):
    """PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
    def __init__(self, bpe, **kwargs):
        super().__init__()
        # kwargs are forwarded to the BPE implementation via an args namespace
        args = argparse.Namespace(bpe=bpe, **kwargs)
        self.bpe = encoders.build_bpe(args)
        assert self.bpe is not None
    def encode(self, sentence: str) -> str:
        """Apply BPE encoding to *sentence*."""
        return self.bpe.encode(sentence)
    def decode(self, sentence: str) -> str:
        """Reverse BPE encoding of *sentence*."""
        return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
    """PyTorch Hub interface for tokenization."""
    def __init__(self, tokenizer, **kwargs):
        super().__init__()
        # kwargs are forwarded to the tokenizer via an args namespace
        args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
        self.tokenizer = encoders.build_tokenizer(args)
        assert self.tokenizer is not None
    def encode(self, sentence: str) -> str:
        """Tokenize *sentence*."""
        return self.tokenizer.encode(sentence)
    def decode(self, sentence: str) -> str:
        """Detokenize *sentence*."""
        return self.tokenizer.decode(sentence)
| 10,125 | 36.227941 | 107 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/sequence_scorer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sys
from fairseq import utils
class SequenceScorer(object):
    """Scores the target for a given source sentence."""
    def __init__(self, tgt_dict, softmax_batch=None, compute_alignment=False, eos=None):
        # softmax_batch caps how many (batch * time) positions go through the
        # softmax at once; falsy means effectively no cap (sys.maxsize)
        self.pad = tgt_dict.pad()
        self.eos = tgt_dict.eos() if eos is None else eos
        self.softmax_batch = softmax_batch or sys.maxsize
        assert self.softmax_batch > 0
        self.compute_alignment = compute_alignment
    @torch.no_grad()
    def generate(self, models, sample, **kwargs):
        """Score a batch of translations."""
        net_input = sample['net_input']
        def batch_for_softmax(dec_out, target):
            # assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
            # yields (decoder_out, target, is_single) chunks small enough to
            # softmax within self.softmax_batch positions
            first, rest = dec_out[0], dec_out[1:]
            bsz, tsz, dim = first.shape
            if bsz * tsz < self.softmax_batch:
                yield dec_out, target, True
            else:
                flat = first.contiguous().view(1, -1, dim)
                flat_tgt = target.contiguous().view(flat.shape[:-1])
                s = 0
                while s < flat.size(1):
                    e = s + self.softmax_batch
                    yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
                    s = e
        def gather_target_probs(probs, target):
            # pick out, per position, the probability of the gold token
            probs = probs.gather(
                dim=2,
                index=target.unsqueeze(-1),
            )
            return probs
        orig_target = sample['target']
        # compute scores for each model in the ensemble
        avg_probs = None
        avg_attn = None
        for model in models:
            model.eval()
            decoder_out = model(**net_input)
            attn = decoder_out[1] if len(decoder_out) > 1 else None
            if type(attn) is dict:
                attn = attn.get('attn', None)
            batched = batch_for_softmax(decoder_out, orig_target)
            probs, idx = None, 0
            for bd, tgt, is_single in batched:
                # sample['target'] is temporarily swapped for the chunked target
                sample['target'] = tgt
                curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data
                if is_single:
                    probs = gather_target_probs(curr_prob, orig_target)
                else:
                    # accumulate chunk probabilities into a flat buffer
                    if probs is None:
                        probs = curr_prob.new(orig_target.numel())
                    step = curr_prob.size(0) * curr_prob.size(1)
                    end = step + idx
                    tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt)
                    probs[idx:end] = tgt_probs.view(-1)
                    idx = end
            sample['target'] = orig_target
            probs = probs.view(sample['target'].shape)
            if avg_probs is None:
                avg_probs = probs
            else:
                avg_probs.add_(probs)
            if attn is not None and torch.is_tensor(attn):
                attn = attn.data
                if avg_attn is None:
                    avg_attn = attn
                else:
                    avg_attn.add_(attn)
        if len(models) > 1:
            # ensemble: average (probabilities were kept un-logged above), then log
            avg_probs.div_(len(models))
            avg_probs.log_()
            if avg_attn is not None:
                avg_attn.div_(len(models))
        bsz = avg_probs.size(0)
        hypos = []
        start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz
        for i in range(bsz):
            # remove padding from ref
            ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \
                if sample['target'] is not None else None
            tgt_len = ref.numel()
            avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]
            score_i = avg_probs_i.sum() / tgt_len
            if avg_attn is not None:
                avg_attn_i = avg_attn[i]
                if self.compute_alignment:
                    alignment = utils.extract_hard_alignment(
                        avg_attn_i,
                        sample['net_input']['src_tokens'][i],
                        sample['target'][i],
                        self.pad,
                        self.eos,
                    )
                else:
                    alignment = None
            else:
                avg_attn_i = alignment = None
            # one single-hypothesis list per sentence, mirroring generator output
            hypos.append([{
                'tokens': ref,
                'score': score_i,
                'attention': avg_attn_i,
                'alignment': alignment,
                'positional_scores': avg_probs_i,
            }])
        return hypos
| 4,835 | 36.78125 | 107 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/binarizer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from collections import Counter
from fairseq.tokenizer import tokenize_line
import torch
def safe_readline(f):
    """readline() that tolerates starting inside a multi-byte character:
    on UnicodeDecodeError, back up one byte at a time until decoding succeeds."""
    position = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            position -= 1
            f.seek(position)  # search where this character begins
class Binarizer:
    """Static helpers that convert a text (or alignment) file into a stream of
    token-id tensors, feeding each tensor to a consumer callback. Byte-range
    arguments (offset/end) allow parallel binarization of file chunks."""
    @staticmethod
    def binarize(
        filename,
        dict,
        consumer,
        tokenize=tokenize_line,
        append_eos=True,
        reverse_order=False,
        offset=0,
        end=-1,
        already_numberized=False,
    ):
        """Binarize lines of *filename* starting at byte *offset* (stopping
        once the position passes *end*, if end > 0), calling *consumer(ids)*
        per line. Returns stats: nseq, nunk, ntok and the Counter of words
        that mapped to <unk>."""
        nseq, ntok = 0, 0
        replaced = Counter()
        def replaced_consumer(word, idx):
            # count words that mapped to <unk> (excluding the literal unk word)
            if idx == dict.unk_index and word != dict.unk_word:
                replaced.update([word])
        with open(filename, "r", encoding="utf-8") as f:
            f.seek(offset)
            # next(f) breaks f.tell(), hence readline() must be used
            line = safe_readline(f)
            while line:
                if end > 0 and f.tell() > end:
                    break
                if already_numberized:
                    # line is already a sequence of integer token ids
                    id_strings = line.strip().split()
                    id_list = [int(id_string) for id_string in id_strings]
                    if reverse_order:
                        id_list.reverse()
                    if append_eos:
                        id_list.append(dict.eos())
                    ids = torch.IntTensor(id_list)
                else:
                    ids = dict.encode_line(
                        line=line,
                        line_tokenizer=tokenize,
                        add_if_not_exist=False,
                        consumer=replaced_consumer,
                        append_eos=append_eos,
                        reverse_order=reverse_order,
                    )
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {
            "nseq": nseq,
            "nunk": sum(replaced.values()),
            "ntok": ntok,
            "replaced": replaced,
        }
    @staticmethod
    def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1):
        """Like :meth:`binarize` but each line is fed to *alignment_parser*;
        returns only the sequence count."""
        nseq = 0
        with open(filename, "r") as f:
            f.seek(offset)
            line = safe_readline(f)
            while line:
                if end > 0 and f.tell() > end:
                    break
                ids = alignment_parser(line)
                nseq += 1
                consumer(ids)
                line = f.readline()
        return {"nseq": nseq}
    @staticmethod
    def find_offsets(filename, num_chunks):
        """Compute num_chunks+1 byte offsets that split *filename* into
        roughly equal chunks aligned to line boundaries (safe_readline skips
        to the next full line). The final entry is left at 0; callers treat a
        non-positive end as end-of-file."""
        with open(filename, "r", encoding="utf-8") as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = size // num_chunks
            offsets = [0 for _ in range(num_chunks + 1)]
            for i in range(1, num_chunks):
                f.seek(chunk_size * i)
                safe_readline(f)
                offsets[i] = f.tell()
            return offsets
| 3,231 | 29.780952 | 84 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/distributed_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import pickle
import socket
import struct
import subprocess
import warnings
from collections import OrderedDict
from typing import Any, Dict, Mapping
import torch
import torch.distributed as dist
from fairseq import utils
logger = logging.getLogger(__name__)
def is_master(args):
    """Return True iff this process is the distributed master (rank 0)."""
    rank = args.distributed_rank
    return rank == 0
def infer_init_method(args):
    """Infer ``args.distributed_init_method`` (and the related rank/world-size
    fields) from the environment when the user did not set it explicitly.
    Two launch styles are recognized: ``torch.distributed.launch`` (via the
    MASTER_ADDR/MASTER_PORT/WORLD_SIZE/RANK env vars) and Slurm (via
    ``scontrol`` and the SLURM_* env vars). Mutates *args* in place and
    returns None.
    """
    if args.distributed_init_method is not None:
        # explicitly configured by the user; nothing to infer
        return
    # support torch.distributed.launch
    if all(key in os.environ for key in [
        'MASTER_ADDR', 'MASTER_PORT', 'WORLD_SIZE', 'RANK'
    ]):
        args.distributed_init_method = 'env://'
        args.distributed_world_size = int(os.environ['WORLD_SIZE'])
        args.distributed_rank = int(os.environ['RANK'])
    # we can determine the init method automatically for Slurm
    elif args.distributed_port > 0:
        node_list = os.environ.get('SLURM_STEP_NODELIST')
        if node_list is None:
            node_list = os.environ.get('SLURM_JOB_NODELIST')
        if node_list is not None:
            try:
                # Expand the compact Slurm hostlist; the first host becomes
                # the TCP rendezvous master.
                hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', node_list])
                args.distributed_init_method = 'tcp://{host}:{port}'.format(
                    host=hostnames.split()[0].decode('utf-8'),
                    port=args.distributed_port,
                )
                nnodes = int(os.environ.get('SLURM_NNODES'))
                ntasks_per_node = os.environ.get('SLURM_NTASKS_PER_NODE')
                if ntasks_per_node is not None:
                    ntasks_per_node = int(ntasks_per_node)
                else:
                    # derive tasks-per-node from the totals when not exported
                    ntasks = int(os.environ.get('SLURM_NTASKS'))
                    nnodes = int(os.environ.get('SLURM_NNODES'))
                    assert ntasks % nnodes == 0
                    ntasks_per_node = int(ntasks / nnodes)
                if ntasks_per_node == 1:
                    # one Slurm task per node: only this node's base rank is
                    # known here (per-GPU workers are presumably spawned by
                    # the caller — confirm against the training entry point)
                    assert args.distributed_world_size % nnodes == 0
                    gpus_per_node = args.distributed_world_size // nnodes
                    node_id = int(os.environ.get('SLURM_NODEID'))
                    args.distributed_rank = node_id * gpus_per_node
                else:
                    # Slurm already started one task per worker; adopt its ids
                    # and tell the caller not to spawn additional processes
                    assert ntasks_per_node == args.distributed_world_size // nnodes
                    args.distributed_no_spawn = True
                    args.distributed_rank = int(os.environ.get('SLURM_PROCID'))
                    args.device_id = int(os.environ.get('SLURM_LOCALID'))
            except subprocess.CalledProcessError as e:  # scontrol failed
                raise e
            except FileNotFoundError:  # Slurm is not installed
                pass
def distributed_init(args):
    """Initialize ``torch.distributed`` (and optional megatron model
    parallelism) from the ``args.distributed_*`` fields.
    Expects those fields to be populated (see :func:`infer_init_method`).
    Raises ValueError for a world size of 1 and warns (without re-initializing)
    if a process group already exists. Returns this process's rank.
    """
    if args.distributed_world_size == 1:
        raise ValueError('Cannot initialize distributed with distributed_world_size=1')
    if torch.distributed.is_initialized():
        warnings.warn('Distributed is already initialized, cannot initialize twice!')
    else:
        logger.info('distributed init (rank {}): {}'.format(
            args.distributed_rank, args.distributed_init_method,
        ))
        dist.init_process_group(
            backend=args.distributed_backend,
            init_method=args.distributed_init_method,
            world_size=args.distributed_world_size,
            rank=args.distributed_rank,
        )
        logger.info('initialized host {} as rank {}'.format(
            socket.gethostname(), args.distributed_rank,
        ))
        # perform a dummy all-reduce to initialize the NCCL communicator
        if torch.cuda.is_available():
            dist.all_reduce(torch.zeros(1).cuda())
        else:
            dist.all_reduce(torch.zeros(1))
        # only the master logs at INFO to keep multi-process output readable
        if is_master(args):
            logging.getLogger().setLevel(logging.INFO)
        else:
            logging.getLogger().setLevel(logging.WARNING)
    # the process group is authoritative for the rank from here on
    args.distributed_rank = torch.distributed.get_rank()
    if args.model_parallel_size > 1:
        try:
            from fairseq.model_parallel.megatron.mpu import (
                initialize_model_parallel,
                model_parallel_cuda_manual_seed,
            )
        except ImportError:
            raise ImportError(
                '\n\nPlease install the megatron submodule:'
                '\n\n  git submodule update --init '
                'fairseq/model_parallel/megatron'
            )
        initialize_model_parallel(args.model_parallel_size)
        model_parallel_cuda_manual_seed(args.seed)
    return args.distributed_rank
def get_rank():
    """Return this process's rank in the default process group."""
    return dist.get_rank()
def get_world_size():
    """Return the number of processes in the default process group."""
    return dist.get_world_size()
def get_default_group():
    """Return the default (WORLD) process group."""
    return dist.group.WORLD
def all_reduce(tensor, group=None):
    """All-reduce *tensor* in place across *group*, defaulting to the
    global WORLD group, and return torch's all_reduce result."""
    target_group = get_default_group() if group is None else group
    return dist.all_reduce(tensor, group=target_group)
def all_gather_list(data, group=None, max_size=16384):
    """Gathers arbitrary data from all nodes into a list.
    Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
    data. Note that *data* must be picklable.
    Args:
        data (Any): data from the local worker to be gathered on other workers
        group (optional): group of the collective
        max_size (int, optional): maximum size of the data to be gathered
            across workers
    """
    rank = get_rank()
    world_size = get_world_size()
    buffer_size = max_size * world_size
    # Lazily allocate, and cache on the function object, one CUDA gather
    # buffer plus a pinned CPU staging buffer; both are reused across calls.
    if not hasattr(all_gather_list, '_buffer') or \
            all_gather_list._buffer.numel() < buffer_size:
        all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
        all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
    buffer = all_gather_list._buffer
    buffer.zero_()
    cpu_buffer = all_gather_list._cpu_buffer
    data = utils.move_to_cpu(data)
    enc = pickle.dumps(data)
    enc_size = len(enc)
    header_size = 4 # size of header that contains the length of the encoded data
    size = header_size + enc_size
    if size > max_size:
        raise ValueError('encoded data size ({}) exceeds max_size ({})'.format(size, max_size))
    # Per-rank slot layout: 4-byte big-endian length header, then the pickle.
    header = struct.pack(">I", enc_size)
    cpu_buffer[:size] = torch.ByteTensor(list(header + enc))
    start = rank * max_size
    buffer[start:start + size].copy_(cpu_buffer[:size])
    # Every rank wrote only its own slot (the rest is zero), so a sum
    # all-reduce effectively concatenates all payloads.
    all_reduce(buffer, group=group)
    buffer = buffer.cpu()
    try:
        result = []
        for i in range(world_size):
            out_buffer = buffer[i * max_size:(i + 1) * max_size]
            # read the header, then unpickle exactly enc_size bytes
            enc_size, = struct.unpack(">I", bytes(out_buffer[:header_size].tolist()))
            if enc_size > 0:
                result.append(pickle.loads(bytes(out_buffer[header_size:header_size + enc_size].tolist())))
        return result
    except pickle.UnpicklingError:
        raise Exception(
            'Unable to unpickle data from other workers. all_gather_list requires all '
            'workers to enter the function together, so this error usually indicates '
            'that the workers have fallen out of sync somehow. Workers can fall out of '
            'sync if one of them runs out of memory, or if there are other conditions '
            'in your training script that can cause one worker to finish an epoch '
            'while other workers are still iterating over their portions of the data. '
            'Try rerunning with --ddp-backend=no_c10d and see if that helps.'
        )
def all_reduce_dict(
    data: Mapping[str, Any],
    device,
    group=None,
) -> Dict[str, Any]:
    """
    AllReduce a dictionary of values across workers. We separately
    reduce items that are already on the device and items on CPU for
    better performance.
    Args:
        data (Mapping[str, Any]): dictionary of data to all-reduce, but
            cannot be a nested dictionary
        device (torch.device): device for the reduction
        group (optional): group of the collective
    """
    data_keys = list(data.keys())
    # We want to separately reduce items that are already on the
    # device and items on CPU for performance reasons.
    cpu_data = OrderedDict()
    device_data = OrderedDict()
    for k in data_keys:
        t = data[k]
        if not torch.is_tensor(t):
            # plain Python numbers are reduced as CPU double tensors
            cpu_data[k] = torch.tensor(t, dtype=torch.double)
        elif t.device.type != device.type:
            cpu_data[k] = t.to(dtype=torch.double)
        else:
            device_data[k] = t.to(dtype=torch.double)
    def _all_reduce_dict(data: OrderedDict):
        # Stack values into one tensor so a single all_reduce call suffices.
        if len(data) == 0:
            return data
        buf = torch.stack(list(data.values())).to(device=device)
        all_reduce(buf, group=group)
        return {k: buf[i] for i, k in enumerate(data)}
    cpu_data = _all_reduce_dict(cpu_data)
    device_data = _all_reduce_dict(device_data)
    # Re-emit results in the caller's original key order.
    def get_from_stack(key):
        if key in cpu_data:
            return cpu_data[key]
        elif key in device_data:
            return device_data[key]
        raise KeyError
    return OrderedDict([(key, get_from_stack(key)) for key in data_keys])
| 9,197 | 35.070588 | 107 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/sequence_generator.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
class SequenceGenerator(object):
    """Beam-search sequence generator for fairseq encoder-decoder models.
    Decodes left-to-right, keeping ``beam_size`` live hypotheses per source
    sentence and finalizing those that emit EOS.
    """
    def __init__(
        self,
        tgt_dict,
        beam_size=1,
        max_len_a=0,
        max_len_b=200,
        min_len=1,
        normalize_scores=True,
        len_penalty=1.,
        unk_penalty=0.,
        retain_dropout=False,
        temperature=1.,
        match_source_len=False,
        no_repeat_ngram_size=0,
        search_strategy=None,
        eos=None
    ):
        """Generates translations of a given source sentence.
        Args:
            tgt_dict (~fairseq.data.Dictionary): target dictionary
            beam_size (int, optional): beam width (default: 1)
            max_len_a/b (int, optional): generate sequences of maximum length
                ax + b, where x is the source length
            min_len (int, optional): the minimum length of the generated output
                (not including end-of-sentence)
            normalize_scores (bool, optional): normalize scores by the length
                of the output (default: True)
            len_penalty (float, optional): length penalty, where <1.0 favors
                shorter, >1.0 favors longer sentences (default: 1.0)
            unk_penalty (float, optional): unknown word penalty, where <0
                produces more unks, >0 produces fewer (default: 0.0)
            retain_dropout (bool, optional): use dropout when generating
                (default: False)
            temperature (float, optional): temperature, where values
                >1.0 produce more uniform samples and values <1.0 produce
                sharper samples (default: 1.0)
            match_source_len (bool, optional): outputs should match the source
                length (default: False)
            no_repeat_ngram_size (int, optional): prevent any ngram of this
                size from being generated twice; 0 disables the check
                (default: 0)
            search_strategy (optional): search strategy instance; defaults to
                plain ``search.BeamSearch(tgt_dict)``
            eos (int, optional): token id that terminates a hypothesis;
                defaults to ``tgt_dict.eos()``
        """
        self.pad = tgt_dict.pad()
        self.unk = tgt_dict.unk()
        self.eos = tgt_dict.eos() if eos is None else eos
        self.vocab_size = len(tgt_dict)
        self.beam_size = beam_size
        # the max beam size is the dictionary size - 1, since we never select pad
        self.beam_size = min(beam_size, self.vocab_size - 1)
        self.max_len_a = max_len_a
        self.max_len_b = max_len_b
        self.min_len = min_len
        self.normalize_scores = normalize_scores
        self.len_penalty = len_penalty
        self.unk_penalty = unk_penalty
        self.retain_dropout = retain_dropout
        self.temperature = temperature
        self.match_source_len = match_source_len
        self.no_repeat_ngram_size = no_repeat_ngram_size
        assert temperature > 0, '--temperature must be greater than 0'
        self.search = (
            search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
        )
    @torch.no_grad()
    def generate(self, models, sample, **kwargs):
        """Generate a batch of translations.
        Args:
            models (List[~fairseq.models.FairseqModel]): ensemble of models
            sample (dict): batch
            prefix_tokens (torch.LongTensor, optional): force decoder to begin
                with these tokens
            bos_token (int, optional): beginning of sentence token
                (default: self.eos)
        """
        model = EnsembleModel(models)
        return self._generate(model, sample, **kwargs)
    @torch.no_grad()
    def _generate(
        self,
        model,
        sample,
        prefix_tokens=None,
        bos_token=None,
        **kwargs
    ):
        # Core beam-search loop. Returns, per sentence, a list of up to
        # beam_size hypothesis dicts sorted by descending score.
        if not self.retain_dropout:
            model.eval()
        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        encoder_input = {
            k: v for k, v in sample['net_input'].items()
            if k != 'prev_output_tokens'
        }
        src_tokens = encoder_input['src_tokens']
        # count non-special tokens per sentence (eos and pad excluded)
        src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
        input_size = src_tokens.size()
        # batch dimension goes first followed by source lengths
        bsz = input_size[0]
        src_len = input_size[1]
        beam_size = self.beam_size
        if self.match_source_len:
            max_len = src_lengths.max().item()
        else:
            max_len = min(
                int(self.max_len_a * src_len + self.max_len_b),
                # exclude the EOS marker
                model.max_decoder_positions() - 1,
            )
        assert self.min_len <= max_len, 'min_len cannot be larger than max_len, please adjust these!'
        # compute the encoder output for each beam
        encoder_outs = model.forward_encoder(encoder_input)
        # replicate each sentence's encoder output beam_size times
        new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
        new_order = new_order.to(src_tokens.device).long()
        encoder_outs = model.reorder_encoder_out(encoder_outs, new_order)
        # initialize buffers (the *_buf twins are swap targets used to
        # reorder rows without allocating every step)
        scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)
        scores_buf = scores.clone()
        tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
        tokens_buf = tokens.clone()
        tokens[:, 0] = self.eos if bos_token is None else bos_token
        attn, attn_buf = None, None
        # The blacklist indicates candidates that should be ignored.
        # For example, suppose we're sampling and have already finalized 2/5
        # samples. Then the blacklist would mark 2 positions as being ignored,
        # so that we only finalize the remaining 3 samples.
        blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
        # list of completed sentences
        finalized = [[] for i in range(bsz)]
        finished = [False for i in range(bsz)]
        num_remaining_sent = bsz
        # number of candidate hypos per step
        cand_size = 2 * beam_size # 2 x beam size in case half are EOS
        # offset arrays for converting between different indexing schemes
        bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
        cand_offsets = torch.arange(0, cand_size).type_as(tokens)
        # helper function for allocating buffers on the fly
        buffers = {}
        def buffer(name, type_of=tokens): # noqa
            if name not in buffers:
                buffers[name] = type_of.new()
            return buffers[name]
        def is_finished(sent, step, unfin_idx):
            """
            Check whether we've finished generation for a given sentence, by
            comparing the worst score among finalized hypotheses to the best
            possible score among unfinalized hypotheses.
            """
            assert len(finalized[sent]) <= beam_size
            if len(finalized[sent]) == beam_size or step == max_len:
                return True
            return False
        def finalize_hypos(step, bbsz_idx, eos_scores):
            """
            Finalize the given hypotheses at this step, while keeping the total
            number of finalized hypotheses per sentence <= beam_size.
            Note: the input must be in the desired finalization order, so that
            hypotheses that appear earlier in the input are preferred to those
            that appear later.
            Args:
                step: current time step
                bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
                    indicating which hypotheses to finalize
                eos_scores: A vector of the same size as bbsz_idx containing
                    scores for each hypothesis
            """
            assert bbsz_idx.numel() == eos_scores.numel()
            # clone relevant token and attention tensors
            tokens_clone = tokens.index_select(0, bbsz_idx)
            tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
            assert not tokens_clone.eq(self.eos).any()
            tokens_clone[:, step] = self.eos
            attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None
            # compute scores per token position
            pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
            pos_scores[:, step] = eos_scores
            # convert from cumulative to per-position scores
            pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
            # normalize sentence-level scores
            if self.normalize_scores:
                eos_scores /= (step + 1) ** self.len_penalty
            # cum_unfin maps an index over *unfinished* sentences back to the
            # original batch index, accounting for already-finished sentences
            cum_unfin = []
            prev = 0
            for f in finished:
                if f:
                    prev += 1
                else:
                    cum_unfin.append(prev)
            sents_seen = set()
            for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
                unfin_idx = idx // beam_size
                sent = unfin_idx + cum_unfin[unfin_idx]
                sents_seen.add((sent, unfin_idx))
                if self.match_source_len and step > src_lengths[unfin_idx]:
                    score = -math.inf
                def get_hypo():
                    if attn_clone is not None:
                        # remove padding tokens from attn scores
                        hypo_attn = attn_clone[i]
                    else:
                        hypo_attn = None
                    return {
                        'tokens': tokens_clone[i],
                        'score': score,
                        'attention': hypo_attn, # src_len x tgt_len
                        'alignment': None,
                        'positional_scores': pos_scores[i],
                    }
                if len(finalized[sent]) < beam_size:
                    finalized[sent].append(get_hypo())
            newly_finished = []
            for sent, unfin_idx in sents_seen:
                # check termination conditions for this sentence
                if not finished[sent] and is_finished(sent, step, unfin_idx):
                    finished[sent] = True
                    newly_finished.append(unfin_idx)
            return newly_finished
        reorder_state = None
        batch_idxs = None
        for step in range(max_len + 1): # one extra step for EOS marker
            # reorder decoder internal states based on the prev choice of beams
            if reorder_state is not None:
                if batch_idxs is not None:
                    # update beam indices to take into account removed sentences
                    corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
                    reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
                model.reorder_incremental_state(reorder_state)
                encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state)
            lprobs, avg_attn_scores = model.forward_decoder(
                tokens[:, :step + 1], encoder_outs, temperature=self.temperature,
            )
            # replace NaNs so they can never win the beam comparison
            lprobs[lprobs != lprobs] = -math.inf
            lprobs[:, self.pad] = -math.inf # never select pad
            lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
            # handle max length constraint
            if step >= max_len:
                lprobs[:, :self.eos] = -math.inf
                lprobs[:, self.eos + 1:] = -math.inf
            # handle prefix tokens (possibly with different lengths)
            if prefix_tokens is not None and step < prefix_tokens.size(1) and step < max_len:
                prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
                prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
                prefix_mask = prefix_toks.ne(self.pad)
                # force the prefix token: zero out everything else, keep its
                # original log-probability
                lprobs[prefix_mask] = -math.inf
                lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(
                    -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
                )
                # if prefix includes eos, then we should make sure tokens and
                # scores are the same across all beams
                eos_mask = prefix_toks.eq(self.eos)
                if eos_mask.any():
                    # validate that the first beam matches the prefix
                    first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
                    eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
                    target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
                    assert (first_beam == target_prefix).all()
                    def replicate_first_beam(tensor, mask):
                        tensor = tensor.view(-1, beam_size, tensor.size(-1))
                        tensor[mask] = tensor[mask][:, :1, :]
                        return tensor.view(-1, tensor.size(-1))
                    # copy tokens, scores and lprobs from the first beam to all beams
                    tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
                    scores = replicate_first_beam(scores, eos_mask_batch_dim)
                    lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
            elif step < self.min_len:
                # minimum length constraint (does not apply if using prefix_tokens)
                lprobs[:, self.eos] = -math.inf
            if self.no_repeat_ngram_size > 0:
                # for each beam and batch sentence, generate a list of previous ngrams
                gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
                cpu_tokens = tokens.cpu()
                for bbsz_idx in range(bsz * beam_size):
                    gen_tokens = cpu_tokens[bbsz_idx].tolist()
                    for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
                        if ngram[-1] != self.pad:
                            gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
                                    gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
            # Record attention scores
            if type(avg_attn_scores) is list:
                avg_attn_scores = avg_attn_scores[0]
            if avg_attn_scores is not None:
                if attn is None:
                    attn = scores.new(bsz * beam_size, avg_attn_scores.size(1), max_len + 2)
                    attn_buf = attn.clone()
                attn[:, :, step + 1].copy_(avg_attn_scores)
            scores = scores.type_as(lprobs)
            scores_buf = scores_buf.type_as(lprobs)
            eos_bbsz_idx = buffer('eos_bbsz_idx')
            eos_scores = buffer('eos_scores', type_of=scores)
            self.search.set_src_lengths(src_lengths)
            if self.no_repeat_ngram_size > 0:
                def calculate_banned_tokens(bbsz_idx):
                    # before decoding the next token, prevent decoding of ngrams that have already appeared
                    ngram_index = tuple(cpu_tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
                    banned_tokens_per_sample = gen_ngrams[bbsz_idx].get(ngram_index, [])
                    banned_tokens_per_sample = [(bbsz_idx, t) for t in banned_tokens_per_sample]
                    return banned_tokens_per_sample
                banned_tokens = []
                if step + 2 - self.no_repeat_ngram_size >= 0:
                    # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet
                    for bbsz_idx in range(bsz * beam_size):
                        banned_tokens.extend(calculate_banned_tokens(bbsz_idx))
                if banned_tokens:
                    banned_tokens = torch.LongTensor(banned_tokens)
                    lprobs.index_put_(tuple(banned_tokens.t()), lprobs.new_tensor([-math.inf] * len(banned_tokens)))
            # ask the search strategy for the cand_size best
            # (score, token, beam) candidates for this step
            cand_scores, cand_indices, cand_beams = self.search.step(
                step,
                lprobs.view(bsz, -1, self.vocab_size),
                scores.view(bsz, beam_size, -1)[:, :, :step],
            )
            # cand_bbsz_idx contains beam indices for the top candidate
            # hypotheses, with a range of values: [0, bsz*beam_size),
            # and dimensions: [bsz, cand_size]
            cand_bbsz_idx = cand_beams.add(bbsz_offsets)
            # finalize hypotheses that end in eos, except for blacklisted ones
            # or candidates with a score of -inf
            eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
            eos_mask[:, :beam_size][blacklist] = 0
            # only consider eos when it's among the top beam_size indices
            torch.masked_select(
                cand_bbsz_idx[:, :beam_size],
                mask=eos_mask[:, :beam_size],
                out=eos_bbsz_idx,
            )
            finalized_sents = set()
            if eos_bbsz_idx.numel() > 0:
                torch.masked_select(
                    cand_scores[:, :beam_size],
                    mask=eos_mask[:, :beam_size],
                    out=eos_scores,
                )
                finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
                num_remaining_sent -= len(finalized_sents)
            assert num_remaining_sent >= 0
            if num_remaining_sent == 0:
                break
            assert step < max_len
            if len(finalized_sents) > 0:
                # shrink the batch: drop rows of every per-hypothesis tensor
                # belonging to sentences that just finished
                new_bsz = bsz - len(finalized_sents)
                # construct batch_idxs which holds indices of batches to keep for the next pass
                batch_mask = cand_indices.new_ones(bsz)
                batch_mask[cand_indices.new(finalized_sents)] = 0
                batch_idxs = batch_mask.nonzero().squeeze(-1)
                eos_mask = eos_mask[batch_idxs]
                cand_beams = cand_beams[batch_idxs]
                bbsz_offsets.resize_(new_bsz, 1)
                cand_bbsz_idx = cand_beams.add(bbsz_offsets)
                cand_scores = cand_scores[batch_idxs]
                cand_indices = cand_indices[batch_idxs]
                if prefix_tokens is not None:
                    prefix_tokens = prefix_tokens[batch_idxs]
                src_lengths = src_lengths[batch_idxs]
                blacklist = blacklist[batch_idxs]
                scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                scores_buf.resize_as_(scores)
                tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
                tokens_buf.resize_as_(tokens)
                if attn is not None:
                    attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
                    attn_buf.resize_as_(attn)
                bsz = new_bsz
            else:
                batch_idxs = None
            # Set active_mask so that values > cand_size indicate eos or
            # blacklisted hypos and values < cand_size indicate candidate
            # active hypos. After this, the min values per row are the top
            # candidate active hypos.
            active_mask = buffer('active_mask')
            eos_mask[:, :beam_size] |= blacklist
            torch.add(
                eos_mask.type_as(cand_offsets) * cand_size,
                cand_offsets[:eos_mask.size(1)],
                out=active_mask,
            )
            # get the top beam_size active hypotheses, which are just the hypos
            # with the smallest values in active_mask
            active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
            torch.topk(
                active_mask, k=beam_size, dim=1, largest=False,
                out=(new_blacklist, active_hypos)
            )
            # update blacklist to ignore any finalized hypos
            blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
            assert (~blacklist).any(dim=1).all()
            active_bbsz_idx = buffer('active_bbsz_idx')
            torch.gather(
                cand_bbsz_idx, dim=1, index=active_hypos,
                out=active_bbsz_idx,
            )
            active_scores = torch.gather(
                cand_scores, dim=1, index=active_hypos,
                out=scores[:, step].view(bsz, beam_size),
            )
            active_bbsz_idx = active_bbsz_idx.view(-1)
            active_scores = active_scores.view(-1)
            # copy tokens and scores for active hypotheses
            torch.index_select(
                tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
                out=tokens_buf[:, :step + 1],
            )
            torch.gather(
                cand_indices, dim=1, index=active_hypos,
                out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
            )
            if step > 0:
                torch.index_select(
                    scores[:, :step], dim=0, index=active_bbsz_idx,
                    out=scores_buf[:, :step],
                )
            torch.gather(
                cand_scores, dim=1, index=active_hypos,
                out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
            )
            # copy attention for active hypotheses
            if attn is not None:
                torch.index_select(
                    attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
                    out=attn_buf[:, :, :step + 2],
                )
            # swap buffers
            tokens, tokens_buf = tokens_buf, tokens
            scores, scores_buf = scores_buf, scores
            if attn is not None:
                attn, attn_buf = attn_buf, attn
            # reorder incremental state in decoder
            reorder_state = active_bbsz_idx
        # sort by score descending
        for sent in range(len(finalized)):
            finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
        return finalized
class EnsembleModel(torch.nn.Module):
    """A wrapper around an ensemble of models."""
    def __init__(self, models):
        super().__init__()
        self.models = torch.nn.ModuleList(models)
        # Incremental (cached) decoding is only enabled when every model has
        # an incremental decoder; states are keyed by model identity.
        self.incremental_states = None
        if all(hasattr(m, 'decoder') and isinstance(m.decoder, FairseqIncrementalDecoder) for m in models):
            self.incremental_states = {m: {} for m in models}
    def has_encoder(self):
        # True for encoder-decoder models; False for decoder-only models.
        return hasattr(self.models[0], 'encoder')
    def max_decoder_positions(self):
        # The ensemble is limited by its most constrained member.
        return min(m.max_decoder_positions() for m in self.models)
    @torch.no_grad()
    def forward_encoder(self, encoder_input):
        """Run every model's encoder; returns None for decoder-only models."""
        if not self.has_encoder():
            return None
        return [model.encoder(**encoder_input) for model in self.models]
    @torch.no_grad()
    def forward_decoder(self, tokens, encoder_outs, temperature=1.):
        """Return (log-probs, attention) for the next step, averaged over
        the ensemble (single-model case is special-cased for speed)."""
        if len(self.models) == 1:
            return self._decode_one(
                tokens,
                self.models[0],
                encoder_outs[0] if self.has_encoder() else None,
                self.incremental_states,
                log_probs=True,
                temperature=temperature,
            )
        log_probs = []
        avg_attn = None
        for model, encoder_out in zip(self.models, encoder_outs):
            probs, attn = self._decode_one(
                tokens,
                model,
                encoder_out,
                self.incremental_states,
                log_probs=True,
                temperature=temperature,
            )
            log_probs.append(probs)
            if attn is not None:
                if avg_attn is None:
                    avg_attn = attn
                else:
                    avg_attn.add_(attn)
        # average probabilities in log space: logsumexp(...) - log(N)
        avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(len(self.models))
        if avg_attn is not None:
            avg_attn.div_(len(self.models))
        return avg_probs, avg_attn
    def _decode_one(
        self, tokens, model, encoder_out, incremental_states, log_probs,
        temperature=1.,
    ):
        # Single-model decode for the latest position only.
        if self.incremental_states is not None:
            decoder_out = list(model.forward_decoder(
                tokens, encoder_out=encoder_out, incremental_state=self.incremental_states[model],
            ))
        else:
            decoder_out = list(model.forward_decoder(tokens, encoder_out=encoder_out))
        # keep only the last time step of the decoder output
        decoder_out[0] = decoder_out[0][:, -1:, :]
        if temperature != 1.:
            decoder_out[0].div_(temperature)
        # attention may be returned as a dict or a list depending on the model
        attn = decoder_out[1] if len(decoder_out) > 1 else None
        if type(attn) is dict:
            attn = attn.get('attn', None)
        if type(attn) is list:
            attn = attn[0]
        if attn is not None:
            attn = attn[:, -1, :]
        probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
        probs = probs[:, -1, :]
        return probs, attn
    def reorder_encoder_out(self, encoder_outs, new_order):
        """Reorder each model's encoder output rows to follow beam selection."""
        if not self.has_encoder():
            return
        return [
            model.encoder.reorder_encoder_out(encoder_out, new_order)
            for model, encoder_out in zip(self.models, encoder_outs)
        ]
    def reorder_incremental_state(self, new_order):
        """Reorder cached decoder state to follow beam selection (no-op when
        incremental decoding is disabled)."""
        if self.incremental_states is None:
            return
        for model in self.models:
            model.decoder.reorder_incremental_state(self.incremental_states[model], new_order)
class SequenceGeneratorWithAlignment(SequenceGenerator):
    def __init__(self, tgt_dict, left_pad_target=False, **kwargs):
        """Generates translations of a given source sentence.
        Produces alignments following "Jointly Learning to Align and
        Translate with Transformer Models" (Garg et al., EMNLP 2019).
        Args:
            left_pad_target (bool, optional): Whether or not the
                hypothesis should be left padded or not when they are
                teacher forced for generating alignments.
        """
        super().__init__(tgt_dict, **kwargs)
        self.left_pad_target = left_pad_target
    @torch.no_grad()
    def generate(self, models, sample, **kwargs):
        """Generate translations as the base class does, then fill in each
        hypothesis's 'alignment' field from the attention matrices."""
        model = EnsembleModelWithAlignment(models)
        finalized = super()._generate(model, sample, **kwargs)
        src_tokens = sample['net_input']['src_tokens']
        bsz = src_tokens.shape[0]
        beam_size = self.beam_size
        src_tokens, src_lengths, prev_output_tokens, tgt_tokens = \
            self._prepare_batch_for_alignment(sample, finalized)
        if any(getattr(m, 'full_context_alignment', False) for m in model.models):
            # re-run a teacher-forced forward pass to get full-context attention
            attn = model.forward_align(src_tokens, src_lengths, prev_output_tokens)
        else:
            # reuse the attention recorded during generation
            attn = [
                finalized[i // beam_size][i % beam_size]['attention'].transpose(1, 0)
                for i in range(bsz * beam_size)
            ]
        # Process the attn matrix to extract hard alignments.
        for i in range(bsz * beam_size):
            alignment = utils.extract_hard_alignment(attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos)
            finalized[i // beam_size][i % beam_size]['alignment'] = alignment
        return finalized
    def _prepare_batch_for_alignment(self, sample, hypothesis):
        """Tile source tensors beam_size times and collate every hypothesis
        into (teacher-forcing input, target) token batches."""
        src_tokens = sample['net_input']['src_tokens']
        bsz = src_tokens.shape[0]
        # repeat each source row once per beam hypothesis
        src_tokens = src_tokens[:, None, :].expand(-1, self.beam_size, -1).contiguous().view(bsz * self.beam_size, -1)
        src_lengths = sample['net_input']['src_lengths']
        src_lengths = src_lengths[:, None].expand(-1, self.beam_size).contiguous().view(bsz * self.beam_size)
        # shifted-right tokens for teacher forcing ...
        prev_output_tokens = data_utils.collate_tokens(
            [beam['tokens'] for example in hypothesis for beam in example],
            self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True,
        )
        # ... and the unshifted targets for alignment extraction
        tgt_tokens = data_utils.collate_tokens(
            [beam['tokens'] for example in hypothesis for beam in example],
            self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False,
        )
        return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
    """A wrapper around an ensemble of models that can also produce the
    attention needed for alignment extraction.
    Step-wise decoding (``_decode_one``) is inherited unchanged from
    :class:`EnsembleModel`; the previous local copy of that method was a
    byte-for-byte semantic duplicate of the parent's and has been removed.
    This subclass only adds :meth:`forward_align`.
    """
    def __init__(self, models):
        super().__init__(models)
    def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
        """Teacher-force *prev_output_tokens* through every model and return
        the attention, summed over models and divided by the ensemble size.
        Args:
            src_tokens: source token batch.
            src_lengths: per-sentence source lengths.
            prev_output_tokens: shifted target tokens for teacher forcing.
        Returns:
            the (ensemble-averaged) attention tensor taken from each model's
            ``decoder_out[1]['attn']`` entry.
        """
        avg_attn = None
        for model in self.models:
            decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
            attn = decoder_out[1]['attn']
            if avg_attn is None:
                avg_attn = attn
            else:
                # accumulate in place to avoid allocating per model
                avg_attn.add_(attn)
        if len(self.models) > 1:
            avg_attn.div_(len(self.models))
        return avg_attn
| 30,021 | 41.403955 | 118 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/legacy_distributed_data_parallel.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A modified version of the legacy DistributedDataParallel module that uses c10d
communication primitives. This version is simpler than the latest PyTorch
version and is useful for debugging. Notably it does not overlap gradient
communication with the backward pass, which makes it slower but more robust
than the PyTorch version.
This version also supports the *no_sync* context manager, which allows faster
training with `--update-freq`.
"""
from contextlib import contextmanager
import copy
import torch
from torch import nn
from torch.autograd import Variable
from . import distributed_utils
class LegacyDistributedDataParallel(nn.Module):
    """Implements distributed data parallelism at the module level.
    A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
    This version uses a c10d process group for communication and does not
    broadcast buffers.
    Args:
        module (~torch.nn.Module): module to be parallelized
        world_size (int): number of parallel workers
        process_group (optional): the c10d process group to be used for
            distributed data all-reduction. If None, the default process group
            will be used.
        buffer_size (int, optional): number of elements to buffer before
            performing all-reduce (default: 256M).
    """
    def __init__(self, module, world_size, process_group=None, buffer_size=2**28):
        super().__init__()
        self.module = module
        self.world_size = world_size
        self.process_group = process_group
        # Never use a bigger buffer than the number of model params
        self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))
        # Flat staging buffer for bucketed all-reduce; allocated lazily on
        # first reduction so it lands on the same device/dtype as the params.
        self.buffer = None
        # Flag used by the NCCL backend to make sure we only reduce gradients
        # one time in the execution engine
        self.need_reduction = False
        # We can also forcibly accumulate grads locally and only do the
        # all-reduce at some later time
        self.accumulate_grads = False
        # For NCCL backend, since every single NCCL call is asynchronous, we
        # therefore directly enqueue all the NCCL reduction calls to the
        # default CUDA stream without spawning up other reduction threads.
        # This achieves the best performance.
        self._register_grad_hook()
    def __getstate__(self):
        # Pickle the full instance dict as-is; hooks are re-registered on load.
        attrs = copy.copy(self.__dict__)
        return attrs
    def __setstate__(self, state):
        super().__setstate__(state)
        # Hooks are not picklable, so they must be re-installed after unpickling.
        self._register_grad_hook()
    @contextmanager
    def no_sync(self):
        """A context manager to disable gradient synchronization."""
        old_accumulate_grads = self.accumulate_grads
        self.accumulate_grads = True
        yield
        # Restore the previous setting (supports nested use).
        self.accumulate_grads = old_accumulate_grads
    def forward(self, *inputs, **kwargs):
        # Pure pass-through; reduction happens via the registered backward hook.
        return self.module(*inputs, **kwargs)
    def _register_grad_hook(self):
        """
        This function registers the callback all-reduction function for the
        NCCL backend. All gradients will be all reduced in one single step.
        The NCCL reduction will directly be enqueued into the default CUDA
        stream. Therefore, no synchronization is needed.
        """
        def all_reduce(params):
            # Pack the grads of `params` into one flat buffer, all-reduce it
            # once, then copy the averaged grads back into each param.
            buffer = self.buffer
            nonzero_buffer = False
            if len(params) > 1:
                offset = 0
                for p in params:
                    sz = p.numel()
                    if p.grad is not None:
                        buffer[offset:offset+sz].copy_(p.grad.data.view(-1))
                        nonzero_buffer = True
                    else:
                        # Param took no gradient this step; contribute zeros.
                        buffer[offset:offset+sz].zero_()
                    offset += sz
            else:
                # we only have a single grad to all-reduce
                p = params[0]
                if p.grad is not None:
                    # Reduce directly in the grad tensor; no copy needed.
                    buffer = p.grad.data
                    nonzero_buffer = True
                elif p.numel() <= self.buffer.numel():
                    buffer = buffer[:p.numel()]
                    buffer.zero_()
                else:
                    buffer = torch.zeros_like(p)
            if nonzero_buffer:
                # Pre-divide so the sum across workers yields the mean.
                buffer.div_(self.world_size)
            distributed_utils.all_reduce(buffer, self.process_group)
            # copy all-reduced grads back into their original place
            offset = 0
            for p in params:
                sz = p.numel()
                if p.grad is not None:
                    p.grad.data.copy_(buffer[offset:offset+sz].view_as(p))
                else:
                    p.grad = buffer[offset:offset+sz].view_as(p).clone()
                offset += sz
        def reduction_fn():
            # This function only needs to be called once
            if not self.need_reduction or self.accumulate_grads:
                return
            self.need_reduction = False
            if self.buffer is None:
                # Lazily allocate the flat buffer with the params' dtype/device.
                self.buffer = next(self.module.parameters()).new(self.buffer_size)
            # All-reduce the gradients in buckets
            offset = 0
            buffered_params = []
            for param in self.module.parameters():
                if not param.requires_grad:
                    continue
                if param.grad is None:
                    param.grad = torch.zeros_like(param)
                if param.grad.requires_grad:
                    raise RuntimeError("DistributedDataParallel only works "
                                       "with gradients that don't require "
                                       "grad")
                sz = param.numel()
                if sz > self.buffer.numel():
                    # all-reduce big params directly
                    all_reduce([param])
                else:
                    if offset + sz > self.buffer.numel():
                        # Bucket is full; flush it before adding this param.
                        all_reduce(buffered_params)
                        offset = 0
                        buffered_params.clear()
                    buffered_params.append(param)
                    offset += sz
            if len(buffered_params) > 0:
                # Flush the final, partially-filled bucket.
                all_reduce(buffered_params)
        # Now register the reduction hook on the parameters
        for p in self.module.parameters():
            def allreduce_hook(*unused):
                # Queue the reduction to run once, after the whole backward
                # pass has finished in the autograd execution engine.
                self.need_reduction = True
                Variable._execution_engine.queue_callback(reduction_fn)
            if p.requires_grad:
                p.register_hook(allreduce_hook)
| 6,724 | 36.154696 | 88 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/options.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import sys
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
def get_preprocessing_parser(default_task="translation"):
    """Build the argument parser used by the preprocessing entry point."""
    preprocess_parser = get_parser("Preprocessing", default_task)
    add_preprocess_args(preprocess_parser)
    return preprocess_parser
def get_training_parser(default_task="translation"):
    """Build the argument parser used by the training entry point."""
    train_parser = get_parser("Trainer", default_task)
    # Training needs the full option surface: data loading, distributed
    # setup, model selection, optimization, and checkpointing.
    for add_group in (
        lambda p: add_dataset_args(p, train=True),
        add_distributed_training_args,
        add_model_args,
        add_optimization_args,
        add_checkpoint_args,
    ):
        add_group(train_parser)
    return train_parser
def get_generation_parser(interactive=False, default_task="translation"):
    """Build the argument parser used by the generation entry point.

    When ``interactive`` is True, options for reading input from
    stdin/file are included as well.
    """
    gen_parser = get_parser("Generation", default_task)
    add_dataset_args(gen_parser, gen=True)
    add_generation_args(gen_parser)
    if interactive:
        add_interactive_args(gen_parser)
    return gen_parser
def get_interactive_generation_parser(default_task="translation"):
    """Shorthand for :func:`get_generation_parser` in interactive mode."""
    return get_generation_parser(
        interactive=True, default_task=default_task
    )
def get_eval_lm_parser(default_task="language_modeling"):
    """Build the argument parser used for language-model evaluation."""
    eval_parser = get_parser("Evaluate Language Model", default_task)
    add_dataset_args(eval_parser, gen=True)
    add_eval_lm_args(eval_parser)
    return eval_parser
def get_validation_parser(default_task=None):
    """Build the argument parser used by the standalone validation tool."""
    valid_parser = get_parser("Validation", default_task)
    add_dataset_args(valid_parser, train=True)
    eval_group = valid_parser.add_argument_group("Evaluation")
    add_common_eval_args(eval_group)
    return valid_parser
def eval_str_list(x, type=float):
    """Parse ``x`` into a list of values converted with ``type``.

    Strings are evaluated with :func:`eval` (e.g. ``"[0.1, 0.2]"``),
    non-iterable scalars are wrapped in a one-element list, and ``None``
    passes through unchanged. NOTE: ``eval`` assumes trusted (CLI) input.
    """
    if x is None:
        return None
    parsed = eval(x) if isinstance(x, str) else x
    try:
        return [type(item) for item in parsed]
    except TypeError:
        # Not iterable: treat it as a single scalar value.
        return [type(parsed)]
def eval_bool(x, default=False):
    """Evaluate ``x`` as a boolean, falling back to ``default``.

    ``None`` and non-string, non-evaluable values yield ``default``.
    NOTE: uses :func:`eval`, so input is assumed to be trusted.
    """
    if x is None:
        return default
    try:
        result = bool(eval(x))
    except TypeError:
        result = default
    return result
def parse_args_and_arch(
    parser: argparse.ArgumentParser,
    input_args: List[str] = None,
    parse_known: bool = False,
    suppress_defaults: bool = False,
    modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
    """
    Parse arguments in two passes: a first pass discovers which
    model/task/criterion/optimizer was selected, their extra options are
    added to the parser, and a second pass parses everything.

    Args:
        parser (ArgumentParser): the parser
        input_args (List[str]): strings to parse, defaults to sys.argv
        parse_known (bool): only parse known arguments, similar to
            `ArgumentParser.parse_known_args`
        suppress_defaults (bool): parse while ignoring all default values
        modify_parser (Optional[Callable[[ArgumentParser], None]]):
            function to modify the parser, e.g., to set default values

    Returns:
        the parsed Namespace, or ``(namespace, extra)`` when ``parse_known``
        is True (``extra`` is the list of unrecognized argument strings).
    """
    if suppress_defaults:
        # Parse args without any default values. This requires us to parse
        # twice, once to identify all the necessary task/model args, and a second
        # time with all defaults set to None.
        args = parse_args_and_arch(
            parser,
            input_args=input_args,
            parse_known=parse_known,
            suppress_defaults=False,
        )
        suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
        suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
        args = suppressed_parser.parse_args(input_args)
        # Drop every attribute that stayed None, i.e. keep only values that
        # were explicitly provided on the command line.
        return argparse.Namespace(
            **{k: v for k, v in vars(args).items() if v is not None}
        )
    from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
    # Before creating the true parser, we need to import optional user module
    # in order to eagerly import custom tasks, optimizers, architectures, etc.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument("--user-dir", default=None)
    usr_args, _ = usr_parser.parse_known_args(input_args)
    utils.import_user_module(usr_args)
    if modify_parser is not None:
        modify_parser(parser)
    # The parser doesn't know about model/criterion/optimizer-specific args, so
    # we parse twice. First we parse the model/criterion/optimizer, then we
    # parse a second time after adding the *-specific arguments.
    # If input_args is given, we will parse those args instead of sys.argv.
    args, _ = parser.parse_known_args(input_args)
    # Add model-specific args to parser.
    if hasattr(args, "arch"):
        model_specific_group = parser.add_argument_group(
            "Model-specific configuration",
            # Only include attributes which are explicitly given as command-line
            # arguments or which have default values.
            argument_default=argparse.SUPPRESS,
        )
        ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
    # Add *-specific args to parser.
    from fairseq.registry import REGISTRIES
    for registry_name, REGISTRY in REGISTRIES.items():
        choice = getattr(args, registry_name, None)
        if choice is not None:
            cls = REGISTRY["registry"][choice]
            if hasattr(cls, "add_args"):
                cls.add_args(parser)
    if hasattr(args, "task"):
        from fairseq.tasks import TASK_REGISTRY
        TASK_REGISTRY[args.task].add_args(parser)
    if getattr(args, "use_bmuf", False):
        # hack to support extra args for block distributed data parallelism
        from fairseq.optim.bmuf import FairseqBMUF
        FairseqBMUF.add_args(parser)
    # Modify the parser a second time, since defaults may have been reset
    if modify_parser is not None:
        modify_parser(parser)
    # Parse a second time.
    if parse_known:
        args, extra = parser.parse_known_args(input_args)
    else:
        args = parser.parse_args(input_args)
        extra = None
    # Post-process args.
    if hasattr(args, "max_sentences_valid") and args.max_sentences_valid is None:
        # Fall back to the training batch limits for validation.
        args.max_sentences_valid = args.max_sentences
    if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
        args.max_tokens_valid = args.max_tokens
    if getattr(args, "memory_efficient_fp16", False):
        # --memory-efficient-fp16 implies --fp16
        args.fp16 = True
    # Apply architecture configuration.
    if hasattr(args, "arch"):
        ARCH_CONFIG_REGISTRY[args.arch](args)
    if parse_known:
        return args, extra
    else:
        return args
def get_parser(desc, default_task="translation"):
    """Build the base parser with options common to all fairseq tools.

    ``desc`` is currently unused by the parser itself; ``default_task``
    sets the default for ``--task``.
    """
    # Before creating the true parser, we need to import optional user module
    # in order to eagerly import custom tasks, optimizers, architectures, etc.
    usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    usr_parser.add_argument("--user-dir", default=None)
    usr_args, _ = usr_parser.parse_known_args()
    utils.import_user_module(usr_args)
    parser = argparse.ArgumentParser(allow_abbrev=False)
    # fmt: off
    parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
    parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                        help='log progress every N batches (when progress bar is disabled)')
    parser.add_argument('--log-format', default=None, help='log format to use',
                        choices=['json', 'none', 'simple', 'tqdm'])
    parser.add_argument('--tensorboard-logdir', metavar='DIR', default='',
                        help='path to save logs for tensorboard, should match --logdir '
                             'of running tensorboard (default: no tensorboard logging)')
    parser.add_argument('--seed', default=1, type=int, metavar='N',
                        help='pseudo random number generator seed')
    parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')
    parser.add_argument('--fp16', action='store_true', help='use FP16')
    parser.add_argument('--memory-efficient-fp16', action='store_true',
                        help='use a memory-efficient version of FP16 training; implies --fp16')
    parser.add_argument('--fp16-no-flatten-grads', action='store_true',
                        help='don\'t flatten FP16 grads tensor')
    parser.add_argument('--fp16-init-scale', default=2 ** 7, type=int,
                        help='default FP16 loss scale')
    parser.add_argument('--fp16-scale-window', type=int,
                        help='number of updates before increasing loss scale')
    parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float,
                        help='pct of updates that can overflow before decreasing the loss scale')
    parser.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D',
                        help='minimum FP16 loss scale, after which training is stopped')
    parser.add_argument('--threshold-loss-scale', type=float,
                        help='threshold FP16 loss scale from below')
    parser.add_argument('--user-dir', default=None,
                        help='path to a python module containing custom extensions (tasks and/or architectures)')
    parser.add_argument('--empty-cache-freq', default=0, type=int,
                        help='how often to clear the PyTorch CUDA cache (0 to disable)')
    parser.add_argument('--all-gather-list-size', default=16384, type=int,
                        help='number of bytes reserved for gathering stats from workers')
    parser.add_argument('--model-parallel-size', type=int, metavar='N',
                        default=1,
                        help='total number of GPUs to parallelize model over')
    # One flag per pluggable component registry (criterion, optimizer,
    # lr-scheduler, ...), with choices drawn from the registered entries.
    from fairseq.registry import REGISTRIES
    for registry_name, REGISTRY in REGISTRIES.items():
        parser.add_argument(
            '--' + registry_name.replace('_', '-'),
            default=REGISTRY['default'],
            choices=REGISTRY['registry'].keys(),
        )
    # Task definitions can be found under fairseq/tasks/
    from fairseq.tasks import TASK_REGISTRY
    parser.add_argument('--task', metavar='TASK', default=default_task,
                        choices=TASK_REGISTRY.keys(),
                        help='task')
    # fmt: on
    return parser
def add_preprocess_args(parser):
    """Add data preprocessing/binarization options; returns the parser."""
    group = parser.add_argument_group("Preprocessing")
    # fmt: off
    group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
                       help="source language")
    group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
                       help="target language")
    group.add_argument("--trainpref", metavar="FP", default=None,
                       help="train file prefix")
    group.add_argument("--validpref", metavar="FP", default=None,
                       help="comma separated, valid file prefixes")
    group.add_argument("--testpref", metavar="FP", default=None,
                       help="comma separated, test file prefixes")
    group.add_argument("--align-suffix", metavar="FP", default=None,
                       help="alignment file suffix")
    group.add_argument("--destdir", metavar="DIR", default="data-bin",
                       help="destination dir")
    group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
                       help="map words appearing less than threshold times to unknown")
    group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
                       help="map words appearing less than threshold times to unknown")
    group.add_argument("--tgtdict", metavar="FP",
                       help="reuse given target dictionary")
    group.add_argument("--srcdict", metavar="FP",
                       help="reuse given source dictionary")
    group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
                       help="number of target words to retain")
    group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
                       help="number of source words to retain")
    group.add_argument("--alignfile", metavar="ALIGN", default=None,
                       help="an alignment file (optional)")
    # NOTE(review): added on `parser` rather than `group`, so it appears
    # outside the "Preprocessing" group in --help; behavior is otherwise
    # the same.
    parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
                        choices=get_available_dataset_impl(),
                        help='output dataset implementation')
    group.add_argument("--joined-dictionary", action="store_true",
                       help="Generate joined dictionary")
    group.add_argument("--only-source", action="store_true",
                       help="Only process the source language")
    group.add_argument("--padding-factor", metavar="N", default=8, type=int,
                       help="Pad dictionary size to be multiple of N")
    group.add_argument("--workers", metavar="N", default=1, type=int,
                       help="number of parallel workers")
    # fmt: on
    return parser
def add_dataset_args(parser, train=False, gen=False):
    """Add dataset/data-loading options; returns the argument group.

    ``train`` adds training-split options (validation intervals, subsets);
    ``gen`` adds generation-split and sharding options.
    """
    group = parser.add_argument_group("Dataset and data loading")
    # fmt: off
    group.add_argument('--num-workers', default=1, type=int, metavar='N',
                       help='how many subprocesses to use for data loading')
    group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',
                       help='ignore too long or too short lines in valid and test set')
    group.add_argument('--max-tokens', type=int, metavar='N',
                       help='maximum number of tokens in a batch')
    group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',
                       help='maximum number of sentences in a batch')
    group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N',
                       help='batch size will be a multiplier of this value')
    # NOTE(review): added on `parser` (not `group`) and without a default,
    # unlike the preprocessing variant which defaults to 'mmap'.
    parser.add_argument('--dataset-impl', metavar='FORMAT',
                        choices=get_available_dataset_impl(),
                        help='output dataset implementation')
    # XXX Added by Michael Xie
    group.add_argument('--add-mask-token', default=False, action='store_true')
    # ####
    if train:
        group.add_argument('--train-subset', default='train', metavar='SPLIT',
                           help='data subset to use for training (e.g. train, valid, test)')
        group.add_argument('--valid-subset', default='valid', metavar='SPLIT',
                           help='comma separated list of data subsets to use for validation'
                                ' (e.g. train, valid, test)')
        group.add_argument('--validate-interval', type=int, default=1, metavar='N',
                           help='validate every N epochs')
        group.add_argument('--fixed-validation-seed', default=None, type=int, metavar='N',
                           help='specified random seed for validation')
        group.add_argument('--disable-validation', action='store_true',
                           help='disable validation')
        group.add_argument('--max-tokens-valid', type=int, metavar='N',
                           help='maximum number of tokens in a validation batch'
                                ' (defaults to --max-tokens)')
        group.add_argument('--max-sentences-valid', type=int, metavar='N',
                           help='maximum number of sentences in a validation batch'
                                ' (defaults to --max-sentences)')
        group.add_argument('--curriculum', default=0, type=int, metavar='N',
                           help='don\'t shuffle batches for first N epochs')
    if gen:
        group.add_argument('--gen-subset', default='test', metavar='SPLIT',
                           help='data subset to generate (train, valid, test)')
        group.add_argument('--num-shards', default=1, type=int, metavar='N',
                           help='shard generation over N shards')
        group.add_argument('--shard-id', default=0, type=int, metavar='ID',
                           help='id of the shard to generate (id < num_shards)')
    # fmt: on
    return group
def add_distributed_training_args(parser):
    """Add distributed (multi-GPU/multi-node) training options; returns the group."""
    group = parser.add_argument_group("Distributed training")
    # fmt: off
    group.add_argument('--distributed-world-size', type=int, metavar='N',
                       default=max(1, torch.cuda.device_count()),
                       help='total number of GPUs across all nodes (default: all visible GPUs)')
    group.add_argument('--distributed-rank', default=0, type=int,
                       help='rank of the current worker')
    group.add_argument('--distributed-backend', default='nccl', type=str,
                       help='distributed backend')
    group.add_argument('--distributed-init-method', default=None, type=str,
                       help='typically tcp://hostname:port that will be used to '
                            'establish initial connetion')
    group.add_argument('--distributed-port', default=-1, type=int,
                       help='port number (not required if using --distributed-init-method)')
    # '--local_rank' alias is what torch.distributed.launch passes.
    group.add_argument('--device-id', '--local_rank', default=0, type=int,
                       help='which GPU to use (usually configured automatically)')
    group.add_argument('--distributed-no-spawn', action='store_true',
                       help='do not spawn multiple processes even if multiple GPUs are visible')
    # "c10d" is PyTorch's DDP implementation and provides the fastest
    # training. "no_c10d" is a more robust, but slightly slower DDP
    # implementation. Try this if you get warning messages about
    # inconsistent gradients between workers, or if some of your model
    # parameters are not always used.
    group.add_argument('--ddp-backend', default='c10d', type=str,
                       choices=['c10d', 'no_c10d'],
                       help='DistributedDataParallel backend')
    group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',
                       help='bucket size for reduction')
    group.add_argument('--fix-batches-to-gpus', action='store_true',
                       help='don\'t shuffle batches between GPUs; this reduces overall '
                            'randomness and may affect precision but avoids the cost of '
                            're-reading the data')
    group.add_argument('--find-unused-parameters', default=False, action='store_true',
                       help='disable unused parameter detection (not applicable to '
                            'no_c10d ddp-backend')
    group.add_argument('--fast-stat-sync', default=False, action='store_true',
                       help='[deprecated] this is now defined per Criterion')
    group.add_argument('--broadcast-buffers', default=False, action='store_true',
                       help='Copy non-trainable parameters between GPUs, such as '
                            'batchnorm population statistics')
    # fmt: on
    return group
def add_optimization_args(parser):
    """Add optimization options (epochs, lr, clipping, ...); returns the group."""
    group = parser.add_argument_group("Optimization")
    # fmt: off
    group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',
                       help='force stop training at specified epoch')
    group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',
                       help='force stop training at specified update')
    group.add_argument('--clip-norm', default=25, type=float, metavar='NORM',
                       help='clip threshold of gradients')
    group.add_argument('--sentence-avg', action='store_true',
                       help='normalize gradients by the number of sentences in a batch'
                            ' (default is to normalize by number of tokens)')
    # Parsed as a list of ints, one per epoch (last value repeats).
    group.add_argument('--update-freq', default='1', metavar='N1,N2,...,N_K',
                       type=lambda uf: eval_str_list(uf, type=int),
                       help='update parameters every N_i batches, when in epoch i')
    group.add_argument('--lr', '--learning-rate', default='0.25', type=eval_str_list,
                       metavar='LR_1,LR_2,...,LR_N',
                       help='learning rate for the first N epochs; all epochs >N using LR_N'
                            ' (note: this may be interpreted differently depending on --lr-scheduler)')
    group.add_argument('--min-lr', default=-1, type=float, metavar='LR',
                       help='stop training when the learning rate reaches this minimum')
    group.add_argument('--use-bmuf', default=False, action='store_true',
                       help='specify global optimizer for syncing models on different GPUs/shards')
    # fmt: on
    return group
def add_checkpoint_args(parser):
    """Add checkpoint saving/loading options; returns the argument group.

    Covers where checkpoints are written, which parts of training state
    are restored from them, and retention/early-stopping policies.
    """
    group = parser.add_argument_group("Checkpointing")
    # fmt: off
    group.add_argument('--save-dir', metavar='DIR', default='checkpoints',
                       help='path to save checkpoints')
    # Fix: help text was missing the closing parenthesis.
    group.add_argument('--restore-file', default='checkpoint_last.pt',
                       help='filename from which to load checkpoint '
                            '(default: <save-dir>/checkpoint_last.pt)')
    group.add_argument('--reset-dataloader', action='store_true',
                       help='if set, does not reload dataloader state from the checkpoint')
    group.add_argument('--reset-lr-scheduler', action='store_true',
                       help='if set, does not load lr scheduler state from the checkpoint')
    group.add_argument('--reset-meters', action='store_true',
                       help='if set, does not load meters from the checkpoint')
    group.add_argument('--reset-optimizer', action='store_true',
                       help='if set, does not load optimizer state from the checkpoint')
    group.add_argument('--optimizer-overrides', default="{}", type=str, metavar='DICT',
                       help='a dictionary used to override optimizer args when loading a checkpoint')
    group.add_argument('--save-interval', type=int, default=1, metavar='N',
                       help='save a checkpoint every N epochs')
    group.add_argument('--save-interval-updates', type=int, default=0, metavar='N',
                       help='save a checkpoint (and validate) every N updates')
    group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N',
                       help='keep the last N checkpoints saved with --save-interval-updates')
    group.add_argument('--keep-last-epochs', type=int, default=-1, metavar='N',
                       help='keep last N epoch checkpoints')
    group.add_argument('--keep-best-checkpoints', type=int, default=-1, metavar='N',
                       help='keep best N checkpoints based on scores')
    group.add_argument('--no-save', action='store_true',
                       help='don\'t save models or checkpoints')
    group.add_argument('--no-epoch-checkpoints', action='store_true',
                       help='only store last and best checkpoints')
    group.add_argument('--no-last-checkpoints', action='store_true',
                       help='don\'t store last checkpoints')
    group.add_argument('--no-save-optimizer-state', action='store_true',
                       help='don\'t save optimizer-state as part of checkpoint')
    group.add_argument('--best-checkpoint-metric', type=str, default='loss',
                       help='metric to use for saving "best" checkpoints')
    group.add_argument('--maximize-best-checkpoint-metric', action='store_true',
                       help='select the largest metric value for saving "best" checkpoints')
    group.add_argument('--patience', type=int, default=-1, metavar='N',
                       help=('early stop training if valid performance doesn\'t '
                             'improve for N consecutive validation runs; note '
                             'that this is influenced by --validate-interval'))
    # fmt: on
    return group
def add_common_eval_args(group):
    """Add evaluation options shared by generation and LM scoring.

    Unlike the other ``add_*`` helpers, this takes an existing argument
    group (not a parser) and returns nothing.
    """
    # fmt: off
    group.add_argument('--path', metavar='FILE',
                       help='path(s) to model file(s), colon separated')
    group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,
                       help='remove BPE tokens before scoring (can be set to sentencepiece)')
    group.add_argument('--quiet', action='store_true',
                       help='only print final scores')
    group.add_argument('--model-overrides', default="{}", type=str, metavar='DICT',
                       help='a dictionary used to override model args at generation '
                            'that were used during model training')
    group.add_argument('--results-path', metavar='RESDIR', type=str, default=None,
                       help='path to save eval results (optional)"')
    # fmt: on
def add_eval_lm_args(parser):
    """Add language-model evaluation options (includes common eval args)."""
    group = parser.add_argument_group("LM Evaluation")
    add_common_eval_args(group)
    # fmt: off
    group.add_argument('--output-word-probs', action='store_true',
                       help='if set, outputs words and their predicted log probabilities to standard output')
    group.add_argument('--output-word-stats', action='store_true',
                       help='if set, outputs word statistics such as word count, average probability, etc')
    group.add_argument('--context-window', default=0, type=int, metavar='N',
                       help='ensures that every evaluated token has access to a context of at least this size,'
                            ' if possible')
    group.add_argument('--softmax-batch', default=sys.maxsize, type=int, metavar='N',
                       help='if BxT is more than this, will batch the softmax over vocab to this amount of tokens'
                            ' in order to fit into GPU memory')
    # fmt: on
def add_generation_args(parser):
    """Add sequence-generation options (beam search, sampling, iterative
    refinement); returns the argument group."""
    group = parser.add_argument_group("Generation")
    add_common_eval_args(group)
    # fmt: off
    group.add_argument('--beam', default=5, type=int, metavar='N',
                       help='beam size')
    group.add_argument('--nbest', default=1, type=int, metavar='N',
                       help='number of hypotheses to output')
    group.add_argument('--max-len-a', default=0, type=float, metavar='N',
                       help=('generate sequences of maximum length ax + b, '
                             'where x is the source length'))
    group.add_argument('--max-len-b', default=200, type=int, metavar='N',
                       help=('generate sequences of maximum length ax + b, '
                             'where x is the source length'))
    group.add_argument('--min-len', default=1, type=float, metavar='N',
                       help=('minimum generation length'))
    group.add_argument('--match-source-len', default=False, action='store_true',
                       help=('generations should match the source length'))
    group.add_argument('--no-early-stop', action='store_true',
                       help='deprecated')
    group.add_argument('--unnormalized', action='store_true',
                       help='compare unnormalized hypothesis scores')
    group.add_argument('--no-beamable-mm', action='store_true',
                       help='don\'t use BeamableMM in attention layers')
    group.add_argument('--lenpen', default=1, type=float,
                       help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
    group.add_argument('--unkpen', default=0, type=float,
                       help='unknown word penalty: <0 produces more unks, >0 produces fewer')
    group.add_argument('--replace-unk', nargs='?', const=True, default=None,
                       help='perform unknown replacement (optionally with alignment dictionary)')
    group.add_argument('--sacrebleu', action='store_true',
                       help='score with sacrebleu')
    group.add_argument('--score-reference', action='store_true',
                       help='just score the reference translation')
    group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
                       help='initialize generation by target prefix of given length')
    group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N',
                       help='ngram blocking such that this size ngram cannot be repeated in the generation')
    group.add_argument('--sampling', action='store_true',
                       help='sample hypotheses instead of using beam search')
    group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
                       help='sample from top K likely next words instead of all words')
    group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS',
                       help='sample from the smallest set whose cumulative probability mass exceeds p for next words')
    group.add_argument('--temperature', default=1., type=float, metavar='N',
                       help='temperature for generation')
    group.add_argument('--diverse-beam-groups', default=-1, type=int, metavar='N',
                       help='number of groups for Diverse Beam Search')
    group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',
                       help='strength of diversity penalty for Diverse Beam Search')
    group.add_argument('--diversity-rate', default=-1.0, type=float, metavar='N',
                       help='strength of diversity penalty for Diverse Siblings Search')
    group.add_argument('--print-alignment', action='store_true',
                       help='if set, uses attention feedback to compute and print alignment to source tokens')
    group.add_argument('--print-step', action='store_true')
    # arguments for iterative refinement generator
    group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N',
                       help='if > 0.0, it penalized early-stopping in decoding.')
    group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N',
                       help='maximum iterations for iterative refinement.')
    group.add_argument('--iter-decode-force-max-iter', action='store_true',
                       help='if set, run exact the maximum number of iterations without early stop')
    group.add_argument('--iter-decode-with-beam', default=1, type=int, metavar='N',
                       help='if > 1, model will generate translations varying by the lengths.')
    # NOTE(review): the stray trailing comma below makes this statement a
    # one-element tuple expression; harmless, but likely unintentional.
    group.add_argument('--iter-decode-with-external-reranker', action='store_true',
                       help='if set, the last checkpoint are assumed to be a reranker to rescore the translations'),
    group.add_argument('--retain-iter-history', action='store_true',
                       help='if set, decoding returns the whole history of iterative refinement')
    # special decoding format for advanced decoding.
    group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 'vote', 'dp', 'bs'])
    # XXX Added by Michael Xie
    group.add_argument('--generate-second-half', default=False, action='store_true')
    # fmt: on
    return group
def add_interactive_args(parser):
    """Register options used by interactive generation (stdin/file input)."""
    interactive = parser.add_argument_group("Interactive")
    # fmt: off
    interactive.add_argument('--buffer-size', metavar='N', type=int, default=0,
                             help='read this many sentences into a buffer before processing them')
    interactive.add_argument('--input', metavar='FILE', type=str, default='-',
                             help='file to read from; use - for stdin')
    # fmt: on
def add_model_args(parser):
    """Add model architecture selection options; returns the group."""
    group = parser.add_argument_group("Model configuration")
    # fmt: off
    # Model definitions can be found under fairseq/models/
    #
    # The model architecture can be specified in several ways.
    # In increasing order of priority:
    # 1) model defaults (lowest priority)
    # 2) --arch argument
    # 3) --encoder/decoder-* arguments (highest priority)
    from fairseq.models import ARCH_MODEL_REGISTRY
    group.add_argument('--arch', '-a', default='fconv', metavar='ARCH',
                       choices=ARCH_MODEL_REGISTRY.keys(),
                       help='Model Architecture')
    # NOTE(review): the sampling options below duplicate those registered by
    # add_generation_args; this only works because the two helpers are never
    # applied to the same parser (training vs. generation) — presumably they
    # were copied here so the training parser also accepts them. Verify
    # before deduplicating.
    group.add_argument('--sampling', action='store_true',
                       help='sample hypotheses instead of using beam search')
    group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
                       help='sample from top K likely next words instead of all words')
    group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS',
                       help='sample from the smallest set whose cumulative probability mass exceeds p for next words')
    # fmt: on
    return group
| 32,607 | 51.849271 | 120 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/bleu.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import math
import torch
# Load the compiled libbleu C extension; its functions are invoked through
# ctypes below (bleu_add, bleu_zero_init, ...).
try:
    from fairseq import libbleu
except ImportError as e:
    import sys
    sys.stderr.write('ERROR: missing libbleu.so. run `pip install --editable .`\n')
    raise e
# ctypes handle to the shared library backing the Scorer class.
C = ctypes.cdll.LoadLibrary(libbleu.__file__)
class BleuStat(ctypes.Structure):
    """ctypes mirror of the bleu_stat struct in libbleu.

    Field names, types, and ORDER must match the C definition exactly,
    since instances are passed to the library by reference.
    """
    _fields_ = [
        ('reflen', ctypes.c_size_t),
        ('predlen', ctypes.c_size_t),
        ('match1', ctypes.c_size_t),
        ('count1', ctypes.c_size_t),
        ('match2', ctypes.c_size_t),
        ('count2', ctypes.c_size_t),
        ('match3', ctypes.c_size_t),
        ('count3', ctypes.c_size_t),
        ('match4', ctypes.c_size_t),
        ('count4', ctypes.c_size_t),
    ]
class SacrebleuScorer(object):
    """Corpus-level BLEU scorer backed by the ``sacrebleu`` package.

    Accumulates (reference, hypothesis) string pairs and scores the whole
    corpus at once via ``sacrebleu.corpus_bleu``.
    """
    def __init__(self):
        import sacrebleu
        self.sacrebleu = sacrebleu
        self.reset()
    def reset(self, one_init=False):
        """Clear all accumulated sentence pairs."""
        if one_init:
            # Smoothed one-initialization is not supported by this backend.
            raise NotImplementedError
        self.ref, self.sys = [], []
    def add_string(self, ref, pred):
        """Record one reference/hypothesis sentence pair."""
        self.ref.append(ref)
        self.sys.append(pred)
    def score(self, order=4):
        """Return the corpus BLEU score as a float."""
        return self.result_string(order).score
    def result_string(self, order=4):
        """Return the full sacrebleu result object (``order`` must be 4)."""
        if order != 4:
            raise NotImplementedError
        return self.sacrebleu.corpus_bleu(self.sys, [self.ref])
class Scorer(object):
    """BLEU scorer over token-id tensors, backed by the libbleu C library.

    n-gram match/count statistics are accumulated in a C struct
    (:class:`BleuStat`) via ctypes calls; ``pad`` and ``eos`` ids are
    handled by the C code, and ``unk`` tokens are forced to never match.
    """
    def __init__(self, pad, eos, unk):
        self.stat = BleuStat()
        self.pad = pad
        self.eos = eos
        self.unk = unk
        self.reset()
    def reset(self, one_init=False):
        """Zero the statistics (or one-initialize them for smoothing)."""
        if one_init:
            C.bleu_one_init(ctypes.byref(self.stat))
        else:
            C.bleu_zero_init(ctypes.byref(self.stat))
    def add(self, ref, pred):
        """Accumulate statistics for one (reference, hypothesis) pair of
        torch.IntTensor token-id sequences."""
        if not isinstance(ref, torch.IntTensor):
            raise TypeError('ref must be a torch.IntTensor (got {})'
                            .format(type(ref)))
        if not isinstance(pred, torch.IntTensor):
            raise TypeError('pred must be a torch.IntTensor(got {})'
                            .format(type(pred)))
        # don't match unknown words
        rref = ref.clone()
        assert not rref.lt(0).any()
        # Replace unk with a sentinel outside the valid id range so the C
        # code can never count it as a match.
        rref[rref.eq(self.unk)] = -999
        # Flatten to contiguous 1-D so raw data pointers can be handed to C.
        rref = rref.contiguous().view(-1)
        pred = pred.contiguous().view(-1)
        C.bleu_add(
            ctypes.byref(self.stat),
            ctypes.c_size_t(rref.size(0)),
            ctypes.c_void_p(rref.data_ptr()),
            ctypes.c_size_t(pred.size(0)),
            ctypes.c_void_p(pred.data_ptr()),
            ctypes.c_int(self.pad),
            ctypes.c_int(self.eos))
    def score(self, order=4):
        """Return BLEU-N (default BLEU-4) in the 0-100 range."""
        # Geometric mean of n-gram precisions (log domain), times brevity
        # penalty; a zero precision drives the score to 0 via -inf.
        psum = sum(math.log(p) if p > 0 else float('-Inf')
                   for p in self.precision()[:order])
        return self.brevity() * math.exp(psum / order) * 100
    def precision(self):
        """Return the list of 1- to 4-gram precisions."""
        def ratio(a, b):
            return a / b if b > 0 else 0
        return [
            ratio(self.stat.match1, self.stat.count1),
            ratio(self.stat.match2, self.stat.count2),
            ratio(self.stat.match3, self.stat.count3),
            ratio(self.stat.match4, self.stat.count4),
        ]
    def brevity(self):
        """Return the brevity penalty (<= 1) for short hypotheses."""
        r = self.stat.reflen / self.stat.predlen
        return min(1, math.exp(1 - r))
    def result_string(self, order=4):
        """Return a human-readable summary line in the standard BLEU format."""
        assert order <= 4, "BLEU scores for order > 4 aren't supported"
        fmt = 'BLEU{} = {:2.2f}, {:2.1f}'
        for _ in range(1, order):
            fmt += '/{:2.1f}'
        fmt += ' (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})'
        bleup = [p * 100 for p in self.precision()[:order]]
        return fmt.format(order, self.score(order=order), *bleup,
                          self.brevity(), self.stat.predlen/self.stat.reflen,
                          self.stat.predlen, self.stat.reflen)
| 3,955 | 29.430769 | 83 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/file_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for working with the local dataset cache.
This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_.
and `huggingface <https://github.com/huggingface>`_.
"""
import fnmatch
from functools import wraps, partial
from hashlib import sha256
from io import open
import json
import logging
import os
import shutil
import tarfile
import tempfile
# Resolve the torch cache root; fall back to $TORCH_HOME (or
# $XDG_CACHE_HOME/torch) on torch versions without torch.hub._get_torch_home.
try:
    from torch.hub import _get_torch_home
    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv('TORCH_HOME', os.path.join(
            os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_fairseq')

# Python 2/3 compatible urlparse import.
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

# Download cache directory, overridable via $PYTORCH_FAIRSEQ_CACHE;
# kept as a plain string when pathlib is unavailable.
try:
    from pathlib import Path
    PYTORCH_FAIRSEQ_CACHE = Path(
        os.getenv('PYTORCH_FAIRSEQ_CACHE', default_cache_path))
except (AttributeError, ImportError):
    PYTORCH_FAIRSEQ_CACHE = os.getenv(
        'PYTORCH_FAIRSEQ_CACHE', default_cache_path)

# Conventional filenames for archived model configs/weights.
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"

logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
def load_archive_file(archive_file):
    """Resolve ``archive_file`` (local path or URL) to a local directory.

    Remote archives are downloaded/cached via :func:`cached_path`. If the
    resolved path is a tar archive rather than a directory, it is extracted
    and the archive file is replaced in place by its top-level directory.
    Returns the resolved path, or ``None`` if nothing could be found.
    """
    # redirect to the cache, if necessary
    try:
        resolved_archive_file = cached_path(archive_file, cache_dir=None)
    except EnvironmentError:
        logger.info(
            "Archive name '{}' was not found in archive name list. "
            "We assumed '{}' was a path or URL but couldn't find any file "
            "associated to this path or URL.".format(
                archive_file,
                archive_file,
            )
        )
        return None

    if resolved_archive_file == archive_file:
        logger.info("loading archive file {}".format(archive_file))
    else:
        logger.info("loading archive file {} from cache at {}".format(
            archive_file, resolved_archive_file))

    # Extract archive to temp dir and replace .tar.bz2 if necessary
    tempdir = None
    if not os.path.isdir(resolved_archive_file):
        tempdir = tempfile.mkdtemp()
        logger.info("extracting archive file {} to temp dir {}".format(
            resolved_archive_file, tempdir))
        # tarfile mode is derived from the original extension, e.g. 'r:bz2'
        ext = os.path.splitext(archive_file)[1][1:]
        with tarfile.open(resolved_archive_file, 'r:' + ext) as archive:
            top_dir = os.path.commonprefix(archive.getnames())
            archive.extractall(tempdir)
            # swap the archive file for its extracted top-level directory
            os.remove(resolved_archive_file)
            shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
            shutil.rmtree(tempdir)

    return resolved_archive_file
def url_to_filename(url, etag=None):
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the URL's, delimited
    by a period.
    """
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename = '.'.join((filename, sha256(etag.encode('utf-8')).hexdigest()))
    return filename
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    cache_dir = str(cache_dir) if isinstance(cache_dir, Path) else cache_dir

    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # Both the cached file and its sidecar metadata must be present.
    for required_path in (cache_path, meta_path):
        if not os.path.exists(required_path):
            raise EnvironmentError("file {} not found".format(required_path))

    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename, cache_dir=None):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: download (or reuse) a cached copy.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Existing local file: hand it back unchanged.
        return url_or_filename
    if scheme == '':
        # Looks like a local path, but nothing is there.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Neither a recognized URL scheme nor an existing file.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url):
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not (parsed.netloc and parsed.path):
        raise ValueError("bad s3 path {}".format(url))
    # Drop a single leading '/' from the key, if present.
    s3_path = parsed.path[1:] if parsed.path.startswith("/") else parsed.path
    return parsed.netloc, s3_path
def s3_request(func):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """

    @wraps(func)
    def wrapper(url, *args, **kwargs):
        from botocore.exceptions import ClientError

        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Translate S3's 404 into the same error a missing local file
            # raises; re-raise every other client error untouched.
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise EnvironmentError("file {} not found".format(url))

    return wrapper
@s3_request
def s3_etag(url):
    """Check ETag on S3 object."""
    import boto3

    # A fresh resource per call; credentials come from the usual boto3 chain.
    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    s3_object = s3_resource.Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
    """Pull a file directly from S3."""
    import boto3

    s3_resource = boto3.resource("s3")
    bucket_name, s3_path = split_s3_path(url)
    # Stream the object body straight into the open file handle.
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def request_wrap_timeout(func, url):
    """Call ``func(timeout=...)`` with progressively longer timeouts.

    Retries on ``requests`` timeouts using 10/20/40/60/60 second limits
    and raises ``RuntimeError`` once every attempt has timed out. ``url``
    is used only for logging/error messages.
    """
    import requests

    for attempt, timeout in enumerate([10, 20, 40, 60, 60]):
        try:
            return func(timeout=timeout)
        except requests.exceptions.Timeout as e:
            logger.warning("Request for %s timed-out (attempt %d). Retrying with a timeout of %d secs",
                           url, attempt, timeout, exc_info=e)
    raise RuntimeError(f"Unable to fetch file {url}")
def http_get(url, temp_file):
    """Stream ``url`` over HTTP(S) into ``temp_file``, showing a tqdm progress bar."""
    import requests
    from tqdm import tqdm

    req = request_wrap_timeout(partial(requests.get, url, stream=True), url)
    content_length = req.headers.get('Content-Length')
    # Content-Length may be absent (e.g. chunked responses); tqdm then has no total.
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in req.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    The cache key is a hash of the URL plus (when reachable) its ETag, so
    a changed remote file gets a fresh cache entry. A ``<file>.json``
    sidecar records the originating url/etag for :func:`filename_to_url`.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_FAIRSEQ_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)

    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        try:
            import requests
            response = request_wrap_timeout(partial(requests.head, url, allow_redirects=True), url)
            if response.status_code != 200:
                etag = None
            else:
                etag = response.headers.get("ETag")
        except EnvironmentError:
            etag = None

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # If we don't have a connection (etag is None) and can't identify the file
    # try to get the last downloaded one
    if not os.path.exists(cache_path) and etag is None:
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
        # .json sidecars are metadata, not cached payloads
        matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
        if matching_files:
            cache_path = os.path.join(cache_dir, matching_files[-1])

    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                output_string = json.dumps(meta)
                meta_file.write(output_string)

            logger.info("removing temp file %s", temp_file.name)

    return cache_path
def read_set_from_file(filename):
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    '''
    with open(filename, 'r', encoding='utf-8') as file_:
        # Trailing whitespace (including the newline) is stripped per line.
        return {line.rstrip() for line in file_}
def get_file_extension(path, dot=True, lower=True):
    """Return the extension of ``path``, optionally without the leading dot
    and/or lowercased (both enabled by default for the dot and lowering)."""
    ext = os.path.splitext(path)[1]
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
| 11,036 | 32.243976 | 103 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/incremental_decoding_utils.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional
import uuid
from torch import Tensor
class FairseqIncrementalState(object):
    """Mixin that namespaces a module's incremental decoding state.

    Each instance draws a unique UUID prefix so several modules can share
    one ``incremental_state`` dict without key collisions.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.init_incremental_state()

    def init_incremental_state(self):
        # Unique per-instance prefix for all incremental-state keys.
        self._incremental_state_id = str(uuid.uuid4())

    def _get_full_incremental_state_key(self, key: str) -> str:
        return "{}.{}".format(self._incremental_state_id, key)

    def get_incremental_state(
        self,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
        key: str,
    ) -> Optional[Dict[str, Optional[Tensor]]]:
        """Helper for getting incremental state for an nn.Module."""
        full_key = self._get_full_incremental_state_key(key)
        if incremental_state is not None and full_key in incremental_state:
            return incremental_state[full_key]
        return None

    def set_incremental_state(
        self,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]],
        key: str,
        value: Dict[str, Optional[Tensor]],
    ) -> Optional[Dict[str, Dict[str, Optional[Tensor]]]]:
        """Helper for setting incremental state for an nn.Module."""
        if incremental_state is not None:
            full_key = self._get_full_incremental_state_key(key)
            incremental_state[full_key] = value
        return incremental_state
def with_incremental_state(cls):
    """Class decorator that prepends :class:`FairseqIncrementalState` to
    ``cls.__bases__``. Idempotent: any existing occurrence is filtered out
    before being re-inserted at the front.
    """
    other_bases = tuple(b for b in cls.__bases__ if b != FairseqIncrementalState)
    cls.__bases__ = (FairseqIncrementalState,) + other_bases
    return cls
| 1,760 | 33.529412 | 112 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/search.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Optional, List
import torch
import torch.nn as nn
from torch import Tensor
class Search(nn.Module):
    """Base class for decoding search strategies.

    Caches the pad/unk/eos indices and vocabulary size from the target
    dictionary; subclasses implement :meth:`step`.
    """

    def __init__(self, tgt_dict):
        super().__init__()
        self.pad = tgt_dict.pad()
        self.unk = tgt_dict.unk()
        self.eos = tgt_dict.eos()
        self.vocab_size = len(tgt_dict)
        self.src_lengths = torch.tensor(-1)

    def step(self, step, lprobs, scores):
        """Take a single search step.

        Args:
            step: the current search step, starting at 0
            lprobs: (bsz x input_beam_size x vocab_size)
                the model's log-probabilities over the vocabulary at the current step
            scores: (bsz x input_beam_size x step)
                the historical model scores of each hypothesis up to this point

        Return: A tuple of (scores, indices, beams) where:
            scores: (bsz x output_beam_size)
                the scores of the chosen elements; output_beam_size can be
                larger than input_beam_size, e.g., we may return
                2*input_beam_size to account for EOS
            indices: (bsz x output_beam_size)
                the indices of the chosen elements
            beams: (bsz x output_beam_size)
                the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
        """
        raise NotImplementedError

    @torch.jit.export
    def set_src_lengths(self, src_lengths):
        self.src_lengths = src_lengths


class BeamSearch(Search):
    """Standard beam search over cumulative log-probabilities."""

    def __init__(self, tgt_dict):
        super().__init__(tgt_dict)

    @torch.jit.export
    def step(self, step: int, lprobs, scores: Optional[Tensor]):
        bsz, beam_size, vocab_size = lprobs.size()

        if step == 0:
            # at the first step all hypotheses are equally likely, so use
            # only the first beam
            lprobs = lprobs[:, ::beam_size, :].contiguous()
        else:
            # make probs contain cumulative scores for each hypothesis
            assert scores is not None
            lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))

        top_prediction = torch.topk(
            lprobs.view(bsz, -1),
            k=min(
                # Take the best 2 x beam_size predictions. We'll choose the first
                # beam_size of these which don't predict eos to continue with.
                beam_size * 2,
                lprobs.view(bsz, -1).size(1) - 1,  # -1 so we never select pad
            ),
        )
        scores_buf = top_prediction[0]
        indices_buf = top_prediction[1]
        # Recover (beam id, token id) from the flattened top-k index. Use an
        # explicit floor: plain ``torch.div`` on integer tensors performs
        # true division on modern PyTorch and would silently yield float
        # beam indices, breaking downstream integer indexing.
        beams_buf = torch.div(indices_buf, vocab_size, rounding_mode="floor")
        indices_buf.fmod_(vocab_size)
        return scores_buf, indices_buf, beams_buf
class LengthConstrainedBeamSearch(Search):
    """Beam search constraining output lengths to a per-sentence range.

    Minimum and maximum lengths are affine in the source length:
    ``min_len = min_len_a * src_len + min_len_b`` (max analogous). EOS is
    forbidden before the minimum, forced at the maximum, and forbidden after.
    """

    def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
        super().__init__(tgt_dict)
        self.min_len_a = min_len_a
        self.min_len_b = min_len_b
        self.max_len_a = max_len_a
        self.max_len_b = max_len_b
        # Delegates the actual ranking to an ordinary BeamSearch.
        self.beam = BeamSearch(tgt_dict)

    def step(self, step: int, lprobs, scores):
        # Per-sentence length bounds (relies on src_lengths set via
        # set_src_lengths before stepping).
        min_lens = self.min_len_a * self.src_lengths + self.min_len_b
        max_lens = self.max_len_a * self.src_lengths + self.max_len_b
        # Boolean masks over the batch dimension select which sentences get
        # their EOS probability clamped at this step.
        lprobs[step < min_lens, :, self.eos] = -math.inf
        lprobs[step == max_lens, :, self.eos] = 0
        lprobs[step > max_lens, :, self.eos] = -math.inf
        return self.beam.step(step, lprobs, scores)
class DiverseBeamSearch(Search):
    """Diverse Beam Search.

    See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
    Models" for details.

    We only implement the Hamming Diversity penalty here, which performed best
    in the original paper.
    """

    def __init__(self, tgt_dict, num_groups, diversity_strength):
        super().__init__(tgt_dict)
        self.num_groups = num_groups
        # Stored negated so the penalty can be applied with a plain add.
        self.diversity_strength = -diversity_strength
        self.beam = BeamSearch(tgt_dict)

    @torch.jit.export
    def step(self, step: int, lprobs, scores):
        """Run one beam-search step per group, penalizing tokens already
        selected by earlier groups at this step (Hamming diversity)."""
        bsz, beam_size, vocab_size = lprobs.size()
        if beam_size % self.num_groups != 0:
            raise ValueError(
                "DiverseBeamSearch requires --beam to be divisible by the number of groups"
            )

        # initialize diversity penalty (per-token selection counts so far)
        diversity_buf = torch.zeros(lprobs[:, 0, :].size()).to(lprobs)

        scores_G, indices_G, beams_G = [], [], []
        for g in range(self.num_groups):
            # each group owns every num_groups-th beam
            lprobs_g = lprobs[:, g :: self.num_groups, :]
            scores_g = scores[:, g :: self.num_groups, :] if step > 0 else None

            # apply diversity penalty
            if g > 0:
                # NOTE: uses the (input, other, alpha=...) form of torch.add;
                # the legacy positional (input, alpha, other) overload was
                # deprecated and removed from recent PyTorch releases.
                lprobs_g = torch.add(
                    lprobs_g, diversity_buf.unsqueeze(1), alpha=self.diversity_strength
                )
            else:
                lprobs_g = lprobs_g.contiguous()

            scores_buf, indices_buf, beams_buf = self.beam.step(
                step, lprobs_g, scores_g
            )
            # map group-local beam ids back to global beam ids
            beams_buf.mul_(self.num_groups).add_(g)

            scores_G.append(scores_buf.clone())
            indices_G.append(indices_buf.clone())
            beams_G.append(beams_buf.clone())

            # update diversity penalty
            diversity_buf.scatter_add_(
                1, indices_buf, torch.ones(indices_buf.size()).to(diversity_buf)
            )

        # interleave results from different groups
        scores_buf = torch.stack(scores_G, dim=2).view(bsz, -1)
        indices_buf = torch.stack(indices_G, dim=2).view(bsz, -1)
        beams_buf = torch.stack(beams_G, dim=2).view(bsz, -1)
        return scores_buf, indices_buf, beams_buf
class Sampling(Search):
    """Ancestral sampling, optionally restricted to top-k or top-p (nucleus)
    candidate sets. Note: operates in place on ``lprobs`` (``exp_``)."""

    sampling_topk: int    # >0 enables top-k sampling
    sampling_topp: float  # >0 enables top-p (nucleus) sampling

    def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
        super().__init__(tgt_dict)
        self.sampling_topk = sampling_topk
        self.sampling_topp = sampling_topp

    def _sample_topp(self, lprobs):
        """Sample among the smallest set of elements whose cumulative probability mass exceeds p.

        See `"The Curious Case of Neural Text Degeneration"
        (Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.

        Args:
            lprobs: (bsz x input_beam_size x vocab_size)
                the model's log-probabilities over the vocabulary at the current step

        Return: A tuple of (trimed_probs, truncated_indices) where:
            trimed_probs: (bsz x input_beam_size x ?)
                the model's probabilities over the elements selected to sample from. The
                width of the third dimension is determined by top-P.
            truncated_indices: (bsz x input_beam_size x ?)
                the indices of the chosen elements.
        """
        # NOTE: in-place exp_ converts the caller's log-probs to probs.
        probs = lprobs.exp_()

        # sort the last dimension (vocab dimension) in descending order
        sorted_probs, sorted_indices = probs.sort(descending=True)

        # compute a mask to indicate the words to be included in the top-P set.
        cumsum_probs = sorted_probs.cumsum(dim=2)
        mask = cumsum_probs.lt(self.sampling_topp)

        # note that mask was computed by 'lt'. One more word needs to be included
        # so that the cumulative probability mass can exceed p.
        cumsum_mask = mask.cumsum(dim=2)
        last_included = cumsum_mask[:, :, -1:]
        last_included.clamp_(0, mask.size()[2] - 1)
        mask = mask.scatter_(2, last_included, 1)

        # truncate unnecessary dims.
        max_dim = last_included.max()
        truncated_mask = mask[:, :, : max_dim + 1]
        truncated_probs = sorted_probs[:, :, : max_dim + 1]
        truncated_indices = sorted_indices[:, :, : max_dim + 1]

        # trim the words that are not in top-P by setting their probabilities
        # to 0, so that they would not be sampled later.
        trim_mask = ~truncated_mask
        trimed_probs = truncated_probs.masked_fill_(trim_mask, 0)
        return trimed_probs, truncated_indices

    @torch.jit.export
    def step(self, step: int, lprobs, scores):
        bsz, beam_size, vocab_size = lprobs.size()

        if step == 0:
            # at the first step all hypotheses are equally likely, so use
            # only the first beam
            lprobs = lprobs[:, ::beam_size, :].contiguous()

        if self.sampling_topp > 0:
            # only sample from the smallest set of words whose cumulative probability mass exceeds p
            probs, top_indices = self._sample_topp(lprobs)
        elif self.sampling_topk > 0:
            # only sample from top-k candidates
            lprobs, top_indices = lprobs.topk(self.sampling_topk)
            probs = lprobs.exp_()
        else:
            probs = lprobs.exp_()

            # dummy data to be consistent with true branch for type check
            top_indices = torch.empty(0).to(probs)

        # sample: one beam_size-wide draw at step 0, one draw per beam after
        if step == 0:
            indices_buf = torch.multinomial(
                probs.view(bsz, -1), beam_size, replacement=True,
            ).view(bsz, beam_size)
        else:
            indices_buf = torch.multinomial(
                probs.view(bsz * beam_size, -1),
                1,
                replacement=True,
            ).view(bsz, beam_size)

        if step == 0:
            # expand to beam size
            probs = probs.expand(bsz, beam_size, -1)

        # gather scores
        scores_buf = torch.gather(
            probs, dim=2, index=indices_buf.unsqueeze(-1)
        )
        scores_buf = scores_buf.log_().view(bsz, -1)

        # remap indices if using top-k or top-P sampling
        if self.sampling_topk > 0 or self.sampling_topp > 0:
            indices_buf = torch.gather(
                top_indices.expand(bsz, beam_size, -1),
                dim=2,
                index=indices_buf.unsqueeze(-1),
            ).squeeze(2)

        if step == 0:
            beams_buf = indices_buf.new_zeros(bsz, beam_size)
        else:
            # sampling keeps each hypothesis on its own beam
            beams_buf = torch.arange(0, beam_size).to(indices_buf).repeat(bsz, 1)
            # make scores cumulative
            scores_buf.add_(
                torch.gather(scores[:, :, step - 1], dim=1, index=beams_buf)
            )

        return scores_buf, indices_buf, beams_buf
class DiverseSiblingsSearch(Search):
    """
    Beam search with diverse siblings.

    See "A Simple, Fast Diverse Decoding Algorithm for Neural Generation" for details.
    https://arxiv.org/abs/1611.08562

    1/ Calculate hypotheses for each beam
    2/ Intra-sibling ordering
    3/ Rewrite scores
    4/ Choose top K hypotheses

    if diversity_rate == 0 is equivalent to BeamSearch
    """

    def __init__(self, tgt_dict, diversity_rate):
        super().__init__(tgt_dict)
        self.diversity_rate = diversity_rate
        self.beam = BeamSearch(tgt_dict)

    def step(self, step: int, lprobs, scores):
        bsz, beam_size, vocab_size = lprobs.size()
        k = min(
            # Take the best 2 x beam_size predictions. We'll choose the first
            # beam_size of these which don't predict eos to continue with.
            beam_size * 2,
            lprobs.view(bsz, -1).size(1) - 1,  # -1 so we never select pad
        )
        s_list: List[Tensor]
        i_list: List[Tensor]
        s_list = [torch.empty(0).to(lprobs) for i in range(beam_size)]
        i_list = [torch.LongTensor().to(device=lprobs.device) for i in range(beam_size)]
        # Penalty grows linearly with a sibling's rank within its beam.
        sibling_score = torch.arange(1, k + 1).to(lprobs) * self.diversity_rate

        if step == 0:
            return self.beam.step(step, lprobs, scores)
        lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))

        # 1/ Calculate hypotheses for each beam
        for i in range(beam_size):
            torch.topk(lprobs[:, i, :].view(bsz, -1), k, out=(s_list[i], i_list[i]))
            i_list[i].fmod_(vocab_size)

            # 2/ Intra-sibling ordering by default from topk + 3/ Rewrite scores
            s_list[i].sub_(sibling_score)

        # 4/ Choose top K hypotheses
        indices = torch.stack(i_list, dim=1).view(bsz, -1)

        final_scores = torch.empty(0).to(lprobs)
        final_indices = torch.LongTensor().to(device=lprobs.device)
        final_beams = torch.LongTensor().to(device=lprobs.device)
        (final_scores, final_indices) = torch.topk(
            torch.stack(s_list, dim=1).view(bsz, -1),
            k,
        )

        # Use an explicit floor division to map the flattened index back to a
        # beam id: plain ``torch.div`` on integer tensors performs true
        # division on modern PyTorch and would yield float beam indices.
        final_beams = torch.div(final_indices, k, rounding_mode="floor")

        for i in range(bsz):
            final_indices[i] = indices[i][final_indices[i]]

        return final_scores, final_indices, final_beams
| 12,739 | 36.251462 | 100 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/nan_detector.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
logger = logging.getLogger(__name__)
class NanDetector:
    """
    Detects the first NaN or Inf in forward and/or backward pass and logs, together with the module name
    """

    def __init__(self, model, forward=True, backward=True):
        self.bhooks = []
        self.fhooks = []
        self.forward = forward
        self.backward = backward
        self.reset()

        # Tag every submodule with its qualified name so hooks can report it.
        for name, mod in model.named_modules():
            mod.__module_name = name
            self.add_hooks(mod)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Used as a context manager: hooks are removed on exit.
        self.close()

    def add_hooks(self, module):
        """Register forward/backward hooks on ``module`` per the flags."""
        if self.forward:
            self.fhooks.append(module.register_forward_hook(self.fhook_fn))
        if self.backward:
            self.bhooks.append(module.register_backward_hook(self.bhook_fn))

    def reset(self):
        # Only the first NaN/Inf per direction is logged; these latches
        # suppress subsequent reports.
        self.has_printed_f = False
        self.has_printed_b = False

    def _detect(self, tensor, name, backward):
        """Return an error string if ``tensor`` contains NaN/Inf, else None."""
        err = None
        if (
            tensor.numel() >= 2
        ):  # single value tensors (like the loss) will not provide much info
            with torch.no_grad():
                if torch.isnan(tensor).any():
                    err = "NaN"
                elif torch.isinf(tensor).any():
                    err = "Inf"
        if err is not None:
            err = f"{err} detected in output of {name}, shape: {tensor.shape}, {'backward' if backward else 'forward'}"
        return err

    def _apply(self, module, inp, x, backward):
        """Recursively scan output ``x`` (tensor, dict, list or tuple)."""
        if torch.is_tensor(x):
            if isinstance(inp, tuple) and len(inp) > 0:
                inp = inp[0]
            err = self._detect(x, module.__module_name, backward)
            if err is not None:
                if torch.is_tensor(inp) and not backward:
                    # Forward pass: include the input range to help debugging.
                    err += (
                        f" input max: {inp.max().item()}, input min: {inp.min().item()}"
                    )
                has_printed_attr = 'has_printed_b' if backward else 'has_printed_f'
                logger.warning(err)
                setattr(self, has_printed_attr, True)
        elif isinstance(x, dict):
            for v in x.values():
                self._apply(module, inp, v, backward)
        elif isinstance(x, list) or isinstance(x, tuple):
            for v in x:
                self._apply(module, inp, v, backward)

    def fhook_fn(self, module, inp, output):
        # Forward hook: scan until the first report.
        if not self.has_printed_f:
            self._apply(module, inp, output, backward=False)

    def bhook_fn(self, module, inp, output):
        # Backward hook: scan until the first report.
        if not self.has_printed_b:
            self._apply(module, inp, output, backward=True)

    def close(self):
        """Remove every registered hook."""
        for hook in self.fhooks + self.bhooks:
            hook.remove()
| 2,982 | 32.144444 | 119 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/iterative_refinement_generator.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import torch
import numpy as np
from fairseq import utils
# Per-iteration decoder output for iterative refinement: the current token
# and score tensors, optional attention, the step counters, and an optional
# list of historical outputs.
DecoderOut = namedtuple(
    'IterativeRefinementDecoderOut',
    ['output_tokens', 'output_scores', 'attn', 'step', 'max_step', 'history'],
)
class IterativeRefinementGenerator(object):
def __init__(
self,
tgt_dict,
models=None,
eos_penalty=0.0,
max_iter=10,
max_ratio=2,
beam_size=1,
decoding_format=None,
retain_dropout=False,
adaptive=True,
retain_history=False,
reranking=False,
):
"""
Generates translations based on iterative refinement.
Args:
tgt_dict: target dictionary
eos_penalty: if > 0.0, it penalized early-stopping in decoding
max_iter: maximum number of refinement iterations
max_ratio: generate sequences of maximum length ax, where x is the source length
decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
retain_dropout: retaining dropout in the inference
adaptive: decoding with early stop
"""
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.eos_penalty = eos_penalty
self.max_iter = max_iter
self.max_ratio = max_ratio
self.beam_size = beam_size
self.reranking = reranking
self.decoding_format = decoding_format
self.retain_dropout = retain_dropout
self.retain_history = retain_history
self.adaptive = adaptive
self.models = models
def generate_batched_itr(
self,
data_itr,
maxlen_a=None,
maxlen_b=None,
cuda=False,
timer=None,
prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
for sample in data_itr:
if "net_input" not in sample:
continue
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
self.models,
sample,
prefix_tokens=sample["target"][:, :prefix_size]
if prefix_size > 0
else None,
)
if timer is not None:
timer.stop(sample["ntokens"])
for i, id in enumerate(sample["id"]):
# remove padding
src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
ref = utils.strip_pad(sample["target"][i, :], self.pad)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None):
# TODO: iterative refinement generator does not support ensemble for now.
if not self.retain_dropout:
for model in models:
model.eval()
model, reranker = models[0], None
if self.reranking:
assert len(models) > 1, "Assuming the last checkpoint is the reranker"
assert self.beam_size > 1, "Reranking requires multiple translation for each example"
reranker = models[-1]
models = models[:-1]
if len(models) > 1 and hasattr(model, 'enable_ensemble'):
assert model.allow_ensemble, "{} does not support ensembling".format(model.__class__.__name__)
model.enable_ensemble(models)
# TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len = src_tokens.size()
# initialize
encoder_out = model.forward_encoder([src_tokens, src_lengths])
prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
if self.beam_size > 1:
assert model.allow_length_beam, \
"{} does not support decoding with length beam.".format(model.__class__.__name__)
# regenerate data based on length-beam
length_beam_order = utils.new_arange(src_tokens, self.beam_size, bsz).t().reshape(-1)
encoder_out = model.encoder.reorder_encoder_out(encoder_out, length_beam_order)
prev_decoder_out = model.regenerate_length_beam(prev_decoder_out, self.beam_size)
bsz = bsz * self.beam_size
sent_idxs = torch.arange(bsz)
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.retain_history:
prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])
finalized = [[] for _ in range(bsz)]
def is_a_loop(x, y, s, a):
b, l_x, l_y = x.size(0), x.size(1), y.size(1)
if l_x > l_y:
y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
if a is not None:
a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
elif l_x < l_y:
x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
return (x == y).all(1), y, s, a
def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
cutoff = prev_out_token.ne(self.pad)
tokens = prev_out_token[cutoff]
if prev_out_score is None:
scores, score = None, None
else:
scores = prev_out_score[cutoff]
score = scores.mean()
if prev_out_attn is None:
hypo_attn, alignment = None, None
else:
hypo_attn = prev_out_attn[cutoff]
alignment = hypo_attn.max(dim=1)[1]
return {
"steps": step,
"tokens": tokens,
"positional_scores": scores,
"score": score,
"hypo_attn": hypo_attn,
"alignment": alignment,
}
for step in range(self.max_iter + 1):
decoder_options = {
"eos_penalty": self.eos_penalty,
"max_ratio": self.max_ratio,
"decoding_format": self.decoding_format,
}
prev_decoder_out = prev_decoder_out._replace(
step=step,
max_step=self.max_iter + 1,
)
decoder_out = model.forward_decoder(
prev_decoder_out, encoder_out, **decoder_options
)
if self.adaptive:
# terminate if there is a loop
terminated, out_tokens, out_scores, out_attn = is_a_loop(
prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn
)
decoder_out = decoder_out._replace(
output_tokens=out_tokens,
output_scores=out_scores,
attn=out_attn,
)
else:
terminated = decoder_out.output_tokens.new_zeros(decoder_out.output_tokens.size(0)).bool()
if step == self.max_iter: # reach last iteration, terminate
terminated.fill_(1)
# collect finalized sentences
finalized_idxs = sent_idxs[terminated]
finalized_tokens = decoder_out.output_tokens[terminated]
finalized_scores = decoder_out.output_scores[terminated]
finalized_attn = (
None if (decoder_out.attn is None or decoder_out.attn.size(0) == 0) else decoder_out.attn[terminated]
)
if self.retain_history:
finalized_history_tokens = [h[terminated] for h in decoder_out.history]
for i in range(finalized_idxs.size(0)):
finalized[finalized_idxs[i]] = [
finalized_hypos(
step,
finalized_tokens[i],
finalized_scores[i],
None if finalized_attn is None else finalized_attn[i],
)
]
if self.retain_history:
finalized[finalized_idxs[i]][0]['history'] = []
for j in range(len(finalized_history_tokens)):
finalized[finalized_idxs[i]][0]['history'].append(
finalized_hypos(
step,
finalized_history_tokens[j][i],
None, None
)
)
# check if all terminated
if terminated.sum() == terminated.size(0):
break
# for next step
not_terminated = ~terminated
prev_decoder_out = decoder_out._replace(
output_tokens=decoder_out.output_tokens[not_terminated],
output_scores=decoder_out.output_scores[not_terminated],
attn=decoder_out.attn[not_terminated]
if (decoder_out.attn is not None and decoder_out.attn.size(0) > 0)
else None,
history=[h[not_terminated] for h in decoder_out.history]
if decoder_out.history is not None
else None,
)
encoder_out = model.encoder.reorder_encoder_out(encoder_out, not_terminated.nonzero().squeeze())
sent_idxs = sent_idxs[not_terminated]
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.beam_size > 1:
if reranker is not None:
finalized = self.rerank(
reranker, finalized, [src_tokens, src_lengths], self.beam_size
)
# aggregate information from length beam
finalized = [
finalized[np.argmax(
[finalized[self.beam_size * i + j][0]['score'] for j in range(self.beam_size)]
) + self.beam_size * i] for i in range(len(finalized) // self.beam_size)
]
return finalized
    def rerank(self, reranker, finalized, encoder_input, beam_size):
        """Re-score the finalized length-beam hypotheses with an autoregressive
        *reranker* model, writing the length-normalized score into
        ``finalized[i][0]['score']`` for each hypothesis.

        Args:
            reranker: autoregressive encoder-decoder model used for scoring
            finalized: list of hypothesis lists, ``beam_size`` entries per sentence
            encoder_input: ``[src_tokens, src_lengths]`` fed to the reranker encoder
            beam_size: number of length-beam candidates per source sentence

        Returns:
            the same ``finalized`` list with updated ``'score'`` fields
        """
        def rebuild_batch(finalized):
            # Collate the top hypothesis of every entry back into a single
            # right-padded [num_hyps x max_len] token tensor.
            finalized_tokens = [f[0]['tokens'] for f in finalized]
            finalized_maxlen = max(f.size(0) for f in finalized_tokens)
            final_output_tokens = finalized_tokens[0].new_zeros(len(finalized_tokens), finalized_maxlen).fill_(self.pad)
            for i, f in enumerate(finalized_tokens):
                final_output_tokens[i, :f.size(0)] = f
            return final_output_tokens
        final_output_tokens = rebuild_batch(finalized)
        final_output_tokens[:, 0] = self.eos # autoregressive model assumes starting with EOS
        reranker_encoder_out = reranker.encoder(*encoder_input)
        # Repeat each sentence's encoder output beam_size times so that all
        # length-beam candidates can be scored in one decoder pass.
        length_beam_order = utils.new_arange(
            final_output_tokens, beam_size, reranker_encoder_out.encoder_out.size(1)).t().reshape(-1)
        reranker_encoder_out = reranker.encoder.reorder_encoder_out(reranker_encoder_out, length_beam_order)
        # Teacher-forced decode: feed tokens[:-1], score tokens[1:].
        reranking_scores = reranker.get_normalized_probs(
            reranker.decoder(final_output_tokens[:, :-1], reranker_encoder_out), True, None)
        # Pick out the score of each reference next-token.
        reranking_scores = reranking_scores.gather(2, final_output_tokens[:, 1:, None])
        reranking_masks = final_output_tokens[:, 1:].ne(self.pad)
        # Zero out padding positions, sum over time, then normalize by length.
        reranking_scores = reranking_scores[:, :, 0].masked_fill_(~reranking_masks, 0).sum(1)
        reranking_scores = reranking_scores / reranking_masks.sum(1).type_as(reranking_scores)
        for i in range(len(finalized)):
            finalized[i][0]['score'] = reranking_scores[i]
        return finalized
| 12,332 | 38.028481 | 120 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/trainer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
import contextlib
from itertools import chain
import logging
import sys
from typing import Any, Dict, List
import torch
from fairseq import checkpoint_utils, distributed_utils, models, optim, utils
from fairseq.file_io import PathManager
from fairseq.logging import meters, metrics
from fairseq.nan_detector import NanDetector
from fairseq.optim import lr_scheduler
logger = logging.getLogger(__name__)
class Trainer(object):
    """Main class for data parallel training.
    This class supports synchronous distributed data parallel training,
    where multiple workers each have a full model replica and gradients
    are accumulated across workers before each update. We use
    :class:`~torch.nn.parallel.DistributedDataParallel` to handle
    communication of the gradients across workers.
    """
    def __init__(self, args, task, model, criterion):
        self.args = args
        self.task = task
        self.cuda = torch.cuda.is_available() and not args.cpu
        if self.cuda:
            self.device = torch.device('cuda')
        else:
            self.device = torch.device('cpu')
        # copy model and criterion to current device
        self._criterion = criterion
        self._model = model
        if args.fp16:
            self._criterion = self._criterion.half()
            self._model = self._model.half()
        self._criterion = self._criterion.to(device=self.device)
        self._model = self._model.to(device=self.device)
        self._dummy_batch = "DUMMY" # indicates we don't have a dummy batch at first
        # Lazily-initialized training state; the optimizer and lr scheduler are
        # built on first access (see _build_optimizer).
        self._lr_scheduler = None
        self._num_updates = 0
        self._optim_history = None
        self._optimizer = None
        self._warn_once = set()
        self._wrapped_criterion = None
        self._wrapped_model = None
        # Per-worker buffer used to cross-check gradient norms across replicas
        # (see _check_grad_norms). Only needed for multi-GPU CUDA training.
        if self.cuda and self.data_parallel_world_size > 1:
            self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size)
        else:
            self._grad_norm_buf = None
        metrics.log_start_time("wall", priority=790, round=0)
    @property
    def data_parallel_world_size(self):
        """Total number of data-parallel workers."""
        return self.args.distributed_world_size
    @property
    def data_parallel_process_group(self):
        # Default (global) process group; always None in this implementation.
        return None
    @property
    def data_parallel_rank(self):
        """Rank of this worker within the data-parallel group."""
        return self.args.distributed_rank
    @property
    def is_data_parallel_master(self):
        """True on the worker responsible for checkpointing/logging."""
        return distributed_utils.is_master(self.args)
    @property
    def criterion(self):
        # Lazily wrap the criterion for distributed training when it has
        # trainable parameters of its own (e.g. adaptive softmax).
        if self._wrapped_criterion is None:
            if (
                utils.has_parameters(self._criterion)
                and self.data_parallel_world_size > 1
                and not self.args.use_bmuf
            ):
                self._wrapped_criterion = models.DistributedFairseqModel(
                    self.args, self._criterion,
                    process_group=self.data_parallel_process_group
                )
            else:
                self._wrapped_criterion = self._criterion
        return self._wrapped_criterion
    @property
    def model(self):
        # Lazily wrap the model with DistributedFairseqModel for multi-GPU
        # training (BMUF handles synchronization itself, so no wrapping then).
        if self._wrapped_model is None:
            if self.data_parallel_world_size > 1 and not self.args.use_bmuf:
                self._wrapped_model = models.DistributedFairseqModel(
                    self.args, self._model,
                    process_group=self.data_parallel_process_group
                )
            else:
                self._wrapped_model = self._model
        return self._wrapped_model
    @property
    def optimizer(self):
        if self._optimizer is None:
            self._build_optimizer()
        return self._optimizer
    @property
    def lr_scheduler(self):
        if self._lr_scheduler is None:
            self._build_optimizer() # this will initialize self._lr_scheduler
        return self._lr_scheduler
    def _build_optimizer(self):
        """Build the optimizer (and lr scheduler) over trainable parameters."""
        # XXX added by Michael Xie
        def get_params(m):
            if hasattr(m, "trainable_params"):
                # "trainable_params" is custom module function
                return m.trainable_params()
            return m.parameters()
        params = list(
            filter(
                lambda p: p.requires_grad,
                chain(get_params(self.model), self.criterion.parameters()),
            )
        )
        # params = list(
        #     filter(
        #         lambda p: p.requires_grad,
        #         chain(self.model.parameters(), self.criterion.parameters()),
        #     )
        # )
        if self.args.fp16:
            # Tensor cores require compute capability >= 7 for fast fp16.
            if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:
                logger.info(
                    "NOTE: your device does NOT support faster training with --fp16, "
                    "please switch to FP32 which is likely to be faster"
                )
            if self.args.memory_efficient_fp16:
                self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
                    self.args, params
                )
            else:
                self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params)
        else:
            if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:
                logger.info("NOTE: your device may support faster training with --fp16")
            self._optimizer = optim.build_optimizer(self.args, params)
        if self.args.use_bmuf:
            self._optimizer = optim.FairseqBMUF(self.args, self._optimizer)
        # We should initialize the learning rate scheduler immediately after
        # building the optimizer, so that the initial learning rate is set.
        self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)
        self._lr_scheduler.step_update(0)
    def save_checkpoint(self, filename, extra_state):
        """Save all training state in a checkpoint file."""
        if self.is_data_parallel_master: # only save one checkpoint
            extra_state["metrics"] = metrics.state_dict()
            checkpoint_utils.save_state(
                filename,
                self.args,
                self.get_model().state_dict(),
                self.get_criterion(),
                self.optimizer,
                self.lr_scheduler,
                self.get_num_updates(),
                self._optim_history,
                extra_state,
            )
    def load_checkpoint(
        self,
        filename,
        reset_optimizer=False,
        reset_lr_scheduler=False,
        optimizer_overrides=None,
        reset_meters=False,
    ):
        """Load all training state from a checkpoint file.

        Returns the checkpoint's ``extra_state`` dict, or None if *filename*
        does not exist.
        """
        extra_state, self._optim_history, last_optim_state = None, [], None
        bexists = PathManager.isfile(filename)
        if bexists:
            state = checkpoint_utils.load_checkpoint_to_cpu(filename)
            # load model parameters
            try:
                self.get_model().load_state_dict(
                    state["model"], strict=True, args=self.args
                )
                if utils.has_parameters(self.get_criterion()):
                    self.get_criterion().load_state_dict(
                        state["criterion"], strict=True
                    )
            except Exception:
                raise Exception(
                    "Cannot load model parameters from checkpoint {}; "
                    "please ensure that the architectures match.".format(filename)
                )
            extra_state = state["extra_state"]
            self._optim_history = state["optimizer_history"]
            last_optim_state = state.get("last_optimizer_state", None)
        if last_optim_state is not None and not reset_optimizer:
            # rebuild optimizer after loading model, since params may have changed
            self._build_optimizer()
            # only reload optimizer and lr_scheduler if they match
            last_optim = self._optim_history[-1]
            assert (
                last_optim["criterion_name"] == self.get_criterion().__class__.__name__
            ), "Criterion does not match; please reset the optimizer (--reset-optimizer)."
            assert (
                last_optim["optimizer_name"] == self.optimizer.__class__.__name__
            ), "Optimizer does not match; please reset the optimizer (--reset-optimizer)."
            if not reset_lr_scheduler:
                self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"])
            self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
            self.set_num_updates(last_optim["num_updates"])
        if extra_state is not None:
            epoch = extra_state["train_iterator"]["epoch"]
            logger.info(
                "loaded checkpoint {} (epoch {} @ {} updates)".format(
                    filename, epoch, self.get_num_updates()
                )
            )
            self.lr_step(epoch)
            if "metrics" in extra_state and not reset_meters:
                metrics.load_state_dict(extra_state["metrics"])
                # reset TimeMeters, since their start times don't make sense anymore
                for meter in metrics.get_meters("default"):
                    if isinstance(meter, meters.TimeMeter):
                        meter.reset()
        else:
            logger.info("no existing checkpoint found {}".format(filename))
        return extra_state
    def get_train_iterator(
        self,
        epoch,
        combine=True,
        load_dataset=True,
        data_selector=None,
        shard_batch_itr=True,
    ):
        """Return an EpochBatchIterator over the training set for a given epoch."""
        if load_dataset:
            logger.info("loading train data for epoch {}".format(epoch))
            self.task.load_dataset(
                self.args.train_subset,
                epoch=epoch,
                combine=combine,
                data_selector=data_selector,
            )
        return self.task.get_batch_iterator(
            dataset=self.task.dataset(self.args.train_subset),
            max_tokens=self.args.max_tokens,
            max_sentences=self.args.max_sentences,
            max_positions=utils.resolve_max_positions(
                self.task.max_positions(),
                self.model.max_positions(),
                self.args.max_tokens,
            ),
            ignore_invalid_inputs=True,
            required_batch_size_multiple=self.args.required_batch_size_multiple,
            seed=self.args.seed,
            num_shards=self.data_parallel_world_size if shard_batch_itr else 1,
            shard_id=self.data_parallel_rank if shard_batch_itr else 0,
            num_workers=self.args.num_workers,
            epoch=epoch,
        )
    def get_valid_iterator(
        self,
        subset,
    ):
        """Return an EpochBatchIterator over given validation subset for a given epoch."""
        return self.task.get_batch_iterator(
            dataset=self.task.dataset(subset),
            max_tokens=self.args.max_tokens_valid,
            max_sentences=self.args.max_sentences_valid,
            max_positions=utils.resolve_max_positions(
                self.task.max_positions(),
                self.model.max_positions(),
            ),
            ignore_invalid_inputs=self.args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=self.args.required_batch_size_multiple,
            seed=self.args.seed,
            num_shards=self.data_parallel_world_size,
            shard_id=self.data_parallel_rank,
            num_workers=self.args.num_workers,
        )
    @metrics.aggregate("train")
    def train_step(self, samples, raise_oom=False):
        """Do forward, backward and parameter update.

        *samples* is a list of mini-batches; gradients are accumulated across
        them before a single optimizer step.
        """
        if self._dummy_batch == "DUMMY":
            self._dummy_batch = samples[0]
        self._set_seed()
        self.model.train()
        self.criterion.train()
        self.zero_grad()
        metrics.log_start_time("train_wall", priority=800, round=0)
        # forward and backward pass
        logging_outputs, sample_size, ooms = [], 0, 0
        for i, sample in enumerate(samples):
            sample = self._prepare_sample(sample)
            if sample is None:
                # when sample is None, run forward/backward on a dummy batch
                # and ignore the resulting gradients
                sample = self._prepare_sample(self._dummy_batch)
                is_dummy_batch = True
            else:
                is_dummy_batch = False
            def maybe_no_sync():
                """
                Whenever *samples* contains more than one mini-batch, we
                want to accumulate gradients locally and only call
                all-reduce in the last backwards pass.
                """
                if (
                    self.data_parallel_world_size > 1
                    and hasattr(self.model, "no_sync")
                    and i < len(samples) - 1
                ):
                    return self.model.no_sync()
                else:
                    return contextlib.ExitStack() # dummy contextmanager
            try:
                with maybe_no_sync():
                    # forward and backward
                    loss, sample_size_i, logging_output = self.task.train_step(
                        sample=sample,
                        model=self.model,
                        criterion=self.criterion,
                        optimizer=self.optimizer,
                        update_num=self.get_num_updates(),
                        ignore_grad=is_dummy_batch,
                    )
                    del loss
                logging_outputs.append(logging_output)
                sample_size += sample_size_i
                # emptying the CUDA cache after the first step can
                # reduce the chance of OOM
                if self.cuda and self.get_num_updates() == 0:
                    torch.cuda.empty_cache()
            except RuntimeError as e:
                if "out of memory" in str(e):
                    self._log_oom(e)
                    if raise_oom:
                        raise e
                    logger.warning(
                        "attempting to recover from OOM in forward/backward pass"
                    )
                    ooms += 1
                    self.zero_grad()
                else:
                    raise e
        if torch.is_tensor(sample_size):
            sample_size = sample_size.float()
        else:
            sample_size = float(sample_size)
        # NOTE(review): is_dummy_batch here reflects only the *last* entry of
        # *samples*; mixed real/dummy accumulation steps hit the first branch.
        if is_dummy_batch:
            sample_size *= 0. # multiply by 0 to preserve device
        # gather logging outputs from all replicas
        if self._sync_stats():
            logging_outputs, (sample_size, ooms) = self._aggregate_logging_outputs(
                logging_outputs, sample_size, ooms, ignore=is_dummy_batch,
            )
        try:
            # multiply gradients by (# GPUs / sample_size) since DDP
            # already normalizes by the number of GPUs. Thus we get
            # (sum_of_gradients / sample_size).
            if not self.args.use_bmuf:
                multiplier = self.data_parallel_world_size
                self.optimizer.multiply_grads(
                    multiplier / sample_size
                )
            elif sample_size > 0: # BMUF needs to check sample size
                num = self.data_parallel_world_size if self._sync_stats() else 1
                self.optimizer.multiply_grads(num / sample_size)
            # clip grads
            grad_norm = self.clip_grad_norm(self.args.clip_norm)
            # check that grad norms are consistent across workers
            if not self.args.use_bmuf:
                self._check_grad_norms(grad_norm)
            # take an optimization step
            self.optimizer.step()
            self.set_num_updates(self.get_num_updates() + 1)
            # log stats
            logging_output = self._reduce_and_log_stats(
                logging_outputs, sample_size, grad_norm,
            )
            # clear CUDA cache to reduce memory fragmentation
            if (
                self.args.empty_cache_freq > 0
                and (
                    (self.get_num_updates() + self.args.empty_cache_freq - 1)
                    % self.args.empty_cache_freq
                ) == 0
                and torch.cuda.is_available()
                and not self.args.cpu
            ):
                torch.cuda.empty_cache()
        except FloatingPointError:
            # re-run the forward and backward pass with hooks attached to print out where it fails
            with NanDetector(self.model):
                self.task.train_step(
                    sample, self.model, self.criterion, self.optimizer, self.get_num_updates(),
                    ignore_grad=False
                )
            raise
        except OverflowError as e:
            # fp16 loss-scale overflow: skip this update and try again.
            logger.info("NOTE: overflow detected, " + str(e))
            self.zero_grad()
            logging_output = None
        except RuntimeError as e:
            if "out of memory" in str(e):
                self._log_oom(e)
                logger.error("OOM during optimization, irrecoverable")
            raise e
        if self.args.fp16:
            metrics.log_scalar("loss_scale", self.optimizer.scaler.loss_scale, priority=700, round=0)
        metrics.log_stop_time("train_wall")
        return logging_output
    @metrics.aggregate("valid")
    def valid_step(self, sample, raise_oom=False):
        """Do forward pass in evaluation mode."""
        if self._dummy_batch == "DUMMY":
            self._dummy_batch = sample
        with torch.no_grad():
            self.model.eval()
            self.criterion.eval()
            sample = self._prepare_sample(sample)
            if sample is None:
                sample = self._prepare_sample(self._dummy_batch)
                is_dummy_batch = True
            else:
                is_dummy_batch = False
            try:
                _loss, sample_size, logging_output = self.task.valid_step(
                    sample, self.model, self.criterion
                )
            except RuntimeError as e:
                if "out of memory" in str(e):
                    self._log_oom(e)
                    if not raise_oom:
                        logger.warning(
                            "ran out of memory in validation step, retrying batch"
                        )
                        # Drop cached gradients/cache, then retry once.
                        for p in self.model.parameters():
                            if p.grad is not None:
                                p.grad = None # free some memory
                        if self.cuda:
                            torch.cuda.empty_cache()
                        return self.valid_step(sample, raise_oom=True)
                raise e
            logging_outputs = [logging_output]
            if is_dummy_batch:
                sample_size *= 0 # multiply by 0 to preserve device
        # gather logging outputs from all replicas
        if self.data_parallel_world_size > 1:
            logging_outputs, (sample_size, ) = self._aggregate_logging_outputs(
                logging_outputs, sample_size, ignore=is_dummy_batch,
            )
        # log validation stats
        logging_output = self._reduce_and_log_stats(logging_outputs, sample_size)
        return logging_output
    def zero_grad(self):
        """Clear gradients on both the optimizer and the model."""
        self.optimizer.zero_grad()
        # XXX Added by Michael Xie
        self.model.zero_grad()
    def lr_step(self, epoch, val_loss=None):
        """Adjust the learning rate at the end of the epoch."""
        self.lr_scheduler.step(epoch, val_loss)
        # prefer updating the LR based on the number of steps
        return self.lr_step_update()
    def lr_step_update(self):
        """Update the learning rate after each update."""
        new_lr = self.lr_scheduler.step_update(self.get_num_updates())
        metrics.log_scalar("lr", new_lr, weight=0, priority=300)
        return new_lr
    def get_lr(self):
        """Get the current learning rate."""
        return self.optimizer.get_lr()
    def get_model(self):
        """Get the (non-wrapped) model instance."""
        return self._model
    def get_criterion(self):
        """Get the (non-wrapped) criterion instance."""
        return self._criterion
    def get_meter(self, name):
        """[deprecated] Get a specific meter by name."""
        from fairseq import meters
        if 'get_meter' not in self._warn_once:
            self._warn_once.add('get_meter')
            utils.deprecation_warning(
                'Trainer.get_meter is deprecated. Please use fairseq.metrics instead.'
            )
        train_meters = metrics.get_meters("train")
        if train_meters is None:
            train_meters = {}
        if name == "train_loss" and "loss" in train_meters:
            return train_meters["loss"]
        elif name == "train_nll_loss":
            # support for legacy train.py, which assumed this meter is
            # always initialized
            m = train_meters.get("nll_loss", None)
            return m or meters.AverageMeter()
        elif name == "wall":
            # support for legacy train.py, which assumed this meter is
            # always initialized
            m = metrics.get_meter("default", "wall")
            return m or meters.TimeMeter()
        elif name == "wps":
            m = metrics.get_meter("train", "wps")
            return m or meters.TimeMeter()
        elif name in {"valid_loss", "valid_nll_loss"}:
            # support for legacy train.py, which assumed these meters
            # are always initialized
            k = name[len("valid_"):]
            m = metrics.get_meter("valid", k)
            return m or meters.AverageMeter()
        elif name == "oom":
            return meters.AverageMeter()
        elif name in train_meters:
            return train_meters[name]
        return None
    def get_num_updates(self):
        """Get the number of parameters updates."""
        return self._num_updates
    def set_num_updates(self, num_updates):
        """Set the number of parameters updates."""
        self._num_updates = num_updates
        self.lr_step_update()
        metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200)
    def clip_grad_norm(self, clip_norm):
        """Clip gradients to *clip_norm* and return the pre-clip norm."""
        return self.optimizer.clip_grad_norm(clip_norm, aggregate_norm_fn=None)
    def _prepare_sample(self, sample):
        """Move *sample* to the right device/dtype; None for empty samples."""
        if sample == "DUMMY":
            raise Exception(
                "Trying to use an uninitialized 'dummy' batch. This usually indicates "
                "that the total number of batches is smaller than the number of "
                "participating GPUs. Try reducing the batch size or using fewer GPUs."
            )
        if sample is None or len(sample) == 0:
            return None
        if self.cuda:
            sample = utils.move_to_cuda(sample)
        def apply_half(t):
            if t.dtype is torch.float32:
                return t.half()
            return t
        if self.args.fp16:
            sample = utils.apply_to_sample(apply_half, sample)
        return sample
    def _set_seed(self):
        # Set seed based on args.seed and the update number so that we get
        # reproducible results when resuming from checkpoints
        seed = self.args.seed + self.get_num_updates()
        torch.manual_seed(seed)
        if self.cuda:
            torch.cuda.manual_seed(seed)
    def _sync_stats(self):
        # Return True if it's using multiple GPUs and DDP or multiple GPUs with
        # BMUF and it's a bmuf sync with warmup iterations completed before.
        return self.data_parallel_world_size > 1 and (
            (not self.args.use_bmuf)
            or (
                self.args.use_bmuf
                and (self.get_num_updates() + 1) % self.args.global_sync_iter == 0
                and (self.get_num_updates() + 1) > self.args.warmup_iterations
            )
        )
    def _log_oom(self, exc):
        """Log an out-of-memory exception plus per-device CUDA memory stats."""
        msg = "OOM: Ran out of memory with exception: {}".format(exc)
        logger.warning(msg)
        if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
            for device_idx in range(torch.cuda.device_count()):
                logger.warning(torch.cuda.memory_summary(device=device_idx))
        sys.stderr.flush()
    def _aggregate_logging_outputs(
        self,
        logging_outputs: List[Dict[str, Any]],
        *extra_stats_to_sum,
        ignore=False,
    ):
        # Dispatch to the fast (all_reduce of scalars) or general
        # (all_gather of pickled objects) synchronization path.
        if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()):
            return self._fast_stat_sync_sum(
                logging_outputs, *extra_stats_to_sum, ignore=ignore
            )
        else:
            return self._all_gather_list_sync(
                logging_outputs, *extra_stats_to_sum, ignore=ignore
            )
    def _all_gather_list_sync(
        self,
        logging_outputs: List[Dict[str, Any]],
        *extra_stats_to_sum,
        ignore=False,
    ):
        """
        Sync logging outputs across workers. all_gather_list_sync is
        suitable when logging outputs are complex types.
        """
        if ignore:
            logging_outputs = []
        results = list(zip(
            *distributed_utils.all_gather_list(
                [logging_outputs] + list(extra_stats_to_sum),
                max_size=getattr(self.args, 'all_gather_list_size', 16384),
                group=self.data_parallel_process_group,
            )
        ))
        logging_outputs, extra_stats_to_sum = results[0], results[1:]
        logging_outputs = list(chain.from_iterable(logging_outputs))
        extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum]
        return logging_outputs, extra_stats_to_sum
    def _fast_stat_sync_sum(
        self,
        logging_outputs: List[Dict[str, Any]],
        *extra_stats_to_sum,
        ignore=False,
    ):
        """
        Sync logging outputs across workers. fast_stat_sync_sum is
        faster than all_gather_list_sync, but is only suitable when
        logging outputs are scalars and can be summed. Note that
        *logging_outputs* cannot contain any nested dicts/lists.
        """
        data = {}
        for i, stat in enumerate(extra_stats_to_sum):
            data['extra_stats_' + str(i)] = stat
        if len(logging_outputs) > 0:
            log_keys = list(logging_outputs[0].keys())
            for k in log_keys:
                if not ignore:
                    v = sum(log[k] for log in logging_outputs if k in log)
                else:
                    # Contribute zeros (on the right device) when ignoring.
                    v = logging_outputs[0][k]
                    v = torch.zeros_like(v) if torch.is_tensor(v) else 0
                data['logging_outputs_' + k] = v
        else:
            log_keys = None
        data = distributed_utils.all_reduce_dict(
            data,
            device=self.device,
            group=self.data_parallel_process_group
        )
        extra_stats_to_sum = [
            data['extra_stats_' + str(i)] for i in range(len(extra_stats_to_sum))
        ]
        if log_keys is not None:
            logging_outputs = [{k: data['logging_outputs_' + k] for k in log_keys}]
        else:
            logging_outputs = []
        return logging_outputs, extra_stats_to_sum
    def _check_grad_norms(self, grad_norm):
        """Check that grad norms are consistent across workers."""
        if self._grad_norm_buf is not None:
            self._grad_norm_buf.zero_()
            self._grad_norm_buf[self.data_parallel_rank] = grad_norm
            distributed_utils.all_reduce(self._grad_norm_buf, group=self.data_parallel_process_group)
            # Exact-equality comparison: any divergence between replicas is fatal.
            if not (self._grad_norm_buf == self._grad_norm_buf[0]).all():
                raise RuntimeError(
                    "Fatal error: gradients are inconsistent between workers. "
                    "Try --ddp-backend=no_c10d."
                )
    def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None):
        """Reduce per-batch logging outputs into scalar metrics and return them."""
        if grad_norm is not None:
            metrics.log_speed("ups", 1., priority=100, round=2)
            metrics.log_scalar("gnorm", grad_norm, priority=400, round=3)
            if self.args.clip_norm > 0:
                # "clip" logs 100 when this step's grad norm exceeded the
                # threshold, else 0 (averages to a clip percentage).
                metrics.log_scalar(
                    "clip",
                    torch.where(
                        grad_norm > self.args.clip_norm,
                        grad_norm.new_tensor(100),
                        grad_norm.new_tensor(0),
                    ),
                    priority=500,
                    round=1,
                )
        with metrics.aggregate() as agg:
            if logging_outputs is not None:
                self.task.reduce_metrics(logging_outputs, self.get_criterion())
            # support legacy interface
            logging_output = agg.get_smoothed_values()
            logging_output["sample_size"] = sample_size
            for key_to_delete in ["ppl", "wps", "wpb", "bsz"]:
                if key_to_delete in logging_output:
                    del logging_output[key_to_delete]
            return logging_output
| 29,348 | 36.626923 | 101 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/modules/transformer_sentence_encoder_layer.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import utils
from fairseq.modules import (
LayerNorm,
MultiheadAttention,
)
class TransformerSentenceEncoderLayer(nn.Module):
    """
    Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
    models.

    The layer consists of multi-head self-attention followed by a two-layer
    position-wise feed-forward network; each sub-block applies LayerNorm
    after its residual connection (post-LN).
    """
    def __init__(
        self,
        embedding_dim: int = 768,
        ffn_embedding_dim: int = 3072,
        num_attention_heads: int = 8,
        dropout: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        activation_fn: str = 'relu',
        export: bool = False,
    ) -> None:
        super().__init__()
        # Initialize parameters
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        # Initialize blocks
        self.activation_fn = utils.get_activation_fn(activation_fn)
        self.self_attn = MultiheadAttention(
            self.embedding_dim,
            num_attention_heads,
            dropout=attention_dropout,
            add_bias_kv=False,
            add_zero_attn=False,
            self_attention=True
        )
        # layer norm associated with the self attention layer
        self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
        self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
        # layer norm associated with the position wise feed-forward NN
        self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)
    def forward(
        self,
        x: torch.Tensor,
        self_attn_mask: torch.Tensor = None,
        self_attn_padding_mask: torch.Tensor = None,
    ):
        """
        LayerNorm is applied either before or after the self-attention/ffn
        modules similar to the original Transformer implementation.
        (This implementation applies it after each residual add.)

        Returns a tuple of (output, attn); attn is whatever the attention
        module returns with ``need_weights=False``.
        """
        # --- self-attention sub-block ---
        residual = x
        x, attn = self.self_attn(
            query=x,
            key=x,
            value=x,
            key_padding_mask=self_attn_padding_mask,
            need_weights=False,
            attn_mask=self_attn_mask,
        )
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.self_attn_layer_norm(x)
        # --- position-wise feed-forward sub-block ---
        residual = x
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = residual + x
        x = self.final_layer_norm(x)
        return x, attn
| 2,830 | 30.10989 | 80 | py |
BIFI | BIFI-main/utils/fairseq/fairseq/modules/learned_positional_embedding.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn as nn
from fairseq import utils
class LearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    Padding ids are ignored by either offsetting based on padding_idx
    or by setting padding_idx to None and ensuring that the appropriate
    position ids are passed to the forward function.
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: int,
    ):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.onnx_trace = False
        # Entries up to and including padding_idx are reserved, so the number
        # of usable positions shrinks accordingly.
        self.max_positions = (
            self.num_embeddings
            if self.padding_idx is None
            else self.num_embeddings - self.padding_idx - 1
        )

    def forward(self, input, incremental_state=None, positions=None):
        """Input is expected to be of size [bsz x seqlen]."""
        assert positions is None or self.padding_idx is None, (
            "If positions is pre-computed then padding_idx should not be set."
        )
        if positions is None:
            if incremental_state is not None:
                # Single decoding step: every sequence sits at the same
                # position. The int() cast avoids ONNX-export issues.
                step_position = int(self.padding_idx + input.size(1))
                positions = input.data.new(1, 1).fill_(step_position)
            else:
                positions = utils.make_positions(
                    input, self.padding_idx, onnx_trace=self.onnx_trace,
                )
        return super().forward(positions)
| 1,826 | 37.0625 | 94 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.