code stringlengths 101 5.91M |
|---|
def test_digits_greedi_ln():
    """GreeDi selection (lazy outer / naive inner optimizer) on the digits
    data must reproduce the stored reference ranking and gains."""
    kwds = {'optimizer1': 'lazy', 'optimizer2': 'naive'}
    selector = MaxCoverageSelection(100, optimizer='greedi',
                                    optimizer_kwds=kwds, random_state=0)
    selector.fit(X_digits)
    # Only the first two picks are pinned by the reference fixtures.
    assert_array_equal(selector.ranking[:2], digits_greedi_ranking[:2])
    assert_array_almost_equal(selector.gains[:2], digits_greedi_gains[:2], 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
def test_digits_cosine_greedi_ln_object():
    """Passing a pre-built GreeDi optimizer object (instead of a string)
    must match the stored cosine-similarity reference results."""
    opt = GreeDi(optimizer1='lazy', optimizer2='naive', random_state=0)
    selector = SaturatedCoverageSelection(100, 'cosine', optimizer=opt)
    selector.fit(X_digits)
    # Reference fixtures pin the first two selections only.
    assert_array_equal(selector.ranking[:2], digits_cosine_greedi_ranking[:2])
    assert_array_almost_equal(selector.gains[:2], digits_cosine_greedi_gains[:2], 4)
    assert_array_almost_equal(selector.subset, X_digits[selector.ranking])
class TFElectraForQuestionAnswering(metaclass=DummyObject):
    """Import-time placeholder used when TensorFlow is not installed.

    Any attempt to instantiate it raises a helpful error via
    ``requires_backends`` instead of an opaque ImportError.
    """
    # Backends this dummy stands in for.
    _backends = ['tf']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['tf'])
def diapreresnet1001_cifar10(num_classes=10, **kwargs):
    """DIA-PreResNet-1001 (bottleneck blocks) configured for CIFAR-10.

    Args:
        num_classes: number of output classes (10 for CIFAR-10).
        **kwargs: forwarded to ``get_diapreresnet_cifar``.
    """
    return get_diapreresnet_cifar(num_classes=num_classes, blocks=1001, bottleneck=True, model_name='diapreresnet1001_cifar10', **kwargs)
def generate_corpus_4_elmo_test():
    """Export pre-processed PHEME source tweets as a plain-text ELMo corpus.

    Loads every known per-event CSV under the (hard-coded) PHEME training
    directory, stacks the rows of all events into one matrix, normalises
    each tweet text and writes one tweet per line to
    ``output/elmo/pheme_source_tweet_corpus.txt``.
    """
    print('')
    # NOTE(review): Windows-specific absolute path — parameterise this if
    # the script ever needs to run elsewhere.
    dataset_dir = os.path.join('C:\\Data\\NLP-corpus\\PHEME-dataset', 'pheme_training')
    print(os.path.exists(dataset_dir))
    all_test_dataset_path = load_files_from_dataset_dir(dataset_dir)
    all_events = {'charliehebdo', 'ebola-essien', 'ferguson', 'germanwings', 'gurlitt', 'ottawashooting', 'prince-toronto', 'sydneysiege', 'putinmissing'}
    # Keep only files whose leading "<event>-..." token names a known event.
    all_set_path = [os.path.join(dataset_dir, name) for name in all_test_dataset_path if (name.split('-')[0] in all_events)]
    print('all event data set_path: ', all_set_path)
    # Collect per-event matrices first and concatenate once: repeated
    # np.append copies the whole array each iteration (quadratic).
    event_matrices = []
    for event_set_file in all_set_path:
        df = load_matrix_from_csv(event_set_file, header=0, start_col_index=0, end_col_index=4)
        print(('dataset loaded from %s' % event_set_file))
        print('current event set size: ', df[:].shape)
        event_matrices.append(df[:])
    X = np.concatenate(event_matrices, axis=0)
    print('all PHEME source tweets are loaded: ', X.shape)
    print('Export PHEME corpus: ')
    pheme_data_output_dir = os.path.join(os.path.dirname(__file__), '..', '..', 'output', 'elmo')
    # Fix: create the directory idempotently instead of swallowing every
    # exception — the old ``except Exception: print()`` hid real failures
    # (e.g. permission errors) as well as the benign "already exists" case.
    os.makedirs(pheme_data_output_dir, exist_ok=True)
    pheme_source_tweet_corpus_path = os.path.join(pheme_data_output_dir, 'pheme_source_tweet_corpus.txt')
    with open(pheme_source_tweet_corpus_path, mode='w', encoding='utf-8') as outputfile:
        for row in X[:]:
            # Column 2 holds the raw tweet text.
            normed_text_tokens = preprocessing_tweet_text(row[2])
            # Skip tweets that normalise to nothing.
            if (len(normed_text_tokens) > 0):
                outputfile.write(('%s\n' % ' '.join(normed_text_tokens)))
    print('done')
class Param():
    """Command-line configuration for speaker/listener training.

    Parses ``sys.argv`` at construction time; the parsed namespace is
    exposed as ``self.args`` and the chosen optimizer name is resolved to
    a ``torch.optim`` class on ``self.args.optimizer``.
    """
    def __init__(self):
        self.parser = argparse.ArgumentParser(description='')
        # --- general experiment settings ---
        self.parser.add_argument('--iters', type=int, default=100000)
        self.parser.add_argument('--name', type=str, default='default')
        self.parser.add_argument('--train', type=str, default='speaker')
        self.parser.add_argument('--test_only', type=int, default=0)
        self.parser.add_argument('--test_obj', type=int, default=0)
        # --- sequence-length limits ---
        self.parser.add_argument('--maxInput', type=int, default=80, help='max input instruction')
        self.parser.add_argument('--maxDecode', type=int, default=120, help='max input instruction')
        self.parser.add_argument('--maxAction', type=int, default=20, help='Max Action sequence')
        self.parser.add_argument('--batchSize', type=int, default=64)
        self.parser.add_argument('--n_objects', type=int, default=36)
        # Label id that the loss ignores (mirrors PyTorch's ignore_index).
        self.parser.add_argument('--ignoreid', type=int, default=(- 100))
        self.parser.add_argument('--feature_size', type=int, default=2048)
        # --- checkpoint / data loading ---
        self.parser.add_argument('--loadOptim', action='store_const', default=False, const=True)
        self.parser.add_argument('--speaker', default=None)
        self.parser.add_argument('--listener', default=None)
        self.parser.add_argument('--load', default=None)
        self.parser.add_argument('--aug', default=None)
        # --- training behaviour ---
        self.parser.add_argument('--zeroInit', dest='zero_init', action='store_const', default=False, const=True)
        self.parser.add_argument('--mlWeight', dest='ml_weight', type=float, default=0.05)
        self.parser.add_argument('--teacherWeight', dest='teacher_weight', type=float, default=1.0)
        self.parser.add_argument('--accumulateGrad', dest='accumulate_grad', action='store_const', default=False, const=True)
        self.parser.add_argument('--features', type=str, default='imagenet')
        self.parser.add_argument('--featdropout', type=float, default=0.3)
        self.parser.add_argument('--selfTrain', dest='self_train', action='store_const', default=False, const=True)
        self.parser.add_argument('--candidates', type=int, default=1)
        self.parser.add_argument('--paramSearch', dest='param_search', action='store_const', default=False, const=True)
        self.parser.add_argument('--submit', type=int, default=0)
        self.parser.add_argument('--beam', action='store_const', default=False, const=True)
        self.parser.add_argument('--alpha', type=float, default=0.5)
        # --- optimisation hyper-parameters ---
        self.parser.add_argument('--optim', type=str, default='rms')
        self.parser.add_argument('--lr', type=float, default=0.0001, help='The learning rate')
        self.parser.add_argument('--decay', dest='weight_decay', type=float, default=0.0)
        self.parser.add_argument('--dropout', type=float, default=0.5)
        self.parser.add_argument('--feedback', type=str, default='sample', help='How to choose next position, one of ``teacher``, ``sample`` and ``argmax``')
        self.parser.add_argument('--teacher', type=str, default='final', help='How to get supervision. one of ``next`` and ``final`` ')
        self.parser.add_argument('--epsilon', type=float, default=0.1)
        # --- model architecture ---
        self.parser.add_argument('--rnnDim', dest='rnn_dim', type=int, default=512)
        self.parser.add_argument('--wemb', type=int, default=256)
        self.parser.add_argument('--aemb', type=int, default=64)
        self.parser.add_argument('--proj', type=int, default=512)
        self.parser.add_argument('--fast', dest='fast_train', action='store_const', default=False, const=True)
        self.parser.add_argument('--valid', action='store_const', default=False, const=True)
        self.parser.add_argument('--candidate', dest='candidate_mask', action='store_const', default=False, const=True)
        self.parser.add_argument('--bidir', type=bool, default=True)
        self.parser.add_argument('--encode', type=str, default='word')
        self.parser.add_argument('--subout', dest='sub_out', type=str, default='max')
        self.parser.add_argument('--attn', type=str, default='soft')
        self.parser.add_argument('--angleFeatSize', dest='angle_feat_size', type=int, default=128)
        self.parser.add_argument('--gamma', default=0.9, type=float)
        self.parser.add_argument('--normalize', dest='normalize_loss', default='total', type=str, help='batch or total')
        self.args = self.parser.parse_args()
        # Resolve the optimizer name to the torch.optim class used downstream.
        if (self.args.optim == 'rms'):
            print('Optimizer: Using RMSProp')
            self.args.optimizer = torch.optim.RMSprop
        elif (self.args.optim == 'adam'):
            print('Optimizer: Using Adam')
            self.args.optimizer = torch.optim.Adam
        elif (self.args.optim == 'sgd'):
            print('Optimizer: sgd')
            self.args.optimizer = torch.optim.SGD
        else:
            # NOTE(review): assert is stripped under ``python -O``; an explicit
            # ``raise ValueError`` would be safer for unknown optimizer names.
            assert False
class Nima():
    """NIMA aesthetic-score model on top of a Keras applications backbone.

    Holds the configuration, resolves the matching
    ``tensorflow.keras.applications`` module, and builds/compiles the
    backbone + dropout + softmax head on demand.
    """

    def __init__(self, base_model_name, n_classes=10, learning_rate=0.001, dropout_rate=0, loss=earth_movers_distance, decay=0, weights='imagenet'):
        self.n_classes = n_classes
        self.base_model_name = base_model_name
        self.learning_rate = learning_rate
        self.dropout_rate = dropout_rate
        self.loss = loss
        self.decay = decay
        self.weights = weights
        self._get_base_module()

    def _get_base_module(self):
        """Resolve the keras.applications module for the chosen backbone."""
        # Camel-case names whose module name is not simply .lower().
        irregular = {
            'InceptionV3': 'inception_v3',
            'InceptionResNetV2': 'inception_resnet_v2',
        }
        module_suffix = irregular.get(self.base_model_name, self.base_model_name.lower())
        self.base_module = importlib.import_module('tensorflow.keras.applications.' + module_suffix)

    def build(self):
        """Assemble backbone + dropout + softmax head into ``self.nima_model``."""
        backbone_cls = getattr(self.base_module, self.base_model_name)
        self.base_model = backbone_cls(input_shape=(224, 224, 3), weights=self.weights, include_top=False, pooling='avg')
        head = Dropout(self.dropout_rate)(self.base_model.output)
        head = Dense(units=self.n_classes, activation='softmax')(head)
        self.nima_model = Model(self.base_model.inputs, head)

    def compile(self):
        """Compile with Adam at the configured learning rate and decay."""
        self.nima_model.compile(optimizer=Adam(lr=self.learning_rate, decay=self.decay), loss=self.loss)

    def preprocessing_function(self):
        """Return the backbone-specific input preprocessing function."""
        return self.base_module.preprocess_input
def get_mv_mean_var(param_tuple):
    """Average the precomputed per-view (mean, var) statistics.

    Args:
        param_tuple: hashable configuration key (tuple of (name, value)
            pairs) selecting one of the precomputed stat tables.

    Returns:
        (mean, var): each the arithmetic average over the stored entries.
    """
    # Precomputed per-configuration statistics; one (mean, var) pair per view group.
    lookup = {
        (('dataset', 'object'), ('views', 6), ('resolution', 128), ('trans', -1.4),
         ('size', 1), ('normalize', False), ('norm_pc', True)):
            [(0., 0.0615424), (0., 0.), (0., 0.), (0.044222, 0.)],
        (('dataset', 'modelnet'), ('views', 6), ('resolution', 128), ('trans', -1.4),
         ('size', 1), ('normalize', False), ('norm_pc', True)):
            [(0., 0.), (0., 0.), (0., 0.), (0., 0.)],
    }
    stats = lookup[param_tuple]
    means, variances = zip(*stats)
    return (sum(means) / len(means), sum(variances) / len(variances))
_module
class TTAReformat(object):
    """Repackage a voxelised lidar sample (plus optional test-time-augmentation
    variants) into flat data bundles for the collate/training pipeline."""

    def __init__(self, cfg, **kwargs):
        # Whether to emit a list of TTA bundles instead of a single bundle.
        self.tta_flag = cfg.get('tta_flag', False)
        # Number of TTA variants expected under res['lidar']; must be > 1
        # when tta_flag is set.
        self.num_tta_tranforms = cfg.get('num_tta_tranforms', (- 1))

    def __call__(self, res, info):
        meta = res['metadata']
        points = res['lidar']['points']
        voxels = res['lidar']['voxels']
        all_points = res['lidar']['all_points']
        # Flatten the nested voxel dict into one bundle.
        data_bundle = dict(metadata=meta, points=points, voxels=voxels['voxels'], shape=voxels['shape'], num_points=voxels['num_points'], num_voxels=voxels['num_voxels'], coordinates=voxels['coordinates'], all_points=all_points)
        if (res['mode'] == 'train'):
            # Training additionally carries the supervision targets.
            data_bundle.update(res['lidar']['targets'])
        elif (res['mode'] == 'val'):
            data_bundle.update(dict(metadata=meta))
        if self.tta_flag:
            # First entry is the untransformed sample; the rest are the
            # 'tta_<i>_points'/'tta_<i>_voxels' variants (no all_points).
            data_bundle_list = [data_bundle]
            assert (self.num_tta_tranforms > 1)
            for i in range(1, self.num_tta_tranforms):
                point_key_i = ('tta_%s_points' % i)
                voxel_key_i = ('tta_%s_voxels' % i)
                tta_points = res['lidar'][point_key_i]
                tta_voxels = res['lidar'][voxel_key_i]
                tta_data_bundle = dict(metadata=meta, points=tta_points, voxels=tta_voxels['voxels'], shape=tta_voxels['shape'], num_points=tta_voxels['num_points'], num_voxels=tta_voxels['num_voxels'], coordinates=tta_voxels['coordinates'])
                data_bundle_list.append(tta_data_bundle)
            return (data_bundle_list, info)
        return (data_bundle, info)
class JamesSteinEncoderTransformer(AutotabularPreprocessingAlgorithm):
    """Categorical-feature transformer wrapping the James-Stein encoder."""

    def __init__(self, cols=None, random_state: Optional[np.random.RandomState]=None):
        self.cols = cols
        self.random_state = random_state
        # Fix: initialise so transform() before fit() raises the intended
        # NotImplementedError instead of an AttributeError.
        self.preprocessor = None

    def fit(self, X: PIPELINE_DATA_DTYPE, y: Optional[PIPELINE_DATA_DTYPE]=None) -> 'JamesSteinEncoderTransformer':
        """Fit the underlying JSEncoder on X (y is required by the encoder)."""
        self.preprocessor = JSEncoder(cols=self.cols)
        self.preprocessor.fit(X, y)
        return self

    def transform(self, X: PIPELINE_DATA_DTYPE) -> PIPELINE_DATA_DTYPE:
        """Encode X; raises NotImplementedError when called before fit()."""
        if (self.preprocessor is None):
            raise NotImplementedError()
        return self.preprocessor.transform(X)

    # Fix: these two were plain functions, so calling them on an instance
    # bound `self` to `dataset_properties`. @staticmethod keeps class-level
    # calls working and makes instance-level calls correct too.
    @staticmethod
    def get_properties(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> Dict[(str, Optional[Union[(str, int, bool, Tuple)]])]:
        return {'shortname': 'JamesSteinEncoderTransformer', 'name': 'JamesSteinEncoder Transformer', 'handles_regression': False, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': True, 'handles_multioutput': True, 'handles_sparse': True, 'handles_dense': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (INPUT,)}

    @staticmethod
    def get_hyperparameter_search_space(dataset_properties: Optional[DATASET_PROPERTIES_TYPE]=None) -> ConfigurationSpace:
        """Encoder exposes no tunable hyperparameters."""
        return ConfigurationSpace()
class TarDataset(Dataset):
    """Image dataset that reads samples straight out of a tar archive.

    Tar file handles are kept per DataLoader worker (keyed by worker id in
    ``self.tar_obj``) because a tarfile object cannot be shared safely
    across worker processes; ``__getstate__`` drops the handles so the
    dataset can be pickled into workers, and each worker reopens lazily
    in ``get_file``.
    """

    def __init__(self, archive, transform=to_tensor, extensions=('.png', '.jpg', '.jpeg'), is_valid_file=None):
        # ``archive`` may be a path, or another TarDataset whose member
        # index is reused (cheap re-filtering without rescanning the tar).
        if (not isinstance(archive, TarDataset)):
            worker = get_worker_info()
            worker = (worker.id if worker else None)
            self.tar_obj = {worker: tarfile.open(archive)}
            self.archive = archive
            # Sort by name for a deterministic sample order.
            members = sorted(self.tar_obj[worker].getmembers(), key=(lambda m: m.name))
            self.members_by_name = {m.name: m for m in members}
        else:
            self.members_by_name = archive.members_by_name
            self.archive = archive.archive
            # Handles are opened lazily per worker in get_file().
            self.tar_obj = {}
        self.filter_samples(is_valid_file, extensions)
        self.transform = transform

    def filter_samples(self, is_valid_file=None, extensions=('.png', '.jpg', '.jpeg')):
        """Populate self.samples with member names accepted by the predicate
        (default: regular files with one of the given extensions)."""
        if (is_valid_file is None):
            def is_valid_file(m):
                return (m.isfile() and m.name.lower().endswith(extensions))
        self.samples = [m.name for m in self.members_by_name.values() if is_valid_file(m)]

    def __getitem__(self, index):
        """Return the index-th image as RGB, passed through self.transform."""
        image = self.get_image(self.samples[index], pil=True)
        image = image.convert('RGB')
        if self.transform:
            image = self.transform(image)
        return image

    def __len__(self):
        return len(self.samples)

    def get_image(self, name, pil=False):
        """Load one member as a PIL image (pil=True) or a tensor."""
        image = Image.open(BytesIO(self.get_file(name).read()))
        if pil:
            return image
        return to_tensor(image)

    def get_text_file(self, name, encoding='utf-8'):
        """Read a member and decode it as text."""
        return self.get_file(name).read().decode(encoding)

    def get_file(self, name):
        """Return a file object for a member, using this worker's tar handle
        (opened on first use)."""
        worker = get_worker_info()
        worker = (worker.id if worker else None)
        if (worker not in self.tar_obj):
            self.tar_obj[worker] = tarfile.open(self.archive)
        return self.tar_obj[worker].extractfile(self.members_by_name[name])

    def __del__(self):
        # Close whatever handles this process opened.
        for o in self.tar_obj.values():
            o.close()

    def __getstate__(self):
        # Drop open tar handles: they are not picklable and are per-process.
        state = dict(self.__dict__)
        state['tar_obj'] = {}
        return state
def dsrla_mobilenetv2_k6_eca(eca=True):
    """Build a dsRLA-MobileNetV2 with 6 RLA channels.

    Args:
        eca: enable the ECA attention module.
    """
    print('Constructing dsrla_mobilenetv2_k6_eca......')
    return dsRLA_MobileNetV2(rla_channel=6, ECA=eca)
def test_loader_func(config, batch_size):
    """Return only the test-split loader produced by load_dataset."""
    _train_loader, test_loader, _extra_loader = load_dataset(config['data_dir'], batch_size)
    return test_loader
_model
def resnetv2_101x3_bitm_in21k(pretrained=False, **kwargs):
    """ResNetV2-101 with 3x width (BiT-M), ImageNet-21k head (21843 classes).

    ``num_classes`` may be overridden via kwargs; all other kwargs are
    forwarded to ``_create_resnetv2``.
    """
    return _create_resnetv2('resnetv2_101x3_bitm_in21k', pretrained=pretrained, num_classes=kwargs.pop('num_classes', 21843), layers=[3, 4, 23, 3], width_factor=3, stem_type='fixed', **kwargs)
class TestStat(BasePythonTest):
    """End-to-end tests for generated code: each case compiles a small
    linear-algebra DSL snippet and checks the numpy, (optional) MATLAB,
    and Eigen/C++ backends agree on the result."""

    def test_return_value(self):
        """a*b with scalar operands returns their product."""
        la_str = 'a b\n where\n a: scalar\n b: scalar'
        func_info = self.gen_func_info(la_str)
        self.assertEqual(func_info.numpy_func(3, 2).ret, 6)
        if TEST_MATLAB:
            mat_func = getattr(mat_engine, func_info.mat_func_name, None)
            self.assertEqual(np.array(mat_func(3, 2)['ret']), 6)
        # Compile and run the generated Eigen code through cppyy.
        cppyy.include(func_info.eig_file_name)
        func_list = ['bool {}(){{'.format(func_info.eig_test_name), ' if({}(3, 2).ret == 6){{'.format(func_info.eig_func_name), ' return true;', ' }', ' return false;', '}']
        cppyy.cppdef('\n'.join(func_list))
        self.assertTrue(getattr(cppyy.gbl, func_info.eig_test_name)())

    def test_no_where_block(self):
        """A bare assignment without a `where` block still compiles."""
        la_str = 'A = 2 + 3'
        func_info = self.gen_func_info(la_str)
        self.assertEqual(func_info.numpy_func().A, 5)
        if TEST_MATLAB:
            mat_func = getattr(mat_engine, func_info.mat_func_name, None)
            self.assertEqual(np.array(mat_func()['A']), 5)
        cppyy.include(func_info.eig_file_name)
        func_list = ['bool {}(){{'.format(func_info.eig_test_name), ' double B = {}().A;'.format(func_info.eig_func_name), ' return (B == 5);', '}']
        cppyy.cppdef('\n'.join(func_list))
        self.assertTrue(getattr(cppyy.gbl, func_info.eig_test_name)())

    def test_interleaved_stat(self):
        """Declarations interleaved with statements: index into A works."""
        la_str = 'A: R^(nn)\n a = A_1,1\n n: Z'
        func_info = self.gen_func_info(la_str)
        A = np.array([[2, 5], [0, 12]])
        # A_1,1 is 1-based in the DSL, i.e. A[0][0] == 2.
        self.assertEqual(func_info.numpy_func(A, 2).a, 2)
        if TEST_MATLAB:
            mat_func = getattr(mat_engine, func_info.mat_func_name, None)
            self.assertEqual(np.array(mat_func(matlab.double(A.tolist()), 2)['a']), 2)
        cppyy.include(func_info.eig_file_name)
        func_list = ['bool {}(){{'.format(func_info.eig_test_name), ' Eigen::Matrix<double, 2, 2> A;', ' A << 2, 5, 0, 12;', ' double B = {}(A, 2).a;'.format(func_info.eig_func_name), ' return (B == 2);', '}']
        cppyy.cppdef('\n'.join(func_list))
        self.assertTrue(getattr(cppyy.gbl, func_info.eig_test_name)())
class _MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product attention: per-head Q/K/V projections,
    head split, attention, and head re-merge (no output projection here)."""

    def __init__(self, d_k, d_v, d_model, n_heads, dropout):
        super(_MultiHeadAttention, self).__init__()
        self.d_k = d_k
        self.d_v = d_v
        self.d_model = d_model
        self.n_heads = n_heads
        # Joint projections for all heads at once.
        self.w_q = Linear([d_model, (d_k * n_heads)])
        self.w_k = Linear([d_model, (d_k * n_heads)])
        self.w_v = Linear([d_model, (d_v * n_heads)])
        self.attention = ScaledDotProductAttention(d_k, dropout)

    def forward(self, q, k, v, attn_mask):
        """Returns (context, attn): merged per-head context vectors and the
        attention weights. ``attn_mask`` may be None or a (batch, q_len,
        k_len) mask that is broadcast across heads."""
        b_size = q.size(0)
        # (batch, len, n_heads*d) -> (batch, n_heads, len, d)
        q_s = self.w_q(q).view(b_size, (- 1), self.n_heads, self.d_k).transpose(1, 2)
        k_s = self.w_k(k).view(b_size, (- 1), self.n_heads, self.d_k).transpose(1, 2)
        v_s = self.w_v(v).view(b_size, (- 1), self.n_heads, self.d_v).transpose(1, 2)
        # Fix: `if attn_mask:` evaluated the tensor's truth value, which
        # raises RuntimeError for masks with more than one element.
        if (attn_mask is not None):
            # Replicate the mask across the head dimension.
            attn_mask = attn_mask.unsqueeze(1).repeat(1, self.n_heads, 1, 1)
        (context, attn) = self.attention(q_s, k_s, v_s, attn_mask=attn_mask)
        # (batch, n_heads, len, d_v) -> (batch, len, n_heads*d_v)
        context = context.transpose(1, 2).contiguous().view(b_size, (- 1), (self.n_heads * self.d_v))
        return (context, attn)
def main():
    """Webcam demo: run the detector on camera frames until ESC is pressed."""
    parser = argparse.ArgumentParser(description='PyTorch Object Detection Webcam Demo')
    parser.add_argument('--config-file', default='../configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml', metavar='FILE', help='path to config file')
    parser.add_argument('--confidence-threshold', type=float, default=0.7, help='Minimum score for the prediction to be shown')
    parser.add_argument('--min-image-size', type=int, default=224, help='Smallest size of the image to feed to the model. Model was trained with 800, which gives best results')
    parser.add_argument('--show-mask-heatmaps', dest='show_mask_heatmaps', help='Show a heatmap probability for the top masks-per-dim masks', action='store_true')
    parser.add_argument('--masks-per-dim', type=int, default=2, help='Number of heatmaps per dimension to show')
    parser.add_argument('opts', help='Modify model config options using the command-line', default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()
    # Layer CLI overrides on top of the config file, then freeze.
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    coco_demo = COCODemo(cfg, confidence_threshold=args.confidence_threshold, show_mask_heatmaps=args.show_mask_heatmaps, masks_per_dim=args.masks_per_dim, min_image_size=args.min_image_size)
    cam = cv2.VideoCapture(0)
    try:
        while True:
            start_time = time.time()
            (ret_val, img) = cam.read()
            composite = coco_demo.run_on_opencv_image(img)
            print('Time: {:.2f} s / img'.format((time.time() - start_time)))
            cv2.imshow('COCO detections', composite)
            # ESC (27) exits the loop.
            if (cv2.waitKey(1) == 27):
                break
    finally:
        # Fix: release the capture device — it was previously leaked.
        cam.release()
        cv2.destroyAllWindows()
def train(train_loader, model, criterion, optimizer, epoch, use_cuda):
    """Run one training epoch and return (average loss, average top-1).

    Relies on the module-level ``args`` (for ``train_batch``) and on the
    ``AverageMeter``/``Bar``/``accuracy`` helpers defined elsewhere in
    this file. ``epoch`` is accepted for interface symmetry but unused.
    """
    model.train()
    torch.set_grad_enabled(True)
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    end = time.time()
    bar = Bar('Processing', max=len(train_loader))
    for (batch_idx, (inputs, targets)) in enumerate(train_loader):
        batch_size = inputs.size(0)
        # Skip short final batches — presumably to keep batch statistics
        # (e.g. BatchNorm) stable; confirm against training config.
        if (batch_size < args.train_batch):
            continue
        data_time.update((time.time() - end))
        if use_cuda:
            (inputs, targets) = (inputs.cuda(), targets.cuda())
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        (prec1, prec5) = accuracy(outputs.data, targets.data, topk=(1, 5))
        # Meters are weighted by batch size so .avg is per-sample.
        losses.update(loss.data.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch_time.update((time.time() - end))
        end = time.time()
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(batch=(batch_idx + 1), size=len(train_loader), data=data_time.val, bt=batch_time.val, total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, top1=top1.avg, top5=top5.avg)
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def Backbone_ResNeXt50_in3():
    """Split a pretrained ResNeXt-50 into five feature stages.

    Returns modules producing features at strides 2, 4, 8, 16 and 32
    respectively, for use as a multi-scale backbone.
    """
    backbone = l_resnext50(pretrained=True)
    children = list(backbone.children())
    # First three children down to stride 2, next two down to stride 4.
    stage_2 = nn.Sequential(*children[:3])
    stage_4 = nn.Sequential(*children[3:5])
    return (stage_2, stage_4, backbone.layer2, backbone.layer3, backbone.layer4)
class SVI_Base(nn.Module):
    """Base layer for stochastic variational inference (Bayes-by-backprop).

    Holds mean (mu) and pre-sigma (rho) variational parameters for the
    weights — and optionally biases — plus the prior log-pdf and the noise
    distribution used by subclasses for reparameterised sampling. The
    sampled ``weight``/``bias`` tensors themselves are plain buffers, not
    Parameters.
    """

    def __init__(self, weight_shape, bias_shape, variational_distribution, prior, use_bias):
        super(SVI_Base, self).__init__()
        self.data_type = torch.float32
        # rho parameterises sigma through a softplus (see _rho_to_sigma).
        self.weight_rhos = nn.Parameter(torch.empty(weight_shape, dtype=self.data_type))
        self.weight_mus = nn.Parameter(torch.empty(weight_shape, dtype=self.data_type))
        # Holds sampled weights; filled by subclasses during forward.
        self.weight = Variable(torch.empty(weight_shape, dtype=self.data_type))
        self.use_bias = use_bias
        if use_bias:
            self.bias_rhos = nn.Parameter(torch.empty(bias_shape, dtype=self.data_type))
            self.bias_mus = nn.Parameter(torch.empty(bias_shape, dtype=self.data_type))
            self.bias = Variable(torch.empty(bias_shape, dtype=self.data_type))
        else:
            # Register as None so named_parameters() stays consistent.
            self.register_parameter('bias_rhos', None)
            self.register_parameter('bias_mus', None)
            self.register_parameter('bias', None)
        assert hasattr(distributions, prior['name']), 'The prior named in config is not defined in utils.distributions'
        # Build the prior's constructor kwargs; precompute log(2*pi) once
        # and move it (and the prior) onto the GPU when available.
        prior_args = copy.deepcopy(prior)
        prior_args['log2pi'] = torch.log(Variable(torch.from_numpy(np.array((2.0 * np.pi))).type(self.data_type), requires_grad=False))
        prior_args['device'] = 'cpu'
        if torch.cuda.is_available():
            prior_args['log2pi'] = prior_args['log2pi'].cuda()
            prior_args['device'] = 'cuda'
        self.prior_log_pdf = getattr(distributions, prior_args['name'])(**prior_args)
        assert hasattr(distributions, variational_distribution), 'The variational distribution is not defined in util.distributions'
        self.noise_distribution = getattr(distributions, variational_distribution)
        # While pretraining, the KL terms below are disabled (return 0).
        self.pretraining = False

    def _rho_to_sigma(self, rho):
        # Softplus keeps sigma strictly positive for any real rho.
        return torch.log((1 + torch.exp(rho)))

    def entropy(self):
        """Entropy term of the variational posterior (sum of log-sigmas);
        returns 0 while pretraining."""
        if (not self.pretraining):
            entropy = torch.sum(torch.log(self._rho_to_sigma(self.weight_rhos)))
            if self.use_bias:
                entropy += torch.sum(torch.log(self._rho_to_sigma(self.bias_rhos)))
            return entropy
        else:
            return 0

    def cross_entropy(self):
        """Monte-Carlo cross-entropy between posterior samples and the prior
        (mean over the sample/epsilon dimension); 0 while pretraining."""
        if (not self.pretraining):
            weight_log_prior_mean_over_epsilon = torch.mean(self.prior_log_pdf(self.weight), dim=0)
            cross_entropy = (- torch.sum(weight_log_prior_mean_over_epsilon))
            if self.use_bias:
                bias_log_prior_mean_over_epsilon = torch.mean(self.prior_log_pdf(self.bias), dim=0)
                cross_entropy -= torch.sum(bias_log_prior_mean_over_epsilon)
            return cross_entropy
        else:
            return 0

    def is_pretraining(self, pretraining_on):
        """Toggle pretraining mode (disables the KL terms above).

        NOTE(review): the constant return value 1 looks vestigial — confirm
        no caller depends on it.
        """
        self.pretraining = pretraining_on
        return 1
class MapImage(ImageAugmentor):
    """Augmentor that applies an arbitrary user-supplied function to each image."""

    def __init__(self, func):
        # func: callable taking an image array and returning the mapped image.
        self.func = func

    def _augment(self, img, _):
        # The second argument (augmentation params) is unused here.
        return self.func(img)
class TestQuantization(unittest.TestCase):
    """MSE-tuning quantization tests against two fake yaml configs."""

    # Fix: unittest invokes these as ``cls.setUpClass()`` / ``cls.tearDownClass()``;
    # without @classmethod the bare functions received no argument and the
    # whole test class errored before running.
    @classmethod
    def setUpClass(cls):
        cls.constant_graph = build_fake_model()
        cls.test_graph = create_test_graph()
        build_fake_yaml()
        build_fake_yaml2()

    @classmethod
    def tearDownClass(cls):
        # Clean up the generated configs and any saved artifacts.
        os.remove('fake_yaml.yaml')
        os.remove('fake_yaml2.yaml')
        shutil.rmtree('saved', ignore_errors=True)

    def test_run_mse_one_trial(self):
        """Single-trial MSE tuning produces a quantized graph."""
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('fake_yaml.yaml')
        dataset = quantizer.dataset('dummy', shape=(100, 3, 3, 1), label=True)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.model = self.constant_graph
        output_graph = quantizer.fit()
        self.assertNotEqual(output_graph, None)

    def test_run_mse_max_trials(self):
        """Multi-trial MSE tuning produces a quantized graph."""
        from neural_compressor.experimental import Quantization, common
        quantizer = Quantization('fake_yaml2.yaml')
        dataset = quantizer.dataset('dummy', shape=(1, 224, 224, 3), label=True)
        quantizer.eval_dataloader = common.DataLoader(dataset)
        quantizer.calib_dataloader = common.DataLoader(dataset)
        quantizer.model = self.test_graph
        output_graph = quantizer.fit()
        self.assertNotEqual(output_graph, None)
def load_embedding_npz(path):
    """Load an .npz embedding archive.

    Returns:
        (words, vals): the vocabulary as a list of str (UTF-8 decoded)
        and the embedding value matrix, unchanged.
    """
    archive = np.load(path)
    vocabulary = [token.decode('utf8') for token in archive['words']]
    return (vocabulary, archive['vals'])
def find(x, parents):
    """Return the root of x's set in a union-find forest.

    Uses path halving: each visited node is re-pointed at its grandparent,
    so ``parents`` is mutated and later lookups get shorter.
    """
    node = x
    while True:
        ancestor = parents[node]
        if ancestor == node:
            return node
        # Point the current node at its grandparent before moving up.
        parents[node] = parents[ancestor]
        node = ancestor
class IntDescriptor(NumDescriptor):
    """Numeric descriptor over a half-open integer interval [low, high)."""

    def contains_value(self, val):
        # Half-open: ``high`` itself is excluded.
        lo, hi = self.range
        return lo <= val < hi

    def sample(self):
        """Draw a uniform integer from [low, high)."""
        lo, hi = self.range
        # random.randint is inclusive on both ends, hence hi - 1.
        return random.randint(lo, hi - 1)
class ChannelGate(nn.Module):
    """Squeeze-and-excitation style channel gate.

    Global-average-pools the input, passes it through a small MLP
    (channels -> channels//reduction_ratio -> ... -> channels) and
    broadcasts the per-channel response back to the input's spatial shape.
    Note: no activation is applied to the output here; callers combine it
    with the input themselves.
    """

    def __init__(self, channels, reduction_ratio=16, num_layers=1):
        super(ChannelGate, self).__init__()
        mid_channels = channels // reduction_ratio
        self.pool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.init_fc = DenseBlock(in_features=channels, out_features=mid_channels)
        # Optional hidden layers fc1..fc{num_layers-1} at the bottleneck width.
        self.main_fcs = nn.Sequential()
        for idx in range(1, num_layers):
            self.main_fcs.add_module('fc{}'.format(idx), DenseBlock(in_features=mid_channels, out_features=mid_channels))
        self.final_fc = nn.Linear(in_features=mid_channels, out_features=channels)

    def forward(self, x):
        identity = x
        # (N, C, H, W) -> (N, C, 1, 1) -> (N, C)
        w = self.pool(x)
        w = w.view(w.size(0), (- 1))
        w = self.init_fc(w)
        w = self.main_fcs(w)
        w = self.final_fc(w)
        # Broadcast the per-channel response back over the spatial dims.
        return w.unsqueeze(2).unsqueeze(3).expand_as(identity)
def row_logloss(row, model):
    """Log-loss of one model's A/B/N probabilities for a single row.

    ``row`` holds the true one-hot labels in columns 'A'/'B'/'N' and the
    model's probabilities in '<model>-A'/'<model>-B'/'<model>-N'.
    """
    truth = np.array([row['A'], row['B'], row['N']]).reshape(1, -1)
    probs = np.array([row[model + '-A'], row[model + '-B'], row[model + '-N']]).reshape(1, -1)
    return log_loss(truth, probs)
def get_count_matrix(args, file_path):
    """Build a hashed n-gram count matrix over SQuAD-style JSON documents.

    Reads every JSON file under ``file_path``, joins each article's
    paragraph contexts into one document, counts hashed n-grams in a
    worker pool and returns ``(count_matrix, (DOC2IDX, doc_ids, doc_metas))``
    where ``count_matrix`` is (hash_size x n_docs) CSR. Also sets the
    module-level ``DOC2IDX`` (read by the worker ``count`` function).
    """
    global DOC2IDX
    doc_ids = {}
    doc_metas = {}
    nan_cnt = 0
    for filename in sorted(os.listdir(file_path)):
        print(filename)
        with open(os.path.join(file_path, filename), 'r') as f:
            articles = json.load(f)['data']
        for article in articles:
            # De-duplicate titles by appending a numeric suffix.
            title = article['title']
            kk = 0
            while (title in doc_ids):
                title += f'_{kk}'
                kk += 1
            doc_ids[title] = ' '.join([par['context'] for par in article['paragraphs']])
            # Copy article metadata, replacing NaN with the string 'NaN'
            # (``val == val`` is False only for NaN) and stripping the
            # paragraph contexts themselves.
            doc_meta = {}
            for (key, val) in article.items():
                if (key != 'paragraphs'):
                    doc_meta[key] = (val if (val == val) else 'NaN')
                else:
                    doc_meta[key] = []
                    for para in val:
                        para_meta = {}
                        for (para_key, para_val) in para.items():
                            if (para_key != 'context'):
                                para_meta[para_key] = (para_val if (para_val == para_val) else 'NaN')
                        doc_meta[key].append(para_meta)
            # Key metadata by pubmed id when present, else by title.
            if (not pd.isnull(article.get('pubmed_id', np.nan))):
                doc_metas[str(article['pubmed_id'])] = doc_meta
            else:
                nan_cnt += 1
                doc_metas[article['title']] = doc_meta
    DOC2IDX = {doc_id: i for (i, doc_id) in enumerate(doc_ids)}
    print('doc ids:', len(DOC2IDX))
    print('doc metas:', len(doc_metas), 'with nan', str(nan_cnt))
    tok_class = SimpleTokenizer
    # Workers receive the tokenizer class and the raw documents via the
    # pool initializer.
    workers = ProcessPool(args.num_workers, initializer=init, initargs=(tok_class, doc_ids))
    doc_ids = list(doc_ids.keys())
    logger.info('Mapping...')
    # COO triplets accumulated across batches.
    (row, col, data) = ([], [], [])
    step = max(int((len(doc_ids) / 10)), 1)
    batches = [doc_ids[i:(i + step)] for i in range(0, len(doc_ids), step)]
    _count = partial(count, args.ngram, args.hash_size)
    for (i, batch) in enumerate(batches):
        logger.info(((('-' * 25) + ('Batch %d/%d' % ((i + 1), len(batches)))) + ('-' * 25)))
        for (b_row, b_col, b_data) in workers.imap_unordered(_count, batch):
            row.extend(b_row)
            col.extend(b_col)
            data.extend(b_data)
    workers.close()
    workers.join()
    logger.info('Creating sparse matrix...')
    count_matrix = sp.csr_matrix((data, (row, col)), shape=(args.hash_size, len(doc_ids)))
    # Merge repeated (hash, doc) entries from hash collisions/batches.
    count_matrix.sum_duplicates()
    return (count_matrix, (DOC2IDX, doc_ids, doc_metas))
_function('flip')
class AutogradFlip(AutogradFunction):
    """Autograd support for ``flip``: flipping is self-inverse, so the
    backward pass flips the incoming gradient along the same dims."""

    def forward(ctx, input, dims):
        # NOTE(review): dims is stashed via save_for_backward even though it
        # is not a tensor in plain torch — presumably this project's ctx
        # accepts arbitrary values; confirm.
        ctx.save_for_backward(dims)
        return input.flip(dims)

    def backward(ctx, grad_output):
        (dims,) = ctx.saved_tensors
        return grad_output.flip(dims)
def default_init_weights(module, scale=1):
    """Kaiming-initialise conv/linear submodules; constant-init batch norms.

    Args:
        module: root module whose submodules are initialised in place.
        scale: multiplier applied to conv/linear weights after the Kaiming
            init (e.g. to down-scale residual branches).
    """
    for m in module.modules():
        if isinstance(m, nn.Conv2d):
            kaiming_init(m, a=0, mode='fan_in', bias=0)
            m.weight.data *= scale
        elif isinstance(m, nn.Linear):
            kaiming_init(m, a=0, mode='fan_in', bias=0)
            m.weight.data *= scale
        elif isinstance(m, _BatchNorm):
            # NOTE(review): the other branches pass the module itself, but
            # this passes m.weight — confirm constant_init expects a tensor
            # here (mm-series helpers usually take the module).
            constant_init(m.weight, val=1, bias=0)
def parse_arguments(argv=None):
    """Parse command-line arguments for the quantization script.

    Args:
        argv: optional list of argument strings; defaults to
            ``sys.argv[1:]``, preserving the original behaviour while
            making the function callable (and testable) programmatically.

    Returns:
        argparse.Namespace with ``input_model`` and ``output_model``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_model', type=str, required=False, default='ssd-12.onnx')
    parser.add_argument('--output_model', type=str, required=True)
    return parser.parse_args(argv)
class GATZinc(nn.Module):
    """Graph Attention Network regressor for the ZINC molecule dataset.

    Embeds atom types, stacks GAT layers (each followed by graph-size
    normalisation, BatchNorm and the activation), mean-pools node features
    over the graph and regresses a single value through a 2-layer head.
    Attention weights of every layer are kept in ``self.att_list``.
    """

    def __init__(self, g, num_layers, in_dim, num_hidden, heads, activation, feat_drop, attn_drop, negative_slope, residual, num_atom_type, num_bond_type):
        super(GATZinc, self).__init__()
        self.g = g
        self.num_layers = num_layers
        self.gat_layers = nn.ModuleList()
        self.BNs = nn.ModuleList()
        # NOTE(review): norm_layers is populated nowhere and never used.
        self.norm_layers = nn.ModuleList()
        self.activation = activation
        self.num_atom_type = num_atom_type
        # num_bond_type is stored but not used in this class.
        self.num_bond_type = num_bond_type
        # Atom-type id -> dense feature embedding.
        self.embed = nn.Embedding(num_atom_type, in_dim)
        # First GAT layer: no residual, no activation (applied in forward).
        self.gat_layers.append(GATConv(in_dim, num_hidden, heads[0], feat_drop, attn_drop, negative_slope, False, None))
        self.BNs.append(nn.BatchNorm1d((num_hidden * heads[0])))
        for l in range(1, num_layers):
            self.gat_layers.append(GATConv((num_hidden * heads[(l - 1)]), num_hidden, heads[l], feat_drop, attn_drop, negative_slope, residual, self.activation))
            self.BNs.append(nn.BatchNorm1d((num_hidden * heads[l])))
        # NOTE(review): regression head is sized from heads[-2] — this
        # assumes the head count of the last GAT layer matches heads[-2];
        # confirm against how ``heads`` is constructed by callers.
        hidden_dim = (num_hidden * heads[(- 2)])
        self.regressor1 = nn.Linear(hidden_dim, (hidden_dim // 2))
        self.regressor2 = nn.Linear((hidden_dim // 2), 1)

    def forward(self, x, e, snorm_n, snorm_e):
        """x: atom-type ids; snorm_n: per-node graph-size normalisation.
        ``e`` and ``snorm_e`` (edge inputs) are accepted but unused."""
        h = self.embed(x)
        self.att_list = []
        for l in range(self.num_layers):
            (h, att) = self.gat_layers[l](self.g, h)
            # Concatenate the per-head outputs.
            h = h.flatten(1)
            self.att_list.append(att)
            # Graph-size normalisation, then BN and activation.
            h = (h * snorm_n)
            h = self.BNs[l](h)
            h = self.activation(h)
        # Mean-pool node features per graph, then regress.
        self.g.ndata['h'] = h
        h = dgl.mean_nodes(self.g, 'h')
        h = self.activation(h)
        h = self.regressor1(h)
        h = torch.relu(h)
        logits = self.regressor2(h)
        return logits

    def get_factor(self):
        """Return the attention tensors collected during the last forward."""
        return self.att_list
class Bottleneck(nn.Module):
    """ResNet bottleneck: 1x1 reduce -> 3x3 (strided/dilated) -> 1x1 expand.

    Output channels are ``planes * expansion``; ``norm_layer`` is resolved
    from the enclosing module's scope. ``downsample`` projects the shortcut
    when shapes differ.
    """
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, previous_dilation=1):
        super().__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
        self.bn1 = norm_layer(planes)
        # 3x3 conv carries the stride and dilation (padding == dilation).
        self.conv2 = nn.Conv2d(planes, planes, 3, stride, dilation, dilation, bias=False)
        self.bn2 = norm_layer(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, 1, bias=False)
        self.bn3 = norm_layer(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.dilation = dilation
        self.stride = stride

    def forward(self, x):
        """Main path plus (optionally projected) shortcut, ReLU at the end."""
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))
        shortcut = x if self.downsample is None else self.downsample(x)
        y += shortcut
        return self.relu(y)
class TokenizerTesterMixin():
tokenizer_class = None
rust_tokenizer_class = None
test_rust_tokenizer = False
space_between_special_tokens = False
from_pretrained_kwargs = None
from_pretrained_filter = None
from_pretrained_vocab_key = 'vocab_file'
def setUp(self) -> None:
if self.test_rust_tokenizer:
tokenizers_list = [(self.rust_tokenizer_class, pretrained_name, (self.from_pretrained_kwargs if (self.from_pretrained_kwargs is not None) else {})) for pretrained_name in self.rust_tokenizer_class.pretrained_vocab_files_map[self.from_pretrained_vocab_key].keys() if ((self.from_pretrained_filter is None) or ((self.from_pretrained_filter is not None) and self.from_pretrained_filter(pretrained_name)))]
self.tokenizers_list = tokenizers_list[:1]
else:
self.tokenizers_list = []
with open(f'{get_tests_dir()}/fixtures/sample_text.txt', encoding='utf-8') as f_data:
self._data = f_data.read().replace('\n\n', '\n').strip()
self.tmpdirname = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def get_input_output_texts(self, tokenizer):
input_txt = self.get_clean_sequence(tokenizer)[0]
return (input_txt, input_txt)
def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[(str, list)]:
toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
toks = list(filter((lambda t: re.match('^[ a-zA-Z]+$', t[1])), toks))
toks = list(filter((lambda t: ([t[0]] == tokenizer.encode(t[1], add_special_tokens=False))), toks))
if ((max_length is not None) and (len(toks) > max_length)):
toks = toks[:max_length]
if ((min_length is not None) and (len(toks) < min_length) and (len(toks) > 0)):
while (len(toks) < min_length):
toks = (toks + toks)
toks_ids = [t[0] for t in toks]
output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
if ((' ' not in output_txt) and (len(toks_ids) > 1)):
output_txt = ((tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + ' ') + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False))
if with_prefix_space:
output_txt = (' ' + output_txt)
output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
return (output_txt, output_ids)
def get_tokenizers(self, fast=True, **kwargs) -> List[PreTrainedTokenizerBase]:
    """Return the slow tokenizer, plus the fast one when requested and supported."""
    tokenizers = [self.get_tokenizer(**kwargs)]
    if fast and self.test_rust_tokenizer:
        tokenizers.append(self.get_rust_tokenizer(**kwargs))
    return tokenizers
def get_tokenizer(self, **kwargs) -> PreTrainedTokenizer:
    """Instantiate the slow (python) tokenizer saved in the scratch dir."""
    tokenizer_cls = self.tokenizer_class
    return tokenizer_cls.from_pretrained(self.tmpdirname, **kwargs)
def get_rust_tokenizer(self, **kwargs) -> PreTrainedTokenizerFast:
    """Instantiate the fast (rust-backed) tokenizer saved in the scratch dir."""
    tokenizer_cls = self.rust_tokenizer_class
    return tokenizer_cls.from_pretrained(self.tmpdirname, **kwargs)
def convert_batch_encode_plus_format_to_encode_plus(batch_encode_plus_sequences):
return [{value: batch_encode_plus_sequences[value][i] for value in batch_encode_plus_sequences.keys()} for i in range(len(batch_encode_plus_sequences['input_ids']))]
def test_rust_tokenizer_signature(self):
    """The fast tokenizer's __init__ must accept `tokenizer_file`, defaulting to None."""
    if not self.test_rust_tokenizer:
        return
    params = inspect.signature(self.rust_tokenizer_class.__init__).parameters
    self.assertIn('tokenizer_file', params)
    self.assertIsNone(params['tokenizer_file'].default)
def test_tokenizer_slow_store_full_signature(self):
    """Every defaulted __init__ parameter of the slow tokenizer must appear in init_kwargs."""
    params = inspect.signature(self.tokenizer_class.__init__).parameters
    tokenizer = self.get_tokenizer()
    for parameter_name, parameter in params.items():
        if parameter.default != inspect.Parameter.empty:
            self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_tokenizer_fast_store_full_signature(self):
    """Every defaulted __init__ parameter of the fast tokenizer must appear in init_kwargs."""
    if not self.test_rust_tokenizer:
        return
    params = inspect.signature(self.rust_tokenizer_class.__init__).parameters
    tokenizer = self.get_rust_tokenizer()
    for parameter_name, parameter in params.items():
        if parameter.default != inspect.Parameter.empty:
            self.assertIn(parameter_name, tokenizer.init_kwargs)
def test_rust_and_python_full_tokenizers(self):
    """Slow and fast tokenizers must produce identical ids, with and without special tokens."""
    if not self.test_rust_tokenizer:
        return
    slow_tokenizer = self.get_tokenizer()
    fast_tokenizer = self.get_rust_tokenizer()
    sequence = self.get_input_output_texts(slow_tokenizer)[0]
    for add_special in (False, True):
        slow_ids = slow_tokenizer.encode(sequence, add_special_tokens=add_special)
        fast_ids = fast_tokenizer.encode(sequence, add_special_tokens=add_special)
        self.assertListEqual(slow_ids, fast_ids)
def test_tokenizers_common_properties(self):
    """All tokenizers must expose the standard special-token attributes and bookkeeping members."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            special_attrs = ['bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token']
            for attr in special_attrs:
                self.assertTrue(hasattr(tokenizer, attr))
                self.assertTrue(hasattr(tokenizer, attr + '_id'))
            self.assertTrue(hasattr(tokenizer, 'additional_special_tokens'))
            self.assertTrue(hasattr(tokenizer, 'additional_special_tokens_ids'))
            bookkeeping_attrs = ['model_max_length', 'init_inputs', 'init_kwargs']
            if not isinstance(tokenizer, PreTrainedTokenizerFast):
                # slow tokenizers also track tokens added at runtime
                bookkeeping_attrs += ['added_tokens_encoder', 'added_tokens_decoder']
            for attr in bookkeeping_attrs:
                self.assertTrue(hasattr(tokenizer, attr))
def test_save_and_load_tokenizer(self):
    # save_pretrained / from_pretrained must round-trip the tokenizer: same
    # encodings and vocab, and preserve added tokens, added special tokens,
    # and the configured model_max_length.
    # Safety check: the default model_max_length must not already be 42,
    # otherwise the model_max_length=42 round-trip below proves nothing.
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            self.assertNotEqual(tokenizer.model_max_length, 42)
    # Phase 1: plain round-trip — tokens and vocab must be unchanged.
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            tmpdirname = tempfile.mkdtemp()
            sample_text = ' He is very happy, UNwanted,running'
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            before_vocab = tokenizer.get_vocab()
            tokenizer.save_pretrained(tmpdirname)
            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            after_vocab = after_tokenizer.get_vocab()
            self.assertListEqual(before_tokens, after_tokens)
            self.assertDictEqual(before_vocab, after_vocab)
            shutil.rmtree(tmpdirname)
    # Phase 2: round-trip with added tokens, an extra special token and a
    # non-default model_max_length; from_pretrained kwargs must still override.
    tokenizers = self.get_tokenizers(model_max_length=42)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            tmpdirname = tempfile.mkdtemp()
            sample_text = ' He is very happy, UNwanted,running'
            tokenizer.add_tokens(['bim', 'bambam'])
            additional_special_tokens = tokenizer.additional_special_tokens
            additional_special_tokens.append('new_additional_special_token')
            tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            before_vocab = tokenizer.get_vocab()
            tokenizer.save_pretrained(tmpdirname)
            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            after_vocab = after_tokenizer.get_vocab()
            self.assertListEqual(before_tokens, after_tokens)
            self.assertDictEqual(before_vocab, after_vocab)
            self.assertIn('bim', after_vocab)
            self.assertIn('bambam', after_vocab)
            self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
            self.assertEqual(after_tokenizer.model_max_length, 42)
            # kwargs passed to from_pretrained take precedence over saved config
            tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
            self.assertEqual(tokenizer.model_max_length, 43)
            shutil.rmtree(tmpdirname)
    # Phase 3: same checks restricted to fast tokenizers.
    tokenizers = self.get_tokenizers(model_max_length=42)
    for tokenizer in tokenizers:
        if not tokenizer.is_fast:
            continue
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            tmpdirname = tempfile.mkdtemp()
            sample_text = ' He is very happy, UNwanted,running'
            tokenizer.add_tokens(['bim', 'bambam'])
            additional_special_tokens = tokenizer.additional_special_tokens
            additional_special_tokens.append('new_additional_special_token')
            tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
            before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
            before_vocab = tokenizer.get_vocab()
            tokenizer.save_pretrained(tmpdirname)
            after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
            after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
            after_vocab = after_tokenizer.get_vocab()
            self.assertListEqual(before_tokens, after_tokens)
            self.assertDictEqual(before_vocab, after_vocab)
            self.assertIn('bim', after_vocab)
            self.assertIn('bambam', after_vocab)
            self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
            self.assertEqual(after_tokenizer.model_max_length, 42)
            tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
            self.assertEqual(tokenizer.model_max_length, 43)
            shutil.rmtree(tmpdirname)
def test_pickle_tokenizer(self):
    """Tokenizers must survive a pickle round-trip and tokenize identically afterwards."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            self.assertIsNotNone(tokenizer)
            text = 'Munich and Berlin are nice cities'
            subwords = tokenizer.tokenize(text)
            filename = os.path.join(self.tmpdirname, 'tokenizer.bin')
            with open(filename, 'wb') as handle:
                pickle.dump(tokenizer, handle)
            with open(filename, 'rb') as handle:
                restored_tokenizer = pickle.load(handle)
            self.assertListEqual(subwords, restored_tokenizer.tokenize(text))
@require_tokenizers
def test_pickle_added_tokens(self):
    """AddedToken instances must survive a pickle round-trip with identical state.

    The stray `_tokenizers` expression that preceded this method (a corrupted
    `@require_tokenizers` decorator) would raise NameError at class creation;
    the decorator is restored here.
    """
    tok1 = AddedToken('<s>', rstrip=True, lstrip=True, normalized=False, single_word=True)
    tok2 = pickle.loads(pickle.dumps(tok1))
    self.assertEqual(tok1.__getstate__(), tok2.__getstate__())
def test_added_tokens_do_lower_case(self):
    # With do_lower_case=True, added tokens must match case-insensitively, so
    # upper-case duplicates collapse; with do_lower_case=False, case matters.
    tokenizers = self.get_tokenizers(fast=False, do_lower_case=True)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if ((not hasattr(tokenizer, 'do_lower_case')) or (not tokenizer.do_lower_case)):
                continue
            special_token = tokenizer.all_special_tokens[0]
            text = ((special_token + ' aaaaa bbbbbb low cccccccccdddddddd l ') + special_token)
            text2 = ((special_token + ' AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l ') + special_token)
            # baseline tokenization before any tokens are added
            toks0 = tokenizer.tokenize(text)
            new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd', 'AAAAA BBBBBB', 'CCCCCCCCCDDDDDDDD']
            added = tokenizer.add_tokens(new_toks)
            # lower-casing folds the upper-case variants away: only 2 of 4 are new
            self.assertEqual(added, 2)
            toks = tokenizer.tokenize(text)
            toks2 = tokenizer.tokenize(text2)
            self.assertEqual(len(toks), len(toks2))
            self.assertListEqual(toks, toks2)
            if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
                # slow tokenizers: the added multi-word tokens change the split
                self.assertNotEqual(len(toks), len(toks0))
            # special tokens must be preserved verbatim even when lower-casing
            sequence_with_special_tokens = (('A ' + ' yEs '.join(tokenizer.all_special_tokens)) + ' B')
            tokenized_sequence = tokenizer.tokenize(sequence_with_special_tokens)
            for special_token in tokenizer.all_special_tokens:
                self.assertTrue((special_token in tokenized_sequence))
    tokenizers = self.get_tokenizers(fast=False, do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if (hasattr(tokenizer, 'do_lower_case') and tokenizer.do_lower_case):
                continue
            special_token = tokenizer.all_special_tokens[0]
            text = ((special_token + ' aaaaa bbbbbb low cccccccccdddddddd l ') + special_token)
            text2 = ((special_token + ' AAAAA BBBBBB low CCCCCCCCCDDDDDDDD l ') + special_token)
            new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd', 'AAAAA BBBBBB', 'CCCCCCCCCDDDDDDDD']
            toks0 = tokenizer.tokenize(text)
            added = tokenizer.add_tokens(new_toks)
            # case-sensitive: 4 distinct tokens, or 2 if the tokenizer dedups anyway
            self.assertIn(added, [2, 4])
            toks = tokenizer.tokenize(text)
            toks2 = tokenizer.tokenize(text2)
            self.assertEqual(len(toks), len(toks2))
            # lower- and upper-case variants must now tokenize differently
            self.assertNotEqual(toks[1], toks2[1])
            if (not isinstance(tokenizer, PreTrainedTokenizerFast)):
                self.assertNotEqual(len(toks), len(toks0))
def test_add_tokens_tokenizer(self):
    # Adding regular and special tokens must grow len(tokenizer) (but not
    # vocab_size, which counts only the base vocab) and the added tokens must
    # actually be used when encoding.
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            vocab_size = tokenizer.vocab_size
            all_size = len(tokenizer)
            self.assertNotEqual(vocab_size, 0)
            new_toks = ['aaaaa bbbbbb', 'cccccccccdddddddd']
            added_toks = tokenizer.add_tokens(new_toks)
            vocab_size_2 = tokenizer.vocab_size
            all_size_2 = len(tokenizer)
            self.assertNotEqual(vocab_size_2, 0)
            # base vocab unchanged; only the added-token table grows
            self.assertEqual(vocab_size, vocab_size_2)
            self.assertEqual(added_toks, len(new_toks))
            self.assertEqual(all_size_2, (all_size + len(new_toks)))
            tokens = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l', add_special_tokens=False)
            self.assertGreaterEqual(len(tokens), 4)
            # added tokens receive ids beyond the base vocab range
            self.assertGreater(tokens[0], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[(- 2)], (tokenizer.vocab_size - 1))
            new_toks_2 = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
            added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
            vocab_size_3 = tokenizer.vocab_size
            all_size_3 = len(tokenizer)
            self.assertNotEqual(vocab_size_3, 0)
            self.assertEqual(vocab_size, vocab_size_3)
            self.assertEqual(added_toks_2, len(new_toks_2))
            self.assertEqual(all_size_3, (all_size_2 + len(new_toks_2)))
            tokens = tokenizer.encode('>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l', add_special_tokens=False)
            self.assertGreaterEqual(len(tokens), 6)
            self.assertGreater(tokens[0], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[0], tokens[1])
            self.assertGreater(tokens[(- 2)], (tokenizer.vocab_size - 1))
            self.assertGreater(tokens[(- 2)], tokens[(- 3)])
            # the new eos/pad special tokens must resolve to their registered ids
            self.assertEqual(tokens[0], tokenizer.eos_token_id)
            self.assertEqual(tokens[(- 2)], tokenizer.pad_token_id)
def test_add_special_tokens(self):
    """A newly registered cls_token encodes to one id and is dropped by skip_special_tokens."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            input_text, ids = self.get_clean_sequence(tokenizer)
            special_token = '[SPECIAL_TOKEN]'
            tokenizer.add_special_tokens({'cls_token': special_token})
            encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
            self.assertEqual(len(encoded_special_token), 1)
            # a text containing the special token re-encodes to input ids + special id
            text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
            encoded = tokenizer.encode(text, add_special_tokens=False)
            input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
            special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
            self.assertEqual(encoded, input_encoded + special_token_id)
            # skip_special_tokens must strip the special token from the decoded text
            decoded = tokenizer.decode(encoded, skip_special_tokens=True)
            self.assertTrue(special_token not in decoded)
def test_internal_consistency(self):
    """tokenize + convert_tokens_to_ids must match encode, and decode must round-trip."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            input_text, output_text = self.get_input_output_texts(tokenizer)
            tokens = tokenizer.tokenize(input_text)
            ids = tokenizer.convert_tokens_to_ids(tokens)
            self.assertListEqual(ids, tokenizer.encode(input_text, add_special_tokens=False))
            tokens_back = tokenizer.convert_ids_to_tokens(ids)
            self.assertNotEqual(len(tokens_back), 0)
            decoded_text = tokenizer.decode(ids)
            self.assertIsInstance(decoded_text, str)
            self.assertEqual(decoded_text, output_text)
@require_tokenizers
def test_encode_decode_with_spaces(self):
    """decode(spaces_between_special_tokens=...) must control spacing around added tokens.

    Fixes two defects: the stray `_tokenizers` expression that preceded this
    method (a corrupted `@require_tokenizers` decorator) would raise NameError
    at class creation, and the local variable `input` shadowed the builtin.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # non-normalized added tokens keep their exact surface form
            new_toks = [AddedToken('[ABC]', normalized=False), AddedToken('[DEF]', normalized=False)]
            tokenizer.add_tokens(new_toks)
            input_text = '[ABC][DEF][ABC][DEF]'
            if self.space_between_special_tokens:
                output = '[ABC] [DEF] [ABC] [DEF]'
            else:
                output = input_text
            encoded = tokenizer.encode(input_text, add_special_tokens=False)
            decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
            # lower-cased output accepted for lower-casing tokenizers
            self.assertIn(decoded, [output, output.lower()])
def test_pretrained_model_lists(self):
    """pretrained_vocab_files_map and max_model_input_sizes must list the same checkpoints."""
    vocab_map = self.tokenizer_class.pretrained_vocab_files_map
    self.assertGreaterEqual(len(vocab_map), 1)
    first_file_map = list(vocab_map.values())[0]
    self.assertGreaterEqual(len(first_file_map), 1)
    self.assertEqual(len(first_file_map), len(self.tokenizer_class.max_model_input_sizes))
    expected_weights = list(self.tokenizer_class.max_model_input_sizes.keys())
    # every per-file map must name exactly the same checkpoints, in order
    for file_map in vocab_map.values():
        self.assertListEqual(expected_weights, list(file_map.keys()))
def test_mask_output(self):
    """input_ids and token_type_ids from encode_plus must have matching lengths."""
    for tokenizer in self.get_tokenizers(fast=False, do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # only meaningful when the subclass overrides the base implementation
            overrides_build = tokenizer.build_inputs_with_special_tokens.__qualname__.split('.')[0] != 'PreTrainedTokenizer'
            if overrides_build and 'token_type_ids' in tokenizer.model_input_names:
                seq_0 = 'Test this method.'
                seq_1 = 'With these inputs.'
                information = tokenizer.encode_plus(seq_0, seq_1, add_special_tokens=True)
                self.assertEqual(len(information['input_ids']), len(information['token_type_ids']))
def test_token_type_ids(self):
    """A single sequence must yield token_type_ids containing segment id 0."""
    for tokenizer in self.get_tokenizers():
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            seq_0 = 'Test this method.'
            output = tokenizer(seq_0, return_token_type_ids=True)
            self.assertIn(0, output['token_type_ids'])
def test_sequence_ids(self):
    """Fast tokenizers must report sequence membership: 0/1 per segment, None for special tokens."""
    for tokenizer in self.get_tokenizers():
        if not tokenizer.is_fast:
            continue
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            seq_0 = 'Test this method.'
            seq_1 = 'With these inputs.'
            single_output = tokenizer(seq_0)
            self.assertIn(0, single_output.sequence_ids())
            pair_output = tokenizer(seq_0, seq_1)
            self.assertIn(0, pair_output.sequence_ids())
            self.assertIn(1, pair_output.sequence_ids())
            if tokenizer.num_special_tokens_to_add(pair=True):
                # special-token positions map to None
                self.assertIn(None, pair_output.sequence_ids())
def test_number_of_added_tokens(self):
    """num_special_tokens_to_add(pair=True) must equal the id count actually added to a pair."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            seq_0 = 'Test this method.'
            seq_1 = 'With these inputs.'
            plain_ids = tokenizer.encode(seq_0, seq_1, add_special_tokens=False)
            ids_with_special = tokenizer.encode(seq_0, seq_1, add_special_tokens=True)
            if len(ids_with_special) != 2:
                self.assertEqual(
                    tokenizer.num_special_tokens_to_add(pair=True),
                    len(ids_with_special) - len(plain_ids),
                )
def test_maximum_encoding_length_single_input(self):
    # Single over-long sequence: truncation flags must cap output at
    # model_max_length, truncation=False must only warn (once), and
    # stride/return_overflowing_tokens must expose the cut-off ids.
    tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            (seq_0, ids) = self.get_clean_sequence(tokenizer, max_length=20)
            sequence = tokenizer.encode(seq_0, add_special_tokens=False)
            total_length = len(sequence)
            assert (total_length > 4), "Issue with the testing sequence, please update it it's too short"
            model_max_length = tokenizer.model_max_length
            self.assertEqual(model_max_length, 100)
            # build a sequence guaranteed to exceed model_max_length
            seq_1 = (seq_0 * model_max_length)
            sequence1 = tokenizer(seq_1, add_special_tokens=False)
            total_length1 = len(sequence1['input_ids'])
            assert (total_length1 > model_max_length), "Issue with the testing sequence, please update it it's too short"
            # only exercise padding strategies when a usable pad token exists
            padding_strategies = ([False, True, 'longest'] if (tokenizer.pad_token and (tokenizer.pad_token_id >= 0)) else [False])
            for padding_state in padding_strategies:
                with self.subTest(f'Padding: {padding_state}'):
                    for truncation_state in [True, 'longest_first', 'only_first']:
                        with self.subTest(f'Truncation: {truncation_state}'):
                            output = tokenizer(seq_1, padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids']), model_max_length)
                            output = tokenizer([seq_1], padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids'][0]), model_max_length)
                    # truncation=False: full length kept, exactly one warning emitted
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer(seq_1, padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids']), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer([seq_1], padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids'][0]), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
            # overflow handling: truncate 2 ids, keep them plus `stride` ids of context
            stride = 2
            information = tokenizer(seq_0, max_length=(total_length - 2), add_special_tokens=False, stride=stride, truncation='longest_first', return_overflowing_tokens=True)
            if isinstance(tokenizer, PreTrainedTokenizerFast):
                # fast tokenizers return the overflow as a second input_ids row
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (total_length - 2))
                self.assertEqual(truncated_sequence, sequence[:(- 2)])
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, sequence[(- (2 + stride)):])
            else:
                # slow tokenizers return a separate 'overflowing_tokens' entry
                truncated_sequence = information['input_ids']
                overflowing_tokens = information['overflowing_tokens']
                self.assertEqual(len(truncated_sequence), (total_length - 2))
                self.assertEqual(truncated_sequence, sequence[:(- 2)])
                self.assertEqual(len(overflowing_tokens), (2 + stride))
def test_maximum_encoding_length_pair_input(self):
    # Pair-input truncation: verifies 'longest_first'/True/'only_first'/
    # 'only_second' strategies, the no-truncation warning, and the exact
    # contents of truncated and overflowing token lists.
    tokenizers = self.get_tokenizers(do_lower_case=False, model_max_length=100)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Build two sequences long enough for stride-based overflow checks
            stride = 2
            (seq_0, ids) = self.get_clean_sequence(tokenizer, max_length=20)
            if (len(ids) <= (2 + stride)):
                seq_0 = ((seq_0 + ' ') * (2 + stride))
                ids = None
            seq0_tokens = tokenizer.encode(seq_0, add_special_tokens=False)
            assert (len(seq0_tokens) > (2 + stride))
            seq_1 = 'This is another sentence to be encoded.'
            seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False)
            if (abs((len(seq0_tokens) - len(seq1_tokens))) <= 2):
                # lengths too close: double seq_1 so 'longest' is unambiguous
                seq1_tokens = (seq1_tokens + seq1_tokens)
                seq_1 = tokenizer.decode(seq1_tokens, clean_up_tokenization_spaces=False)
                seq1_tokens = tokenizer.encode(seq_1, add_special_tokens=False)
            assert (len(seq1_tokens) > (2 + stride))
            smallest = (seq1_tokens if (len(seq0_tokens) > len(seq1_tokens)) else seq0_tokens)
            sequence = tokenizer.encode(seq_0, seq_1, add_special_tokens=False)
            model_max_length = tokenizer.model_max_length
            self.assertEqual(model_max_length, 100)
            # an over-long first member for the truncation checks
            seq_2 = (seq_0 * model_max_length)
            assert (len(seq_2) > model_max_length)
            sequence1 = tokenizer(seq_1, add_special_tokens=False)
            total_length1 = len(sequence1['input_ids'])
            sequence2 = tokenizer(seq_2, seq_1, add_special_tokens=False)
            total_length2 = len(sequence2['input_ids'])
            assert (total_length1 < (model_max_length - 10)), 'Issue with the testing sequence, please update it.'
            assert (total_length2 > model_max_length), 'Issue with the testing sequence, please update it.'
            # only exercise padding strategies when a usable pad token exists
            padding_strategies = ([False, True, 'longest'] if (tokenizer.pad_token and (tokenizer.pad_token_id >= 0)) else [False])
            for padding_state in padding_strategies:
                with self.subTest(f'{tokenizer.__class__.__name__} Padding: {padding_state}'):
                    for truncation_state in [True, 'longest_first', 'only_first']:
                        with self.subTest(f'{tokenizer.__class__.__name__} Truncation: {truncation_state}'):
                            output = tokenizer(seq_2, seq_1, padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids']), model_max_length)
                            output = tokenizer([seq_2], [seq_1], padding=padding_state, truncation=truncation_state)
                            self.assertEqual(len(output['input_ids'][0]), model_max_length)
                    # 'only_second' with the long sequence passed second
                    output = tokenizer(seq_1, seq_2, padding=padding_state, truncation='only_second')
                    self.assertEqual(len(output['input_ids']), model_max_length)
                    output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation='only_second')
                    self.assertEqual(len(output['input_ids'][0]), model_max_length)
                    # no truncation: over-long output plus exactly one warning
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer(seq_1, seq_2, padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids']), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
                    tokenizer.deprecation_warnings = {}
                    with self.assertLogs('transformers', level='WARNING') as cm:
                        output = tokenizer([seq_1], [seq_2], padding=padding_state, truncation=False)
                        self.assertNotEqual(len(output['input_ids'][0]), model_max_length)
                    self.assertEqual(len(cm.records), 1)
                    self.assertTrue(cm.records[0].message.startswith('Token indices sequence length is longer than the specified maximum sequence length for this model'))
            # Expected truncated/overflowing contents for each strategy
            truncated_first_sequence = (tokenizer.encode(seq_0, add_special_tokens=False)[:(- 2)] + tokenizer.encode(seq_1, add_special_tokens=False))
            truncated_second_sequence = (tokenizer.encode(seq_0, add_special_tokens=False) + tokenizer.encode(seq_1, add_special_tokens=False)[:(- 2)])
            truncated_longest_sequence = (truncated_first_sequence if (len(seq0_tokens) > len(seq1_tokens)) else truncated_second_sequence)
            overflow_first_sequence = (tokenizer.encode(seq_0, add_special_tokens=False)[(- (2 + stride)):] + tokenizer.encode(seq_1, add_special_tokens=False))
            overflow_second_sequence = (tokenizer.encode(seq_0, add_special_tokens=False) + tokenizer.encode(seq_1, add_special_tokens=False)[(- (2 + stride)):])
            overflow_longest_sequence = (overflow_first_sequence if (len(seq0_tokens) > len(seq1_tokens)) else overflow_second_sequence)
            # truncation='longest_first'
            information = tokenizer.encode_plus(seq_0, seq_1, max_length=(len(sequence) - 2), add_special_tokens=False, stride=stride, truncation='longest_first', return_overflowing_tokens=True)
            if isinstance(tokenizer, PreTrainedTokenizerFast):
                # fast tokenizers return the overflow as a second input_ids row
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(smallest)))
                self.assertEqual(overflowing_tokens, overflow_longest_sequence)
            else:
                truncated_sequence = information['input_ids']
                overflowing_tokens = information['overflowing_tokens']
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                # slow tokenizers only return the overflow of the truncated member
                self.assertEqual(len(overflowing_tokens), (2 + stride))
            # truncation=True must behave like 'longest_first'
            information = tokenizer.encode_plus(seq_0, seq_1, max_length=(len(sequence) - 2), add_special_tokens=False, stride=stride, truncation=True, return_overflowing_tokens=True)
            if isinstance(tokenizer, PreTrainedTokenizerFast):
                truncated_sequence = information['input_ids'][0]
                overflowing_tokens = information['input_ids'][1]
                self.assertEqual(len(information['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(smallest)))
                self.assertEqual(overflowing_tokens, overflow_longest_sequence)
            else:
                truncated_sequence = information['input_ids']
                overflowing_tokens = information['overflowing_tokens']
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_longest_sequence)
                self.assertEqual(len(overflowing_tokens), (2 + stride))
            # truncation='only_first': only the first member is cut
            information_first_truncated = tokenizer.encode_plus(seq_0, seq_1, max_length=(len(sequence) - 2), add_special_tokens=False, stride=stride, truncation='only_first', return_overflowing_tokens=True)
            if isinstance(tokenizer, PreTrainedTokenizerFast):
                truncated_sequence = information_first_truncated['input_ids'][0]
                overflowing_tokens = information_first_truncated['input_ids'][1]
                self.assertEqual(len(information_first_truncated['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_first_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(seq1_tokens)))
                self.assertEqual(overflowing_tokens, overflow_first_sequence)
            else:
                truncated_sequence = information_first_truncated['input_ids']
                overflowing_tokens = information_first_truncated['overflowing_tokens']
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_first_sequence)
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, seq0_tokens[(- (2 + stride)):])
            # truncation='only_second': only the second member is cut
            information_second_truncated = tokenizer.encode_plus(seq_0, seq_1, max_length=(len(sequence) - 2), add_special_tokens=False, stride=stride, truncation='only_second', return_overflowing_tokens=True)
            if isinstance(tokenizer, PreTrainedTokenizerFast):
                truncated_sequence = information_second_truncated['input_ids'][0]
                overflowing_tokens = information_second_truncated['input_ids'][1]
                self.assertEqual(len(information_second_truncated['input_ids']), 2)
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_second_sequence)
                self.assertEqual(len(overflowing_tokens), ((2 + stride) + len(seq0_tokens)))
                self.assertEqual(overflowing_tokens, overflow_second_sequence)
            else:
                truncated_sequence = information_second_truncated['input_ids']
                overflowing_tokens = information_second_truncated['overflowing_tokens']
                self.assertEqual(len(truncated_sequence), (len(sequence) - 2))
                self.assertEqual(truncated_sequence, truncated_second_sequence)
                self.assertEqual(len(overflowing_tokens), (2 + stride))
                self.assertEqual(overflowing_tokens, seq1_tokens[(- (2 + stride)):])
def test_special_tokens_mask(self):
    """Dropping positions flagged by special_tokens_mask must recover the bare encoding."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence_0 = 'Encode this.'
            encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
            encoded_dict = tokenizer.encode_plus(sequence_0, add_special_tokens=True, return_special_tokens_mask=True)
            ids_with_special = encoded_dict['input_ids']
            mask = encoded_dict['special_tokens_mask']
            self.assertEqual(len(mask), len(ids_with_special))
            stripped = [tok for tok, is_special in zip(ids_with_special, mask) if not is_special]
            self.assertEqual(encoded_sequence, stripped)
def test_special_tokens_mask_input_pairs(self):
    """Same mask check as test_special_tokens_mask, but for a sequence pair."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence_0 = 'Encode this.'
            sequence_1 = 'This one too please.'
            encoded_sequence = tokenizer.encode(sequence_0, add_special_tokens=False)
            encoded_sequence += tokenizer.encode(sequence_1, add_special_tokens=False)
            encoded_dict = tokenizer.encode_plus(sequence_0, sequence_1, add_special_tokens=True, return_special_tokens_mask=True)
            ids_with_special = encoded_dict['input_ids']
            mask = encoded_dict['special_tokens_mask']
            self.assertEqual(len(mask), len(ids_with_special))
            stripped = [tok for tok, is_special in zip(ids_with_special, mask) if not is_special]
            self.assertEqual(encoded_sequence, stripped)
def test_right_and_left_padding(self):
    """Padding to max_length must pad on the configured side, and the no-op
    strategies (True/'longest'/False/default) must leave a single sequence unchanged.

    Uses unittest assertions instead of bare `assert` so the checks survive
    `python -O` and report both values on failure, matching the rest of the file.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence = 'Sequence'
            padding_size = 10
            # skip / adapt when the tokenizer has no pad token
            self._check_no_pad_token_padding(tokenizer, sequence)
            padding_idx = tokenizer.pad_token_id
            # RIGHT padding: pad ids appended after the sequence
            tokenizer.padding_side = 'right'
            encoded_sequence = tokenizer.encode(sequence)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(sequence, max_length=(sequence_length + padding_size), padding='max_length')
            self.assertEqual((sequence_length + padding_size), len(padded_sequence))
            self.assertEqual((encoded_sequence + ([padding_idx] * padding_size)), padded_sequence)
            # LEFT padding: pad ids prepended before the sequence
            tokenizer.padding_side = 'left'
            encoded_sequence = tokenizer.encode(sequence)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(sequence, max_length=(sequence_length + padding_size), padding='max_length')
            self.assertEqual((sequence_length + padding_size), len(padded_sequence))
            self.assertEqual((([padding_idx] * padding_size) + encoded_sequence), padded_sequence)
            # padding=True / 'longest' / default / False: a single sequence is unchanged
            encoded_sequence = tokenizer.encode(sequence)
            sequence_length = len(encoded_sequence)
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(sequence, padding=True)
            self.assertEqual(sequence_length, len(padded_sequence_right))
            self.assertEqual(encoded_sequence, padded_sequence_right)
            tokenizer.padding_side = 'left'
            padded_sequence_left = tokenizer.encode(sequence, padding='longest')
            self.assertEqual(sequence_length, len(padded_sequence_left))
            self.assertEqual(encoded_sequence, padded_sequence_left)
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(sequence)
            self.assertEqual(sequence_length, len(padded_sequence_right))
            self.assertEqual(encoded_sequence, padded_sequence_right)
            tokenizer.padding_side = 'left'
            padded_sequence_left = tokenizer.encode(sequence, padding=False)
            self.assertEqual(sequence_length, len(padded_sequence_left))
            self.assertEqual(encoded_sequence, padded_sequence_left)
def test_padding_to_max_length(self):
    """The legacy `pad_to_max_length=True` flag must right-pad to max_length,
    and be a no-op when no max_length is given for a single sequence.

    Uses unittest assertions instead of bare `assert` so the checks survive
    `python -O` and report both values on failure, matching the rest of the file.
    """
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence = 'Sequence'
            padding_size = 10
            # skip / adapt when the tokenizer has no pad token
            self._check_no_pad_token_padding(tokenizer, sequence)
            padding_idx = tokenizer.pad_token_id
            tokenizer.padding_side = 'right'
            encoded_sequence = tokenizer.encode(sequence)
            sequence_length = len(encoded_sequence)
            padded_sequence = tokenizer.encode(sequence, max_length=(sequence_length + padding_size), pad_to_max_length=True)
            self.assertEqual((sequence_length + padding_size), len(padded_sequence))
            self.assertEqual((encoded_sequence + ([padding_idx] * padding_size)), padded_sequence)
            # without max_length the flag must not pad a single sequence
            encoded_sequence = tokenizer.encode(sequence)
            sequence_length = len(encoded_sequence)
            tokenizer.padding_side = 'right'
            padded_sequence_right = tokenizer.encode(sequence, pad_to_max_length=True)
            self.assertEqual(sequence_length, len(padded_sequence_right))
            self.assertEqual(encoded_sequence, padded_sequence_right)
def test_padding_to_multiple_of(self):
    """``pad_to_multiple_of=8`` must round padded lengths up to a multiple of 8,
    must do nothing without ``padding=True``, and must raise when combined with
    an explicit ``max_length`` that is not itself a multiple of 8."""
    tokenizers = self.get_tokenizers()
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if (tokenizer.pad_token is None):
                self.skipTest('No padding token.')
            else:
                empty_tokens = tokenizer('', padding=True, pad_to_multiple_of=8)
                normal_tokens = tokenizer('This is a sample input', padding=True, pad_to_multiple_of=8)
                for (key, value) in empty_tokens.items():
                    self.assertEqual((len(value) % 8), 0, 'BatchEncoding.{} is not multiple of 8'.format(key))
                for (key, value) in normal_tokens.items():
                    self.assertEqual((len(value) % 8), 0, 'BatchEncoding.{} is not multiple of 8'.format(key))
                # Without padding=True, pad_to_multiple_of must have no effect,
                # so this short input must NOT come out as a multiple of 8.
                normal_tokens = tokenizer('This', pad_to_multiple_of=8)
                for (key, value) in normal_tokens.items():
                    # Bug fix: the failure message previously read 'is not multiple
                    # of 8' — the inverse of what this assertNotEqual verifies.
                    self.assertNotEqual((len(value) % 8), 0, 'BatchEncoding.{} is multiple of 8'.format(key))
                normal_tokens = tokenizer('This', padding=True, truncation=True, pad_to_multiple_of=8)
                for (key, value) in normal_tokens.items():
                    self.assertEqual((len(value) % 8), 0, 'BatchEncoding.{} is not multiple of 8'.format(key))
                # max_length=12 cannot be honoured together with multiples of 8.
                self.assertRaises(ValueError, tokenizer.__call__, 'This', padding=True, truncation=True, max_length=12, pad_to_multiple_of=8)
def test_encode_plus_with_padding(self):
    """``encode_plus`` padding behaviour on a single sequence: ``padding=True``
    and ``padding=False`` without a max_length are no-ops, while
    ``padding='max_length'`` pads input_ids, special_tokens_mask,
    token_type_ids and attention_mask on the configured side."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequence = 'Sequence'
            # Make sure a pad token exists (or that its absence raises cleanly).
            self._check_no_pad_token_padding(tokenizer, sequence)
            padding_size = 10
            padding_idx = tokenizer.pad_token_id
            token_type_padding_idx = tokenizer.pad_token_type_id
            # Reference encoding, no padding applied.
            encoded_sequence = tokenizer.encode_plus(sequence, return_special_tokens_mask=True)
            input_ids = encoded_sequence['input_ids']
            special_tokens_mask = encoded_sequence['special_tokens_mask']
            sequence_length = len(input_ids)
            # padding=True ('longest') on a single sequence must change nothing.
            tokenizer.padding_side = 'right'
            not_padded_sequence = tokenizer.encode_plus(sequence, padding=True, return_special_tokens_mask=True)
            not_padded_input_ids = not_padded_sequence['input_ids']
            not_padded_special_tokens_mask = not_padded_sequence['special_tokens_mask']
            not_padded_sequence_length = len(not_padded_input_ids)
            assert (sequence_length == not_padded_sequence_length)
            assert (input_ids == not_padded_input_ids)
            assert (special_tokens_mask == not_padded_special_tokens_mask)
            # padding=False must also change nothing.
            not_padded_sequence = tokenizer.encode_plus(sequence, padding=False, return_special_tokens_mask=True)
            not_padded_input_ids = not_padded_sequence['input_ids']
            not_padded_special_tokens_mask = not_padded_sequence['special_tokens_mask']
            not_padded_sequence_length = len(not_padded_input_ids)
            assert (sequence_length == not_padded_sequence_length)
            assert (input_ids == not_padded_input_ids)
            assert (special_tokens_mask == not_padded_special_tokens_mask)
            # Right padding: pad ids appended, mask marks padding as special (1).
            tokenizer.padding_side = 'right'
            right_padded_sequence = tokenizer.encode_plus(sequence, max_length=(sequence_length + padding_size), padding='max_length', return_special_tokens_mask=True)
            right_padded_input_ids = right_padded_sequence['input_ids']
            right_padded_special_tokens_mask = right_padded_sequence['special_tokens_mask']
            right_padded_sequence_length = len(right_padded_input_ids)
            assert ((sequence_length + padding_size) == right_padded_sequence_length)
            assert ((input_ids + ([padding_idx] * padding_size)) == right_padded_input_ids)
            assert ((special_tokens_mask + ([1] * padding_size)) == right_padded_special_tokens_mask)
            # Left padding: pad ids prepended instead.
            tokenizer.padding_side = 'left'
            left_padded_sequence = tokenizer.encode_plus(sequence, max_length=(sequence_length + padding_size), padding='max_length', return_special_tokens_mask=True)
            left_padded_input_ids = left_padded_sequence['input_ids']
            left_padded_special_tokens_mask = left_padded_sequence['special_tokens_mask']
            left_padded_sequence_length = len(left_padded_input_ids)
            assert ((sequence_length + padding_size) == left_padded_sequence_length)
            assert ((([padding_idx] * padding_size) + input_ids) == left_padded_input_ids)
            assert ((([1] * padding_size) + special_tokens_mask) == left_padded_special_tokens_mask)
            # token_type_ids, when produced, are padded with pad_token_type_id.
            if ('token_type_ids' in tokenizer.model_input_names):
                token_type_ids = encoded_sequence['token_type_ids']
                left_padded_token_type_ids = left_padded_sequence['token_type_ids']
                right_padded_token_type_ids = right_padded_sequence['token_type_ids']
                assert ((token_type_ids + ([token_type_padding_idx] * padding_size)) == right_padded_token_type_ids)
                assert ((([token_type_padding_idx] * padding_size) + token_type_ids) == left_padded_token_type_ids)
            # attention_mask, when produced, is padded with zeros.
            if ('attention_mask' in tokenizer.model_input_names):
                attention_mask = encoded_sequence['attention_mask']
                right_padded_attention_mask = right_padded_sequence['attention_mask']
                left_padded_attention_mask = left_padded_sequence['attention_mask']
                assert ((attention_mask + ([0] * padding_size)) == right_padded_attention_mask)
                assert ((([0] * padding_size) + attention_mask) == left_padded_attention_mask)
def test_separate_tokenizers(self):
    """Two tokenizers built with different init kwargs must not share state."""
    first = self.get_tokenizer(random_argument=True)
    assert (first.init_kwargs['random_argument'] is True)
    second = self.get_tokenizer(random_argument=False)
    # Building the second instance must not have mutated the first one's kwargs.
    assert (first.init_kwargs['random_argument'] is True)
    assert (second.init_kwargs['random_argument'] is False)
def test_get_vocab(self):
    """``get_vocab`` returns a dict, and every id below ``len(tokenizer)`` stays
    convertible to a token, before and after adding a new token."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            vocab_dict = tokenizer.get_vocab()
            self.assertIsInstance(vocab_dict, dict)
            self.assertGreaterEqual(len(tokenizer), len(vocab_dict))
            tokens = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))]
            self.assertEqual(len(tokens), len(tokenizer))
            # Growing the vocab must keep ids and length consistent.
            tokenizer.add_tokens(['asdfasdfasdfasdf'])
            tokens = [tokenizer.convert_ids_to_tokens(i) for i in range(len(tokenizer))]
            self.assertEqual(len(tokens), len(tokenizer))
def test_conversion_reversible(self):
    """token->id and id->token must be inverse mappings for every vocab entry
    except the unk token (several strings may map onto unk)."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            for (token, token_id) in tokenizer.get_vocab().items():
                if (token == tokenizer.unk_token):
                    continue
                self.assertEqual(tokenizer.convert_tokens_to_ids(token), token_id)
                self.assertEqual(tokenizer.convert_ids_to_tokens(token_id), token)
def test_call(self):
    """``tokenizer(...)`` must be equivalent to encode_plus / batch_encode_plus
    for single sequences, pairs, batches, and batches of pairs."""
    sequences = ['Testing batch encode plus', 'Testing batch encode plus with different sequence lengths', 'Testing batch encode plus with different sequence lengths correctly pads']
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Single sequence.
            self.assertEqual(tokenizer.encode_plus(sequences[0]), tokenizer(sequences[0]))
            # Sequence pair.
            self.assertEqual(tokenizer.encode_plus(sequences[0], sequences[1]), tokenizer(sequences[0], sequences[1]))
            # Batch of sequences.
            self.assertEqual(tokenizer.batch_encode_plus(sequences), tokenizer(sequences))
            # Batch of pairs.
            self.assertEqual(tokenizer.batch_encode_plus(list(zip(sequences, sequences))), tokenizer(sequences, sequences))
def test_batch_encode_plus_batch_sequence_length(self):
    """``batch_encode_plus`` must agree with per-sequence ``encode_plus``, both
    without padding and when padding to the longest sequence in the batch."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequences = ['Testing batch encode plus', 'Testing batch encode plus with different sequence lengths', 'Testing batch encode plus with different sequence lengths correctly pads']
            # Unpadded: batch output must equal the per-sequence outputs.
            encoded_sequences = [tokenizer.encode_plus(sequence) for sequence in sequences]
            encoded_sequences_batch = tokenizer.batch_encode_plus(sequences, padding=False)
            self.assertListEqual(encoded_sequences, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch))
            # The longest sequence in the batch defines the padded length.
            maximum_length = len(max([encoded_sequence['input_ids'] for encoded_sequence in encoded_sequences], key=len))
            self._check_no_pad_token_padding(tokenizer, sequences)
            # padding=True on the batch must match per-sequence 'max_length'
            # padding at exactly that longest length.
            encoded_sequences_padded = [tokenizer.encode_plus(sequence, max_length=maximum_length, padding='max_length') for sequence in sequences]
            encoded_sequences_batch_padded = tokenizer.batch_encode_plus(sequences, padding=True)
            self.assertListEqual(encoded_sequences_padded, self.convert_batch_encode_plus_format_to_encode_plus(encoded_sequences_batch_padded))
            # padding='longest' must ignore a larger max_length and equal padding=True.
            encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=True)
            encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(sequences, max_length=(maximum_length + 10), padding='longest')
            for key in encoded_sequences_batch_padded_1.keys():
                self.assertListEqual(encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key])
            # padding=False must ignore max_length entirely.
            encoded_sequences_batch_padded_1 = tokenizer.batch_encode_plus(sequences, padding=False)
            encoded_sequences_batch_padded_2 = tokenizer.batch_encode_plus(sequences, max_length=(maximum_length + 10), padding=False)
            for key in encoded_sequences_batch_padded_1.keys():
                self.assertListEqual(encoded_sequences_batch_padded_1[key], encoded_sequences_batch_padded_2[key])
# NOTE(review): this line was a garbled bare name `_tokenizers` (a NameError when
# the class body executes); restored to the @require_tokenizers decorator —
# confirm the name against this file's imports.
@require_tokenizers
def test_added_token_serializable(self):
    """An ``AddedToken`` special token must survive a save_pretrained /
    from_pretrained round trip without error."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            new_token = AddedToken('new_token', lstrip=True)
            tokenizer.add_special_tokens({'additional_special_tokens': [new_token]})
            with tempfile.TemporaryDirectory() as tmp_dir_name:
                tokenizer.save_pretrained(tmp_dir_name)
                tokenizer.from_pretrained(tmp_dir_name)
def test_batch_encode_plus_padding(self):
    """Batch padding to max_length must match per-sequence padding, for the
    default (right) padding side and for left padding."""
    sequences = ['Testing batch encode plus', 'Testing batch encode plus with different sequence lengths', 'Testing batch encode plus with different sequence lengths correctly pads']
    max_length = 100
    # First pass keeps the tokenizer's default padding side; second pass uses
    # fresh tokenizers switched to left padding.
    for padding_side in ('right', 'left'):
        for tokenizer in self.get_tokenizers(do_lower_case=False):
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                if (padding_side == 'left'):
                    tokenizer.padding_side = 'left'
                self._check_no_pad_token_padding(tokenizer, sequences)
                one_by_one = [tokenizer.encode_plus(sequence, max_length=max_length, padding='max_length') for sequence in sequences]
                batched = tokenizer.batch_encode_plus(sequences, max_length=max_length, padding='max_length')
                self.assertListEqual(one_by_one, self.convert_batch_encode_plus_format_to_encode_plus(batched))
def test_pretokenized_inputs(self):
    """Encoding a pre-split token list with ``is_split_into_words=True`` must
    match encoding the equivalent whitespace-joined string, across encode,
    encode_plus and batch_encode_plus, single and pair inputs, with and
    without special tokens."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            # Only meaningful when the tokenizer prefixes a space itself.
            if (hasattr(tokenizer, 'add_prefix_space') and (not tokenizer.add_prefix_space)):
                continue
            (sequence, ids) = self.get_clean_sequence(tokenizer, with_prefix_space=True, max_length=20)
            token_sequence = sequence.split()
            # --- single input: encode ---
            output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.encode(sequence, add_special_tokens=False)
            self.assertEqual(output, output_sequence)
            output = tokenizer.encode(token_sequence, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.encode(sequence, add_special_tokens=True)
            self.assertEqual(output, output_sequence)
            # --- single input: encode_plus ---
            output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=False)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            output = tokenizer.encode_plus(token_sequence, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.encode_plus(sequence, add_special_tokens=True)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            # --- batch input: batch_encode_plus ---
            # The joined variant re-adds the leading space that split() drops.
            sequence_batch = (([sequence.strip()] * 2) + [((sequence.strip() + ' ') + sequence.strip())])
            token_sequence_batch = [s.split() for s in sequence_batch]
            sequence_batch_cleaned_up_spaces = [(' ' + ' '.join(s)) for s in token_sequence_batch]
            output = tokenizer.batch_encode_plus(token_sequence_batch, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.batch_encode_plus(sequence_batch_cleaned_up_spaces, add_special_tokens=False)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            output = tokenizer.batch_encode_plus(token_sequence_batch, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.batch_encode_plus(sequence_batch_cleaned_up_spaces, add_special_tokens=True)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            # --- pair input: encode ---
            output = tokenizer.encode(token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=False)
            self.assertEqual(output, output_sequence)
            output = tokenizer.encode(token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.encode(sequence, sequence, add_special_tokens=True)
            self.assertEqual(output, output_sequence)
            # --- pair input: encode_plus ---
            output = tokenizer.encode_plus(token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=False)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            output = tokenizer.encode_plus(token_sequence, token_sequence, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.encode_plus(sequence, sequence, add_special_tokens=True)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            # --- pair batches: batch_encode_plus ---
            sequence_pair_batch = (([(sequence.strip(), sequence.strip())] * 2) + [(((sequence.strip() + ' ') + sequence.strip()), sequence.strip())])
            token_sequence_pair_batch = [tuple((s.split() for s in pair)) for pair in sequence_pair_batch]
            sequence_pair_batch_cleaned_up_spaces = [tuple(((' ' + ' '.join(s)) for s in pair)) for pair in token_sequence_pair_batch]
            output = tokenizer.batch_encode_plus(token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=False)
            output_sequence = tokenizer.batch_encode_plus(sequence_pair_batch_cleaned_up_spaces, add_special_tokens=False)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
            output = tokenizer.batch_encode_plus(token_sequence_pair_batch, is_split_into_words=True, add_special_tokens=True)
            output_sequence = tokenizer.batch_encode_plus(sequence_pair_batch_cleaned_up_spaces, add_special_tokens=True)
            for key in output.keys():
                self.assertEqual(output[key], output_sequence[key])
def test_prepare_for_model(self):
    """``prepare_for_model`` on pre-computed ids must match ``encode_plus`` on
    the raw string."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            text = 'Testing the prepare_for_model method.'
            bare_ids = tokenizer.encode(text, add_special_tokens=False)
            from_ids = tokenizer.prepare_for_model(bare_ids, add_special_tokens=True)
            from_text = tokenizer.encode_plus(text, add_special_tokens=True)
            self.assertEqual(from_text, from_ids)
def test_batch_encode_plus_overflowing_tokens(self):
    """Smoke test: ``return_overflowing_tokens`` combined with truncation and
    padding must not raise."""
    for tokenizer in self.get_tokenizers(do_lower_case=False):
        texts = ['Testing the prepare_for_model method.', 'Test']
        if (tokenizer.pad_token is None):
            tokenizer.add_special_tokens({'pad_token': '[PAD]'})
        tokenizer.batch_encode_plus(texts, return_overflowing_tokens=True, truncation=True, padding=True, max_length=3)
# NOTE(review): this line was a garbled bare name `_pt_tf_cross_test` (a NameError
# when the class body executes); restored to the @is_pt_tf_cross_test decorator —
# confirm the name against this file's imports.
@is_pt_tf_cross_test
def test_batch_encode_plus_tensors(self):
    """Tensor returns: a ragged batch cannot be converted to tensors without
    padding; with padding, PyTorch and TensorFlow tensors must agree."""
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            sequences = ['Testing batch encode plus', 'Testing batch encode plus with different sequence lengths', 'Testing batch encode plus with different sequence lengths correctly pads']
            # Ragged batch without padding cannot become a tensor.
            self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors='pt')
            self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors='tf')
            if (tokenizer.pad_token_id is None):
                # Padding requested but no pad token available: must also fail.
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, padding=True, return_tensors='pt')
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, padding='longest', return_tensors='tf')
            else:
                pytorch_tensor = tokenizer.batch_encode_plus(sequences, padding=True, return_tensors='pt')
                tensorflow_tensor = tokenizer.batch_encode_plus(sequences, padding='longest', return_tensors='tf')
                encoded_sequences = tokenizer.batch_encode_plus(sequences, padding=True)
                for key in encoded_sequences.keys():
                    pytorch_value = pytorch_tensor[key].tolist()
                    tensorflow_value = tensorflow_tensor[key].numpy().tolist()
                    encoded_value = encoded_sequences[key]
                    # NOTE(review): the plain-python value is passed as the assertion
                    # *message*, not compared — presumably intentional, but verify.
                    self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
def _check_no_pad_token_padding(self, tokenizer, sequences):
    """If the tokenizer has no pad token, assert that padding raises
    ValueError, then install '<PAD>' so later padding calls can succeed.
    No-op when a pad token already exists."""
    if (tokenizer.pad_token_id is not None):
        return
    with self.assertRaises(ValueError):
        if isinstance(sequences, list):
            tokenizer.batch_encode_plus(sequences, padding='longest')
        else:
            tokenizer.encode_plus(sequences, padding=True)
    # Give the tokenizer a pad token for the rest of the calling test.
    tokenizer.add_special_tokens({'pad_token': '<PAD>'})
# NOTE(review): this line was a garbled bare name `_torch` (a NameError when the
# class body executes); restored to the @require_torch decorator — confirm the
# name against this file's imports.
@require_torch
def test_torch_encode_plus_sent_to_model(self):
    """Single and batched encodings (``return_tensors='pt'``) must run through
    the matching PyTorch model without error."""
    import torch
    from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
    MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if (tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING):
                # Bug fix: was `return`, which silently skipped every remaining tokenizer.
                continue
            (config_class, model_class) = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
            config = config_class()
            if (config.is_encoder_decoder or (config.pad_token_id is None)):
                # Bug fix: was `return`, which silently skipped every remaining tokenizer.
                continue
            model = model_class(config)
            # The embedding matrix, when present, must cover the tokenizer vocab.
            is_using_common_embeddings = hasattr(model.get_input_embeddings(), 'weight')
            assert ((model.get_input_embeddings().weight.shape[0] >= len(tokenizer)) if is_using_common_embeddings else True)
            first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
            sequence = ' '.join(first_ten_tokens)
            encoded_sequence = tokenizer.encode_plus(sequence, return_tensors='pt')
            batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors='pt')
            with torch.no_grad():
                model(**encoded_sequence)
                model(**batch_encoded_sequence)
# NOTE(review): this line was a garbled bare name `_tf` (a NameError when the
# class body executes); restored to the @require_tf decorator — confirm the
# name against this file's imports.
@require_tf
def test_tf_encode_plus_sent_to_model(self):
    """Single and batched encodings (``return_tensors='tf'``) must run through
    the matching TensorFlow model without error."""
    from transformers import TF_MODEL_MAPPING, TOKENIZER_MAPPING
    MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(TF_MODEL_MAPPING, TOKENIZER_MAPPING)
    tokenizers = self.get_tokenizers(do_lower_case=False)
    for tokenizer in tokenizers:
        with self.subTest(f'{tokenizer.__class__.__name__}'):
            if (tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING):
                # Bug fix: was `return`, which silently skipped every remaining tokenizer.
                continue
            (config_class, model_class) = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
            config = config_class()
            if (config.is_encoder_decoder or (config.pad_token_id is None)):
                # Bug fix: was `return`, which silently skipped every remaining tokenizer.
                continue
            model = model_class(config)
            assert (model.config.vocab_size >= len(tokenizer))
            first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
            sequence = ' '.join(first_ten_tokens)
            encoded_sequence = tokenizer.encode_plus(sequence, return_tensors='tf')
            batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors='tf')
            # Keras models take the encoding dict as a single positional input.
            model(encoded_sequence)
            model(batch_encoded_sequence)
# NOTE(review): this line was a garbled bare name `_torch` (a NameError when the
# class body executes); restored to the @require_torch decorator (the test uses
# the torch MODEL_MAPPING) — confirm the name against this file's imports.
@require_torch
def test_np_encode_plus_sent_to_model(self):
    """``return_tensors='np'`` must yield non-None encodings for both the slow
    and (when available) the fast tokenizer."""
    from transformers import MODEL_MAPPING, TOKENIZER_MAPPING
    MODEL_TOKENIZER_MAPPING = merge_model_tokenizer_mappings(MODEL_MAPPING, TOKENIZER_MAPPING)
    tokenizer = self.get_tokenizer()
    if (tokenizer.__class__ not in MODEL_TOKENIZER_MAPPING):
        return
    (config_class, model_class) = MODEL_TOKENIZER_MAPPING[tokenizer.__class__]
    config = config_class()
    if (config.is_encoder_decoder or (config.pad_token_id is None)):
        return
    first_ten_tokens = list(tokenizer.get_vocab().keys())[:10]
    sequence = ' '.join(first_ten_tokens)
    encoded_sequence = tokenizer.encode_plus(sequence, return_tensors='np')
    batch_encoded_sequence = tokenizer.batch_encode_plus([sequence, sequence], return_tensors='np')
    if (encoded_sequence is None):
        raise ValueError('Cannot convert list to numpy tensor on encode_plus()')
    if (batch_encoded_sequence is None):
        raise ValueError('Cannot convert list to numpy tensor on batch_encode_plus()')
    if self.test_rust_tokenizer:
        fast_tokenizer = self.get_rust_tokenizer()
        encoded_sequence_fast = fast_tokenizer.encode_plus(sequence, return_tensors='np')
        batch_encoded_sequence_fast = fast_tokenizer.batch_encode_plus([sequence, sequence], return_tensors='np')
        if (encoded_sequence_fast is None):
            raise ValueError('Cannot convert list to numpy tensor on encode_plus() (fast)')
        if (batch_encoded_sequence_fast is None):
            raise ValueError('Cannot convert list to numpy tensor on batch_encode_plus() (fast)')
# NOTE(review): this line was a garbled bare name `_torch` (a NameError when the
# class body executes); restored to the @require_torch decorator — confirm the
# name against this file's imports.
@require_torch
def test_prepare_seq2seq_batch(self):
    """``prepare_seq2seq_batch`` must honour max_length / max_target_length
    (labels default to max_length when max_target_length is omitted) and must
    not emit decoder inputs when no targets are supplied."""
    tokenizer = self.get_tokenizer()
    if (not hasattr(tokenizer, 'prepare_seq2seq_batch')):
        return
    src_text = [' UN Chief Says There Is No Military Solution in Syria', " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people."]
    tgt_text = ['Seful ONU declara ca nu exista o solutie militara in Siria', 'Secretarul General Ban Ki-moon declara ca raspunsul sau la intensificarea sprijinului militar al Rusiei pentru Siria este ca "nu exista o solutie militara" la conflictul de aproape cinci ani si ca noi arme nu vor face decat sa inrautateasca violentele si mizeria pentru milioane de oameni.']
    try:
        batch = tokenizer.prepare_seq2seq_batch(src_texts=src_text, tgt_texts=tgt_text, max_length=3, max_target_length=10, return_tensors='pt', src_lang='en_XX')
    except NotImplementedError:
        # Not every tokenizer implements seq2seq preparation.
        return
    self.assertEqual(batch.input_ids.shape[1], 3)
    self.assertEqual(batch.labels.shape[1], 10)
    # Without max_target_length, labels are truncated to max_length too.
    batch = tokenizer.prepare_seq2seq_batch(src_text, tgt_texts=tgt_text, max_length=3, return_tensors='pt')
    self.assertEqual(batch.input_ids.shape[1], 3)
    self.assertEqual(batch.labels.shape[1], 3)
    # Encoder-only call: no targets, hence no decoder_input_ids.
    batch_encoder_only = tokenizer.prepare_seq2seq_batch(src_texts=src_text, max_length=3, max_target_length=10, return_tensors='pt')
    self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
    self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
    self.assertNotIn('decoder_input_ids', batch_encoder_only)
def test_is_fast(self):
    """``is_fast`` must be True for rust-backed tokenizers and False for the
    pure-python implementations."""
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            self.assertTrue(fast.is_fast)
            self.assertFalse(slow.is_fast)
def test_fast_only_inputs(self):
    """A fast tokenizer must reject ``None`` with TypeError on every entry point."""
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            for entry_point in (tokenizer_r.tokenize, tokenizer_r.encode, tokenizer_r.encode_plus, tokenizer_r.batch_encode_plus):
                self.assertRaises(TypeError, entry_point, None)
def test_alignement_methods(self):
    """Exercise the fast-tokenizer alignment API (words/tokens/chars/sequence
    mappings) on single, batched, and pair encodings.

    NOTE(review): 'alignement' is a typo for 'alignment', but renaming would
    change the public test name.
    """
    for (tokenizer, pretrained_name, kwargs) in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            words = ['Wonderful', 'no', 'inspiration', 'example', 'with', 'subtoken']
            text = ' '.join(words)
            batch_size = 3
            # Encode without special tokens so token/word/char counts line up 1:1.
            encoding = tokenizer_r.encode_plus(text, add_special_tokens=False)
            batch_encoding = tokenizer_r.batch_encode_plus(([text] * batch_size), add_special_tokens=False)
            num_tokens = len(encoding['input_ids'])
            last_word_index = (len(words) - 1)
            last_token_index = (num_tokens - 1)
            last_batch_index = (batch_size - 1)
            last_char_index = (len(text) - 1)
            # words(): one word id per token, spanning the full word range.
            self.assertEqual(len(encoding.words(0)), num_tokens)
            self.assertEqual(max(encoding.words(0)), last_word_index)
            self.assertEqual(min(encoding.words(0)), 0)
            self.assertEqual(len(batch_encoding.words(last_batch_index)), num_tokens)
            self.assertEqual(max(batch_encoding.words(last_batch_index)), last_word_index)
            self.assertEqual(min(batch_encoding.words(last_batch_index)), 0)
            self.assertEqual(len(encoding.tokens(0)), num_tokens)
            # token_to_word: first/last token map to first/last word,
            # with and without an explicit batch index.
            self.assertEqual(encoding.token_to_word(0), 0)
            self.assertEqual(encoding.token_to_word(0, 0), 0)
            self.assertEqual(encoding.token_to_word(last_token_index), last_word_index)
            self.assertEqual(encoding.token_to_word(0, last_token_index), last_word_index)
            self.assertEqual(batch_encoding.token_to_word(1, 0), 0)
            self.assertEqual(batch_encoding.token_to_word(0, last_token_index), last_word_index)
            self.assertEqual(batch_encoding.token_to_word(last_batch_index, last_token_index), last_word_index)
            # word_to_tokens: spans are half-open, so .end is last index + 1.
            self.assertEqual(encoding.word_to_tokens(0).start, 0)
            self.assertEqual(encoding.word_to_tokens(0, 0).start, 0)
            self.assertEqual(encoding.word_to_tokens(last_word_index).end, (last_token_index + 1))
            self.assertEqual(encoding.word_to_tokens(0, last_word_index).end, (last_token_index + 1))
            self.assertEqual(batch_encoding.word_to_tokens(1, 0).start, 0)
            self.assertEqual(batch_encoding.word_to_tokens(0, last_word_index).end, (last_token_index + 1))
            self.assertEqual(batch_encoding.word_to_tokens(last_batch_index, last_word_index).end, (last_token_index + 1))
            # token_to_chars: char spans likewise half-open.
            self.assertEqual(encoding.token_to_chars(0).start, 0)
            self.assertEqual(encoding.token_to_chars(0, 0).start, 0)
            self.assertEqual(encoding.token_to_chars(last_token_index).end, (last_char_index + 1))
            self.assertEqual(encoding.token_to_chars(0, last_token_index).end, (last_char_index + 1))
            self.assertEqual(batch_encoding.token_to_chars(1, 0).start, 0)
            self.assertEqual(batch_encoding.token_to_chars(0, last_token_index).end, (last_char_index + 1))
            self.assertEqual(batch_encoding.token_to_chars(last_batch_index, last_token_index).end, (last_char_index + 1))
            # char_to_token round trip.
            self.assertEqual(encoding.char_to_token(0), 0)
            self.assertEqual(encoding.char_to_token(0, 0), 0)
            self.assertEqual(encoding.char_to_token(last_char_index), last_token_index)
            self.assertEqual(encoding.char_to_token(0, last_char_index), last_token_index)
            self.assertEqual(batch_encoding.char_to_token(1, 0), 0)
            self.assertEqual(batch_encoding.char_to_token(0, last_char_index), last_token_index)
            self.assertEqual(batch_encoding.char_to_token(last_batch_index, last_char_index), last_token_index)
            # char_to_word round trip.
            self.assertEqual(encoding.char_to_word(0), 0)
            self.assertEqual(encoding.char_to_word(0, 0), 0)
            self.assertEqual(encoding.char_to_word(last_char_index), last_word_index)
            self.assertEqual(encoding.char_to_word(0, last_char_index), last_word_index)
            self.assertEqual(batch_encoding.char_to_word(1, 0), 0)
            self.assertEqual(batch_encoding.char_to_word(0, last_char_index), last_word_index)
            self.assertEqual(batch_encoding.char_to_word(last_batch_index, last_char_index), last_word_index)
            # word_to_chars spans.
            self.assertEqual(encoding.word_to_chars(0).start, 0)
            self.assertEqual(encoding.word_to_chars(0, 0).start, 0)
            self.assertEqual(encoding.word_to_chars(last_word_index).end, (last_char_index + 1))
            self.assertEqual(encoding.word_to_chars(0, last_word_index).end, (last_char_index + 1))
            self.assertEqual(batch_encoding.word_to_chars(1, 0).start, 0)
            self.assertEqual(batch_encoding.word_to_chars(0, last_word_index).end, (last_char_index + 1))
            self.assertEqual(batch_encoding.word_to_chars(last_batch_index, last_word_index).end, (last_char_index + 1))
            # token_to_sequence: a single-sequence encoding maps every token to sequence 0.
            self.assertEqual(encoding.token_to_sequence((num_tokens // 2)), 0)
            self.assertEqual(encoding.token_to_sequence(0, (num_tokens // 2)), 0)
            self.assertEqual(batch_encoding.token_to_sequence(1, (num_tokens // 2)), 0)
            self.assertEqual(batch_encoding.token_to_sequence(0, (num_tokens // 2)), 0)
            self.assertEqual(batch_encoding.token_to_sequence(last_batch_index, (num_tokens // 2)), 0)
            # --- pair-input variants: 'inspiration' appears in both sequences
            # at different positions, so sequence_index must disambiguate. ---
            words = ['Wonderful', 'no', 'inspiration', 'example', 'with', 'subtoken']
            text = ' '.join(words)
            pair_words = ['Amazing', 'example', 'full', 'of', 'inspiration']
            pair_text = ' '.join(pair_words)
            batch_size = 3
            index_word_in_first_seq = words.index('inspiration')
            index_word_in_pair_seq = pair_words.index('inspiration')
            index_char_in_first_seq = text.find('inspiration')
            index_char_in_pair_seq = pair_text.find('inspiration')
            pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=False)
            pair_batch_encoding = tokenizer_r.batch_encode_plus(([(text, pair_text)] * batch_size), add_special_tokens=False)
            # NOTE(review): these recomputations still read `encoding` (the
            # single-sequence result) and appear unused below — verify upstream intent.
            num_tokens = len(encoding['input_ids'])
            last_word_index = (len(words) - 1)
            last_token_index = (num_tokens - 1)
            last_batch_index = (batch_size - 1)
            last_char_index = (len(text) - 1)
            # word_to_tokens with sequence_index: different positions, same token id.
            self.assertNotEqual(pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start, pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start)
            self.assertEqual(pair_encoding['input_ids'][pair_encoding.word_to_tokens(index_word_in_first_seq, sequence_index=0).start], pair_encoding['input_ids'][pair_encoding.word_to_tokens(index_word_in_pair_seq, sequence_index=1).start])
            self.assertNotEqual(pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start, pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start)
            self.assertEqual(pair_batch_encoding['input_ids'][1][pair_batch_encoding.word_to_tokens(1, index_word_in_first_seq, sequence_index=0).start], pair_batch_encoding['input_ids'][1][pair_batch_encoding.word_to_tokens(1, index_word_in_pair_seq, sequence_index=1).start])
            # char_to_token with sequence_index.
            self.assertNotEqual(pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0), pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1))
            self.assertEqual(pair_encoding['input_ids'][pair_encoding.char_to_token(index_char_in_first_seq, sequence_index=0)], pair_encoding['input_ids'][pair_encoding.char_to_token(index_char_in_pair_seq, sequence_index=1)])
            self.assertNotEqual(pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0), pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1))
            self.assertEqual(pair_batch_encoding['input_ids'][1][pair_batch_encoding.char_to_token(1, index_char_in_first_seq, sequence_index=0)], pair_batch_encoding['input_ids'][1][pair_batch_encoding.char_to_token(1, index_char_in_pair_seq, sequence_index=1)])
            # char_to_word with sequence_index.
            self.assertNotEqual(pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0), pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1))
            self.assertEqual(words[pair_encoding.char_to_word(index_char_in_first_seq, sequence_index=0)], pair_words[pair_encoding.char_to_word(index_char_in_pair_seq, sequence_index=1)])
            self.assertNotEqual(pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0), pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1))
            self.assertEqual(words[pair_batch_encoding.char_to_word(1, index_char_in_first_seq, sequence_index=0)], pair_words[pair_batch_encoding.char_to_word(1, index_char_in_pair_seq, sequence_index=1)])
            # word_to_chars with sequence_index.
            self.assertNotEqual(pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start, pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start)
            self.assertEqual(text[pair_encoding.word_to_chars(index_word_in_first_seq, sequence_index=0).start], pair_text[pair_encoding.word_to_chars(index_word_in_pair_seq, sequence_index=1).start])
            self.assertNotEqual(pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start, pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start)
            self.assertEqual(text[pair_batch_encoding.word_to_chars(1, index_word_in_first_seq, sequence_index=0).start], pair_text[pair_batch_encoding.word_to_chars(1, index_word_in_pair_seq, sequence_index=1).start])
            # token_to_sequence with special tokens: both sequence ids present,
            # and special tokens map to None.
            pair_encoding = tokenizer_r.encode_plus(text, pair_text, add_special_tokens=True)
            pair_sequence_ids = [pair_encoding.token_to_sequence(i) for i in range(len(pair_encoding['input_ids']))]
            self.assertIn(0, pair_sequence_ids)
            self.assertIn(1, pair_sequence_ids)
            if tokenizer_r.num_special_tokens_to_add(pair=True):
                self.assertIn(None, pair_sequence_ids)
            pair_batch_encoding = tokenizer_r.batch_encode_plus(([(text, pair_text)] * batch_size), add_special_tokens=True)
            pair_batch_sequence_ids = [pair_batch_encoding.token_to_sequence(1, i) for i in range(len(pair_batch_encoding['input_ids'][0]))]
            self.assertIn(0, pair_batch_sequence_ids)
            self.assertIn(1, pair_batch_sequence_ids)
            if tokenizer_r.num_special_tokens_to_add(pair=True):
                self.assertIn(None, pair_batch_sequence_ids)
def test_tokenization_python_rust_equals(self):
    """Check that the Python (slow) and Rust (fast) tokenizers produce identical encodings."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # Only these keys are guaranteed to exist in both implementations' output.
            compared_keys = ('input_ids', 'token_type_ids', 'attention_mask')

            # Single-sequence encoding.
            input_p = tokenizer_p.encode_plus(self._data)
            input_r = tokenizer_r.encode_plus(self._data)
            for key in filter(lambda x: x in compared_keys, input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key])

            # Pair encoding.  BUG FIX: iterate the pair output's own keys
            # (input_pairs_p), not the earlier single-sequence output's keys.
            input_pairs_p = tokenizer_p.encode_plus(self._data, self._data)
            input_pairs_r = tokenizer_r.encode_plus(self._data, self._data)
            for key in filter(lambda x: x in compared_keys, input_pairs_p.keys()):
                self.assertSequenceEqual(input_pairs_p[key], input_pairs_r[key])

            # Truncated encoding.
            input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True)
            input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True)
            for key in filter(lambda x: x in compared_keys, input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key])

            # Truncation with stride and overflow: the fast tokenizer returns a
            # batch here, so compare against its first element.
            input_p = tokenizer_p.encode_plus(self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True)
            input_r = tokenizer_r.encode_plus(self._data, max_length=512, truncation=True, stride=3, return_overflowing_tokens=True)
            for key in filter(lambda x: x in compared_keys, input_p.keys()):
                self.assertSequenceEqual(input_p[key], input_r[key][0])
def test_num_special_tokens_to_add_equal(self):
    """Slow and fast tokenizers must report the same count of added special tokens."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast_tok = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow_tok = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            # Check the single-sequence case, then the pair case.
            for pair in (False, True):
                self.assertEqual(fast_tok.num_special_tokens_to_add(pair), slow_tok.num_special_tokens_to_add(pair))
def test_max_length_equal(self):
    """Slow and fast tokenizers must expose identical effective max lengths."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast_tok = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow_tok = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            self.assertEqual(fast_tok.max_len_single_sentence, slow_tok.max_len_single_sentence)
            self.assertEqual(fast_tok.max_len_sentences_pair, slow_tok.max_len_sentences_pair)
def test_special_tokens_map_equal(self):
    """The special-token maps of the two implementations must agree exactly."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast_tok = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow_tok = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            self.assertSequenceEqual(slow_tok.special_tokens_map.items(), fast_tok.special_tokens_map.items())
def test_add_tokens(self):
    """Adding regular and special tokens grows the fast tokenizer's vocab accordingly."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast_tok = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            initial_vocab_size = len(fast_tok)
            # Regular tokens: an empty string adds nothing.
            self.assertEqual(fast_tok.add_tokens(''), 0)
            self.assertEqual(fast_tok.add_tokens('testoken'), 1)
            self.assertEqual(fast_tok.add_tokens(['testoken1', 'testtoken2']), 2)
            self.assertEqual(len(fast_tok), initial_vocab_size + 3)
            # Special tokens: empty mapping adds nothing.
            self.assertEqual(fast_tok.add_special_tokens({}), 0)
            self.assertEqual(fast_tok.add_special_tokens({'bos_token': '[BOS]', 'eos_token': '[EOS]'}), 2)
            # additional_special_tokens must be a list, not a bare string.
            self.assertRaises(AssertionError, fast_tok.add_special_tokens, {'additional_special_tokens': '<testtoken1>'})
            self.assertEqual(fast_tok.add_special_tokens({'additional_special_tokens': ['<testtoken2>']}), 1)
            self.assertEqual(fast_tok.add_special_tokens({'additional_special_tokens': ['<testtoken3>', '<testtoken4>']}), 2)
            self.assertEqual(len(fast_tok), initial_vocab_size + 8)
def test_offsets_mapping(self):
    """Offset mapping must be one entry per token, and the special-tokens mask
    must sum to num_special_tokens_to_add."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast_tok = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            text = 'Wonderful no inspiration example with subtoken'
            pair = 'Along with an awesome pair'
            # Run the same checks for the single-sequence and the pair case.
            for encode_args, is_pair in (((text,), False), ((text, pair), True)):
                encoding = fast_tok.encode_plus(*encode_args, return_special_tokens_mask=True, return_offsets_mapping=True, add_special_tokens=True)
                expected_special = fast_tok.num_special_tokens_to_add(is_pair)
                self.assertEqual(len(encoding['offset_mapping']), len(encoding['input_ids']))
                self.assertEqual(sum(encoding['special_tokens_mask']), expected_special)
def test_batch_encode_dynamic_overflowing(self):
    """When overflowing tokens are returned, padded tensors must still be
    rectangular (2-D, last dimension == max_length) for every produced key."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        tokenizer = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
        with self.subTest('{} ({}, {})'.format(tokenizer.__class__.__name__, pretrained_name, tokenizer.__class__.__name__)):
            returned_tensor = ('pt' if is_torch_available() else 'tf')
            # Padding below requires a usable pad token.
            # BUG FIX: use 'continue' instead of 'return' so tokenizers later
            # in the list are still exercised when one of them cannot pad.
            if (not tokenizer.pad_token) or (tokenizer.pad_token_id < 0):
                continue
            tokens = tokenizer.encode_plus('HuggingFace is solving NLP one commit at a time', max_length=6, padding=True, truncation=True, return_tensors=returned_tensor, return_overflowing_tokens=True)
            for key in filter(lambda x: 'overflow_to_sample_mapping' not in x, tokens.keys()):
                self.assertEqual(len(tokens[key].shape), 2)
            # Single-example batch: every tensor is 2-D with width 6.
            tokens = tokenizer.batch_encode_plus(['HuggingFace is solving NLP one commit at a time'], max_length=6, padding=True, truncation='only_first', return_tensors=returned_tensor, return_overflowing_tokens=True)
            for key in filter(lambda x: 'overflow_to_sample_mapping' not in x, tokens.keys()):
                self.assertEqual(len(tokens[key].shape), 2)
                self.assertEqual(tokens[key].shape[-1], 6)
            # Multi-example batch with differing overflow counts per sample.
            tokens = tokenizer.batch_encode_plus(['HuggingFace is solving NLP one commit at a time', 'Very tiny input'], max_length=6, padding=True, truncation='only_first', return_tensors=returned_tensor, return_overflowing_tokens=True)
            for key in filter(lambda x: 'overflow_to_sample_mapping' not in x, tokens.keys()):
                self.assertEqual(len(tokens[key].shape), 2)
                self.assertEqual(tokens[key].shape[-1], 6)
def test_compare_pretokenized_inputs(self):
    """Slow and fast tokenizers must agree on pre-tokenized (is_split_into_words) inputs."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            if hasattr(tokenizer_p, 'add_prefix_space') and (not tokenizer_p.add_prefix_space):
                continue
            pretokenized_input_simple = 'This is a sample input'.split()
            pretokenized_input_pair = 'This is a sample pair'.split()
            # encode() on a pre-tokenized single sequence.
            output_r = tokenizer_r.encode(pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False)
            output_p = tokenizer_p.encode(pretokenized_input_simple, is_split_into_words=True, add_special_tokens=False)
            self.assertEqual(output_p, output_r)
            # BUG FIX: renamed from `kwargs`, which shadowed (and clobbered)
            # the loop variable carrying the from_pretrained kwargs.
            encode_kwargs = {'is_split_into_words': True, 'return_overflowing_tokens': False, 'return_special_tokens_mask': True, 'return_offsets_mapping': False}
            batch_kwargs = {'is_split_into_words': True, 'return_overflowing_tokens': False, 'return_special_tokens_mask': True, 'return_offsets_mapping': False}
            # encode_plus() on a single sequence.
            output_r = tokenizer_r.encode_plus(pretokenized_input_simple, **encode_kwargs)
            output_p = tokenizer_p.encode_plus(pretokenized_input_simple, **encode_kwargs)
            for key in output_p.keys():
                self.assertEqual(output_p[key], output_r[key])
            # batch_encode_plus() on single sequences of differing lengths.
            input_batch = ([pretokenized_input_simple] * 2) + [pretokenized_input_simple + pretokenized_input_pair]
            output_r = tokenizer_r.batch_encode_plus(input_batch, **batch_kwargs)
            output_p = tokenizer_p.batch_encode_plus(input_batch, **batch_kwargs)
            for key in output_p.keys():
                self.assertEqual(output_p[key], output_r[key])
            # encode() / encode_plus() on sequence pairs.
            output_r = tokenizer_r.encode(pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True)
            output_p = tokenizer_p.encode(pretokenized_input_simple, pretokenized_input_pair, is_split_into_words=True)
            self.assertEqual(output_p, output_r)
            output_r = tokenizer_r.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **encode_kwargs)
            output_p = tokenizer_p.encode_plus(pretokenized_input_simple, pretokenized_input_pair, **encode_kwargs)
            for key in output_p.keys():
                self.assertEqual(output_p[key], output_r[key])
            # batch_encode_plus() on mixed single/pair batches.
            input_batch_pair = ([pretokenized_input_simple, pretokenized_input_pair] * 2) + [pretokenized_input_simple + pretokenized_input_pair, pretokenized_input_pair]
            output_r = tokenizer_r.batch_encode_plus(input_batch_pair, **batch_kwargs)
            output_p = tokenizer_p.batch_encode_plus(input_batch_pair, **batch_kwargs)
            for key in output_p.keys():
                self.assertEqual(output_p[key], output_r[key])
def test_create_token_type_ids(self):
    """create_token_type_ids_from_sequences must match between implementations."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast_tok = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow_tok = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            input_simple = [1, 2, 3]
            input_pair = [1, 2, 3]
            # Single sequence, then sequence pair.
            for call_args in ((input_simple,), (input_simple, input_pair)):
                expected = slow_tok.create_token_type_ids_from_sequences(*call_args)
                actual = fast_tok.create_token_type_ids_from_sequences(*call_args)
                self.assertEqual(expected, actual)
def test_build_inputs_with_special_tokens(self):
    """build_inputs_with_special_tokens must match between implementations."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast_tok = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow_tok = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            input_simple = slow_tok.encode('This is a sample input', add_special_tokens=False)
            input_pair = slow_tok.encode('This is a sample pair', add_special_tokens=False)
            # Single sequence, then sequence pair.
            for call_args in ((input_simple,), (input_simple, input_pair)):
                expected = slow_tok.build_inputs_with_special_tokens(*call_args)
                actual = fast_tok.build_inputs_with_special_tokens(*call_args)
                self.assertEqual(expected, actual)
def test_padding(self, max_length=50):
    """Compare padding behaviour of the slow and fast tokenizers across
    encode, encode_plus, batch_encode_plus and pad(), for both the
    'max_length' and 'longest' strategies (including the legacy
    pad_to_max_length flag)."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

            def assert_padded_input_match(input_r: list, input_p: list, max_length: int):
                # Both encodings reach the same target length...
                self.assertEqual(len(input_r), max_length)
                self.assertEqual(len(input_p), max_length)
                # ...and end with the same run of pad tokens.
                padded_tokens_r = list(takewhile(lambda i: i == tokenizer_r.pad_token_id, reversed(input_r)))
                padded_tokens_p = list(takewhile(lambda i: i == tokenizer_p.pad_token_id, reversed(input_p)))
                self.assertSequenceEqual(padded_tokens_r, padded_tokens_p)

            def assert_batch_padded_input_match(input_r: dict, input_p: dict, max_length: int):
                # BUG FIX: the original ran these three assertions twice (a
                # duplicated line) and packed them into throwaway tuples;
                # asserting each once as a plain statement is sufficient.
                for i_r in input_r.values():
                    self.assertEqual(len(i_r), 2)
                    self.assertEqual(len(i_r[0]), max_length)
                    self.assertEqual(len(i_r[1]), max_length)
                for i_r, i_p in zip(input_r['input_ids'], input_p['input_ids']):
                    assert_padded_input_match(i_r, i_p, max_length)
                for i_r, i_p in zip(input_r['attention_mask'], input_p['attention_mask']):
                    self.assertSequenceEqual(i_r, i_p)

            # --- encode: simple input ---
            input_r = tokenizer_r.encode('This is a simple input', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode('This is a simple input', max_length=max_length, pad_to_max_length=True)
            assert_padded_input_match(input_r, input_p, max_length)
            input_r = tokenizer_r.encode('This is a simple input', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode('This is a simple input', max_length=max_length, padding='max_length')
            assert_padded_input_match(input_r, input_p, max_length)
            input_r = tokenizer_r.encode('This is a simple input', padding='longest')
            input_p = tokenizer_p.encode('This is a simple input', padding=True)
            assert_padded_input_match(input_r, input_p, len(input_r))

            # --- encode: sequence pair ---
            input_r = tokenizer_r.encode('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            assert_padded_input_match(input_r, input_p, max_length)
            input_r = tokenizer_r.encode('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            assert_padded_input_match(input_r, input_p, max_length)
            input_r = tokenizer_r.encode('This is a simple input', 'This is a pair', padding=True)
            input_p = tokenizer_p.encode('This is a simple input', 'This is a pair', padding='longest')
            assert_padded_input_match(input_r, input_p, len(input_r))

            # --- encode_plus: simple input ---
            input_r = tokenizer_r.encode_plus('This is a simple input', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode_plus('This is a simple input', max_length=max_length, pad_to_max_length=True)
            assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus('This is a simple input', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus('This is a simple input', max_length=max_length, padding='max_length')
            assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus('This is a simple input', padding='longest')
            input_p = tokenizer_p.encode_plus('This is a simple input', padding=True)
            assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']))
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])

            # --- encode_plus: sequence pair ---
            input_r = tokenizer_r.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, pad_to_max_length=True)
            assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            input_p = tokenizer_p.encode_plus('This is a simple input', 'This is a pair', max_length=max_length, padding='max_length')
            assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length)
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])
            input_r = tokenizer_r.encode_plus('This is a simple input', 'This is a pair', padding='longest')
            input_p = tokenizer_p.encode_plus('This is a simple input', 'This is a pair', padding=True)
            assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']))
            self.assertSequenceEqual(input_r['attention_mask'], input_p['attention_mask'])

            # --- batch_encode_plus: simple inputs ---
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, pad_to_max_length=True)
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, pad_to_max_length=True)
            assert_batch_padded_input_match(input_r, input_p, max_length)
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding='max_length')
            assert_batch_padded_input_match(input_r, input_p, max_length)
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding='longest')
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], max_length=max_length, padding=True)
            assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]))
            input_r = tokenizer_r.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], padding='longest')
            input_p = tokenizer_p.batch_encode_plus(['This is a simple input 1', 'This is a simple input 2'], padding=True)
            assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]))

            # --- batch_encode_plus: sequence pairs ---
            input_r = tokenizer_r.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], max_length=max_length, truncation=True, padding='max_length')
            input_p = tokenizer_p.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], max_length=max_length, truncation=True, padding='max_length')
            assert_batch_padded_input_match(input_r, input_p, max_length)
            input_r = tokenizer_r.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], padding=True)
            input_p = tokenizer_p.batch_encode_plus([('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2')], padding='longest')
            assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]))

            # --- pad() on previously-encoded inputs ---
            input_r = tokenizer_r.encode_plus('This is a input 1')
            input_r = tokenizer_r.pad(input_r)
            input_p = tokenizer_r.encode_plus('This is a input 1')
            input_p = tokenizer_r.pad(input_p)
            assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], len(input_r['input_ids']))
            input_r = tokenizer_r.encode_plus('This is a input 1')
            input_r = tokenizer_r.pad(input_r, max_length=max_length, padding='max_length')
            input_p = tokenizer_r.encode_plus('This is a input 1')
            input_p = tokenizer_r.pad(input_p, max_length=max_length, padding='max_length')
            assert_padded_input_match(input_r['input_ids'], input_p['input_ids'], max_length)
            input_r = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_r = tokenizer_r.pad(input_r)
            input_p = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_p = tokenizer_r.pad(input_p)
            assert_batch_padded_input_match(input_r, input_p, len(input_r['input_ids'][0]))
            input_r = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_r = tokenizer_r.pad(input_r, max_length=max_length, padding='max_length')
            input_p = tokenizer_r.batch_encode_plus(['This is a input 1', 'This is a much longer input whilch should be padded'])
            input_p = tokenizer_r.pad(input_p, max_length=max_length, padding='max_length')
            assert_batch_padded_input_match(input_r, input_p, max_length)
def test_save_pretrained(self):
    """Saving slow and fast tokenizers to the same directory must produce the
    same file list, and both must reload with their special tokens intact."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            tmpdirname2 = tempfile.mkdtemp()
            # BUG FIX: clean up the temporary directory even when an assertion
            # fails; previously rmtree only ran on success, leaking the dir.
            try:
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Every special token of the slow reload exists on the fast one.
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
            finally:
                shutil.rmtree(tmpdirname2)
def test_embeded_special_tokens(self):
    """Slow and fast tokenizers agree on sentences containing special tokens."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast_tok = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow_tok = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            sentence = 'A, <mask> AllenNLP sentence.'
            fast_out = fast_tok.encode_plus(sentence, add_special_tokens=True)
            slow_out = slow_tok.encode_plus(sentence, add_special_tokens=True)
            # Every field produced by the slow tokenizer must match the fast one.
            for key in slow_out.keys():
                self.assertEqual(fast_out[key], slow_out[key])
            if 'token_type_ids' in fast_out:
                self.assertEqual(sum(fast_out['token_type_ids']), sum(slow_out['token_type_ids']))
            # The decoded token strings must match as well.
            fast_tokens = fast_tok.convert_ids_to_tokens(fast_out['input_ids'])
            slow_tokens = slow_tok.convert_ids_to_tokens(slow_out['input_ids'])
            self.assertSequenceEqual(fast_tokens, slow_tokens)
def test_compare_add_special_tokens(self):
    """With vs. without special tokens, lengths differ by exactly
    num_special_tokens_to_add for every encoding entry point."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast_tok = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            n_extra = fast_tok.num_special_tokens_to_add(pair=False)
            for text in ['', ' ']:
                # tokenize / encode return flat sequences.
                for encode_fn in (fast_tok.tokenize, fast_tok.encode):
                    without = encode_fn(text, add_special_tokens=False)
                    with_special = encode_fn(text, add_special_tokens=True)
                    self.assertEqual(len(without), len(with_special) - n_extra)
                # encode_plus returns a dict of sequences.
                without = fast_tok.encode_plus(text, add_special_tokens=False)
                with_special = fast_tok.encode_plus(text, add_special_tokens=True)
                for key in without.keys():
                    self.assertEqual(len(without[key]), len(with_special[key]) - n_extra)
                # batch_encode_plus returns a dict of lists of sequences.
                without = fast_tok.batch_encode_plus([text, text], add_special_tokens=False)
                with_special = fast_tok.batch_encode_plus([text, text], add_special_tokens=True)
                for key in without.keys():
                    for seq_plain, seq_special in zip(without[key], with_special[key]):
                        self.assertEqual(len(seq_plain), len(seq_special) - n_extra)
def test_compare_prepare_for_model(self):
    """prepare_for_model output must match between slow and fast tokenizers."""
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest('{} ({})'.format(tokenizer.__class__.__name__, pretrained_name)):
            fast_tok = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            slow_tok = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
            string_sequence = 'Asserting that both tokenizers are equal'
            # Encode without special tokens, then let prepare_for_model add them.
            slow_ids = slow_tok.encode(string_sequence, add_special_tokens=False)
            fast_ids = fast_tok.encode(string_sequence, add_special_tokens=False)
            python_output = slow_tok.prepare_for_model(slow_ids)
            rust_output = fast_tok.prepare_for_model(fast_ids)
            for key in python_output:
                self.assertEqual(python_output[key], rust_output[key])
@_registry(dataset_type='CIFAR10', framework='onnxrt_qlinearops, onnxrt_integerops', dataset_format='')
class CIFAR10(Dataset):
    """CIFAR-10 dataset loader (pickled python batches).

    Loads the batches under ``root`` into a single (N, 32, 32, 3) uint8 array.
    BUG FIX: the registry call above had lost its ``@`` and was a no-op
    statement; restored it as a decorator so the class is actually registered.
    """

    # BUG FIX: the download URL string was truncated to an empty literal
    # (a syntax error); restored the canonical CIFAR-10 python archive URL.
    url = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    filename = 'cifar-10-python.tar.gz'
    tgz_md5 = 'c58f30108f718f92721af3b95e74349a'
    # BUG FIX: data_batch_4's md5 was truncated to 24 hex chars, so integrity
    # verification could never pass; restored the full 32-char checksum.
    train_list = [['data_batch_1', 'c99cafc152244af753f735de768cd75f'], ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], ['data_batch_4', '634d18415352ddfa80567beed471001a'], ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb']]
    test_list = [['test_batch', '40351d587109b95175f43aff81a1287e']]
    meta = {'filename': 'batches.meta', 'key': 'label_names', 'md5': '5ff9c542aee3614f3951f8cda6e48888'}

    def __init__(self, root, train=False, transform=None, filter=None, download=True):
        """Load the pickled CIFAR-10 batches found under ``root``.

        Args:
            root: directory containing the extracted batch files.
            train: if True load the five training batches, else the test batch.
            transform: optional callable applied as (image, label) -> (image, label).
            filter: unused; kept for registry interface compatibility.
            download: fetch and extract the archive when not already present.

        Raises:
            RuntimeError: if the files are missing or fail their md5 checks.
        """
        self.root = root
        if download:
            self.download()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted. You can use download=True to download it')
        downloaded_list = self.train_list if train else self.test_list
        self.data = []
        self.targets = []
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, file_name)
            with open(file_path, 'rb') as f:
                # latin1 keeps the raw byte values of the python2 pickles.
                entry = pickle.load(f, encoding='latin1')
                self.data.append(entry['data'])
                # CIFAR-10 uses 'labels'; CIFAR-100-style pickles use 'fine_labels'.
                if 'labels' in entry:
                    self.targets.extend(entry['labels'])
                else:
                    self.targets.extend(entry['fine_labels'])
        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # CHW -> HWC
        self.load_meta()
        self.transform = transform

    def load_meta(self):
        """Load class names from the metadata file and build the name->index map."""
        path = os.path.join(self.root, self.meta['filename'])
        if not check_integrity(path, self.meta['md5']):
            raise RuntimeError(('Dataset metadata file not found or corrupted.' + ' You can use download=True to download it'))
        with open(path, 'rb') as infile:
            data = pickle.load(infile, encoding='latin1')
            self.classes = data[self.meta['key']]
        self.class_to_idx = {_class: i for (i, _class) in enumerate(self.classes)}

    def __getitem__(self, index):
        """Return the (image, label) pair at ``index``, transformed if configured."""
        (image, label) = (self.data[index], self.targets[index])
        if self.transform is not None:
            (image, label) = self.transform((image, label))
        return (image, label)

    def __len__(self):
        """Number of loaded samples."""
        return len(self.data)

    def download(self):
        """Download and extract the archive into root, unless already verified."""
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_root = os.path.expanduser(self.root)
        filename = os.path.basename(self.url)
        download_url(self.url, download_root, filename, self.tgz_md5)
        archive = os.path.join(download_root, filename)
        print('Extracting {} to {}'.format(archive, download_root))
        with tarfile.open(archive, 'r:gz') as tar:
            tar.extractall(path=download_root)

    def _check_integrity(self):
        """True iff every train and test batch file exists with a correct md5."""
        root = self.root
        for fentry in (self.train_list + self.test_list):
            (filename, md5) = (fentry[0], fentry[1])
            fpath = os.path.join(root, filename)
            if not check_integrity(fpath, md5):
                return False
        return True
def get_SVs(net, prefix):
    """Collect singular-value entries ('sv' in the key) from a network's
    state dict as plain floats, keyed '<prefix>_<key>' with dots replaced
    by underscores."""
    state = net.state_dict()
    result = {}
    for key, value in state.items():
        if 'sv' not in key:
            continue
        flat_name = ('%s_%s' % (prefix, key)).replace('.', '_')
        result[flat_name] = float(value.item())
    return result
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    """Deprecated alias of MobileViTImageProcessor, kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit the deprecation notice before delegating construction.
        deprecation_message = 'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use MobileViTImageProcessor instead.'
        warnings.warn(deprecation_message, FutureWarning)
        super().__init__(*args, **kwargs)
class IndexInitializer(trackable_base.Trackable):
    """Trackable initializer that fills an index from an on-disk asset file."""

    def __init__(self, filename, name=None):
        """Track `filename` as a SavedModel asset used to initialize the index."""
        self._name = name
        self._filename_arg = filename
        self._filename = self._track_trackable(trackable.TrackableAsset(filename), '_filename')

    def _shared_name(self):
        """Return the shared resource name derived from the initializing file."""
        shared_name = ('index_%s' % self._filename_arg)
        # BUG FIX: the computed name was silently dropped (implicit None
        # return); return it so callers actually receive the shared name.
        return shared_name

    def initialize(self, index):
        """Create and return the op that initializes `index` from the asset file."""
        with tf.name_scope(self._name, 'index_file_init', (index.resource_handle,)):
            filename = tf.convert_to_tensor(self._filename, tf.string, name='asset_filepath')
            init_op = search_op.initialize_index_from_file(index.resource_handle, filename)
            # Register with the graph collections so session-based loading
            # runs the initializer and exports the asset file.
            tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, init_op)
            tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, filename)
            return init_op
def _count_class_sample(y):
(unique, counts) = np.unique(y, return_counts=True)
return dict(zip(unique, counts)) |
def var_binned(t, y, w, freq, nbins, linterp=True):
    """Weighted sum of squared residuals of `y` around the binned phase model
    evaluated at phase (t * freq) mod 1."""
    model = binned_pdm_model(t, y, w, freq, nbins, linterp=linterp)
    phase = (t * freq) % 1.0
    residuals = y - model(phase)
    return np.dot(w, np.power(residuals, 2))
class TestBaseDataset(unittest.TestCase):
    """Checks that BaseDataset attaches and registers its processors only after
    init_processors() is called."""

    def test_init_processors(self):
        config_path = os.path.join(os.path.abspath(__file__), '../../../pythia/common/defaults/configs/datasets/vqa/vqa2.yml')
        configuration = Configuration(os.path.abspath(config_path))
        self._fix_configuration(configuration)
        configuration.freeze()
        vqa2_config = configuration.get_config()['dataset_attributes']['vqa2']
        base_dataset = BaseDataset('vqa2', 'train', vqa2_config)
        expected_processors = ('answer_processor', 'ocr_token_processor', 'bbox_processor')
        # Before init_processors: no processor attributes, nothing registered.
        for processor in expected_processors:
            self.assertFalse(hasattr(base_dataset, processor))
            self.assertIsNone(registry.get('{}_{}'.format('vqa2', processor)))
        base_dataset.init_processors()
        # Afterwards: every processor is attached and registered.
        for processor in expected_processors:
            self.assertTrue(hasattr(base_dataset, processor))
            self.assertIsNotNone(registry.get('{}_{}'.format('vqa2', processor)))

    def _fix_configuration(self, configuration):
        # Drop processors whose backing data files are unavailable in tests.
        processors = configuration.config['dataset_attributes']['vqa2']['processors']
        processors.pop('text_processor')
        processors.pop('context_processor')
def add_to_freeze_collection(vars):
    """Add the given variable (or list/tuple of variables) to the 'freeze'
    graph collection."""
    variables = vars if isinstance(vars, (list, tuple)) else [vars]
    for variable in variables:
        tf.add_to_collection('freeze', variable)
def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
    """Train the legacy masked LM task for one epoch on `data_dir` with the
    given architecture; `extra_args` are appended to the base CLI flags."""
    train_parser = options.get_training_parser()
    # Tiny single-epoch configuration used by the tests.
    base_args = ['--task', 'cross_lingual_lm', data_dir, '--arch', arch, '--optimizer', 'adam', '--lr-scheduler', 'reduce_lr_on_plateau', '--lr-shrink', '0.5', '--lr', '0.0001', '--min-lr', '1e-09', '--dropout', '0.1', '--attention-dropout', '0.1', '--criterion', 'legacy_masked_lm_loss', '--masked-lm-only', '--monolingual-langs', 'in,out', '--num-segment', '5', '--encoder-layers', '1', '--encoder-embed-dim', '32', '--encoder-attention-heads', '1', '--encoder-ffn-embed-dim', '32', '--max-tokens', '500', '--tokens-per-sample', '500', '--save-dir', data_dir, '--max-epoch', '1', '--no-progress-bar', '--distributed-world-size', '1', '--dataset-impl', 'raw']
    train_args = options.parse_args_and_arch(train_parser, base_args + list(extra_args))
    train.main(train_args)
def load_partition_data_cifar100(data_dir, partition_method, partition_alpha, client_number, batch_size, logger):
    """Partition CIFAR-100 across clients and build per-client train/test loaders.

    Each client's test indices are sampled so that the label proportions mirror
    that client's training-label distribution (traindata_cls_counts).

    Returns:
        Tuple of (None, None, None, None, data_local_num_dict,
        train_data_local_dict, test_data_local_dict, traindata_cls_counts);
        the leading Nones keep the signature aligned with sibling loaders
        that also return global loaders.

    NOTE(review): np.random.permutation below is unseeded, so the per-client
    test split is not reproducible across runs — confirm callers seed numpy.
    """
    (X_train, y_train, X_test, y_test, net_dataidx_map, traindata_cls_counts) = partition_data(data_dir, partition_method, client_number, partition_alpha, logger=logger)
    data_local_num_dict = dict()
    train_data_local_dict = dict()
    test_data_local_dict = dict()
    (transform_train, transform_test) = _data_transforms_cifar100()
    # Datasets are loaded once and shared by every client's data loaders.
    cache_train_data_set = CIFAR100(data_dir, train=True, transform=transform_train, download=True)
    cache_test_data_set = CIFAR100(data_dir, train=False, transform=transform_test, download=True)
    # Group test-sample indices by label (CIFAR-100 has 100 classes).
    idx_test = [[] for i in range(100)]
    for label in range(100):
        idx_test[label] = np.where((y_test == label))[0]
    test_dataidxs = [[] for i in range(client_number)]
    # Target per-client test-set size: even share of the full test set.
    tmp_tst_num = math.ceil((len(cache_test_data_set) / client_number))
    for client_idx in range(client_number):
        for label in range(100):
            # Number of test samples of this label, proportional to the
            # client's training-label distribution.
            label_num = math.ceil(((traindata_cls_counts[client_idx][label] / sum(traindata_cls_counts[client_idx])) * tmp_tst_num))
            rand_perm = np.random.permutation(len(idx_test[label]))
            if (len(test_dataidxs[client_idx]) == 0):
                test_dataidxs[client_idx] = idx_test[label][rand_perm[:label_num]]
            else:
                test_dataidxs[client_idx] = np.concatenate((test_dataidxs[client_idx], idx_test[label][rand_perm[:label_num]]))
        dataidxs = net_dataidx_map[client_idx]
        (train_data_local, test_data_local) = get_dataloader_CIFAR100(data_dir, batch_size, batch_size, dataidxs, test_dataidxs[client_idx], cache_train_data_set=cache_train_data_set, cache_test_data_set=cache_test_data_set, logger=logger)
        local_data_num = len(train_data_local.dataset)
        data_local_num_dict[client_idx] = local_data_num
        logger.info(('client_idx = %d, local_sample_number = %d' % (client_idx, local_data_num)))
        train_data_local_dict[client_idx] = train_data_local
        test_data_local_dict[client_idx] = test_data_local
    record_part(y_test, traindata_cls_counts, test_dataidxs, logger)
    return (None, None, None, None, data_local_num_dict, train_data_local_dict, test_data_local_dict, traindata_cls_counts)
class Runner(object):
    """Training/evaluation driver for a (distributed) VAE-style model.

    Builds data loaders, the model, and the optimizer from `cfg`, then runs
    the train / eval / sample / checkpoint loop.

    NOTE(review): the bare `_grad()` statements between methods look like
    mangled `@torch.no_grad()` decorators — confirm against the original file.
    """

    def __init__(self, cfg, metric, local_rank, sample_only=False):
        """Set up device, data loaders, model and optimizer.

        Args:
            cfg: experiment configuration (attribute- and item-style access).
            metric: a MetricLogger instance, or None to create a fresh one.
            local_rank: process rank (0 for single-process runs).
            sample_only: when True, skip data-loader and optimizer setup.
        """
        self.local_rank = local_rank
        self.cfg = cfg
        self.best_modelpath = None
        (self.last_msg_train, self.last_msg_eval, self.n_xid_train) = ('', '', 0)
        self.img_size = data_helper.get_imgsize(cfg.dataset)
        self.canvas_size = data_helper.get_canvsize(cfg.dataset)
        self.imgd = data_helper.get_imgd(cfg.dataset)
        assert (self.cfg.cat_vae.canvas_dim > 0)
        self.canvasd = self.cfg.cat_vae.canvas_dim
        logger.debug('img_size: {} | imgd: {} | local_rank: {}', self.img_size, self.imgd, self.local_rank)
        self.metric = (MetricLogger() if (metric is None) else metric)
        if (not cfg.distributed):
            self.device = torch.device('cuda')
        else:
            # One device per process in DDP mode.
            self.device = torch.device(('cuda:%d' % local_rank))
        self.input_dim = (data_helper.get_imgsize(cfg.dataset) ** 2)
        if (not sample_only):
            self.init_data_loader()
        self.model = self.build_model()
        self.model.set_metric(self.metric)
        self.model.n_xid_train = self.n_xid_train
        if cfg.distributed:
            self.model = model_helper.DataParallelPassthrough(self.model, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False, find_unused_parameters=True).to(torch.device(('cuda:%d' % local_rank)))
        if cfg.use_prior_model:
            self.model.set_bank(self.model.device)
        self.max_epoch = cfg['epochs']
        if (not sample_only):
            self.model.num_total_iter = (self.max_epoch * len(self.train_loader))
        self.init_epoch = 0
        if (not sample_only):
            self.init_optimizer()
        self.test_loss_best = .0
        self.best_epoch = 0
        self.model.train()
        self.metric.log_model(self.model)
        (self.dict_msg_eval, self.dict_msg_train) = ({}, {})

    def init_data_loader(self):
        """Build train/val/test loaders, handling distributed samplers and the
        optional carve-a-validation-set-out-of-train mode."""
        cfg = self.cfg
        SPLIT_TRAINVAL = data_helper.split_val_from_train(cfg.dataset)
        kwargs = {'num_workers': 1, 'pin_memory': False}
        train_set = helper.build_data_set(cfg.dataset, 1)
        data_helper.label2imgid(train_set)
        num_train = len(train_set)
        # Test labels are shifted past the training ids so ids stay unique.
        self.test_set_label_offset = num_train
        if TRAIN_SUBSET:
            # Debug mode: shrink the training set to a small prefix.
            selected = list(range(min(len(train_set), ((cfg.batch_size * 10) + 1000))))
            train_set = torch.utils.data.Subset(train_set, selected)
        if SPLIT_TRAINVAL:
            # Use the last `ntest` training images as the validation split.
            selected = list(range(0, (len(train_set) - 1000)))
            ntest = (1000 if (cfg.test_size == (- 1)) else cfg.test_size)
            val_set = torch.utils.data.Subset(train_set, list(range((len(train_set) - ntest), len(train_set))))
            self.val_set_label_offset = 0
            train_set = torch.utils.data.Subset(train_set, selected)
            test_set = helper.build_data_set(cfg.dataset, istrain=0)
        else:
            td = data_helper.get_test_data(cfg.dataset)
            test_set = helper.build_data_set(td, istrain=0)
            val_set = helper.build_data_set(cfg.dataset, istrain=0)
            logger.info('[Non SPLIT_TRAINVAL] num of val={}', len(val_set))
            self.val_set_label_offset = self.test_set_label_offset
        self.n_xid_train = len(train_set)
        if cfg.distributed:
            self.train_sampler = torch.utils.data.distributed.DistributedSampler(train_set, shuffle=True)
            self.train_loader = torch.utils.data.DataLoader(train_set, sampler=self.train_sampler, batch_size=cfg['batch_size'], **kwargs)
        else:
            self.train_loader = torch.utils.data.DataLoader(train_set, batch_size=cfg['batch_size'], shuffle=True, **kwargs)
        data_helper.label2imgid(test_set)
        if (not SPLIT_TRAINVAL):
            data_helper.label2imgid(val_set)
            # Cap validation size to keep eval cheap.
            if (len(val_set) > 1000):
                val_set = torch.utils.data.Subset(val_set, list(range(0, 1000)))
        logger.info('[NUM of image] in train:val:test={}:{}:{} | val_set offset={}', len(train_set), len(val_set), len(test_set), self.val_set_label_offset)
        if cfg.distributed:
            self.full_test_loader = torch.utils.data.DataLoader(test_set, sampler=torch.utils.data.distributed.DistributedSampler(test_set, shuffle=False), batch_size=cfg['test_batch_size'], **kwargs)
        else:
            self.full_test_loader = torch.utils.data.DataLoader(test_set, batch_size=cfg['test_batch_size'], shuffle=False, **kwargs)
        # num_sample = importance samples per image for NLL estimation.
        self.num_sample = 50
        if SPLIT_TRAINVAL:
            self.num_sample = 50
            logger.info('SPLIT_TRAINVAL == 1, use last 1k sample in training set as validation | Nsample to estimate val NLL = {}', self.num_sample)
            pass
        elif (cfg.test_size > 0):
            selected = list(range(len(val_set)))
            selected = selected[:cfg.test_size]
            val_set = torch.utils.data.Subset(val_set, selected)
            self.num_sample = 5
        if (self.img_size == 256):
            # Large images: fewer samples to bound eval cost.
            self.num_sample = 2
        if cfg.distributed:
            self.val_loader = torch.utils.data.DataLoader(val_set, sampler=torch.utils.data.distributed.DistributedSampler(val_set, shuffle=False), batch_size=cfg['test_batch_size'], **kwargs)
        else:
            self.val_loader = torch.utils.data.DataLoader(val_set, batch_size=cfg['test_batch_size'], shuffle=False, **kwargs)
        logger.info('build data with shape: {}; batch size {}'.format(train_set[0][0].shape, cfg['batch_size']))

    def init_optimizer(self):
        """Create the optimizer via the model's own factory method."""
        cfg = self.cfg
        self.optimizer = self.model.get_optim(cfg.lr)

    def build_model(self):
        """Instantiate the model class selected by cfg.model_name and move it
        to this process's device."""
        cfg = self.cfg
        if (cfg.model_name in ['vae', 'cvae', 'cvae2']):
            from model.vae import VAE as Model
        elif (cfg.model_name == 'cat_vloc_at'):
            from model.vary_loc_at import CatVaryLocAT as Model
        else:
            raise ValueError(('Not support %s' % cfg.model_name))
        built_model = Model(cfg)
        built_model.to(self.device)
        return built_model

    def train_epochs(self):
        """Main loop: train each epoch, periodically evaluate/sample, and
        checkpoint (best-eval, every 50 epochs, and 10-minute snapshots)."""
        logger.info('start training from E{} to {}', self.init_epoch, self.max_epoch)
        EVAL = os.getenv('EVAL', None)
        cmt = self.cfg.cmt
        if EVAL:
            cmt += (' [EVAL] %s' % EVAL)
        outdir = os.path.join(self.cfg.exp_dir, self.cfg.exp_name)
        pre_msg = f'''
[CMT]: {cmt} 
{outdir} '''
        slurm_id = os.getenv('SLURM_JOB_ID', None)
        slurm_name = os.getenv('SLURM_JOB_NAME', '')
        slurm_node = os.getenv('SLURM_JOB_NODELIST', '')
        if (len(self.metric.comet_url) > 1):
            pre_msg += f'''
[url]: {self.metric.comet_url}'''
        else:
            pre_msg += f'''
{self.cfg.exp_key}'''
        if (slurm_id is not None):
            pre_msg += (' |[jid] %s, %s, %s' % (slurm_id, slurm_name, slurm_node))
        if (not self.local_rank):
            # Only rank 0 logs the run banner.
            logger.info(pre_msg)
        teloss = self.test_loss_best
        t0 = time.time()
        t1 = time.time()
        nwrite = 0
        init_epoch = self.init_epoch
        epoch = (init_epoch - 1)
        for epoch in range(init_epoch, self.max_epoch):
            if self.cfg.distributed:
                # Reshuffle differently each epoch under DDP.
                self.train_sampler.set_epoch(epoch)
            tic = time.time()
            self.metric.start_epoch(epoch)
            self.metric.train()
            self.model.train()
            trloss = self.train(epoch)
            epoT = ((time.time() - tic) / 60)
            msg = pre_msg
            if self.dict_msg_eval.get(self.best_epoch):
                msg += ('\n[best]: ' + self.dict_msg_eval[self.best_epoch])
            self.last_msg_train = '{} | eT:{:.2f}m'.format(self.metric.msg('train', len(self.train_loader)), epoT)
            if (self.local_rank == 0):
                logger.info('\n{} | \n{}', self.last_msg_train, msg)
            if (((1 + epoch) % self.cfg.test_interval) == 0):
                self.metric.eval()
                teloss = self.test(epoch, self.num_sample)
                if self.local_rank:
                    # Non-zero ranks skip sampling/checkpointing.
                    continue
                if DO_SAMPLE:
                    self.sample(epoch)
                # NOTE(review): lower-is-better comparison against an initial
                # best of 0.0 — first eval only becomes "best" if negative.
                if (teloss < self.test_loss_best):
                    self.test_loss_best = teloss
                    modelpath = self.save_model('ckpt_best_eval.pth', epoch=epoch)
                    self.best_modelpath = modelpath
                    logger.info((('>' * 10) + ('[best eval %.1f at epo %d]' % (teloss, epoch))))
                    self.best_epoch = epoch
                    self.metric.write('best_nll', teloss)
                    if (epoch in self.dict_msg_eval):
                        self.metric.log_html(self.dict_msg_eval[epoch])
                        self.metric.write('best', self.dict_msg_eval[epoch])
                else:
                    logger.info(f'cur E{epoch} {teloss:.3f} | Best E={self.best_epoch}; loss={self.test_loss_best:.3f}: ')
            if ((epoch % 50) == 0):
                slurm_dir = f"/checkpoint/{os.getenv('USER')}/{os.getenv('SLURM_JOB_ID', None)}"
                savedp = self.save_model(('E%05d.pth' % epoch), epoch=epoch)
            elif (((time.time() - t0) / 60) > 10):
                # Periodic snapshot roughly every 10 minutes of wall time.
                t0 = time.time()
                logger.info(((('*' * 10) + 'snapshot model') + ('*' * 10)))
                self.save_model('snapshot.pth', epoch=epoch)
            self.init_epoch += 1
        if (not self.local_rank):
            self.save_model(('ckpt_epo%d.pth' % epoch), epoch=epoch)
            self.save_model('snapshot.pth', epoch=epoch)
        logger.info('done training')

    def load_model(self, ckpt):
        """Restore model weights (stripping any DDP 'module.' prefixes),
        resume epoch counter and best test loss from a checkpoint dict."""
        loaded_model_dict = {}
        for (k, v) in ckpt['model'].items():
            if ('module' in k):
                k = k.replace('module.', '')
            loaded_model_dict[k] = v
        self.model.load_state_dict(loaded_model_dict)
        self.init_epoch = (ckpt['epo'] + 1)
        self.test_loss_best = (ckpt['test_loss'] if ('test_loss' in ckpt) else 0)

    def save_model(self, modelpath, epoch=0, expdir=None):
        """Save model/optimizer/cfg state to `modelpath` (relative to the
        experiment dir unless `expdir` is given); returns the full path."""
        modelpath = (os.path.join(expdir, modelpath) if (expdir is not None) else os.path.join(self.cfg.exp_dir, self.cfg.exp_name, modelpath))
        if (not os.path.exists(os.path.dirname(modelpath))):
            os.makedirs(os.path.dirname(modelpath))
        snapshot = {'model': self.model.state_dict(), 'optim': self.optimizer.state_dict(), 'cfg': self.cfg, 'test_loss': self.test_loss_best, 'epo': epoch, 'best_epo': self.best_epoch}
        torch.save(snapshot, modelpath)
        logger.info(('[save] model as %s' % modelpath))
        return modelpath

    _grad()  # NOTE(review): likely a mangled `@torch.no_grad()` — verify
    def sample(self, epoch):
        """Draw unconditional samples (plus canvases when the model emits
        them) and save/log the resulting image grid."""
        tic = time.time()
        self.model.eval()
        if (self.cfg.model_name in MODEL_LIST_NO_CANV):
            hid = torch.randn(64, *self.model.latent_shape)
            sample = self.model.sample(hid.to(self.model.device))
        else:
            sample = self.model.sample()
            hid = None
        if (type(sample) is tuple):
            (sample, hid) = sample
        B = 64
        nrow = B
        sample = sample.cpu()
        canvas = hid
        if ((canvas is not None) and (self.cfg.model_name not in MODEL_LIST_NO_CANV)):
            vis_shape = [self.img_size, self.img_size]
            comparison = [sample.view(B, self.imgd, *vis_shape)]
            # Grey separator row between sample rows and canvas rows.
            empty_line = (sample.new_zeros(nrow, self.imgd, *vis_shape) + 0.5)
            if (type(canvas) is list):
                canvas = [c.cpu() for c in canvas]
                for c in canvas:
                    comparison.append(empty_line)
                    comparison.append(c.view(B, self.imgd, *vis_shape))
            else:
                canvas = canvas.cpu()
                if (canvas.shape[(- 1)] != self.img_size):
                    canvas = F.interpolate(canvas, self.img_size, mode='bilinear')
                sp_img = [B, self.imgd, self.img_size, self.img_size]
                sp_canv = [B, self.canvasd, self.img_size, self.img_size]
                comparison = [sample.view(*sp_img), empty_line, canvas.view(*sp_canv).expand(*sp_img)]
            comparison = torch.cat(comparison)
            fig = make_grid(comparison.cpu(), pad_value=0, nrow=nrow, normalize=True, scale_each=True)
        else:
            fig = make_grid(sample.view(B, self.imgd, self.img_size, self.img_size), pad_value=0, nrow=nrow)
        if (epoch == (- 1)):
            tag = ('sample_%d' % (self.init_epoch + epoch))
        else:
            tag = 'sample'
        filename = (((('%s/%s/' % (self.cfg.exp_dir, self.cfg.exp_name)) + ('vae_%s_' % tag)) + str(epoch)) + '.png')
        # Fall back to writing a PNG when the metric logger declines the grid.
        if ((not self.metric.log_made_grid(tag, fig, epoch)) or (epoch == (- 1))):
            fig = save_image(fig, filename)
            logger.info('save img at {}', filename)

    _grad()  # NOTE(review): likely a mangled `@torch.no_grad()` — verify
    def test(self, epoch, num_sample=50):
        """Visualize reconstructions (rank 0 only) and return the estimated
        validation NLL."""
        self.model.eval()
        if (not self.local_rank):
            self.vis_recont(epoch)
        nll = self.compute_elbo_nsample(epoch, write_out=False, num_sample=num_sample)
        return nll

    _grad()  # NOTE(review): likely a mangled `@torch.no_grad()` — verify
    def compute_elbo_nsample(self, epoch, write_out=True, num_sample=50):
        """Estimate mean NLL with `num_sample` samples per image on the test
        split (write_out=True) or the validation split (write_out=False)."""
        self.model.eval()
        self.metric.start_epoch(epoch)
        tic = time.time()
        output_dir = os.path.join(self.cfg.exp_dir, self.cfg.exp_name)
        test_nll = []
        cnt = 0
        if write_out:
            self.metric.test()
            tname = 'test'
            test_loader = self.full_test_loader
            label_offset = self.test_set_label_offset
        else:
            self.metric.eval()
            tname = 'eval'
            test_loader = self.val_loader
            label_offset = self.val_set_label_offset
        for (i, (data, labels)) in enumerate(test_loader):
            Bs = len(data)
            # Offset labels into the split-specific image-id range.
            self.model.set_xid((labels.to(self.model.device) + label_offset))
            cnt += Bs
            data = data.to(self.model.device).float()
            (output, loss_dict) = self.model.test_loss(data, num_sample=num_sample)
            test_nll.append(loss_dict['NLL'].item())
            self.metric.update(**loss_dict)
            if ((((i + 1) % 2000) == 0) and (self.local_rank == 0)):
                logger.info('ns: {} {}', num_sample, self.metric.msg('test', (i + 1)))
        msg = '{} | cnt={}'.format(self.metric.msg(tname, len(test_loader)), cnt)
        self.dict_msg_eval[epoch] = msg
        self.last_msg_eval = msg
        if (not write_out):
            if (not self.local_rank):
                logger.info((' ~~ ' + 'eval: {} '), msg)
        else:
            pre_msg = ((((('\n' + ('--' * 10)) + '\n') + '[cmt]: ') + self.cfg.cmt) + '\n')
            if (self.last_msg_train != ''):
                pre_msg += (self.last_msg_train + '\n')
            if (self.dict_msg_eval.get(self.best_epoch) and (epoch != self.best_epoch)):
                pre_msg += (self.dict_msg_eval.get(self.best_epoch) + '\n')
            msg = (((pre_msg + msg) + '\n') + f'{self.metric.comet_url}')
            msg += ('\n %s \n %s' % (output_dir, ('--' * 10)))
            if (not self.local_rank):
                logger.info(msg)
        return np.mean(test_nll)

    _grad()  # NOTE(review): likely a mangled `@torch.no_grad()` — verify
    def vis_recont(self, epoch):
        """Reconstruct the first NVIS test images and save/log a grid of
        reconstructions stacked above the originals."""
        output_dir = os.path.join(self.cfg.exp_dir, self.cfg.exp_name)
        tag = 'recont'
        filename = (output_dir + ('/eval_%s_%d.png' % (tag, epoch)))
        img_size = self.img_size
        NVIS = 8
        (data_list, label_list) = ([], [])
        for (i, (data, labels)) in enumerate(self.full_test_loader):
            data = data.to(self.device).float()
            labels = (labels.to(self.device) + self.test_set_label_offset)
            data_list.append(data)
            label_list.append(labels)
            # Stop once at least NVIS images were collected.
            if ((len(data_list) * data.shape[0]) > NVIS):
                break
        (data, labels) = (torch.cat(data_list), torch.cat(label_list))
        self.model.set_xid(labels[:NVIS])
        data = data[:NVIS]
        img = self.model.vis(data)
        img = img.view((- 1), self.imgd, img_size, img_size)
        fig_grid = make_grid(torch.cat([img, data.view((- 1), self.imgd, img_size, img_size)]), nrow=NVIS, normalize=True, scale_each=True)
        if (not self.metric.log_made_grid(tag, fig_grid, epoch)):
            fig = save_image(fig_grid, filename)
            logger.info('save img at {}', filename)

    _grad()  # NOTE(review): likely a mangled `@torch.no_grad()` — verify
    def sample_10k(self, epoch):
        """Generate >10k samples in batches of 64, save them as one .npy
        array (for FID-style evaluation), and return the file name."""
        self.model.eval()
        out = []
        out_hid = []
        N = 64
        logger.info('start sampling, N={}', N)
        self.model.sample_10k = False
        for k in tqdm(range(((10000 // N) + 1))):
            if (self.cfg.model_name in MODEL_LIST_NO_CANV):
                hid = torch.randn(64, *self.model.latent_shape)
                sample = self.model.sample(hid.to(self.device))
            else:
                sample = self.model.sample()
                hid = None
            if (type(sample) is tuple):
                (sample, hid) = sample
            B = sample.shape[0]
            sample = sample.cpu().view(B, self.imgd, self.img_size, self.img_size)
            out.append(sample)
            if (k < 1):
                # Save a preview PNG of the very first batch only.
                filename = (((('%s/%s/' % (self.cfg.exp_dir, self.cfg.exp_name)) + 'sample10k_') + str(epoch)) + ('-%d.png' % k))
                if ((hid is not None) and (hid.shape[(- 1)] == self.img_size)):
                    hid = hid.cpu()
                    if (hid.shape[1] < self.imgd):
                        hid = hid.expand((- 1), self.imgd, (- 1), (- 1))
                    if (self.canvas_size < self.img_size):
                        hid = F.interpolate(hid, self.img_size, mode='bilinear')
                    sample = torch.cat([sample, hid])
                fig = save_image(sample, filename, normalize=True, scale_each=True, nrow=8)
        out_pt = torch.cat(out)
        logger.info('get output: {}', out_pt.shape)
        assert (out_pt.shape[0] > 10000), 'get output less than 10k sample'
        out = out_pt.numpy()
        filename = (((('%s/%s/' % (self.cfg.exp_dir, self.cfg.exp_name)) + '10k_sample') + str(epoch)) + '.npy')
        np.save(filename, out)
        logger.info(('save at %s' % filename))
        return filename
class OurMultiheadAttention(nn.Module):
    """Multi-head attention over 2-D feature maps with a pluggable attention
    kernel (softmax / dot-product / patch / sparse long / sparse short).

    Queries, keys and values are projected with 1x1 convolutions, split into
    `n_head` heads, attended, then merged back with a final 1x1 convolution.
    """

    def __init__(self, q_feat_dim, k_feat_dim, out_feat_dim, n_head, d_k=None, d_v=None):
        super(OurMultiheadAttention, self).__init__()
        # Default per-head dimensions split the output dim evenly over heads.
        d_k = (out_feat_dim // n_head) if d_k is None else d_k
        d_v = (out_feat_dim // n_head) if d_v is None else d_v
        self.n_head = n_head
        self.d_k = d_k
        self.d_v = d_v
        # 1x1 convs act as per-pixel linear projections.
        self.w_qs = nn.Conv2d(q_feat_dim, n_head * d_k, 1, bias=False)
        self.w_ks = nn.Conv2d(k_feat_dim, n_head * d_k, 1, bias=False)
        self.w_vs = nn.Conv2d(out_feat_dim, n_head * d_v, 1, bias=False)
        self.fc = nn.Conv2d(n_head * d_v, out_feat_dim, 1, bias=False)

    def forward(self, q, k, v, attn_type='softmax', **kwargs):
        """Project q/k/v, dispatch to the requested attention kernel, then
        merge heads; returns (output, attention_weights).

        Raises NotImplementedError for an unknown `attn_type`.
        """
        n_head, d_k, d_v = self.n_head, self.d_k, self.d_v
        # Split the channel dim into (head, per-head-dim), keeping H and W.
        q = self.w_qs(q).view(q.shape[0], n_head, d_k, q.shape[2], q.shape[3])
        k = self.w_ks(k).view(k.shape[0], n_head, d_k, k.shape[2], k.shape[3])
        v = self.w_vs(v).view(v.shape[0], n_head, d_v, v.shape[2], v.shape[3])
        if attn_type == 'softmax':
            q, attn = softmax_attention(q, k, v)
        elif attn_type == 'dotproduct':
            q, attn = dotproduct_attention(q, k, v)
        elif attn_type == 'patch':
            q, attn = patch_attention(q, k, v, P=kwargs['P'])
        elif attn_type == 'sparse_long':
            q, attn = long_range_attention(q, k, v, P_h=kwargs['ah'], P_w=kwargs['aw'])
        elif attn_type == 'sparse_short':
            q, attn = short_range_attention(q, k, v, Q_h=kwargs['ah'], Q_w=kwargs['aw'])
        else:
            raise NotImplementedError(f'Unknown attention type {attn_type}')
        # Fold heads back into the channel dim and mix with the output conv.
        q = q.reshape(q.shape[0], -1, q.shape[3], q.shape[4])
        q = self.fc(q)
        return (q, attn)
# NOTE(review): this bare `.parametrize(...)` line looks like a truncated
# `@pytest.mark.parametrize(...)` decorator (missing the `@pytest.mark`
# prefix) — confirm against the original file / VCS.
.parametrize(**make_parametrize_kwargs(itertools.chain(places365(), caltech101(), caltech256(), cifar10(), cifar100(), mnist(), fashion_mnist(), kmnist(), emnist(), qmnist(), omniglot(), phototour(), sbdataset(), sbu(), semeion(), stl10(), svhn(), usps(), celeba(), widerface())))
def test_url_is_accessible(url, md5):
    """Check every dataset download URL still responds, retrying to tolerate
    transient network failures."""
    retry((lambda : assert_url_is_accessible(url)))
def get_double_polynomial(idx, vrblvl=0):
    """Return the string form of the polynomial stored at index *idx* in
    PHCpack's standard double precision container.

    The polynomial is retrieved in two C calls: job 600 reports the string
    size, job 67 fills a buffer with the characters.  *vrblvl* > 0 enables
    progress printing.  The result is terminated by a semicolon.
    """
    if vrblvl > 0:
        print('in get_double_polynomial idx :', idx)
    phc = get_phcfun()
    apar = pointer(c_int32(idx))
    bpar = pointer(c_int32(0))
    cpar = pointer(c_double(0.0))
    verbose = c_int32(vrblvl)
    if vrblvl > 0:
        print('-> get_double_polynomial calls phc', end='')
    # First call: only query the size of the string representation.
    retval = phc(600, apar, bpar, cpar, verbose)
    if vrblvl > 0:
        print(', return value :', retval)
        print('-> size of the polynomial :', bpar[0])
    size = bpar[0]
    # Characters travel packed in 32-bit integers, hence 4 bytes per slot.
    poldata = create_string_buffer(b'', 4 * size)
    if vrblvl > 0:
        print('-> get_double_polynomial calls phc', end='')
    # Second call: write the polynomial characters into the buffer.
    retval = phc(67, apar, poldata, cpar, verbose)
    if vrblvl > 0:
        print(', return value :', retval)
    text = int4a2str(poldata, vrblvl > 0)
    # Keep only the first polynomial and restore its ';' terminator.
    return text.split(';')[0] + ';'
def nms_gpu(boxes, scores, thresh, pre_maxsize=None, post_max_size=None):
    """CUDA 3-D non-maximum suppression.

    Keeps the highest-scoring boxes whose IoU with every already-kept box is
    below *thresh*.  Optionally truncates the candidate list before NMS
    (*pre_maxsize*) and the kept list after (*post_max_size*).  Returns
    indices into the original `boxes`.
    """
    # Rank candidates by descending score.
    order = scores.sort(0, descending=True)[1]
    if pre_maxsize is not None:
        order = order[:pre_maxsize]
    boxes = boxes[order].contiguous()
    keep = torch.zeros(boxes.size(0), dtype=torch.long)
    # The CUDA kernel fills `keep` with indices into the score-sorted boxes.
    num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh, boxes.device.index)
    # Map kept sorted-order positions back to original box indices.
    keep = order[keep[:num_out].cuda(boxes.device)].contiguous()
    if post_max_size is not None:
        keep = keep[:post_max_size]
    return keep
# NOTE(review): this bare call preceding the class looks like a mangled
# `@_MASK_PREDICTOR.register('MaskRCNNC4Predictor')` decorator (missing the
# leading `@`) — confirm against the original file.
_MASK_PREDICTOR.register('MaskRCNNC4Predictor')
class MaskRCNNC4Predictor(nn.Module):
    """Mask R-CNN mask-head predictor: one 2x deconv upsampling step followed
    by a 1x1 conv producing per-class mask logits."""

    def __init__(self, cfg, in_channels):
        super(MaskRCNNC4Predictor, self).__init__()
        num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        dim_reduced = cfg.MODEL.ROI_MASK_HEAD.CONV_LAYERS[(- 1)]
        num_inputs = in_channels
        # Upsample by 2 (kernel 2, stride 2), then predict one logit map per class.
        self.conv5_mask = ConvTranspose2d(num_inputs, dim_reduced, 2, 2, 0)
        self.mask_fcn_logits = Conv2d(dim_reduced, num_classes, 1, 1, 0)
        # Kaiming init for weights, zero for biases.
        for (name, param) in self.named_parameters():
            if ('bias' in name):
                nn.init.constant_(param, 0)
            elif ('weight' in name):
                nn.init.kaiming_normal_(param, mode='fan_out', nonlinearity='relu')

    def forward(self, x):
        """Return per-class mask logits for the pooled region features."""
        x = F.relu(self.conv5_mask(x))
        return self.mask_fcn_logits(x)
def normed(x, axis=None, keepdims=False):
    """Normalize *x* to unit norm along *axis*.

    `eps` (machine epsilon of x's dtype) guards against division by zero for
    all-zero slices.

    NOTE(review): the `keepdims` parameter is accepted but never used — the
    internal `norm` call is always made with keepdims=True so the division
    broadcasts.  Looks vestigial; confirm with callers before removing.
    """
    eps = np.finfo(x.dtype).eps
    return (x / (norm(x, axis=axis, keepdims=True) + eps))
class ReOutput(ModelOutput):
    """Output container for a relation-extraction head, extending the base
    ModelOutput with entity/relation fields alongside loss and logits."""
    # Training loss (presumably present only when labels are provided — confirm).
    loss: Optional[torch.FloatTensor] = None
    # Classification logits.
    logits: torch.FloatTensor = None
    # Per-layer hidden states, when requested.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer attention maps, when requested.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Entity annotations.  NOTE(review): exact dict schema not visible here.
    entities: Optional[Dict] = None
    # Relation annotations.
    relations: Optional[Dict] = None
    # Relations predicted by the model.
    pred_relations: Optional[Dict] = None
def array_list_from_slog(x: SLArrayList) -> ArrayList:
    """Convert each signed-log array in *x* back to an ordinary array."""
    return list(map(array_from_slog, x))
class PreActBottleneck(nn.Module):
    """Pre-activation bottleneck residual block (BN-ReLU precedes each conv),
    in the style of He et al., "Identity Mappings in Deep Residual Networks".
    """

    # Channel expansion of the final 1x1 conv relative to `planes`.
    expansion = 4

    def __init__(self, in_planes, planes, stride=1):
        super(PreActBottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, self.expansion * planes, kernel_size=1, bias=False)
        # Projection shortcut only when spatial size or channel count changes.
        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion * planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion * planes, kernel_size=1, stride=stride, bias=False)
            )

    def forward(self, x):
        """Run the 1x1 -> 3x3 -> 1x1 bottleneck and add the shortcut."""
        preact = F.relu(self.bn1(x))
        # The shortcut branches off the pre-activation, not the raw input.
        residual = self.shortcut(preact)
        y = self.conv1(preact)
        y = self.conv2(F.relu(self.bn2(y)))
        y = self.conv3(F.relu(self.bn3(y)))
        y += residual
        return y
class Ordinal():
    """Wraps an AMR ordinal-entity node: extracts its value ops, aligns each
    op to sentence tokens via lexical heuristics, and picks the best token
    span for abstraction."""

    # Digit op -> word spelling used when matching against a lemma.
    DIGIT_MAP = {'1': 'one', '3': 'three', '-2': 'second', '-4': 'four', '-6': 'six'}
    # Word-form ordinal lemma -> its numeric op value.
    ORDINAL_MAP = {'first': '1', 'firstly': '1', 'second': '2', 'secondly': '2', 'twice': '2', 'ii': '2', 'third': '3', 'fourth': '4', 'fifth': '5', 'sixth': '6', 'seventh': '7', 'eighth': '8', 'ninth': '9', 'tenth': '10', 'twelfth': '12', 'centennial': '100'}

    def __init__(self, node, amr, align=True):
        """Extract the ordinal's ops from `node`; optionally compute the
        token alignment and best span immediately."""
        self.node = node
        self.amr = amr
        self.value_node = None
        self.ops = self._get_ops()
        if align:
            self.alignment = self._get_alignment()
            self.span = self._get_best_span(self.alignment)
        self.ner_type = 'ORDINAL_ENTITY'

    def to_dict(self, amr, span):
        """Serialize this ordinal as an abstract-map entry."""
        return {'type': 'ordinal-entity', 'span': ' '.join(map(amr.tokens.__getitem__, span)), 'ops': self.ops}

    def _get_ops(self):
        """Return the ordinal's value(s) as strings: either a direct 'value'
        attribute, or the ops of a child node reached via a 'value' edge."""
        for (attr, value) in self.node.attributes:
            if (attr == 'value'):
                return [str(value)]
        value = None
        graph = self.amr.graph
        edges = list(graph._G.edges(self.node))
        for (source, target) in edges:
            label = graph._G[source][target]['label']
            if (label == 'value'):
                value = target
                break
        if (value is None):
            return []
        # Remember the child so it can be collapsed/removed later.
        self.value_node = value
        return list(map(str, value.ops))

    def _get_alignment(self):
        """Score every (op, token) pair; keep the best-scoring op per token.

        Returns a dict token_index -> Alignment(token, op_index, score) where
        score is the tuple (alignment_score, coherence_score).
        """
        alignment = {}
        for (i, op) in enumerate(self.ops):
            # Strip surrounding double quotes from string-literal ops.
            if re.search('^".*"$', op):
                op = op[1:(- 1)]
            for j in range(len(self.amr.tokens)):
                alignment_score = self._maybe_align(op, j)
                if (alignment_score == 0):
                    continue
                coherence_score = self._get_coherence(j)
                score = (alignment_score, coherence_score)
                if ((j not in alignment) or (alignment[j].score < score)):
                    alignment[j] = Alignment(j, i, score)
        return alignment

    def _get_coherence(self, i):
        # Coherence scoring is currently disabled (always 0).
        return 0

    def _maybe_align(self, op, index):
        """Heuristic match score (0, 8, or 10) between op and token `index`.

        10 = confident match (exact, ordinal suffix, known word form);
        8 = weaker match (e.g. negative digit, digit spelling); 0 = no match.
        """
        lemma = self.amr.lemmas[index].lower().replace(',', '')
        if (op == lemma):
            return 10
        # '4' matches 'fourth'/'3rd'/'2nd'/'1st'-style suffixed forms.
        if (((op + 'th') == lemma) or ((op + 'rd') == lemma) or ((op + 'nd') == lemma) or ((op + 'st') == lemma)):
            return 10
        # -1 conventionally means "last" ('mast' presumably covers a typo in data — confirm).
        if ((op == '-1') and (lemma in ('last', 'mast', 'final', 'lastly'))):
            return 10
        if ((op == '-4') and (lemma == 'preantepenultimate')):
            return 10
        if ((op == '2') and (lemma == 'latter')):
            return 8
        if ((lemma in self.ORDINAL_MAP) and (self.ORDINAL_MAP[lemma] == op)):
            return 10
        if (lemma.startswith('-') and (lemma[1:] == op)):
            return 8
        if ((op in self.DIGIT_MAP) and (self.DIGIT_MAP[op] == lemma)):
            return 8
        return 0

    def _get_best_span(self, alignment):
        """Group aligned token indices into near-contiguous spans (gaps of up
        to 2 tokens bridged only by '-'/'to'), then return the span with the
        highest total alignment score, or None if nothing aligned."""
        indexes = list(alignment.keys())
        indexes.sort()
        spans = []
        last_index = None
        for index in indexes:
            if (last_index is None):
                spans.append([])
            elif ((index - last_index) > 2):
                # Too far from the previous aligned token: start a new span.
                spans.append([])
            else:
                for i in range((last_index + 1), index):
                    if (self.amr.lemmas[i] in ('-', 'to')):
                        spans[(- 1)].append((index - 1))
                    else:
                        spans.append([])
                        break
            last_index = index
            spans[(- 1)].append(index)
        if len(spans):
            return max(spans, key=(lambda x: sum([alignment[j].score[0] for j in x if (j in alignment)], sum([alignment[j].score[1] for j in x if (j in alignment)]))))
        else:
            return None
def collapse_ordinal_nodes(ordinals, amr):
    """Replace each aligned ordinal's token span with an abstract token
    (ORDINAL_ENTITY_k) and rewrite the AMR graph accordingly.

    Ordinals are processed left-to-right (by span end); `offset` tracks how
    many tokens earlier replacements removed, so later spans are re-indexed.
    Unaligned ordinals have their incoming subtrees removed instead.
    """
    node_count = 0
    # Process spans left to right so the running offset stays correct.
    ordinals.sort(key=(lambda x: (x.span[(- 1)] if (x.span is not None) else float('inf'))))
    offset = 0
    for ordinal in ordinals:
        if (ordinal.span is not None):
            node_count += 1
            abstract = '{}_{}'.format(ordinal.ner_type, node_count)
            # Shift indices by tokens already removed by earlier collapses.
            span = [(index - offset) for index in ordinal.span]
            amr.abstract_map[abstract] = ordinal.to_dict(amr, span)
            amr.replace_span(span, [abstract], ['JJ'], [ordinal.ner_type])
            amr.stems = ((amr.stems[:span[0]] + [abstract]) + amr.stems[(span[(- 1)] + 1):])
            # Drop the literal 'value' attribute before renaming the node.
            for (attr, value) in ordinal.node.attributes:
                if (attr == 'value'):
                    amr.graph.remove_node_attribute(ordinal.node, attr, value)
                    break
            amr.graph.replace_node_attribute(ordinal.node, 'instance', ordinal.node.instance, abstract)
            # If the value lived on a child node, remove that subtree too.
            if ordinal.value_node:
                amr.graph.remove_edge(ordinal.node, ordinal.value_node)
                amr.graph.remove_subtree(ordinal.value_node)
            offset += (len(ordinal.span) - 1)
        else:
            # No token alignment: detach and delete the ordinal's parents' links.
            edges = list(amr.graph._G.in_edges(ordinal.node))
            for (source, target) in edges:
                amr.graph.remove_edge(source, target)
                amr.graph.remove_subtree(target)
class GCN(torch.nn.Module):
    """Stack of `num_layers` GCNConv layers (normalize=False) with ReLU and
    dropout between hidden layers; emits log-softmax class scores."""

    def __init__(self, in_channels, hidden_channels, out_channels, num_layers, dropout):
        super(GCN, self).__init__()
        self.convs = torch.nn.ModuleList()
        # input -> hidden, (num_layers - 2) x hidden -> hidden, hidden -> output
        self.convs.append(GCNConv(in_channels, hidden_channels, normalize=False))
        for _ in range(num_layers - 2):
            self.convs.append(GCNConv(hidden_channels, hidden_channels, normalize=False))
        self.convs.append(GCNConv(hidden_channels, out_channels, normalize=False))
        self.dropout = dropout

    def reset_parameters(self):
        """Re-initialize every convolution layer."""
        for layer in self.convs:
            layer.reset_parameters()

    def forward(self, x, adj_t):
        """Propagate node features through the stack; returns log-probabilities."""
        *hidden, final = self.convs
        for layer in hidden:
            x = F.dropout(F.relu(layer(x, adj_t)), p=self.dropout, training=self.training)
        return torch.log_softmax(final(x, adj_t), dim=-1)
# NOTE(review): the bare `_immediately` below looks like the tail of a mangled
# decorator (e.g. `@run_immediately`) — confirm against the original file.
_immediately
def allrank(gpu_queue, doc_begin_index, doc_end_index, finish_queue):
    """Worker: brute-force rank one shard of document embeddings against all
    queries on a single GPU taken from `gpu_queue`.

    For each query, keeps the top `args.hit` (score, doc_id) pairs in a
    bounded PriorityQueue and pushes that queue's contents to `finish_queue`.
    Relies on module-level `args`, `get_embed_memmap`, `np`, `tqdm`,
    `PriorityQueue` being in scope.
    """
    import os
    import torch
    # Claim a GPU; pinning CUDA_VISIBLE_DEVICES must happen before CUDA init.
    gpuid = gpu_queue.get()
    os.environ['CUDA_VISIBLE_DEVICES'] = f'{gpuid}'
    device = torch.device(('cuda' if torch.cuda.is_available() else 'cpu'))
    assert (torch.cuda.device_count() == 1)
    (query_embedding_memmap, query_id_memmap) = get_embed_memmap(args.query_embedding_dir, args.embedding_dim)
    qid2pos = {identity: i for (i, identity) in enumerate(query_id_memmap)}
    (doc_embedding_memmap, doc_id_memmap) = get_embed_memmap(args.doc_embedding_dir, args.embedding_dim)
    # Doc ids are assumed to be exactly 0..N-1 so slicing maps directly to ids.
    assert np.all((doc_id_memmap == list(range(len(doc_id_memmap)))))
    doc_embeddings = doc_embedding_memmap[doc_begin_index:doc_end_index]
    doc_ids = doc_id_memmap[doc_begin_index:doc_end_index]
    doc_embeddings = torch.from_numpy(doc_embeddings).to(device)
    results_dict = {qid: PriorityQueue(maxsize=args.hit) for qid in query_id_memmap}
    for qid in tqdm(query_id_memmap, desc=f'{gpuid}'):
        query_embedding = query_embedding_memmap[qid2pos[qid]]
        query_embedding = torch.from_numpy(query_embedding)
        query_embedding = query_embedding.to(device)
        # Dot-product similarity against the whole shard at once.
        all_scores = torch.sum((query_embedding * doc_embeddings), dim=(- 1))
        k = min(args.hit, len(doc_embeddings))
        (top_scores, top_indices) = torch.topk(all_scores, k, largest=True, sorted=True)
        (top_scores, top_indices) = (top_scores.cpu(), top_indices.cpu())
        top_doc_ids = doc_ids[top_indices.numpy()]
        cur_q_queue = results_dict[qid]
        # Merge this shard's top-k into the bounded min-queue for the query.
        for (score, docid) in zip(top_scores, top_doc_ids):
            (score, docid) = (score.item(), docid.item())
            if cur_q_queue.full():
                (lowest_score, lowest_docid) = cur_q_queue.get_nowait()
                if (lowest_score >= score):
                    # Remaining candidates are sorted descending: stop early.
                    cur_q_queue.put_nowait((lowest_score, lowest_docid))
                    break
                else:
                    cur_q_queue.put_nowait((score, docid))
            else:
                cur_q_queue.put_nowait((score, docid))
        finish_queue.put(cur_q_queue.queue)
    # Drop large tensors and release the GPU back to the pool.
    (doc_embeddings, all_scores, query_embedding, top_scores, top_indices) = (None, None, None, None, None)
    torch.cuda.empty_cache()
    gpu_queue.put_nowait(gpuid)
class PerceiverFeatureExtractor(metaclass=DummyObject):
    """Placeholder stub used when the 'vision' backend is not installed;
    instantiating it raises an informative error via requires_backends."""
    _backends = ['vision']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['vision'])
def remove_output(*sources: str) -> Iterator[None]:
    """Context-manager generator: yield once, then delete every directory
    tree in *sources* on exit (even if the body raised)."""
    try:
        yield
    finally:
        for path in sources:
            shutil.rmtree(path)
def accuracy(output, target, topk=(1,)):
    """Compute top-k precision (in %) over labeled examples only.

    Args:
        output: (batch, num_classes) prediction scores.
        target: (batch,) class indices; entries equal to NO_LABEL are
            excluded from the denominator.
        topk: iterable of k values to report.

    Returns:
        List of 1-element tensors, one per k, each holding the percentage of
        targets whose true class appears in the top-k predictions.
    """
    maxk = max(topk)
    # Clamp the denominator to avoid division by zero when the whole
    # minibatch is unlabeled.
    labeled_minibatch_size = max(target.ne(NO_LABEL).sum(), 1e-08)
    (_, pred) = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, (- 1)).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: `correct[:k]` is a slice of a transposed (non-contiguous)
        # tensor, so .view(-1) raises in modern PyTorch; .reshape(-1) copies
        # when needed and is otherwise equivalent.
        correct_k = correct[:k].reshape((- 1)).float().sum(0, keepdim=True)
        res.append(correct_k.mul_((100.0 / labeled_minibatch_size)))
    return res
class SegmentationLosses(object):
    """Factory for pixel-wise segmentation losses (cross-entropy / focal).

    Args:
        weight: optional per-class weight tensor for cross-entropy.
        size_average: True -> mean reduction, False -> sum (kept for
            backward compatibility; mapped to `reduction` internally).
        batch_average: divide the loss by the batch size n.
        ignore_index: target value excluded from the loss (default 255).
        cuda: move the criterion to GPU before use.
    """

    def __init__(self, weight=None, size_average=True, batch_average=True, ignore_index=255, cuda=False):
        self.ignore_index = ignore_index
        self.weight = weight
        self.size_average = size_average
        self.batch_average = batch_average
        self.cuda = cuda

    def build_loss(self, mode='ce'):
        """Return the loss callable for *mode*: 'ce' or 'focal'.

        Raises NotImplementedError for any other mode.
        """
        if (mode == 'ce'):
            return self.CrossEntropyLoss
        elif (mode == 'focal'):
            return self.FocalLoss
        else:
            raise NotImplementedError

    def _criterion(self):
        # Build the shared cross-entropy criterion.  The `size_average`
        # kwarg of nn.CrossEntropyLoss is deprecated; map it to the
        # equivalent `reduction` mode ('mean' when True, 'sum' when False).
        criterion = nn.CrossEntropyLoss(weight=self.weight, ignore_index=self.ignore_index,
                                        reduction=('mean' if self.size_average else 'sum'))
        if self.cuda:
            criterion = criterion.cuda()
        return criterion

    def CrossEntropyLoss(self, logit, target):
        """Pixel-wise cross-entropy; logit is (n, c, h, w), target (n, h, w)."""
        (n, c, h, w) = logit.size()
        loss = self._criterion()(logit, target.long())
        if self.batch_average:
            loss /= n
        return loss

    def FocalLoss(self, logit, target, gamma=2, alpha=0.5):
        """Focal loss built on cross-entropy: scales CE by (1 - p_t)^gamma,
        optionally weighted by alpha."""
        (n, c, h, w) = logit.size()
        logpt = (- self._criterion()(logit, target.long()))
        pt = torch.exp(logpt)
        if (alpha is not None):
            logpt *= alpha
        loss = ((- ((1 - pt) ** gamma)) * logpt)
        if self.batch_average:
            loss /= n
        return loss
class ResNet_Strategy(nn.Module):
    """ResNet backbone with four linear heads, each predicting one attack
    hyper-parameter (method, epsilon, iteration count, step size) over the
    choices listed in `args`."""

    def __init__(self, block, num_blocks, args):
        self.args = args
        super(ResNet_Strategy, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        # Flattened feature size after the final stage and 4x4 average pool.
        feat_dim = 512 * block.expansion * 4
        self.Attack_method = nn.Linear(feat_dim, len(args.attack_types))
        self.Attack_epsilon = nn.Linear(feat_dim, len(args.epsilon_types))
        self.Attack_iters = nn.Linear(feat_dim, len(args.attack_iters_types))
        self.Attack_step_size = nn.Linear(feat_dim, len(args.step_size_types))

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first may downsample."""
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return the four heads' logits as a tuple."""
        feat = F.relu(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feat = stage(feat)
        feat = F.avg_pool2d(feat, 4)
        feat = feat.view(feat.size(0), -1)
        return (self.Attack_method(feat), self.Attack_epsilon(feat),
                self.Attack_iters(feat), self.Attack_step_size(feat))
def main(args):
    """Pipeline entry point: invert the three input images into latent space,
    align images 2 (and 3, if distinct) to image 1, then blend all three."""
    ii2s = Embedding(args)
    im_path1 = os.path.join(args.input_dir, args.im_path1)
    im_path2 = os.path.join(args.input_dir, args.im_path2)
    im_path3 = os.path.join(args.input_dir, args.im_path3)
    # A set removes duplicate paths so each image is inverted only once.
    im_set = {im_path1, im_path2, im_path3}
    ii2s.invert_images_in_W([*im_set])
    ii2s.invert_images_in_FS([*im_set])
    align = Alignment(args)
    align.align_images(im_path1, im_path2, sign=args.sign, align_more_region=False, smooth=args.smooth)
    # Skip the second alignment when images 2 and 3 are the same file.
    if (im_path2 != im_path3):
        align.align_images(im_path1, im_path3, sign=args.sign, align_more_region=False, smooth=args.smooth, save_intermediate=False)
    blend = Blending(args)
    blend.blend_images(im_path1, im_path2, im_path3, sign=args.sign)
def evaluate(result_sha, root, part='all', mail=mailpy.Mail('')):
    """Run the KITTI tracking benchmark evaluation for cars and pedestrians.

    Args:
        result_sha: identifier of the submitted result set.
        root: dataset/results root directory.
        part: which benchmark part to evaluate.
        mail: message sink (default is a shared no-address Mail instance).

    Returns:
        The stats tuple (MOTA, MOTP, recall, prec, F1, fp, fn, id_switches)
        of the last successfully evaluated class, or False if nothing could
        be evaluated.  Raises ValueError when ground truth is missing.
    """
    mail.msg('Processing Result for KITTI Tracking Benchmark')
    classes = []
    # Initialize so we can detect "no class produced metrics" instead of
    # hitting an unbound-variable error at the final return.
    stats = None
    for c in ('car', 'pedestrian'):
        e = trackingEvaluation(t_sha=result_sha, root=root, part=part, mail=mail, cls=c)
        try:
            if (not e.loadTracker()):
                continue
            mail.msg('Loading Results - Success')
            mail.msg(('Evaluate Object Class: %s' % c.upper()))
            classes.append(c)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit pass through.
        except Exception:
            mail.msg('Feel free to contact us (), if you receive this error message:')
            mail.msg(' Caught exception while loading result data.')
            break
        if (not e.loadGroundtruth()):
            raise ValueError('Ground truth not found.')
        mail.msg('Loading Groundtruth - Success')
        # BUG FIX: was `len(...) is not len(...)`, an identity comparison on
        # ints that only accidentally works for CPython's small-int cache.
        if (len(e.groundtruth) != len(e.tracker)):
            mail.msg('The uploaded data does not provide results for every sequence.')
            return False
        mail.msg(('Loaded %d Sequences.' % len(e.groundtruth)))
        mail.msg('Start Evaluation...')
        try:
            e.createEvalDir()
        except Exception:
            mail.msg('Feel free to contact us (), if you receive this error message:')
            mail.msg(' Caught exception while creating results.')
        if e.compute3rdPartyMetrics():
            stats = e.saveToStats()
        else:
            mail.msg('There seem to be no true positives or false positives at all in the submitted data.')
    if ((len(classes) == 0) or (stats is None)):
        mail.msg('The uploaded results could not be evaluated. Check for format errors.')
        return False
    mail.msg('Thank you for participating in our benchmark!')
    return stats
class Layer(JavaValue, SharedStaticUtils):
    """Python-side handle to a BigDL layer/module living on the JVM.

    Every method delegates to the wrapped JVM object (``self.value``)
    through ``callBigDlFunc``/``callJavaFunc``; no numeric work happens in
    Python. Data crosses the bridge as ``JTensor`` objects built from
    numpy arrays.

    NOTE(review): ``from_jvalue``, ``check_input`` and ``convert_output``
    are defined without ``self`` yet are invoked as
    ``self.check_input(...)`` / ``self.convert_output(...)`` in
    ``forward``/``backward`` -- they are presumably ``@staticmethod`` in
    the upstream source and the decorators were lost in extraction;
    confirm against upstream before running.
    """
    def __init__(self, jvalue, bigdl_type, *args):
        # Either adopt an existing JVM object, or ask the JVM to construct
        # one named after this Python class.
        if jvalue:
            invalidInputError((type(jvalue) == JavaObject), f"jvalue type ${type(jvalue)} doesn't match JavaObject ${JavaObject}")
            self.value = jvalue
        else:
            self.value = callBigDlFunc(bigdl_type, self.jvm_class_constructor(), *args)
        self.bigdl_type = bigdl_type
    def set_running_mean(self, running_mean):
        """Set the layer's running mean from a numpy array; returns self."""
        callBigDlFunc(self.bigdl_type, 'setRunningMean', self.value, JTensor.from_ndarray(running_mean))
        return self
    def set_running_std(self, running_std):
        """Set the layer's running std from a numpy array; returns self."""
        callBigDlFunc(self.bigdl_type, 'setRunningStd', self.value, JTensor.from_ndarray(running_std))
        return self
    def __str__(self):
        # Delegate to the JVM object's toString().
        return self.value.toString()
    def __call__(self, x=None):
        """Create a graph Node by applying this layer to node(s) ``x``."""
        # NOTE(review): truthiness test, so an empty list behaves like None.
        x = (x if x else [])
        return Node.of(callBigDlFunc(self.bigdl_type, 'createNode', self, to_list(x)))
    def from_jvalue(jvalue, bigdl_type='float'):
        """Wrap an existing JVM layer object in a Python Layer.

        NOTE(review): presumably @staticmethod upstream (no ``self``).
        """
        model = Layer(jvalue=jvalue, bigdl_type=bigdl_type)
        model.value = jvalue
        return model
    def set_name(self, name):
        """Rename the layer on the JVM side; returns self."""
        callJavaFunc(self.value.setName, name)
        return self
    def name(self):
        """Return the layer's JVM-side name."""
        return callJavaFunc(self.value.getName)
    def set_seed(self, seed=123):
        """Set the global BigDL model RNG seed; returns self."""
        callBigDlFunc(self.bigdl_type, 'setModelSeed', seed)
        return self
    def get_dtype(self):
        """Map the BigDL type tag ('float'/'double') to a numpy dtype name."""
        if ('float' == self.bigdl_type):
            return 'float32'
        else:
            return 'float64'
    def check_input(input):
        """Convert ndarray/JTensor (or nested lists thereof) to
        ``(list_of_JTensor, is_table)``.

        NOTE(review): presumably @staticmethod upstream (no ``self``).
        """
        def to_jtensor(i):
            if isinstance(i, np.ndarray):
                return JTensor.from_ndarray(i)
            elif isinstance(i, JTensor):
                return i
            else:
                invalidInputError(False, ('Error unknown input type %s' % type(i)))
        def check_list(input):
            # Recursively convert nested lists; empty lists are rejected.
            if (type(input) is list):
                if (len(input) == 0):
                    invalidInputError(False, 'Error when checking: empty input')
                return list(map((lambda i: check_list(i)), input))
            else:
                return to_jtensor(input)
        if (type(input) is list):
            if (len(input) == 0):
                invalidInputError(False, 'Error when checking: empty input')
            return (list(map((lambda i: check_list(i)), input)), True)
        else:
            return ([to_jtensor(input)], False)
    def convert_output(output):
        """Convert a JTensor (or list of JTensors) back to numpy ndarray(s).

        NOTE(review): presumably @staticmethod upstream (no ``self``).
        """
        if (type(output) is JTensor):
            return output.to_ndarray()
        elif (len(output) == 1):
            return output[0].to_ndarray()
        else:
            return [x.to_ndarray() for x in output]
    def forward(self, input):
        """Run a forward pass on the JVM; returns numpy output(s)."""
        (jinput, input_is_table) = self.check_input(input)
        output = callBigDlFunc(self.bigdl_type, 'modelForward', self.value, jinput, input_is_table)
        return self.convert_output(output)
    def backward(self, input, grad_output):
        """Run a backward pass; returns the input gradient(s) as numpy."""
        (jinput, input_is_table) = self.check_input(input)
        (jgrad_output, grad_output_is_table) = self.check_input(grad_output)
        output = callBigDlFunc(self.bigdl_type, 'modelBackward', self.value, jinput, input_is_table, jgrad_output, grad_output_is_table)
        return self.convert_output(output)
    def zero_grad_parameters(self):
        """Zero all accumulated parameter gradients."""
        callJavaFunc(self.value.zeroGradParameters)
    def update_parameters(self, learning_rate):
        """Apply one parameter update step with the given learning rate."""
        callBigDlFunc(self.bigdl_type, 'updateParameters', self.value, learning_rate)
    def reset(self):
        """Re-initialise the layer's parameters; returns self."""
        callJavaFunc(self.value.reset)
        return self
    def parameters(self):
        """Return {layer_name: {param_name: ndarray}} for all parameters."""
        name_to_params = callBigDlFunc(self.bigdl_type, 'modelGetParameters', self.value)
        def to_ndarray(params):
            # Each value is (flat_data, shape); rebuild the ndarray from it.
            return dict(((param_name, np.array(values[0], dtype=self.get_dtype()).reshape(values[1])) for (param_name, values) in params.items()))
        return dict(((layer_name, to_ndarray(params)) for (layer_name, params) in name_to_params.items()))
    def evaluate(self, *args):
        """With no args: switch the module to evaluation mode and return
        self. With (dataset, batch_size, val_methods): run validation and
        return the metric results."""
        if (len(args) == 0):
            callBigDlFunc(self.bigdl_type, 'evaluate', self.value)
            return self
        elif (len(args) == 3):
            (dataset, batch_size, val_methods) = args
            if isinstance(dataset, ImageFrame):
                return callBigDlFunc(self.bigdl_type, 'modelEvaluateImageFrame', self.value, dataset, batch_size, val_methods)
            else:
                return callBigDlFunc(self.bigdl_type, 'modelEvaluate', self.value, dataset, batch_size, val_methods)
        else:
            invalidInputError(False, 'Error when calling evaluate(): it takes no argument or exactly three arguments only')
    def _to_jtensors(self, x):
        # Normalise a single ndarray/JTensor or a list of them to JTensors.
        x = to_list(x)
        if isinstance(x[0], np.ndarray):
            return [JTensor.from_ndarray(i) for i in x]
        elif isinstance(x[0], JTensor):
            return x
        else:
            invalidInputError(False, ('Not supported type: %s' % type(x[0])))
    def predict_local(self, X, batch_size=(- 1)):
        """Predict on local (non-RDD) data; returns a stacked ndarray."""
        jresults = callBigDlFunc(self.bigdl_type, 'predictLocal', self.value, self._to_jtensors(X), batch_size)
        return np.stack([j.to_ndarray() for j in jresults])
    def predict_class_local(self, X):
        """Predict class indices on local data; returns a stacked ndarray."""
        result = callBigDlFunc(self.bigdl_type, 'predictLocalClass', self.value, self._to_jtensors(X))
        return np.stack(result)
    def predict(self, features, batch_size=(- 1)):
        """Dispatch to distributed (RDD input) or local prediction."""
        if isinstance(features, RDD):
            return self.predict_distributed(features, batch_size)
        else:
            return self.predict_local(features, batch_size)
    def predict_class(self, features):
        """Dispatch to distributed (RDD input) or local class prediction."""
        if isinstance(features, RDD):
            return self.predict_class_distributed(features)
        else:
            return self.predict_class_local(features)
    def predict_distributed(self, data_rdd, batch_size=(- 1)):
        """Predict over an RDD; returns an RDD of ndarrays."""
        result = callBigDlFunc(self.bigdl_type, 'modelPredictRDD', self.value, data_rdd, batch_size)
        return result.map((lambda data: data.to_ndarray()))
    def predict_class_distributed(self, data_rdd):
        """Predict class indices over an RDD."""
        result = callBigDlFunc(self.bigdl_type, 'modelPredictClass', self.value, data_rdd)
        return result
    def predict_image(self, image_frame, output_layer=None, share_buffer=False, batch_per_partition=4, predict_key='predict'):
        """Predict over an ImageFrame; results are stored under predict_key."""
        image_frame = callBigDlFunc(self.bigdl_type, 'modelPredictImage', self.value, image_frame, output_layer, share_buffer, batch_per_partition, predict_key)
        return ImageFrame(image_frame)
    def set_weights(self, weights):
        """Overwrite the layer's weights from numpy array(s)."""
        tensors = [JTensor.from_ndarray(param, self.bigdl_type) for param in to_list(weights)]
        callBigDlFunc(self.bigdl_type, 'setWeights', self.value, tensors)
    def get_weights(self):
        """Return the layer's weights as a list of ndarrays, or None if the
        layer has no weight/bias."""
        tensorWeights = callBigDlFunc(self.bigdl_type, 'getWeights', self.value)
        if (tensorWeights is not None):
            return [tensor.to_ndarray() for tensor in tensorWeights]
        else:
            print('The layer does not have weight/bias')
            return None
    def is_with_weights(self):
        """True if the layer has trainable weight/bias."""
        return callBigDlFunc(self.bigdl_type, 'isWithWeights', self.value)
    def saveModel(self, modelPath, weightPath=None, over_write=False):
        """Save the module (and optionally weights) in BigDL format."""
        callBigDlFunc(self.bigdl_type, 'saveBigDLModule', self.value, modelPath, weightPath, over_write)
    def save_caffe(self, prototxt_path, model_path, use_v2=True, overwrite=False):
        """Export the module to Caffe prototxt + weights."""
        callBigDlFunc(self.bigdl_type, 'saveCaffe', self.value, prototxt_path, model_path, use_v2, overwrite)
    def save_tensorflow(self, inputs, path, byte_order='little_endian', data_format='nhwc'):
        """Export the module to a TensorFlow model file."""
        callBigDlFunc(self.bigdl_type, 'saveTF', self.value, inputs, path, byte_order, data_format)
    def setWRegularizer(self, wRegularizer):
        # Assign the weight regularizer directly on the JVM object.
        self.value.wRegularizer = wRegularizer.value
    def setBRegularizer(self, bRegularizer):
        # Assign the bias regularizer directly on the JVM object.
        self.value.bRegularizer = bRegularizer.value
    def freeze(self, names=None):
        """Freeze all layers (or only the named ones); returns self."""
        callBigDlFunc(self.bigdl_type, 'freeze', self.value, names)
        return self
    def unfreeze(self, names=None):
        """Unfreeze all layers (or only the named ones); returns self."""
        callBigDlFunc(self.bigdl_type, 'unFreeze', self.value, names)
        return self
    def training(self, is_training=True):
        """Toggle training/evaluation mode; returns self."""
        if is_training:
            callJavaFunc(self.value.training)
        else:
            callJavaFunc(self.value.evaluate)
        return self
    def is_training(self):
        """True when the module is currently in training mode."""
        return callJavaFunc(self.value.isTraining)
    def quantize(self):
        """Return a quantized copy of this module."""
        quantized_model = callBigDlFunc(self.bigdl_type, 'quantize', self.value)
        return Layer.of(quantized_model)
def shape_equal_cmp(*args):
    """Verify that all arguments share the same ``.shape``.

    Compares consecutive pairs; on the first mismatch raises ValueError
    listing every argument's shape. Returns True when all shapes agree
    (including for zero or one argument).
    """
    for left, right in zip(args, args[1:]):
        if left.shape != right.shape:
            shapes = '\n'.join(str(a.shape) for a in args)
            raise ValueError('Expected equal shapes. Got:\n%s' % shapes)
    return True
class ACE2005Processor(QueryNERProcessor):
    """Query-based NER processor for the ACE 2005 dataset."""
    def get_labels(self):
        """Return the ACE 2005 entity tags plus the outside tag 'O'."""
        entity_tags = ['GPE', 'ORG', 'PER', 'FAC', 'VEH', 'LOC', 'WEA']
        return entity_tags + ['O']
class InceptionV4(nn.Module):
    """Inception-v4 backbone with a configurable pooling + linear classifier.

    The whole stem/Inception/Reduction stack lives in one ``nn.Sequential``
    so the ``feature_info`` module paths ('features.N') index into it.
    """
    def __init__(self, num_classes=1000, in_chans=3, output_stride=32, drop_rate=0.0, global_pool='avg'):
        super(InceptionV4, self).__init__()
        # Only the standard stride-32 configuration is supported.
        assert (output_stride == 32)
        self.drop_rate = drop_rate
        self.num_classes = num_classes
        # Channel width of the final feature map.
        self.num_features = 1536
        self.features = nn.Sequential(BasicConv2d(in_chans, 32, kernel_size=3, stride=2), BasicConv2d(32, 32, kernel_size=3, stride=1), BasicConv2d(32, 64, kernel_size=3, stride=1, padding=1), Mixed3a(), Mixed4a(), Mixed5a(), InceptionA(), InceptionA(), InceptionA(), InceptionA(), ReductionA(), InceptionB(), InceptionB(), InceptionB(), InceptionB(), InceptionB(), InceptionB(), InceptionB(), ReductionB(), InceptionC(), InceptionC(), InceptionC())
        # Channel/stride metadata for feature extraction at 5 stages.
        self.feature_info = [dict(num_chs=64, reduction=2, module='features.2'), dict(num_chs=160, reduction=4, module='features.3'), dict(num_chs=384, reduction=8, module='features.9'), dict(num_chs=1024, reduction=16, module='features.17'), dict(num_chs=1536, reduction=32, module='features.21')]
        (self.global_pool, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
    def get_classifier(self):
        """Return the final linear classification layer."""
        return self.last_linear
    def reset_classifier(self, num_classes, global_pool='avg'):
        """Replace pooling + classifier head for a new class count."""
        self.num_classes = num_classes
        (self.global_pool, self.last_linear) = create_classifier(self.num_features, self.num_classes, pool_type=global_pool)
    def forward_features(self, x):
        """Run only the convolutional trunk (no pooling/classifier)."""
        return self.features(x)
    def forward(self, x):
        """Full forward: trunk -> pool -> (optional dropout) -> classifier."""
        x = self.forward_features(x)
        x = self.global_pool(x)
        if (self.drop_rate > 0):
            x = F.dropout(x, p=self.drop_rate, training=self.training)
        x = self.last_linear(x)
        return x
class BestRun(NamedTuple):
    """Best run selected by a hyperparameter search."""
    # Identifier of the winning run.
    run_id: str
    # Objective value achieved by that run.
    objective: float
    # Hyperparameter assignment that produced it.
    hyperparameters: Dict[(str, Any)]
class FlaxCrossAttnDownBlock2D(nn.Module):
    """Flax UNet down-block: (ResNet -> cross-attention Transformer) x
    num_layers, optionally followed by a 2x downsampler.

    Attributes are Flax module hyperparameters (set at construction).
    """
    # Input channel count for the first resnet; later layers use out_channels.
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        # Build paired resnet/attention stacks of equal length.
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            # Only the first resnet sees in_channels; the rest are
            # out_channels -> out_channels.
            in_channels = (self.in_channels if (i == 0) else self.out_channels)
            res_block = FlaxResnetBlock2D(in_channels=in_channels, out_channels=self.out_channels, dropout_prob=self.dropout, dtype=self.dtype)
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(in_channels=self.out_channels, n_heads=self.num_attention_heads, d_head=(self.out_channels // self.num_attention_heads), depth=1, use_linear_projection=self.use_linear_projection, only_cross_attention=self.only_cross_attention, use_memory_efficient_attention=self.use_memory_efficient_attention, dtype=self.dtype)
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)
    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        """Return (final_hidden_states, tuple_of_per-layer_outputs).

        ``output_states`` collects each layer's output (and the downsampled
        output), e.g. for UNet skip connections.
        """
        output_states = ()
        for (resnet, attn) in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return (hidden_states, output_states)
def add_flops_counter_variable_or_reset(module):
    """(Re)initialise the per-module FLOPs counter to zero.

    Only Conv2d and Linear modules are counted; any other module type is
    left untouched.
    """
    # isinstance accepts a tuple of types: one call replaces the 'or' chain.
    if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
        module.__flops__ = 0
class MyNanoChannelsLastCorrectness(TorchNano):
    """Correctness check for TorchNano's channels-last setup: trains a tiny
    conv model one epoch and asserts the exact resulting weights."""
    def train(self):
        # Fixed 4-sample dataset (NCHW, shape 4x2x1x2) with binary targets.
        x = torch.Tensor([[[[1, 0]], [[1, 0]]], [[[1, 0]], [[2, 0]]], [[[0, 3]], [[1, 0]]], [[[1, 1]], [[2, 1]]]])
        y = torch.Tensor([[0.0], [1.0], [0.0], [1.0]])
        train_dataset = torch.utils.data.TensorDataset(x, y)
        # batch_size == dataset size and shuffle=False: exactly one
        # deterministic optimizer step below.
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=4, shuffle=False)
        origin_model = ConvModel()
        loss_fuc = torch.nn.MSELoss()
        optimizer = torch.optim.SGD(origin_model.parameters(), lr=0.25)
        # TorchNano wraps model/optimizer/loader (e.g. applies memory format).
        (model, optimizer, train_loader) = self.setup(origin_model, optimizer, train_loader)
        model.train()
        for (X, y) in train_loader:
            optimizer.zero_grad()
            loss = loss_fuc(model(X), y)
            self.backward(loss)
            optimizer.step()
        # Expected conv1 weights after exactly one SGD step; asserting on
        # origin_model checks the wrapper shares parameters with it.
        result = torch.tensor([[[[0.0, (- 1.0)]], [[(- 1.25), 0.5]]]])
        assert origin_model.conv1.weight.equal(result)
def val_to_vec(size, val):
    """Return a one-hot list of length *size* with a 1 at index int(val).

    *val* must satisfy 0 <= val < size (AssertionError otherwise).
    """
    assert 0 <= val < size
    one_hot = [0] * size
    one_hot[int(val)] = 1
    return one_hot
def copy_noise_bn(noised_src_model, dst_model, diff_coef=0.0):
    """Copy BatchNorm running statistics from *noised_src_model* into
    *dst_model*.

    Only state-dict entries whose key contains 'bn' AND names a
    running_mean/running_var buffer are copied; weights/biases are left
    alone.

    Raises
    ------
    AssertionError : if diff_coef != 0 (unsupported: no clean reference).
    ValueError     : if *dst_model* has no 'bn' entries at all.
    """
    assert (diff_coef == 0), 'Not support non-zero diff_coef since no clean ref is available.'
    # Hoist the state_dict() calls out of the loop (the original rebuilt the
    # source state dict once per key); also drop the unused 'eps' local.
    src_state = noised_src_model.state_dict()
    dst_state = dst_model.state_dict()
    found_bn = False
    for key in dst_state:
        if ('bn' in key):
            found_bn = True
            if (('running_mean' in key) or ('running_var' in key)):
                # state_dict buffers reference the live model tensors, so
                # copy_ updates dst_model in place.
                dst_state[key].data.copy_(src_state[key].data)
    if (not found_bn):
        # Plain string (the original was an f-string with no placeholders);
        # also fixes the 'make suer' typo.
        raise ValueError('Not found BN. Please make sure you are using BN in your model.')
class PlaneActiveSchedulerND(_SubspacePointActiveSchedulerND):
    """Active scheduler over a 2-D plane inside an nd (>= 3) domain.

    The plane is pinned by a point giving the remaining (nd - 2)
    coordinates; ``iaxes`` selects the two free axes.
    """
    name = 'Plane'
    def __init__(self, N_STEPS, D, point, iaxes):
        # Guard clauses: a plane needs at least a 3-D domain, and the fixed
        # point must supply exactly nd-2 coordinates.
        if D.nd < 3:
            raise Exception('ERROR: requires nd >=3')
        n_fixed = D.nd - 2
        if len(point) != n_fixed:
            raise Exception('ERROR: point incorrect shape %s' % (point.shape,))
        super().__init__(N_STEPS, D, point, iaxes=iaxes)
class DeepONet(NN):
    """Deep operator network: a branch net over function inputs and a trunk
    net over locations, combined by an inner product (plus optional bias).

    Parameters
    ----------
    layer_sizes_branch : branch net layer sizes; element [1] may instead be
        a callable used directly as the branch network.
    layer_sizes_trunk : trunk net layer sizes.
    activation : activation name, or a dict with 'branch'/'trunk' keys.
    kernel_initializer : weight initializer name.
    use_bias : add a trainable scalar bias to the output.
    """
    def __init__(self, layer_sizes_branch, layer_sizes_trunk, activation, kernel_initializer, use_bias=True):
        super().__init__()
        self.layer_sizes_func = layer_sizes_branch
        self.layer_sizes_loc = layer_sizes_trunk
        if isinstance(activation, dict):
            self.activation_branch = activations.get(activation['branch'])
            self.activation_trunk = activations.get(activation['trunk'])
        else:
            self.activation_branch = self.activation_trunk = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        if callable(layer_sizes_branch[1]):
            # User supplied a ready-made branch network.
            self.branch = layer_sizes_branch[1]
        else:
            # BUG FIX: the original referenced a local 'activation_branch'
            # that was only bound in the non-dict case, raising NameError
            # when 'activation' is a dict; use the attribute set above.
            self.branch = FNN(layer_sizes_branch, self.activation_branch, kernel_initializer)
        self.trunk = FNN(layer_sizes_trunk, self.activation_trunk, kernel_initializer)
        self.use_bias = use_bias
        if use_bias:
            self.b = self.create_parameter(shape=(1,), default_initializer=initializers.get('zeros'))
    def forward(self, inputs):
        """inputs = (branch_input, trunk_input); returns shape (-1, 1)."""
        x_func = inputs[0]
        x_loc = inputs[1]
        x_func = self.branch(x_func)
        if (self._input_transform is not None):
            x_loc = self._input_transform(x_loc)
        x_loc = self.activation_trunk(self.trunk(x_loc))
        if (x_func.shape[(- 1)] != x_loc.shape[(- 1)]):
            raise AssertionError('Output sizes of branch net and trunk net do not match.')
        # Row-wise inner product of branch and trunk features.
        x = paddle.einsum('bi,bi->b', x_func, x_loc)
        x = paddle.reshape(x, [(- 1), 1])
        if self.use_bias:
            x += self.b
        if (self._output_transform is not None):
            x = self._output_transform(inputs, x)
        return x
def plot_prediction(model, row, window, exponentiate=False, predict_deaths=True):
    """Plot observed deaths/cases for one row against autoregressive model
    predictions, starting at the first non-zero observation.

    Parameters
    ----------
    model : autoregressive model passed to get_auto_reg_predictions.
    row : mapping with 'deaths' and 'cases' series.
    window : prediction window forwarded to the model helper.
    exponentiate : forwarded to get_auto_reg_predictions.
    predict_deaths : plot 'deaths' when True, else 'cases'.
    """
    key = 'deaths' if predict_deaths else 'cases'
    model_predictions = get_auto_reg_predictions(model, row, window, exponentiate, predict_deaths=predict_deaths)
    model_predictions = [float(v) for v in model_predictions]
    print(model_predictions)
    # BUG FIX: default to 0 so an all-zero series no longer leaves
    # start_point unbound (NameError in the original).
    start_point = 0
    for (i, val) in enumerate(row[key]):
        if (val > 0):
            start_point = i
            break
    plt.plot(row[key][start_point:], label=key)
    plt.plot(model_predictions[start_point:], label='predictions')
    print(model_predictions[start_point:])
    plt.fill_between(list(range(len(row[key][start_point:]))), row[key][start_point:], model_predictions[start_point:])
    plt.legend()
    plt.show()
def load_traindata_path(dataset_dir, name):
    """Build train/val lists of [rawdata, espirit] .mat file path pairs.

    Subjects 1-10 form the training split, 11-20 the validation split;
    slice indices 11..29 are used for every subject.

    Returns
    -------
    dict with 'train' and 'val' lists of [raw_path, sensitivity_path].
    """
    def _slice_pairs(subject_ids):
        # One (rawdata, sensitivity-map) pair per subject per slice 11..29.
        pairs = []
        for k in subject_ids:
            subject_dir = os.path.join(dataset_dir, name, str(k))
            for i in range(11, 30):
                raw = '{0}/rawdata{1}.mat'.format(subject_dir, i)
                sen = '{0}/espirit{1}.mat'.format(subject_dir, i)
                pairs.append([raw, sen])
        return pairs
    # NOTE(review): the original globbed rawdata*.mat into an unused
    # n_slice variable while hard-coding range(11, 30); the dead glob is
    # removed here -- confirm the fixed slice range is intentional.
    data_list = {}
    data_list['train'] = _slice_pairs(range(1, 11))
    data_list['val'] = _slice_pairs(range(11, 21))
    return data_list
class CorrelatedBBTS(Agent):
    """Thompson-sampling agent for the correlated binomial bridge: keeps a
    multivariate Gaussian posterior over log edge lengths and samples a
    graph to shortest-path on each action.

    ``self.posterior`` is the tuple (Mu, Sigma, Sigma_inv) over the
    flattened edge index.
    """
    def __init__(self, n_stages, mu0, sigma0, sigma_tilde, n_sweeps=10):
        assert ((n_stages % 2) == 0)
        self.n_stages = n_stages
        self.n_sweeps = n_sweeps
        self.internal_env = CorrelatedBinomialBridge(n_stages, mu0, sigma0)
        # Bidirectional maps between (start, end) node pairs and flat indices.
        self.edge2index = defaultdict(dict)
        self.index2edge = defaultdict(dict)
        edge_counter = 0
        for start_node in self.internal_env.graph:
            for end_node in self.internal_env.graph[start_node]:
                self.edge2index[start_node][end_node] = edge_counter
                self.index2edge[edge_counter] = (start_node, end_node)
                edge_counter += 1
        self.num_edges = edge_counter
        # Independent N(mu0, sigma0^2) prior per edge (diagonal covariance).
        self.Mu0 = np.array(([mu0] * self.num_edges))
        self.Sigma0 = np.diag(([(sigma0 ** 2)] * self.num_edges))
        self.Sigma0inv = np.diag(([((1 / sigma0) ** 2)] * self.num_edges))
        self.sigma_tilde = sigma_tilde
        self.posterior = (self.Mu0, self.Sigma0, self.Sigma0inv)
        self.concentration_history = []
        self.log_reward_history = []
        self.history_size = 0
    def get_posterior_mean(self):
        """Return a graph of posterior-mean edge lengths E[exp(X)]."""
        edge_length = copy.deepcopy(self.internal_env.graph)
        for start_node in edge_length:
            for end_node in edge_length[start_node]:
                edge_index = self.edge2index[start_node][end_node]
                mean = self.posterior[0][edge_index]
                # BUG FIX: the variance lives on the diagonal of the
                # covariance matrix posterior[1]; the original indexed the
                # 1-D mean vector posterior[0] with a 2-D index.
                var = self.posterior[1][(edge_index, edge_index)]
                # Log-normal mean: E[exp(X)] = exp(mu + var/2).
                edge_length[start_node][end_node] = np.exp((mean + (0.5 * var)))
        return edge_length
    def get_posterior_sample(self):
        """Draw one joint sample of edge lengths from the posterior."""
        flattened_sample = np.random.multivariate_normal(self.posterior[0], self.posterior[1])
        edge_length = copy.deepcopy(self.internal_env.graph)
        for start_node in edge_length:
            for end_node in edge_length[start_node]:
                edge_length[start_node][end_node] = np.exp(flattened_sample[self.edge2index[start_node][end_node]])
        return edge_length
    def update_observation(self, observation, action, reward):
        """Fold one traversal's reward into the Gaussian posterior."""
        assert (observation == self.n_stages)
        (log_rewards, concentration) = _prepare_posterior_update_elements(observation, action, reward, self.num_edges, self.edge2index, self.sigma_tilde, self.internal_env)
        (new_Mu, new_Sigma, new_Sigma_inv) = _update_posterior(self.posterior, log_rewards, concentration)
        self.posterior = (new_Mu, new_Sigma, new_Sigma_inv)
    def pick_action(self, observation):
        """Thompson step: sample edge lengths, return the shortest path."""
        posterior_sample = self.get_posterior_sample()
        self.internal_env.overwrite_edge_length(posterior_sample)
        path = self.internal_env.get_shortest_path()
        return path
def product_dict(**kwargs):
    """Yield one dict per combination of the keyword-argument value lists.

    Example: product_dict(a=[1, 2], b=[3]) yields
    {'a': 1, 'b': 3} then {'a': 2, 'b': 3}.
    """
    names = list(kwargs)
    for combo in itertools.product(*kwargs.values()):
        yield dict(zip(names, combo))
class FE(nn.Module):
    """Feature-extraction head: a CB block followed by an MCB block."""
    def __init__(self, in_channels, mid_channels):
        super().__init__()
        blocks = [CB(in_channels), MCB(in_channels, mid_channels, offset_channels=32)]
        self.fe = nn.Sequential(*blocks)
    def forward(self, x):
        """Run the input through the CB -> MCB stack."""
        return self.fe(x)
class XSegNet(object):
    """TensorFlow wrapper around the XSeg face-segmentation model: builds
    the graph, loads/initialises weights, and exposes mask extraction."""
    VERSION = 1
    def __init__(self, name, resolution=256, load_weights=True, weights_file_root=None, training=False, place_model_on_cpu=False, run_on_cpu=False, optimizer=None, data_format='NHWC', raise_on_no_model_files=False):
        self.resolution = resolution
        # Weights live next to this file unless a root is given.
        self.weights_file_root = (Path(weights_file_root) if (weights_file_root is not None) else Path(__file__).parent)
        nn.initialize(data_format=data_format)
        tf = nn.tf
        model_name = f'{name}_{resolution}'
        self.model_filename_list = []
        # Placeholders always live on CPU.
        with tf.device('/CPU:0'):
            self.input_t = tf.placeholder(nn.floatx, nn.get4Dshape(resolution, resolution, 3))
            self.target_t = tf.placeholder(nn.floatx, nn.get4Dshape(resolution, resolution, 1))
        with tf.device(('/CPU:0' if place_model_on_cpu else nn.tf_default_device_name)):
            self.model = nn.XSeg(3, 32, 1, name=name)
            self.model_weights = self.model.get_weights()
            if training:
                if (optimizer is None):
                    raise ValueError('Optimizer should be provided for training mode.')
                self.opt = optimizer
                self.opt.initialize_variables(self.model_weights, vars_on_cpu=place_model_on_cpu)
                self.model_filename_list += [[self.opt, f'{model_name}_opt.npy']]
        self.model_filename_list += [[self.model, f'{model_name}.npy']]
        if (not training):
            # Inference-only graph plus a session-run helper.
            with tf.device(('/CPU:0' if run_on_cpu else nn.tf_default_device_name)):
                (_, pred) = self.model(self.input_t)
            def net_run(input_np):
                return nn.tf_sess.run([pred], feed_dict={self.input_t: input_np})[0]
            self.net_run = net_run
        self.initialized = True
        # Try to load each model's weights; fall back to fresh init.
        for (model, filename) in self.model_filename_list:
            do_init = (not load_weights)
            if (not do_init):
                model_file_path = (self.weights_file_root / filename)
                do_init = (not model.load_weights(model_file_path))
            if do_init:
                if raise_on_no_model_files:
                    # NOTE(review): when load_weights is False,
                    # model_file_path is never bound and this raise would
                    # itself fail with NameError; also 'does not exists'
                    # is a typo in the runtime message -- confirm upstream.
                    raise Exception(f'{model_file_path} does not exists.')
                if (not training):
                    # Inference without weights is useless: mark and stop.
                    self.initialized = False
                    break
            if do_init:
                model.init_weights()
    def get_resolution(self):
        """Return the model's input/output resolution."""
        return self.resolution
    def flow(self, x, pretrain=False):
        """Run the model graph on tensor x (training-graph helper)."""
        return self.model(x, pretrain=pretrain)
    def get_weights(self):
        """Return the model's weight variables."""
        return self.model_weights
    def save_weights(self):
        """Save every tracked model/optimizer to its .npy file."""
        for (model, filename) in io.progress_bar_generator(self.model_filename_list, 'Saving', leave=False):
            model.save_weights((self.weights_file_root / filename))
    def extract(self, input_image):
        """Return a [0,1] segmentation mask for input_image; a constant 0.5
        mask when the net is uninitialised. Values below 0.1 are zeroed."""
        if (not self.initialized):
            return (0.5 * np.ones((self.resolution, self.resolution, 1), nn.floatx.as_numpy_dtype))
        input_shape_len = len(input_image.shape)
        if (input_shape_len == 3):
            # Add a batch dimension for single images.
            input_image = input_image[(None, ...)]
        result = np.clip(self.net_run(input_image), 0, 1.0)
        result[(result < 0.1)] = 0
        if (input_shape_len == 3):
            result = result[0]
        return result
class CIFAR10V2_auto(object):
    """Imbalanced CIFAR-10 data loaders with AutoAugment on the train side.

    Exposes ``self.train`` and ``self.test`` DataLoaders.
    """
    def __init__(self, batch_size=128, class_balance=False, imb_factor=None):
        cifar_mean = [0.4914, 0.4822, 0.4465]
        cifar_std = [0.2023, 0.1994, 0.201]
        normalize = transforms.Normalize(cifar_mean, cifar_std)
        # Train: crop/flip + AutoAugment policy; test: tensor + normalize only.
        train_tf = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            CIFAR10Policy(),
            transforms.ToTensor(),
            normalize,
        ])
        test_tf = transforms.Compose([transforms.ToTensor(), normalize])
        trainset = IMBALANCECIFAR10(root='/mnt/proj56/jqcui/Data/cifar10', train=True, transform=train_tf, download=False, imb_factor=imb_factor, class_balance=class_balance)
        testset = datasets.CIFAR10(root='/mnt/proj56/jqcui/Data/cifar10', train=False, transform=test_tf, download=False)
        self.train = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=8, pin_memory=True, drop_last=True)
        self.test = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True)
def test_iou_piecewise_sampler():
    """GPU test: MaxIoUAssigner + IoUNegPiecewiseSampler on 3D LiDAR boxes,
    checking positive index selection and pos/neg bookkeeping."""
    # Requires CUDA (BboxOverlaps3D runs on GPU here).
    if (not torch.cuda.is_available()):
        pytest.skip()
    assigner = MaxIoUAssigner(pos_iou_thr=0.55, neg_iou_thr=0.55, min_pos_iou=0.55, ignore_iof_thr=(- 1), iou_calculator=dict(type='BboxOverlaps3D', coordinate='lidar'))
    # Proposals in (x, y, z, dx, dy, dz, yaw); many duplicates on purpose.
    bboxes = torch.tensor([[32, 32, 16, 8, 38, 42, (- 0.3)], [32, 32, 16, 8, 38, 42, (- 0.3)], [32, 32, 16, 8, 38, 42, (- 0.3)], [32, 32, 16, 8, 38, 42, (- 0.3)], [0, 0, 0, 10, 10, 10, 0.2], [10, 10, 10, 20, 20, 15, 0.6], [5, 5, 5, 15, 15, 15, 0.7], [5, 5, 5, 15, 15, 15, 0.7], [5, 5, 5, 15, 15, 15, 0.7], [32, 32, 16, 8, 38, 42, (- 0.3)], [32, 32, 16, 8, 38, 42, (- 0.3)], [32, 32, 16, 8, 38, 42, (- 0.3)]], dtype=torch.float32).cuda()
    gt_bboxes = torch.tensor([[0, 0, 0, 10, 10, 9, 0.2], [5, 10, 10, 20, 20, 15, 0.6]], dtype=torch.float32).cuda()
    gt_labels = torch.tensor([1, 1], dtype=torch.int64).cuda()
    assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    # Two-piece negative sampling: 80% from IoU < 0.55, 20% from IoU < 0.1.
    sampler = IoUNegPiecewiseSampler(num=10, pos_fraction=0.55, neg_piece_fractions=[0.8, 0.2], neg_iou_piece_thrs=[0.55, 0.1], neg_pos_ub=(- 1), add_gt_as_proposals=False)
    sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
    # Only proposal index 4 overlaps a GT above the positive threshold.
    assert (sample_result.pos_inds == 4)
    assert (len(sample_result.pos_bboxes) == len(sample_result.pos_inds))
    assert (len(sample_result.neg_bboxes) == len(sample_result.neg_inds))
def _build_humanoid_walls_env():
    """Build a CMU-humanoid run-through-corridor environment whose wall
    obstacles vary in size, side, and color per episode."""
    humanoid = walkers.CMUHumanoidPositionControlled(name='walker', observable_options={'egocentric_camera': dict(enabled=True)})
    # Per-episode randomized wall color (red-ish with varying green).
    rgba = colors.RgbVariation(r=distributions.Uniform(low=0.5, high=0.6), g=distributions.Uniform(low=0.21, high=0.41), b=0, alpha=1)
    # Corridor with randomized wall geometry and side placement.
    corridor = arenas.WallsCorridor(wall_gap=5.0, wall_width=distributions.Uniform(low=1, high=7), wall_height=distributions.Uniform(low=2.5, high=4.0), swap_wall_side=distributions.Bernoulli(prob=0.5), wall_rgba=rgba, corridor_width=10, corridor_length=100)
    task = tasks.RunThroughCorridor(walker=humanoid, arena=corridor, walker_spawn_rotation=1.57, physics_timestep=0.005, control_timestep=0.03)
    return composer.Environment(time_limit=30, task=task, strip_singleton_obs_buffer_dim=True)
def ideal_binary_mask(args, mix, sources):
    """Separate a two-source mixture with ideal binary masks.

    Each time-frequency bin of the mixture is assigned to whichever source
    has the larger magnitude there (ties go to neither), and the two masked
    spectrograms are inverted back to waveforms.

    Returns a (2, n_samples) array stacking the two estimates.
    """
    def _stft_magphase(signal):
        spec = librosa.stft(signal, n_fft=args.nfft, hop_length=args.nhop)
        return librosa.magphase(spec, power=1)
    (mix_mag, mix_phase) = _stft_magphase(mix)
    (src1_mag, _) = _stft_magphase(sources[0])
    (src2_mag, _) = _stft_magphase(sources[1])
    # Binary masks: 1 where a source strictly dominates the other.
    mask_1 = (src1_mag > src2_mag).astype(src1_mag.dtype)
    mask_2 = (src2_mag > src1_mag).astype(src2_mag.dtype)
    masked_1 = (mix_mag * mask_1) * mix_phase
    masked_2 = (mix_mag * mask_2) * mix_phase
    wave_1 = librosa.istft(masked_1, win_length=args.nfft, hop_length=args.nhop, n_fft=args.nfft)
    wave_2 = librosa.istft(masked_2, win_length=args.nfft, hop_length=args.nhop, n_fft=args.nfft)
    return np.stack([wave_1, wave_2], axis=0)
def _bf16_wrapper_model(model, bf16_ops_list, prefix=''):
    """Recursively wrap the modules named in *bf16_ops_list* with
    BF16ModuleWrapper, in place; returns *model*.

    Entries of bf16_ops_list are tuples whose first element is the dotted
    module path relative to the root model.
    """
    for (name, child) in model.named_children():
        op_name = (((prefix + '.') + name) if (prefix != '') else name)
        # BUG FIX: the original recursed once per *non-matching* list entry,
        # re-walking the same subtree len(bf16_ops_list)-1 times (and even
        # recursing into an already-wrapped child). Test membership once:
        # wrap on a match, otherwise descend exactly once.
        if any((op_name == bf16_op_name[0]) for bf16_op_name in bf16_ops_list):
            child = BF16ModuleWrapper(child)
        else:
            _bf16_wrapper_model(child, bf16_ops_list, op_name)
        setattr(model, name, child)
    return model
def test_sigmar_wlog_constbeta():
    """Check jeans.sigmar against the analytic solution for a logarithmic
    halo potential with constant anisotropy beta.

    For a flat-rotation-curve (v_c = 1) logarithmic potential the radial
    dispersion is sigma_r = 1/sqrt(2 - 2*beta), independent of radius.
    """
    from galpy.potential import LogarithmicHaloPotential
    lp = LogarithmicHaloPotential(normalize=1.0, q=1.0)
    rs = numpy.linspace(0.001, 5.0, 101)
    # Isotropic case (beta = 0): sigma_r = 1/sqrt(2) at every radius.
    assert numpy.all((numpy.fabs((numpy.array([jeans.sigmar(lp, r) for r in rs]) - (1.0 / numpy.sqrt(2.0)))) < 1e-10)), 'Radial sigma computed w/ spherical Jeans equation incorrect for LogarithmicHaloPotential and beta=0'
    # Radially anisotropic case.
    beta = 0.5
    assert numpy.all((numpy.fabs((numpy.array([jeans.sigmar(lp, r, beta=beta) for r in rs]) - (1.0 / numpy.sqrt((2.0 - (2.0 * beta)))))) < 1e-10)), 'Radial sigma computed w/ spherical Jeans equation incorrect for LogarithmicHaloPotential and beta=0.5'
    # Tangentially anisotropic case.
    beta = (- 0.5)
    assert numpy.all((numpy.fabs((numpy.array([jeans.sigmar(lp, r, beta=beta) for r in rs]) - (1.0 / numpy.sqrt((2.0 - (2.0 * beta)))))) < 1e-10)), 'Radial sigma computed w/ spherical Jeans equation incorrect for LogarithmicHaloPotential and beta=-0.5'
    return None
def get_pretrained_model(destination):
    """Download the arbitrary-style-transfer checkpoint archive and unpack
    it into *destination* (created if missing).

    NOTE(review): the URL literal looks truncated (' arbitrary_style_transfer.tar.gz'
    is not a valid URL) -- restore the full download URL before use.
    """
    url = ' arbitrary_style_transfer.tar.gz'
    # SECURITY: builds a shell command string; prefer
    # subprocess.run(['curl', '-o', 'arbitrary_style_transfer.tar.gz', url],
    # check=True) if url can ever come from outside.
    os.system('curl -o arbitrary_style_transfer.tar.gz {0}'.format(url))
    # Race-free replacement for the exists()+makedirs() pair.
    os.makedirs(destination, exist_ok=True)
    with tarfile.open('arbitrary_style_transfer.tar.gz') as tar:
        # SECURITY: extractall trusts member paths; on Python >= 3.12 pass
        # filter='data' to block path traversal from a malicious archive.
        tar.extractall(destination)
# NOTE(review): this bare tuple is almost certainly the argument list of a
# multiple-dispatch registration decorator (GPflow-style
# @...register(kernels.SharedIndependent, ...)) whose decorator call was
# lost in extraction -- restore it from the upstream source; as written it
# is a no-op expression.
(kernels.SharedIndependent, inducing_variables.SharedIndependentInducingVariables, TensorLike, TensorLike)
def _exact_shared(kern, Z, u, f, *, multioutput_axis=None, **kwargs):
    # Shared-independent multioutput case delegates unchanged to the
    # independent implementation.
    return _exact_independent(kern, Z, u, f, multioutput_axis=multioutput_axis, **kwargs)
def full_run(input_doc):
    """Apply variance thresholding to *input_doc*, then run one scaling
    loop per scaler name, each in its own process, and wait for all."""
    print('Started full run')
    print(len(input_doc._.Features[0]))
    input_doc = variance_threshold(input_doc, load=True)
    print(len(input_doc._.Features[0]))
    print('Variance Threshold Done')
    workers = []
    for scaler_name in ['QuantileGaussian']:
        print('Beginning:\t%s' % scaler_name)
        worker = multiprocessing.Process(target=loop, args=(input_doc, scaler_name))
        workers.append(worker)
        worker.start()
    # Block until every scaler process finishes.
    for worker in workers:
        worker.join()
    return
def reduction_b(net):
    """Inception-ResNet reduction-B block: three strided conv branches plus
    a max-pool branch, concatenated along the channel axis."""
    with tf.variable_scope('Branch_0'):
        b0 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        b0 = slim.conv2d(b0, 384, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_1'):
        b1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        b1 = slim.conv2d(b1, 256, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_2'):
        b2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
        b2 = slim.conv2d(b2, 256, 3, scope='Conv2d_0b_3x3')
        b2 = slim.conv2d(b2, 256, 3, stride=2, padding='VALID', scope='Conv2d_1a_3x3')
    with tf.variable_scope('Branch_3'):
        pooled = slim.max_pool2d(net, 3, stride=2, padding='VALID', scope='MaxPool_1a_3x3')
    # Channel-axis concat (NHWC axis 3) of all four branches.
    return tf.concat([b0, b1, b2, pooled], 3)
class SequentialRules():
    """Sequential-rules recommender: mines weighted item-to-item transition
    rules from session logs and scores candidate items at predict time.

    The weighting scheme (``linear``/``same``/``div``/``log``/``quadratic``)
    discounts co-occurrences by their in-session distance; ``pruning``
    keeps only the strongest successors per item.
    """
    def __init__(self, steps=10, weighting='div', pruning=20, last_n_days=None, idf_weight=False, session_key='SessionId', item_key='ItemId', time_key='Time'):
        # steps: max look-back distance inside a session.
        self.steps = steps
        self.pruning = pruning
        # Name of the weighting method resolved via getattr in fit().
        self.weighting = weighting
        self.last_n_days = last_n_days
        self.idf_weight = idf_weight
        self.session_key = session_key
        self.item_key = item_key
        self.time_key = time_key
        # Predict-time state: current session id and its item history.
        self.session = (- 1)
        self.session_items = []
    def fit(self, data, test=None):
        """Mine the rule dictionary from a session-sorted DataFrame.

        Optionally restricts training to the last_n_days of data and/or
        applies IDF weighting to rule weights.
        """
        if (self.last_n_days != None):
            # Keep only events newer than (max time - last_n_days).
            max_time = dt.fromtimestamp(data[self.time_key].max())
            date_threshold = (max_time.date() - td(self.last_n_days))
            stamp = dt.combine(date_threshold, dt.min.time()).timestamp()
            train = data[(data[self.time_key] >= stamp)]
        else:
            train = data
        if self.idf_weight:
            self.idf = self.compute_idf(data, item_key=self.item_key, session_key=self.session_key)
        cur_session = (- 1)
        last_items = []
        rules = dict()
        index_session = train.columns.get_loc(self.session_key)
        index_item = train.columns.get_loc(self.item_key)
        # Assumes rows are grouped by session (session change resets history).
        for row in train.itertuples(index=False):
            (session_id, item_id) = (row[index_session], row[index_item])
            if (session_id != cur_session):
                cur_session = session_id
                last_items = []
            else:
                # Link each of the up-to-'steps' preceding items to item_id.
                for i in range(1, ((self.steps + 1) if (len(last_items) >= self.steps) else (len(last_items) + 1))):
                    prev_item = last_items[(- i)]
                    if (not (prev_item in rules)):
                        rules[prev_item] = dict()
                    if (not (item_id in rules[prev_item])):
                        rules[prev_item][item_id] = 0
                    # Distance-discounted weight via the configured method.
                    weight = getattr(self, self.weighting)(i)
                    if self.idf_weight:
                        # idf_weight == 1 multiplies, == 2 adds the IDF term.
                        if (self.idf_weight == 1):
                            weight *= self.idf[prev_item]
                        elif (self.idf_weight == 2):
                            weight += self.idf[prev_item]
                    rules[prev_item][item_id] += weight
            last_items.append(item_id)
        if (self.pruning > 0):
            self.prune(rules)
        self.rules = rules
    def linear(self, i):
        # Linearly decaying weight; floor at 0 beyond i=100.
        return ((1 - (0.1 * i)) if (i <= 100) else 0)
    def same(self, i):
        # Uniform weight regardless of distance.
        return 1
    def div(self, i):
        # Inverse-distance weight.
        return (1 / i)
    def log(self, i):
        # Logarithmic decay.
        return (1 / log10((i + 1.7)))
    def quadratic(self, i):
        # Inverse-square decay.
        return (1 / (i * i))
    def predict_next(self, session_id, input_item_id, predict_for_item_ids, input_user_id=None, skip=False, type='view', timestamp=0):
        """Score predict_for_item_ids for the next event of a session.

        Returns a pandas Series of scores normalised by the max score, or
        None when skip is True.
        """
        if (session_id != self.session):
            # New session: reset the tracked history.
            self.session_items = []
            self.session = session_id
        if (type == 'view'):
            self.session_items.append(input_item_id)
        if skip:
            return
        preds = np.zeros(len(predict_for_item_ids))
        if (input_item_id in self.rules):
            for key in self.rules[input_item_id]:
                preds[(predict_for_item_ids == key)] = self.rules[input_item_id][key]
        series = pd.Series(data=preds, index=predict_for_item_ids)
        series = (series / series.max())
        return series
    def prune(self, rules):
        """Keep only the strongest successors per antecedent item.

        pruning < 1 is interpreted as a fraction to drop; >= 1 as an
        absolute number of successors to keep.
        """
        for k1 in rules:
            tmp = rules[k1]
            if (self.pruning < 1):
                keep = (len(tmp) - int((len(tmp) * self.pruning)))
            elif (self.pruning >= 1):
                keep = self.pruning
            counter = col.Counter(tmp)
            rules[k1] = dict()
            for (k2, v) in counter.most_common(keep):
                rules[k1][k2] = v
    def compute_idf(self, train, item_key='ItemId', session_key='SessionId'):
        """Return per-item IDF weights min-max normalised to [0, 1]."""
        idf = pd.DataFrame()
        idf['idf'] = train.groupby(item_key).size()
        idf['idf'] = np.log((train[session_key].nunique() / idf['idf']))
        idf['idf'] = ((idf['idf'] - idf['idf'].min()) / (idf['idf'].max() - idf['idf'].min()))
        idf = idf['idf'].to_dict()
        return idf
    def clear(self):
        """Drop the mined rules."""
        self.rules = {}
def imagenet_resnet101_pretrained(output_dim):
    """ImageNet-pretrained ResNet-101 with its final fully-connected layer
    replaced to emit *output_dim* outputs."""
    backbone = torchvision.models.resnet101(pretrained=True)
    return _replace_fc(backbone, output_dim)
class LogisticRegressionNetwork1(nn.Module):
    """Single-linear-layer network mapping a feature vector to one scalar
    output (no activation applied here)."""
    def __init__(self, num_feature) -> None:
        super().__init__()
        # One affine map: num_feature inputs -> 1 output.
        self.dense = nn.Linear(num_feature, 1)
    def forward(self, x):
        """Return the linear layer's output for each input row."""
        return self.dense(x)
def torch_abs(input, *, out=None):
    """Meta-analysis stand-in for ``torch.abs``.

    ``abs`` preserves shape and dtype, so the input meta tensor is passed
    through unchanged.  The in-place variant (``out=``) is rejected.
    """
    if out is None:
        return input
    raise ValueError("Don't support in-place abs for MetaTensor analysis")
class StandardNorm(nn.Module):
    """Standardization layer: x -> (x - mean) / std, with an exact inverse."""

    def __init__(self, mean, std):
        super(StandardNorm, self).__init__()
        # Fixed statistics supplied by the caller (not learned).
        self.mean = mean
        self.std = std

    def forward(self, x):
        centered = x - self.mean
        return centered / self.std

    def inverse(self, x):
        # Undo forward(): scale back up, then shift.
        scaled = x * self.std
        return scaled + self.mean
class OpPattern(JsonSerializer):
    """Deserialized operator-fusion pattern: an op-name sequence plus an
    optional precision tag.

    NOTE(review): a missing 'sequence' key yields ``['']`` (split of the empty
    string) — preserved from the original; confirm callers expect that.
    """

    def __init__(self, pattern_data: dict):
        super().__init__()
        # 'sequence' arrives as a comma-separated string of op names.
        raw_sequence = pattern_data.get('sequence', '')
        self.sequence: List[str] = raw_sequence.split(',')
        self.precision: str = pattern_data.get('precision', None)
class TestNode():
    """Drives a TurtleBot3 toward its sub-goal with a GA3C policy network.

    Two ROS timers run concurrently: a 5 Hz debug printout (``cbControl``)
    and a 100 Hz policy step (``cbComputeActionGA3C``).
    """

    def __init__(self, nav, nn, actions):
        # Navigation interface, policy network, and the discrete action set.
        self.tb3 = nav
        self.desired_speed = 0.3
        self.nn = nn
        self.actions = actions
        # Latest commanded target pose and (speed, heading) action.
        self.desired_position = PoseStamped()
        self.desired_action = np.zeros((2,))
        rospy.Timer(rospy.Duration(0.2), self.cbControl)
        rospy.Timer(rospy.Duration(0.01), self.cbComputeActionGA3C)

    def cbControl(self, event):
        # Debug: commanded-vs-actual heading error, then each value.
        print(self.desired_action[1] - self.tb3.angle_pose)
        print(self.desired_action[1])
        print(self.tb3.angle_pose)
        print('')

    def update_action(self, action):
        """Store the (speed, heading) command and project the target pose one
        unit of the commanded speed along the commanded heading."""
        self.desired_action = action
        speed = action[0]
        heading = action[1]
        base = self.tb3.pose.pose.position
        self.desired_position.pose.position.x = base.x + 1 * speed * np.cos(heading)
        self.desired_position.pose.position.y = base.y + 1 * speed * np.sin(heading)

    def cbComputeActionGA3C(self, event):
        """One policy step: observe, pick the argmax action, command it."""
        if self.tb3.goalReached():
            # At the goal: command a stop.
            self.update_action([0, 0])
            return
        px = self.tb3.pose.pose.position.x
        py = self.tb3.pose.pose.position.y
        vx = self.tb3.vel.x
        vy = self.tb3.vel.y
        radius = self.tb3.radius
        heading_angle = self.tb3.angle_pose
        pref_speed = self.desired_speed
        goal_x = self.tb3.sub_goal.x
        goal_y = self.tb3.sub_goal.y
        # Clamp the observed velocity to the preferred speed.
        speed = np.linalg.norm(np.array([vx, vy]))
        if speed > pref_speed:
            vx = (vx * pref_speed) / speed
            vy = (vy * pref_speed) / speed
        host_agent = agent.Agent(px, py, goal_x, goal_y, radius, pref_speed, heading_angle, 0)
        host_agent.vel_global_frame = np.array([vx, vy])
        # Deep-copied so the observation can't mutate shared obstacle state.
        neighbours = copy.deepcopy(self.tb3.obstacles_state)
        obs = host_agent.observe(neighbours)[1:]
        obs = np.expand_dims(obs, axis=0)
        policy = self.nn.predict_p(obs)[0]
        raw_action = copy.deepcopy(self.actions[np.argmax(policy)])
        # Scale speed by preference; heading is relative, so wrap into range.
        command = np.array([pref_speed * raw_action[0], util.wrap(raw_action[1] + self.tb3.angle_pose)])
        self.update_action(command)
class iVAE(nn.Module):
    """Identifiable VAE: a VAE whose latent prior is conditioned on an
    auxiliary variable ``u``, built from MLP encoder/decoder/prior networks.

    NOTE(review): structure matches the iVAE of Khemakhem et al. (2020);
    confirm against the accompanying paper/training script.
    """

    def __init__(self, latent_dim, data_dim, aux_dim, prior=None, decoder=None, encoder=None, n_layers=3, hidden_dim=50, activation='lrelu', slope=0.1, device='cpu', anneal=False):
        """Build the prior/decoder/encoder distributions and their MLPs.

        Args:
            latent_dim: dimensionality of the latent code z.
            data_dim: dimensionality of the observed data x.
            aux_dim: dimensionality of the auxiliary variable u.
            prior, decoder, encoder: distribution objects exposing
                ``sample``/``log_pdf``; default to ``Normal`` on ``device``.
            n_layers, hidden_dim, activation, slope: MLP hyperparameters.
            device: torch device string for buffers and networks.
            anneal: if True, ``elbo`` uses the annealed, decomposed objective
                driven by ``_training_hyperparams`` (see ``anneal``).
        """
        super().__init__()
        self.data_dim = data_dim
        self.latent_dim = latent_dim
        self.aux_dim = aux_dim
        self.hidden_dim = hidden_dim
        self.n_layers = n_layers
        self.activation = activation
        self.slope = slope
        self.anneal_params = anneal
        # Distribution families; defaults are Gaussians on the target device.
        if (prior is None):
            self.prior_dist = Normal(device=device)
        else:
            self.prior_dist = prior
        if (decoder is None):
            self.decoder_dist = Normal(device=device)
        else:
            self.decoder_dist = decoder
        if (encoder is None):
            self.encoder_dist = Normal(device=device)
        else:
            self.encoder_dist = encoder
        # Conditional prior p(z|u): fixed zero mean, u-dependent variance (logl).
        self.prior_mean = torch.zeros(1).to(device)
        self.logl = MLP(aux_dim, latent_dim, hidden_dim, n_layers, activation=activation, slope=slope, device=device)
        # Decoder f: z -> x mean, with a fixed observation variance of 0.01.
        self.f = MLP(latent_dim, data_dim, hidden_dim, n_layers, activation=activation, slope=slope, device=device)
        self.decoder_var = (0.01 * torch.ones(1).to(device))
        # Encoder q(z|x,u): mean g and log-variance logv, both fed [x, u].
        self.g = MLP((data_dim + aux_dim), latent_dim, hidden_dim, n_layers, activation=activation, slope=slope, device=device)
        self.logv = MLP((data_dim + aux_dim), latent_dim, hidden_dim, n_layers, activation=activation, slope=slope, device=device)
        self.apply(weights_init)
        # [a, b, c, d, N]: annealed term weights plus dataset size (set by anneal()).
        self._training_hyperparams = [1.0, 1.0, 1.0, 1.0, 1]

    def encoder_params(self, x, u):
        """Return (mean, variance) of the posterior q(z | x, u)."""
        xu = torch.cat((x, u), 1)
        g = self.g(xu)
        logv = self.logv(xu)
        return (g, logv.exp())

    def decoder_params(self, s):
        """Return (mean, fixed variance) of the likelihood p(x | z=s)."""
        f = self.f(s)
        return (f, self.decoder_var)

    def prior_params(self, u):
        """Return (zero mean, u-dependent variance) of the prior p(z | u)."""
        logl = self.logl(u)
        return (self.prior_mean, logl.exp())

    def forward(self, x, u):
        """Encode, sample z from the posterior, decode.

        Returns (decoder_params, encoder_params, z, prior_params).
        """
        prior_params = self.prior_params(u)
        encoder_params = self.encoder_params(x, u)
        z = self.encoder_dist.sample(*encoder_params)
        decoder_params = self.decoder_params(z)
        return (decoder_params, encoder_params, z, prior_params)

    def elbo(self, x, u):
        """Monte-Carlo ELBO for a minibatch; returns (objective_to_maximize, z).

        When annealing is active, the bound is split into reconstruction and
        three KL-like terms weighted by (a, b, c, d) — a beta-TC-VAE-style
        decomposition (NOTE(review): inferred from the term structure;
        confirm).  The aggregate-posterior terms are estimated with
        minibatch-weighted sampling over the M batch samples and dataset size N.
        """
        (decoder_params, (g, v), z, prior_params) = self.forward(x, u)
        log_px_z = self.decoder_dist.log_pdf(x, *decoder_params)
        log_qz_xu = self.encoder_dist.log_pdf(z, g, v)
        log_pz_u = self.prior_dist.log_pdf(z, *prior_params)
        if self.anneal_params:
            (a, b, c, d, N) = self._training_hyperparams
            M = z.size(0)
            # Pairwise log q(z_i | x_j, u_j) across the batch, kept per-dimension.
            log_qz_tmp = self.encoder_dist.log_pdf(z.view(M, 1, self.latent_dim), g.view(1, M, self.latent_dim), v.view(1, M, self.latent_dim), reduce=False)
            # Minibatch-weighted estimates of log q(z) and sum_i log q(z_i).
            log_qz = (torch.logsumexp(log_qz_tmp.sum(dim=(- 1)), dim=1, keepdim=False) - np.log((M * N)))
            log_qz_i = (torch.logsumexp(log_qz_tmp, dim=1, keepdim=False) - np.log((M * N))).sum(dim=(- 1))
            return (((((a * log_px_z) - (b * (log_qz_xu - log_qz))) - (c * (log_qz - log_qz_i))) - (d * (log_qz_i - log_pz_u))).mean(), z)
        else:
            # Standard ELBO: E[log p(x|z) + log p(z|u) - log q(z|x,u)].
            return (((log_px_z + log_pz_u) - log_qz_xu).mean(), z)

    def anneal(self, N, max_iter, it):
        """Update the annealing schedule at training iteration ``it``.

        Weights ramp over roughly the first 62% of training
        (thr = max_iter / 1.6); once past ``thr`` annealing is switched off
        and ``elbo`` falls back to the plain objective.
        """
        thr = int((max_iter / 1.6))
        # a is tied to the fixed decoder variance (0.5 / var).
        a = (0.5 / self.decoder_var.item())
        self._training_hyperparams[(- 1)] = N
        self._training_hyperparams[0] = min((2 * a), (a + ((a * it) / thr)))
        self._training_hyperparams[1] = max(1, ((a * 0.3) * (1 - (it / thr))))
        self._training_hyperparams[2] = min(1, (it / thr))
        self._training_hyperparams[3] = max(1, ((a * 0.5) * (1 - (it / thr))))
        if (it > thr):
            self.anneal_params = False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.