code stringlengths 101 5.91M |
|---|
class DoubleConv(nn.Module):
    """Two stacked (3x3 Conv -> BatchNorm -> ReLU) stages, as used in U-Net blocks."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # First stage maps in_channels -> out_channels, second keeps out_channels.
        stages = []
        for cin, cout in ((in_channels, out_channels), (out_channels, out_channels)):
            stages.append(nn.Conv2d(cin, cout, kernel_size=3, padding=1))
            stages.append(nn.BatchNorm2d(cout))
            stages.append(nn.ReLU(inplace=True))
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        """Apply both conv stages: (N, C_in, H, W) -> (N, C_out, H, W), same H/W."""
        return self.double_conv(x)
class ModuleConverter():
    """Convert the layers of a torch module in place to a given mode.

    Relies on the external ``convert_layer(layer, mode, copy_weights,
    layer_config, output_dim)`` helper, which returns a replacement layer or
    ``None`` when the layer type is not convertible.
    """

    def __init__(self, mode='fa'):
        # Target mode string (e.g. 'fa'); forwarded verbatim to convert_layer.
        self.mode = mode

    def convert(self, module, copy_weights=True, layer_config=None, output_dim=None):
        """Replace every convertible layer of ``module`` in place and print a summary.

        Returns the same ``module`` object (mutated).
        """
        layer_counts = self.count_layers(module)
        self.replaced_layers_counts = defaultdict(int)
        self._replace_layers_recursive(module, self.mode, copy_weights, layer_config, output_dim, self.replaced_layers_counts)
        print('Module has been converted to {} mode:\n'.format(self.mode))
        if layer_config is not None:
            print('The layer configuration was: ', layer_config)
        for layer, count in self.replaced_layers_counts.items():
            if layer_counts[layer] != count:
                print('- There were originally {} {} layers and {} were converted.'.format(layer_counts[layer], layer, count))
            else:
                print('- All the {} {} layers were converted successfully.'.format(count, layer))
        return module

    def _replace_layers_recursive(self, module, mode, copy_weights, layer_config, output_dim, replaced_layers):
        """Replace direct children of ``module``, then recurse into each child.

        ``replaced_layers`` maps str(type) -> number of replacements performed.
        """
        # Snapshot the keys so replacing attributes cannot disturb iteration.
        for module_name in list(module._modules.keys()):
            layer = getattr(module, module_name)
            new_layer = convert_layer(layer, mode, copy_weights, layer_config, output_dim)
            if new_layer is not None:
                replaced_layers[str(type(layer))] += 1
                setattr(module, module_name, new_layer)
        for name, child_module in module.named_children():
            self._replace_layers_recursive(child_module, mode, copy_weights, layer_config, output_dim, replaced_layers)

    @staticmethod
    def count_layers(module):
        """Count all submodules of ``module`` (including itself) keyed by str(type).

        BUG FIX: this was declared as ``def count_layers(module)`` without
        ``self``/``@staticmethod``, so the ``self.count_layers(module)`` call in
        ``convert`` raised a TypeError.
        """
        layer_counts = defaultdict(int)
        for layer in module.modules():
            layer_counts[str(type(layer))] += 1
        return layer_counts
def _word_to_index(word, indd):
if (word in indd):
return indd[word]
else:
return len(indd) |
class ResNet(nn.Module):
    """Dilated ResNet backbone with a PSP head and a deep-supervision (dsn) branch.

    Supports exactly two depth configurations: ``layers == [3, 4, 23, 3]``
    (ResNet-101-style, 2048-ch features) and ``layers == [2, 2, 2, 2]``
    (ResNet-18-style, 512-ch features). Relies on externally defined
    ``conv3x3``, ``BatchNorm2d``, ``PSPModule``, ``InPlaceABNSync``,
    ``affine_par`` and a residual ``block`` class.
    """

    def __init__(self, block, layers, num_classes):
        self.inplanes = 128
        super(ResNet, self).__init__()
        # Deep stem: three 3x3 convs (the first strided) instead of one 7x7.
        self.conv1 = conv3x3(3, 64, stride=2)
        self.bn1 = BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=False)
        self.conv2 = conv3x3(64, 64)
        self.bn2 = BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=False)
        self.conv3 = conv3x3(64, 128)
        self.bn3 = BatchNorm2d(128)
        self.relu3 = nn.ReLU(inplace=False)
        # NOTE(review): this maxpool is immediately overwritten two lines
        # below (ceil_mode=True wins); self.relu is never used in forward().
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.relu = nn.ReLU(inplace=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True)
        # Stages 3 and 4 keep stride 1 and use dilation to preserve resolution.
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4, multi_grid=(1, 1, 1))
        if (layers == [3, 4, 23, 3]):
            # Deep variant: PSP pools 2048 -> 512, dsn branch taps layer3 (1024 ch).
            self.pspmodule = PSPModule(2048, 512)
            self.head = nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
            self.dsn = nn.Sequential(nn.Conv2d(1024, 512, kernel_size=3, stride=1, padding=1), InPlaceABNSync(512), nn.Dropout2d(0.1), nn.Conv2d(512, num_classes, kernel_size=1, stride=1, padding=0, bias=True))
        elif (layers == [2, 2, 2, 2]):
            # Shallow variant: PSP pools 512 -> 128, dsn branch taps layer3 (256 ch).
            self.pspmodule = PSPModule(512, 128)
            self.head = nn.Conv2d(128, num_classes, kernel_size=1, stride=1, padding=0, bias=True)
            self.dsn = nn.Sequential(nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1), InPlaceABNSync(128), nn.Dropout2d(0.1), nn.Conv2d(128, num_classes, kernel_size=1, stride=1, padding=0, bias=True))
        else:
            raise ValueError('layers should be [3, 4, 23, 3] or [2, 2, 2, 2]')

    def _make_layer(self, block, planes, blocks, stride=1, dilation=1, multi_grid=1):
        """Stack ``blocks`` residual blocks; 1x1-conv downsample when shape changes."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), BatchNorm2d((planes * block.expansion), affine=affine_par))
        layers = []
        # Per-block dilation multiplier when multi_grid is a tuple, else 1.
        generate_multi_grid = (lambda index, grids: (grids[(index % len(grids))] if isinstance(grids, tuple) else 1))
        layers.append(block(self.inplanes, planes, stride, dilation=dilation, downsample=downsample, multi_grid=generate_multi_grid(0, multi_grid)))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes, dilation=dilation, multi_grid=generate_multi_grid(i, multi_grid)))
        return nn.Sequential(*layers)

    def forward(self, x):
        """Return [segmentation logits, deep-supervision logits, post-PSP features]."""
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.relu2(self.bn2(self.conv2(x)))
        x = self.relu3(self.bn3(self.conv3(x)))
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        # Auxiliary head on the layer3 features (deep supervision).
        x_dsn = self.dsn(x)
        x = self.layer4(x)
        x_feat_after_psp = self.pspmodule(x)
        x = self.head(x_feat_after_psp)
        return [x, x_dsn, x_feat_after_psp]
class SparseDense(ZooKerasLayer):
    """Keras-style dense layer for sparse input, delegating construction to ZooKerasLayer.

    All arguments are forwarded verbatim to the parent constructor; presumably
    the parent builds the backend-side layer from them (TODO confirm against
    ZooKerasLayer).
    """

    def __init__(self, output_dim, init='glorot_uniform', activation=None, W_regularizer=None, b_regularizer=None, backward_start=(- 1), backward_length=(- 1), init_weight=None, init_bias=None, init_grad_weight=None, init_grad_bias=None, bias=True, input_shape=None, **kwargs):
        # The leading None fills the parent's first positional slot;
        # input_shape is normalized to a list (or None when falsy).
        super(SparseDense, self).__init__(None, output_dim, init, activation, W_regularizer, b_regularizer, backward_start, backward_length, init_weight, init_bias, init_grad_weight, init_grad_bias, bias, (list(input_shape) if input_shape else None), **kwargs)
# NOTE(review): the three decorator lines below survived extraction only as
# bare ".parametrize(...)" fragments (a syntax error); they are reconstructed
# here as @pytest.mark.parametrize -- confirm against the original test file.
@pytest.mark.parametrize('loader_parameters', [{'path_data': [str(Path(__data_testing_dir__, 'microscopy_png'))], 'target_suffix': ['_seg-myelin-manual'], 'extensions': ['.png'], 'roi_params': {'suffix': None, 'slice_filter_roi': None}, 'contrast_params': {'contrast_lst': [], 'balance': {}}, 'slice_axis': 'axial', 'slice_filter_params': {'filter_empty_mask': False, 'filter_empty_input': True}, 'patch_filter_params': {'filter_empty_mask': False, 'filter_empty_input': False}, 'multichannel': False}])
@pytest.mark.parametrize('model_parameters', [{'name': 'Unet', 'dropout_rate': 0.3, 'bn_momentum': 0.1, 'final_activation': 'sigmoid', 'depth': 3, 'length_2D': [256, 128], 'stride_2D': [244, 116]}])
@pytest.mark.parametrize('transform_parameters', [{'Resample': {'wspace': 0.0002, 'hspace': 0.0001}, 'NumpyToTensor': {}}])
def test_2d_patches_and_resampling(download_data_testing_test_files, loader_parameters, model_parameters, transform_parameters):
    """Check that 2D patch extraction after resampling yields the expected
    patch shape, resampled index shape, and dataset length."""
    loader_parameters.update({LoaderParamsKW.MODEL_PARAMS: model_parameters})
    bids_df = BidsDataframe(loader_parameters, __tmp_dir__, derivatives=True)
    data_lst = ['sub-rat3_ses-01_sample-data9_SEM.png']
    ds = imed_loader.load_dataset(bids_df, **{**loader_parameters, **{'data_list': data_lst, 'transforms_params': transform_parameters, 'dataset_type': 'training'}})
    # Patch mode must be on, and each patch matches length_2D.
    assert ds.is_2d_patch == True
    assert ds[0]['input'].shape == (1, 256, 128)
    # Shape of the full resampled image the patches were cut from.
    assert ds[0]['input_metadata'][0].metadata[MetadataKW.INDEX_SHAPE] == (1512, 382)
    assert len(ds) == 28
def aggregate_equiv(equiv_set, input_vec, predicate_dict, aggregator):
    """Force equivalent predicates to share a single aggregated value.

    Args:
        equiv_set: iterable of comma-separated predicate-name strings, e.g.
            ``['p,q', 'r,s']``; the predicates in each string are equivalent.
        input_vec: mutable sequence of values indexed via ``predicate_dict``.
            Mutated in place.
        predicate_dict: mapping from predicate name to index in ``input_vec``.
        aggregator: one of 'max', 'min', 'mean'.

    Returns:
        The (mutated) ``input_vec``.

    Raises:
        ValueError: if ``aggregator`` is not one of the supported names.
    """
    # BUG FIX: the original compared strings with ``is`` (identity), which only
    # works by the accident of CPython string interning; use ``==``. Also, an
    # unknown aggregator previously left ``aggregator_value`` unbound (or
    # silently reused a stale value) instead of failing loudly.
    aggregators = {'max': np.max, 'min': np.min, 'mean': np.mean}
    if aggregator not in aggregators:
        raise ValueError("aggregator must be 'max', 'min' or 'mean', got {!r}".format(aggregator))
    for pair in equiv_set:
        preds = pair.split(',')
        aggregator_vec = [input_vec[predicate_dict[pred]] for pred in preds]
        aggregator_value = aggregators[aggregator](aggregator_vec)
        for pred in preds:
            input_vec[predicate_dict[pred]] = aggregator_value
    return input_vec
class StickyActionEnv(gym.Wrapper):
    """Gym wrapper that repeats the previously executed action with probability p.

    Classic "sticky actions" evaluation protocol: each step, the agent's chosen
    action is replaced by the last executed action with probability ``p``.
    """

    def __init__(self, env, p=0.25):
        super().__init__(env)
        self.p = p  # probability of repeating the previous action
        self.last_action = 0  # most recently executed action (0 before any step)

    def step(self, action):
        # With probability p, ignore the requested action and repeat the last one.
        sticky = np.random.uniform() < self.p
        action = self.last_action if sticky else action
        self.last_action = action
        obs, reward, done, info = self.env.step(action)
        return (obs, reward, done, info)
def get_assigned_file(checkpoint_dir, num):
    """Return the path of the checkpoint tarball for epoch ``num`` inside ``checkpoint_dir``."""
    filename = '{:d}.tar'.format(num)
    return os.path.join(checkpoint_dir, filename)
_tf
class TFCoreModelTesterMixin():
    """Mixin of "core" TensorFlow model tests shared across model test classes.

    Expects the concrete test class to provide ``model_tester`` (with
    ``prepare_config_and_inputs_for_common``) and ``all_model_classes``.

    NOTE(review): several decorators were stripped by extraction; the
    ``@tf.function`` decorators on the ``run_in_graph_mode`` closures were
    reconstructed from the surviving ``(experimental_compile=True)`` fragment.
    Confirm against the upstream test suite.
    """

    model_tester = None
    all_model_classes = ()
    all_generative_model_classes = ()
    test_mismatched_shapes = True
    test_resize_embeddings = True
    test_head_masking = True
    is_encoder_decoder = False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
        """Adapt the common inputs dict to ``model_class``; optionally add labels.

        Returns a deep copy, leaving the caller's dict untouched.
        """
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            # Multiple-choice models need an extra num_choices axis on every tensor.
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
                if isinstance(v, tf.Tensor) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            # Attach dummy labels under the key(s) each head type expects.
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict['labels'] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict['start_positions'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
                inputs_dict['end_positions'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in [*get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)]:
                inputs_dict['labels'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in get_values(TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING):
                inputs_dict['next_sentence_label'] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
            elif model_class in [*get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING), *get_values(TF_MODEL_FOR_CAUSAL_LM_MAPPING), *get_values(TF_MODEL_FOR_MASKED_LM_MAPPING), *get_values(TF_MODEL_FOR_PRETRAINING_MAPPING), *get_values(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)]:
                inputs_dict['labels'] = tf.zeros((self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32)
        return inputs_dict

    def test_graph_mode(self):
        """Every model must run when traced by tf.function (graph mode)."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

            @tf.function
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

    def test_xla_mode(self):
        """Every model must run when XLA-compiled via tf.function."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)

            # FIX: original had the bare fragment "(experimental_compile=True)"
            # (a syntax error) where this decorator belongs.
            @tf.function(experimental_compile=True)
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

    def test_xla_fit(self):
        """Models with a built-in loss must train one XLA-jitted step, both with
        labels packed into the inputs dict and passed separately."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            if getattr(model, 'hf_compute_loss', None):
                # Head masks / forced decoder ids interfere with fit(); drop them.
                prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
                prepared_for_class = {key: val for key, val in prepared_for_class.items() if key not in ('head_mask', 'decoder_head_mask', 'cross_attn_head_mask', 'decoder_input_ids')}
                possible_label_cols = {'labels', 'label', 'label_ids', 'start_positions', 'start_position', 'end_positions', 'end_position', 'next_sentence_label'}
                label_names = possible_label_cols.intersection(set(prepared_for_class))
                self.assertGreater(len(label_names), 0, msg='No matching label names found!')
                labels = {key: val for key, val in prepared_for_class.items() if key in label_names}
                inputs_minus_labels = {key: val for key, val in prepared_for_class.items() if key not in label_names}
                self.assertGreater(len(inputs_minus_labels), 0)
                # Case 1: labels inside the input dict.
                model.compile(optimizer=tf.keras.optimizers.SGD(0.0), jit_compile=True)
                history = model.fit(prepared_for_class, validation_data=prepared_for_class, steps_per_epoch=1, validation_steps=1, shuffle=False, verbose=0)
                loss = history.history['loss'][0]
                self.assertTrue(not isnan(loss))
                val_loss = history.history['val_loss'][0]
                self.assertTrue(not isnan(val_loss))
                # Case 2: labels passed as a separate argument (fresh model).
                model = model_class(config)
                model.compile(optimizer=tf.keras.optimizers.SGD(0.0), jit_compile=True)
                history = model.fit(inputs_minus_labels, labels, validation_data=(inputs_minus_labels, labels), steps_per_epoch=1, validation_steps=1, shuffle=False, verbose=0)
                loss = history.history['loss'][0]
                self.assertTrue(not isnan(loss))
                val_loss = history.history['val_loss'][0]
                self.assertTrue(not isnan(val_loss))

    def test_saved_model_creation_extended(self):
        """Round-trip each model through SavedModel and check hidden states /
        attentions keep their counts and shapes."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, 'use_cache'):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))
            # Restrict the inputs to the serving signature; ints must be int32.
            for key in list(class_inputs_dict.keys()):
                if key not in model.serving.input_signature[0]:
                    del class_inputs_dict[key]
                elif isinstance(class_inputs_dict[key], tf.Tensor) and class_inputs_dict[key].dtype.is_integer:
                    class_inputs_dict[key] = tf.cast(class_inputs_dict[key], tf.int32)
            if set(class_inputs_dict.keys()) != set(model.serving.input_signature[0].keys()):
                continue  # inputs do not match the serving signature; skip this model
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, 'saved_model', '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']
                self.assertEqual(len(outputs), num_out)
                # +1 for the embedding output unless overridden by the tester.
                expected_num_layers = getattr(self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1)
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size])
                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length])

    def test_mixed_precision(self):
        """Every model must run a forward pass under the mixed_float16 policy."""
        tf.keras.mixed_precision.set_global_policy('mixed_float16')
        try:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            for model_class in self.all_model_classes:
                class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)
                outputs = model(class_inputs_dict)
                self.assertIsNotNone(outputs)
        finally:
            # Always restore the default policy, even on failure.
            tf.keras.mixed_precision.set_global_policy('float32')

    def test_train_pipeline_custom_model(self):
        """Build a custom Keras model on top of each serializable MainLayer,
        train it for one epoch, and round-trip it through HDF5 save/load."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Head masks are not part of the functional-model inputs.
        if 'head_mask' in inputs_dict:
            del inputs_dict['head_mask']
        if 'decoder_head_mask' in inputs_dict:
            del inputs_dict['decoder_head_mask']
        if 'cross_attn_head_mask' in inputs_dict:
            del inputs_dict['cross_attn_head_mask']
        # Collect every keras-serializable *MainLayer class reachable from the
        # modules that define the tested model classes.
        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith('MainLayer')
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, '_keras_serializable', False)
        }
        for main_layer_class in tf_main_layer_classes:
            if 'T5' in main_layer_class.__name__:
                # T5MainLayer needs a shared embedding layer; caching breaks fit().
                shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name='shared')
                config.use_cache = False
                main_layer = main_layer_class(config, embed_tokens=shared)
            else:
                main_layer = main_layer_class(config)
            symbolic_inputs = {name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()}
            if hasattr(self.model_tester, 'num_labels'):
                num_labels = self.model_tester.num_labels
            else:
                num_labels = 2
            X = tf.data.Dataset.from_tensor_slices((inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)))).batch(1)
            hidden_states = main_layer(symbolic_inputs)[0]
            outputs = tf.keras.layers.Dense(num_labels, activation='softmax', name='outputs')(hidden_states)
            model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])
            model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['binary_accuracy'])
            model.fit(X, epochs=1)
            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, 'keras_model.h5')
                model.save(filepath)
                if 'T5' in main_layer_class.__name__:
                    model = tf.keras.models.load_model(filepath, custom_objects={main_layer_class.__name__: main_layer_class, 'TFSharedEmbeddings': TFSharedEmbeddings})
                else:
                    model = tf.keras.models.load_model(filepath, custom_objects={main_layer_class.__name__: main_layer_class})
                assert isinstance(model, tf.keras.Model)
                model(inputs_dict)

    def test_graph_mode_with_inputs_embeds(self):
        """Graph-mode forward pass must also work when token ids are replaced
        by pre-computed input embeddings."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = copy.deepcopy(inputs_dict)
            if not self.is_encoder_decoder:
                input_ids = inputs['input_ids']
                del inputs['input_ids']
            else:
                encoder_input_ids = inputs['input_ids']
                # Fall back to encoder ids when no decoder ids are provided.
                decoder_input_ids = inputs.get('decoder_input_ids', encoder_input_ids)
                del inputs['input_ids']
                inputs.pop('decoder_input_ids', None)
            if not self.is_encoder_decoder:
                inputs['inputs_embeds'] = model.get_input_embeddings()(input_ids)
            else:
                inputs['inputs_embeds'] = model.get_input_embeddings()(encoder_input_ids)
                inputs['decoder_inputs_embeds'] = model.get_input_embeddings()(decoder_input_ids)
            inputs = self._prepare_for_class(inputs, model_class)

            @tf.function
            def run_in_graph_mode():
                return model(inputs)

            outputs = run_in_graph_mode()
            self.assertIsNotNone(outputs)

    def _generate_random_bad_tokens(self, num_bad_tokens, model):
        """Return ``num_bad_tokens`` random token ids that are not special tokens."""
        special_tokens = []
        if model.config.bos_token_id is not None:
            special_tokens.append(model.config.bos_token_id)
        if model.config.pad_token_id is not None:
            special_tokens.append(model.config.pad_token_id)
        if model.config.eos_token_id is not None:
            special_tokens.append(model.config.eos_token_id)
        bad_tokens = []
        # Rejection-sample until enough non-special ids have been drawn.
        while len(bad_tokens) < num_bad_tokens:
            token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
            if token not in special_tokens:
                bad_tokens.append(token)
        return bad_tokens

    def _check_generated_ids(self, output_ids):
        """Assert every generated id lies in [0, vocab_size)."""
        for token_id in output_ids[0].numpy().tolist():
            self.assertGreaterEqual(token_id, 0)
            self.assertLess(token_id, self.model_tester.vocab_size)

    def _check_match_tokens(self, generated_ids, bad_words_ids):
        """Return True iff any bad-word id sequence occurs inside any generated sequence."""
        for bad_word_ids in bad_words_ids:
            for generated_ids_slice in generated_ids:
                # Slide a window of len(bad_word_ids) over the generated ids.
                for i in range(len(bad_word_ids), len(generated_ids_slice)):
                    if generated_ids_slice[i - len(bad_word_ids):i] == bad_word_ids:
                        return True
        return False
_register
class PrunerV2():
    """Configuration holder for a single pruner.

    Every constructor argument is mirrored, under its own name, into
    ``self.pruner_config`` (a DotDict), including arguments left as None.
    """

    def __init__(self, target_sparsity=None, pruning_type=None, pattern=None, op_names=None, excluded_op_names=None, start_step=None, end_step=None, pruning_scope=None, pruning_frequency=None, min_sparsity_ratio_per_op=None, max_sparsity_ratio_per_op=None, sparsity_decay_type=None, pruning_op_types=None, reg_type=None, criterion_reduce_type=None, parameters=None, resume_from_pruned_checkpoint=None):
        # Assemble the plain mapping first, then wrap it for dot-access.
        config = {
            'target_sparsity': target_sparsity,
            'pruning_type': pruning_type,
            'pattern': pattern,
            'op_names': op_names,
            'excluded_op_names': excluded_op_names,
            'start_step': start_step,
            'end_step': end_step,
            'pruning_scope': pruning_scope,
            'pruning_frequency': pruning_frequency,
            'min_sparsity_ratio_per_op': min_sparsity_ratio_per_op,
            'max_sparsity_ratio_per_op': max_sparsity_ratio_per_op,
            'sparsity_decay_type': sparsity_decay_type,
            'pruning_op_types': pruning_op_types,
            'reg_type': reg_type,
            'criterion_reduce_type': criterion_reduce_type,
            'parameters': parameters,
            'resume_from_pruned_checkpoint': resume_from_pruned_checkpoint,
        }
        self.pruner_config = DotDict(config)
def prepro_each(args, data_type, start_ratio=0.0, stop_ratio=1.0, out_name='default', in_path=None):
    """Tokenize a SQuAD-style JSON file and save word/char-level features.

    Builds question/context token and character lists, answer word/char spans,
    and word/char frequency counters, then persists everything via ``save``.
    ``start_ratio``/``stop_ratio`` select a fractional slice of the articles.
    Relies on external helpers ``process_tokens``, ``get_word_span``,
    ``get_word_idx``, ``get_word2vec`` and ``save``.
    """
    # Pick the sentence splitter + word tokenizer pair.
    if (args.tokenizer == 'PTB'):
        import nltk
        sent_tokenize = nltk.sent_tokenize

        def word_tokenize(tokens):
            # Normalize PTB-style quote tokens back to plain double quotes.
            return [token.replace("''", '"').replace('``', '"') for token in nltk.word_tokenize(tokens)]
    elif (args.tokenizer == 'Stanford'):
        from my.corenlp_interface import CoreNLPInterface
        interface = CoreNLPInterface(args.url, args.port)
        sent_tokenize = interface.split_doc
        word_tokenize = interface.split_sent
    else:
        raise Exception()
    if (not args.split):
        # No sentence splitting: treat the whole paragraph as one "sentence".
        sent_tokenize = (lambda para: [para])
    source_path = (in_path or os.path.join(args.source_dir, '{}-{}v1.1.json'.format(data_type, args.suffix)))
    source_data = json.load(open(source_path, 'r'))
    # Per-question accumulators (q: question tokens, cq: question chars,
    # y/cy: word/char answer spans, rx/rcx: [article, paragraph] refs,
    # na: no-answer flags); x/cx/p are per-article context features.
    (q, cq, y, rx, rcx, ids, idxs) = ([], [], [], [], [], [], [])
    na = []
    cy = []
    (x, cx) = ([], [])
    answerss = []
    p = []
    (word_counter, char_counter, lower_word_counter) = (Counter(), Counter(), Counter())
    # Fractional article range [start_ai, stop_ai).
    start_ai = int(round((len(source_data['data']) * start_ratio)))
    stop_ai = int(round((len(source_data['data']) * stop_ratio)))
    for (ai, article) in enumerate(tqdm(source_data['data'][start_ai:stop_ai])):
        (xp, cxp) = ([], [])
        pp = []
        x.append(xp)
        cx.append(cxp)
        p.append(pp)
        for (pi, para) in enumerate(article['paragraphs']):
            # Normalize quote marks (padded with a space to keep char offsets aligned).
            context = para['context']
            context = context.replace("''", '" ')
            context = context.replace('``', '" ')
            # xi: list of sentences, each a list of word tokens; cxi: per-token char lists.
            xi = list(map(word_tokenize, sent_tokenize(context)))
            xi = [process_tokens(tokens) for tokens in xi]
            cxi = [[list(xijk) for xijk in xij] for xij in xi]
            xp.append(xi)
            cxp.append(cxi)
            pp.append(context)
            # Weight context counts by the number of questions on this paragraph.
            for xij in xi:
                for xijk in xij:
                    word_counter[xijk] += len(para['qas'])
                    lower_word_counter[xijk.lower()] += len(para['qas'])
                    for xijkl in xijk:
                        char_counter[xijkl] += len(para['qas'])
            rxi = [ai, pi]
            assert ((len(x) - 1) == ai)
            assert ((len(x[ai]) - 1) == pi)
            for qa in para['qas']:
                qi = word_tokenize(qa['question'])
                qi = process_tokens(qi)
                cqi = [list(qij) for qij in qi]
                yi = []
                cyi = []
                answers = []
                for answer in qa['answers']:
                    answer_text = answer['text']
                    answers.append(answer_text)
                    answer_start = answer['answer_start']
                    answer_stop = (answer_start + len(answer_text))
                    # Convert the char span into (sentence, word) start/stop indices.
                    (yi0, yi1) = get_word_span(context, xi, answer_start, answer_stop)
                    assert (len(xi[yi0[0]]) > yi0[1])
                    assert (len(xi[yi1[0]]) >= yi1[1])
                    w0 = xi[yi0[0]][yi0[1]]
                    w1 = xi[yi1[0]][(yi1[1] - 1)]
                    i0 = get_word_idx(context, xi, yi0)
                    i1 = get_word_idx(context, xi, (yi1[0], (yi1[1] - 1)))
                    # Char offsets of the answer inside its first and last word.
                    cyi0 = (answer_start - i0)
                    cyi1 = ((answer_stop - i1) - 1)
                    assert (answer_text[0] == w0[cyi0]), (answer_text, w0, cyi0)
                    assert (answer_text[(- 1)] == w1[cyi1])
                    assert (cyi0 < 32), (answer_text, w0)
                    assert (cyi1 < 32), (answer_text, w1)
                    yi.append([yi0, yi1])
                    cyi.append([cyi0, cyi1])
                if (len(qa['answers']) == 0):
                    # Unanswerable question: dummy span + no-answer flag.
                    yi.append([(0, 0), (0, 1)])
                    cyi.append([0, 1])
                    na.append(True)
                else:
                    na.append(False)
                for qij in qi:
                    word_counter[qij] += 1
                    lower_word_counter[qij.lower()] += 1
                    for qijk in qij:
                        char_counter[qijk] += 1
                q.append(qi)
                cq.append(cqi)
                y.append(yi)
                cy.append(cyi)
                rx.append(rxi)
                rcx.append(rxi)
                ids.append(qa['id'])
                idxs.append(len(idxs))
                answerss.append(answers)
        if args.debug:
            # Debug mode: stop after the first article.
            break
    word2vec_dict = get_word2vec(args, word_counter)
    lower_word2vec_dict = get_word2vec(args, lower_word_counter)
    # '*'-prefixed keys are indirect references into the shared arrays.
    data = {'q': q, 'cq': cq, 'y': y, '*x': rx, '*cx': rcx, 'cy': cy, 'idxs': idxs, 'ids': ids, 'answerss': answerss, '*p': rx, 'na': na}
    shared = {'x': x, 'cx': cx, 'p': p, 'word_counter': word_counter, 'char_counter': char_counter, 'lower_word_counter': lower_word_counter, 'word2vec': word2vec_dict, 'lower_word2vec': lower_word2vec_dict}
    print('saving ...')
    save(args, data, shared, out_name)
_module()
class AutoAugment(object):
    """Apply one randomly chosen augmentation policy per call.

    Each policy is a non-empty list of augmentation dicts (each with a 'type'
    key) that is compiled into a ``Compose`` pipeline; ``__call__`` picks one
    pipeline uniformly at random and applies it.
    """

    def __init__(self, policies):
        assert (isinstance(policies, list) and (len(policies) > 0)), 'Policies must be a non-empty list.'
        for policy in policies:
            assert (isinstance(policy, list) and (len(policy) > 0)), 'Each policy in policies must be a non-empty list.'
            for augment in policy:
                assert (isinstance(augment, dict) and ('type' in augment)), 'Each specific augmentation must be a dict with key "type".'
        # Deep-copy so later mutation of the caller's list cannot affect us.
        self.policies = copy.deepcopy(policies)
        self.transforms = [Compose(policy) for policy in self.policies]

    def __call__(self, results):
        """Apply one uniformly-sampled policy pipeline to ``results``."""
        transform = np.random.choice(self.transforms)
        return transform(results)

    def __repr__(self):
        # BUG FIX: the original f-string was missing the closing parenthesis.
        return f'{self.__class__.__name__}(policies={self.policies})'
def evaluate():
    """Build the TF1 evaluation graph, restore a checkpoint, and compute
    place-recognition recall between every pair of query sets.

    Reads module-level configuration (GPU_INDEX, batch sizes, DATABASE_SETS,
    QUERY_SETS, LOG_DIR, model_file, output_file, ...) and appends the
    computed latent vectors to the DATABASE_VECTORS / QUERY_VECTORS globals.
    Writes a summary of average recall/similarity to ``output_file``.
    """
    global DATABASE_VECTORS
    global QUERY_VECTORS
    global array
    with tf.Graph().as_default():
        with tf.device(('/gpu:' + str(GPU_INDEX))):
            print('In Graph')
            # Placeholders for query / positive / negative point clouds.
            query = placeholder_inputs(BATCH_NUM_QUERIES, 1, NUM_POINTS)
            positives = placeholder_inputs(BATCH_NUM_QUERIES, POSITIVES_PER_QUERY, NUM_POINTS)
            negatives = placeholder_inputs(BATCH_NUM_QUERIES, NEGATIVES_PER_QUERY, NUM_POINTS)
            eval_queries = placeholder_inputs(EVAL_BATCH_SIZE, 1, NUM_POINTS)
            is_training_pl = tf.placeholder(tf.bool, shape=())
            print(is_training_pl)
            batch = tf.Variable(0)
            bn_decay = get_bn_decay(batch)
            with tf.variable_scope('query_triplets') as scope:
                # Run all tuple members through the network in one batch,
                # then split the embeddings back out.
                vecs = tf.concat([query, positives, negatives], 1)
                print(vecs)
                out_vecs = forward(vecs, is_training_pl, bn_decay=bn_decay)
                print(out_vecs)
                (q_vec, pos_vecs, neg_vecs) = tf.split(out_vecs, [1, POSITIVES_PER_QUERY, NEGATIVES_PER_QUERY], 1)
                print(q_vec)
                print(pos_vecs)
                print(neg_vecs)
            saver = tf.train.Saver()
        # Session config: allow memory growth and soft placement.
        gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.95)
        config = tf.ConfigProto(gpu_options=gpu_options)
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        sess = tf.Session(config=config)
        saver.restore(sess, os.path.join(LOG_DIR, model_file))
        print('Model restored.')
        ops = {'query': query, 'positives': positives, 'negatives': negatives, 'is_training_pl': is_training_pl, 'eval_queries': eval_queries, 'q_vec': q_vec, 'pos_vecs': pos_vecs, 'neg_vecs': neg_vecs}
        recall = np.zeros(NUMBER_NEIBORS)
        count = 0
        similarity = []
        one_percent_recall = []
        # Embed every database and query set once.
        for i in range(len(DATABASE_SETS)):
            DATABASE_VECTORS.append(get_latent_vectors(sess, ops, DATABASE_SETS[i]))
        for j in range(len(QUERY_SETS)):
            QUERY_VECTORS.append(get_latent_vectors(sess, ops, QUERY_SETS[j]))
        # Accumulate recall over all ordered pairs of distinct query sets.
        for m in range(len(QUERY_SETS)):
            for n in range(len(QUERY_SETS)):
                if (m == n):
                    continue
                (pair_recall, pair_similarity, pair_opr) = get_recall(sess, ops, m, n)
                recall += np.array(pair_recall)
                count += 1
                one_percent_recall.append(pair_opr)
                for x in pair_similarity:
                    similarity.append(x)
        print()
        ave_recall = (recall / count)
        print(ave_recall)
        average_similarity = np.mean(similarity)
        print(average_similarity)
        ave_one_percent_recall = np.mean(one_percent_recall)
        print(ave_one_percent_recall)
        # Persist the aggregate metrics.
        with open(output_file, 'w') as output:
            output.write('Average Recall :\n')
            output.write(str(ave_recall))
            output.write('\n\n')
            output.write('Average Similarity:\n')
            output.write(str(average_similarity))
            output.write('\n\n')
            output.write('Average Top 1% Recall:\n')
            output.write(str(ave_one_percent_recall))
def error_orders(i, month, day, td, lb_days=5, metric='normalized'):
    """Rank prediction errors for county ``i`` around the 2020-(month)-(day) forecast date.

    Collects ``lb_days`` look-back errors plus one forward error at horizon
    ``td``, then returns their 1-based ranks (ascending) via argsort.
    Reads the module-level ``df_county`` dataframe and ``horizon`` constant.
    """
    base_date = date(2020, month, day)
    mepis = []  # kept from the original; not used below
    preds = df_county[f'all_deaths_pred_{month}_{day}_ensemble_{horizon}'].values  # kept; not used below

    def score(actual, pred):
        # Error under the selected metric; 'normalized' guards against pred == 0.
        if (metric == 'normalized'):
            return abs(((actual / max(pred, 1)) - 1))
        elif (metric == 'absolute'):
            return abs((actual - pred))

    errors = []
    # Look-back errors: compare observed deaths to earlier forecasts.
    for lookback in range(lb_days):
        observed_day = (base_date - timedelta((lookback + 1)))
        forecast_day = (base_date - timedelta((lookback + td)))
        actual = df_county[f"#Deaths_{observed_day.strftime('%m-%d-%Y')}"].values[i]
        pred = df_county[f'all_deaths_pred_{forecast_day.month}_{forecast_day.day}_ensemble_{horizon}'].values[i][(td - 1)]
        value = score(actual, pred)
        if value is not None:
            errors.append(value)
    # Forward error: today's forecast at horizon td against the realized value.
    observed_day = (base_date + timedelta((td - 1)))
    actual = df_county[f"#Deaths_{observed_day.strftime('%m-%d-%Y')}"].values[i]
    pred = df_county[f'all_deaths_pred_{base_date.month}_{base_date.day}_ensemble_{horizon}'].values[i][(td - 1)]
    value = score(actual, pred)
    if value is not None:
        errors.append(value)
    # 1-based rank of each error in ascending order.
    return (1 + np.argsort(np.array(errors)))
def main_worker(gpu, ngpus_per_node, args):
    """Per-process worker for (optionally distributed) knowledge distillation.

    Builds a frozen teacher and a student model, optionally loads checkpoints,
    then either evaluates once or runs the full train/validate loop with
    linear-probe-style training (only the final fc/classifier learns).
    Relies on external helpers (train, validate, save_checkpoint,
    sanity_check, get_last_checkpoint, loss_fn_kd, model builders, ...).
    """
    global best_acc1
    args.gpu = gpu
    if (args.multiprocessing_distributed and (args.gpu != 0)):
        # Silence printing on every process except the first GPU per node.
        def print_pass(*args):
            pass
        builtins.print = print_pass
    if (args.gpu is not None):
        print('Use GPU: {} for training'.format(args.gpu))
    if args.distributed:
        if ((args.dist_url == 'env://') and (args.rank == (- 1))):
            args.rank = int(os.environ['RANK'])
        if args.multiprocessing_distributed:
            # Global rank = node rank * gpus-per-node + local gpu index.
            args.rank = ((args.rank * ngpus_per_node) + gpu)
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank)
    # --- Teacher model -------------------------------------------------------
    print("=> creating model '{}'".format(args.teacher_arch))
    if (args.teacher_arch == 'resnet50w2'):
        teacher_model = resnet50w2(num_classes=1000)
    elif (args.teacher_arch in ['resnet50', 'resnet101', 'resnet152']):
        teacher_model = models.__dict__[args.teacher_arch](num_classes=1000)
    elif (args.teacher_arch in ['SWAVresnet50', 'DCresnet50', 'SELAresnet50']):
        # SwAV / DeepCluster / SeLa checkpoints all use the swav resnet50 trunk.
        teacher_model = swav_resnet50(num_classes=1000)
    else:
        print('Error')
        sys.exit((- 1))
    # Load teacher weights, stripping the DataParallel 'module.' prefix.
    if args.teacher:
        if os.path.isfile(args.teacher):
            print("=> loading checkpoint '{}'".format(args.teacher))
            if (args.gpu is None):
                checkpoint = torch.load(args.teacher)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.teacher, map_location=loc)
            state_dict = checkpoint['state_dict']
            for k in list(state_dict.keys()):
                state_dict[k[len('module.'):]] = state_dict[k]
                del state_dict[k]
            args.start_epoch = checkpoint['epoch']
            msg = teacher_model.load_state_dict(state_dict)
            print(msg)
            print("=> loaded checkpoint '{}' (epoch {})".format(args.teacher, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.teacher))
    # --- Student model -------------------------------------------------------
    print("=> creating model '{}'".format(args.arch))
    if (args.arch == 'efficientb0'):
        model = efficientnet_b0(pretrained=False, num_classes=1000)
    elif (args.arch == 'efficientb1'):
        model = efficientnet_b1(pretrained=False, num_classes=1000)
    elif (args.arch == 'mobilenetv3'):
        model = mobilenetv3_large_100(num_classes=1000)
    else:
        model = models.__dict__[args.arch]()
    print(model)
    # Load the self-supervised pretrained encoder into the student, keeping
    # only encoder_q weights and dropping its projection/classification head.
    if args.pretrained:
        if os.path.isfile(args.pretrained):
            print("=> loading checkpoint '{}'".format(args.pretrained))
            checkpoint = torch.load(args.pretrained, map_location='cpu')
            state_dict = checkpoint['state_dict']
            for k in list(state_dict.keys()):
                if (args.arch in ['efficientb0', 'efficientb1', 'mobilenetv3']):
                    if (k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.classifier'))):
                        state_dict[k[len('module.encoder_q.'):]] = state_dict[k]
                elif (k.startswith('module.encoder_q') and (not k.startswith('module.encoder_q.fc'))):
                    state_dict[k[len('module.encoder_q.'):]] = state_dict[k]
                del state_dict[k]
            args.start_epoch = 0
            msg = model.load_state_dict(state_dict, strict=False)
            print(msg)
            # Only the final head weights should be missing after the load.
            if (args.arch in ['efficientb0', 'efficientb1', 'mobilenetv3']):
                assert (set(msg.missing_keys) == {'classifier.weight', 'classifier.bias'})
            else:
                assert (set(msg.missing_keys) == {'fc.weight', 'fc.bias'})
            print("=> loaded pre-trained model '{}'".format(args.pretrained))
        else:
            print("=> no checkpoint found at '{}'".format(args.pretrained))
    # Freeze the whole teacher; freeze the student except its final head.
    for (name, param) in teacher_model.named_parameters():
        param.requires_grad = False
    for (name, param) in model.named_parameters():
        if ((args.arch in ['resnet18', 'resnet34']) and (name not in ['fc.weight', 'fc.bias'])):
            param.requires_grad = False
        if ((args.arch in ['efficientb0', 'efficientb1', 'mobilenetv3']) and (name not in ['classifier.weight', 'classifier.bias'])):
            param.requires_grad = False
    # Re-initialize the trainable head.
    if (args.arch in ['efficientb0', 'efficientb1', 'mobilenetv3']):
        model.classifier.weight.data.normal_(mean=0.0, std=0.01)
        model.classifier.bias.data.zero_()
    else:
        model.fc.weight.data.normal_(mean=0.0, std=0.01)
        model.fc.bias.data.zero_()
    # --- Device placement / parallelism wrappers -----------------------------
    if args.distributed:
        if (args.gpu is not None):
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            teacher_model.cuda(args.gpu)
            # Split batch and workers across the GPUs of this node.
            args.batch_size = int((args.batch_size / ngpus_per_node))
            args.workers = int((((args.workers + ngpus_per_node) - 1) / ngpus_per_node))
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif (args.gpu is not None):
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    elif (args.arch.startswith('alexnet') or args.arch.startswith('vgg')):
        # AlexNet/VGG: only parallelize the convolutional features.
        model.features = torch.nn.DataParallel(model.features)
        model.cuda()
    else:
        model = torch.nn.DataParallel(model).cuda()
    # Distillation loss; exactly the two head parameters should be trainable.
    criterion = loss_fn_kd
    parameters = list(filter((lambda p: p.requires_grad), model.parameters()))
    assert (len(parameters) == 2)
    optimizer = torch.optim.SGD(parameters, args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
    # Optionally resume training state from the latest checkpoint.
    if args.resume:
        checkpoint_path = get_last_checkpoint(args.resume)
        if os.path.isfile(checkpoint_path):
            print("=> loading checkpoint '{}'".format(checkpoint_path))
            if (args.gpu is None):
                checkpoint = torch.load(checkpoint_path)
            else:
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(checkpoint_path, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if (args.gpu is not None):
                best_acc1 = best_acc1.to(args.gpu)
            out = model.load_state_dict(checkpoint['state_dict'], strict=False)
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
    cudnn.benchmark = True
    # --- Data loading --------------------------------------------------------
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False, num_workers=args.workers, pin_memory=True)
    if args.evaluate:
        validate(val_loader, model, teacher_model, criterion, args)
        return
    # --- Training loop -------------------------------------------------------
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)
        train(train_loader, model, teacher_model, criterion, optimizer, epoch, args)
        acc1 = validate(val_loader, model, teacher_model, criterion, args)
        is_best = (acc1 > best_acc1)
        best_acc1 = max(acc1, best_acc1)
        # Only rank 0 (per node) writes checkpoints.
        if ((not args.multiprocessing_distributed) or (args.multiprocessing_distributed and ((args.rank % ngpus_per_node) == 0))):
            save_checkpoint({'epoch': (epoch + 1), 'arch': args.arch, 'state_dict': model.state_dict(), 'best_acc1': best_acc1, 'optimizer': optimizer.state_dict()}, is_best)
            if (epoch == args.start_epoch):
                # Verify the frozen backbone was not modified by the first epoch.
                sanity_check(model.state_dict(), args.pretrained, args)
@_task('translation', dataclass=TranslationConfig)
# NOTE(review): the '@' and the decorators below appear to have been stripped in a
# refactor ('_task' is presumably fairseq's register_task); restored so the task
# actually registers and classmethod/property accessors work — confirm.
class TranslationTask(FairseqTask):
    """Translate from one (source) language to another (target) language.

    Loads the source/target dictionaries, builds language-pair datasets per
    split, and can optionally score generations with BLEU during validation
    when ``cfg.eval_bleu`` is set.
    """

    cfg: TranslationConfig

    def __init__(self, cfg: TranslationConfig, src_dict, tgt_dict):
        super().__init__(cfg)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict

    @classmethod
    def setup_task(cls, cfg: TranslationConfig, **kwargs):
        """Set up the task: infer the language pair if needed, load both dictionaries.

        Raises:
            Exception: if the language pair cannot be inferred from the data path.
        """
        paths = utils.split_paths(cfg.data)
        assert (len(paths) > 0)
        # Infer source/target language codes from the data files when not given.
        if ((cfg.source_lang is None) or (cfg.target_lang is None)):
            (cfg.source_lang, cfg.target_lang) = data_utils.infer_language_pair(paths[0])
        if ((cfg.source_lang is None) or (cfg.target_lang is None)):
            raise Exception('Could not infer language pair, please provide it explicitly')
        src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(cfg.source_lang)))
        tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(cfg.target_lang)))
        # Both dictionaries must agree on the special-symbol indices.
        assert (src_dict.pad() == tgt_dict.pad())
        assert (src_dict.eos() == tgt_dict.eos())
        assert (src_dict.unk() == tgt_dict.unk())
        logger.info('[{}] dictionary: {} types'.format(cfg.source_lang, len(src_dict)))
        logger.info('[{}] dictionary: {} types'.format(cfg.target_lang, len(tgt_dict)))
        return cls(cfg, src_dict, tgt_dict)

    def load_dataset(self, split, epoch=1, combine=False, **kwargs):
        """Load the given dataset split into ``self.datasets[split]``."""
        paths = utils.split_paths(self.cfg.data)
        assert (len(paths) > 0)
        if (split != self.cfg.train_subset):
            # Non-train splits only ever use the first data directory.
            paths = paths[:1]
        # Round-robin over data shards by epoch.
        data_path = paths[((epoch - 1) % len(paths))]
        (src, tgt) = (self.cfg.source_lang, self.cfg.target_lang)
        self.datasets[split] = load_langpair_dataset(data_path, split, src, self.src_dict, tgt, self.tgt_dict, combine=combine, dataset_impl=self.cfg.dataset_impl, upsample_primary=self.cfg.upsample_primary, left_pad_source=self.cfg.left_pad_source, left_pad_target=self.cfg.left_pad_target, max_source_positions=self.cfg.max_source_positions, max_target_positions=self.cfg.max_target_positions, load_alignments=self.cfg.load_alignments, truncate_source=self.cfg.truncate_source, num_buckets=self.cfg.num_batch_buckets, shuffle=(split != 'test'), pad_to_multiple=self.cfg.required_seq_len_multiple)

    def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
        """Wrap raw source tokens in a LanguagePairDataset for generation."""
        return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary, tgt_dict=self.target_dictionary, constraints=constraints)

    def build_model(self, cfg, from_checkpoint=False):
        """Build the model; also set up the detokenizer/generator for eval BLEU."""
        model = super().build_model(cfg, from_checkpoint)
        if self.cfg.eval_bleu:
            detok_args = json.loads(self.cfg.eval_bleu_detok_args)
            self.tokenizer = encoders.build_tokenizer(Namespace(tokenizer=self.cfg.eval_bleu_detok, **detok_args))
            gen_args = json.loads(self.cfg.eval_bleu_args)
            self.sequence_generator = self.build_generator([model], Namespace(**gen_args))
        return model

    def valid_step(self, sample, model, criterion):
        """Run a validation step; optionally attach BLEU sufficient statistics."""
        (loss, sample_size, logging_output) = super().valid_step(sample, model, criterion)
        if self.cfg.eval_bleu:
            bleu = self._inference_with_bleu(self.sequence_generator, sample, model)
            logging_output['_bleu_sys_len'] = bleu.sys_len
            logging_output['_bleu_ref_len'] = bleu.ref_len
            assert (len(bleu.counts) == EVAL_BLEU_ORDER)
            # Store per-order n-gram counts so they can be summed across workers.
            for i in range(EVAL_BLEU_ORDER):
                logging_output[('_bleu_counts_' + str(i))] = bleu.counts[i]
                logging_output[('_bleu_totals_' + str(i))] = bleu.totals[i]
        return (loss, sample_size, logging_output)

    def reduce_metrics(self, logging_outputs, criterion):
        """Aggregate logging outputs; derive corpus BLEU from summed statistics."""
        super().reduce_metrics(logging_outputs, criterion)
        if self.cfg.eval_bleu:

            def sum_logs(key):
                # Sum a stat across workers, moving tensors to CPU for logging.
                import torch
                result = sum((log.get(key, 0) for log in logging_outputs))
                if torch.is_tensor(result):
                    result = result.cpu()
                return result

            (counts, totals) = ([], [])
            for i in range(EVAL_BLEU_ORDER):
                counts.append(sum_logs(('_bleu_counts_' + str(i))))
                totals.append(sum_logs(('_bleu_totals_' + str(i))))
            if (max(totals) > 0):
                # Only log BLEU stats when at least one n-gram total is non-zero.
                metrics.log_scalar('_bleu_counts', np.array(counts))
                metrics.log_scalar('_bleu_totals', np.array(totals))
                metrics.log_scalar('_bleu_sys_len', sum_logs('_bleu_sys_len'))
                metrics.log_scalar('_bleu_ref_len', sum_logs('_bleu_ref_len'))

                def compute_bleu(meters):
                    # sacrebleu renamed the smoothing kwarg across versions; probe
                    # the signature to stay compatible with both.
                    import inspect
                    try:
                        from sacrebleu.metrics import BLEU
                        comp_bleu = BLEU.compute_bleu
                    except ImportError:
                        import sacrebleu
                        comp_bleu = sacrebleu.compute_bleu
                    fn_sig = inspect.getfullargspec(comp_bleu)[0]
                    if ('smooth_method' in fn_sig):
                        smooth = {'smooth_method': 'exp'}
                    else:
                        smooth = {'smooth': 'exp'}
                    bleu = comp_bleu(correct=meters['_bleu_counts'].sum, total=meters['_bleu_totals'].sum, sys_len=meters['_bleu_sys_len'].sum, ref_len=meters['_bleu_ref_len'].sum, **smooth)
                    return round(bleu.score, 2)

                metrics.log_derived('bleu', compute_bleu)

    def max_positions(self):
        """Return (max source positions, max target positions) for this task."""
        return (self.cfg.max_source_positions, self.cfg.max_target_positions)

    @property
    def source_dictionary(self):
        """The source-language Dictionary."""
        return self.src_dict

    @property
    def target_dictionary(self):
        """The target-language Dictionary."""
        return self.tgt_dict

    def _inference_with_bleu(self, generator, sample, model):
        """Generate hypotheses for a sample and score them against references with sacrebleu."""
        import sacrebleu

        def decode(toks, escape_unk=False):
            # Distinct UNK placeholders so hyp/ref UNKs never spuriously match.
            s = self.tgt_dict.string(toks.int().cpu(), self.cfg.eval_bleu_remove_bpe, unk_string=('UNKNOWNTOKENINREF' if escape_unk else 'UNKNOWNTOKENINHYP'))
            if self.tokenizer:
                s = self.tokenizer.decode(s)
            return s

        gen_out = self.inference_step(generator, [model], sample, prefix_tokens=None)
        (hyps, refs) = ([], [])
        for i in range(len(gen_out)):
            hyps.append(decode(gen_out[i][0]['tokens']))
            refs.append(decode(utils.strip_pad(sample['target'][i], self.tgt_dict.pad()), escape_unk=True))
        if self.cfg.eval_bleu_print_samples:
            logger.info(('example hypothesis: ' + hyps[0]))
            logger.info(('example reference: ' + refs[0]))
        if self.cfg.eval_tokenized_bleu:
            # Data is assumed pre-tokenized; disable sacrebleu's own tokenizer.
            return sacrebleu.corpus_bleu(hyps, [refs], tokenize='none')
        else:
            return sacrebleu.corpus_bleu(hyps, [refs])
def main(args):
    """Convert the 'train' and 'val' splits from args.data_path into args.out_path."""
    src_root = Path(args.data_path)
    dst_root = Path(args.out_path)
    os.makedirs(str(dst_root), exist_ok=True)
    for split_name in ('train', 'val'):
        convert(split_name, src_root, dst_root, args.subset_fract)
def encode(v, **kwargs):
    """Compress tensor ``v`` by norm-scaled Bernoulli sampling of its entries.

    Each flattened entry is kept with probability |v_i| / ||v||; the encoding
    stores only the kept indices, their signs, the tensor norm and its shape.

    Returns:
        (payload, timing): ``payload`` has keys 'signs', 'size', 'selected',
        'norm'; ``timing`` holds per-phase wall-clock durations in seconds.
    """
    norm = torch.norm(v)
    w = v.view(-1)
    t = [time.time()]
    signs = torch.sign(w).int()
    # NOTE(review): a zero tensor gives norm == 0 and NaN probabilities — confirm
    # callers never pass an all-zero gradient.
    probs = torch.abs(w) / norm
    # BUGFIX: masked_select requires a bool mask in modern PyTorch; a .byte()
    # (uint8) mask is rejected.
    mask = torch.distributions.Bernoulli(probs).sample().bool()
    t += [time.time()]
    idx = torch.arange(0, len(w))
    t += [time.time()]
    if v.is_cuda:
        idx = idx.cuda()
        mask = mask.cuda()
    t += [time.time()]
    selected = torch.masked_select(idx, mask).long()
    signs = torch.masked_select(signs, mask)
    t += [time.time()]
    data = {'masking_time': (t[-1] - t[-2]), 'gen_mask_time': (t[1] - t[0]), 'to_gpu_time': (t[-2] - t[-3])}
    return ({'signs': signs, 'size': v.size(), 'selected': selected, 'norm': norm}, data)
def create_tf_node(op, name, inputs):
    """Build a TensorFlow NodeDef with the given op type, node name and input names."""
    from tensorflow.core.framework import node_def_pb2
    node = node_def_pb2.NodeDef()
    node.op = op
    node.name = name
    node.input.extend(list(inputs))
    return node
class INItPrClient(ItPrClient):
    """Iterative-pruning client that wires up SGD with step-decay LR scheduling."""

    def init_optimizer(self):
        """Create the SGD optimizer, its StepLR scheduler and the wrapper around both."""
        # Per-step gamma chosen so the LR halves every LR_HALF_LIFE steps.
        decay_gamma = 0.5 ** (STEP_SIZE / LR_HALF_LIFE)
        self.optimizer = SGD(self.model.parameters(), lr=INIT_LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)
        self.optimizer_scheduler = lr_scheduler.StepLR(self.optimizer, step_size=STEP_SIZE, gamma=decay_gamma)
        self.optimizer_wrapper = OptimizerWrapper(self.model, self.optimizer, self.optimizer_scheduler)

    def init_train_loader(self, tl):
        """Keep a handle to the externally-constructed training DataLoader."""
        self.train_loader = tl
def get_args():
    """Parse and return command-line arguments for training/evaluation.

    Returns:
        argparse.Namespace with device, data-loading, optimization and
        environment settings.
    """
    # Offer every visible CUDA device as an explicit choice next to 'auto'/'cpu'/'cuda'.
    cuda_devices = [f'cuda:{i}' for i in range(torch.cuda.device_count())]
    parser = argparse.ArgumentParser()
    parser.add_argument('-device', type=str, choices=(['auto', 'cpu', 'cuda'] + cuda_devices), default='auto', help='Which device to use')
    parser.add_argument('-cpus', type=str, default='auto', help='How many CPUs to use')
    # BUGFIX: help text previously said "/ How many CPUs to use" (copy-paste from -cpus).
    parser.add_argument('-batch', type=int, default=256, help='Size of a batch')
    parser.add_argument('-seed', type=int, default=None, help='Random seed')
    parser.add_argument('-load_model', type=str, default=None, help='Load model from this file')
    parser.add_argument('-epoch', type=int, default=1000, help='Epoch length')
    parser.add_argument('-max_epochs', type=int, default=None, help='Terminate after this many epochs')
    parser.add_argument('-mp_iterations', type=int, default=5, help='Number of message passes')
    parser.add_argument('-lr', type=float, default=0.0003, help='Initial learning rate')
    parser.add_argument('-alpha_h', type=float, default=2.5e-05, help='Initial entropy regularization constant')
    parser.add_argument('-boxes', type=int, default=5, help='Number of boxes')
    # store_const (not store_true) kept so the unset default stays None, as callers may rely on it.
    parser.add_argument('-trace', action='store_const', const=True, help='Show trace of the agent')
    parser.add_argument('-eval', action='store_const', const=True, help='Evaluate the agent')
    cmd_args = parser.parse_args()
    return cmd_args
class QuaternionToReal(nn.Module):
    """Map a quaternion-valued activation to a real magnitude via the tensor norm."""

    def __init__(self, in_channels):
        super(QuaternionToReal, self).__init__()
        # Stored for reference; not used in forward.
        self.in_channels = in_channels

    def forward(self, x, quat_format=False):
        magnitude = x.norm()
        if not quat_format:
            return magnitude
        # Re-wrap the magnitude as a quaternion with zero imaginary parts,
        # concatenating along dim 0 for 1-D norms and dim 1 otherwise.
        zeros = [torch.zeros_like(magnitude)] * 3
        cat_dim = 0 if (len(magnitude.shape) == 1) else 1
        return Q(torch.cat([magnitude, *zeros], cat_dim))
def test_add_end_edge():
    """A SparseHMM whose states route probability mass to the end state must fail to initialize."""
    exp_dist = Exponential()
    gamma_dist = Gamma()
    hmm = SparseHMM([exp_dist, gamma_dist])
    hmm.add_edge(exp_dist, hmm.end, 0.2)
    hmm.add_edge(gamma_dist, hmm.end, 0.3)
    assert_raises(ValueError, hmm._initialize)
def evaluate(data_file, model_folder, loss):
    """Evaluate the saved AA_TransUNet checkpoint on the cloud-map test split.

    Returns a dict mapping the model name to its loss on the test loader.
    """
    dataset = cloud_maps(folder=data_file, input_imgs=4, output_imgs=6, train=False)
    loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=2, pin_memory=True)
    model_name = 'AA_TransUNet'
    # NOTE(review): checkpoint path is hard-coded and model_folder is unused — confirm intended.
    model = AA_TransUnet.load_from_checkpoint('/AA_TransUNet/results/Model_Saved/T21_CBAM_end_100.ckpt')
    score = get_model_loss(model, loader, loss)
    results = {model_name: score}
    print(f'Model Name: {model_name}, Loss(MSE): {score}')
    return results
class BaseDRLAgent(ABC):
    """Base class for deployable DRL navigation agents.

    Reads ROS parameters and robot/action-space config files, builds the
    observation collector and reward calculator, and publishes velocity
    commands on the robot's cmd_vel topic.
    """

    def __init__(self, ns: str=None, robot_name: str=None, hyperparameter_path: str=DEFAULT_HYPERPARAMETER, action_space_path: str=DEFAULT_ACTION_SPACE, *args, **kwargs) -> None:
        """Read configuration and wire up all sub-components.

        Args:
            ns: ROS namespace prefix ('' or None for the root namespace).
            robot_name: simulation namespace of the robot.
            hyperparameter_path: JSON file with agent hyperparameters.
            action_space_path: YAML file describing the action space.
        """
        self._is_train_mode = rospy.get_param('/train_mode')
        # Normalize the namespace so it is either '' or ends with '/'.
        self._ns = ('' if ((ns is None) or (ns == '')) else (ns + '/'))
        self._ns_robot = (self._ns if (robot_name is None) else ((self._ns + robot_name) + '/'))
        self._robot_sim_ns = robot_name
        self.load_hyperparameters(path=hyperparameter_path)
        robot_setting_path = os.path.join(ROOT_ROBOT_PATH, (self.robot_config_name + '.model.yaml'))
        self.read_setting_files(robot_setting_path, action_space_path)
        self.setup_action_space()
        self.setup_reward_calculator()
        self.observation_collector = ObservationCollector(self._ns_robot, self._num_laser_beams, self._laser_range)
        self._action_frequency = (1 / rospy.get_param('/robot_action_rate'))
        if self._is_train_mode:
            self._action_pub = rospy.Publisher(f'{self._ns_robot}cmd_vel', Twist, queue_size=1)
        else:
            self._action_pub = rospy.Publisher(f'{self._ns_robot}cmd_vel_pub', Twist, queue_size=1)

    def setup_agent(self) -> None:
        """Instantiate ``self._agent``; must be provided by subclasses."""
        raise NotImplementedError

    def load_hyperparameters(self, path: str) -> None:
        """Load agent hyperparameters from a JSON file into ``self._agent_params``."""
        assert os.path.isfile(path), f'Hyperparameters file cannot be found at {path}!'
        with open(path, 'r') as file:
            hyperparams = json.load(file)
        self._agent_params = hyperparams
        self._get_robot_name_from_params()
        rospy.set_param('actions_in_obs', self._agent_params.get('actions_in_observationspace', False))
        # Imported for their registration side effects (custom policy classes).
        import rl_agent.model.custom_policy
        import rl_agent.model.custom_sb3_policy

    def read_setting_files(self, robot_setting_yaml: str, action_space_yaml: str) -> None:
        """Parse the robot model YAML (laser geometry) and the action-space YAML."""
        self._num_laser_beams = None
        self._laser_range = None
        # 5% safety margin on the configured robot radius.
        self._robot_radius = (rospy.get_param('radius') * 1.05)
        with open(robot_setting_yaml, 'r') as fd:
            robot_data = yaml.safe_load(fd)
            for plugin in robot_data['plugins']:
                if (plugin['type'] == 'Laser'):
                    laser_angle_min = plugin['angle']['min']
                    laser_angle_max = plugin['angle']['max']
                    laser_angle_increment = plugin['angle']['increment']
                    self._num_laser_beams = int((round(((laser_angle_max - laser_angle_min) / laser_angle_increment)) + 1))
                    self._laser_range = plugin['range']
        if (self._num_laser_beams is None):
            self._num_laser_beams = DEFAULT_NUM_LASER_BEAMS
            # BUGFIX: the doubled braces printed the literal placeholder text
            # instead of the default value.
            print(f"{self._robot_sim_ns}:Wasn't able to read the number of laser beams.Set to default: {DEFAULT_NUM_LASER_BEAMS}")
        if (self._laser_range is None):
            self._laser_range = DEFAULT_LASER_RANGE
            print(f"{self._robot_sim_ns}:Wasn't able to read the laser range.Set to default: {DEFAULT_LASER_RANGE}")
        with open(action_space_yaml, 'r') as fd:
            setting_data = yaml.safe_load(fd)
            self._holonomic = setting_data['robot']['holonomic']
            self._discrete_actions = setting_data['robot']['discrete_actions']
            self._cont_actions = {'linear_range': setting_data['robot']['continuous_actions']['linear_range'], 'angular_range': setting_data['robot']['continuous_actions']['angular_range']}

    def _get_robot_name_from_params(self):
        """Extract the robot config name from the loaded hyperparameters."""
        assert (self._agent_params and self._agent_params['robot'])
        self.robot_config_name = self._agent_params['robot']

    def setup_action_space(self) -> None:
        """Construct ``self._action_space`` from the discrete/continuous config."""
        assert (self._discrete_actions or self._cont_actions)
        assert (self._agent_params and ('discrete_action_space' in self._agent_params))
        if self._agent_params['discrete_action_space']:
            assert (not self._holonomic), 'Discrete action space currently not supported for holonomic robots'
            # BUGFIX: assign the private attribute (was self.action_space) so the
            # action_space property below works for both branches.
            self._action_space = spaces.Discrete(len(self._discrete_actions))
        else:
            linear_range = self._cont_actions['linear_range'].copy()
            angular_range = self._cont_actions['angular_range'].copy()
            if (not self._holonomic):
                self._action_space = spaces.Box(low=np.array([linear_range[0], angular_range[0]]), high=np.array([linear_range[1], angular_range[1]]), dtype=np.float32)
            else:
                (linear_range_x, linear_range_y) = (linear_range['x'], linear_range['y'])
                # BUGFIX: np.float was removed in NumPy >= 1.24; use np.float32
                # to match the non-holonomic Box above.
                self._action_space = spaces.Box(low=np.array([linear_range_x[0], linear_range_y[0], angular_range[0]]), high=np.array([linear_range_x[1], linear_range_y[1], angular_range[1]]), dtype=np.float32)

    def setup_reward_calculator(self) -> None:
        """Create the RewardCalculator from the configured reward function."""
        assert (self._agent_params and ('reward_fnc' in self._agent_params))
        self.reward_calculator = RewardCalculator(holonomic=self._holonomic, robot_radius=self._robot_radius, safe_dist=(1.6 * self._robot_radius), goal_radius=GOAL_RADIUS, rule=self._agent_params['reward_fnc'], extended_eval=False)

    @property
    def action_space(self) -> spaces.Box:
        """The agent's action space (Discrete or Box).

        NOTE(review): the @property decorator appears to have been lost in a
        refactor; restored so attribute-style access works — confirm.
        """
        return self._action_space

    @property
    def observation_space(self) -> spaces.Box:
        """The observation space reported by the observation collector."""
        return self.observation_collector.observation_space

    def get_observations(self) -> Tuple[np.ndarray, dict]:
        """Collect (and optionally normalize) the current observation."""
        (merged_obs, obs_dict) = self.observation_collector.get_observations()
        if self._agent_params['normalize']:
            merged_obs = self.normalize_observations(merged_obs)
        return (merged_obs, obs_dict)

    def normalize_observations(self, merged_obs: np.ndarray) -> np.ndarray:
        """Apply the (externally attached) observation normalization function."""
        assert (self._agent_params['normalize'] and hasattr(self, '_obs_norm_func'))
        return self._obs_norm_func(merged_obs)

    def get_action(self, obs: np.ndarray) -> np.ndarray:
        """Predict a deterministic action; decode discrete actions, clip continuous ones."""
        assert self._agent, 'Agent model not initialized!'
        action = self._agent.predict(obs, deterministic=True)[0]
        if self._agent_params['discrete_action_space']:
            action = self._get_disc_action(action)
        else:
            # Clip into the Box bounds.
            action = np.maximum(np.minimum(self._action_space.high, action), self._action_space.low)
        return action

    def get_reward(self, action: np.ndarray, obs_dict: dict) -> float:
        """Compute the reward for the given action and observation dict."""
        return self.reward_calculator.get_reward(action=action, **obs_dict)

    def publish_action(self, action: np.ndarray) -> None:
        """Publish the action as a Twist message on the cmd_vel topic."""
        action_msg = (self._get_hol_action_msg(action) if self._holonomic else self._get_nonhol_action_msg(action))
        self._action_pub.publish(action_msg)

    def _get_disc_action(self, action: int) -> np.ndarray:
        """Translate a discrete action index into a (linear, angular) command."""
        return np.array([self._discrete_actions[action]['linear'], self._discrete_actions[action]['angular']])

    def _get_hol_action_msg(self, action: np.ndarray):
        """Build a Twist for holonomic robots: (linear.x, linear.y, angular.z)."""
        assert (len(action) == 3), 'Holonomic robots require action arrays to have 3 entries.'
        action_msg = Twist()
        action_msg.linear.x = action[0]
        action_msg.linear.y = action[1]
        action_msg.angular.z = action[2]
        return action_msg

    def _get_nonhol_action_msg(self, action: np.ndarray):
        """Build a Twist for non-holonomic robots: (linear.x, angular.z)."""
        assert (len(action) == 2), 'Non-holonomic robots require action arrays to have 2 entries.'
        action_msg = Twist()
        action_msg.linear.x = action[0]
        action_msg.angular.z = action[1]
        return action_msg
class SpdSegment():
    """One speech segment parsed from an XML element: speaker, times, callsigns."""

    def __init__(self, segment_xml):
        """Extract speaker, start/end times (seconds) and the callsign list.

        The callsign list defaults to [] when the field is missing or unparsable.
        """
        self.spkr = segment_xml.speaker.get_text()
        self.start = float(segment_xml.start.get_text())
        self.end = float(segment_xml.end.get_text())
        self.callsign_list = []
        try:
            # SECURITY: eval() on XML-sourced text executes arbitrary code if the
            # input is untrusted; consider ast.literal_eval instead.
            self.callsign_list = list(eval(segment_xml.callsigns.get_text()))
        except Exception:
            # BUGFIX: narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; parse failures stay best-effort.
            print('Warning, could not load callsigns', file=sys.stderr)

    def to_csv(self, sep=';', label=''):
        """Render as one CSV row: label, speaker, start, end, space-joined callsigns."""
        (spkr, s, e, callsign_list) = (self.spkr, self.start, self.end, self.callsign_list)
        (s, e) = (f'{s:.2f}', f'{e:.2f}')
        return sep.join([label, spkr, s, e, ' '.join(callsign_list)])
def parse_init(init_file):
    """Parse a transformers-style ``__init__.py`` and return its declared objects.

    Scans the file twice: first the ``_import_structure`` dict (lazy imports),
    then the ``if TYPE_CHECKING`` section (direct imports), grouping object
    names by backend (``'none'`` for backend-independent entries).

    Returns:
        (import_dict_objects, type_hint_objects): two dicts mapping backend
        name -> list of object names, or None if no ``_import_structure``
        block is found.
    """
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    # Skip ahead to the start of the _import_structure declaration.
    line_index = 0
    while ((line_index < len(lines)) and (not lines[line_index].startswith('_import_structure = {'))):
        line_index += 1
    if (line_index >= len(lines)):
        return None
    # --- Pass 1a: backend-independent entries of _import_structure. ---
    objects = []
    while ((not lines[line_index].startswith('if TYPE_CHECKING')) and (find_backend(lines[line_index]) is None)):
        line = lines[line_index]
        # Whole dict on a single line: pull every bracketed list out of it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall('\\[([^\\]]+)\\]', content)
            for imp in imports:
                # Strip the surrounding quotes from each object name.
                objects.extend([obj[1:(- 1)] for obj in imp.split(', ')])
            line_index += 1
            continue
        # One "key": ["values", ...] entry per line.
        single_line_import_search = _re_import_struct_key_value.search(line)
        if (single_line_import_search is not None):
            imports = [obj[1:(- 1)] for obj in single_line_import_search.groups()[0].split(', ') if (len(obj) > 0)]
            objects.extend(imports)
        elif line.startswith(((' ' * 8) + '"')):
            # Bare quoted object name at 8-space indent; slice off quotes and ",\n".
            objects.append(line[9:(- 3)])
        line_index += 1
    import_dict_objects = {'none': objects}
    # --- Pass 1b: backend-specific try/except blocks of _import_structure. ---
    while (not lines[line_index].startswith('if TYPE_CHECKING')):
        backend = find_backend(lines[line_index])
        # Only count it as a backend block when the previous line opened a try:.
        if (_re_try.search(lines[(line_index - 1)]) is None):
            backend = None
        if (backend is not None):
            line_index += 1
            # Skip past the except/else boilerplate to the block body.
            while (_re_else.search(lines[line_index]) is None):
                line_index += 1
            line_index += 1
            objects = []
            # Body lines are blank or indented by at least 4 spaces.
            while ((len(lines[line_index]) <= 1) or lines[line_index].startswith((' ' * 4))):
                line = lines[line_index]
                if (_re_import_struct_add_one.search(line) is not None):
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif (_re_import_struct_add_many.search(line) is not None):
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:(- 1)] for obj in imports if (len(obj) > 0)]
                    objects.extend(imports)
                elif (_re_between_brackets.search(line) is not None):
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:(- 1)] for obj in imports if (len(obj) > 0)]
                    objects.extend(imports)
                elif (_re_quote_object.search(line) is not None):
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(((' ' * 8) + '"')):
                    objects.append(line[9:(- 3)])
                elif line.startswith(((' ' * 12) + '"')):
                    objects.append(line[13:(- 3)])
                line_index += 1
            import_dict_objects[backend] = objects
        else:
            line_index += 1
    # --- Pass 2a: backend-independent imports in the TYPE_CHECKING section. ---
    objects = []
    while ((line_index < len(lines)) and (find_backend(lines[line_index]) is None) and (not lines[line_index].startswith('else'))):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if (single_line_import_search is not None):
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith((' ' * 8)):
            objects.append(line[8:(- 2)])
        line_index += 1
    type_hint_objects = {'none': objects}
    # --- Pass 2b: backend-specific try/except blocks in TYPE_CHECKING. ---
    while (line_index < len(lines)):
        backend = find_backend(lines[line_index])
        if (_re_try.search(lines[(line_index - 1)]) is None):
            backend = None
        if (backend is not None):
            line_index += 1
            while (_re_else.search(lines[line_index]) is None):
                line_index += 1
            line_index += 1
            objects = []
            # Here block bodies sit at 8+ spaces (one level deeper than pass 1b).
            while ((len(lines[line_index]) <= 1) or lines[line_index].startswith((' ' * 8))):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if (single_line_import_search is not None):
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith((' ' * 12)):
                    objects.append(line[12:(- 2)])
                line_index += 1
            type_hint_objects[backend] = objects
        else:
            line_index += 1
    return (import_dict_objects, type_hint_objects)
def train(args, net, device, train_loader, optimizer, epoch, logger):
    """Train `net` for one epoch, supporting both CurveBall and standard optimizers."""
    net.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        start = time()
        data, target = data.to(device), target.to(device)
        forward = lambda: net(data)
        compute_loss = lambda out: F.cross_entropy(out, target)
        if isinstance(optimizer, CurveBall):
            # CurveBall drives the forward/backward passes itself.
            loss, predictions = optimizer.step(forward, compute_loss)
        else:
            optimizer.zero_grad()
            predictions = forward()
            loss = compute_loss(predictions)
            loss.backward()
            optimizer.step()
        top1 = predictions.max(1, keepdim=True)[1]
        accuracy = top1.eq(target.view_as(top1)).double().mean()
        stats = {'train.loss': loss.item(), 'train.accuracy': accuracy.item()}
        if not logger:
            print(stats)
            continue
        logger.update_average(stats)
        # Only start timing once a few batches have run (warm-up).
        if logger.avg_count['train.loss'] > 3:
            logger.update_average({'train.time': (time() - start)})
        logger.print(line_prefix=('ep %i ' % epoch), prefix='train')
def one_line_log(config, cur_step, loss, batch_per_epoch, start_time, validation=False):
    """Print a single carriage-return status line; emit a newline every 1000 steps."""
    step_txt = f'Step: {cur_step:<6}'
    loss_txt = f'Val loss: {loss:<6.4f}' if validation else f'Loss: {loss:<6.4f}'
    epoch_txt = f'Epoch: {(cur_step // batch_per_epoch):<4.0f}'
    # Millions of images seen so far.
    mimg_txt = f'Mimg: {((cur_step * config.dataloader.params.batch_size) / 1000000.0):<6.4f}'
    elapsed_txt = f'Elapsed time: {delay2str((time.time() - start_time)):<10}'
    print(f'{step_txt} | {loss_txt} {epoch_txt} {mimg_txt} | {elapsed_txt}', end='\r')
    if (cur_step % 1000) == 0:
        print()
def tanh_tanh2_2(x, mu, sd):
    """Return tanh and tanh^2 of the normalized input with their first and second x-derivatives.

    The input is normalized as z = (x - mu) / sd; derivatives use
    d/dx tanh(z) = sech^2(z) / sd.
    """
    z = (x - mu) / sd
    th = torch.tanh(z)
    sech_sq = 1 - th ** 2
    first = sech_sq / sd
    second = (-2) * th * sech_sq / (sd ** 2)
    th_sq = th ** 2
    sq_first = 2 * th * sech_sq / sd
    sq_second = (2 * sech_sq ** 2 - 4 * th ** 2 * sech_sq) / (sd ** 2)
    return (th, first, second, th_sq, sq_first, sq_second)
class FocalLossBinary(_Loss):
    """Binary focal loss (optionally the 'reduced' variant) with label ignoring."""

    def __init__(self, ignore: int=None, reduced: bool=False, gamma: float=2.0, alpha: float=0.25, threshold: float=0.5, reduction: str='mean'):
        super().__init__()
        # Targets equal to this value are excluded from the loss.
        self.ignore = ignore
        if reduced:
            self.loss_fn = partial(reduced_focal_loss, gamma=gamma, threshold=threshold, reduction=reduction)
        else:
            self.loss_fn = partial(sigmoid_focal_loss, gamma=gamma, alpha=alpha, reduction=reduction)

    def forward(self, logits, targets):
        flat_targets = targets.view(-1)
        flat_logits = logits.view(-1)
        if self.ignore is not None:
            keep = flat_targets != self.ignore
            flat_logits = flat_logits[keep]
            flat_targets = flat_targets[keep]
        return self.loss_fn(flat_logits, flat_targets)
def remove_last(tensors, term_bin_weights):
    """Trim each tensor's trailing bin along dimensions whose last weight slice sums to zero.

    None tensors stay None; tensors with None weights pass through unchanged.
    """
    trimmed = []
    for position, tensor, weights in zip(count(), tensors, term_bin_weights):
        if tensor is None:
            trimmed.append(None)
            continue
        if weights is None:
            trimmed.append(tensor)
            continue
        ndim = weights.ndim
        full_slices = [slice(None)] * ndim
        higher = []
        for axis in range(ndim):
            selector = full_slices.copy()
            # Pick the final index along this axis, all of the others.
            selector[axis] = -1
            higher.append(bool(np.sum(weights[tuple(selector)]) == 0))
        trimmed.append(trim_tensor(tensor, None, higher))
    return trimmed
def search_by_batch(model, beams, mem_dict):
    """Run incremental beam-search decoding for several beams in one batched loop.

    Each iteration gathers all hypotheses from beams that are not yet
    complete, runs one batched decode step on GPU, then splits the updated
    decoder state back out to the individual beams. Loops until every beam
    reports completed().
    """

    def ready_to_submit(hypotheses):
        # Batch the last token of every hypothesis and concatenate their
        # per-hypothesis decoder states into single tensors.
        inp = model.prepare_incremental_input([hyp.seq[(- 1):] for hyp in hypotheses]).cuda()
        concat_hyps = dict()
        for hyp in hypotheses:
            for (k, v) in hyp.state_dict.items():
                concat_hyps[k] = (concat_hyps.get(k, []) + [v])
        for (k, v) in concat_hyps.items():
            # Tensors of rank >= 3 batch along dim 1, others along dim 0.
            if (len(v[0].size()) >= 3):
                concat_hyps[k] = torch.cat(v, 1)
            else:
                concat_hyps[k] = torch.cat(v, 0)
        return (concat_hyps, inp)

    while True:
        # Collect every live hypothesis plus the index of its owning beam.
        hypotheses = []
        indices = []
        offset = (- 1)
        for (idx, beam) in enumerate(beams):
            if (not beam.completed()):
                for hyp in beam.hypotheses:
                    hypotheses.append(hyp)
                    indices.append(idx)
                    # NOTE(review): offset ends up as the last hypothesis's
                    # current length - 1; presumably all live hypotheses share
                    # the same length — confirm.
                    offset = (len(hyp.seq) - 1)
        if (not hypotheses):
            break
        (state_dict, inp) = ready_to_submit(hypotheses)
        # Select the memory (encoder) entries for the beams still decoding.
        cur_mem_dict = dict()
        indices = torch.tensor(indices).cuda()
        for (k, v) in mem_dict.items():
            if (v is None):
                cur_mem_dict[k] = None
            elif isinstance(v, list):
                cur_mem_dict[k] = [v[i] for i in indices]
            else:
                cur_mem_dict[k] = v.index_select(1, indices)
        (state_dict, results) = model.decode_step(inp, state_dict, cur_mem_dict, offset, beams[0].beam_size)
        # Split the batched state back into one dict per live beam.
        _len_each_beam = [len(beam.hypotheses) for beam in beams if (not beam.completed())]
        _state_dict_each_beam = [dict() for _ in _len_each_beam]
        for (k, v) in state_dict.items():
            split_dim = (1 if (len(v.size()) >= 3) else 0)
            for (i, x) in enumerate(v.split(_len_each_beam, dim=split_dim)):
                _state_dict_each_beam[i][k] = x
        # Hand each live beam its slice of states and decode results.
        _pos = 0
        _idx = 0
        for beam in beams:
            if (not beam.completed()):
                _len = len(beam.hypotheses)
                beam.update(_state_dict_each_beam[_idx], results[_pos:(_pos + _len)])
                _pos += _len
                _idx += 1
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension; 1-D inputs pass through."""

    def forward(self, input):
        if input.dim() <= 1:
            return input
        return input.view(input.size(0), -1)
def yaw_diff(gt_box: EvalBox, eval_box: EvalBox, period: float=(2 * np.pi)) -> float:
    """Return the absolute yaw-angle difference between two boxes, wrapped to *period*."""
    gt_yaw = quaternion_yaw(Quaternion(gt_box.rotation))
    est_yaw = quaternion_yaw(Quaternion(eval_box.rotation))
    return abs(angle_diff(gt_yaw, est_yaw, period))
def build_model1(X_train, y_train, X_valid, y_valid, max_len, max_features, embed_size, embedding_matrix, lr=0.0, lr_d=0.0, spatial_dr=0.0, dense_units=128, conv_size=128, dr=0.2, patience=3, fold_id=1):
    """Train a stacked BiLSTM + Attention multi-label (8-way sigmoid) text classifier.

    Saves the best epoch (by val_loss) to disk and returns a second model
    handle over the same graph, compiled with the best weights restored.
    """
    weights_file = f'best_model_fold_{fold_id}.hdf5'
    save_best = ModelCheckpoint(weights_file, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
    stop_early = EarlyStopping(monitor='val_loss', mode='min', patience=patience)
    main_input = Input(shape=(max_len,), name='main_input')
    # Frozen pretrained embeddings.
    embedded = Embedding((max_features + 1), (embed_size * 2), input_length=max_len, weights=[embedding_matrix], trainable=False)(main_input)
    embedded = SpatialDropout1D(0.4)(embedded)
    encoded = Bidirectional(LSTM(150, return_sequences=True))(embedded)
    encoded = Bidirectional(LSTM(150, return_sequences=True))(encoded)
    pooled = concatenate([Attention(max_len)(encoded), GlobalMaxPooling1D()(encoded)])
    dense = Dense(1024, activation='selu')(pooled)
    dense = Dropout(0.4)(dense)
    dense = Dense(512, activation='selu')(dense)
    dense = Dropout(0.4)(dense)
    dense = Dense(128, activation='selu')(dense)
    output_layer = Dense(8, activation='sigmoid')(dense)
    model = Model(inputs=[main_input], outputs=output_layer)
    model.compile(loss='binary_crossentropy', optimizer=Adam(lr=lr, decay=lr_d), metrics=['binary_accuracy'])
    from keras.utils import plot_model
    plot_model(model, to_file='model1.png')
    # Second handle over the same layers; will carry the checkpointed weights.
    model2 = Model(inputs=[main_input], outputs=output_layer)
    model.fit(X_train, y_train, batch_size=16, epochs=50, validation_data=(X_valid, y_valid), verbose=1, callbacks=[stop_early, save_best])
    model2.load_weights(weights_file)
    model2.compile(loss='binary_crossentropy', optimizer=Adam(lr=lr, decay=lr_d), metrics=['binary_accuracy'])
    return model2
def get_synset(t):
    """Look up the first WordNet noun synset for label *t*.

    Strips a trailing '_outdoor'/'_indoor' qualifier, then progressively
    drops trailing '_'-separated components until a synset is found.
    Returns None when nothing matches.
    """
    from nltk.corpus import wordnet as wn
    if (t.endswith('_outdoor') or t.endswith('_indoor')):
        t = '_'.join(t.split('_')[:(- 1)])
    ss = wn.synsets(t, pos=wn.NOUN)
    if ss:
        return ss[0]
    while ('_' in t):
        # BUGFIX: was "'_'.join(t.split('_'))[:-1]" — the slice applied to the
        # re-joined string, chopping one *character* per iteration instead of
        # dropping the last '_'-separated component.
        t = '_'.join(t.split('_')[:(- 1)])
        # NOTE(review): only the first remaining component is looked up here —
        # presumably a deliberate last-resort fallback; confirm.
        ss = wn.synsets(t.split('_')[0], pos=wn.NOUN)
        if ss:
            return ss[0]
    return None
def make_dir(_dir: str) -> None:
    """Create directory ``_dir`` (and any missing parents) if it does not exist.

    ``exist_ok=True`` already makes this idempotent and race-free, so the
    previous exists() pre-check and FileExistsError handler were redundant.
    """
    os.makedirs(_dir, exist_ok=True)
def gtzan_path2gt(file_path):
    """Map a GTZAN file path to its integer genre label (0-9).

    The genre tag is the filename stem before the track number, e.g.
    '/data/blues.00001.wav' -> 'blues' -> 0. Unknown tags print a warning
    and drop into the ipdb debugger (returning None), as before.
    """
    tag = file_path[(file_path.rfind('/') + 1):file_path.rfind('.', 0, (- 4))]
    print(tag)
    # Dict dispatch replaces the former 10-branch if/elif chain.
    genres = {
        'blues': 0,
        'classical': 1,
        'country': 2,
        'disco': 3,
        'hiphop': 4,
        'jazz': 5,
        'metal': 6,
        'pop': 7,
        'reggae': 8,
        'rock': 9,
    }
    if tag in genres:
        return genres[tag]
    print((('Warning: did not find the corresponding ground truth (' + str(tag)) + ').'))
    import ipdb
    ipdb.set_trace()
class UNet(nn.Module):
    """U-Net encoder/decoder with width multiplier `w` and skip connections."""

    def __init__(self, in_channels=3, w=4, n_classes=2):
        super(UNet, self).__init__()
        # Channel widths scaled by the multiplier.
        c16, c32, c64, c128, c256 = (int(16 * w), int(32 * w), int(64 * w), int(128 * w), int(256 * w))
        self.inc = inconv(in_channels, c16)
        self.down1 = down(c16, c32)
        self.down2 = down(c32, c64)
        self.down3 = down(c64, c128)
        self.down4 = down(c128, c128)
        self.up1 = up(c256, c64)
        self.up2 = up(c128, c32)
        self.up3 = up(c64, c16)
        self.up4 = up(c32, c16)
        self.outc = outconv(c16, n_classes)

    def forward(self, x):
        # Encoder path keeps each scale's activation for the skip connections.
        skip1 = self.inc(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        skip4 = self.down3(skip3)
        bottleneck = self.down4(skip4)
        # Decoder path fuses upsampled features with the matching skip.
        out = self.up1(bottleneck, skip4)
        out = self.up2(out, skip3)
        out = self.up3(out, skip2)
        out = self.up4(out, skip1)
        return self.outc(out)
def _build_corpus(data_path, env_params, sort_dict):
    """Load a cached Corpus from disk, building (and caching) it if absent.

    In distributed mode only rank 0 builds and saves the corpus; the other
    ranks wait on a broadcast used as a barrier and then load the saved file.
    """
    if sort_dict:
        corpus_path = os.path.join(data_path, 'corpus_sorted.pt')
    else:
        corpus_path = os.path.join(data_path, 'corpus.pt')
    if os.path.exists(corpus_path):
        print('Loading an existing corpus file from {}'.format(corpus_path))
        corpus = torch.load(corpus_path)
    else:
        print('Creating a corpus file at {}'.format(corpus_path))
        if env_params['distributed']:
            if (env_params['rank'] == 0):
                corpus = Corpus(data_path, sort_dict)
                torch.save(corpus, corpus_path)
                # Broadcast a dummy tensor as a barrier: releases the waiting ranks.
                torch.distributed.broadcast(torch.zeros(1).cuda(), src=0)
            else:
                print('Waiting rank0 to create a corpus file.')
                # Block until rank 0 has finished writing the corpus file.
                torch.distributed.broadcast(torch.zeros(1).cuda(), src=0)
                corpus = torch.load(corpus_path)
        else:
            corpus = Corpus(data_path, sort_dict)
            torch.save(corpus, corpus_path)
    return corpus
class ImageNet(ImageList):
    """ImageList specialization for ImageNet; forwards all arguments unchanged."""

    def __init__(self, root, list_file, memcached, mclient_path):
        super().__init__(root, list_file, memcached, mclient_path)
def Sharpness(img, v):
    """Adjust image sharpness by factor v (must lie in [0.1, 1.9])."""
    assert (0.1 <= v <= 1.9)
    enhancer = PIL.ImageEnhance.Sharpness(img)
    return enhancer.enhance(v)
_torch
class DataCollatorIntegrationTest(unittest.TestCase):
def setUp(self):
self.tmpdirname = tempfile.mkdtemp()
vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]']
self.vocab_file = os.path.join(self.tmpdirname, 'vocab.txt')
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_default_with_dict(self):
features = [{'label': i, 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch['labels'].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch['labels'].dtype, torch.long)
self.assertEqual(batch['inputs'].shape, torch.Size([8, 6]))
features = [{'label_ids': [0, 1, 2], 'inputs': [0, 1, 2, 3, 4, 5]} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch['labels'].equal(torch.tensor(([[0, 1, 2]] * 8))))
self.assertEqual(batch['labels'].dtype, torch.long)
self.assertEqual(batch['inputs'].shape, torch.Size([8, 6]))
features = [{'label': i, 'inputs': np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertTrue(batch['labels'].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch['labels'].dtype, torch.long)
self.assertEqual(batch['inputs'].shape, torch.Size([8, 10]))
features = [{'label': torch.tensor(i), 'inputs': np.random.randint(0, 10, [10])} for i in range(8)]
batch = default_data_collator(features)
self.assertEqual(batch['labels'].dtype, torch.long)
self.assertTrue(batch['labels'].equal(torch.tensor(list(range(8)))))
self.assertEqual(batch['labels'].dtype, torch.long)
self.assertEqual(batch['inputs'].shape, torch.Size([8, 10]))
def test_default_classification_and_regression(self):
data_collator = default_data_collator
features = [{'input_ids': [0, 1, 2, 3, 4], 'label': i} for i in range(4)]
batch = data_collator(features)
self.assertEqual(batch['labels'].dtype, torch.long)
features = [{'input_ids': [0, 1, 2, 3, 4], 'label': float(i)} for i in range(4)]
batch = data_collator(features)
self.assertEqual(batch['labels'].dtype, torch.float)
def test_default_with_no_labels(self):
    """A None label (or label_ids) must not produce a 'labels' entry."""
    for label_key in ('label', 'label_ids'):
        features = [{label_key: None, 'inputs': [0, 1, 2, 3, 4, 5]} for _ in range(8)]
        batch = default_data_collator(features)
        self.assertTrue('labels' not in batch)
        self.assertEqual(batch['inputs'].shape, torch.Size([8, 6]))
def test_data_collator_with_padding(self):
    """Padding collator pads to the longest, to max_length, or to a multiple."""
    tokenizer = BertTokenizer(self.vocab_file)
    features = [{'input_ids': [0, 1, 2]}, {'input_ids': [0, 1, 2, 3, 4, 5]}]

    # Default: pad to the longest sequence in the batch.
    batch = DataCollatorWithPadding(tokenizer)(features)
    self.assertEqual(batch['input_ids'].shape, torch.Size([2, 6]))
    self.assertEqual(batch['input_ids'][0].tolist(),
                     [0, 1, 2] + [tokenizer.pad_token_id] * 3)

    # Explicit max_length padding.
    batch = DataCollatorWithPadding(tokenizer, padding='max_length', max_length=10)(features)
    self.assertEqual(batch['input_ids'].shape, torch.Size([2, 10]))

    # Pad up to the next multiple of 8.
    batch = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)(features)
    self.assertEqual(batch['input_ids'].shape, torch.Size([2, 8]))
def test_data_collator_for_token_classification(self):
    """Token-classification collator pads labels in lockstep with input_ids."""
    tokenizer = BertTokenizer(self.vocab_file)
    features = [
        {'input_ids': [0, 1, 2], 'labels': [0, 1, 2]},
        {'input_ids': [0, 1, 2, 3, 4, 5], 'labels': [0, 1, 2, 3, 4, 5]},
    ]

    # Default padding: labels get the ignore index -100.
    batch = DataCollatorForTokenClassification(tokenizer)(features)
    self.assertEqual(batch['input_ids'].shape, torch.Size([2, 6]))
    self.assertEqual(batch['input_ids'][0].tolist(),
                     [0, 1, 2] + [tokenizer.pad_token_id] * 3)
    self.assertEqual(batch['labels'].shape, torch.Size([2, 6]))
    self.assertEqual(batch['labels'][0].tolist(), [0, 1, 2] + [-100] * 3)

    # max_length padding widens both tensors.
    batch = DataCollatorForTokenClassification(
        tokenizer, padding='max_length', max_length=10)(features)
    self.assertEqual(batch['input_ids'].shape, torch.Size([2, 10]))
    self.assertEqual(batch['labels'].shape, torch.Size([2, 10]))

    # Multiple-of-8 padding.
    batch = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8)(features)
    self.assertEqual(batch['input_ids'].shape, torch.Size([2, 8]))
    self.assertEqual(batch['labels'].shape, torch.Size([2, 8]))

    # Custom label pad id replaces -100.
    batch = DataCollatorForTokenClassification(tokenizer, label_pad_token_id=-1)(features)
    self.assertEqual(batch['input_ids'].shape, torch.Size([2, 6]))
    self.assertEqual(batch['input_ids'][0].tolist(),
                     [0, 1, 2] + [tokenizer.pad_token_id] * 3)
    self.assertEqual(batch['labels'].shape, torch.Size([2, 6]))
    self.assertEqual(batch['labels'][0].tolist(), [0, 1, 2] + [-1] * 3)
def _test_no_pad_and_pad(self, no_pad_features, pad_features):
    """Shared body for LM-collator tests, over pre-padded and ragged features.

    Exercises causal-LM collation (mlm=False), the missing-pad-token error,
    and MLM collation, each with and without pad_to_multiple_of.
    NOTE: the collator-call order mirrors the original exactly so the RNG
    consumption (and thus masking) is unchanged.
    """
    tokenizer = BertTokenizer(self.vocab_file)

    # Causal LM, default padding -> width 10.
    collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
    for feats in (no_pad_features, pad_features):
        batch = collator(feats)
        self.assertEqual(batch['input_ids'].shape, torch.Size((2, 10)))
        self.assertEqual(batch['labels'].shape, torch.Size((2, 10)))

    # Causal LM, pad to multiple of 8 -> width 16.
    collator = DataCollatorForLanguageModeling(tokenizer, mlm=False, pad_to_multiple_of=8)
    for feats in (no_pad_features, pad_features):
        batch = collator(feats)
        self.assertEqual(batch['input_ids'].shape, torch.Size((2, 16)))
        self.assertEqual(batch['labels'].shape, torch.Size((2, 16)))

    # Without a pad token, padding ragged features must raise.
    tokenizer._pad_token = None
    collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
    with self.assertRaises(ValueError):
        collator(pad_features)

    # MLM collation: deterministic seed, fresh tokenizer with its pad token.
    set_seed(42)
    tokenizer = BertTokenizer(self.vocab_file)

    def check_mlm(batch, seq_len):
        self.assertEqual(batch['input_ids'].shape, torch.Size((2, seq_len)))
        self.assertEqual(batch['labels'].shape, torch.Size((2, seq_len)))
        masked_tokens = batch['input_ids'] == tokenizer.mask_token_id
        self.assertTrue(torch.any(masked_tokens))
        # Every non-masked position must carry the ignore index.
        self.assertTrue(all(x == -100 for x in batch['labels'][~masked_tokens].tolist()))

    collator = DataCollatorForLanguageModeling(tokenizer)
    check_mlm(collator(no_pad_features), 10)
    check_mlm(collator(pad_features), 10)

    collator = DataCollatorForLanguageModeling(tokenizer, pad_to_multiple_of=8)
    check_mlm(collator(no_pad_features), 16)
    check_mlm(collator(pad_features), 16)
def test_data_collator_for_language_modeling(self):
    """Run the shared LM checks for dict features and for bare id lists."""
    for wrap in (lambda ids: {'input_ids': ids}, lambda ids: ids):
        no_pad_features = [wrap(list(range(10))), wrap(list(range(10)))]
        pad_features = [wrap(list(range(5))), wrap(list(range(10)))]
        self._test_no_pad_and_pad(no_pad_features, pad_features)
def test_data_collator_for_whole_word_mask(self):
    """Whole-word-mask collator returns (2, 10) input_ids and labels tensors."""
    tokenizer = BertTokenizer(self.vocab_file)
    collator = DataCollatorForWholeWordMask(tokenizer, return_tensors='pt')
    batch = collator([{'input_ids': list(range(10))} for _ in range(2)])
    for key in ('input_ids', 'labels'):
        self.assertEqual(batch[key].shape, torch.Size((2, 10)))
def test_plm(self):
    """Permutation-LM collator emits perm_mask/target_mapping; 2-D arrays are rejected."""
    tokenizer = BertTokenizer(self.vocab_file)
    no_pad_features = [{'input_ids': list(range(10))} for _ in range(2)]
    pad_features = [{'input_ids': list(range(5))}, {'input_ids': list(range(10))}]
    collator = DataCollatorForPermutationLanguageModeling(tokenizer)

    def check(batch):
        self.assertIsInstance(batch, dict)
        self.assertEqual(batch['input_ids'].shape, torch.Size((2, 10)))
        self.assertEqual(batch['perm_mask'].shape, torch.Size((2, 10, 10)))
        self.assertEqual(batch['target_mapping'].shape, torch.Size((2, 10, 10)))
        self.assertEqual(batch['labels'].shape, torch.Size((2, 10)))

    check(collator(pad_features))
    check(collator(no_pad_features))

    # A raw 1-D numpy array is not a valid feature dict: must raise.
    example = [np.random.randint(0, 5, [5])]
    with self.assertRaises(ValueError):
        collator(example)
def test_nsp(self):
    """LM collator forwards next_sentence_label untouched while padding the rest."""
    tokenizer = BertTokenizer(self.vocab_file)
    features = [{'input_ids': [0, 1, 2, 3, 4],
                 'token_type_ids': [0, 1, 2, 3, 4],
                 'next_sentence_label': i} for i in range(2)]
    for extra_kwargs, seq_len in (({}, 5), ({'pad_to_multiple_of': 8}, 8)):
        batch = DataCollatorForLanguageModeling(tokenizer, **extra_kwargs)(features)
        for key in ('input_ids', 'token_type_ids', 'labels'):
            self.assertEqual(batch[key].shape, torch.Size((2, seq_len)))
        self.assertEqual(batch['next_sentence_label'].shape, torch.Size((2,)))
def test_sop(self):
    """LM collator forwards sentence_order_label for tensor-valued features."""
    tokenizer = BertTokenizer(self.vocab_file)
    features = [{'input_ids': torch.tensor([0, 1, 2, 3, 4]),
                 'token_type_ids': torch.tensor([0, 1, 2, 3, 4]),
                 'sentence_order_label': i} for i in range(2)]
    for extra_kwargs, seq_len in (({}, 5), ({'pad_to_multiple_of': 8}, 8)):
        batch = DataCollatorForLanguageModeling(tokenizer, **extra_kwargs)(features)
        for key in ('input_ids', 'token_type_ids', 'labels'):
            self.assertEqual(batch[key].shape, torch.Size((2, seq_len)))
        self.assertEqual(batch['sentence_order_label'].shape, torch.Size((2,)))
def test_pr3635_diamond_d0():
    """Diamond-inheritance D0 object reports consistent fields through every base."""
    obj = m.MVD0()
    # Direct attribute access.
    assert (obj.b, obj.c, obj.d0) == (1, 2, 3)
    # b as seen from each level of the hierarchy.
    for getter in (obj.get_b_b, obj.get_c_b, obj.get_d0_b):
        assert getter() == 1
    # c as seen from C and D0.
    for getter in (obj.get_c_c, obj.get_d0_c):
        assert getter() == 2
    assert obj.get_d0_d0() == 3
class TestOptimizersGPU(unittest.TestCase):
    """GPU regression test for the adafactor optimizer under fp16 training."""

    def setUp(self):
        # Silence all log output from the training helpers during the test.
        logging.disable(logging.CRITICAL)

    def tearDown(self):
        logging.disable(logging.NOTSET)

    # BUG FIX: this line had degenerated into a bare tuple expression
    # `((not torch.cuda.is_available()), 'test requires a GPU')`, which is a
    # no-op, so the test ran (and failed) on CPU-only machines. It was clearly
    # meant to be the skipIf decorator restored here.
    @unittest.skipIf(not torch.cuda.is_available(), 'test requires a GPU')
    def test_flat_grads(self):
        """adafactor + --fp16 must fail with flattened grads and pass without."""
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_flat_grads') as data_dir:
                create_dummy_data(data_dir, num_examples=10, maxlen=5)
                preprocess_translation_data(data_dir)
                base_args = ['--required-batch-size-multiple', '1',
                             '--encoder-layers', '1', '--encoder-hidden-size', '32',
                             '--decoder-layers', '1', '--optimizer', 'adafactor',
                             '--fp16']
                # adafactor cannot consume flattened fp16 grads: must raise.
                with self.assertRaises(RuntimeError):
                    train_translation_model(data_dir, 'lstm', list(base_args))
                # Disabling grad flattening makes training succeed.
                train_translation_model(data_dir, 'lstm',
                                        base_args + ['--fp16-no-flatten-grads'])
def clear_monitor_files(training_dir):
    """Delete monitor files left in *training_dir* by a previous run, if any."""
    stale_files = detect_monitor_files(training_dir)
    if not stale_files:
        return
    logger.info('Clearing %d monitor files from previous run (because force=True was provided)', len(stale_files))
    for path in stale_files:
        os.unlink(path)
def test_modal_analysis_init():
    """CQTModalAnalysis must store the sample rate it was constructed with."""
    expected_rate = 48000
    analyzer = modal_analysis.CQTModalAnalysis(expected_rate)
    assert analyzer.sample_rate == expected_rate
class DatasetFactory(object):
    """Factory mapping a tracking-benchmark name to its Dataset class."""

    # BUG FIX: the method has no ``self`` parameter and is meant to be called
    # as ``DatasetFactory.create_dataset(...)``; without @staticmethod an
    # instance call passed the instance as a spurious positional argument.
    @staticmethod
    def create_dataset(**kwargs):
        """Build a benchmark dataset from keyword arguments.

        Args:
            **kwargs: must contain 'name' (benchmark identifier); all keys are
                forwarded to the concrete dataset constructor.

        Returns:
            The constructed dataset instance.

        Raises:
            AssertionError: if 'name' is missing.
            Exception: if the name matches no known benchmark.
        """
        assert 'name' in kwargs, 'should provide dataset name'
        name = kwargs['name']
        # Substring matches ('OTB', 'UAV', 'NFS') cover year/variant suffixes;
        # the remaining benchmarks require an exact name.
        if 'OTB' in name:
            return OTBDataset(**kwargs)
        if name == 'LaSOT':
            return LaSOTDataset(**kwargs)
        if 'UAV' in name:
            return UAVDataset(**kwargs)
        if 'NFS' in name:
            return NFSDataset(**kwargs)
        if name in ('VOT2018', 'VOT2016'):
            return VOTDataset(**kwargs)
        if name == 'VOT2018-LT':
            return VOTLTDataset(**kwargs)
        if name == 'TrackingNet':
            return TrackingNetDataset(**kwargs)
        if name == 'GOT-10k':
            return GOT10kDataset(**kwargs)
        # BUG FIX: error message read 'unknow dataset'.
        raise Exception('unknown dataset {}'.format(name))
class PTBReader(BaseTextReader):
    """Reads PTB-style bracketed parse trees, yielding the token list per line."""

    def read_line(self, line):
        """Yield the leaf tokens of the parse tree serialized on *line*.

        Tokens are lowercased when ``self.lowercase`` is set.
        """
        tokens = nltk.Tree.fromstring(line.strip()).leaves()
        if self.lowercase:
            tokens = [tok.lower() for tok in tokens]
        yield tokens
class PartitionRandomSampler(Sampler):
    """Sampler that shuffles indices independently inside each partition.

    ``partition_start_end_indices`` is an iterable of (start, end) pairs with
    *inclusive* end indices; partitions are visited in the given order, each
    permuted on its own.
    """

    def __init__(self, partition_start_end_indices):
        self.partition_start_end_indices = partition_start_end_indices
        # Total index count; end indices are inclusive, hence the +1.
        self.num_indices = max(end for _, end in self.partition_start_end_indices) + 1

    def __iter__(self):
        shuffled = []
        for start, end in self.partition_start_end_indices:
            # Permute the partition's length, then shift to absolute indices.
            shuffled.extend(start + torch.randperm(end - start + 1))
        return iter(shuffled)

    def __len__(self):
        return self.num_indices
def plot_alignment(alignment, gs):
    """Save a heatmap of *alignment* to the log dir, tagged with global step *gs*."""
    figure, axis = plt.subplots()
    image = axis.imshow(alignment)
    figure.colorbar(image)
    plt.title('{} Steps'.format(gs))
    plt.savefig('{}/alignment_{}k.png'.format(hp.logdir, gs // 1000), format='png')
def remove_seed_setting(code: str) -> str:
    """Strip every ``torch.manual_seed(...)`` call from *code* (replaced by '')."""
    seed_call = re.compile(r'torch\.manual_seed\(\S+\)')
    return seed_call.sub('', code)
def create_ssd_anchors(num_layers=6, min_scale=0.2, max_scale=0.95,
                       aspect_ratios=(1.0, 2.0, 3.0, 1.0 / 2, 1.0 / 3),
                       base_anchor_size=None, reduce_boxes_in_lowest_layer=True):
    """Build a MultipleGridAnchorGenerator with SSD-style (scale, ratio) specs.

    Scales are interpolated linearly between min_scale and max_scale across
    the layers; a trailing 1.0 only serves to compute the in-between scale for
    the last layer's unit-ratio anchor.
    """
    if base_anchor_size is None:
        base_anchor_size = [1.0, 1.0]
    base_anchor_size = tf.constant(base_anchor_size, dtype=tf.float32)
    scales = [min_scale + (max_scale - min_scale) * i / (num_layers - 1)
              for i in range(num_layers)] + [1.0]
    box_specs_list = []
    for layer, (scale, scale_next) in enumerate(zip(scales[:-1], scales[1:])):
        if layer == 0 and reduce_boxes_in_lowest_layer:
            # The lowest layer uses a reduced, fixed set of anchor specs.
            layer_box_specs = [(0.1, 1.0), (scale, 2.0), (scale, 0.5)]
        else:
            layer_box_specs = []
            for ratio in aspect_ratios:
                layer_box_specs.append((scale, ratio))
                # The unit ratio gets an extra anchor at the geometric-mean
                # scale, inserted right after it (ordering matters downstream).
                if ratio == 1.0:
                    layer_box_specs.append((np.sqrt(scale * scale_next), 1.0))
        box_specs_list.append(layer_box_specs)
    return MultipleGridAnchorGenerator(box_specs_list, base_anchor_size)
class MaxPool3d(nn.MaxPool3d):
    """MaxPool3d that tolerates empty inputs on old torch versions (< 1.7)."""

    def forward(self, x):
        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 7)):
            # Old torch cannot pool an empty tensor: compute the output shape
            # manually and return an empty tensor of that shape instead.
            out_shape = list(x.shape[:2])
            rounder = math.ceil if self.ceil_mode else math.floor
            params = zip(x.shape[-3:],
                         _triple(self.kernel_size),
                         _triple(self.padding),
                         _triple(self.stride),
                         _triple(self.dilation))
            for size, kernel, pad, stride, dilation in params:
                # Standard pooling output-size formula.
                extent = (size + 2 * pad - (dilation * (kernel - 1) + 1)) / stride + 1
                out_shape.append(rounder(extent))
            return NewEmptyTensorOp.apply(x, out_shape)
        return super().forward(x)
# NOTE(review): bare call whose result is discarded — this strongly resembles
# a registration decorator (e.g. ``@DATASETS.register_module()``) that lost
# its ``@`` during extraction; confirm against the original source. TODO confirm
_module()
class CustomDataset(Dataset):
    """Detection dataset over a list of per-image annotation dicts.

    Each entry of ``data_infos`` is expected to look like::

        {'filename': 'a.jpg', 'width': 1280, 'height': 720,
         'ann': {'bboxes': ndarray (n, 4), 'labels': ndarray (n,), ...}}

    Annotation and proposal files are loaded through ``mmcv``, preferring the
    configured file client when it supports ``get_local_path``.
    """

    CLASSES = None  # tuple/list of class names, or None when unknown
    PALETTE = None  # optional per-class colors used by visualizers

    def __init__(self, ann_file, pipeline, classes=None, data_root=None,
                 img_prefix='', seg_prefix=None, seg_suffix='.png',
                 proposal_file=None, test_mode=False, filter_empty_gt=True,
                 file_client_args=dict(backend='disk')):
        self.ann_file = ann_file
        self.data_root = data_root
        self.img_prefix = img_prefix
        self.seg_prefix = seg_prefix
        self.seg_suffix = seg_suffix
        self.proposal_file = proposal_file
        self.test_mode = test_mode
        self.filter_empty_gt = filter_empty_gt
        self.file_client = mmcv.FileClient(**file_client_args)
        self.CLASSES = self.get_classes(classes)

        # Join relative paths onto data_root when one is provided.
        if self.data_root is not None:
            if not osp.isabs(self.ann_file):
                self.ann_file = osp.join(self.data_root, self.ann_file)
            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):
                self.img_prefix = osp.join(self.data_root, self.img_prefix)
            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):
                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)
            if not (self.proposal_file is None or osp.isabs(self.proposal_file)):
                self.proposal_file = osp.join(self.data_root, self.proposal_file)

        # Load annotations and (optionally) pre-computed proposals.
        # BUG FIX: the proposal-path warning previously interpolated
        # self.ann_file (copy-paste); the shared helper reports the real path.
        self.data_infos = self._load_with_file_client(self.ann_file,
                                                      self.load_annotations)
        if self.proposal_file is not None:
            self.proposals = self._load_with_file_client(self.proposal_file,
                                                         self.load_proposals)
        else:
            self.proposals = None

        if not test_mode:
            # Drop invalid images and group the rest by aspect ratio; both are
            # only needed for training-time batching.
            valid_inds = self._filter_imgs()
            self.data_infos = [self.data_infos[i] for i in valid_inds]
            if self.proposals is not None:
                self.proposals = [self.proposals[i] for i in valid_inds]
            self._set_group_flag()
        self.pipeline = Compose(pipeline)

    def _load_with_file_client(self, path, load_fn):
        """Load *path* via *load_fn*, fetching a local copy when the client can."""
        if hasattr(self.file_client, 'get_local_path'):
            with self.file_client.get_local_path(path) as local_path:
                return load_fn(local_path)
        warnings.warn(
            f'The used MMCV version does not have get_local_path. We treat '
            f'the {path} as local paths and it might cause errors if the path '
            'is not a local path. Please use MMCV>= 1.3.16 if you meet errors.')
        return load_fn(path)

    def __len__(self):
        """Number of (filtered) samples."""
        return len(self.data_infos)

    def load_annotations(self, ann_file):
        """Load the annotation list; subclasses may override for other formats."""
        return mmcv.load(ann_file)

    def load_proposals(self, proposal_file):
        """Load pre-computed proposals."""
        return mmcv.load(proposal_file)

    def get_ann_info(self, idx):
        """Return the 'ann' dict of sample *idx*."""
        return self.data_infos[idx]['ann']

    def get_cat_ids(self, idx):
        """Return the category ids present in sample *idx* as a list of ints."""
        # BUG FIX: np.int was removed in NumPy >= 1.24; plain int is equivalent.
        return self.data_infos[idx]['ann']['labels'].astype(int).tolist()

    def pre_pipeline(self, results):
        """Seed *results* with prefix paths and empty field lists (in place)."""
        results['img_prefix'] = self.img_prefix
        results['seg_prefix'] = self.seg_prefix
        results['proposal_file'] = self.proposal_file
        results['bbox_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []

    def _filter_imgs(self, min_size=32):
        """Return indices of images whose shorter side is >= *min_size*."""
        if self.filter_empty_gt:
            warnings.warn('CustomDataset does not support filtering empty gt images.')
        return [i for i, img_info in enumerate(self.data_infos)
                if min(img_info['width'], img_info['height']) >= min_size]

    def _set_group_flag(self):
        """Group images by aspect ratio: flag 1 if width/height > 1, else 0."""
        self.flag = np.zeros(len(self), dtype=np.uint8)
        for i in range(len(self)):
            img_info = self.data_infos[i]
            if img_info['width'] / img_info['height'] > 1:
                self.flag[i] = 1

    def _rand_another(self, idx):
        """Pick a random index from the same aspect-ratio group as *idx*."""
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)

    def __getitem__(self, idx):
        """Return the pipeline output; in train mode, resample on failure."""
        if self.test_mode:
            return self.prepare_test_img(idx)
        while True:
            data = self.prepare_train_img(idx)
            if data is None:
                # Pipeline rejected this sample (e.g. no valid gt) — retry.
                idx = self._rand_another(idx)
                continue
            return data

    def prepare_train_img(self, idx):
        """Build and run the training pipeline for sample *idx*."""
        results = dict(img_info=self.data_infos[idx],
                       ann_info=self.get_ann_info(idx))
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Build and run the test pipeline for sample *idx* (no annotations)."""
        results = dict(img_info=self.data_infos[idx])
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    # BUG FIX: this takes ``cls`` but the @classmethod decorator was missing,
    # so class-level calls (CustomDataset.get_classes(...)) failed. Instance
    # calls keep their previous behavior since CLASSES is a class attribute.
    @classmethod
    def get_classes(cls, classes=None):
        """Resolve class names: None -> cls.CLASSES, str -> names file, seq -> as-is."""
        if classes is None:
            return cls.CLASSES
        if isinstance(classes, str):
            return mmcv.list_from_file(classes)
        if isinstance(classes, (tuple, list)):
            return classes
        raise ValueError(f'Unsupported type {type(classes)} of classes.')

    def get_cat2imgs(self):
        """Map each category index to the list of image indices containing it."""
        if self.CLASSES is None:
            raise ValueError('self.CLASSES can not be None')
        cat2imgs = {i: [] for i in range(len(self.CLASSES))}
        for i in range(len(self)):
            for cat in set(self.get_cat_ids(i)):
                cat2imgs[cat].append(i)
        return cat2imgs

    def format_results(self, results, **kwargs):
        """Hook for subclasses that dump *results* in a benchmark format.

        BUG FIX: the original definition had no body at all (a syntax error /
        lost block); it is restored here as an intentional no-op hook.
        """

    def evaluate(self, results, metric='mAP', logger=None,
                 proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None):
        """Evaluate detection *results* with 'mAP' or 'recall'.

        Returns an OrderedDict of metric name -> value. Raises KeyError for an
        unsupported metric.
        """
        if not isinstance(metric, str):
            assert len(metric) == 1
            metric = metric[0]
        allowed_metrics = ['mAP', 'recall']
        if metric not in allowed_metrics:
            raise KeyError(f'metric {metric} is not supported')
        annotations = [self.get_ann_info(i) for i in range(len(self))]
        eval_results = OrderedDict()
        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr
        if metric == 'mAP':
            assert isinstance(iou_thrs, list)
            mean_aps = []
            for iou_thr in iou_thrs:
                print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}')
                mean_ap, _ = eval_map(results, annotations,
                                      scale_ranges=scale_ranges, iou_thr=iou_thr,
                                      dataset=self.CLASSES, logger=logger)
                mean_aps.append(mean_ap)
                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
        elif metric == 'recall':
            gt_bboxes = [ann['bboxes'] for ann in annotations]
            recalls = eval_recalls(gt_bboxes, results, proposal_nums, iou_thr,
                                   logger=logger)
            for i, num in enumerate(proposal_nums):
                for j, iou in enumerate(iou_thrs):
                    eval_results[f'{num}{iou}'] = recalls[i, j]
            if recalls.shape[1] > 1:
                # Average recall over all IoU thresholds.
                ar = recalls.mean(axis=1)
                for i, num in enumerate(proposal_nums):
                    eval_results[f'{num}'] = ar[i]
        return eval_results

    def __repr__(self):
        """Human-readable summary with per-category instance counts."""
        dataset_type = 'Test' if self.test_mode else 'Train'
        result = (f'\n{self.__class__.__name__} {dataset_type} dataset '
                  f'with number of images {len(self)}, and instance counts:\n')
        if self.CLASSES is None:
            result += 'Category names are not provided. \n'
            return result
        # One extra slot (index -1) counts images without any annotation.
        instance_count = np.zeros(len(self.CLASSES) + 1).astype(int)
        for idx in range(len(self)):
            label = self.get_ann_info(idx)['labels']
            unique, counts = np.unique(label, return_counts=True)
            if len(unique) > 0:
                instance_count[unique] += counts
            else:
                instance_count[-1] += 1
        # Lay counts out as 5 (category, count) pairs per table row.
        table_data = [['category', 'count'] * 5]
        row_data = []
        for cls_idx, count in enumerate(instance_count):
            if cls_idx < len(self.CLASSES):
                row_data += [f'{cls_idx} [{self.CLASSES[cls_idx]}]', f'{count}']
            else:
                row_data += ['-1 background', f'{count}']
            if len(row_data) == 10:
                table_data.append(row_data)
                row_data = []
        if len(row_data) >= 2:
            if row_data[-1] == '0':
                # Drop a trailing zero background entry.
                row_data = row_data[:-2]
            if len(row_data) >= 2:
                table_data.append([])
                table_data.append(row_data)
        table = AsciiTable(table_data)
        result += table.table
        return result
def get_spans_and_siblings(tree):
    """Enumerate all constituent spans and sibling pairs of a binary tree.

    *tree* is either a leaf (str/int) or a pair (left, right). Spans are
    half-open leaf-index intervals (start, end), listed pre-order. Siblings
    are (left_child_span, right_child_span, label) triples, where label is
    'root' for the top node and otherwise 'l'/'r' for the branch the parent
    hangs off.
    """
    def walk(node, offset=0, label='root'):
        # A leaf occupies exactly one position.
        if isinstance(node, (str, int)):
            return 1, [(offset, offset + 1)], []
        left_size, left_spans, left_sibs = walk(node[0], offset=offset, label='l')
        right_size, right_spans, right_sibs = walk(node[1],
                                                   offset=offset + left_size,
                                                   label='r')
        total = left_size + right_size
        # Each subtree's first span covers the whole subtree, so index 0 of
        # the child span lists gives the child constituents themselves.
        spans = [(offset, offset + total)] + left_spans + right_spans
        sibs = [(left_spans[0], right_spans[0], label)] + left_sibs + right_sibs
        return total, spans, sibs

    _, spans, siblings = walk(tree)
    return spans, siblings
class ConfGenerator(nn.Module):
    """Produces a binary confidence label: 1 where |est - gt| < theta, else 0."""

    def __init__(self, theta):
        super(ConfGenerator, self).__init__()
        if not isinstance(theta, (int, float)):
            raise TypeError('(int,float) is expected, got {}'.format(type(theta)))
        # Disparity-error threshold below which an estimate counts as confident.
        self.theta = theta

    def forward(self, estDisp, gtDisp):
        """Return a (B, 1, H, W) mask (same dtype as gt) of confident estimates."""
        if not torch.is_tensor(gtDisp):
            raise TypeError('ground truth disparity map is expected to be tensor, got {}'.format(type(gtDisp)))
        if not torch.is_tensor(estDisp):
            raise TypeError('estimated disparity map is expected to be tensor, got {}'.format(type(estDisp)))
        assert estDisp.shape == gtDisp.shape
        # Normalize both maps to NCHW with a single channel.
        if gtDisp.dim() == 2:
            height, width = gtDisp.size(0), gtDisp.size(1)
            gtDisp = gtDisp.view(1, 1, height, width)
            estDisp = estDisp.view(1, 1, height, width)
        if gtDisp.dim() == 3:
            batch, height, width = gtDisp.size(0), gtDisp.size(1), gtDisp.size(2)
            gtDisp = gtDisp.view(batch, 1, height, width)
            estDisp = estDisp.view(batch, 1, height, width)
        if gtDisp.dim() == 4:
            if gtDisp.size(1) == 1:
                # Stored on self to mirror the original interface.
                self.gtDisp = gtDisp
                self.estDisp = estDisp
            else:
                raise ValueError('2nd dimension size should be 1, got {}'.format(gtDisp.size(1)))
        # Inside-threshold comparison, cast to gt's dtype (values 1.0 / 0.0).
        return torch.lt(torch.abs(self.estDisp - self.gtDisp), self.theta).type_as(self.gtDisp)
def array_from_nested_dictionary(nested_dict, array_fn, dtype='float32', square_result=False):
    """Convert {outer: {inner: value}} into a 2-D array plus row/col key lists.

    ``array_fn(shape, dtype)`` allocates the array (e.g. a zeros constructor).
    With ``square_result=True`` rows and columns share one combined key index,
    so the output is square.

    Returns:
        (array, sorted_row_keys, sorted_column_keys)
    """
    if square_result:
        row_index = column_index = flattened_nested_key_indices(nested_dict)
    else:
        row_index, column_index = nested_key_indices(nested_dict)
    result = array_fn((len(row_index), len(column_index)), dtype)
    for outer_key, inner_dict in nested_dict.items():
        row = row_index[outer_key]
        for inner_key, value in inner_dict.items():
            result[row, column_index[inner_key]] = value
    return (result,
            index_dict_to_sorted_list(row_index),
            index_dict_to_sorted_list(column_index))
def _create_wr_extended_audio(filename, port, mixer_mode, loopback_gain, microphone_gain, profile, level, user):
    """Open a sink file and write the extended-audio stream header + configuration."""
    writer = _writer()
    writer.open(filename)
    writer.put(_create_header(port, user))
    writer.put(hl2ss._create_configuration_for_extended_audio(
        mixer_mode, loopback_gain, microphone_gain, profile, level))
    return writer
class LabelClusterUtils():
    """Clusters ground-truth box dimensions (l, w, h) per class with k-means.

    Cluster centres and per-cluster standard deviations are cached as text
    files under ``<avod root>/data/label_clusters`` and reloaded on demand.
    """

    def __init__(self, dataset):
        self._dataset = dataset
        self.cluster_split = dataset.cluster_split
        self.data_dir = avod.root_dir() + '/data/label_clusters'
        self.clusters = []
        self.std_devs = []

    # BUG FIX: this helper takes no ``self`` and is invoked through the class
    # (LabelClusterUtils._filter_labels_by_class(...)); the @staticmethod
    # decorator was missing.
    @staticmethod
    def _filter_labels_by_class(obj_labels, classes):
        """Group [l, w, h] triplets of *obj_labels* by their index in *classes*."""
        filtered = [[] for _ in range(len(classes))]
        for obj_label in obj_labels:
            if obj_label.type in classes:
                class_idx = classes.index(obj_label.type)
                filtered[class_idx].append([obj_label.l, obj_label.w, obj_label.h])
        return filtered

    def _get_cluster_file_path(self, dataset, cls, num_clusters):
        """Build the cache-file path for (dataset, class, cluster count)."""
        # NOTE: dataset.data_split used to be passed as an unused fourth
        # format() argument; the path only encodes name and cluster_split.
        file_path = '{}/{}/{}/'.format(self.data_dir, dataset.name,
                                       dataset.cluster_split)
        file_path += '{}_{}.txt'.format(cls, num_clusters)
        return file_path

    def _write_clusters_to_file(self, file_path, clusters, std_devs):
        """Persist clusters stacked on top of std_devs as whitespace text."""
        # BUG FIX: the original wrapped np.savetxt in a redundant open('w+')
        # handle, truncating and double-opening the file to no effect;
        # savetxt manages the file itself. exist_ok avoids a makedirs race.
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        np.savetxt(file_path, np.vstack([clusters, std_devs]), fmt='%.3f')

    def _read_clusters_from_file(self, dataset, cls, num_clusters):
        """Load cached (clusters, std_devs); (None, None) when no cache exists."""
        file_path = self._get_cluster_file_path(dataset, cls, num_clusters)
        if os.path.isfile(file_path):
            # BUG FIX: dropped the redundant open() handle around np.loadtxt.
            data = np.loadtxt(file_path)
            clusters = np.array(data[0:num_clusters])
            std_devs = np.array(data[num_clusters:])
            return (clusters, std_devs)
        return (None, None)

    def _flatten_data(self, data):
        """Flatten per-class [l, w, h] lists into one (N, 3) array."""
        all_data = []
        for class_idx in range(len(data)):
            all_data.extend(np.asarray(data[class_idx]).reshape((-1, 3)))
        return np.asarray(all_data)

    def get_clusters(self):
        """Return (clusters, std_devs) per class, computing and caching any missing ones."""
        classes = self._dataset.classes
        num_clusters = self._dataset.num_clusters
        all_clusters = [[] for _ in range(len(classes))]
        all_std_devs = [[] for _ in range(len(classes))]
        classes_not_loaded = []
        # First try to serve every class from the on-disk cache.
        for class_idx in range(len(classes)):
            clusters, std_devs = self._read_clusters_from_file(
                self._dataset, classes[class_idx], num_clusters[class_idx])
            if clusters is not None:
                all_clusters[class_idx].extend(np.asarray(clusters))
                all_std_devs[class_idx].extend(np.asarray(std_devs))
            else:
                classes_not_loaded.append(class_idx)
        if len(classes_not_loaded) == 0:
            return (all_clusters, all_std_devs)
        # Otherwise read every label of the cluster split once...
        sample_list = self._dataset.load_sample_names(self.cluster_split)
        all_labels = [[] for _ in range(len(classes))]
        num_samples = len(sample_list)
        for sample_idx in range(num_samples):
            sys.stdout.write('\rClustering labels {} / {}'.format(
                sample_idx + 1, num_samples))
            sys.stdout.flush()
            img_idx = int(sample_list[sample_idx])
            obj_labels = obj_utils.read_labels(self._dataset.label_dir, img_idx)
            filtered_labels = LabelClusterUtils._filter_labels_by_class(
                obj_labels, self._dataset.classes)
            for class_idx in range(len(classes)):
                all_labels[class_idx].extend(filtered_labels[class_idx])
        print('\nFinished reading labels, clustering data...\n')
        # ...then k-means the classes that had no cache and persist the result.
        for class_idx in classes_not_loaded:
            labels_for_class = np.array(all_labels[class_idx])
            n_clusters_for_class = num_clusters[class_idx]
            if len(labels_for_class) < n_clusters_for_class:
                raise ValueError(
                    'Number of samples is less than number of clusters '
                    '{} < {}'.format(len(labels_for_class), n_clusters_for_class))
            k_means = KMeans(n_clusters=n_clusters_for_class,
                             random_state=0).fit(labels_for_class)
            clusters_for_class = []
            std_devs_for_class = []
            for cluster_idx in range(len(k_means.cluster_centers_)):
                cluster_centre = k_means.cluster_centers_[cluster_idx]
                labels_in_cluster = labels_for_class[k_means.labels_ == cluster_idx]
                std_dev = np.std(labels_in_cluster, axis=0)
                # Round to 3 decimals so in-memory values match the cache file.
                clusters_for_class.append(
                    [float('%.3f' % value) for value in cluster_centre])
                std_devs_for_class.append(
                    [float('%.3f' % value) for value in std_dev])
            file_path = self._get_cluster_file_path(
                self._dataset, classes[class_idx], num_clusters[class_idx])
            self._write_clusters_to_file(file_path, clusters_for_class,
                                         std_devs_for_class)
            all_clusters[class_idx].extend(np.asarray(clusters_for_class))
            all_std_devs[class_idx].extend(np.asarray(std_devs_for_class))
        return (all_clusters, all_std_devs)
def setup_tictacteo(variation=None):
    """Build the rules manager and environment for the tic-tac-toe ILP task.

    Returns (RulesManager, environment); *variation*, when given, selects an
    environment variant via env.vary().
    """
    env = TicTacTeo()
    if variation:
        env = env.vary(variation)
    # Template lists: one single-clause template with invention allowed for
    # the target and most invented predicates, plus an existential two-clause
    # template for the last invented predicate.
    main_templates = [RuleTemplate(1, True)]
    invented_templates = [RuleTemplate(1, True)]
    existential_templates = [RuleTemplate(2, False)]
    invented = Predicate('invented', 2)
    invented2 = Predicate('invented2', 2)
    invented3 = Predicate('invented3', 1)
    invented4 = Predicate('invented4', 1)
    rule_map = {
        invented: invented_templates,
        PLACE: main_templates,
        invented2: invented_templates,
        invented3: invented_templates,
        invented4: existential_templates,
    }
    program_template = ProgramTemplate(
        [invented, invented2, invented3, invented4], rule_map, 4)
    return (RulesManager(env.language, program_template), env)
class MSVD_Feats_DataLoader(Dataset):
    """MSVD captioning dataset over pre-extracted video features.

    Loads a pickled ``{video_id: (num_frames, feat_dim) array}`` feature dict
    plus caption/split files from ``data_path``, and yields fixed-length text
    id arrays, video feature stacks and decoder caption ids per sample.
    """

    def __init__(self, data_path, features_path, tokenizer, max_words=30,
                 feature_framerate=1.0, max_frames=100, split_type=''):
        self.data_path = data_path
        self.features_path = features_path
        # {video_id: ndarray of shape (num_frames, feature_size)}
        self.feature_dict = pickle.load(open(features_path, 'rb'))
        self.feature_framerate = feature_framerate
        self.max_words = max_words
        self.max_frames = max_frames
        self.tokenizer = tokenizer
        assert split_type in ['train', 'val', 'test']
        split_dict = {
            'train': os.path.join(self.data_path, 'train_list_mapping.txt'),
            'val': os.path.join(self.data_path, 'val_list_mapping.txt'),
            'test': os.path.join(self.data_path, 'test_list_mapping.txt'),
        }
        caption_file = os.path.join(self.data_path, 'raw-captions_mapped.pkl')
        # Probe one known video to discover the feature dimensionality.
        self.feature_size = self.feature_dict['vid1'].shape[-1]
        with open(caption_file, 'rb') as f:
            captions = pickle.load(f)
        with open(split_dict[split_type], 'r') as fp:
            choiced_video_ids = [itm.strip() for itm in fp.readlines()]
        self.sample_len = 0
        self.sentences_dict = {}
        self.video_sentences_dict = defaultdict(list)
        if split_type == 'train':
            # One training sample per (video, caption) pair.
            for video_id in captions:
                if video_id in choiced_video_ids:
                    for cap in captions[video_id]:
                        cap_txt = ' '.join(cap)
                        self.sentences_dict[len(self.sentences_dict)] = (video_id, cap_txt)
                        self.video_sentences_dict[video_id].append(cap_txt)
        elif split_type in ('val', 'test'):
            # One sample per video; all captions kept for multi-reference eval.
            for itm in captions:
                if itm in choiced_video_ids:
                    for cap in captions[itm]:
                        self.video_sentences_dict[itm].append(' '.join(cap))
            for vid in choiced_video_ids:
                self.sentences_dict[len(self.sentences_dict)] = (vid, self.video_sentences_dict[vid][0])
        else:
            raise NotImplementedError
        self.sample_len = len(self.sentences_dict)

    def __len__(self):
        return self.sample_len

    def _get_text(self, video_id, caption=None):
        """Tokenize the encoder prompt and target caption into fixed-length arrays.

        Returns (pairs_text, 4 empty placeholders, input/decoder-mask/output
        caption id arrays, choice_video_ids).
        """
        k = 1
        choice_video_ids = [video_id]
        # BUG FIX: np.long was removed from NumPy (>= 1.24); use np.int64.
        pairs_text = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_input_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_output_caption_ids = np.zeros((k, self.max_words), dtype=np.int64)
        pairs_decoder_mask = np.zeros((k, self.max_words), dtype=np.int64)
        for i, video_id in enumerate(choice_video_ids):
            # The encoder text is just "[CLS] [SEP]" zero-padded to max_words.
            words = ['[CLS]']
            total_length_with_CLS = self.max_words - 1
            if len(words) > total_length_with_CLS:
                words = words[:total_length_with_CLS]
            words = words + ['[SEP]']
            input_ids = self.tokenizer.convert_tokens_to_ids(words)
            while len(input_ids) < self.max_words:
                input_ids.append(0)
            assert len(input_ids) == self.max_words
            pairs_text[i] = np.array(input_ids)
            if caption is not None:
                caption_words = self.tokenizer.tokenize(caption)
            else:
                caption_words = self._get_single_text(video_id)
            if len(caption_words) > total_length_with_CLS:
                caption_words = caption_words[:total_length_with_CLS]
            # Teacher forcing: decoder input starts with [CLS], target ends with [SEP].
            input_caption_words = ['[CLS]'] + caption_words
            output_caption_words = caption_words + ['[SEP]']
            input_caption_ids = self.tokenizer.convert_tokens_to_ids(input_caption_words)
            output_caption_ids = self.tokenizer.convert_tokens_to_ids(output_caption_words)
            decoder_mask = [1] * len(input_caption_ids)
            while len(input_caption_ids) < self.max_words:
                input_caption_ids.append(0)
                output_caption_ids.append(0)
                decoder_mask.append(0)
            assert len(input_caption_ids) == self.max_words
            assert len(output_caption_ids) == self.max_words
            assert len(decoder_mask) == self.max_words
            pairs_input_caption_ids[i] = np.array(input_caption_ids)
            pairs_output_caption_ids[i] = np.array(output_caption_ids)
            pairs_decoder_mask[i] = np.array(decoder_mask)
        return (pairs_text, np.array([]), np.array([]), np.array([]), np.array([]),
                pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids,
                choice_video_ids)

    def _get_single_text(self, video_id):
        """Tokenize one randomly chosen reference caption for *video_id*."""
        # BUG FIX: this read the never-assigned ``self.sentences`` and always
        # raised AttributeError; ``self.video_sentences_dict`` holds the
        # per-video caption lists it was clearly meant to sample from.
        captions = self.video_sentences_dict[video_id]
        caption = captions[random.randint(0, len(captions) - 1)]
        return self.tokenizer.tokenize(caption)

    def _get_video(self, choice_video_ids):
        """Stack each video's features into a (num_videos, max_frames, feat) array."""
        # BUG FIX: np.long / np.float were removed from NumPy; use fixed dtypes.
        video_mask = np.zeros((len(choice_video_ids), self.max_frames), dtype=np.int64)
        max_video_length = [0] * len(choice_video_ids)
        video = np.zeros((len(choice_video_ids), self.max_frames, self.feature_size),
                         dtype=np.float64)
        for i, video_id in enumerate(choice_video_ids):
            video_slice = self.feature_dict[video_id]
            if self.max_frames < video_slice.shape[0]:
                video_slice = video_slice[:self.max_frames]
            slice_shape = video_slice.shape
            max_video_length[i] = max(max_video_length[i], slice_shape[0])
            if len(video_slice) < 1:
                print('video_id: {}'.format(video_id))
            else:
                video[i][:slice_shape[0]] = video_slice
        # NOTE(review): max_video_length is computed but video_mask is never
        # set to 1s; upstream variants fill video_mask[i][:length] = 1 here —
        # confirm whether the all-zero mask is intended.
        return (video, video_mask, np.array([]), np.array([]))

    def __getitem__(self, idx):
        (video_id, caption) = self.sentences_dict[idx]
        (pairs_text, pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels,
         pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids,
         choice_video_ids) = self._get_text(video_id, caption)
        (video, video_mask, masked_video, video_labels_index) = self._get_video(choice_video_ids)
        # Masked-modeling side inputs are unused by this loader; return empties.
        (pairs_mask, pairs_segment, pairs_masked_text, pairs_token_labels,
         masked_video, video_labels_index) = (np.array([]), np.array([]), np.array([]),
                                              np.array([]), np.array([]), np.array([]))
        return (pairs_text, pairs_mask, pairs_segment, video, video_mask,
                pairs_masked_text, pairs_token_labels, masked_video, video_labels_index,
                pairs_input_caption_ids, pairs_decoder_mask, pairs_output_caption_ids)
def filecopy(src_path: Union[Path, str], dst_path: Union[Path, str]) -> None:
    """Copy a single file from ``src_path`` to ``dst_path``.

    Both paths are run through ``verify_path`` before the copy.
    """
    source = verify_path(src_path)
    destination = verify_path(dst_path)
    log.debug(f'Copying over file from {source} to {destination}')
    shutil.copy(source, destination)
def preprocess_data_t5(args):
    """Prepend T5-style task prefixes to the sentence fields of a GLUE dataset.

    Deduplicated from ten near-identical branches (one of which reused the
    wrong closure name for wnli); behavior is unchanged for every task.

    Args:
        args: dict-like config; ``args['ID_name']`` selects the GLUE task.

    Returns:
        The dataset from ``preprocess_data(args)`` with prefixed sentence fields.

    Raises:
        ValueError: if the task name is not a known GLUE task.
    """
    data = preprocess_data(args)
    task = args['ID_name']
    if task in ('sst2', 'cola'):
        # Single-sentence tasks: "<task> sentence: ..."
        prefix = task + ' sentence: '

        def add_prefix(example):
            example['sentence'] = prefix + example['sentence']
            return example
    elif task in ('mnli', 'mnli_matched', 'mnli_mismatched'):
        # MNLI swaps the pair: sentence2 becomes the hypothesis (first field).
        def add_prefix(example):
            hypothesis = 'mnli hypothesis: ' + example['sentence2']
            example['sentence2'] = 'premise: ' + example['sentence1']
            example['sentence1'] = hypothesis
            return example
    elif task in ('mrpc', 'stsb', 'rte', 'wnli'):
        # Sentence-pair tasks with the "<task> sentence1:" / "sentence2:" scheme.
        first_prefix = task + ' sentence1: '

        def add_prefix(example):
            example['sentence1'] = first_prefix + example['sentence1']
            example['sentence2'] = 'sentence2: ' + example['sentence2']
            return example
    elif task == 'qqp':
        def add_prefix(example):
            example['sentence1'] = 'qqp question1: ' + example['sentence1']
            example['sentence2'] = 'question2: ' + example['sentence2']
            return example
    elif task == 'qnli':
        def add_prefix(example):
            example['sentence1'] = 'qnli question: ' + example['sentence1']
            example['sentence2'] = 'sentence: ' + example['sentence2']
            return example
    else:
        raise ValueError('task_name not specified in def:preprocess_data')
    return data.map(add_prefix)
class ELUParameter(message.Message):
    """Protobuf message class for the ELU layer parameter.

    NOTE(review): this looks like caffe-style generated protobuf code; the
    descriptor ``_ELUPARAMETER`` is defined elsewhere in this module. Do not
    edit by hand — regenerate from the .proto instead.
    """
    # Python-2-style metaclass hook: reflection builds the concrete message
    # class from the descriptor below.
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _ELUPARAMETER
class RevResBottleneck(nn.Module):
    """Reversible-ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand.

    The first 1x1 conv is pre-activated (BN+ReLU before conv) except when
    ``preactivate`` is False, e.g. for the first unit after the stem.
    """

    def __init__(self, in_channels, out_channels, stride, preactivate, bottleneck_factor=4):
        super(RevResBottleneck, self).__init__()
        mid_channels = out_channels // bottleneck_factor
        # conv1 is built first to keep submodule construction order unchanged.
        self.conv1 = (pre_conv1x1_block(in_channels=in_channels, out_channels=mid_channels)
                      if preactivate
                      else conv1x1(in_channels=in_channels, out_channels=mid_channels))
        self.conv2 = pre_conv3x3_block(in_channels=mid_channels, out_channels=mid_channels, stride=stride)
        self.conv3 = pre_conv1x1_block(in_channels=mid_channels, out_channels=out_channels)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        out = self.conv3(out)
        return out
class Upsample_unit(nn.Module):
    """One upsample unit of an MSPN-style decoder stage.

    Fuses a lateral (skip) feature ``x`` with the upsampled feature ``up_x``
    from the previous, coarser unit. Optionally emits two skip features for
    the next stage and — from the last unit only — a cross-stage conv output.

    Args:
        ind: index of this unit in the stage (0 = coarsest; takes no ``up_x``).
        num_units: total number of units in the stage.
        in_channels: channels of the lateral input ``x``.
        unit_channels: working channel width inside the unit.
        gen_skip: if True, also produce the two skip outputs.
        gen_cross_conv: if True, the last unit produces the cross-stage conv.
        norm_cfg: normalization config forwarded to every ConvModule.
        out_channels: channels of the cross-conv output.
    """
    def __init__(self, ind, num_units, in_channels, unit_channels=256, gen_skip=False, gen_cross_conv=False, norm_cfg=dict(type='BN'), out_channels=64):
        # Deep-copy so a shared config dict is not mutated across modules.
        norm_cfg = cp.deepcopy(norm_cfg)
        super().__init__()
        self.num_units = num_units
        self.norm_cfg = norm_cfg
        # Lateral 1x1 projection of x into the working width (no activation).
        self.in_skip = ConvModule(in_channels, unit_channels, kernel_size=1, stride=1, padding=0, norm_cfg=self.norm_cfg, act_cfg=None, inplace=True)
        self.relu = nn.ReLU(inplace=True)
        self.ind = ind
        if (self.ind > 0):
            # 1x1 conv applied to the upsampled coarser feature before fusion.
            self.up_conv = ConvModule(unit_channels, unit_channels, kernel_size=1, stride=1, padding=0, norm_cfg=self.norm_cfg, act_cfg=None, inplace=True)
        self.gen_skip = gen_skip
        if self.gen_skip:
            # skip1 comes from the raw input, skip2 from the fused output.
            self.out_skip1 = ConvModule(in_channels, in_channels, kernel_size=1, stride=1, padding=0, norm_cfg=self.norm_cfg, inplace=True)
            self.out_skip2 = ConvModule(unit_channels, in_channels, kernel_size=1, stride=1, padding=0, norm_cfg=self.norm_cfg, inplace=True)
        self.gen_cross_conv = gen_cross_conv
        if ((self.ind == (num_units - 1)) and self.gen_cross_conv):
            self.cross_conv = ConvModule(unit_channels, out_channels, kernel_size=1, stride=1, padding=0, norm_cfg=self.norm_cfg, inplace=True)
    def forward(self, x, up_x):
        """Fuse the lateral feature with the upsampled coarser feature.

        Returns:
            (out, skip1, skip2, cross_conv) — the latter three are None when
            the corresponding option is disabled / this is not the last unit.
        """
        out = self.in_skip(x)
        if (self.ind > 0):
            # Bilinearly resize the coarser feature to x's spatial size, then fuse.
            up_x = F.interpolate(up_x, size=(x.size(2), x.size(3)), mode='bilinear', align_corners=True)
            up_x = self.up_conv(up_x)
            out = (out + up_x)
        out = self.relu(out)
        skip1 = None
        skip2 = None
        if self.gen_skip:
            skip1 = self.out_skip1(x)
            skip2 = self.out_skip2(out)
        cross_conv = None
        if ((self.ind == (self.num_units - 1)) and self.gen_cross_conv):
            cross_conv = self.cross_conv(out)
        return (out, skip1, skip2, cross_conv)
class SelfAttn(MultiHeadAttn):
    """Multi-head self-attention: queries, keys and values all come from the input."""

    def __init__(self, dim_in, dim_out, num_heads=8):
        # q/k/v dims are all dim_in for self-attention.
        super().__init__(dim_in, dim_in, dim_in, dim_out, num_heads)

    def forward(self, x, mask=None):
        # Attend over x itself.
        return super().forward(x, x, x, mask=mask)
def main():
    """Extract audio from one video file, or from every video in a directory."""
    args = parse_args()
    in_video = os.path.expanduser(args.in_video)
    if not os.path.exists(in_video):
        raise Exception("Input file/directory doesn't exist: {}".format(in_video))
    if os.path.isfile(in_video):
        extract_audio(in_video=in_video, out_audio=args.out_audio)
        return
    # Directory mode: collect video files, then process them in sorted order.
    candidates = (os.path.join(in_video, entry) for entry in os.listdir(in_video))
    video_file_paths = sorted(
        path for path in candidates
        if os.path.isfile(path)
        and os.path.splitext(path)[1].lower() in ('.mp4', '.mkv', '.avi'))
    for video_file_path in video_file_paths:
        # Output name is derived by extract_audio when out_audio is empty.
        extract_audio(in_video=video_file_path, out_audio='')
def read_pcd(pcd_dir='/home/wang/github/RoBoCar/ROS/pcd'):
    """Return the sorted timestamps (file stems) of all ``.pcd`` files in a directory.

    Args:
        pcd_dir: directory to scan; defaults to the original hard-coded capture
            directory for backward compatibility.

    Returns:
        Sorted list of file names with the ``.pcd`` extension stripped.
    """
    timestamps = []
    for file in glob.glob(os.path.join(pcd_dir, '*.pcd')):
        # basename/splitext instead of the fragile split('/')[7] positional
        # index, which only worked for one specific absolute path depth.
        stem = os.path.splitext(os.path.basename(file))[0]
        timestamps.append(stem)
    timestamps.sort()
    return timestamps
def one_hot(index, classes):
    """One-hot encode an integer index tensor.

    Args:
        index: integer tensor of class indices, shape (...,).
        classes: number of classes C.

    Returns:
        Float tensor of shape (..., C) with 1.0 at each index position.
    """
    class_range = torch.arange(classes, device=index.device).unsqueeze(0)
    expanded = index.unsqueeze(-1)
    # Broadcasted comparison yields the one-hot mask.
    return (expanded == class_range).float()
class NeuriR(ConcolicGen):
    """Concolic graph generator biased toward recorded operator instances.

    Placeholders and operator insertions are drawn from concrete
    (itensors, otensors, attrs) records held by an ``OpRecordFinder``; when no
    record applies, generation falls back to the symbolic base-class path.
    """
    def __init__(self, opset, record_finder: OpRecordFinder, seed=None, init_fp=False, **kwargs):
        # Skips ConcolicGen.__init__ deliberately and initializes BaseGen
        # directly so the first placeholder below comes from the records.
        BaseGen.__init__(self, opset, seed, **kwargs)
        if (seed is not None):
            set_z3_state(seed)
        self.record_finder = record_finder
        # Seed the IR with one concrete placeholder (forced to float32 if init_fp).
        self.forward_insert_node(self.make_random_concrete_placeholder(self.random_rank(), dtype=(DType.float32 if init_fp else None)), [])
    def make_random_concrete_placeholder(self, rank, dtype=None):
        """Pick a recorded tensor matching ``rank`` (and ``dtype`` if given) and
        build a placeholder with its exact shape/dtype; fall back to the base
        implementation when no record matches."""
        cand: List[AbsTensor] = []
        # Candidates come from both producer and consumer sides of the records.
        for tensor in self.record_finder.producer.keys():
            if ((tensor.ndims == rank) and ((dtype is None) or (tensor.dtype == dtype))):
                cand.append(tensor)
        for tensor in self.record_finder.consumer.keys():
            if ((tensor.ndims == rank) and ((dtype is None) or (tensor.dtype == dtype))):
                cand.append(tensor)
        if (len(cand) == 0):
            MGEN_LOG.warning(f'No record w/ {rank}<{dtype}>. Fallback to base.')
            return super().make_random_concrete_placeholder(rank, dtype)
        selected = random.choice(cand)
        ph = Placeholder(AbsTensor(shape=selected.shape, dtype=selected.dtype))
        return ph
    def try_concrete_forward_insert_op(self, type2vars, op: AbsOpBase, itensors, otensors) -> bool:
        """Try to wire ``op`` forward: bind each recorded input tensor type to an
        existing IR variable of the same type. Returns True on success."""
        ivars = []
        for it in itensors:
            if (it not in type2vars):
                # Missing any input type aborts; the length check below catches it.
                break
            ivars.append(random.choice(type2vars[it]))
        if (len(ivars) == len(itensors)):
            op.bind_input_like(itensors)
            op.bind_output_like(otensors)
            self.forward_insert_node(op, ivars)
            return True
        return False
    def try_concrete_backward_insert_op(self, type2vars: Dict[(AbsTensor, List[str])], op: AbsOpBase, itensors, otensors) -> bool:
        """Try to wire ``op`` backward: its recorded outputs must match existing
        *placeholder* variables (each used at most once); fresh placeholders are
        created for its inputs. Returns True on success."""
        # Restrict candidates to placeholder variables only.
        type2phvars = {k: [v for v in vs if (v in self.placeholders)] for (k, vs) in type2vars.items()}
        type2phvars = {k: vs for (k, vs) in type2phvars.items() if (len(vs) > 0)}
        ovars = []
        for ot in otensors:
            if (ot not in type2phvars):
                break
            # Don't reuse a placeholder for two outputs of the same op.
            cands = [v for v in type2phvars[ot] if (v not in ovars)]
            if (len(cands) == 0):
                break
            ovars.append(random.choice(cands))
        if (len(ovars) == len(otensors)):
            op_ivars = []
            for ttype in itensors:
                # New placeholders feed the op's inputs.
                inst = self.forward_insert_node(Placeholder(ttype=ttype), [])
                op_ivars.append(inst.retval())
            op.bind_input_like(itensors)
            op.bind_output_like(otensors)
            self.backward_insert_node(op, op_ivars, ovars)
            return True
        return False
    def try_concrete_insert_forward(self):
        """Shuffle the op types consuming any current IR tensor type and try each
        recorded instance until one inserts; constraint failures are skipped."""
        op_types = []
        type2vars = {}
        # Group IR variable names by their tensor type.
        for (k, v) in self.ir.vars.items():
            type2vars.setdefault(v, []).append(k)
        for v in type2vars:
            for ot in self.record_finder.consumer.get(v, []):
                if (ot not in op_types):
                    op_types.append(ot)
        random.shuffle(op_types)
        for op_type in op_types:
            for (itensors, otensors, attrs) in self.record_finder.op2record[op_type]:
                op: AbsOpBase = AutoInfOpBase(op_type, attrs=attrs)
                try:
                    if self.try_concrete_forward_insert_op(type2vars, op, itensors, otensors):
                        return True
                except ConstraintError:
                    # Record incompatible with current constraints; try the next one.
                    pass
        return False
    def try_concrete_insert_backward(self):
        """Backward analogue of try_concrete_insert_forward, driven by the
        producer records of the current placeholder types."""
        op_types = []
        type2vars = {}
        for k in self.placeholders:
            v = self.ir.vars[k]
            type2vars.setdefault(v, []).append(k)
        for v in type2vars:
            for ot in self.record_finder.producer.get(v, []):
                if (ot not in op_types):
                    op_types.append(ot)
        random.shuffle(op_types)
        for op_type in op_types:
            for (itensors, otensors, attrs) in self.record_finder.op2record[op_type]:
                op: AbsOpBase = AutoInfOpBase(op_type, attrs=attrs)
                try:
                    if self.try_concrete_backward_insert_op(type2vars, op, itensors, otensors):
                        return True
                except ConstraintError:
                    pass
        return False
    def try_insert(self):
        """With prob. 0.5 attempt a concrete (record-based) insertion first;
        otherwise (or on failure) fall back to symbolic insertion, tracking how
        often the symbolic path is impossible so generation can bail out."""
        if (random.random() < 0.5):
            if (random.random() < self.forward_prob):
                if self.try_concrete_insert_forward():
                    self.symbolic_impossible = 0
                    return True
            elif self.try_concrete_insert_backward():
                self.symbolic_impossible = 0
                return True
        # Symbolic insertion is only possible if some IR dtype is generatable.
        dtypes_in_ir = set([t.dtype for t in self.ir.vars.values()])
        if dtypes_in_ir.isdisjoint(set(DTYPE_GEN_ALL)):
            self.symbolic_impossible += 1
            return False
        return BaseGen.try_insert(self)
    def extra_exit_check(self, max_node_size):
        # Stop early once symbolic insertion has been impossible too many times.
        return (self.symbolic_impossible >= max_node_size)
    def abstract_gen(self, max_node_size=10, max_gen_millisec=2000):
        """Run base generation with the symbolic-impossible counter reset."""
        self.symbolic_impossible = 0
        return BaseGen.abstract_gen(self, max_node_size, max_gen_millisec)
def dims_to_shapes(input_dims):
    """Map each name to a shape tuple: (dim,) for positive dims, () otherwise (scalar)."""
    shapes = {}
    for name, dim in input_dims.items():
        shapes[name] = (dim,) if dim > 0 else ()
    return shapes
class SmallReactivePolicy():
    """Two-hidden-layer MLP policy evaluated with plain numpy.

    The weights (``weights_dense1_w`` etc.) are module-level globals; the
    constructor only sanity-checks their shapes against the given spaces.
    """

    def __init__(self, observation_space, action_space):
        assert (weights_dense1_w.shape == (observation_space.shape[0], 128))
        assert (weights_dense2_w.shape == (128, 64))
        assert (weights_final_w.shape == (64, action_space.shape[0]))

    def act(self, ob):
        """Map an observation to an action: relu(W1) -> relu(W2) -> linear head."""
        hidden1 = relu(np.dot(ob, weights_dense1_w) + weights_dense1_b)
        hidden2 = relu(np.dot(hidden1, weights_dense2_w) + weights_dense2_b)
        return np.dot(hidden2, weights_final_w) + weights_final_b
def bpda_strong(x, y, network_ebm, network_clf, config):
    """BPDA + EOT adversarial attack against a purification defense.

    Approximates gradients through the (non-differentiable) purification step:
    each iteration purifies the current iterate ``n_eot`` times, takes a
    one-step PGD perturbation on each purified sample, averages the resulting
    directions, and applies a signed step projected into the epsilon ball.

    Returns:
        (x_adv, success, acc): the adversarial batch, per-sample classifier
        correctness on it, and the mean accuracy.
    """
    transform_raw_to_clf = raw_to_clf(config.structure.dataset)
    fmodel = foolbox.PyTorchModel(network_clf, bounds=(0.0, 1.0), preprocessing=foolbox_preprocess(config.structure.dataset))
    # EBM (purifier) and classifier may live on different devices.
    x = x.to(config.device.ebm_device)
    y = y.to(config.device.clf_device)
    x_temp = x.clone().detach()
    for i in range(config.attack.iter):
        # Accumulate EOT gradient-surrogate directions over n_eot purifications.
        grad = torch.zeros_like(x_temp).to(config.device.ebm_device)
        for j in range(config.attack.n_eot):
            if ((config.purification.purify_method == 'adp_multiple_noise') or (config.purification.purify_method == 'adp_decision')):
                x_temp_eot = adp(x_temp, network_ebm, max_iter=config.purification.max_iter, mode='attack', config=config)[0][(- 1)].to(config.device.clf_device)
            else:
                # SECURITY NOTE(review): eval() dispatches on a config string
                # (purify_method) — only safe if config files are trusted.
                x_temp_eot = eval(config.purification.purify_method)(x_temp, network_ebm, max_iter=config.purification.max_iter, mode='attack', config=config)[0][(- 1)].to(config.device.clf_device)
            # One PGD step on the purified sample; ball_dim selects the norm.
            if (config.attack.ball_dim == (- 1)):
                attack = foolbox.attacks.LinfPGD(rel_stepsize=0.25, steps=1, random_start=False)
                (_, x_temp_eot_d, _) = attack(fmodel, x_temp_eot, y, epsilons=(config.attack.ptb / 255.0))
            elif (config.attack.ball_dim == 2):
                attack = foolbox.attacks.L2PGD(rel_stepsize=0.25)
                (_, x_temp_eot_d, _) = attack(fmodel, x_temp_eot, y, epsilons=(config.attack.ptb / 255.0))
            # Surrogate gradient: perturbation direction found on the purified sample.
            grad += (x_temp_eot_d.detach() - x_temp_eot).to(config.device.ebm_device)
        # NOTE(review): `success` computed here is overwritten after the loop;
        # this per-iteration value is unused.
        x_clf = transform_raw_to_clf(x_temp.clone().detach()).to(config.device.clf_device)
        success = torch.eq(torch.argmax(network_clf(x_clf), dim=1), y)
        # Signed step, then project back into the L-inf epsilon ball and [0, 1].
        x_temp = torch.clamp((x + torch.clamp(((x_temp - x) + ((grad.sign() * config.attack.alpha) / 255.0)), (((- 1.0) * config.attack.ptb) / 255.0), (config.attack.ptb / 255.0))), 0.0, 1.0)
    x_adv = x_temp.clone().detach()
    # Final evaluation of the classifier on the adversarial batch.
    x_clf = transform_raw_to_clf(x_adv.clone().detach()).to(config.device.clf_device)
    success = torch.eq(torch.argmax(network_clf(x_clf), dim=1), y)
    acc = success.float().mean(axis=(- 1))
    return (x_adv, success, acc)
class InputExample():
    """A single multiple-choice example: a paragraph, its QA pairs, and the label."""

    def __init__(self, paragraph, qa_list, label):
        # Plain data container; no processing happens at construction time.
        self.label = label
        self.qa_list = qa_list
        self.paragraph = paragraph
class BasicBlock(nn.Module):
    """Pre-activation WideResNet basic block: (BN-ReLU-Conv3x3) x 2 with optional shortcut.

    When in/out planes differ (or stride > 1), a 1x1 conv projects the residual
    path; setting ``shortcut=False`` disables the residual addition entirely.
    """

    def __init__(self, in_planes, out_planes, stride, bn_aff=True, shortcut=True, dropRate=0.0):
        super(BasicBlock, self).__init__()
        self.shortcut = shortcut
        self.bn_aff = bn_aff  # whether the BatchNorm layers carry affine parameters
        self.bn1 = nn.BatchNorm2d(in_planes, affine=self.bn_aff)
        self.relu1 = nn.ReLU(inplace=True)
        self.conv1 = nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_planes, affine=self.bn_aff)
        self.relu2 = nn.ReLU(inplace=True)
        self.conv2 = nn.Conv2d(out_planes, out_planes, kernel_size=3, stride=1, padding=1, bias=False)
        self.droprate = dropRate
        self.equalInOut = (in_planes == out_planes)
        # 1x1 projection for the residual path when shapes differ; replaces the
        # original convoluted `and/or` construction with an explicit conditional.
        self.convShortcut = (None if self.equalInOut else
                             nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                                       padding=0, bias=False))

    def forward(self, x):
        if not self.equalInOut:
            # Shared pre-activation: the projected shortcut also uses BN+ReLU(x).
            x = self.relu1(self.bn1(x))
            out = x
        else:
            out = self.relu1(self.bn1(x))
        out = self.relu2(self.bn2(self.conv1(out)))
        if self.droprate > 0:
            out = F.dropout(out, p=self.droprate, training=self.training)
        out = self.conv2(out)
        if self.shortcut:
            # Residual add; the original else-branch contained a dead bare `out`
            # expression (a no-op), which is removed here.
            out = torch.add(x if self.equalInOut else self.convShortcut(x), out)
        return out
class ScoredBoundingBoxVisualizer(object):
    """Draw bounding boxes with their scores rendered at each box's top-left corner."""

    def __init__(self, bbox_visualizer_params=None, score_visualizer_params=None, **kwargs):
        bbox_params = bbox_visualizer_params if bbox_visualizer_params is not None else {}
        score_params = score_visualizer_params if score_visualizer_params is not None else {}
        self.visualizer_bbox = RectangleVisualizer(**bbox_params)
        self.visualizer_score = TextVisualizer(**score_params)

    def visualize(self, image_bgr, scored_bboxes):
        """Render each (xywh box, score) pair onto image_bgr and return the image."""
        boxes_xywh, box_scores = scored_bboxes
        assert len(boxes_xywh) == len(box_scores), (
            'Number of bounding boxes {} should be equal to the number of scores {}'
            .format(len(boxes_xywh), len(box_scores)))
        for box_xywh, score in zip(boxes_xywh, box_scores):
            image_bgr = self.visualizer_bbox.visualize(image_bgr, box_xywh)
            label = '{0:6.4f}'.format(score)
            image_bgr = self.visualizer_score.visualize(image_bgr, label, (box_xywh[0], box_xywh[1]))
        return image_bgr
def accuracy(output, target, topk=(1,)):
    """Compute top-k classification accuracy for each k in ``topk``.

    Args:
        output: (batch, classes) score matrix (converted via ``to_torch``).
        target: (batch,) ground-truth labels.
        topk: tuple of k values to evaluate.

    Returns:
        List of 1-element tensors, one accuracy (in [0, 1]) per k.
    """
    output, target = to_torch(output), to_torch(target)
    maxk = max(topk)
    batch_size = target.size(0)
    # Indices of the maxk highest scores per row, transposed to (maxk, batch).
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    ret = []
    for k in topk:
        # reshape(-1): the sliced mask is non-contiguous, so view(-1) raises
        # "view size is not compatible" on current PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(dim=0, keepdim=True)
        ret.append(correct_k.mul_(1.0 / batch_size))
    return ret
def check_early_stop(trainer, epochs):
    """Emit a warning if training stopped before the requested number of epochs."""
    end_epoch = trainer.updater.get_iterator('main').epoch
    if end_epoch >= (epochs - 1):
        return
    logging.warning(
        ('Hit early stop at epoch ' + str(end_epoch))
        + '\nYou can change the patience or set it to 0 to run all epochs')
class TestCommutativeCancellation(QiskitTestCase):
    """Tests for the CommutativeCancellation transpiler pass: adjacent
    self-inverse or commuting gates should cancel / merge."""
    def setUp(self):
        # Fresh analysis pass, cancellation pass, and a shared property set.
        self.com_pass_ = CommutationAnalysis()
        self.pass_ = CommutativeCancellation()
        self.pset = self.pass_.property_set = PropertySet()
    def test_all_gates(self):
        """Back-to-back pairs of every supported gate cancel, leaving only the
        merged rotation u1(2.0) from the two rz(0.5) + two u1(0.5)."""
        qr = QuantumRegister(2, 'q')
        circuit = QuantumCircuit(qr)
        # Each gate is applied twice in a row so the pairs cancel.
        circuit.h(qr[0])
        circuit.h(qr[0])
        circuit.x(qr[0])
        circuit.x(qr[0])
        circuit.y(qr[0])
        circuit.y(qr[0])
        circuit.rz(0.5, qr[0])
        circuit.rz(0.5, qr[0])
        circuit.u1(0.5, qr[0])
        circuit.u1(0.5, qr[0])
        circuit.cx(qr[0], qr[1])
        circuit.cx(qr[0], qr[1])
        circuit.cy(qr[0], qr[1])
        circuit.cy(qr[0], qr[1])
        circuit.cz(qr[0], qr[1])
        circuit.cz(qr[0], qr[1])
        passmanager = PassManager()
        passmanager.append(CommutativeCancellation())
        new_circuit = transpile(circuit, pass_manager=passmanager)
        # Only the accumulated phase rotation should remain.
        expected = QuantumCircuit(qr)
        expected.u1(2.0, qr[0])
        self.assertEqual(expected, new_circuit)
    def test_commutative_circuit1(self):
        """Two cx(q0, q1) separated by a commuting cx(q2, q1) still cancel."""
        qr = QuantumRegister(3, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        circuit.h(qr[2])
        circuit.cx(qr[2], qr[1])
        circuit.cx(qr[0], qr[1])
        passmanager = PassManager()
        passmanager.append(CommutativeCancellation())
        new_circuit = transpile(circuit, pass_manager=passmanager)
        expected = QuantumCircuit(qr)
        expected.h(qr[2])
        expected.cx(qr[2], qr[1])
        self.assertEqual(expected, new_circuit)
    def test_commutative_circuit2(self):
        """Commuting rotations on q2 merge into one u1, and the x-separated
        cx pair on (q0, q1) cancels along with the x gates."""
        qr = QuantumRegister(3, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        # Rotations on q2 commute with cx(q2, q1) (same Z basis on control).
        circuit.rz((sympy.pi / 3), qr[2])
        circuit.cx(qr[2], qr[1])
        circuit.rz((sympy.pi / 3), qr[2])
        circuit.t(qr[2])
        circuit.s(qr[2])
        circuit.x(qr[1])
        circuit.cx(qr[0], qr[1])
        circuit.x(qr[1])
        passmanager = PassManager()
        passmanager.append(CommutativeCancellation())
        new_circuit = transpile(circuit, pass_manager=passmanager)
        # pi/3 + pi/3 + pi/4 (t) + pi/2 (s) = 17*pi/12.
        expected = QuantumCircuit(qr)
        expected.u1(((sympy.pi * 17) / 12), qr[2])
        expected.cx(qr[2], qr[1])
        self.assertEqual(expected, new_circuit)
class MetaIterativeEnvExecutor(object):
    """Serially steps a batch of per-task environment copies for meta-RL.

    Holds ``meta_batch_size * envs_per_task`` deep copies of a template env and
    steps them sequentially, auto-resetting each env when it reports done or
    exceeds ``max_path_length``.
    """

    def __init__(self, env, meta_batch_size, envs_per_task, max_path_length):
        self.envs = np.asarray([copy.deepcopy(env) for _ in range(meta_batch_size * envs_per_task)])
        # Per-env step counters for horizon-based termination.
        self.ts = np.zeros(len(self.envs), dtype='int')
        self.max_path_length = max_path_length

    def step(self, actions):
        """Step every env with its action; auto-reset finished ones.

        Returns:
            (obs, rewards, dones, env_infos) as per-env lists (``dones`` is an
            ndarray); done also fires when the horizon is reached.
        """
        assert len(actions) == self.num_envs
        all_results = [env.step(a) for (a, env) in zip(actions, self.envs)]
        obs, rewards, dones, env_infos = list(map(list, zip(*all_results)))
        dones = np.asarray(dones)
        self.ts += 1
        dones = np.logical_or(self.ts >= self.max_path_length, dones)
        for i in np.argwhere(dones).flatten():
            obs[i] = self.envs[i].reset()
            self.ts[i] = 0
        return obs, rewards, dones, env_infos

    def set_tasks(self, tasks):
        """Assign one task to each contiguous, equal-sized chunk of envs."""
        envs_per_task = np.split(self.envs, len(tasks))
        for task, envs in zip(tasks, envs_per_task):
            for env in envs:
                env.set_task(task)

    def reset(self):
        """Reset all envs and step counters; return the initial observations."""
        obses = [env.reset() for env in self.envs]
        self.ts[:] = 0
        return obses

    @property
    def num_envs(self):
        """Total number of managed envs.

        Fixed: this was a plain method, so ``len(actions) == self.num_envs`` in
        ``step`` compared an int against a bound method and always failed.
        """
        return len(self.envs)
def url_to_filename(url, etag=None):
    """Derive a cache filename from a URL (and optional ETag).

    The name is the sha256 hex digest of the URL; if an ETag is given, its
    digest is appended after a dot. A trailing '.h5' is preserved so HDF5
    readers can recognize the file type.
    """
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename = '{}.{}'.format(filename, sha256(etag.encode('utf-8')).hexdigest())
    if url.endswith('.h5'):
        filename = filename + '.h5'
    return filename
class ModelCriterionConfig(FairseqDataclass):
    """Config for a model-computed criterion: loss weighting and extra logging."""
    # Per-loss-term scaling factors, keyed by loss name.
    loss_weights: Dict[(str, float)] = field(default_factory=dict, metadata={'help': 'weights for the loss terms'})
    # Extra model-output keys to include in the logging output.
    log_keys: List[str] = field(default_factory=list, metadata={'help': 'additional output keys to log'})
    # Whether logged values may be summed across workers when aggregating.
    can_sum: bool = True
# NOTE(review): the next line appears to be a decorator whose leading
# `@pytest.mark` was stripped during extraction; as written it is not valid
# standalone Python — confirm against the original source.
.skipif((not torch.cuda.is_available()), reason='requires CUDA support')
def test_forward_equal_with_pytorch_float():
    """Check the CUDA MultiScaleDeformableAttn kernel against the pure-PyTorch
    reference on a tiny random float input (two feature levels)."""
    # N=batch, M=heads, D=head dim; Lq=queries, L=levels, P=sampling points.
    (N, M, D) = (1, 2, 2)
    (Lq, L, P) = (2, 2, 2)
    shapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()
    # Flattened start offset of each level in the value tensor.
    level_start_index = torch.cat((shapes.new_zeros((1,)), shapes.prod(1).cumsum(0)[:(- 1)]))
    S = sum([(H * W).item() for (H, W) in shapes])
    torch.manual_seed(3)
    value = (torch.rand(N, S, M, D).cuda() * 0.01)
    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()
    attention_weights = (torch.rand(N, Lq, M, L, P).cuda() + 1e-05)
    # Normalize weights over levels and points so they sum to 1 per query/head.
    attention_weights /= attention_weights.sum((- 1), keepdim=True).sum((- 2), keepdim=True)
    im2col_step = 2
    output_pytorch = multi_scale_deformable_attn_pytorch(value, shapes, sampling_locations, attention_weights).detach().cpu()
    output_cuda = MultiScaleDeformableAttnFunction.apply(value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step).detach().cpu()
    assert torch.allclose(output_cuda, output_pytorch, rtol=0.01, atol=0.001)
    max_abs_err = (output_cuda - output_pytorch).abs().max()
    max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()
    assert (max_abs_err < 1e-09)
    assert (max_rel_err < 1e-06)
def ResNet101(nInputChannels=3, os=16, pretrained=False):
    """Build a ResNet-101 backbone (Bottleneck layout [3, 4, 23, 3]) with the
    given output stride ``os``."""
    return ResNet(nInputChannels, Bottleneck, [3, 4, 23, 3], os, pretrained=pretrained)
def get_layers(layer_type):
    """Return the (conv, linear) layer classes for a layer type.

    Args:
        layer_type: 'dense' for standard nn layers, 'subnet' for masked ones.

    Raises:
        ValueError: for any other layer type.
    """
    if layer_type == 'dense':
        return nn.Conv2d, nn.Linear
    if layer_type == 'subnet':
        return SubnetConv, SubnetLinear
    raise ValueError('Incorrect layer type')
def deconv3d(input_, output_shape, k_t=3, k_h=3, k_w=3, d_t=1, d_h=1, d_w=1, padding='SAME', name='deconv3d'):
    """3-D transposed convolution with weight decay on the filter and a bias term.

    Fixed: the filter shape previously used ``k_h`` twice ([k_t, k_h, k_h, ...]);
    the third spatial kernel dimension must be ``k_w``, which was silently
    ignored. Behavior is unchanged for the defaults (k_w == k_h == 3).

    Args:
        input_: 5-D input tensor (N, T, H, W, C).
        output_shape: full shape of the transposed-conv output.
        k_t, k_h, k_w: kernel sizes (time, height, width).
        d_t, d_h, d_w: strides (time, height, width).

    Returns:
        The biased transposed-convolution output tensor.
    """
    with tf.variable_scope(name):
        # Filter layout for conv3d_transpose: [depth, height, width, out_ch, in_ch].
        w = _variable_with_weight_decay('w', [k_t, k_h, k_w, output_shape[(- 1)], input_.get_shape()[(- 1)]])
        deconv = tf.nn.conv3d_transpose(input_, w, output_shape=output_shape,
                                        strides=[1, d_t, d_h, d_w, 1], padding=padding)
        b = _variable_with_weight_decay('b', [output_shape[(- 1)]])
        return tf.nn.bias_add(deconv, b)
class TestFineTuneEpocher(TestCase):
    def setUp(self) -> None:
        """Build model/optimizer/dataloader fixtures on GPU for fine-tune tests.

        NOTE(review): ``self._projector`` is never assigned in this visible
        setUp, so the final ``.cuda()`` call will raise AttributeError unless a
        parent class creates it — confirm against the base TestCase.
        """
        global arch_dict
        # Copy so the pop('name') below does not mutate the module-level dict.
        arch_dict = deepcopy(arch_dict)
        super().setUp()
        # NOTE(review): 'pretrain_datsaet' is a typo for 'dataset' (local only).
        pretrain_datsaet = ACDCDataset(root_dir=DATA_PATH, mode='train', transforms=transform)
        self._pretrain_loader = iter(DataLoader(pretrain_datsaet))
        self._model = get_arch(arch_dict.pop('name'), arch_dict)
        self._optimizer = torch.optim.Adam(self._model.parameters(), lr=1e-06, weight_decay=1e-05)
        self._model.cuda()
        self._projector.cuda()
def create_decoder(opt):
    """Build the decoder module selected by ``opt.decoder_type``.

    Args:
        opt: options namespace providing the decoder hyperparameters.

    Returns:
        The constructed decoder.

    Raises:
        ValueError: for an unknown ``decoder_type`` (the original silently
        returned None, deferring the failure to the first use).
    """
    if opt.decoder_type == 'AttnDecoderRNN':
        return AttnDecoderRNN(opt.rnn_type, opt.atten_model, opt.embedding_size,
                              opt.hidden_size, opt.num_layers, opt.dropout)
    raise ValueError('Unknown decoder_type: {}'.format(opt.decoder_type))
def merge_ans(src, ans):
    """Join each source token list with its answer: src <SEP> ans <SEP>."""
    merged = []
    for source_tokens, answer_tokens in zip(src, ans):
        merged.append(source_tokens + [Constants.SEP_WORD] + answer_tokens + [Constants.SEP_WORD])
    return merged
def create_background_tasks():
    """Create a BackgroundTasks set that releases the model semaphore when run."""
    tasks = BackgroundTasks()
    tasks.add_task(release_model_semaphore)
    return tasks
class ViTGraph(nn.Module):
    """ViT-style transformer over grouped point-cloud embeddings.

    Points are grouped and embedded (GroupEmbed or KMeansEmbed), projected to
    the encoder width, prepended with a learnable class token, and passed
    through a TransformerEncoder with positional embeddings computed from the
    3-D group centers.

    NOTE(review): ``embed_args`` is read via attribute access (``.NAME``,
    ``.embed_dim``), so callers are expected to pass EasyDict-like configs even
    though the defaults are plain dicts — confirm against the call sites.
    """

    def __init__(self, in_chans=6, num_classes=40, encoder_dim=768, depth=12, num_heads=12,
                 mlp_ratio=4.0, qkv_bias=False, drop_rate=0.0, attn_drop_rate=0.0,
                 drop_path_rate=0.0,
                 embed_args={'NAME': 'groupembed', 'num_groups': 256, 'group_size': 32,
                             'embed_dim': 256, 'subsample': 'fps', 'group': 'knn',
                             'feature_type': 'fj'},
                 norm_args={'norm': 'ln', 'eps': 1e-06},
                 act_args={'act': 'gelu'},
                 posembed_norm_args=None, **kwargs):
        super().__init__()
        if kwargs:
            logging.warning(f'kwargs: {kwargs} are not used in {__class__.__name__}')
        self.num_classes = num_classes
        self.num_features = self.encoder_dim = encoder_dim
        self.num_tokens = 1
        self.embed_layer = embed_args.NAME.lower()
        if (self.embed_layer == 'groupembed'):
            self.group_embed = GroupEmbed(in_chans=in_chans, **embed_args)
        elif (self.embed_layer == 'kmeans'):
            self.group_embed = KMeansEmbed(**embed_args)
        self.proj_layer = nn.Linear(embed_args.embed_dim, self.encoder_dim)
        self.cls_token = nn.Parameter(torch.zeros(1, 1, self.encoder_dim))
        self.cls_pos = nn.Parameter(torch.randn(1, 1, self.encoder_dim))
        # MLP mapping 3-D group centers to positional embeddings.
        self.pos_embed = nn.Sequential(
            create_linearblock(3, 128, norm_args=posembed_norm_args, act_args=act_args),
            nn.Linear(128, self.encoder_dim))
        # Linearly increasing stochastic-depth rates per block.
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = TransformerEncoder(embed_dim=self.encoder_dim, depth=depth,
                                         drop_path_rate=dpr, attn_drop_rate=attn_drop_rate,
                                         num_heads=num_heads, act_args=act_args,
                                         norm_args=norm_args)
        self.norm = (create_norm(norm_args, self.encoder_dim) or nn.Identity())
        self.initialize_weights()

    def initialize_weights(self):
        """Initialize class token / position with small normals and apply per-module init."""
        torch.nn.init.normal_(self.cls_token, std=0.02)
        torch.nn.init.normal_(self.cls_pos, std=0.02)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        # Fixed: was `def _init_weights(m)` (missing `self`), so
        # `self.apply(self._init_weights)` raised a TypeError at init time.
        if isinstance(m, nn.Linear):
            torch.nn.init.xavier_uniform_(m.weight)
            if m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Restored decorator: the source line was a bare `.ignore`, i.e. a
        # stripped `@torch.jit.ignore` (and a syntax error as written).
        return {'pos_embed', 'cls_token', 'dist_token'}

    def forward(self, xyz, features=None):
        """Encode a point cloud; returns (group centers, encoded token features)."""
        (center_xyz, features, _, _) = self.group_embed(xyz, features)
        features = self.proj_layer(features)
        pos_embed = self.pos_embed(center_xyz)
        # Prepend the class token and its positional embedding.
        pos_embed = torch.cat((self.cls_pos.expand(features.shape[0], (- 1), (- 1)), pos_embed), dim=1)
        cls_token = self.cls_token.expand(features.shape[0], (- 1), (- 1))
        features = torch.cat((cls_token, features), dim=1)
        features = self.blocks(features, pos_embed)
        features = self.norm(features)
        return (center_xyz, features)

    def forward_features(self, xyz, features=None, num_points=None):
        """Like forward, but returns intermediate per-block features; only the
        last entry is normalized."""
        (center_xyz, features, _, _) = self.group_embed(xyz, features)
        features = self.proj_layer(features)
        pos_embed = self.pos_embed(center_xyz)
        pos_embed = torch.cat((self.cls_pos.expand(features.shape[0], (- 1), (- 1)), pos_embed), dim=1)
        cls_token = self.cls_token.expand(features.shape[0], (- 1), (- 1))
        features = torch.cat((cls_token, features), dim=1)
        out_features = self.blocks.forward_features(features, pos_embed, num_points)
        out_features[(- 1)] = self.norm(out_features[(- 1)])
        return (center_xyz, out_features)
def resize_images(scale, verbose=False, img_dir='./src/hu2013/Data/Lady',
                  output_dir='./src/hu2013/Data', names=('Img1.jpg', 'Img2.jpg', 'Img3.jpg')):
    """Downscale a set of images by ``scale`` and write them to ``output_dir``.

    Generalized (backward-compatible defaults): ``img_dir``, ``output_dir`` and
    ``names`` were previously hard-coded inside the function body.

    Args:
        scale: downscaling factor (> 1 shrinks the image).
        verbose: print before/after dimensions for each image.
        img_dir: directory containing the input images.
        output_dir: directory the resized images are written to.
        names: file names to process.
    """
    for name in names:
        img = cv2.imread(os.path.join(img_dir, name))
        height, width, _ = img.shape
        img_resize = cv2.resize(img, (int(width / scale), int(height / scale)))
        if verbose:
            print('before resize\nheight: %d, width: %d' % (height, width))
            print('after resize\nheight: %d, width: %d' % (img_resize.shape[0], img_resize.shape[1]))
        cv2.imwrite(os.path.join(output_dir, name), img_resize)
def restrict(x):
    """Build a small graph: input 'p' feeds a 'restrict' node that outputs x and 'p'."""
    g = GraphInterface()
    # Vertex creation order is preserved deliberately.
    source = g.add_vertex('p', is_input=True)
    restrict_node = g.add_vertex('restrict')
    out_x = g.add_vertex(x, is_output=True)
    out_p = g.add_vertex('p', is_output=True)
    g.add_edge(source, restrict_node)
    g.add_edge(restrict_node, out_p)
    g.add_edge(restrict_node, out_x)
    return g
@_registry(operator_type='Flatten')
class Flatten(Operator):
    """Flatten operator, registered with the operator registry.

    NOTE(review): the original line read ``_registry(operator_type='Flatten')``
    with no ``@`` — it merely called the registry factory and discarded the
    result, so the class was never registered; restored as a decorator.
    """

    def __init__(self):
        super().__init__()
def max_abs_sum_seg(scores_list, min_length: int=1):
    """Segment [0, n) to maximize the sum of per-segment absolute scores.

    ``scores_list[s][e]`` holds the score of segment [s, e]. Entries are
    mutated in place: |score| for segments of at least ``min_length``, else a
    -10000 penalty that effectively forbids them.

    Returns:
        res: res[i] = best total score over segmentations of [0, i].
        paths: paths[i] = list of segment start indices achieving res[i].
    """
    n = len(scores_list[0])
    res = ([0] * n)
    paths = {}
    # Pre-process scores: forbid too-short segments, take |score| otherwise.
    for s in range(n):
        for e in range(s, n):
            if ((e - s) >= (min_length - 1)):
                scores_list[s][e] = abs(scores_list[s][e])
            else:
                scores_list[s][e] = (- 10000)
    # Sentinel for segments starting at 0: paths[-1] is an empty predecessor
    # path, and res[-1] — i.e. res[n-1], which is still 0 throughout the loop
    # below — acts as the zero base score for j == 0.
    paths[(- 1)] = []
    res[0] = scores_list[0][0]
    paths[0] = [0]
    for i in range(1, n):
        # Candidate j = start of the final segment [j, i].
        cand = [(res[(j - 1)] + scores_list[j][i]) for j in range((i + 1))]
        seg_start = np.argmax(cand)
        res[i] = max(cand)
        paths[i] = (paths[(seg_start - 1)] + [seg_start])
    return (res, paths)
def adjust_and_move_row_and_column_borders(annotations):
    """Snap adjacent table row/column borders to their shared midpoint and
    stretch every row/column to the table's overall bounding box.

    Mutates the ``'bbox'`` ([x0, y0, w, h]) of 'table_row' / 'table_col'
    annotations in place and returns the same ``annotations`` list.
    Assumes ``row_nr`` / ``col_nr`` values are consecutive integers.
    """
    # Index row/col annotations by their ordinal number.
    row_anns = dict()
    col_anns = dict()
    for ann in annotations:
        if (ann['category'] == 'table_row'):
            row_anns[ann['row_nr']] = ann
        elif (ann['category'] == 'table_col'):
            col_anns[ann['col_nr']] = ann
    # --- Move each pair of neighboring column borders to their x midpoint ---
    col_nrs = sorted(list(col_anns.keys()))
    if (len(col_nrs) > 1):
        for col_nr in col_nrs[:(- 1)]:
            next_col_nr = (col_nr + 1)
            (cur_col_x0, cur_col_y0, cur_col_w, cur_col_h) = col_anns[col_nr]['bbox']
            cur_col_x1 = (cur_col_x0 + cur_col_w)
            cur_col_y1 = (cur_col_y0 + cur_col_h)
            (next_col_x0, next_col_y0, next_col_w, next_col_h) = col_anns[next_col_nr]['bbox']
            next_col_x1 = (next_col_x0 + next_col_w)
            next_col_y1 = (next_col_y0 + next_col_h)
            # Close the gap (or overlap) by meeting halfway.
            x_diff = (next_col_x0 - cur_col_x1)
            x_midpoint = (cur_col_x1 + (x_diff * 0.5))
            cur_col_x1 = x_midpoint
            next_col_x0 = x_midpoint
            col_anns[col_nr]['bbox'] = [cur_col_x0, cur_col_y0, (cur_col_x1 - cur_col_x0), (cur_col_y1 - cur_col_y0)]
            col_anns[next_col_nr]['bbox'] = [next_col_x0, next_col_y0, (next_col_x1 - next_col_x0), (next_col_y1 - next_col_y0)]
    # --- Same for neighboring row borders (y midpoint) ---
    row_nrs = sorted(list(row_anns.keys()))
    if (len(row_nrs) > 1):
        for row_nr in row_nrs[:(- 1)]:
            next_row_nr = (row_nr + 1)
            (cur_row_x0, cur_row_y0, cur_row_w, cur_row_h) = row_anns[row_nr]['bbox']
            cur_row_x1 = (cur_row_x0 + cur_row_w)
            cur_row_y1 = (cur_row_y0 + cur_row_h)
            (next_row_x0, next_row_y0, next_row_w, next_row_h) = row_anns[next_row_nr]['bbox']
            next_row_x1 = (next_row_x0 + next_row_w)
            next_row_y1 = (next_row_y0 + next_row_h)
            y_diff = (next_row_y0 - cur_row_y1)
            y_midpoint = (cur_row_y1 + (y_diff * 0.5))
            cur_row_y1 = y_midpoint
            next_row_y0 = y_midpoint
            row_anns[row_nr]['bbox'] = [cur_row_x0, cur_row_y0, (cur_row_x1 - cur_row_x0), (cur_row_y1 - cur_row_y0)]
            row_anns[next_row_nr]['bbox'] = [next_row_x0, next_row_y0, (next_row_x1 - next_row_x0), (next_row_y1 - next_row_y0)]
    # --- Compute the overall tabular bounding box from all rows and columns ---
    (tabular_max_x, tabular_max_y, tabular_min_x, tabular_min_y) = ((- 1), (- 1), 100000, 100000)
    for rowcol in (list(row_anns.values()) + list(col_anns.values())):
        (x0, y0, w, h) = rowcol['bbox']
        x1 = (x0 + w)
        y1 = (y0 + h)
        if (x1 > tabular_max_x):
            tabular_max_x = x1
        if (y1 > tabular_max_y):
            tabular_max_y = y1
        if (x0 < tabular_min_x):
            tabular_min_x = x0
        if (y0 < tabular_min_y):
            tabular_min_y = y0
    num_rows = len(row_anns)
    num_cols = len(col_anns)
    new_row_x0 = tabular_min_x
    new_row_w = (tabular_max_x - tabular_min_x)
    new_col_y0 = tabular_min_y
    new_col_h = (tabular_max_y - tabular_min_y)
    # --- Stretch rows to full table width; first/last rows to table top/bottom ---
    for row in row_anns.values():
        (x0, y0, w, h) = row['bbox']
        row['bbox'] = [new_row_x0, y0, new_row_w, h]
        if (row['row_nr'] == 0):
            # Extend the first row up to the table's top edge.
            new_h = ((row['bbox'][3] + row['bbox'][1]) - tabular_min_y)
            row['bbox'][1] = tabular_min_y
            row['bbox'][3] = new_h
        if (row['row_nr'] == (num_rows - 1)):
            # Extend the last row down to the table's bottom edge.
            new_h = (tabular_max_y - row['bbox'][1])
            row['bbox'][3] = new_h
    # --- Stretch cols to full table height; first/last cols to table left/right ---
    for col in col_anns.values():
        (x0, y0, w, h) = col['bbox']
        col['bbox'] = [x0, new_col_y0, w, new_col_h]
        if (col['col_nr'] == 0):
            new_w = ((col['bbox'][2] + col['bbox'][0]) - tabular_min_x)
            col['bbox'][0] = tabular_min_x
            col['bbox'][2] = new_w
        if (col['col_nr'] == (num_cols - 1)):
            new_w = (tabular_max_x - col['bbox'][0])
            col['bbox'][2] = new_w
    return annotations
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.