code stringlengths 101 5.91M |
|---|
class StableDiffusionInpaintPipelineLegacy(metaclass=DummyObject):
    """Import-time placeholder that raises unless torch and transformers are installed."""

    _backends = ['torch', 'transformers']

    def __init__(self, *args, **kwargs):
        # Raises an informative error naming the missing backends.
        requires_backends(self, self._backends)

    def from_config(cls, *args, **kwargs):
        requires_backends(cls, cls._backends)

    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, cls._backends)
def load_data_table(table, image_dir, corrupt_images=None):
    """Load an image-manifest CSV and filter out unusable rows.

    Args:
        table: Path to a CSV with at least PTID, Visit and DX columns.
        image_dir: Root directory used to resolve each row's image path
            (via the module-level `get_image_filepath` helper).
        corrupt_images: Optional collection of "PTID/Visit" strings whose
            preprocessing failed; matching rows are dropped.

    Returns:
        The filtered pandas DataFrame with an added 'filepath' column.
    """
    print('Loading dataframe for', table)
    df = pd.read_csv(table)
    print('Found', len(df), 'images in table')
    df['filepath'] = df.apply(lambda row: get_image_filepath(row, image_dir), axis=1)
    len_before = len(df)
    if corrupt_images is not None:
        df = df[df.apply(lambda row: '{}/{}'.format(row['PTID'], row['Visit']) not in corrupt_images, axis=1)]
        print('Filtered out', len_before - len(df), 'of', len_before, 'images because of failed preprocessing')
        len_before = len(df)
    # Use a proper boolean Series as the mask; indexing a DataFrame with a raw
    # `map` iterator is fragile across pandas versions.
    df = df[df['filepath'].map(os.path.exists)]
    print('Filtered out', len_before - len(df), 'of', len_before, 'images because of missing files')
    len_before = len(df)
    # Keep only non-MCI diagnoses.
    df = df[df['DX'] != 'MCI']
    print('Filtered out', len_before - len(df), 'of', len_before, 'images that were MCI')
    print('Final dataframe contains', len(df), 'images from', len(df['PTID'].unique()), 'patients')
    print()
    return df
class TrialOutput(object):
    """Plain value object pairing a trial's configuration with its saved model path."""

    def __init__(self, config, model_path):
        # Simply record both fields; no validation is performed.
        self.config = config
        self.model_path = model_path
class Entity(object):
    """Base class for physical entities in a simulated world.

    Holds identity, geometry, motion limits and dynamic state; subclasses
    (agents, landmarks, ...) customize the defaults.
    """

    def __init__(self):
        # Identity / appearance.
        self.name = ''
        self.color = None
        # Geometry and physical behavior.
        self.size = 0.05
        self.movable = False
        self.collide = True
        self.density = 25.0
        # Motion limits; None means unconstrained.
        self.max_speed = None
        self.accel = None
        self.max_a_speed = None
        # Dynamic state container and mass.
        self.state = EntityState()
        self.initial_mass = 1.0

    def mass(self):
        """Return the entity's (constant) mass."""
        return self.initial_mass
class ConvolutionalAutoencoder(torch.nn.Module):
    """Convolutional autoencoder with max-pool/unpool skip information.

    The encoder records pooling indices and pre-pool sizes at each stage so the
    decoder can invert the pooling exactly with MaxUnpool2d.  The bottleneck
    width is taken from the module-level `latent_code_size`.
    """

    def __init__(self):
        super().__init__()
        # Encoder convolutions: 3 -> 32 -> 64 -> 128 -> latent_code_size.
        self.conv1 = torch.nn.Conv2d(3, 32, 3, padding=1)
        self.conv2 = torch.nn.Conv2d(32, 64, 3, padding=1)
        self.conv3 = torch.nn.Conv2d(64, 128, 3, padding=1)
        self.conv4 = torch.nn.Conv2d(128, latent_code_size, 3, padding=1)
        # Decoder transposed convolutions mirror the encoder.
        self.trans1 = torch.nn.ConvTranspose2d(latent_code_size, 128, 3, padding=1)
        self.trans2 = torch.nn.ConvTranspose2d(128, 64, 3, padding=1)
        self.trans3 = torch.nn.ConvTranspose2d(64, 32, 3, padding=1)
        self.trans4 = torch.nn.ConvTranspose2d(32, 3, 3, padding=1)
        self.mp = torch.nn.MaxPool2d(2, return_indices=True)
        self.up = torch.nn.MaxUnpool2d(2)
        self.relu = torch.nn.ReLU()

    def encoder(self, x):
        """Encode `x`, returning the code plus per-stage pool indices and sizes."""
        sizes = []
        indices = []
        for conv in (self.conv1, self.conv2, self.conv3):
            x = self.relu(conv(x))
            sizes.append(x.size())          # pre-pool size, needed by unpool
            x, ind = self.mp(x)
            indices.append(ind)
        x = self.relu(self.conv4(x))
        return (x, indices[0], sizes[0], indices[1], sizes[1], indices[2], sizes[2])

    def decoder(self, x, ind1, s1, ind2, s2, ind3, s3):
        """Decode a latent code, unpooling with the recorded indices/sizes."""
        # Stages are inverted in reverse encoder order: deepest indices first.
        for trans, ind, size in ((self.trans1, ind3, s3),
                                 (self.trans2, ind2, s2),
                                 (self.trans3, ind1, s1)):
            x = self.relu(trans(x))
            x = self.up(x, ind, output_size=size)
        return self.relu(self.trans4(x))

    def forward(self, x):
        """Full round trip: encode then reconstruct."""
        (x, ind1, s1, ind2, s2, ind3, s3) = self.encoder(x)
        return self.decoder(x, ind1, s1, ind2, s2, ind3, s3)
class CiteseerBiGraph(BaseData):
    """Citeseer bipartite-graph dataset descriptor.

    Registers the dataset name and a content manifest describing each file
    (download filename, md5 checksum, loader and preprocessing pipeline).
    The actual download/caching logic lives in `BaseData`.
    """

    def __init__(self, data_root: Optional[str]=None) -> None:
        super().__init__('citeseer_bigraph', data_root)
        # Manifest: sizes/dimensions plus per-artifact file, checksum, loader
        # and preprocessing (features are L1-normalized, labels cast to long).
        self._content = {'num_u_classes': 6, 'num_u_vertices': 1237, 'num_v_vertices': 742, 'num_edges': 1665, 'dim_u_features': 3703, 'dim_v_features': 3703, 'u_features': {'upon': [{'filename': 'u_features.pkl', 'md5': 'd8c1ccd6026cbb1f05cc3c534b239e00'}], 'loader': load_from_pickle, 'preprocess': [to_tensor, partial(norm_ft, ord=1)]}, 'v_features': {'upon': [{'filename': 'v_features.pkl', 'md5': '7ca1d16ad557945f9b66ef6ac40c0210'}], 'loader': load_from_pickle, 'preprocess': [to_tensor, partial(norm_ft, ord=1)]}, 'edge_list': {'upon': [{'filename': 'edge_list.pkl', 'md5': '2a632085fb8f691af6399fbb71dc1f67'}], 'loader': load_from_pickle}, 'u_labels': {'upon': [{'filename': 'u_labels.pkl', 'md5': 'b4d0034c29f6f5b6da17f3037c2af605'}], 'loader': load_from_pickle, 'preprocess': [to_long_tensor]}}
def make_chem_data(rule, train=True):
    """Load the X/Y numpy arrays for one logic rule from `foldername`.

    Args:
        rule: Rule identifier embedded in the file names.
        train: Selects the 'train' split when True, otherwise 'test'.

    Returns:
        Tuple of (x_data, y_data) numpy arrays.
    """
    split = 'train' if train else 'test'
    x_data = np.load(osp.join(foldername, f'logic_{rule}_X_{split}.npy'))
    y_data = np.load(osp.join(foldername, f'logic_{rule}_Y_{split}.npy'))
    return (x_data, y_data)
def test_pvt():
    """Smoke-test PyramidVisionTransformer construction and output shapes."""
    # Invalid constructor arguments must be rejected.
    with pytest.raises(TypeError):
        PyramidVisionTransformer(pretrained=123)
    with pytest.raises(AssertionError):
        PyramidVisionTransformer(pretrain_img_size=(224, 224, 224))

    # Absolute position-embedding variant runs end to end.
    model = PyramidVisionTransformer(pretrain_img_size=224, use_abs_pos_embed=True)
    model.init_weights()
    model(torch.randn((1, 3, 224, 224)))

    # Default model on several input sizes; check all four pyramid levels.
    cases = [
        ((1, 3, 32, 32), [(1, 64, 8, 8), (1, 128, 4, 4), (1, 320, 2, 2), (1, 512, 1, 1)]),
        ((1, 3, 33, 33), [(1, 64, 8, 8), (1, 128, 4, 4), (1, 320, 2, 2), (1, 512, 1, 1)]),
        ((1, 3, 112, 137), [(1, 64, 28, 34), (1, 128, 14, 17), (1, 320, 7, 8), (1, 512, 3, 4)]),
    ]
    for in_shape, level_shapes in cases:
        model = PyramidVisionTransformer()
        outs = model(torch.randn(in_shape))
        for out, expected in zip(outs, level_shapes):
            assert out.shape == expected
def seresnet200b(**kwargs):
    """Build the SE-ResNet-200b model (the 'b' variant uses conv1_stride=False).

    All keyword arguments are forwarded to `get_seresnet`.
    """
    return get_seresnet(blocks=200, conv1_stride=False, model_name='seresnet200b', **kwargs)
# BUG FIX: the decorator line had been truncated to a bare `.parametrize(...)`,
# which is a syntax error; restore the `@pytest.mark` prefix.
@pytest.mark.parametrize('model_name, with_cls_token, share_embeddings, return_dataframe', [('saint', False, True, False), ('saint', True, True, False), ('saint', False, False, False), ('saint', False, True, True), ('saint', True, True, True), ('saint', False, False, True), ('fttransformer', False, True, False), ('fttransformer', True, True, False), ('fttransformer', False, False, False), ('fttransformer', False, True, True), ('fttransformer', True, True, True), ('fttransformer', False, False, True), ('tabfastformer', False, True, False), ('tabfastformer', True, True, False), ('tabfastformer', False, False, False), ('tabfastformer', False, True, True), ('tabfastformer', True, True, True), ('tabfastformer', False, False, True), ('tabperceiver', False, True, False), ('tabperceiver', False, False, False), ('tabperceiver', False, True, False), ('tabperceiver', False, False, True)])
def test_transformer_family_models(model_name, with_cls_token, share_embeddings, return_dataframe):
    """Check that Tab2Vec output width matches n_columns * input_dim for each model."""
    embed_cols = ['a', 'b']
    cont_cols = ['c', 'd']
    tab_preprocessor = TabPreprocessor(cat_embed_cols=embed_cols, continuous_cols=cont_cols, for_transformer=True, with_cls_token=with_cls_token, shared_embed=share_embeddings)
    X_tab = tab_preprocessor.fit_transform(df_init)
    params = {'column_idx': tab_preprocessor.column_idx, 'cat_embed_input': tab_preprocessor.cat_embed_input, 'continuous_cols': tab_preprocessor.continuous_cols}
    deeptabular = _build_model(model_name, params)
    model = WideDeep(deeptabular=deeptabular)
    t2v = Tab2Vec(model, tab_preprocessor, return_dataframe=return_dataframe)
    t2v_out = t2v.transform(df_t2v)
    # One embedding of width input_dim per (categorical + continuous) column.
    out_dim = (len(embed_cols) + len(cont_cols)) * deeptabular.input_dim
    assert t2v_out.shape[1] == out_dim
class TFAlbertModelTester():
    """Builds an AlbertConfig plus dummy inputs and exercises each TF-ALBERT head.

    BUG FIX: `__init__` previously ignored every keyword argument and re-assigned
    the hard-coded default values, so callers could not parameterize the tester.
    The parameters are now stored; defaults are unchanged, so existing callers
    see identical behavior.
    """

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        # Store the actual arguments instead of hard-coding their defaults.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels and a matching AlbertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = AlbertConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, embedding_size=self.embedding_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)
        return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)

    def create_and_check_albert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Base model accepts dict, list and tensor inputs; check output shapes."""
        model = TFAlbertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_albert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Pretraining head: MLM logits plus sentence-order-prediction logits."""
        config.num_labels = self.num_labels
        model = TFAlbertForPreTraining(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_albert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Masked-LM head emits per-token vocabulary logits."""
        model = TFAlbertForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_albert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Sequence-classification head emits one logit vector per example."""
        config.num_labels = self.num_labels
        model = TFAlbertForSequenceClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_albert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """QA head emits per-token start/end span logits."""
        model = TFAlbertForQuestionAnswering(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_albert_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Multiple-choice head: inputs are tiled across the choice dimension."""
        config.num_choices = self.num_choices
        model = TFAlbertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids}
        result = model(inputs)
        self.parent.assertListEqual(list(result['logits'].shape), [self.batch_size, self.num_choices])

    def create_and_check_albert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Token-classification head emits per-token label logits."""
        config.num_labels = self.num_labels
        model = TFAlbertForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertListEqual(list(result['logits'].shape), [self.batch_size, self.seq_length, self.num_labels])

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict)."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return (config, inputs_dict)
def _pcfg(url='', hf_hub='', mean=None, std=None):
return dict(url=url, hf_hub=hf_hub, mean=mean, std=std) |
def autoselect(method: str, source: Optional[str]=None, backend: Optional[str]=None, **kwargs) -> StainNormalizer:
    """Select the best stain-normalizer implementation for the active backend.

    Args:
        method: Stain normalization method name.
        source: Optional fit source; ignored when None or 'dataset'.
        backend: 'tensorflow', 'torch' or 'opencv'; defaults to sf.backend().
        **kwargs: Forwarded to the normalizer constructor.

    Returns:
        A fitted (if `source` given) StainNormalizer instance.

    Raises:
        errors.UnrecognizedBackendError: For unknown backend names.
    """
    if backend is None:
        backend = sf.backend()
    if backend == 'tensorflow':
        import slideflow.norm.tensorflow
        normalizer_cls = sf.norm.tensorflow.TensorflowStainNormalizer
    elif backend == 'torch':
        import slideflow.norm.torch
        normalizer_cls = sf.norm.torch.TorchStainNormalizer
    elif backend == 'opencv':
        normalizer_cls = StainNormalizer
    else:
        raise errors.UnrecognizedBackendError
    # Fall back to the generic (OpenCV) implementation when the backend-native
    # class does not support the requested method.
    if method not in normalizer_cls.normalizers:
        normalizer_cls = StainNormalizer
    normalizer = normalizer_cls(method, **kwargs)
    if source is not None and source != 'dataset':
        normalizer.fit(source)
    return normalizer
def get_hashes_and_lines(raw_line):
    """Return (md5_hexdigest, raw_line) for a raw bytes line.

    Useful for keying lines by content hash (e.g. for deduplication).
    """
    # Renamed from `hash` to avoid shadowing the builtin.
    line_hash = hashlib.md5(raw_line).hexdigest()
    return (line_hash, raw_line)
def plot_precision_recall(data, ax=None):
    """Plot micro/macro-averaged precision-recall behavior over a threshold sweep.

    Left panel: macro precision and recall vs. probability threshold.
    Right panel: precision-recall curves for micro and macro averaging.

    NOTE(review): the `ax` parameter is ignored — it is immediately overwritten
    by the axes created via plt.subplots below; confirm whether callers expect
    to pass their own axes.
    """
    # 50 evenly spaced thresholds in [0, 1).
    df = pandas.DataFrame({'threshold': numpy.linspace(0, 1.0, 50, endpoint=False)})
    # `score` presumably returns a row with `precision`/`recall` fields — TODO confirm.
    micro = df.apply((lambda r: score(data, average='micro', threshold=r.threshold)), axis=1)
    micro['threshold'] = df.threshold
    micro['micro'] = micro.precision
    macro = df.apply((lambda r: score(data, average='macro', threshold=r.threshold)), axis=1)
    macro['threshold'] = df.threshold
    macro['macro'] = macro.precision
    (fig, (ax2, ax)) = plt.subplots(1, 2, figsize=(8, 4))
    # Macro metrics as a function of the threshold (left panel).
    macro.plot.line(ax=ax2, y=['precision', 'recall'], x='threshold', ylim=(0.0, 1.0))
    # Precision-recall curves for both averagings (right panel).
    micro.plot.line(ax=ax, y='micro', x='recall', ylim=(0.0, 1.0), xlim=(0, 1))
    macro.plot.line(ax=ax, y='macro', x='recall', ylim=(0.0, 1.0), xlim=(0, 1))
    ax.set_yticks(numpy.arange(0.0, 1.0, 0.1))
    ax.grid(True)
    ax.set_ylabel('Precision')
    ax.set_xlabel('Recall')
    ax.set_aspect('equal')
    ax2.set_aspect('equal')
    ax2.set_yticks(numpy.arange(0.0, 1.0, 0.1))
    ax2.grid(True)
    ax2.set_xlabel('Probability threshold for "unknown" class')
    ax2.set_ylabel('Performance metric')
    fig.tight_layout()
def hawq_top(fp32_model, q_model, dataloader, criterion, enable_act):
    """Compute per-op Hessian-trace-based sensitivity scores (HAWQ-style).

    Combines average Hessian traces with the squared L2 quantization
    perturbation of each weight (and, optionally, activation traces).

    NOTE(review): `criterion` is unused in this body — confirm whether it is
    consumed inside HessianTrace or is dead.
    """
    # Remember training mode so it can be restored before returning.
    orig_eval = True
    if fp32_model.training:
        orig_eval = False
        fp32_model.eval()
    ht = HessianTrace(fp32_model, dataloader=dataloader, q_model=q_model)
    traces = ht.get_avg_traces(enable_act, num_sample=0)
    op_to_traces = traces['weight']
    # Strip the '_model.' prefix from the quantized state-dict keys so they
    # align with the fp32 model's parameter names.
    q_model_state_dict = {}
    for key in q_model.state_dict().keys():
        length = len('_model.')
        new_key = key[length:]
        q_model_state_dict[new_key] = q_model.state_dict()[key]
    weight_quant_loss = compare_weights(ht.model.state_dict(), q_model_state_dict)
    # Squared L2 norm of the quantization perturbation for each weight tensor.
    pertur_lst = {}
    for key in weight_quant_loss:
        op_float_tensor = weight_quant_loss[key]['float']
        op_qnt_tensor = weight_quant_loss[key]['quantized'].dequantize()
        diff_l2 = (torch.norm((op_float_tensor - op_qnt_tensor), p=2) ** 2)
        pertur_lst[key] = diff_l2
    if enable_act:
        act_to_traces = traces['activation']
        # NOTE(review): zipping three dicts' keys assumes their orders are
        # aligned op-for-op — confirm upstream guarantees.
        for (trace_i, pertur_i, act_i) in zip(op_to_traces.keys(), pertur_lst.keys(), act_to_traces.keys()):
            op_to_traces[trace_i] = ((pertur_lst[pertur_i] * op_to_traces[trace_i]) + act_to_traces[act_i])
    else:
        # NOTE(review): this branch is a no-op (assigns each trace to itself);
        # it looks like it may have been meant to scale by the perturbation —
        # confirm against the reference implementation.
        for (trace_i, pertur_i) in zip(op_to_traces.keys(), pertur_lst.keys()):
            op_to_traces[trace_i] = op_to_traces[trace_i]
    if (orig_eval is False):
        fp32_model.train()
    return op_to_traces
def get_random_walk_eval(sos_key, nodes, edges, nsample=5):
    """Sample random walks from `sos_key` and report diversity statistics.

    Returns a dict with self-BLEU and self-edit-distance of the decoded samples.

    NOTE(review): the loop runs nsample + 1 times (original used
    `while cnt <= nsample`); confirm whether the extra sample is intended.
    """
    samples = []
    for _ in range(nsample + 1):
        walk = random_walk_from_sos(sos_key, nodes, edges)
        samples.append(tokenizer.decode(walk, skip_special_tokens=True))
    return {
        'rand_walk_self_bleu': self_bleu(samples),
        'rand_walk_self_edit': self_edit_distance(samples),
    }
def _conv_flops_compute(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1):
assert ((weight.shape[1] * groups) == input.shape[1])
batch_size = input.shape[0]
in_channels = input.shape[1]
out_channels = weight.shape[0]
kernel_dims = list(weight.shape[2:])
input_dims = list(input.shape[2:])
length = len(input_dims)
paddings = (padding if (type(padding) is tuple) else ((padding,) * length))
strides = (stride if (type(stride) is tuple) else ((stride,) * length))
dilations = (dilation if (type(dilation) is tuple) else ((dilation,) * length))
output_dims = []
for (idx, input_dim) in enumerate(input_dims):
output_dim = ((((input_dim + (2 * paddings[idx])) - ((dilations[idx] * (kernel_dims[idx] - 1)) + 1)) // strides[idx]) + 1)
output_dims.append(output_dim)
filters_per_channel = (out_channels // groups)
conv_per_position_macs = ((int(_prod(kernel_dims)) * in_channels) * filters_per_channel)
active_elements_count = (batch_size * int(_prod(output_dims)))
overall_conv_macs = (conv_per_position_macs * active_elements_count)
overall_conv_flops = (2 * overall_conv_macs)
bias_flops = 0
if (bias is not None):
bias_flops = (out_channels * active_elements_count)
return (int((overall_conv_flops + bias_flops)), int(overall_conv_macs)) |
class AdversarialEpocher(SemiSupervisedEpocher, ABC):
    """Semi-supervised epocher with an adversarial (GAN-style) regularizer.

    The segmentation model acts as the generator; a discriminator is trained
    to distinguish predictions on labeled vs. unlabeled images, and the
    generator is penalized for unlabeled predictions the discriminator can
    tell apart (weighted by `reg_weight`).
    """

    def _assertion(self):
        # Parent-class sanity checks are intentionally disabled for this epocher.
        pass

    def __init__(self, *, model: nn.Module, optimizer: T_optim, labeled_loader: T_loader, unlabeled_loader: T_loader, sup_criterion: T_loss, num_batches: int, cur_epoch=0, device='cpu', two_stage: bool=False, disable_bn: bool=False, discriminator=None, discr_optimizer=None, reg_weight=None, dis_consider_image: bool, **kwargs) -> None:
        """Store the discriminator, its optimizer and the adversarial weight.

        `dis_consider_image` controls whether the discriminator sees the input
        image concatenated with the softmaxed prediction, or the prediction alone.
        """
        super().__init__(model=model, optimizer=optimizer, labeled_loader=labeled_loader, unlabeled_loader=unlabeled_loader, sup_criterion=sup_criterion, num_batches=num_batches, cur_epoch=cur_epoch, device=device, two_stage=two_stage, disable_bn=disable_bn, **kwargs)
        self._discriminator = discriminator
        self._discr_optimizer = discr_optimizer
        self._reg_weight = float(reg_weight)
        assert isinstance(discriminator, nn.Module)
        assert isinstance(discr_optimizer, torch.optim.Optimizer)
        self._dis_consider_image = dis_consider_image

    def _run(self, **kwargs):
        # Record learning rates, put the generator in train mode, and delegate.
        self.meters['lr'].add(get_lrs_from_optimizer(self._optimizer))
        self._model.train()
        return self._run_adver(**kwargs)

    def configure_meters(self, meters: MeterInterface) -> MeterInterface:
        """Swap the parent's reg_loss meter for adversarial-specific meters."""
        meters = super(AdversarialEpocher, self).configure_meters(meters)
        meters.delete_meter('reg_loss')
        with self.meters.focus_on('adv_reg'):
            meters.register_meter('dis_loss', AverageValueMeter())
            meters.register_meter('gen_loss', AverageValueMeter())
            meters.register_meter('reg_weight', AverageValueMeter())
        return meters

    def _run_adver(self, **kwargs):
        """Main loop: alternate generator and discriminator updates per batch."""
        criterion = nn.BCELoss()
        TRUE_LABEL = 1.0
        FAKE_LABEL = 0.0
        optimizerD = self._discr_optimizer
        optimizerG = self._optimizer
        with self.meters.focus_on('adv_reg'):
            self.meters['reg_weight'].add(self._reg_weight)
        for (self.cur_batch_num, labeled_data) in zip(self.indicator, self._labeled_loader):
            ((labeled_image, _), labeled_target, labeled_filename, _, label_group) = self._unzip_data(labeled_data, self._device)
            if (self._reg_weight > 0):
                # NOTE(review): `unlabeled_iter` is defined below as a plain
                # method, yet it is consumed with next() here — presumably a
                # caching property/decorator is applied elsewhere; confirm.
                unlabeled_data = next(self.unlabeled_iter)
                ((unlabeled_image, _), _, unlabeled_filename, unl_partition, unl_group) = self._unzip_data(unlabeled_data, self._device)
            # Trace the first few batches' filenames for debugging.
            if (self.cur_batch_num < 5):
                if (self._reg_weight > 0):
                    logger.trace(f"{self.__class__.__name__}--cur_batch:{self.cur_batch_num}, labeled_filenames: {','.join(labeled_filename)}, unlabeled_filenames: {','.join(unlabeled_filename)}")
                else:
                    logger.trace(f"{self.__class__.__name__}--cur_batch:{self.cur_batch_num}, labeled_filenames: {','.join(labeled_filename)}")
            # ---- Generator step: supervised loss + adversarial term. ----
            self._optimizer.zero_grad()
            labeled_logits = self._model(labeled_image)
            onehot_target = class2one_hot(labeled_target.squeeze(1), self.num_classes)
            sup_loss = self._sup_criterion(labeled_logits.softmax(1), onehot_target)
            generator_err = torch.tensor(0, device=self.device, dtype=torch.float)
            if (self._reg_weight > 0):
                unlabeled_logits = self._model(unlabeled_image)
                if self._dis_consider_image:
                    discr_output_unlabeled = self._discriminator(torch.cat([unlabeled_image, unlabeled_logits.softmax(1)], dim=1))
                else:
                    discr_output_unlabeled = self._discriminator(unlabeled_logits.softmax(1))
                # Generator tries to make unlabeled predictions look "real".
                generator_err = criterion(discr_output_unlabeled, torch.zeros_like(discr_output_unlabeled).fill_(TRUE_LABEL))
            generator_loss = (sup_loss + (self._reg_weight * generator_err))
            generator_loss.backward()
            optimizerG.step()
            if self.on_master():
                with torch.no_grad():
                    self.meters['sup_loss'].add(sup_loss.item())
                    self.meters['sup_dice'].add(labeled_logits.max(1)[1], labeled_target.squeeze(1), group_name=label_group)
                    with self.meters.focus_on('adv_reg'):
                        self.meters['gen_loss'].add(generator_err.item())
            # ---- Discriminator step: real = labeled preds, fake = unlabeled. ----
            disc_loss = torch.tensor(0, device=self.device, dtype=torch.float)
            if (self._reg_weight > 0):
                self._discriminator.zero_grad()
                if self._dis_consider_image:
                    discr_output_labeled = self._discriminator(torch.cat([labeled_image, labeled_logits.detach().softmax(1)], dim=1))
                else:
                    discr_output_labeled = self._discriminator(labeled_logits.detach().softmax(1))
                discr_err_labeled = criterion(discr_output_labeled, torch.zeros_like(discr_output_labeled).fill_(TRUE_LABEL))
                if self._dis_consider_image:
                    discr_output_unlabeled = self._discriminator(torch.cat([unlabeled_image, unlabeled_logits.detach().softmax(1)], dim=1))
                else:
                    discr_output_unlabeled = self._discriminator(unlabeled_logits.detach().softmax(1))
                discr_err_unlabeled = criterion(discr_output_unlabeled, torch.zeros_like(discr_output_unlabeled).fill_(FAKE_LABEL))
                disc_loss = (discr_err_labeled + discr_err_unlabeled)
                (disc_loss * self._reg_weight).backward()
                optimizerD.step()
                if self.on_master():
                    with self.meters.focus_on('adv_reg'):
                        self.meters['dis_loss'].add(disc_loss.item())
            report_dict = self.meters.statistics()
            self.indicator.set_postfix_statics(report_dict, cache_time=10)
        # NOTE(review): `_cache` is not defined in this view — presumably a
        # module-level helper; confirm it exists at the call site.
        _cache()

    def unlabeled_iter(self):
        # Fresh iterator over the unlabeled loader (see NOTE in _run_adver).
        return iter(self._unlabeled_loader)
class TestCommutationAnalysis(QiskitTestCase):
    """Unit tests for the CommutationAnalysis transpiler pass.

    Each test builds a circuit, runs the pass, and compares the resulting
    'commutation_set' property against hand-computed groups of DAG node ids.
    """

    def setUp(self):
        self.pass_ = CommutationAnalysis()
        self.pset = self.pass_.property_set = PropertySet()

    def assertCommutationSet(self, result, expected):
        """Compare the pass result against `expected`, ignoring non-wire keys.

        Node ids within each commutation group are sorted on both sides so the
        comparison is order-insensitive.
        """
        result_to_compare = {}
        for (qbit_str, sets) in result.items():
            # Skip non-string keys (the pass also indexes by DAG nodes).
            if (not isinstance(qbit_str, str)):
                continue
            result_to_compare[qbit_str] = []
            for commutation_set in sets:
                result_to_compare[qbit_str].append(sorted([node._node_id for node in commutation_set]))
        for (qbit_str, sets) in expected.items():
            for commutation_set in sets:
                commutation_set.sort()
        self.assertDictEqual(result_to_compare, expected)

    def test_commutation_set_property_is_created(self):
        """Running the pass must populate the 'commutation_set' property."""
        qr = QuantumRegister(3, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.h(qr)
        dag = circuit_to_dag(circuit)
        self.assertIsNone(self.pset['commutation_set'])
        self.pass_.run(dag)
        self.assertIsNotNone(self.pset['commutation_set'])

    def test_all_gates(self):
        """A mix of 1q and 2q gates on two qubits."""
        qr = QuantumRegister(2, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.h(qr[0])
        circuit.x(qr[0])
        circuit.y(qr[0])
        circuit.t(qr[0])
        circuit.s(qr[0])
        circuit.rz(0.5, qr[0])
        circuit.u1(0.5, qr[0])
        circuit.u2(0.5, 0.6, qr[0])
        circuit.u3(0.5, 0.6, 0.7, qr[0])
        circuit.cx(qr[0], qr[1])
        circuit.cy(qr[0], qr[1])
        circuit.cz(qr[0], qr[1])
        dag = circuit_to_dag(circuit)
        self.pass_.run(dag)
        # The diagonal run (s, rz, u1 and the preceding t) commutes as a group.
        expected = {'qr[0]': [[1], [5], [6], [7], [8, 9, 10, 11], [12], [13], [14], [15], [16], [2]], 'qr[1]': [[3], [14], [15], [16], [4]]}
        self.assertCommutationSet(self.pset['commutation_set'], expected)

    def test_non_commutative_circuit(self):
        """Independent H gates: each sits in its own group per wire."""
        qr = QuantumRegister(3, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.h(qr)
        dag = circuit_to_dag(circuit)
        self.pass_.run(dag)
        expected = {'qr[0]': [[1], [7], [2]], 'qr[1]': [[3], [8], [4]], 'qr[2]': [[5], [9], [6]]}
        self.assertCommutationSet(self.pset['commutation_set'], expected)

    def test_non_commutative_circuit_2(self):
        """CX chains sharing a qubit do not commute on the shared wire."""
        qr = QuantumRegister(3, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        circuit.h(qr[2])
        circuit.cx(qr[1], qr[2])
        dag = circuit_to_dag(circuit)
        self.pass_.run(dag)
        expected = {'qr[0]': [[1], [7], [2]], 'qr[1]': [[3], [7], [9], [4]], 'qr[2]': [[5], [8], [9], [6]]}
        self.assertCommutationSet(self.pset['commutation_set'], expected)

    def test_commutative_circuit(self):
        """Two CXs targeting the same qubit commute on the target wire."""
        qr = QuantumRegister(3, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        circuit.h(qr[2])
        circuit.cx(qr[2], qr[1])
        dag = circuit_to_dag(circuit)
        self.pass_.run(dag)
        expected = {'qr[0]': [[1], [7], [2]], 'qr[1]': [[3], [7, 9], [4]], 'qr[2]': [[5], [8], [9], [6]]}
        self.assertCommutationSet(self.pset['commutation_set'], expected)

    def test_commutative_circuit_2(self):
        """Z commutes with the CX control on the same wire."""
        qr = QuantumRegister(3, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        circuit.z(qr[0])
        circuit.h(qr[2])
        circuit.cx(qr[2], qr[1])
        dag = circuit_to_dag(circuit)
        self.pass_.run(dag)
        expected = {'qr[0]': [[1], [7, 8], [2]], 'qr[1]': [[3], [7, 10], [4]], 'qr[2]': [[5], [9], [10], [6]]}
        self.assertCommutationSet(self.pset['commutation_set'], expected)

    def test_commutative_circuit_3(self):
        """Larger mixture of commuting Z/X/CX gates across three qubits."""
        qr = QuantumRegister(3, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        circuit.h(qr[2])
        circuit.z(qr[0])
        circuit.cx(qr[2], qr[1])
        circuit.cx(qr[0], qr[1])
        circuit.x(qr[2])
        circuit.z(qr[0])
        circuit.cx(qr[1], qr[2])
        dag = circuit_to_dag(circuit)
        self.pass_.run(dag)
        expected = {'qr[0]': [[1], [7, 9, 11, 13], [2]], 'qr[1]': [[3], [7, 10, 11], [14], [4]], 'qr[2]': [[5], [8], [10], [12, 14], [6]]}
        self.assertCommutationSet(self.pset['commutation_set'], expected)

    def test_jordan_wigner_type_circuit(self):
        """CX ladder around a central Z, as in Jordan-Wigner string circuits."""
        qr = QuantumRegister(6, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        circuit.cx(qr[1], qr[2])
        circuit.cx(qr[2], qr[3])
        circuit.cx(qr[3], qr[4])
        circuit.cx(qr[4], qr[5])
        circuit.z(qr[5])
        circuit.cx(qr[4], qr[5])
        circuit.cx(qr[3], qr[4])
        circuit.cx(qr[2], qr[3])
        circuit.cx(qr[1], qr[2])
        circuit.cx(qr[0], qr[1])
        dag = circuit_to_dag(circuit)
        self.pass_.run(dag)
        expected = {'qr[0]': [[1], [13, 23], [2]], 'qr[1]': [[3], [13], [14, 22], [23], [4]], 'qr[2]': [[5], [14], [15, 21], [22], [6]], 'qr[3]': [[7], [15], [16, 20], [21], [8]], 'qr[4]': [[9], [16], [17, 19], [20], [10]], 'qr[5]': [[11], [17], [18], [19], [12]]}
        self.assertCommutationSet(self.pset['commutation_set'], expected)

    def test_all_commute_circuit(self):
        """Symmetric CX/Z pattern where every gate commutes with its neighbors."""
        qr = QuantumRegister(5, 'qr')
        circuit = QuantumCircuit(qr)
        circuit.cx(qr[0], qr[1])
        circuit.cx(qr[2], qr[1])
        circuit.cx(qr[4], qr[3])
        circuit.cx(qr[2], qr[3])
        circuit.z(qr[0])
        circuit.z(qr[4])
        circuit.cx(qr[0], qr[1])
        circuit.cx(qr[2], qr[1])
        circuit.cx(qr[4], qr[3])
        circuit.cx(qr[2], qr[3])
        dag = circuit_to_dag(circuit)
        self.pass_.run(dag)
        expected = {'qr[0]': [[1], [11, 15, 17], [2]], 'qr[1]': [[3], [11, 12, 17, 18], [4]], 'qr[2]': [[5], [12, 14, 18, 20], [6]], 'qr[3]': [[7], [13, 14, 19, 20], [8]], 'qr[4]': [[9], [13, 16, 19], [10]]}
        self.assertCommutationSet(self.pset['commutation_set'], expected)
class RandomColorDistortion():
    """Callable transform that applies a color-distortion pipeline of strength `s`."""

    def __init__(self, s: float=1.0):
        # Build the underlying distortion pipeline once at construction time.
        self.color_distort = compose_color_distortion(s=s)

    def __call__(self, x):
        """Apply the composed color distortion to the input `x`."""
        return self.color_distort(x)
def train(cfg, local_rank, distributed, tblogger=None, transfer_weight=False, adjust_lr=False, skip_val=False, no_head=False):
    """Build the detection model, restore a checkpoint, and run the training loop.

    Args:
        cfg: Config node providing model/solver/dataset settings.
        local_rank: Local GPU rank for distributed data parallel.
        distributed: Whether to wrap the model in DistributedDataParallel.
        tblogger: Optional tensorboard logger passed through to do_train.
        transfer_weight: Load model weights only (no optimizer/scheduler state).
        adjust_lr: Adjust the scheduler when loading the checkpoint.
        skip_val: Skip building validation loaders.
        no_head: Skip loading head weights from the checkpoint.

    Returns:
        The (possibly DDP-wrapped) trained model.
    """
    model = build_detection_model(cfg)
    device = torch.device('cuda')
    model.to(device)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)
    if distributed:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank], output_device=local_rank, broadcast_buffers=False, find_unused_parameters=True)
    # Mutable training state shared with do_train (resumable via checkpoint).
    arguments = {}
    arguments['iteration'] = 0
    arguments['person_pool'] = MemoryPool()
    output_dir = cfg.OUTPUT_DIR
    # Only rank 0 writes checkpoints.
    save_to_disk = (get_rank() == 0)
    checkpointer = ActionCheckpointer(cfg, model, optimizer, scheduler, output_dir, save_to_disk)
    extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, model_weight_only=transfer_weight, adjust_scheduler=adjust_lr, no_head=no_head)
    arguments.update(extra_checkpoint_data)
    data_loader = make_data_loader(cfg, is_train=True, is_distributed=distributed, start_iter=arguments['iteration'])
    checkpoint_period = cfg.SOLVER.CHECKPOINT_PERIOD
    val_period = cfg.SOLVER.EVAL_PERIOD
    mem_active = has_memory(cfg.MODEL.HIT_STRUCTURE)
    if (not skip_val):
        dataset_names_val = cfg.DATASETS.TEST
        data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    else:
        dataset_names_val = []
        data_loaders_val = []
    do_train(model, data_loader, optimizer, scheduler, checkpointer, device, checkpoint_period, arguments, tblogger, val_period, dataset_names_val, data_loaders_val, distributed, mem_active)
    return model
class Discriminator(nn.Module):
    """SAGAN-style discriminator: spectral-norm convs with self-attention.

    Input is first normalized by `preprocess_additional` (batch norm, tanh, or
    a fixed [-1, 1] rescale depending on `preprocess_GAN_mode`), then passed
    through four strided conv blocks interleaved with two self-attention
    layers, ending in a 1-channel patch output.
    """

    def __init__(self, preprocess_GAN_mode, input_channel, batch_size=64, image_size=64, conv_dim=64):
        super(Discriminator, self).__init__()
        self.imsize = image_size
        layer1 = []
        layer2 = []
        layer3 = []
        last = []
        # Three strided SN-conv blocks: input_channel -> 64 -> 128 -> 256.
        layer1.append(SpectralNorm(nn.Conv2d(input_channel, conv_dim, 4, 2, 1)))
        layer1.append(nn.LeakyReLU(0.1))
        curr_dim = conv_dim
        layer2.append(SpectralNorm(nn.Conv2d(curr_dim, (curr_dim * 2), 4, 2, 1)))
        layer2.append(nn.LeakyReLU(0.1))
        curr_dim = (curr_dim * 2)
        layer3.append(SpectralNorm(nn.Conv2d(curr_dim, (curr_dim * 2), 4, 2, 1)))
        layer3.append(nn.LeakyReLU(0.1))
        curr_dim = (curr_dim * 2)
        # BUG FIX: this guard previously tested `self.imsize == 65`, so with the
        # default image_size=64 `self.l4` was never created even though
        # forward() unconditionally calls it (AttributeError at runtime).
        if (self.imsize == 64):
            layer4 = []
            layer4.append(SpectralNorm(nn.Conv2d(curr_dim, (curr_dim * 2), 4, 2, 1)))
            layer4.append(nn.LeakyReLU(0.1))
            self.l4 = nn.Sequential(*layer4)
            curr_dim = (curr_dim * 2)
        self.l1 = nn.Sequential(*layer1)
        self.l2 = nn.Sequential(*layer2)
        self.l3 = nn.Sequential(*layer3)
        # Final 4x4 conv produces the 1-channel discriminator output.
        last.append(nn.Conv2d(curr_dim, 1, 4))
        self.last = nn.Sequential(*last)
        # Self-attention after the 256- and 512-channel stages.
        self.attn1 = Self_Attn(256, 'relu')
        self.attn2 = Self_Attn(512, 'relu')
        if (preprocess_GAN_mode == 1):
            self.preprocess_additional = nn.BatchNorm2d(input_channel)
        elif (preprocess_GAN_mode == 2):
            self.preprocess_additional = nn.Tanh()
        elif (preprocess_GAN_mode == 3):
            # Rescale [0, 255] pixel input to [-1, 1].
            self.preprocess_additional = (lambda x: (2 * ((x / 255) - 0.5)))
        else:
            raise ValueError('preprocess_GAN_mode should be 1:bn or 2:tanh or 3:-1 - 1')

    def forward(self, x):
        """Return [patch_output, attn1_map, attn2_map] for input images `x`."""
        x = self.preprocess_additional(x)
        out = self.l1(x)
        out = self.l2(out)
        out = self.l3(out)
        (out, p1) = self.attn1(out)
        out = self.l4(out)
        (out, p2) = self.attn2(out)
        out = self.last(out)
        return [out, p1, p2]
def custom_figure(n_panels=2, width=8.0, panel_aspect_ratio=1.0, extra_top_space=False, reduce_vertical_sep=False):
    """Create a matplotlib figure whose margins have fixed absolute sizes.

    Relies on module-level absolute margin constants (margin_l_absolute,
    margin_r_absolute, margin_t_absolute[_extra], margin_b_absolute,
    margin_sep_absolute, margin_sep_small_absolute), all in the same units
    as `width` (presumably inches -- confirm).

    Args:
        n_panels: int (horizontal panel count, one row) or a (horizontal,
            vertical) pair.
        width: total figure width.
        panel_aspect_ratio: width / height of each panel.
        extra_top_space: use the larger top margin (e.g. for titles).
        reduce_vertical_sep: use the small vertical separation between rows.

    Returns:
        The created matplotlib Figure with subplot spacing already applied.
    """
    # collections.Sequence was a deprecated alias removed in Python 3.10;
    # collections.abc.Sequence is the supported spelling.
    from collections.abc import Sequence

    if isinstance(n_panels, Sequence):
        (n_panels_h, n_panels_v) = n_panels
    else:
        n_panels_h = n_panels
        n_panels_v = 1
    _margin_t_absolute = (margin_t_absolute_extra if extra_top_space else margin_t_absolute)
    # Horizontal margins as fractions of the figure width.
    margin_l = (margin_l_absolute / width)
    margin_r = (margin_r_absolute / width)
    margin_l_subsequent = margin_l
    if (n_panels_h > 2):
        # Many panels: drop the inter-panel separation and tighten left edges.
        margin_l_subsequent = margin_r
    margin_sep = (margin_sep_absolute / width)
    if (n_panels_h > 2):
        margin_sep = 0
    margin_sep_total = ((margin_r + margin_sep) + margin_l_subsequent)
    panel_width = ((((1.0 - margin_l) - margin_r) - ((n_panels_h - 1) * margin_sep_total)) / n_panels_h)
    wspace = (margin_sep_total / panel_width)
    # Figure height follows from the requested panel aspect ratio.
    panel_height_absolute = ((panel_width * width) / panel_aspect_ratio)
    height = ((n_panels_v * ((panel_height_absolute + _margin_t_absolute) + margin_b_absolute)) + ((n_panels_v - 1) * margin_sep_absolute))
    panel_height = (panel_height_absolute / height)
    margin_t = (_margin_t_absolute / height)
    margin_b = (margin_b_absolute / height)
    if reduce_vertical_sep:
        margin_sep_total = (margin_sep_small_absolute / height)
    else:
        margin_sep_total = ((margin_t + margin_b) + (margin_sep_absolute / height))
    hspace = (margin_sep_total / panel_height)
    fig = plt.figure(figsize=(width, height))
    plt.subplots_adjust(left=margin_l, right=(1.0 - margin_r), bottom=margin_b, top=(1.0 - margin_t), wspace=wspace, hspace=hspace)
    return fig
def get_fbank(path_or_fp: Union[(str, BinaryIO)], n_bins=80) -> np.ndarray:
    """Extract log-mel filterbank features from an audio path or file object.

    Tries pyKaldi first, then torchaudio; raises ImportError when neither
    backend is available.
    """
    waveform, rate = get_waveform(path_or_fp, normalization=False)
    for extractor in (_get_kaldi_fbank, _get_torchaudio_fbank):
        feats = extractor(waveform, rate, n_bins)
        if feats is not None:
            return feats
    raise ImportError('Please install pyKaldi or torchaudio to enable online filterbank feature extraction')
def load_classifier(name='resnet101', n=2):
    """Load a pretrained torchvision backbone and swap in a zero-initialized
    n-way classification head.

    Args:
        name: key into torchvision.models (e.g. 'resnet101').
        n: number of output classes.

    Returns:
        The model with model.fc replaced by trainable zero weights/bias.
    """
    backbone = torchvision.models.__dict__[name](pretrained=True)
    in_features = backbone.fc.weight.shape[1]
    # Zero-initialized head: both bias and weight start at 0 and are trainable.
    backbone.fc.bias = nn.Parameter(torch.zeros(n), requires_grad=True)
    backbone.fc.weight = nn.Parameter(torch.zeros(n, in_features), requires_grad=True)
    backbone.fc.out_features = n
    return backbone
def initialize_hyperparameters(PATHS: dict, load_target: str, config_name: str='default', n_envs: int=1) -> dict:
    """Load or create the agent's hyperparameter dict, validate the batch
    configuration, persist it to disk and print it.

    Args:
        PATHS: project path dict; PATHS['model'] names the agent directory.
        load_target: None for a fresh run, otherwise an existing agent to resume.
        config_name: named default config used when starting from scratch.
        n_envs: number of parallel environments (divides the batch size).

    Returns:
        The finalized hyperparameter dict.
    """
    if load_target is None:
        # Fresh run: start from the named default config and derive the agent
        # name from the model directory.
        hyperparams = load_hyperparameters_json(PATHS=PATHS, from_scratch=True, config_name=config_name)
        hyperparams['agent_name'] = PATHS['model'].split('/')[-1]
    else:
        # Resuming: reuse the hyperparameters saved with the model.
        hyperparams = load_hyperparameters_json(PATHS=PATHS)
    check_batch_size(n_envs, hyperparams['batch_size'], hyperparams['m_batch_size'])
    hyperparams['n_steps'] = int(hyperparams['batch_size'] / n_envs)
    write_hyperparameters_json(hyperparams, PATHS)
    print_hyperparameters(hyperparams)
    return hyperparams
def timed_run(f: FunctionType, timeout_seconds: int=3600) -> Tuple[(Any, Number, bool)]:
    """Run f() under a wall-clock timeout.

    Returns:
        (result, elapsed_seconds, timed_out); result is None when the
        timeout fired.
    """
    started = time.time()
    with Timeout(timeout_seconds) as guard:
        result = f()
    elapsed = time.time() - started
    hit_timeout = guard.state == guard.TIMED_OUT
    return (None if hit_timeout else result, elapsed, hit_timeout)
# NOTE(review): the call below looks like a class decorator for Evaluator that
# lost its leading '@' (the methods below read self.lost_lang / self.known_lang
# / self.capacity, which it presumably provides as properties) -- confirm.
_arguments_as_properties('lost_lang', 'known_lang', 'capacity', 'num_cognates')

class Evaluator():
    """Runs decipherment evaluation settings (MLE or flow, with/without edits,
    per capacity) over the entire data batch and tabulates accuracies."""

    def __init__(self, model, data_loader):
        self.model = model
        self.data_loader = data_loader
        # One EvalSetting per configuration registered via add_setting.
        self._settings = list()

    def add_setting(self, mode=None, edit=None):
        """Register an evaluation setting; 'mle' adds one, 'flow' adds one
        per capacity value."""
        assert (mode in ['mle', 'flow'])
        assert (edit in [True, False])
        lost_size = self.data_loader.size(self.lost_lang)
        known_size = self.data_loader.size(self.known_lang)
        if (mode == 'mle'):
            self._settings.append(EvalSetting(self.lost_lang, self.known_lang, lost_size, known_size, mode, None, None))
        else:
            for c in self.capacity:
                self._settings.append(EvalSetting(self.lost_lang, self.known_lang, lost_size, known_size, mode, edit, c))

    def __str__(self):
        # Left-aligned pretty table of the registered settings.
        table = pt()
        table.field_names = ('lost', 'known', 'lost_size', 'known_size', 'mode', 'edit', 'capacity')
        for s in self._settings:
            table.add_row([getattr(s, field) for field in table.field_names])
        table.align = 'l'
        return str(table)

    def evaluate(self, epoch, num_cognates):
        """Evaluate every registered setting on the full batch.

        Returns:
            dict mapping str(setting) -> accuracy in [0, 1].
        """
        self.model.eval()
        table = pt()
        table.field_names = ('lost', 'known', 'mode', 'edit', 'capacity', 'score')
        eval_scores = dict()
        for s in self._settings:
            batch = self.data_loader.entire_batch
            model_ret = self.model(batch, mode=s.mode, num_cognates=num_cognates, edit=s.edit, capacity=s.capacity)
            # MLE exposes alignments through valid_log_probs; flow through .flow.
            almt = (model_ret.valid_log_probs if (s.mode == 'mle') else model_ret.flow)
            preds = almt.get_best()
            acc = self._evaluate_one_setting(preds)
            score = (acc / len(preds))
            fmt_score = f'{acc}/{len(preds)}={score:.3f}'
            table.add_row(([getattr(s, field) for field in table.field_names[:(- 1)]] + [fmt_score]))
            eval_scores[str(s)] = score
        table.align = 'l'
        table.title = f'Epoch: {epoch}'
        log_pp(table)
        return eval_scores

    def _evaluate_one_setting(self, preds):
        # Count predicted (lost, known) pairs that are true cognates.
        acc = 0
        for (lost, known) in preds.items():
            if is_cognate(lost, known):
                acc += 1
        return acc
def _handle_path(path, sess, low_profile=False):
if isinstance(path, str):
f = np.load(path)
(m, s) = (f['mu'][:], f['sigma'][:])
f.close()
else:
files = path
if low_profile:
(m, s) = calculate_activation_statistics_from_files(files, sess)
else:
x = path
(m, s) = calculate_activation_statistics(x, sess)
del x
return (m, s) |
def num_dependent_clauses(const_pt):
    """Count subordinate-clause subtrees in the constituency parse const_pt.

    Uses the language-specific clause tags from SUBORD_CLAUSE_LANGUAGE_MAP
    ('zh-hant' and 'fr' have dedicated tag sets; anything else is 'default').
    """
    lang = settings.LANGUAGE if settings.LANGUAGE in ('zh-hant', 'fr') else 'default'
    dep_clauses = [subtree.leaves()
                   for clause_tag in SUBORD_CLAUSE_LANGUAGE_MAP[lang]
                   for subtree in _leaves(const_pt, clause_tag)]
    return len(dep_clauses)
class ConfigTester(unittest.TestCase):
    """Tests for the CustomOutput container: attribute/key/index access,
    dict-style construction, pickling, and torch pytree integration."""

    def test_outputs_single_attribute(self):
        # The same data must be reachable as an attribute, a dict key, and a
        # positional index, for both ndarray and list-of-PIL payloads.
        outputs = CustomOutput(images=np.random.rand(1, 3, 4, 4))
        assert isinstance(outputs.images, np.ndarray)
        assert (outputs.images.shape == (1, 3, 4, 4))
        assert isinstance(outputs['images'], np.ndarray)
        assert (outputs['images'].shape == (1, 3, 4, 4))
        assert isinstance(outputs[0], np.ndarray)
        assert (outputs[0].shape == (1, 3, 4, 4))
        outputs = CustomOutput(images=[PIL.Image.new('RGB', (4, 4))])
        assert isinstance(outputs.images, list)
        assert isinstance(outputs.images[0], PIL.Image.Image)
        assert isinstance(outputs['images'], list)
        assert isinstance(outputs['images'][0], PIL.Image.Image)
        assert isinstance(outputs[0], list)
        assert isinstance(outputs[0][0], PIL.Image.Image)

    def test_outputs_dict_init(self):
        # Constructing from a plain dict must behave like keyword construction.
        outputs = CustomOutput({'images': np.random.rand(1, 3, 4, 4)})
        assert isinstance(outputs.images, np.ndarray)
        assert (outputs.images.shape == (1, 3, 4, 4))
        assert isinstance(outputs['images'], np.ndarray)
        assert (outputs['images'].shape == (1, 3, 4, 4))
        assert isinstance(outputs[0], np.ndarray)
        assert (outputs[0].shape == (1, 3, 4, 4))
        outputs = CustomOutput({'images': [PIL.Image.new('RGB', (4, 4))]})
        assert isinstance(outputs.images, list)
        assert isinstance(outputs.images[0], PIL.Image.Image)
        assert isinstance(outputs['images'], list)
        assert isinstance(outputs['images'][0], PIL.Image.Image)
        assert isinstance(outputs[0], list)
        assert isinstance(outputs[0][0], PIL.Image.Image)

    def test_outputs_serialization(self):
        # Pickling must round-trip attributes, mapping view, and __dict__.
        outputs_orig = CustomOutput(images=[PIL.Image.new('RGB', (4, 4))])
        serialized = pkl.dumps(outputs_orig)
        outputs_copy = pkl.loads(serialized)
        assert (dir(outputs_orig) == dir(outputs_copy))
        assert (dict(outputs_orig) == dict(outputs_copy))
        assert (vars(outputs_orig) == vars(outputs_copy))

    # NOTE(review): the bare name below is almost certainly a decorator (e.g.
    # @require_torch) that lost its leading '@' during conversion; as written
    # it only evaluates the name and decorates nothing -- confirm.
    _torch
    def test_torch_pytree(self):
        # CustomOutput must register as a pytree node (not a leaf) so torch
        # can flatten/unflatten it losslessly.
        import torch
        import torch.utils._pytree
        data = np.random.rand(1, 3, 4, 4)
        x = CustomOutput(images=data)
        self.assertFalse(torch.utils._pytree._is_leaf(x))
        expected_flat_outs = [data]
        expected_tree_spec = torch.utils._pytree.TreeSpec(CustomOutput, ['images'], [torch.utils._pytree.LeafSpec()])
        (actual_flat_outs, actual_tree_spec) = torch.utils._pytree.tree_flatten(x)
        self.assertEqual(expected_flat_outs, actual_flat_outs)
        self.assertEqual(expected_tree_spec, actual_tree_spec)
        unflattened_x = torch.utils._pytree.tree_unflatten(actual_flat_outs, actual_tree_spec)
        self.assertEqual(x, unflattened_x)
def update_config(config, data_sets):
    """Scan every valid example in data_sets, record maximum paragraph /
    sentence / question / word sizes on config, clamp them with the
    configured thresholds in train mode, and derive vocabulary sizes.

    Mutates `config` in place; returns None.
    """
    config.max_num_sents = 0
    config.max_sent_size = 0
    config.max_ques_size = 0
    config.max_ques_sub_size = 0
    config.max_word_size = 0
    config.max_para_size = 0
    for data_set in data_sets:
        data = data_set.data
        for idx in data_set.valid_idxs:
            question = data['q'][idx]
            sents = data['x'][idx]
            config.max_para_size = max(config.max_para_size, sum(len(s) for s in sents))
            config.max_num_sents = max(config.max_num_sents, len(sents))
            config.max_sent_size = max(config.max_sent_size, max(len(s) for s in sents))
            config.max_word_size = max(config.max_word_size, max(len(w) for s in sents for w in s))
            if question:
                config.max_ques_size = max(config.max_ques_size, len(question))
                config.max_word_size = max(config.max_word_size, max(len(w) for w in question))
    if config.mode == 'train':
        # Cap the observed maxima with the training thresholds.
        config.max_num_sents = min(config.max_num_sents, config.num_sents_th)
        config.max_sent_size = min(config.max_sent_size, config.sent_size_th)
        config.max_para_size = min(config.max_para_size, config.para_size_th)
        config.max_word_size = min(config.max_word_size, config.word_size_th)
    shared = data_sets[0].shared
    config.char_vocab_size = len(shared['char2idx'])
    # Embedding width is taken from any one word vector.
    config.word_emb_size = len(next(iter(shared['word2vec'].values())))
    config.word_vocab_size = len(shared['word2idx'])
    if config.single:
        config.max_num_sents = 1
    if config.squash:
        # Squash merges all sentences into one paragraph-length sentence.
        config.max_sent_size = config.max_para_size
        config.max_num_sents = 1
def render_batch(visualize_fn, input, target, output):
    """Render a batch to an RGB float array in [0, 1].

    For each sample, draws three panels: the raw input, visualize_fn applied
    to the ground truth, and visualize_fn applied to the prediction.

    Args:
        visualize_fn: callable(ax, image, annotation) that draws onto an axis.
        input: array of shape (batch, H, W[, C]) accepted by imshow.
        target, output: per-sample annotations consumed by visualize_fn.

    Returns:
        np.ndarray of shape (height, width, 3), values in [0, 1].
    """
    batch_size = input.shape[0]
    (fig, axes) = plt.subplots(nrows=batch_size, ncols=3, figsize=(12, (4 * batch_size)))
    plt.subplots_adjust(left=0.05, bottom=0, right=0.95, top=1, hspace=0)
    for i in range(batch_size):
        # With a single row, plt.subplots returns a 1-D axes array.
        ax = (axes if (batch_size == 1) else axes[i])
        ax[0].imshow(input[i])
        visualize_fn(ax[1], input[i], target[i])
        visualize_fn(ax[2], input[i], output[i])
    fig.canvas.draw()
    # np.fromstring on binary data is deprecated (and removed in new NumPy);
    # np.frombuffer is the supported equivalent. The division below already
    # produces a fresh writable array from the read-only buffer view.
    data = (np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) / 255.0)
    data = data.reshape((fig.canvas.get_width_height()[::(- 1)] + (3,)))
    plt.close(fig)
    return data
def create_model(arch, heads, head_conv):
    """Instantiate a detector from _model_factory.

    Args:
        arch: architecture spec, either 'name' or 'name_depth' (e.g. 'dla_34').
        heads: output head specification forwarded to the factory.
        head_conv: head conv channel count forwarded to the factory.
    """
    if '_' in arch:
        sep = arch.find('_')
        base, num_layers = arch[:sep], int(arch[(sep + 1):])
    else:
        base, num_layers = arch, 0
    return _model_factory[base](num_layers=num_layers, heads=heads, head_conv=head_conv)
def labels2clusters(labels):
    """Group element indices by label.

    Args:
        labels: iterable of hashable labels, one per element.

    Returns:
        list[list[int]]: one index list per distinct label, ordered by the
        label's first appearance; indices within a cluster stay ascending.
    """
    lb2idxs = {}
    for idx, lb in enumerate(labels):
        # setdefault replaces the manual "if key missing, insert []" dance and
        # preserves first-appearance ordering of plain dicts.
        lb2idxs.setdefault(lb, []).append(idx)
    return list(lb2idxs.values())
# NOTE(review): the bare name below is almost certainly a decorator (e.g.
# @require_tf) that lost its leading '@' during conversion -- confirm.
_tf

class TFCTRLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """TensorFlow CTRL model test-suite wiring: declares the model classes
    under test and delegates the checks to TFCTRLModelTester/ConfigTester."""

    all_model_classes = ((TFCTRLModel, TFCTRLLMHeadModel, TFCTRLForSequenceClassification) if is_tf_available() else ())
    all_generative_model_classes = ((TFCTRLLMHeadModel,) if is_tf_available() else ())
    pipeline_model_mapping = ({'feature-extraction': TFCTRLModel, 'text-classification': TFCTRLForSequenceClassification, 'text-generation': TFCTRLLMHeadModel, 'zero-shot': TFCTRLForSequenceClassification} if is_tf_available() else {})
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        # Zero-shot pipeline tests are known-incompatible with CTRL and skipped.
        if (pipeline_test_casse_name == 'ZeroShotClassificationPipelineTests'):
            return True
        return False

    def setUp(self):
        self.model_tester = TFCTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_lm_head(*config_and_inputs)

    def test_ctrl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        # LM models expose output embeddings + bias dict; sequence classifiers
        # expose output embeddings but no bias; everything else exposes neither.
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        list_lm_models = [TFCTRLLMHeadModel]
        list_other_models_with_output_ebd = [TFCTRLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if (model_class in list_lm_models):
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert isinstance(name, dict)
                for (k, v) in name.items():
                    assert isinstance(v, tf.Variable)
            elif (model_class in list_other_models_with_output_ebd):
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert (name is None)
            else:
                x = model.get_output_embeddings()
                assert (x is None)
                name = model.get_bias()
                assert (name is None)

    def test_model_from_pretrained(self):
        for model_name in TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFCTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def summarize_evaluation(evaluation: pd.DataFrame) -> dict:
    """Aggregate a per-completion evaluation frame into mean metrics.

    Returns None (with a warning) when there is nothing to evaluate.
    """
    if evaluation is None or len(evaluation) == 0:
        warnings.warn('No completions to evaluate.')
        return None
    column_for = {
        'accuracy': 'correct',
        'contains_answer': 'contains_answer',
        'correct_format': 'correct_format',
        'complete': 'complete',
    }
    return {metric: evaluation[column].mean() for metric, column in column_for.items()}
class HistoricalContainer(metaclass=ABCMeta):
    """Epoch-indexed history of metric record dicts.

    Records live in an OrderedDict keyed by epoch number; `add` advances an
    internal epoch counter so callers can simply append.
    """

    def __init__(self) -> None:
        self._record_dict: _Save_Type = OrderedDict()
        self._current_epoch: int = 0

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass

    # The accessors below were plainly meant to be properties: the rest of the
    # class indexes self.record_dict and compares self.current_epoch as an int,
    # which would fail on bound methods. The '@' decorators appear to have been
    # stripped during conversion and are restored here.
    @property
    def trainer(self):
        # NOTE(review): self._trainer is never assigned in this class; it is
        # presumably injected by the owning trainer object -- confirm.
        return self._trainer

    @property
    def record_dict(self) -> _Save_Type:
        return self._record_dict

    def get_record_dict(self, epoch=None) -> Union[(_Record_Type, _Save_Type)]:
        """Return the full history, or the record saved for one epoch."""
        if epoch is None:
            return self.record_dict
        # map(str, ...) -- keys are ints, so joining them directly would raise
        # TypeError instead of producing the intended assertion message.
        assert epoch in self._record_dict, 'epoch {} not saved in {}'.format(epoch, ', '.join(map(str, self._record_dict.keys())))
        return self.record_dict[epoch]

    @property
    def current_epoch(self) -> int:
        return self._current_epoch

    def summary(self) -> pd.DataFrame:
        """Tabulate the history; epochs with no record become empty rows."""
        validated_table = pd.DataFrame(self.record_dict).T
        if len(self.record_dict) < self.current_epoch:
            missing_table = pd.DataFrame(index=(set(range(self.current_epoch)) - set(self.record_dict.keys())))
            # DataFrame.append was removed in pandas 2.0; concat is equivalent.
            validated_table = pd.concat([validated_table, missing_table], sort=True)
        return validated_table

    def add(self, input_dict: _Record_Type, epoch=None) -> None:
        """Record input_dict at `epoch` (or at the running epoch counter)."""
        # `is not None` -- the previous truthiness test silently ignored an
        # explicit epoch=0.
        if epoch is not None:
            self._current_epoch = epoch
        self._record_dict[self._current_epoch] = input_dict
        self._current_epoch += 1

    def reset(self) -> None:
        """Drop all records and restart the epoch counter."""
        self._record_dict = OrderedDict()
        self._current_epoch = 0

    def state_dict(self) -> Dict[(str, Any)]:
        return self.__dict__

    def load_state_dict(self, state_dict: Dict[(str, Any)]) -> None:
        self.__dict__.update(state_dict)

    def __repr__(self):
        return str(pd.DataFrame(self.record_dict).T)
def write_podspec(f, rules, args):
    """Write the abseil podspec to f: the header template with ${var}
    placeholders substituted from args, the rule directory map, and the
    closing 'end' line."""
    abseil_rules = build_rule_directory(rules)['abseil']
    header = re.sub('\\$\\{(\\w+)\\}', (lambda match: args[match.group(1)]), SPEC_TEMPLATE).lstrip()
    f.write(header)
    write_podspec_map(f, abseil_rules, 0)
    f.write('end\n')
def add_ground_truth_to_proposals_single_image(gt_boxes, proposals):
    """Append ground-truth boxes to the proposal set of one image.

    GT boxes get an objectness logit corresponding to a probability of
    ~(1 - 1e-10), i.e. effectively certain positives.
    """
    device = proposals.objectness_logits.device
    # logit(p) = log(p / (1 - p)) with p just below 1.
    gt_logit_value = math.log((1.0 - 1e-10) / (1 - (1.0 - 1e-10)))
    gt_proposal = Instances(proposals.image_size)
    gt_proposal.proposal_boxes = gt_boxes
    gt_proposal.objectness_logits = torch.full((len(gt_boxes),), gt_logit_value, device=device)
    return Instances.cat([proposals, gt_proposal])
class Up(nn.Module):
    """UNet decoder stage: upsample the deep feature map, pad it to match the
    skip connection's spatial size, concatenate along channels, and apply a
    double convolution."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        if bilinear:
            # Parameter-free upsampling; the channel halving happens inside
            # DoubleConv via its mid-channel argument.
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        upsampled = self.up(x1)
        # Pad symmetrically so upsampled matches x2's height/width exactly.
        height_gap = x2.size()[2] - upsampled.size()[2]
        width_gap = x2.size()[3] - upsampled.size()[3]
        upsampled = F.pad(upsampled, [width_gap // 2, width_gap - (width_gap // 2),
                                      height_gap // 2, height_gap - (height_gap // 2)])
        merged = torch.cat([x2, upsampled], dim=1)
        return self.conv(merged)
class TestTorchOP(unittest.TestCase):
    """End-to-end checks that a traced torch module (element-wise div/mul)
    compiled through the project's `compile`/`Graph` pipeline matches the
    torchscript reference output."""

    # NOTE(review): setUpClass/tearDownClass are missing @classmethod, so
    # unittest would call them without an instance and fail on the `self`
    # parameter -- confirm whether the decorators were stripped.
    def setUpClass(self):
        pass

    def tearDownClass(self):
        pass

    def test_1(self):
        # Trace a 'div' net, compile it, round-trip through save/load, then
        # compare inference against the traced reference to 5 decimals.
        n = Net('div')
        example_in = torch.rand(3, 256)
        example_in2 = torch.rand(256)
        traced_model = torch.jit.trace(n, (example_in, example_in2))
        torch.jit.save(traced_model, '{}.pt'.format(file_name))
        ref_out = traced_model(example_in, example_in2).detach().numpy()
        graph = compile('{}.pt'.format(file_name))
        graph.save(file_name)
        newgraph = Graph()
        newgraph.graph_init((file_name + '/conf.yaml'), (file_name + '/model.bin'))
        out = newgraph.inference([example_in.numpy(), example_in2.numpy()])
        np.testing.assert_almost_equal(ref_out, [*out.values()][0], decimal=5)
        # Clean up the serialized model artifacts.
        os.remove('{}.pt'.format(file_name))
        shutil.rmtree(file_name)

    def test_2(self):
        # Same pipeline as test_1 but for the 'mul' op.
        n = Net('mul')
        example_in = torch.rand(3, 256)
        example_in2 = torch.rand(256)
        traced_model = torch.jit.trace(n, (example_in, example_in2))
        torch.jit.save(traced_model, '{}.pt'.format(file_name))
        ref_out = traced_model(example_in, example_in2).detach().numpy()
        graph = compile('{}.pt'.format(file_name))
        graph.save(file_name)
        newgraph = Graph()
        newgraph.graph_init((file_name + '/conf.yaml'), (file_name + '/model.bin'))
        out = newgraph.inference([example_in.numpy(), example_in2.numpy()])
        np.testing.assert_almost_equal(ref_out, [*out.values()][0], decimal=5)
        os.remove('{}.pt'.format(file_name))
        shutil.rmtree(file_name)
def score_amr_pairs(f1, f2, justinstance=False, justattribute=False, justrelation=False):
    """Generator of Smatch (precision, recall, f-score) tuples for AMR pairs
    read from files f1 and f2.

    Yields one score per pair when the module-level flag `single_score` is
    false, otherwise a single corpus-level score at the end. Relies on the
    module globals `single_score`, `verbose`, `DEBUG_LOG` and the shared
    `match_triple_dict` cache.

    Args:
        f1, f2: file objects with one AMR per block, consumed in parallel.
        justinstance/justattribute/justrelation: restrict matching to that
            triple type only.
    """
    total_match_num = total_test_num = total_gold_num = 0
    for (sent_num, (cur_amr1, cur_amr2)) in enumerate(generate_amr_lines(f1, f2), start=1):
        (best_match_num, test_triple_num, gold_triple_num) = get_amr_match(cur_amr1, cur_amr2, sent_num=sent_num, justinstance=justinstance, justattribute=justattribute, justrelation=justrelation)
        total_match_num += best_match_num
        total_test_num += test_triple_num
        total_gold_num += gold_triple_num
        # The matching cache is per-pair; clear it so pairs stay independent.
        match_triple_dict.clear()
        if (not single_score):
            (yield compute_f(best_match_num, test_triple_num, gold_triple_num))
    if verbose:
        print('Total match number, total triple number in AMR 1, and total triple number in AMR 2:', file=DEBUG_LOG)
        print(total_match_num, total_test_num, total_gold_num, file=DEBUG_LOG)
        print('', file=DEBUG_LOG)
    if single_score:
        (yield compute_f(total_match_num, total_test_num, total_gold_num))
class YGate(Gate):
    """Single-qubit Pauli-Y gate."""

    def __init__(self, label=None):
        super().__init__('y', 1, [], label=label)

    def _define(self):
        """Decompose Y as a single U3(pi, pi/2, pi/2) on one qubit."""
        q = QuantumRegister(1, 'q')
        self.definition = [(U3Gate(pi, (pi / 2), (pi / 2)), [q[0]], [])]

    def inverse(self):
        """Pauli-Y is self-inverse."""
        return YGate()

    def to_matrix(self):
        """Return the 2x2 unitary [[0, -i], [i, 0]]."""
        return numpy.array([[0, (- 1j)], [1j, 0]], dtype=complex)
def get_target_feature(model, preprocess, tokenizer_funct, device, target_images=None, target_prompts=None):
    """Embed the optimization targets: image features when target_images is
    given, otherwise text features for target_prompts.

    Only the image path runs under no_grad, matching the original behavior.
    """
    if target_images is None:
        tokens = tokenizer_funct(target_prompts).to(device)
        return model.encode_text(tokens)
    with torch.no_grad():
        batch = torch.concatenate([preprocess(img).unsqueeze(0) for img in target_images]).to(device)
        return model.encode_image(batch)
def main():
    """Run the full suite of symbolic-regression experiments, writing
    per-model errors to results.csv and printing the total elapsed time."""
    with codecs.open('results.csv', 'w', 'utf-8') as out_file:
        results = csv.writer(out_file)
        results.writerow(['experiment', 'model', 'error', 'elapsed'])
        started = time.time()
        # Experiments run strictly in sequence so any RNG state consumed by the
        # data generators stays in the original order.
        run_experiment(results, 'counts', generate_data_counts)
        run_experiment(results, 'quad', generate_data_quad)
        run_experiment(results, 'sqrt', generate_data_fn(1, 1.0, 100.0, math.sqrt))
        run_experiment(results, 'log', generate_data_fn(1, 1.0, 100.0, math.log))
        run_experiment(results, 'pow', generate_data_fn(1, 1.0, 10.0, lambda x: x ** 2))
        run_experiment(results, 'ratio', generate_data_ratio)
        run_experiment(results, 'diff', generate_data_diff)
        run_experiment(results, 'r_poly', generate_data_fn(1, 1.0, 10.0, lambda x: 1 / (5 * x + 8 * x ** 2)))
        run_experiment(results, 'poly', generate_data_fn(1, 0.0, 2.0, lambda x: 1 + 5 * x + 8 * x ** 2))
        run_experiment(results, 'r_diff', generate_data_fn(4, 1.0, 10.0, lambda a, b, c, d: (a - b) / (c - d)))
        print('Elapsed time: {}'.format(hms_string(time.time() - started)))
class FangraphsBattingStats(FangraphsStatsBase):
    """Column-identifier constants for the Fangraphs batting leaderboard.

    Each attribute maps a stat name to the string column id used by the
    Fangraphs export; long-form aliases (e.g. HOME_RUNS = HR) point at the
    same id. Groupings below follow the Fangraphs column order: standard
    counting stats, rate stats, batted-ball data, value/WAR components,
    win-probability stats, pitch-type data (BIS), plate discipline,
    Statcast (``_SC``), Pitch Info (``_PI``), scaled "+" stats, and
    Statcast quality-of-contact fields.
    """
    COMMON = 'c'
    LINE_BREAK = '-1'
    # --- Identity / standard counting stats ---
    NAME = '0'
    TEAM = '1'
    SEASON = '2'
    AGE = '3'
    G = '4'
    GAMES = G
    AB = '5'
    AT_BATS = AB
    PA = '6'
    PLATE_APPEARANCES = PA
    H = '7'
    HITS = H
    SINGLES = '8'
    DOUBLES = '9'
    TRIPLES = '10'
    HR = '11'
    HOME_RUNS = HR
    R = '12'
    RUNS = R
    RBI = '13'
    BB = '14'
    WALKS = BB
    IBB = '15'
    INTENTIONAL_WALKS = IBB
    SO = '16'
    STRIKE_OUTS = SO
    HBP = '17'
    HIT_BY_PITCH = HBP
    SF = '18'
    SACRIFICE_FLY = SF
    SH = '19'
    SACRIFICE_HIT = SH
    GDP = '20'
    GROUNDED_DOUBLE_PLAY = GDP
    SB = '21'
    STOLEN_BASES = SB
    CS = '22'
    CAUGHT_STEALING = CS
    AVG = '23'
    BATTING_AVERAGE = AVG
    # --- Batted-ball counting stats ---
    GB = '24'
    GROUND_BALLS = GB
    FB = '25'
    FLY_BALLS = FB
    LD = '26'
    IFFB = '27'
    PITCHES = '28'
    BALLS = '29'
    STRIKES = '30'
    IFH = '31'
    BU = '32'
    BUH = '33'
    # --- Rate stats ---
    BB_PCT = '34'
    WALK_PERCENTAGE = BB_PCT
    K_PCT = '35'
    STRIKE_PERCENTAGE = K_PCT
    BB_K = '36'
    WALKS_PER_STRIKOUT = BB_K
    OBP = '37'
    ON_BASE_PERCENTAGE = OBP
    SLG = '38'
    SLUGGING = SLG
    OPS = '39'
    ON_BASE_PLUS_SLUGGING = OPS
    ISO = '40'
    ISOLATED_POWER = ISO
    BABIP = '41'
    BATTING_AVERAGE_FOR_BALLS_IN_PLAY = BABIP
    GB_FB = '42'
    GROUND_BALLS_PER_FLY_BALL = GB_FB
    LD_PCT = '43'
    GB_PCT = '44'
    GROUND_BALL_PERCENTAGE = GB_PCT
    FB_PCT = '45'
    FLY_BALL_PERCENTAGE = FB_PCT
    IFFB_PCT = '46'
    HR_FB = '47'
    HOME_RUNS_PER_FLY_BALL = HR_FB
    IFH_PCT = '48'
    INFIELD_HIT_PERCENTAGE = IFH_PCT
    BUH_PCT = '49'
    # --- Advanced / value stats (wOBA, WAR components) ---
    WOBA = '50'
    WEIGHTED_ON_BASE_AVERAGE = WOBA
    WRAA = '51'
    WRC = '52'
    WEIGHTED_RUNS_CREATED = WRC
    BATTING = '53'
    BAT = BATTING
    FIELDING = '54'
    FLD = FIELDING
    REPLACEMENT = '55'
    REP = REPLACEMENT
    POSITIONAL = '56'
    POS = POSITIONAL
    RAR = '57'
    RUNS_ABOVE_REPLACEMENT = RAR
    WAR = '58'
    WINS_ABOVE_REPLACEMENT = WAR
    DOLLARS = '59'
    DOL = DOLLARS
    DOLLARS_VALUE = DOLLARS
    SPD = '60'
    WRC_PLUS = '61'
    WEIGHTED_RUNS_CREATED_PLUS = WRC_PLUS
    # --- Win probability stats ---
    WPA = '62'
    WIN_PROBABILITY_ADDED = WPA
    NEGATIVE_WPA = '63'
    NEGATIVE_WIN_PROBABILITY_ADDED = NEGATIVE_WPA
    POSITIVE_WPA = '64'
    POSITIVE_WIN_PROBABILITY_ADDED = POSITIVE_WPA
    RE24 = '65'
    REW = '66'
    PLI = '67'
    PHLI = '68'
    PH = '69'
    WPA_LI = '70'
    CLUTCH = '71'
    # --- Pitch-type frequencies/velocities (BIS) ---
    FB_PCT_PITCH = '72'
    FB_PCT_2 = FB_PCT_PITCH
    FBV = '73'
    SL_PCT = '74'
    SLV = '75'
    CT_PCT = '76'
    CTV = '77'
    CB_PCT = '78'
    CBV = '79'
    CH_PCT = '80'
    CHV = '81'
    SF_PCT = '82'
    SFV = '83'
    KN_PCT = '84'
    KNV = '85'
    XX_PCT = '86'
    PO_PCT = '87'
    # --- Pitch-type run values (total and per-100-pitch, suffix _C) ---
    WFB = '88'
    WSL = '89'
    WCT = '90'
    WCB = '91'
    WCH = '92'
    WSF = '93'
    WKN = '94'
    WFB_C = '95'
    WSL_C = '96'
    WCT_C = '97'
    WCB_C = '98'
    WCH_C = '99'
    WSF_C = '100'
    WKN_C = '101'
    # --- Plate discipline ---
    O_SWING_PCT = '102'
    OSWING_PCT = O_SWING_PCT
    Z_SWING_PCT = '103'
    ZSWING_PCT = Z_SWING_PCT
    SWING_PCT = '104'
    O_CONTACT_PCT = '105'
    OCONTACT_PCT = O_CONTACT_PCT
    Z_CONTACT_PCT = '106'
    ZCONTACT_PCT = Z_CONTACT_PCT
    CONTACT_PCT = '107'
    ZONE_PCT = '108'
    F_STRIKE_PCT = '109'
    FSTRIKE_PCT = F_STRIKE_PCT
    SWSTR_PCT = '110'
    BASE_RUNNING = '111'
    BSR = BASE_RUNNING
    # --- Statcast pitch-type data (suffix _SC) ---
    FA_PCT_SC = '112'
    FT_PCT_SC = '113'
    FC_PCT_SC = '114'
    FS_PCT_SC = '115'
    FO_PCT_SC = '116'
    SI_PCT_SC = '117'
    SL_PCT_SC = '118'
    CU_PCT_SC = '119'
    KC_PCT_SC = '120'
    EP_PCT_SC = '121'
    CH_PCT_SC = '122'
    SC_PCT_SC = '123'
    KN_PCT_SC = '124'
    UN_PCT_SC = '125'
    VFA_SC = '126'
    VFT_SC = '127'
    VFC_SC = '128'
    VFS_SC = '129'
    VFO_SC = '130'
    VSI_SC = '131'
    VSL_SC = '132'
    VCU_SC = '133'
    VKC_SC = '134'
    VEP_SC = '135'
    VCH_SC = '136'
    VSC_SC = '137'
    VKN_SC = '138'
    FA_X_SC = '139'
    FT_X_SC = '140'
    FC_X_SC = '141'
    FS_X_SC = '142'
    FO_X_SC = '143'
    SI_X_SC = '144'
    SL_X_SC = '145'
    CU_X_SC = '146'
    KC_X_SC = '147'
    EP_X_SC = '148'
    CH_X_SC = '149'
    SC_X_SC = '150'
    KN_X_SC = '151'
    FA_Z_SC = '152'
    FT_Z_SC = '153'
    FC_Z_SC = '154'
    FS_Z_SC = '155'
    FO_Z_SC = '156'
    SI_Z_SC = '157'
    SL_Z_SC = '158'
    CU_Z_SC = '159'
    KC_Z_SC = '160'
    EP_Z_SC = '161'
    CH_Z_SC = '162'
    SC_Z_SC = '163'
    KN_Z_SC = '164'
    WFA_SC = '165'
    WFT_SC = '166'
    WFC_SC = '167'
    WFS_SC = '168'
    WFO_SC = '169'
    WSI_SC = '170'
    WSL_SC = '171'
    WCU_SC = '172'
    WKC_SC = '173'
    WEP_SC = '174'
    WCH_SC = '175'
    WSC_SC = '176'
    WKN_SC = '177'
    WFA_C_SC = '178'
    WFT_C_SC = '179'
    WFC_C_SC = '180'
    WFS_C_SC = '181'
    WFO_C_SC = '182'
    WSI_C_SC = '183'
    WSL_C_SC = '184'
    WCU_C_SC = '185'
    WKC_C_SC = '186'
    WEP_C_SC = '187'
    WCH_C_SC = '188'
    WSC_C_SC = '189'
    WKN_C_SC = '190'
    O_SWING_PCT_SC = '191'
    Z_SWING_PCT_SC = '192'
    SWING_PCT_SC = '193'
    O_CONTACT_PCT_SC = '194'
    Z_CONTACT_PCT_SC = '195'
    CONTACT_PCT_SC = '196'
    ZONE_PCT_SC = '197'
    PACE = '198'
    DEFENSE = '199'
    DEF = DEFENSE
    WSB = '200'
    UBR = '201'
    AGE_RNG = '202'
    OFFENSE = '203'
    OFF = OFFENSE
    LEAGUE = '204'
    LG = LEAGUE
    WGDP = '205'
    wGDP = WGDP
    # --- Batted-ball direction / quality ---
    PULL_PCT = '206'
    CENT_PCT = '207'
    OPPO_PCT = '208'
    SOFT_PCT = '209'
    MED_PCT = '210'
    HARD_PCT = '211'
    TTO_PCT = '212'
    # --- Pitch Info pitch-type data (suffix _PI) ---
    CH_PCT_PI = '213'
    CS_PCT_PI = '214'
    CU_PCT_PI = '215'
    FA_PCT_PI = '216'
    FC_PCT_PI = '217'
    FS_PCT_PI = '218'
    KN_PCT_PI = '219'
    SB_PCT_PI = '220'
    SI_PCT_PI = '221'
    SL_PCT_PI = '222'
    XX_PCT_PI = '223'
    VCH_PI = '224'
    VCS_PI = '225'
    VCU_PI = '226'
    VFA_PI = '227'
    VFC_PI = '228'
    VFS_PI = '229'
    VKN_PI = '230'
    VSB_PI = '231'
    VSI_PI = '232'
    VSL_PI = '233'
    VXX_PI = '234'
    CH_X_PI = '235'
    CS_X_PI = '236'
    CU_X_PI = '237'
    FA_X_PI = '238'
    FC_X_PI = '239'
    FS_X_PI = '240'
    KN_X_PI = '241'
    SB_X_PI = '242'
    SI_X_PI = '243'
    SL_X_PI = '244'
    XX_X_PI = '245'
    CH_Z_PI = '246'
    CS_Z_PI = '247'
    CU_Z_PI = '248'
    FA_Z_PI = '249'
    FC_Z_PI = '250'
    FS_Z_PI = '251'
    KN_Z_PI = '252'
    SB_Z_PI = '253'
    SI_Z_PI = '254'
    SL_Z_PI = '255'
    XX_Z_PI = '256'
    WCH_PI = '257'
    WCS_PI = '258'
    WCU_PI = '259'
    WFA_PI = '260'
    WFC_PI = '261'
    WFS_PI = '262'
    WKN_PI = '263'
    WSB_PI = '264'
    WSI_PI = '265'
    WSL_PI = '266'
    WXX_PI = '267'
    WCH_C_PI = '268'
    WCS_C_PI = '269'
    WCU_C_PI = '270'
    WFA_C_PI = '271'
    WFC_C_PI = '272'
    WFS_C_PI = '273'
    WKN_C_PI = '274'
    WSB_C_PI = '275'
    WSI_C_PI = '276'
    WSL_C_PI = '277'
    WXX_C_PI = '278'
    O_SWING_PCT_PI = '279'
    OSWING_PCT_PI = O_SWING_PCT_PI
    Z_SWING_PCT_PI = '280'
    ZSWING_PCT_PI = Z_SWING_PCT_PI
    SWING_PCT_PI = '281'
    O_CONTACT_PCT_PI = '282'
    OCONTACT_PCT_PI = O_CONTACT_PCT_PI
    Z_CONTACT_PCT_PI = '283'
    ZCONTACT_PCT_PI = Z_CONTACT_PCT_PI
    CONTACT_PCT_PI = '284'
    ZONE_PCT_PI = '285'
    PACE_PI = '286'
    FRAMING = '287'
    FRM = FRAMING
    # --- League-scaled "+" stats (100 = league average) ---
    AVG_PLUS = '288'
    BB_PCT_PLUS = '289'
    K_PCT_PLUS = '290'
    OBP_PLUS = '291'
    SLG_PLUS = '292'
    ISO_PLUS = '293'
    BABIP_PLUS = '294'
    LD_PCT_PLUS = '295'
    LD_PLUS_PCT = LD_PCT_PLUS
    GB_PCT_PLUS = '296'
    FB_PCT_PLUS = '297'
    HR_FB_PCT_PLUS = '298'
    PULL_PCT_PLUS = '299'
    CENT_PCT_PLUS = '300'
    OPPO_PCT_PLUS = '301'
    SOFT_PCT_PLUS = '302'
    MED_PCT_PLUS = '303'
    HARD_PCT_PLUS = '304'
    # --- Statcast quality-of-contact / expected stats ---
    EV = '305'
    LA = '306'
    BARRELS = '307'
    BARREL_PCT = '308'
    MAXEV = '309'
    HARDHIT = '310'
    HARDHIT_PCT = '311'
    EVENTS = '312'
    CSTR_PCT = '313'
    CSW_PCT = '314'
    XBA = '315'
    XSLG = '316'
    XWOBA = '317'
    LEGACY_WAR = '318'
class Normalize(object):
    """Channel-wise normalization applied only to 3-D (C, H, W) torch
    tensors; any other input is passed through untouched."""

    def __init__(self, mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), inplace=False):
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def __call__(self, x):
        is_chw_tensor = isinstance(x, torch.Tensor) and len(x.shape) == 3
        if is_chw_tensor:
            return F.normalize(x, self.mean, self.std, self.inplace)
        return x

    def __repr__(self):
        return self.__class__.__name__ + '()'
class DepthWise(Module):
    """Depthwise-separable block: 1x1 expansion to `groups` channels, grouped
    (depthwise) convolution, then 1x1 linear projection to out_c, with an
    optional residual shortcut."""

    def __init__(self, in_c, out_c, residual=False, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=1):
        super(DepthWise, self).__init__()
        self.residual = residual
        self.layers = nn.Sequential(
            ConvBlock(in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1)),
            ConvBlock(groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride),
            LinearBlock(groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1)),
        )

    def forward(self, x):
        out = self.layers(x)
        # Shortcut is only shape-valid when stride is (1, 1) and in_c == out_c.
        return (x + out) if self.residual else out
class ModelTest(unittest.TestCase):
    """Sanity checks for the NASNet model builders: parameter counts (in
    millions, within 0.1M), penultimate feature-map shapes, auxiliary heads,
    and channels_first support."""

    def check_parameter_count(self, model, target_in_m):
        # Compare the parameter count in millions against the published value.
        count = (model.count_params() / (10 ** 6))
        msg = '{} params #{}M suppose to be #{}M.'.format(model.name, count, target_in_m)
        self.assertAlmostEqual(target_in_m, count, msg=msg, delta=0.1)

    def check_penultimate_shape(self, model, target_shape):
        # The spatial shape feeding 'last_relu' depends on the data format.
        layer = model.get_layer('last_relu')
        if (K.image_data_format() == 'channels_first'):
            shape = layer.input_shape[2:]
        else:
            shape = layer.input_shape[1:3]
        self.assertEqual(shape, target_shape)

    def test_cifar_10(self):
        model = nasnet.cifar10()
        self.check_parameter_count(model, 3.3)
        self.check_penultimate_shape(model, (8, 8))
        # The auxiliary classifier adds ~1.6M parameters and a second output.
        aux_model = nasnet.cifar10(aux_output=True)
        self.check_parameter_count(aux_model, 4.9)
        self.assertEqual(len(aux_model.output), 2)

    def test_mobile(self):
        model = nasnet.mobile()
        self.check_parameter_count(model, 5.3)
        self.check_penultimate_shape(model, (7, 7))
        aux_model = nasnet.mobile(aux_output=True)
        self.check_parameter_count(aux_model, 7.7)
        self.assertEqual(len(aux_model.output), 2)

    def test_large(self):
        model = nasnet.large()
        self.check_parameter_count(model, 88.9)
        self.check_penultimate_shape(model, (11, 11))
        aux_model = nasnet.large(aux_output=True)
        self.check_parameter_count(aux_model, 93.5)
        self.assertEqual(len(aux_model.output), 2)

    def test_channel_first(self):
        # Build under channels_first, then restore the global default so later
        # tests are unaffected.
        K.set_image_data_format('channels_first')
        model = nasnet.cifar10()
        self.check_parameter_count(model, 3.3)
        self.check_penultimate_shape(model, (8, 8))
        K.set_image_data_format('channels_last')
class ConfigManger():
    """Merges a default YAML config with command-line overrides parsed by
    YAMLArgParser, optionally checking that the required top-level sections
    (Arch/Optim/Scheduler/Trainer) are present.

    NOTE(review): several methods below look like they lost their decorators
    during conversion (@classmethod on SET_DEFAULT_CONFIG_PATH, @staticmethod
    on _check_integrality, @property on config) -- confirm against the
    original source.
    """
    # Path to the default YAML config (set via SET_DEFAULT_CONFIG_PATH).
    DEFAULT_CONFIG = ''

    def __init__(self, DEFAULT_CONFIG_PATH: str=None, verbose=True, integrality_check=True) -> None:
        self.parsed_args: Dict[(str, Any)] = YAMLArgParser(verbose=verbose)
        if (DEFAULT_CONFIG_PATH is None):
            # Parser-only mode: no default config to merge against.
            warnings.warn('No default yaml is provided, only used for parser input arguments.', UserWarning)
            return
        self.SET_DEFAULT_CONFIG_PATH(DEFAULT_CONFIG_PATH)
        if self.parsed_args.get('Config'):
            # A directory argument is interpreted as containing 'config.yaml'.
            if Path(self.parsed_args['Config']).is_dir():
                self.parsed_args['Config'] = os.path.join(self.parsed_args['Config'], 'config.yaml')
        # Command-line args win over the default config during the merge.
        self.default_config: Dict[(str, Any)] = yaml_load(self.parsed_args.get('Config', self.DEFAULT_CONFIG), verbose=verbose)
        self.merged_config: Dict[(str, Any)] = dict_merge(self.default_config, self.parsed_args)
        if integrality_check:
            self._check_integrality(self.merged_config)
        if verbose:
            print('Merged args:')
            pprint(self.merged_config)

    def SET_DEFAULT_CONFIG_PATH(cls, default_config_path: str) -> None:
        """Validate and record the default YAML config path on the class."""
        path: Path = Path(default_config_path)
        assert path.exists(), path
        assert path.is_file(), path
        # NOTE(review): with_suffix() returns a (truthy) Path, so this assert
        # never fails -- it was probably meant to check path.suffix -- confirm.
        assert (path.with_suffix('.yaml') or path.with_suffix('.yml'))
        cls.DEFAULT_CONFIG = str(default_config_path)

    # NOTE(review): `merged_dict=Dict[(str, Any)]` sets a typing object as the
    # default value; this was almost certainly an annotation
    # (merged_dict: Dict[str, Any]) on a @staticmethod originally -- confirm.
    def _check_integrality(merged_dict=Dict[(str, Any)]):
        assert merged_dict.get('Arch'), f'Merged dict integrity check failed,{merged_dict.keys()}'
        assert merged_dict.get('Optim'), f'Merged dict integrity check failed,{merged_dict.keys()}'
        assert merged_dict.get('Scheduler'), f'Merged dict integrity check failed,{merged_dict.keys()}'
        assert merged_dict.get('Trainer'), f'Merged dict integrity check failed,{merged_dict.keys()}'

    def config(self):
        """Return the merged config (falling back to raw parsed args) as a
        defaultdict yielding None for missing keys."""
        try:
            config = self.merged_config
        except AttributeError:
            # merged_config is never set when no default YAML was provided.
            config = self.parsed_args
        from collections import defaultdict
        return defaultdict((lambda : None), config)
class BLS2017Model(nn.Module):
    """Balle et al. (2017) end-to-end image compression model: a GDN
    analysis transform, a factorized entropy bottleneck, and an inverse-GDN
    synthesis transform."""

    def __init__(self, num_filters=192):
        super(BLS2017Model, self).__init__()
        # Analysis (encoder) transform: 4x then 2x2 downsampling.
        self.conv1 = nn.Conv2d(3, num_filters, 9, stride=4, padding=4)
        self.gdn1 = gdn.GDN(num_filters)
        self.conv2 = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2)
        self.gdn2 = gdn.GDN(num_filters)
        self.conv3 = nn.Conv2d(num_filters, num_filters, 5, stride=2, padding=2)
        self.entropy_bottleneck = entropy_model.EntropyBottleneck(num_filters)
        # Synthesis (decoder) transform mirrors the encoder.
        self.deconv1 = nn.ConvTranspose2d(num_filters, num_filters, 5, stride=2, padding=2, output_padding=1)
        self.igdn2 = gdn.GDN(num_filters, inverse=True)
        self.deconv2 = nn.ConvTranspose2d(num_filters, num_filters, 5, stride=2, padding=2, output_padding=1)
        self.igdn3 = gdn.GDN(num_filters, inverse=True)
        self.deconv3 = nn.ConvTranspose2d(num_filters, 3, 9, stride=4, padding=4, output_padding=3)

    def encode(self, x):
        """Map an image batch to its latent representation."""
        h = x
        for stage in (self.conv1, self.gdn1, self.conv2, self.gdn2, self.conv3):
            h = stage(h)
        return h

    def decode(self, x):
        """Map (quantized) latents back to image space."""
        h = x
        for stage in (self.deconv1, self.igdn2, self.deconv2, self.igdn3, self.deconv3):
            h = stage(h)
        return h

    def forward(self, x):
        latent = self.encode(x)
        (latent_tilde, likelihoods) = self.entropy_bottleneck(latent)
        return (self.decode(latent_tilde), likelihoods)
def where(condition, input, other):
    """Element-wise select via arithmetic blending.

    Returns ``input`` where ``condition`` holds and ``other`` elsewhere,
    computed as ``condition * input + (1 - condition) * other`` so it also
    works when ``condition`` is an encrypted tensor. Plain torch boolean
    masks are cast to float first.
    """
    if is_encrypted_tensor(condition):
        return condition * input + (1 - condition) * other
    if torch.is_tensor(condition):
        condition = condition.float()
    return input * condition + other * (1 - condition)
def mIoU_parser():
    """Parse, echo, and validate CLI arguments for mIoU evaluation.

    Exits via ``parser.error`` when the dataset path, set-list file, ground
    truth directory, or prediction directory does not exist.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dataset', type=str, default=(basedir + '/Dataset'), help='path to dataset')
    parser.add_argument('--set_name', type=str, default='val.txt', help='name for set')
    parser.add_argument('--GT_dir_name', type=str, default='SegmentationClass', help='name for ground truth directory')
    parser.add_argument('--Pred_dir_name', type=str, default='CRF_masks', help='name for prediction directory')
    parser.add_argument('--classes', type=int, default=21, help='number of classes')
    args = parser.parse_args()
    # Echo the effective settings, padded to SPACE columns.
    for label, value in (
        ('Dataset', args.dataset),
        ('Set name', args.set_name),
        ('ground truth directory name', args.GT_dir_name),
        ('Prediction directory name', args.Pred_dir_name),
        ('Classes', args.classes),
    ):
        print('{:{}}: {}'.format(label, SPACE, value))
    if not os.path.isdir(args.dataset):
        parser.error('Wrong dataset path')
    if not os.path.isfile(f'{args.dataset}/{args.set_name}'):
        parser.error('Wrong set name')
    if not os.path.isdir(f'{args.dataset}/{args.GT_dir_name}'):
        parser.error('Wrong ground truth directory name')
    if not os.path.isdir(f'{args.dataset}/{args.Pred_dir_name}'):
        parser.error('Wrong prediction directory name')
    return args
class Merge_Run(nn.Module):
def __init__(self, in_channels, out_channels, init='xavier', ksize=3, stride=1, pad=1, dilation=1):
super(Merge_Run, self).__init__()
self.body1 = nn.Sequential(nn.Conv2d(in_channels, out_channels, ksize, stride, pad), nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.body2 = nn.Sequential(nn.Conv2d(in_channels, out_channels, ksize, stride, 2, 2), nn.LeakyReLU(negative_slope=0.2, inplace=True))
self.body3 = nn.Sequential(nn.Conv2d((in_channels * 2), out_channels, ksize, stride, pad), nn.LeakyReLU(negative_slope=0.2, inplace=True))
def forward(self, x):
out1 = self.body1(x)
out2 = self.body2(x)
c = torch.cat([out1, out2], dim=1)
c_out = self.body3(c)
out = (c_out + x)
return out |
@pytest.mark.parametrize('emitter_type', ['GradientArborescenceEmitter', 'EvolutionStrategyEmitter'], ids=['GAEmitter', 'ESEmitter'])
@pytest.mark.parametrize('wrong_array,offsets', [('solution_batch', [(0, 1), (1, 0)]), ('objective_batch', [(1,)]), ('measures_batch', [(0, 1), (1, 0)]), ('jacobian_batch', [(0, 0, 1), (0, 1, 0), (1, 0, 0)]), ('status_batch', [(1,)]), ('value_batch', [(1,)])])
def test_tell_arguments_incorrect_shape(emitter_type, wrong_array, offsets):
    """tell()/tell_dqd() must raise ValueError when any batch argument is
    mis-shaped by the given offsets.

    NOTE(review): the bare ``.parametrize(...)`` lines were syntactically
    invalid; restored the ``@pytest.mark.`` prefix they evidently lost.
    """
    batch_size = 3
    archive = GridArchive(solution_dim=1, dims=[10], ranges=[((- 1.0), 1.0)])
    if (emitter_type == 'GradientArborescenceEmitter'):
        emitter = GradientArborescenceEmitter(archive, x0=np.array([0]), sigma0=1.0, lr=0.1, batch_size=batch_size)
    elif (emitter_type == 'EvolutionStrategyEmitter'):
        emitter = EvolutionStrategyEmitter(archive, x0=np.array([0]), sigma0=1.0, ranker='imp', batch_size=batch_size)
    # Correctly-shaped baselines; exactly one is perturbed per offset below.
    solution_batch = np.ones((batch_size, archive.solution_dim))
    objective_batch = np.ones(batch_size)
    measures_batch = np.ones((batch_size, archive.measure_dim))
    jacobian_batch = np.ones((batch_size, (archive.measure_dim + 1), archive.solution_dim))
    status_batch = np.ones(batch_size)
    value_batch = np.ones(batch_size)
    for offset in offsets:
        if (wrong_array == 'solution_batch'):
            solution_batch = np.ones(((batch_size + offset[0]), (archive.solution_dim + offset[1])))
        elif (wrong_array == 'objective_batch'):
            objective_batch = np.ones((batch_size + offset[0]))
        elif (wrong_array == 'measures_batch'):
            measures_batch = np.ones(((batch_size + offset[0]), (archive.measure_dim + offset[1])))
        elif (wrong_array == 'jacobian_batch'):
            jacobian_batch = np.ones(((batch_size + offset[0]), ((archive.measure_dim + 1) + offset[1]), (archive.solution_dim + offset[2])))
        elif (wrong_array == 'status_batch'):
            status_batch = np.ones((batch_size + offset[0]))
        elif (wrong_array == 'value_batch'):
            value_batch = np.ones((batch_size + offset[0]))
        if isinstance(emitter, GradientArborescenceEmitter):
            with pytest.raises(ValueError):
                emitter.tell_dqd(solution_batch, objective_batch, measures_batch, jacobian_batch, {'status': status_batch, 'value': value_batch})
        # The jacobian only applies to tell_dqd; plain tell() would not see it.
        if (wrong_array == 'jacobian_batch'):
            return
        with pytest.raises(ValueError):
            emitter.tell(solution_batch, objective_batch, measures_batch, {'status': status_batch, 'value': value_batch})
def attention(x):
    """Keras attention head: three regularized ReLU convs, a 1-channel conv,
    8x8 max pooling, and a smoothed sample-softmax over spatial locations."""
    shared = dict(activation='relu', padding='valid', kernel_regularizer=l2(1e-05))
    # Feature stack with widening channel counts.
    for filters in (8, 16, 32):
        x = Conv2D(filters, kernel_size=3, **shared)(x)
    x = Conv2D(1, kernel_size=3)(x)
    x = MaxPooling2D(pool_size=8)(x)
    x = SampleSoftmax(squeeze_channels=True, smooth=0.0001)(x)
    return x
def define_net_r(opt):
    """Instantiate the network described by *opt*.

    Pops ``opt['type']`` (mutating *opt*) and passes the remaining options to
    the dynamically resolved architecture class.
    """
    network_type = opt.pop('type')
    return dynamic_instantiation(_arch_modules, network_type, opt)
def get_charset(lang):
    """Return the charset object for *lang*, lazily instantiating it.

    The registry maps languages to either a charset class or an already-built
    instance; on first access the class is replaced by its instance.
    """
    global _CHARSETS
    registered = _CHARSETS[lang]
    if isinstance(registered, type):
        # First use: instantiate and cache the instance in place of the class.
        _CHARSETS[lang] = registered()
    return _CHARSETS[lang]
class GeneralDataset(Dataset):
    """ImageFolder-backed dataset with optional image/target transforms and an
    attached accuracy evaluator."""

    def __init__(self, root, transform=None, target_transform=None, top_k=(1, 5), keep_rgb=False):
        self.data_set = datasets.ImageFolder(root)
        self.classes = self.data_set.classes
        self.root = root
        self.transform = transform
        self.target_transform = target_transform
        self.keep_rgb = keep_rgb
        self._update_evaluator(top_k)

    def __getitem__(self, index: int):
        sample, label = self.data_set[index]
        sample = default_converter(sample, rgb=self.keep_rgb)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            label = self.target_transform(label)
        return (sample, label)

    def __len__(self) -> int:
        return len(self.data_set)

    def _update_evaluator(self, top_k):
        # Rebuild the evaluator for the current class list and top-k settings.
        self.evaluator = GeneralEvaluator(self.classes, top_k=top_k)

    def get_classes(self):
        return self.classes

    def __repr__(self):
        return f'{self.__class__.__name__} ({self.root})'
def get_and_print_layers_to_use_halut(model: torch.nn.Module) -> list[str]:
    """Collect (and print) dotted names of all Halut layers inside *model*.

    Walks the module tree recursively; each ``HalutLinear``/``HalutConv2d``
    contributes its dotted path (e.g. ``'features.0'``).
    """
    found: list[str] = []

    def _collect(module: torch.nn.Module, prefix: str = '') -> None:
        if isinstance(module, (HalutLinear, HalutConv2d)):
            # Drop the trailing '.' accumulated on the prefix.
            found.append(prefix[:-1])
        for name, child in module._modules.items():
            if child is not None:
                _collect(child, prefix + name + '.')

    _collect(model)
    print(found)
    return found
def test_shape_validation_during_creation():
    """MinDeltaBoxTensor must raise ValueError for invalidly shaped inputs."""
    for bad_shape in [(3,), (3, 11), (3, 3, 3)]:
        with pytest.raises(ValueError):
            MinDeltaBoxTensor(torch.tensor(np.random.rand(*bad_shape)))
def get_d_UB(l, u, func, dfunc):
    """Per-element search for the point d where the chord of `func` from l to d
    has the same slope as the tangent at d (used to build linear upper bounds
    for activation functions).

    Args:
        l: elementwise lower pre-activation bounds (negative — ``ub = -l``
            relies on it; NOTE(review): confirm callers guarantee l < 0).
        u: elementwise upper bounds (unused here).
        func: the activation function.
        dfunc: its derivative.

    Returns:
        Tensor of tangent points ``d`` found by per-element bisection.
    """
    # Slope mismatch between the chord (l, d) and the tangent at d; the root
    # of `diff` in d is the sought point.
    diff = (lambda d, l: (((func(d) - func(l)) / (d - l)) - dfunc(d)))
    max_iter = 1000
    ub = (- l)
    d = (ub / 2)
    device = l.device
    lb = torch.zeros(l.shape, device=device)
    # uint8 mask of elements still being bisected (used for masked indexing).
    keep_search = torch.ones(l.shape, device=device).byte()
    for i in range(max_iter):
        t = diff(d[keep_search], l[keep_search])
        # Keep searching where the sign is wrong (t < 0) or the residual is
        # above the 0.01 tolerance; `+` acts as elementwise OR on the masks.
        idx = ((t < 0) + (t.abs() > 0.01))
        keep_search[keep_search] = (idx > 0)
        if (keep_search.sum() == 0):
            break
        t = t[(idx > 0)]
        # Where t > 0 the current d overshoots: bring the upper bracket down.
        idx = (t > 0)
        keep_search_copy = keep_search.data.clone()
        keep_search_copy[keep_search_copy] = idx
        ub[keep_search_copy] = d[keep_search_copy]
        d[keep_search_copy] = ((d[keep_search_copy] + lb[keep_search_copy]) / 2)
        # Where t < 0 the current d undershoots: raise the lower bracket.
        idx = (t < 0)
        keep_search_copy = keep_search.data.clone()
        keep_search_copy[keep_search_copy] = idx
        lb[keep_search_copy] = d[keep_search_copy]
        d[keep_search_copy] = ((d[keep_search_copy] + ub[keep_search_copy]) / 2)
    return d
class _ClassInfo(_BlockInfo):
    """Tracks a C++ class/struct block during linting (cpplint-style).

    Records default access level, the indentation of the declaration line, and
    scans forward to find the line where the block's braces balance.
    """

    def __init__(self, name, class_or_struct, clean_lines, linenum):
        _BlockInfo.__init__(self, False)
        self.name = name
        self.starting_linenum = linenum
        self.is_derived = False
        if (class_or_struct == 'struct'):
            # structs default to public access; classes to private.
            self.access = 'public'
            self.is_struct = True
        else:
            self.access = 'private'
            self.is_struct = False
        # Remember the indent of the class/struct line so the closing brace
        # can later be checked against it.
        initial_indent = Match('^( *)\\S', clean_lines.raw_lines[linenum])
        if initial_indent:
            self.class_indent = len(initial_indent.group(1))
        else:
            self.class_indent = 0
        # Scan forward tracking brace depth to locate the block's last line.
        self.last_line = 0
        depth = 0
        for i in range(linenum, clean_lines.NumLines()):
            line = clean_lines.elided[i]
            depth += (line.count('{') - line.count('}'))
            if (not depth):
                self.last_line = i
                break

    def CheckBegin(self, filename, clean_lines, linenum, error):
        # A single ':' (but not '::') on the declaration line signals inheritance.
        if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
            self.is_derived = True

    def CheckEnd(self, filename, clean_lines, linenum, error):
        # The closing brace must line up with the class/struct keyword.
        indent = Match('^( *)\\}', clean_lines.elided[linenum])
        if (indent and (len(indent.group(1)) != self.class_indent)):
            if self.is_struct:
                parent = ('struct ' + self.name)
            else:
                parent = ('class ' + self.name)
            error(filename, linenum, 'whitespace/indent', 3, ('Closing brace should be aligned with beginning of %s' % parent))
def area(x, y):
    """Pixel area of the polygon with vertex coordinates (x, y).

    Rasterizes the polygon into a binary mask and counts the set pixels.

    Args:
        x: array-like of integer column coordinates of the polygon vertices.
        y: array-like of integer row coordinates of the polygon vertices.

    Returns:
        Number of pixels inside the polygon (float, from ``np.sum``).
    """
    ymax = np.max(y)
    xmax = np.max(x)
    # +1 so vertices lying on the max row/column stay inside the mask; the
    # previous (ymax, xmax) mask could raise IndexError for such pixels.
    bin_mask = np.zeros((ymax + 1, xmax + 1))
    # Passing the mask shape makes skimage clip returned indices in-bounds.
    (rr, cc) = polygon(y, x, bin_mask.shape)
    bin_mask[(rr, cc)] = 1
    return np.sum(bin_mask)
class CIFAR100(DATASET):
    """Dataset-config record for CIFAR-100 (hydra-style `_target_` loader spec)."""

    # Dotted path of the loader callable to instantiate.
    _target_: str = 'dataset_loaders.load_CIFAR100'
    name: str = 'CIFAR100'
    IN_CHANNEL: int = 3  # RGB images
    N_CLASSES: int = 100
    # default_factory keeps the tuple out of shared mutable class state.
    IMG_SIZE: Tuple[int] = field(default_factory=(lambda : (32, 32)))
def mkdir(directory: str) -> None:
    """Create *directory* (including parents); no-op if it already exists."""
    target = pathlib.Path(directory)
    target.mkdir(parents=True, exist_ok=True)
class ROSDataManagerConfig(base_datamanager.VanillaDataManagerConfig):
    """Config for a data manager that streams training images/poses over ROS."""

    # Concrete manager class this config instantiates.
    _target: Type = field(default_factory=(lambda : ROSDataManager))
    # NOTE(review): shared mutable default — every config object reuses the
    # same ROSDataParserConfig instance (and recent dataclasses reject
    # non-field mutable defaults); confirm this is intended.
    dataparser: ROSDataParserConfig = ROSDataParserConfig()
    # Whether to publish a pose array of training cameras (visualization aid).
    publish_training_posearray: bool = True
    # Polling frequency for new training data (presumably Hz — TODO confirm).
    data_update_freq: float = 5.0
    # Fixed capacity of the training-image buffer.
    num_training_images: int = 500
class CheckpointFunction(torch.autograd.Function):
    """Activation checkpointing that also snapshots/restores the model-parallel
    CUDA RNG tracker, so stochastic ops (e.g. dropout) are replayed exactly
    when the forward pass is recomputed in backward."""

    @staticmethod
    def forward(ctx, run_function, preserve_rng_state, *args):
        # NOTE(review): `forward`/`backward` on a torch.autograd.Function must
        # be staticmethods; the decorators appear to have been lost and are
        # restored here.
        check_backward_validity(args)
        ctx.run_function = run_function
        ctx.preserve_rng_state = preserve_rng_state
        # Snapshot the parallel-RNG tracker states as of the forward pass.
        ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
        if preserve_rng_state:
            ctx.fwd_cpu_state = torch.get_rng_state()
            ctx.had_cuda_in_fwd = False
            if torch.cuda._initialized:
                ctx.had_cuda_in_fwd = True
                (ctx.fwd_gpu_devices, ctx.fwd_gpu_states) = get_device_states(*args)
        ctx.save_for_backward(*args)
        with torch.no_grad():
            # Run without building a graph; it is rebuilt during backward.
            outputs = run_function(*args)
        return outputs

    @staticmethod
    def backward(ctx, *args):
        if (not torch.autograd._is_checkpoint_valid()):
            raise RuntimeError('Checkpointing is not compatible with .grad(), please use .backward() if possible')
        inputs = ctx.saved_tensors
        rng_devices = []
        if (ctx.preserve_rng_state and ctx.had_cuda_in_fwd):
            rng_devices = ctx.fwd_gpu_devices
        # Remember the current tracker states so they can be restored after
        # the recompute below.
        bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()
        with torch.random.fork_rng(devices=rng_devices, enabled=ctx.preserve_rng_state):
            if ctx.preserve_rng_state:
                # Rewind RNG to the forward-time states before recomputing.
                torch.set_rng_state(ctx.fwd_cpu_state)
                if ctx.had_cuda_in_fwd:
                    set_device_states(ctx.fwd_gpu_devices, ctx.fwd_gpu_states)
            get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)
            detached_inputs = detach_variable(inputs)
            with torch.enable_grad():
                outputs = ctx.run_function(*detached_inputs)
        if isinstance(outputs, torch.Tensor):
            outputs = (outputs,)
        # Restore the backward-time tracker states before backpropagating.
        get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)
        torch.autograd.backward(outputs, args)
        grads = tuple(((inp.grad if isinstance(inp, torch.Tensor) else inp) for inp in detached_inputs))
        # None, None for the run_function / preserve_rng_state arguments.
        return ((None, None) + grads)
def render_batch(npy_dir, execute_python='./scripts/visualize_motion.sh', mode='sequence'):
    """Invoke the external visualization script on *npy_dir*.

    Args:
        npy_dir: directory of .npy motion files to render.
        execute_python: script/command to run.
        mode: rendering mode string passed to the script.
    """
    import shlex  # local import: only needed here
    # Quote the interpolated arguments so paths containing spaces or shell
    # metacharacters cannot break (or inject into) the shell command.
    os.system(f'{execute_python} {shlex.quote(str(npy_dir))} {shlex.quote(str(mode))}')
def test_components_vs_sklearn():
    """Nose-style generator test: every estimator/shape/n_components combo must
    recover the same principal components (up to column sign) as sklearn."""

    def check_components(estimator_cls, n_components, shape):
        data = DATA[shape]
        fitted = estimator_cls(n_components, **KWDS[estimator_cls]).fit(data)
        reference = SKPCA(n_components).fit(data)
        assert_columns_allclose_upto_sign(fitted.components_.T, reference.components_.T)

    for estimator_cls in ESTIMATORS:
        for shape in SHAPES:
            for n_components in N_COMPONENTS:
                yield check_components, estimator_cls, n_components, shape
def base_cli_dir_args(image: pathlib.Path, mask: pathlib.Path) -> typing.List[str]:
    """Build base CLI arguments pointing at the image and mask directories.

    Returns ``[<image dir>, '-m', <mask dir>]``. Previously the args were
    built as one string and ``.split()``, which corrupted directories whose
    paths contain whitespace; the list is now built directly.
    """
    return [str(image.parent), '-m', str(mask.parent)]
class DistogramHead(nn.Module):
    """Projects pair embeddings to symmetric distogram bin logits."""

    def __init__(self, c_z, no_bins, **kwargs):
        super(DistogramHead, self).__init__()
        self.c_z = c_z          # pair-embedding channel dimension
        self.no_bins = no_bins  # number of distance bins
        self.linear = Linear(self.c_z, self.no_bins, init='final')

    def forward(self, z):
        """Map ``z`` [..., N, N, c_z] to logits [..., N, N, no_bins]."""
        pair_logits = self.linear(z)
        # Symmetrize over the two residue axes.
        return pair_logits + pair_logits.transpose(-2, -3)
def _config_validation(config):
if (config == None):
return None
if (isinstance(config, dict) != True):
with open(config, 'r') as conf_file:
import yaml
config = yaml.safe_load(conf_file)
from schema import Schema
conf_schema = Schema({'pattern_switch': Schema({str: bool}, error='The format of the pattern config is wrong.')})
return conf_schema.validate(config) |
class ObjectData():
    """Lightweight record for a detected object's id, position, and category."""

    def __init__(self, id, x, y):
        # `id` shadows the builtin but is part of the public call signature.
        self.id, self.x, self.y = id, x, y
        self.type = 'other'  # default category until classified
class SUBDATA(data.Dataset):
    """Dataset over pre-built training sub-data rows stored in a pickled .npy."""

    def __init__(self):
        # Pickled object array: each row carries (at least) 8 fields.
        self.data = np.load('./data/sub_data_training.npy', allow_pickle=True)

    def __getitem__(self, index):
        row = self.data[index]
        return tuple(row[field] for field in range(8))

    def __len__(self):
        # Truncate to a multiple of 8 so fixed-size grouping sees no remainder.
        return (len(self.data) // 8) * 8
def train(args, train_dataset, model, tokenizer, teacher=None):
    """Fine-tune `model` on `train_dataset` (HuggingFace SQuAD-style loop),
    optionally distilling start/end logits from a frozen `teacher`.

    Handles distributed/fp16 setup, optimizer+scheduler creation and state
    resume, resuming the global step from a `checkpoint-<step>` path name,
    periodic TensorBoard logging, and periodic checkpoint saving.

    Returns:
        Tuple ``(global_step, tr_loss / global_step)``.
    """
    if (args.local_rank in [(- 1), 0]):
        # Only the main process writes TensorBoard summaries.
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    if (args.max_steps > 0):
        # A fixed optimization-step budget overrides num_train_epochs.
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # Biases and LayerNorm weights are excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    # Resume optimizer/scheduler state if checkpoint files sit beside the model.
    if (os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt'))):
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError('Please install apex from to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    if (args.n_gpu > 1):
        # Single-node multi-GPU.
        model = nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info(' Num examples = %d', len(train_dataset))
    logger.info(' Num Epochs = %d', args.num_train_epochs)
    logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info(' Total optimization steps = %d', t_total)
    global_step = 1
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    if os.path.exists(args.model_name_or_path):
        try:
            # Paths like ".../checkpoint-500" encode the resume step.
            checkpoint_suffix = args.model_name_or_path.split('-')[(- 1)].split('/')[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = (global_step // (len(train_dataloader) // args.gradient_accumulation_steps))
            steps_trained_in_current_epoch = (global_step % (len(train_dataloader) // args.gradient_accumulation_steps))
            logger.info(' Continuing training from checkpoint, will skip to saved global_step')
            logger.info(' Continuing training from epoch %d', epochs_trained)
            logger.info(' Continuing training from global step %d', global_step)
            logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
        except ValueError:
            logger.info(' Starting fine-tuning.')
    (tr_loss, logging_loss) = (0.0, 0.0)
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    set_seed(args)
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            if (steps_trained_in_current_epoch > 0):
                # Fast-forward through already-trained steps when resuming.
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            if (teacher is not None):
                teacher.eval()
            batch = tuple((t.to(args.device) for t in batch))
            inputs = {'input_ids': batch[0], 'attention_mask': batch[1], 'start_positions': batch[3], 'end_positions': batch[4]}
            if (args.model_type != 'distilbert'):
                # XLM has no token_type_ids; other models take segment ids.
                inputs['token_type_ids'] = (None if (args.model_type == 'xlm') else batch[2])
            if (args.model_type in ['xlnet', 'xlm']):
                inputs.update({'cls_index': batch[5], 'p_mask': batch[6]})
                if args.version_2_with_negative:
                    inputs.update({'is_impossible': batch[7]})
            outputs = model(**inputs)
            (loss, start_logits_stu, end_logits_stu) = outputs
            if (teacher is not None):
                # Distillation: KL between temperature-softened student and
                # teacher start/end distributions, mixed with the SQuAD loss.
                if ('token_type_ids' not in inputs):
                    inputs['token_type_ids'] = (None if (args.teacher_type == 'xlm') else batch[2])
                with torch.no_grad():
                    (start_logits_tea, end_logits_tea) = teacher(input_ids=inputs['input_ids'], token_type_ids=inputs['token_type_ids'], attention_mask=inputs['attention_mask'])
                assert (start_logits_tea.size() == start_logits_stu.size())
                assert (end_logits_tea.size() == end_logits_stu.size())
                loss_fct = nn.KLDivLoss(reduction='batchmean')
                loss_start = (loss_fct(nn.functional.log_softmax((start_logits_stu / args.temperature), dim=(- 1)), nn.functional.softmax((start_logits_tea / args.temperature), dim=(- 1))) * (args.temperature ** 2))
                loss_end = (loss_fct(nn.functional.log_softmax((end_logits_stu / args.temperature), dim=(- 1)), nn.functional.softmax((end_logits_tea / args.temperature), dim=(- 1))) * (args.temperature ** 2))
                loss_ce = ((loss_start + loss_end) / 2.0)
                loss = ((args.alpha_ce * loss_ce) + (args.alpha_squad * loss))
            if (args.n_gpu > 1):
                # DataParallel returns a per-GPU loss vector.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                # One optimizer step per accumulation window.
                if args.fp16:
                    nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
                    logging_loss = tr_loss
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
                    if (not os.path.exists(output_dir)):
                        os.makedirs(output_dir)
                    # Unwrap DataParallel/DDP before saving.
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
                    logger.info('Saving optimizer and scheduler states to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
def adjust_improvedgt_folders(kitti_folder='kitti_download'):
    """Reorganize KITTI 'Depth_improved' ground-truth folders in place.

    Moves files out of every ``proj_depth`` subtree into a flat
    ``<camera>/data`` layout two levels up, then deletes the old trees.
    Destructive: uses ``shutil.move`` and ``shutil.rmtree``.
    """
    path_getter = gp.GetPath()
    dataset_folder_path = path_getter.get_data_path()
    gt_path = os.path.join(dataset_folder_path, kitti_folder)
    gt_path = os.path.join(gt_path, 'Depth_improved')
    assert os.path.isdir(gt_path), 'Path to data does not exist'
    folders = dl.DirLister.get_directories(gt_path)
    folders = dl.DirLister.include_dirs_by_name(folders, 'proj_depth')
    for f in folders:
        # f appears to be .../<drive>/proj_depth/<kind>/<camera>; strip two
        # levels to build the flattened .../<drive>/<camera>/data target.
        (ground_path, camera) = os.path.split(f)
        ground_path = os.path.split(ground_path)[0]
        ground_path = os.path.split(ground_path)[0]
        target_path = os.path.join(ground_path, camera, 'data')
        if (not os.path.isdir(target_path)):
            os.makedirs(target_path)
        else:
            # Target exists — assume this folder was already migrated.
            continue
        for filepath in glob.glob(os.path.join(f, '*')):
            shutil.move(filepath, target_path)
        print(target_path)
    for f in folders:
        # Remove the now-emptied proj_depth tree (two levels up from f).
        remove_path = os.path.split(f)[0]
        remove_path = os.path.split(remove_path)[0]
        print(remove_path)
        shutil.rmtree(remove_path, ignore_errors=True)
def main():
    """Decode a file of SentencePiece pieces or ids back to plain text,
    printing one decoded line per input line."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', required=True, help='sentencepiece model to use for decoding')
    parser.add_argument('--input', required=True, help='input file to decode')
    parser.add_argument('--input_format', choices=['piece', 'id'], default='piece')
    args = parser.parse_args()
    sp = spm.SentencePieceProcessor()
    sp.Load(args.model)
    if (args.input_format == 'piece'):
        def decode(tokens):
            return ''.join(sp.DecodePieces(tokens))
    elif (args.input_format == 'id'):
        def decode(tokens):
            return ''.join(sp.DecodeIds(tokens))
    else:
        # Unreachable given the argparse choices; kept as a guard.
        raise NotImplementedError

    def tok2int(tok):
        # Map the literal '<<unk>>' marker to id 0.
        return 0 if tok == '<<unk>>' else int(tok)

    with open(args.input, 'r', encoding='utf-8') as handle:
        for line in handle:
            tokens = line.rstrip().split()
            if (args.input_format == 'id'):
                print(decode([tok2int(tok) for tok in tokens]))
            elif (args.input_format == 'piece'):
                print(decode(tokens))
class RSSMPosterior(nn.Module):
    """RSSM posterior q(s_t | h_t, o_t): fuses the deterministic prior output
    with observation features and samples the stochastic latent."""

    c: Config  # hyperparameters (layer sizes, minimum stddev)

    def __call__(self, prior, obs_inputs):
        inputs = jnp.concatenate([prior['det_out'], obs_inputs], (- 1))
        # Two-layer ReLU MLP over [deterministic state, observation features].
        hl = nn.relu(nn.Dense(self.c.cell_embed_size)(inputs))
        hl = nn.relu(nn.Dense(self.c.cell_embed_size)(hl))
        mean = nn.Dense(self.c.cell_stoch_size)(hl)
        # softplus(x + 0.54) ~ 1 at x = 0, presumably chosen so stddev starts
        # near 1; floored at cell_min_stddev for numerical stability.
        stddev = (nn.softplus(nn.Dense(self.c.cell_stoch_size)((hl + 0.54))) + self.c.cell_min_stddev)
        dist = tfd.MultivariateNormalDiag(mean, stddev)
        # Sample using the module's dedicated 'sample' RNG stream.
        sample = dist.sample(seed=self.make_rng('sample'))
        return dict(mean=mean, stddev=stddev, sample=sample, det_out=prior['det_out'], det_state=prior['det_state'], output=jnp.concatenate([sample, prior['det_out']], (- 1)))
def get_llm_packages():
    """Walk ``<llm_home>/bigdl`` and return dotted package names, excluding any
    that match the module-level ``exclude_patterns`` globs."""
    packages = []
    for (dirpath, _, _) in os.walk(os.path.join(llm_home, 'bigdl')):
        print(dirpath)
        # Convert the directory path (relative to llm_home) to a dotted name.
        package = dirpath.split(llm_home + os.sep)[1].replace(os.sep, '.')
        excluded = any(fnmatch.fnmatchcase(package, pat=pattern) for pattern in exclude_patterns)
        if excluded:
            print('excluding', package)
        else:
            packages.append(package)
            print('including', package)
    return packages
def constrain_norm(grads: P, preconditioned_grads: P, learning_rate: chex.Numeric, norm_constraint: chex.Numeric=0.001) -> P:
    """Scale preconditioned gradients so that the squared update norm (in the
    preconditioner metric, including the learning rate) stays at or below
    ``norm_constraint``; updates already within the bound are left unscaled."""
    update_sq_norm = tree_inner_product(preconditioned_grads, grads) * (learning_rate ** 2)
    # Aggregate across devices when running under pmap.
    update_sq_norm = utils.distribute.pmean_if_pmap(update_sq_norm)
    scale = jnp.minimum(jnp.sqrt(norm_constraint / update_sq_norm), 1)
    return multiply_tree_by_scalar(preconditioned_grads, scale)
@pytest.mark.skip(reason='make_bag test needs to be updated')
def test_make_bag_regression():
    """make_bag() should split X/y/w into train/val with the expected sizes
    and preserve the overall sample set.

    NOTE(review): restored the ``@pytest.mark.`` prefix on the bare
    ``.skip(...)`` line, and added the missing ``assert`` on the final
    comparison, whose result was previously discarded.
    """
    data = synthetic_regression()
    X_orig = data['full']['X']
    y_orig = data['full']['y']
    X = np.array(X_orig)
    y = np.array(y_orig)
    w = np.ones_like(y, dtype=np.float64)
    test_size = 0.2
    (X_train, X_val, y_train, y_val, w_train, w_val, _, _) = make_bag(X, y, w, test_size=test_size, random_state=1, is_classification=False)
    num_samples = X.shape[0]
    num_features = X.shape[1]
    num_test_expected = ceil((test_size * num_samples))
    num_train_expected = (num_samples - num_test_expected)
    assert (X_train.shape == (num_features, num_train_expected))
    assert (X_val.shape == (num_features, num_test_expected))
    assert (y_train.shape == (num_train_expected,))
    assert (y_val.shape == (num_test_expected,))
    assert (w_train.shape == (num_train_expected,))
    assert (w_val.shape == (num_test_expected,))
    # Train + val together must be a permutation of the original samples.
    X_all = np.concatenate((X_train.T, X_val.T))
    assert np.array_equal(np.sort(X, axis=0), np.sort(X_all, axis=0))
def get_total(records):
    """Average FPS statistics over every record in *records*.

    Args:
        records: mapping of group name -> list of dicts with 'fps_mean' and
            'fps_std' keys.

    Returns:
        (mean of fps_mean values, mean of fps_std values) across all records.
    """
    flat = [entry for group in records.values() for entry in group]
    total_mean_fps = sum(entry['fps_mean'] for entry in flat) / len(flat)
    total_mean_std = sum(entry['fps_std'] for entry in flat) / len(flat)
    return (total_mean_fps, total_mean_std)
def kronecker_product(a, b):
    """Batched Kronecker product over the last two dimensions of a and b.

    Leading (batch) dimensions broadcast; the result's last two dims are the
    elementwise products of the operands' last two dims.
    """
    # Outer-product the trailing matrix dims via broadcasting, then collapse
    # the interleaved axes into the Kronecker block layout.
    block = a.unsqueeze(-1).unsqueeze(-3) * b.unsqueeze(-2).unsqueeze(-4)
    out_rows = a.shape[-2] * b.shape[-2]
    out_cols = a.shape[-1] * b.shape[-1]
    return block.reshape(block.shape[:-4] + (out_rows, out_cols))
class PairClassifiers(nn.Module):
    """Two parallel linear classifiers over a shared feature vector.

    Training mode returns both heads' logits; eval mode returns only the
    primary head's.
    """

    def __init__(self, fdim, num_classes):
        super().__init__()
        self.c1 = nn.Linear(fdim, num_classes)
        self.c2 = nn.Linear(fdim, num_classes)

    def forward(self, x):
        primary = self.c1(x)
        if not self.training:
            # Inference uses only the primary classifier.
            return primary
        return primary, self.c2(x)
class PreActResNet(nn.Module):
    """Pre-activation ResNet: conv stem, four residual stages, BN+ReLU head,
    global average pooling, and a linear classifier."""

    def __init__(self, block, num_blocks, num_classes=200):
        super(PreActResNet, self).__init__()
        self.in_planes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.bn = nn.BatchNorm2d(512 * block.expansion)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(512 * block.expansion, num_classes)

    def _make_layer(self, block, planes, num_blocks, stride):
        """Stack `num_blocks` blocks; only the first may downsample."""
        stages = []
        for s in [stride] + [1] * (num_blocks - 1):
            stages.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stages)

    def forward(self, x):
        out = self.conv1(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
        out = F.relu(self.bn(out))
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        return self.linear(out)
def noam_schedule(step, warmup_step=4000):
    """Noam learning-rate multiplier: linear warmup to 1.0 over
    `warmup_step` steps, then inverse-square-root decay (continuous at the
    boundary)."""
    if step > warmup_step:
        return (warmup_step ** 0.5) * (step ** (-0.5))
    return step / warmup_step
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse a pip requirements file into a list of requirement strings.

    Args:
        fname: path to the requirements file; returns [] when it is missing.
        with_version: when True, keep version specifiers (and platform
            markers) in the returned items; otherwise return bare names.

    Returns:
        list[str]: one entry per requirement, e.g. ``['numpy>=1.0', 'scipy']``.
    """
    import re
    import sys
    from os.path import exists

    def iter_file(fpath):
        # Yield info dicts for every meaningful line, recursing into -r files.
        with open(fpath, 'r') as handle:
            for raw in handle.readlines():
                stripped = raw.strip()
                if stripped and not stripped.startswith('#'):
                    for info in parse_one(stripped):
                        yield info

    def parse_one(line):
        # Translate a single requirement line into one or more info dicts.
        if line.startswith('-r '):
            # Nested requirements file: recurse.
            for info in iter_file(line.split(' ')[1]):
                yield info
            return
        info = {'line': line}
        if line.startswith('-e '):
            # Editable install: the package name follows #egg=.
            info['package'] = line.split('#egg=')[1]
        elif '+' in line:
            # VCS / URL requirement: keep verbatim.
            info['package'] = line
        else:
            operator_pat = '(' + '|'.join(['>=', '<=', '==', '>', '<']) + ')'
            pieces = [piece.strip() for piece in re.split(operator_pat, line, maxsplit=1)]
            info['package'] = pieces[0]
            if len(pieces) > 1:
                op, remainder = pieces[1:]
                if ';' in remainder:
                    # Split off an environment marker like `;python_version>"3"`.
                    version, platform_deps = map(str.strip, remainder.split(';'))
                    info['platform_deps'] = platform_deps
                else:
                    version = remainder
                info['version'] = (op, version)
        yield info

    items = []
    if exists(fname):
        for info in iter_file(fname):
            parts = [info['package']]
            if with_version and 'version' in info:
                parts.extend(info['version'])
            if not sys.version.startswith('3.4'):
                # Environment markers are unsupported on python 3.4.
                platform_deps = info.get('platform_deps')
                if platform_deps is not None:
                    parts.append(';' + platform_deps)
            items.append(''.join(parts))
    return items
def mixed_volume(mixture, points, checkin=True):
    """Compute the mixed volume of the given supports via PHCpack.

    Args:
        mixture: multiplicity of each distinct support.
        points: one sequence of lattice points per support.
        checkin: when True, validate `mixture` against `points` first.

    Returns:
        The mixed volume, or -1 when the input check fails.
    """
    from phcpy.phcpy2c3 import py2c_celcon_initialize_supports as init
    from phcpy.phcpy2c3 import py2c_celcon_set_type_of_mixture as setmix
    from phcpy.phcpy2c3 import py2c_celcon_append_lifted_point as applft
    from phcpy.phcpy2c3 import py2c_celcon_mixed_volume_of_supports as mixvol
    if checkin and (not check_mixture(mixture, points)):
        print('incorrect type of mixture')
        return (- 1)
    nbr = len(mixture)
    init(nbr)
    setmix(nbr, str(mixture))
    # Append every support point with a zero lifting value.
    for k in range(nbr):
        for point in points[k]:
            lifted = list(point) + [0]
            applft(len(lifted), k + 1, str(lifted))
    return mixvol()
def _check_model_old_version(model):
if hasattr(model.WN[0], 'res_layers'):
return True
else:
return False |
def icnr(x, scale=2, init=nn.init.kaiming_normal_):
    """ICNR initialization for sub-pixel (PixelShuffle) conv weights, in place.

    Initializes a (ni / scale**2)-channel sub-kernel and tiles it scale**2
    times so that PixelShuffle upsampling starts out artifact-free.

    Args:
        x: conv weight tensor of shape (ni, nf, h, w); ni is assumed divisible
            by scale**2 — TODO confirm callers guarantee this.
        scale: upsampling factor of the following PixelShuffle.
        init: initializer applied to the base sub-kernel.
    """
    (ni, nf, h, w) = x.shape
    ni2 = int((ni / (scale ** 2)))
    # Initialize one sub-kernel, then replicate it scale**2 times.
    k = init(torch.zeros([ni2, nf, h, w])).transpose(0, 1)
    k = k.contiguous().view(ni2, nf, (- 1))
    k = k.repeat(1, 1, (scale ** 2))
    # NOTE(review): this view/transpose sequence reinterprets memory so groups
    # of scale**2 channels share weights — confirm it matches the PixelShuffle
    # channel layout in use before altering.
    k = k.contiguous().view([nf, ni, h, w]).transpose(0, 1)
    x.data.copy_(k)
def setup_custom_environment(custom_module):
    """Load *custom_module* (a .py file path or an importable module name) and
    invoke its required ``setup_environment()`` hook.

    Raises:
        AssertionError: when the module does not expose a callable
            ``setup_environment``.
    """
    if custom_module.endswith('.py'):
        # A file path: load it under a fixed synthetic module name.
        module = _import_file('detectron2.utils.env.custom_module', custom_module)
    else:
        module = importlib.import_module(custom_module)
    has_hook = hasattr(module, 'setup_environment') and callable(module.setup_environment)
    assert has_hook, "Custom environment module defined in {} does not have the required callable attribute 'setup_environment'.".format(custom_module)
    module.setup_environment()
def test_move_right(board: Board, another_board: Board) -> None:
    """move_right must merge tiles rightward and report the expected reward
    for both fixture boards."""
    cases = [
        (board, [[0, 0, 0, 2], [0, 0, 2, 2], [0, 0, 1, 2], [0, 0, 0, 2]], 8),
        (another_board, [[0, 0, 2, 2], [2, 3, 1, 2], [2, 3, 1, 2], [0, 0, 2, 3]], 4),
    ]
    for start, expected_cells, expected_reward in cases:
        moved, reward = move_right(start)
        assert jnp.array_equal(moved, jnp.array(expected_cells))
        assert reward == expected_reward
class Logger(object):
    """Tee-style logger: duplicates every write to the current stdout and an
    append-mode log file.

    NOTE(review): the log file handle is never closed; it lives for the
    process lifetime by design of the original code.
    """

    def __init__(self, filename):
        self.terminal = sys.stdout  # captured at construction time
        self.log = open(filename, 'a')

    def write(self, message):
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # Previously a no-op, so `print(..., flush=True)` and interpreter
        # shutdown never pushed buffered data to disk; forward to both streams.
        self.terminal.flush()
        self.log.flush()
def prepare_within_day_indices_for_ground_truth(offset: int) -> np.ndarray:
    """Return the fixed within-day ground-truth index pattern shifted by
    ``11 + offset``."""
    base = np.array([1, 2, 3, 6, 9, 12])
    return base + (11 + offset)
class Metrics():
def __init__(self, residues: dict[(str, float)], isotope_error_range: list[int], cum_mass_threshold: float=0.5, ind_mass_threshold: float=0.1) -> None:
    """Configure peptide metric computation.

    Args:
        residues: amino-acid token -> residue mass.
        isotope_error_range: [min, max] isotope shifts tolerated when
            matching precursor masses.
        cum_mass_threshold: cumulative mass tolerance for matching
            (presumably Da — TODO confirm units).
        ind_mass_threshold: per-residue mass tolerance for matching
            (presumably Da — TODO confirm units).
    """
    self.residues = residues
    self.isotope_error_range = isotope_error_range
    self.cum_mass_threshold = cum_mass_threshold
    self.ind_mass_threshold = ind_mass_threshold
def _split_sequences(seq: (list[str] | list[list[str]])) -> list[list[str]]:
return [(re.split('(?<=.)(?=[A-Z])', x) if isinstance(x, str) else x) for x in seq]
def _split_peptide(peptide: (str | list[str])) -> list[str]:
if (not isinstance(peptide, str)):
return peptide
return re.split('(?<=.)(?=[A-Z])', peptide)
def matches_precursor(self, seq: (str | list[str]), prec_mass: float, prec_charge: int, prec_tol: int=50) -> tuple[(bool, list[float])]:
seq_mass = self._mass(seq, charge=prec_charge)
delta_mass_ppm = [self._calc_mass_error(seq_mass, prec_mass, prec_charge, isotope) for isotope in range(self.isotope_error_range[0], (self.isotope_error_range[1] + 1))]
return (any(((abs(d) < prec_tol) for d in delta_mass_ppm)), delta_mass_ppm)
def compute_aa_er(self, peptides_truth: (list[str] | list[list[str]]), peptides_predicted: (list[str] | list[list[str]])) -> float:
peptides_truth = self._split_sequences(peptides_truth)
peptides_predicted = self._split_sequences(peptides_predicted)
return float(jiwer.wer([' '.join(x) for x in peptides_truth], [' '.join(x) for x in peptides_predicted]))
def compute_precision_recall(self, targets: (list[str] | list[list[str]]), predictions: (list[str] | list[list[str]]), confidence: (list[float] | None)=None, threshold: (float | None)=None) -> tuple[(float, float, float, float)]:
targets = self._split_sequences(targets)
predictions = self._split_sequences(predictions)
(n_targ_aa, n_pred_aa, n_match_aa) = (0, 0, 0)
(n_pred_pep, n_match_pep) = (0, 0)
if ((confidence is None) or (threshold is None)):
threshold = 0
confidence = np.ones(len(predictions))
for i in range(len(targets)):
targ = self._split_peptide(targets[i])
pred = self._split_peptide(predictions[i])
conf = confidence[i]
if (pred[0] == ''):
pred = []
n_targ_aa += len(targ)
if ((conf >= threshold) and (len(pred) > 0)):
n_pred_aa += len(pred)
n_pred_pep += 1
n_match = self._novor_match(targ, pred)
n_match_aa += n_match
if ((len(pred) == len(targ)) and (len(targ) == n_match)):
n_match_pep += 1
pep_recall = (n_match_pep / len(targets))
aa_recall = (n_match_aa / n_targ_aa)
if (n_pred_pep == 0):
pep_precision = 1.0
aa_prec = 1.0
else:
pep_precision = (n_match_pep / n_pred_pep)
aa_prec = (n_match_aa / n_pred_aa)
return (aa_prec, aa_recall, pep_recall, pep_precision)
def calc_auc(self, targs: (list[str] | list[list[str]]), preds: (list[str] | list[list[str]]), conf: list[float]) -> float:
(x, y) = self._get_pr_curve(targs, preds, conf)
(recall, precision) = (np.array(x)[::(- 1)], np.array(y)[::(- 1)])
width = (recall[1:] - recall[:(- 1)])
height = np.minimum(precision[1:], precision[:(- 1)])
top = np.maximum(precision[1:], precision[:(- 1)])
side = (top - height)
return ((width * height).sum() + (0.5 * (side * width).sum()))
def _get_pr_curve(self, targs: (list[str] | list[list[str]]), preds: (list[str] | list[list[str]]), conf: np.ndarray, N: int=20) -> tuple[(list[float], list[float])]:
(x, y) = ([], [])
t_idx = np.argsort(np.array(conf))
t_idx = t_idx[(~ conf[t_idx].isna())]
t_idx = (list(t_idx[((t_idx.shape[0] * np.arange(N)) / N).astype(int)]) + [t_idx[(- 1)]])
for t in conf[t_idx]:
(_, _, recall, precision) = self.compute_precision_recall(targs, preds, conf, t)
x.append(recall)
y.append(precision)
return (x, y)
def _mass(self, seq: (str | list[str]), charge: (int | None)=None) -> float:
seq = self._split_peptide(seq)
calc_mass = (sum([self.residues[aa] for aa in seq]) + H2O_MASS)
if (charge is not None):
calc_mass = ((calc_mass / charge) + PROTON_MASS_AMU)
return calc_mass
def _calc_mass_error(self, mz_theoretical: float, mz_measured: float, charge: int, isotope: int=0) -> float:
return (((mz_theoretical - (mz_measured - ((isotope * CARBON_MASS_DELTA) / charge))) / mz_measured) * (10 ** 6))
def _novor_match(self, a: list[str], b: list[str]) -> int:
n = 0
mass_a: list[float] = [self.residues[x] for x in a]
mass_b: list[float] = [self.residues[x] for x in b]
cum_mass_a = np.cumsum(mass_a)
cum_mass_b = np.cumsum(mass_b)
(i, j) = (0, 0)
while ((i < len(a)) and (j < len(b))):
if (abs((cum_mass_a[i] - cum_mass_b[j])) < self.cum_mass_threshold):
n += int((abs((mass_a[i] - mass_b[j])) < self.ind_mass_threshold))
i += 1
j += 1
elif (cum_mass_a[i] > cum_mass_b[j]):
i += 1
else:
j += 1
return n |
def _dataset_exists(path, annotation, image_dir):
if (not osp.exists(path)):
logger.debug('Config dataset_dir {} is not exits, dataset config is not valid'.format(path))
return False
if annotation:
annotation_path = osp.join(path, annotation)
if (not osp.isfile(annotation_path)):
logger.debug('Config annotation {} is not a file, dataset config is not valid'.format(annotation_path))
return False
if image_dir:
image_path = osp.join(path, image_dir)
if (not osp.isdir(image_path)):
logger.warning('Config image_dir {} is not a directory, dataset config is not valid'.format(image_path))
return False
return True |
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
    """Build the BERT-style training op: AdamW with linear learning-rate
    warmup followed by linear (polynomial power=1.0) decay to zero.

    Args:
        loss: scalar training loss tensor to minimize.
        init_lr: peak learning rate reached at the end of warmup.
        num_train_steps: total steps over which the LR decays to 0.
        num_warmup_steps: steps of linear warmup (falsy disables warmup).
        use_tpu: wrap the optimizer in CrossShardOptimizer for TPU.

    Returns:
        (train_op, learning_rate): op performing one update step, and the
        learning-rate tensor actually used.
    """
    global_step = tf.train.get_or_create_global_step()
    learning_rate = tf.constant(value=init_lr, shape=[], dtype=tf.float32)
    # Linear decay from init_lr to 0 over num_train_steps; no cycling.
    learning_rate = tf.train.polynomial_decay(learning_rate, global_step, num_train_steps, end_learning_rate=0.0, power=1.0, cycle=False)
    if num_warmup_steps:
        # During warmup the LR ramps linearly from 0 to init_lr; is_warmup
        # (0.0 or 1.0) selects between the warmup ramp and the decay schedule.
        global_steps_int = tf.cast(global_step, tf.int32)
        warmup_steps_int = tf.constant(num_warmup_steps, dtype=tf.int32)
        global_steps_float = tf.cast(global_steps_int, tf.float32)
        warmup_steps_float = tf.cast(warmup_steps_int, tf.float32)
        warmup_percent_done = (global_steps_float / warmup_steps_float)
        warmup_learning_rate = (init_lr * warmup_percent_done)
        is_warmup = tf.cast((global_steps_int < warmup_steps_int), tf.float32)
        learning_rate = (((1.0 - is_warmup) * learning_rate) + (is_warmup * warmup_learning_rate))
    # AdamW variant: weight decay is skipped for LayerNorm params and biases.
    optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-06, exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'])
    if use_tpu:
        optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    # Global-norm gradient clipping at 1.0, as in the original BERT recipe.
    (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
    train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=global_step)
    # The custom optimizer does not advance global_step itself, so it is
    # incremented manually alongside the gradient update.
    new_global_step = (global_step + 1)
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
    return (train_op, learning_rate)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.