code stringlengths 101 5.91M |
|---|
class MPClassFuncOnDemand():
    """Proxy that exposes one method of a lazily-constructed object across processes.

    Child processes call the (pickled) proxy like a function; the call is
    serialized through ``c2s``, serviced by ``io_callback`` in the owning
    process, and the result is returned through ``s2c``.  The target object
    ``class_handle(**class_kwargs)`` is built on first request only.
    """
    def __init__(self, class_handle, class_func_name, **class_kwargs):
        self.class_handle = class_handle
        self.class_func_name = class_func_name
        self.class_kwargs = class_kwargs
        self.class_func = None  # bound target method; created lazily on first request
        self.s2c = multiprocessing.Queue()  # server -> client: results
        self.c2s = multiprocessing.Queue()  # client -> server: (args, kwargs) requests
        self.lock = multiprocessing.Lock()  # serializes concurrent client calls
        # Register the servicing callback with the owning process's message pump.
        io.add_process_messages_callback(self.io_callback)
    def io_callback(self):
        """Drain pending requests and push results back; runs in the owning process."""
        while (not self.c2s.empty()):
            (func_args, func_kwargs) = self.c2s.get()
            # Deferred construction: instantiate the target class on first use.
            if (self.class_func is None):
                self.class_func = getattr(self.class_handle(**self.class_kwargs), self.class_func_name)
            self.s2c.put(self.class_func(*func_args, **func_kwargs))
    def __call__(self, *args, **kwargs):
        """Blocking remote call: enqueue the request, wait for the single result."""
        with self.lock:
            self.c2s.put((args, kwargs))
            return self.s2c.get()
    def __getstate__(self):
        # Pickled copies (sent to children) carry only the IPC primitives; the
        # class handle/kwargs stay in the owner, so only the owner can execute.
        return {'s2c': self.s2c, 'c2s': self.c2s, 'lock': self.lock} |
def nondefault_trainer_args(opt):
    """Return the sorted names of Lightning Trainer options whose value on
    ``opt`` differs from the argparse default."""
    defaults = Trainer.add_argparse_args(argparse.ArgumentParser()).parse_args([])
    changed = [name for name in vars(defaults) if getattr(opt, name) != getattr(defaults, name)]
    return sorted(changed)
class FixedBernoulli(torch.distributions.Bernoulli):
    """Bernoulli distribution with RL-friendly helpers.

    Adds:
      * ``log_probs`` -- joint log-probability over the action dimensions,
        returned with shape ``(batch, 1)``.
      * ``entropy``   -- per-sample entropy, summed over the last dimension.
      * ``mode``      -- deterministic greedy action (``probs > 0.5``).
    """

    def log_probs(self, actions):
        """Sum per-dimension log-probs of ``actions``; returns shape (batch, 1).

        Bug fix: the original called ``super.log_prob`` (attribute of the
        builtin ``super`` type, not the parent class), which raised
        AttributeError at runtime; it must be ``super().log_prob``.
        """
        return super().log_prob(actions).view(actions.size(0), -1).sum(-1).unsqueeze(-1)

    def entropy(self):
        """Entropy summed over the last (action) dimension."""
        return super().entropy().sum(-1)

    def mode(self):
        """Most likely action per dimension: 1.0 where p > 0.5, else 0.0."""
        return torch.gt(self.probs, 0.5).float()
class ModelParallelTransformerDecoder(TransformerDecoder):
    """TransformerDecoder variant whose layers and output projection run under
    tensor (model) parallelism (fairseq/megatron style)."""
    def build_decoder_layer(self, args, no_encoder_attn=False):
        # Substitute the model-parallel decoder layer for the stock one.
        return ModelParallelTransformerDecoderLayer(args, no_encoder_attn)
    def output_layer(self, features, **kwargs):
        """Project decoder features to (possibly sharded) vocabulary logits."""
        if (not self.share_input_output_embed):
            raise NotImplementedError('Model parallel training currently requires --share-decoder-input-output-embed')
        # Copy activations into the model-parallel region before the sharded projection.
        features = copy_to_model_parallel_region(features)
        x = self.output_projection(features)
        # vocab_parallel_cross_entropy consumes sharded logits directly; any other
        # criterion needs the vocabulary shards gathered back onto every rank.
        if (getattr(self.args, 'criterion') != 'vocab_parallel_cross_entropy'):
            x = gather_from_model_parallel_region(x).contiguous()
        return x |
def closure_sgd():
    """One optimizer closure step for deep-image-prior style masked inpainting.

    Relies on names from the enclosing script scope (net, net_input_saved, mse,
    mask_var, img_var, img_np, exp_weight, show_every, compare_psnr, np_plot,
    sgd_psnr_list, sgd_expm_psnr_list).  Returns the masked MSE loss so it can
    be used as a PyTorch optimizer closure.
    """
    global i, out_avg, sgd_out, sgd_expm_out, check_point
    net_input = net_input_saved
    out = net(net_input)
    # Exponential moving average of the network output (smoothed prediction).
    if (out_avg is None):
        out_avg = out.detach()
    else:
        out_avg = ((out_avg * exp_weight) + (out.detach() * (1 - exp_weight)))
    # Loss is computed only over observed (mask == 1) pixels.
    total_loss = mse((out * mask_var), (img_var * mask_var))
    total_loss.backward()
    out_np = out.detach().cpu().numpy()[0]
    out_avg_np = out_avg.detach().cpu().numpy()[0]
    # Track PSNR of both the raw and the smoothed reconstruction.
    psrn_gt = compare_psnr(img_np, out_np)
    psrn_gt_sm = compare_psnr(img_np, out_avg_np)
    sgd_psnr_list.append(psrn_gt)
    sgd_expm_psnr_list.append(psrn_gt_sm)
    # Snapshot outputs at the configured checkpoint iteration.
    if (i == (check_point - 1)):
        sgd_out = out_np
        sgd_expm_out = out_avg_np
    if ((i % show_every) == 0):
        np_plot(out.detach().cpu().numpy()[0], ('Iter: %d; gt %.2f' % (i, psrn_gt)))
    i += 1
    return total_loss |
def check_k0(freqs, k0=None, rtol=0.01, atol=1e-07):
    """Assert that ``k0`` times the frequency spacing reproduces ``freqs[0]``
    within a relative/absolute tolerance. ``k0`` defaults to ``get_k0(freqs)``."""
    if k0 is None:
        k0 = get_k0(freqs)
    spacing = freqs[1] - freqs[0]
    reconstructed_f0 = k0 * spacing
    tolerance = rtol * spacing + atol
    assert abs(reconstructed_f0 - freqs[0]) < tolerance
def ortho_weight(ndim):
    """Draw a random (ndim, ndim) orthogonal matrix as float32.

    The left singular vectors of a Gaussian random matrix form an orthogonal
    basis, which is the standard recurrent-weight initialization trick.
    """
    gaussian = numpy.random.randn(ndim, ndim)
    u, _, _ = numpy.linalg.svd(gaussian)
    return u.astype('float32')
class FuseMatMulRequantizeDequantizeNewAPITransformer(GraphRewriterBase):
    """TensorFlow graph rewriter: fuse _QuantizedMatMul + Requantize + Dequantize
    into a single quantized MatMul node with a 'Dequantize' fused op, folding the
    float bias into an int32 bias where possible (Intel Neural Compressor style).
    """
    def __init__(self, model, device='cpu'):
        super().__init__(model)
        self.device = device  # 'gpu' keeps float32 bias; otherwise bias becomes qint32
        self.graph_analyzer = GraphAnalyzer()
        self.graph_analyzer.graph = self.model
        self.graph_info = self.graph_analyzer.parse_graph()
        self.eps = 1e-05  # guard against zero-width quantization ranges
    def do_transformation(self):
        """Rewrite every matched pattern occurrence and return the new GraphDef."""
        # NOTE(review): the trailing ('Softmax',) entry is a tuple among lists —
        # presumably an optional successor in the pattern matcher; confirm against
        # query_fusion_pattern_nodes semantics.
        fuse_pattern = [['_QuantizedMatMul'], ['Requantize', 'RequantizePerChannel'], ['Dequantize'], ('Softmax',)]
        uint8_type = dtypes.quint8.as_datatype_enum
        int8_type = dtypes.qint8.as_datatype_enum
        float32_type = dtypes.float32.as_datatype_enum
        qint32_type = dtypes.qint32.as_datatype_enum
        target_nodes = self.graph_analyzer.query_fusion_pattern_nodes(fuse_pattern)
        for i in target_nodes:
            # i = [matmul_name, requantize_name, dequantize_name, ..., (matched op names)]
            quantized_node_name = i[0]
            quantized_node = self.graph_info[quantized_node_name].node
            requantize_node_name = i[1]
            requantize_node = self.graph_info[requantize_node_name].node
            requested_output_min_name = requantize_node.input[3]
            requested_output_max_name = requantize_node.input[4]
            deq_node_name = i[2]
            quantized_node_op = i[(- 1)][0]
            # fused_ops is a serialized proto; keep only printable chars to compare.
            attr_fused_ops = ''.join((x for x in quantized_node.attr['fused_ops'].SerializeToString().decode('UTF-8', 'ignore').strip() if x.isprintable()))
            # Only the BiasAdd+Add variant of _QuantizedMatMul is handled here.
            if ('BiasAddAdd' not in attr_fused_ops):
                continue
            # Build the replacement node: original inputs + requested output range.
            new_node = node_def_pb2.NodeDef()
            new_node.op = quantized_node_op
            new_node.name = requantize_node_name
            for (_, value) in enumerate(quantized_node.input):
                new_node.input.append(value)
            new_node.input.append(requested_output_min_name)
            new_node.input.append(requested_output_max_name)
            # Copy over the attributes the fused op shares with the original MatMul.
            if ('T1' in quantized_node.attr):
                new_node.attr['T1'].CopyFrom(quantized_node.attr['T1'])
            if ('T2' in quantized_node.attr):
                new_node.attr['T2'].CopyFrom(quantized_node.attr['T2'])
            if ('U' in quantized_node.attr):
                new_node.attr['U'].CopyFrom(quantized_node.attr['U'])
            if ('transpose_b' in quantized_node.attr):
                new_node.attr['transpose_b'].CopyFrom(quantized_node.attr['transpose_b'])
            if ('transpose_a' in quantized_node.attr):
                new_node.attr['transpose_a'].CopyFrom(quantized_node.attr['transpose_a'])
            if ('input_quant_mode' in quantized_node.attr):
                new_node.attr['input_quant_mode'].CopyFrom(quantized_node.attr['input_quant_mode'])
            if ('output_quant_mode' in quantized_node.attr):
                new_node.attr['output_quant_mode'].CopyFrom(quantized_node.attr['output_quant_mode'])
            top_node_name = Helper.node_name_from_input(quantized_node.input[0])
            # Inputs 6/7 are the weight min/max; skip when they are tensor-output
            # references (':1'/':2') rather than standalone const nodes.
            max_filter_node = None
            min_filter_node = None
            if (':2' not in new_node.input[7]):
                max_filter_node = self.graph_info[new_node.input[7]].node
            if (':1' not in new_node.input[6]):
                min_filter_node = self.graph_info[new_node.input[6]].node
            last_node = self.graph_info[new_node.input[0]].node
            weight_node = self.graph_info[Helper.node_name_from_input(new_node.input[1])].node
            bias_node = self.graph_info[Helper.node_name_from_input(new_node.input[2])].node
            # For non-concat producers the last two inputs are the activation range.
            if (not (last_node.op == 'QuantizedConcatV2')):
                max_input_node = self.graph_info[last_node.input[(- 1)]].node
                min_input_node = self.graph_info[last_node.input[(- 2)]].node
            type_bias = float32_type
            # 'Enter' nodes (while-loop frames) wrap the real consts: unwrap them,
            # bailing out if the parent is not a Const.
            if ((not (last_node.op == 'QuantizedConcatV2')) and (max_input_node.op == 'Enter')):
                min_input_parent_name = Helper.node_name_from_input(min_input_node.input[0])
                max_input_parent_name = Helper.node_name_from_input(max_input_node.input[0])
                min_input_parent_node = self.graph_info[min_input_parent_name].node
                max_input_parent_node = self.graph_info[max_input_parent_name].node
                if ((min_input_parent_node.op != 'Const') or (max_input_parent_node.op != 'Const')):
                    continue
                min_input_node = min_input_parent_node
                max_input_node = max_input_parent_node
            if (max_filter_node and min_filter_node and (max_filter_node.op == 'Enter')):
                min_filter_parent_name = Helper.node_name_from_input(min_filter_node.input[0])
                max_filter_parent_name = Helper.node_name_from_input(max_filter_node.input[0])
                min_filter_parent_node = self.graph_info[min_filter_parent_name].node
                max_filter_parent_node = self.graph_info[max_filter_parent_name].node
                if ((min_filter_parent_node.op != 'Const') or (max_filter_parent_node.op != 'Const')):
                    continue
                min_filter_node = min_filter_parent_node
                max_filter_node = max_filter_parent_node
            if (weight_node.op == 'Enter'):
                weight_parent_name = Helper.node_name_from_input(weight_node.input[0])
                weight_parent_node = self.graph_info[weight_parent_name].node
                if (weight_parent_node.op != 'Const'):
                    continue
                weight_node = weight_parent_node
            bias_enter_node = None
            if (bias_node.op == 'Enter'):
                bias_enter_node = bias_node
                bias_parent_name = Helper.node_name_from_input(bias_node.input[0])
                bias_parent_node = self.graph_info[bias_parent_name].node
                if (bias_parent_node.op != 'Const'):
                    continue
                bias_node = bias_parent_node
            # Bias folding: only when all ranges and weights are Consts and the
            # producer is not a quantized concat.
            if (max_filter_node and min_filter_node and (max_filter_node.op == 'Const') and (weight_node.op == 'Const') and (not (last_node.op == 'QuantizedConcatV2'))):
                min_input_value = min_input_node.attr['value'].tensor.float_val[0]
                max_input_value = max_input_node.attr['value'].tensor.float_val[0]
                # Per-channel requantize carries per-channel filter ranges.
                if (requantize_node.op.find('PerChannel') != (- 1)):
                    max_filter_tensor = tensor_util.MakeNdarray(max_filter_node.attr['value'].tensor)
                    min_filter_tensor = tensor_util.MakeNdarray(min_filter_node.attr['value'].tensor)
                else:
                    max_filter_value = max_filter_node.attr['value'].tensor.float_val[0]
                    min_filter_value = min_filter_node.attr['value'].tensor.float_val[0]
                weights_tensor = tensor_util.MakeNdarray(weight_node.attr['value'].tensor)
                bias_tensor = tensor_util.MakeNdarray(bias_node.attr['value'].tensor)
                is_min_first = bool((quantized_node.attr['input_quant_mode'].s == b'MIN_FIRST'))
                input_range = ((max_input_value - min_input_value) if is_min_first else max(abs(max_input_value), abs(min_input_value)))
                # Widen degenerate (near-zero) ranges by eps to avoid divide-by-zero.
                if ((- self.eps) <= input_range <= self.eps):
                    input_range += self.eps
                if ((- self.eps) <= (max_input_value - min_input_value) <= self.eps):
                    max_input_value += self.eps
                if (requantize_node.op.find('PerChannel') != (- 1)):
                    int32_bias = Helper.generate_int32_bias_for_matmul_per_channel(bias_tensor, weights_tensor, max_input_value, min_input_value, max_filter_tensor, min_filter_tensor)
                else:
                    int32_bias = Helper.generate_int32_bias_for_matmul(bias_tensor, weights_tensor, input_range, max_input_value, min_input_value, max_filter_value, min_filter_value)
                # GPU keeps the float bias; CPU switches the const to qint32.
                bias_node.attr['dtype'].CopyFrom(attr_value_pb2.AttrValue(type=(float32_type if (self.device == 'gpu') else qint32_type)))
                bias_node.attr['value'].CopyFrom(attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto((bias_tensor if (self.device == 'gpu') else int32_bias), (dtypes.float32 if (self.device == 'gpu') else dtypes.int32), bias_tensor.shape)))
                bias_node.attr['value'].tensor.dtype = (float32_type if (self.device == 'gpu') else qint32_type)
                type_bias = (float32_type if (self.device == 'gpu') else qint32_type)
                if bias_enter_node:
                    bias_enter_node.attr['T'].CopyFrom(attr_value_pb2.AttrValue(type=(float32_type if (self.device == 'gpu') else qint32_type)))
            else:
                type_bias = float32_type
            # Declare the fused op's typed signature and replace the old subgraph.
            new_node.attr['Tbias'].CopyFrom(attr_value_pb2.AttrValue(type=type_bias))
            Helper.set_attr_string_list(new_node, 'fused_ops', [b'BiasAdd', b'Add', b'Dequantize'])
            Helper.set_attr_type_list(new_node, 'Thost_inputs', [uint8_type, int8_type, type_bias, float32_type, float32_type, float32_type, float32_type, float32_type, float32_type, float32_type])
            Helper.set_attr_type_list(new_node, 'Thost_outputs', [float32_type])
            new_node.attr['Tout'].CopyFrom(attr_value_pb2.AttrValue(type=float32_type))
            self.graph_analyzer.remove_node(requantize_node_name)
            if self.graph_info[deq_node_name].outputs:
                # Dequantize has consumers: splice the fused node in its place.
                self.graph_analyzer.replace_single_node(new_node, [top_node_name], quantized_node_name, self.graph_info[deq_node_name].outputs, deq_node_name)
                self.graph_analyzer.remove_node(deq_node_name)
            else:
                # Dequantize is a graph output: reuse its name for the fused node.
                self.graph_analyzer.remove_node(deq_node_name)
                new_node.name = deq_node_name
                self.graph_analyzer.replace_single_node(new_node, [top_node_name], quantized_node_name, [], deq_node_name)
            self.graph_analyzer.remove_node(quantized_node_name)
        return self.graph_analyzer.dump_graph() |
class DiagonalGaussianDensity(Density):
    """Diagonal Gaussian density with fixed (buffer) mean and stddev.

    Acts as a prior: it exposes no trainable p/q parameters and its ELBO
    contribution is the Gaussian log-density with an identically-zero log-q
    term.
    """

    def __init__(self, mean, stddev, num_fixed_samples=0):
        """``mean``/``stddev``: equal-shape tensors registered as non-trainable
        buffers.  ``num_fixed_samples > 0`` pre-draws samples that
        ``_fixed_sample`` reuses."""
        super().__init__()
        assert (mean.shape == stddev.shape)
        self.register_buffer('mean', mean)
        self.register_buffer('stddev', stddev)
        if (num_fixed_samples > 0):
            self.register_buffer('_fixed_samples', self.sample(num_fixed_samples))

    @property
    def shape(self):
        """Event shape of the density.

        Bug fix: restored the ``@property`` decorator — ``_sample`` unpacks
        ``*self.shape``, which raises TypeError on a bound method.  (Decorators
        were visibly stripped elsewhere in this file.)
        """
        return self.mean.shape

    def p_parameters(self):
        """No trainable generative parameters."""
        return []

    def q_parameters(self):
        """No trainable inference parameters."""
        return []

    def _fix_random_u(self):
        # Pair the density with a single fresh sample.
        return (self, self.sample(num_samples=1)[0])

    def fix_u(self, u):
        # A fixed prior accepts only an empty conditioning input.
        assert (not u)
        return self

    def _elbo(self, z, detach_q_params, detach_q_samples):
        """Log-density of z under the (broadcast) Gaussian; log-q is zero."""
        log_prob = diagonal_gaussian_log_prob(z, self.mean.expand_as(z), self.stddev.expand_as(z))
        return {'log-p': log_prob, 'log-q': z.new_zeros((z.shape[0], 1)), 'z': z}

    def _sample(self, num_samples):
        """Draw ``num_samples`` i.i.d. samples by expanding mean/stddev."""
        return diagonal_gaussian_sample(self.mean.expand(num_samples, *self.shape), self.stddev.expand(num_samples, *self.shape))

    def _fixed_sample(self, noise):
        # ``_fixed_samples`` only exists when num_fixed_samples > 0 was passed.
        return (noise if (noise is not None) else self._fixed_samples)
def load_data(tr: Training, verbose=0):
    """Build the train/test tf.data pipelines for a driving-policy Training config.

    Populates ``tr.train_ds``, ``tr.test_ds`` and the network input height/width
    (inferred from one decoded sample).  ``verbose`` prints a few file paths and
    dataset sizes.
    """
    # File listings: <data_dir>/<dataset>/*/images/*
    list_train_ds = tf.data.Dataset.list_files([str((((tr.train_data_dir + '/') + ds) + '/*/images/*')) for ds in tr.train_datasets])
    list_test_ds = tf.data.Dataset.list_files([str((((tr.test_data_dir + '/') + ds) + '/*/images/*')) for ds in tr.test_datasets])
    # Label/command lookup tables built from the dataset directories.
    train_data = dataloader.dataloader(tr.train_data_dir, tr.train_datasets, tr.hyperparameters.POLICY)
    test_data = dataloader.dataloader(tr.test_data_dir, tr.test_datasets, tr.hyperparameters.POLICY)
    if verbose:
        for f in list_train_ds.take(5):
            print(f.numpy())
        print()
        for f in list_test_ds.take(5):
            print(f.numpy())
        print()
        print(('Number of train samples: %d' % len(train_data.labels)))
        print(('Number of test samples: %d' % len(test_data.labels)))
    def process_train_path(file_path):
        # Map a file path to ((image, command), label) with training-time augmentation.
        (cmd_input, label) = train_data.get_label(tf.strings.regex_replace(file_path, '[/\\\\]', '/'))
        img = utils.load_img(file_path, tr.hyperparameters.IS_CROP)
        img = data_augmentation.augment_img(img)
        if tr.hyperparameters.FLIP_AUG:
            (img, cmd_input, label) = data_augmentation.flip_sample(img, cmd_input, label)
        if tr.hyperparameters.CMD_AUG:
            cmd_input = data_augmentation.augment_cmd(cmd_input)
        return ((img, cmd_input), label)
    def process_test_path(file_path):
        # Test-time mapping: no augmentation, only load (and optionally crop).
        (cmd_input, label) = test_data.get_label(tf.strings.regex_replace(file_path, '[/\\\\]', '/'))
        img = utils.load_img(file_path, tr.hyperparameters.IS_CROP)
        return ((img, cmd_input), label)
    labeled_ds = list_train_ds.map(process_train_path, num_parallel_calls=4)
    # Peek at one sample to record the network input resolution.
    for ((image, cmd_input), label) in labeled_ds.take(1):
        shape = image.numpy().shape
        tr.NETWORK_IMG_HEIGHT = shape[0]
        tr.NETWORK_IMG_WIDTH = shape[1]
        print('Image shape: ', shape)
        print('Command: ', cmd_input.numpy())
        print('Label: ', label.numpy())
    tr.train_ds = utils.prepare_for_training(ds=labeled_ds, batch_sz=tr.hyperparameters.TRAIN_BATCH_SIZE, shuffle_buffer_sz=(100 * tr.hyperparameters.TRAIN_BATCH_SIZE), prefetch_buffer_sz=(10 * tr.hyperparameters.TRAIN_BATCH_SIZE))
    test_ds = list_test_ds.map(process_test_path, num_parallel_calls=4)
    test_ds = test_ds.batch(tr.hyperparameters.TEST_BATCH_SIZE)
    # NOTE(review): prefetch size uses TRAIN_BATCH_SIZE for the test set — confirm intended.
    tr.test_ds = test_ds.prefetch(buffer_size=(10 * tr.hyperparameters.TRAIN_BATCH_SIZE)) |
# NOTE(review): the bare '_model' below appears to be the residue of a stripped
# decorator (presumably '@register_model' from timm) — confirm against upstream.
_model
def vit_base_r26_s32_224(pretrained=False, **kwargs):
    """Hybrid ViT-Base with a ResNet26 (2,2,2,2) stem at 224x224, stride 32."""
    backbone = _resnetv2((2, 2, 2, 2), **kwargs)
    # kwargs are forwarded to both the backbone and the transformer factory.
    model_kwargs = dict(embed_dim=768, depth=12, num_heads=12, **kwargs)
    model = _create_vision_transformer_hybrid('vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **model_kwargs)
    return model |
def main():
    """Convert geodetic (B, L, H) CLI arguments to ECEF X/Y/Z and print them.

    Returns 0 as the process exit status.
    """
    args = init_args()
    x_val, y_val, z_val = blh2xyz(args.b, args.l, args.h)
    print('X: {:.5f}, Y: {:.5f}, Z: {:.5f}'.format(x_val, y_val, z_val))
    return 0
def test_get_metrics_returns_dict():
    """get_all_metrics should return a dict containing exactly three metrics,
    even when the retrievals are (partially) incorrect."""
    (ground_truth, retrieved) = return_ground_incorrect_retrievals()
    assert isinstance(get_all_metrics(ground_truth, retrieved), dict)
    assert (len(get_all_metrics(ground_truth, retrieved).values()) == 3) |
# NOTE(review): the bare '_module()' below appears to be the residue of a stripped
# decorator (presumably '@TRANSFORMS.register_module()' in mmdetection) — confirm.
_module()
class Rotate(GeomTransform):
    """Geometric augmentation: rotate image, masks and segmentation map about the
    image center by a magnitude in [min_mag, max_mag] degrees (mmdet-style)."""
    def __init__(self, prob: float=1.0, level: Optional[int]=None, min_mag: float=0.0, max_mag: float=30.0, reversal_prob: float=0.5, img_border_value: Union[(int, float, tuple)]=128, mask_border_value: int=0, seg_ignore_label: int=255, interpolation: str='bilinear') -> None:
        assert (0.0 <= min_mag <= 180.0), f'min_mag for Rotate should be in range [0,180], got {min_mag}.'
        assert (0.0 <= max_mag <= 180.0), f'max_mag for Rotate should be in range [0,180], got {max_mag}.'
        super().__init__(prob=prob, level=level, min_mag=min_mag, max_mag=max_mag, reversal_prob=reversal_prob, img_border_value=img_border_value, mask_border_value=mask_border_value, seg_ignore_label=seg_ignore_label, interpolation=interpolation)
    def _get_homography_matrix(self, results: dict, mag: float) -> np.ndarray:
        """3x3 homography for rotating boxes/points consistently with the image."""
        img_shape = results['img_shape']
        # Rotation center is the pixel-grid center of the image.
        center = (((img_shape[1] - 1) * 0.5), ((img_shape[0] - 1) * 0.5))
        # Negative angle: cv2 rotates counter-clockwise while mmcv.imrotate below
        # uses the opposite sign convention for the same visual rotation.
        cv2_rotation_matrix = cv2.getRotationMatrix2D(center, (- mag), 1.0)
        return np.concatenate([cv2_rotation_matrix, np.array([0, 0, 1]).reshape((1, 3))]).astype(np.float32)
    def _transform_img(self, results: dict, mag: float) -> None:
        results['img'] = mmcv.imrotate(results['img'], mag, border_value=self.img_border_value, interpolation=self.interpolation)
    def _transform_masks(self, results: dict, mag: float) -> None:
        results['gt_masks'] = results['gt_masks'].rotate(results['img_shape'], mag, border_value=self.mask_border_value, interpolation=self.interpolation)
    def _transform_seg(self, results: dict, mag: float) -> None:
        # Segmentation uses nearest interpolation so labels are never blended.
        results['gt_seg_map'] = mmcv.imrotate(results['gt_seg_map'], mag, border_value=self.seg_ignore_label, interpolation='nearest') |
class DDPMMeanLoss(nn.Module):
    """Weighted MSE loss for a DDPM model that predicts the posterior mean
    directly (rather than the noise), restricted to VPSDE diffusions."""
    def __init__(self, *, reduce_mean: bool=True, likelihood_weighting: bool=False, eps_weighting: bool=False):
        super().__init__()
        self.reduce_mean = reduce_mean  # mean over all elements vs. mean of per-sample sums
        self.likelihood_weighting = likelihood_weighting  # 1/(2*beta_t) weighting
        self.eps_weighting = eps_weighting  # alpha_t*(1-abar_t)/beta_t^2 weighting
    def forward(self, x, model, sde: VPSDE, t, **model_kwargs):
        """x: clean batch; t: integer timestep indices (one per sample).

        The ``[(t, None, None, None)]`` indexing gathers the per-timestep scalar
        and reshapes it to (B, 1, 1, 1) for broadcasting over image dims.
        """
        assert isinstance(sde, VPSDE), 'DDPM training only works for VPSDEs.'
        noise = torch.randn_like(x)
        # Forward diffusion: x_t = sqrt(abar_t) * x + sqrt(1 - abar_t) * noise.
        perturbed_data = ((x * sde.sqrt_alphas_cumprod.to(x.device)[(t, None, None, None)]) + (sde.sqrt_1m_alphas_cumprod.to(x.device)[(t, None, None, None)] * noise))
        # Ground-truth posterior mean mu(x_t, t) in closed form.
        mu = ((perturbed_data - ((sde.discrete_betas.to(x.device)[(t, None, None, None)] * noise) / sde.sqrt_1m_alphas_cumprod.to(x.device)[(t, None, None, None)])) / torch.sqrt(sde.alphas.to(x.device)[(t, None, None, None)]))
        mu_pred = model(perturbed_data, t, **model_kwargs)
        if self.likelihood_weighting:
            weighting = (1.0 / (2.0 * sde.discrete_betas.to(x.device)[(t, None, None, None)]))
        elif self.eps_weighting:
            # Weighting that makes the mean-MSE equivalent to the epsilon-MSE.
            weighting = ((sde.alphas.to(x.device)[(t, None, None, None)] * (1.0 - sde.alphas_cumprod.to(x.device)[(t, None, None, None)])) / (sde.discrete_betas.to(x.device)[(t, None, None, None)] ** 2.0))
        else:
            weighting = 1.0
        losses = (torch.square((mu_pred - mu)) * weighting)
        if self.reduce_mean:
            loss = torch.mean(losses)
        else:
            # Sum per sample, then average over the batch.
            loss = torch.mean(torch.sum(losses.reshape(losses.shape[0], (- 1)), dim=(- 1)))
        return loss |
def to_str(segment):
    """Format a 3-element segment as '[start, end, label]' with 3-decimal floats."""
    assert (len(segment) == 3)
    start, end, label = segment
    return '[{0:.3f}, {1:.3f}, {2}]'.format(start, end, label)
def train(args, net, loss_function, data_iterator):
    """Train a Gluon model with early stopping and LR decay on validation F1.

    Logs train/valid/test curves to CSVs under args.save_dir, checkpoints the
    best-by-valid-F1 parameters, and optionally emails a summary.  Note: the
    loop variable ``iter`` shadows the builtin.
    """
    ctx = args.ctx[0]
    local_cfg = cfg.STATIC_GRAPH
    net.initialize(init=mx.init.Xavier(magnitude=3), ctx=ctx)
    trainer = gluon.Trainer(net.collect_params(), local_cfg.MODEL.TRAIN.OPTIMIZER, {'learning_rate': local_cfg.MODEL.TRAIN.LR, 'wd': local_cfg.MODEL.TRAIN.WD})
    # CSV loggers for the three splits.
    train_loss_logger = MetricLogger(['iter', 'loss'], ['%d', '%.4f'], os.path.join(args.save_dir, ('train_loss%d.csv' % args.save_id)))
    valid_loss_logger = MetricLogger(['iter', 'loss', 'f1', 'acc', 'is_best'], ['%d', '%.4f', '%.4f', '%.4f', '%d'], os.path.join(args.save_dir, ('valid_loss%d.csv' % args.save_id)))
    test_loss_logger = MetricLogger(['iter', 'loss', 'f1', 'acc'], ['%d', '%.4f', '%.4f', '%.4f'], os.path.join(args.save_dir, ('test_loss%d.csv' % args.save_id)))
    best_valid_f1 = 0
    best_valid_iter_info = []
    best_test_iter_info = []
    no_better_valid = 0  # consecutive validations without improvement
    iter_id = 1
    epoch = 1
    train_moving_avg_loss = 0.0
    data_iterator.begin_epoch('train')
    for iter in range(1, local_cfg.MODEL.TRAIN.MAX_ITER):
        # Restart the epoch-wise sampler whenever an epoch is exhausted.
        if data_iterator.epoch_finished:
            print(('Epoch %d finished! It has %d iterations.' % (epoch, iter_id)))
            data_iterator.begin_epoch('train')
            iter_id = 1
            epoch += 1
        else:
            iter_id += 1
        (layer0_features_nd, end_points_l, indptr_l, indices_in_merged_l, labels_nd, node_ids_l) = data_iterator.sample()
        with mx.autograd.record():
            # Optionally collect attention internals for inspection.
            if net._output_inner_result:
                (logits, gate_l, sharpness_l, attend_weights_wo_gate_l) = net(layer0_features_nd, end_points_l, indptr_l, indices_in_merged_l)
            else:
                logits = net(layer0_features_nd, end_points_l, indptr_l, indices_in_merged_l)
            loss = loss_function(logits, labels_nd)
            loss = nd.mean(loss)
        loss.backward()
        if (iter == 1):
            # One-time model summary after parameters are materialized.
            logging.info(('Total Param Number: %d' % gluon_total_param_num(net)))
            gluon_log_net_info(net, save_path=os.path.join(args.save_dir, ('net_info%d.txt' % args.save_id)))
        # GRAD_CLIP <= 0 disables clipping (norm is only computed for logging).
        if (local_cfg.MODEL.TRAIN.GRAD_CLIP <= 0):
            gnorm = get_global_norm([v.grad() for v in net.collect_params().values()])
        else:
            gnorm = gluon.utils.clip_global_norm([v.grad() for v in net.collect_params().values()], max_norm=local_cfg.MODEL.TRAIN.GRAD_CLIP)
        trainer.step(batch_size=1)
        iter_train_loss = loss.asscalar()
        train_moving_avg_loss += iter_train_loss
        logging.info(('[iter=%d]: loss=%.4f, gnorm=%g' % (iter, iter_train_loss, gnorm)))
        train_loss_logger.log(iter=iter, loss=iter_train_loss)
        # Periodic validation (and, on improvement, testing + checkpointing).
        if ((iter % local_cfg.MODEL.TRAIN.VALID_ITER) == 0):
            (valid_loss, valid_f1, valid_accuracy) = eval_classification(net=net, loss_function=loss_function, data_iterator=data_iterator, num_class=data_iterator.num_class, mode='valid')
            logging.info(('Iter %d, Epoch %d,: train_moving_loss=%.4f, valid loss=%.4f, f1=%.4f, accuracy=%.4f' % (iter, epoch, (train_moving_avg_loss / local_cfg.MODEL.TRAIN.VALID_ITER), valid_loss, valid_f1, valid_accuracy)))
            train_moving_avg_loss = 0.0
            if (valid_f1 > best_valid_f1):
                logging.info('> Best Iter')
                is_best = True
                best_valid_f1 = valid_f1
                best_iter = iter
                best_valid_iter_info = [best_iter, valid_loss, valid_f1, valid_accuracy]
                no_better_valid = 0
                net.save_params(filename=os.path.join(args.save_dir, ('best_valid%d.params' % args.save_id)))
                # Evaluate test only when validation improves.
                (test_loss, test_f1, test_accuracy) = eval_classification(net=net, loss_function=loss_function, data_iterator=data_iterator, num_class=data_iterator.num_class, mode='test')
                test_loss_logger.log(iter=iter, loss=test_loss, f1=test_f1, acc=test_accuracy)
                best_test_iter_info = [best_iter, test_loss, test_f1, test_accuracy]
                logging.info(('Iter %d, Epoch %d: test loss=%.4f, f1=%.4f, accuracy=%.4f' % (iter, epoch, test_loss, test_f1, test_accuracy)))
            else:
                is_best = False
                no_better_valid += 1
                # Early stop, or decay the learning rate after a shorter patience.
                if (no_better_valid > local_cfg.MODEL.TRAIN.EARLY_STOPPING_PATIENCE):
                    logging.info('Early stopping threshold reached. Stop training.')
                    valid_loss_logger.log(iter=iter, loss=valid_loss, f1=valid_f1, acc=valid_accuracy, is_best=is_best)
                    break
                elif (no_better_valid > local_cfg.MODEL.TRAIN.DECAY_PATIENCE):
                    new_lr = max((trainer.learning_rate * local_cfg.MODEL.TRAIN.LR_DECAY_FACTOR), local_cfg.MODEL.TRAIN.MIN_LR)
                    if (new_lr < trainer.learning_rate):
                        logging.info(('Change the LR to %g' % new_lr))
                        trainer.set_learning_rate(new_lr)
                        no_better_valid = 0
            valid_loss_logger.log(iter=iter, loss=valid_loss, f1=valid_f1, acc=valid_accuracy, is_best=is_best)
    logging.info(('Best Valid: [Iter, Loss, F1, ACC] = %s' % str(best_valid_iter_info)))
    logging.info(('Best Test : [Iter, Loss, F1, ACC] = %s' % str(best_test_iter_info)))
    # Append the best rows once more so they are easy to find in the CSVs.
    valid_loss_logger.log(iter=best_valid_iter_info[0], loss=best_valid_iter_info[1], f1=best_valid_iter_info[2], acc=best_valid_iter_info[3], is_best=True)
    test_loss_logger.log(iter=best_test_iter_info[0], loss=best_test_iter_info[1], f1=best_test_iter_info[2], acc=best_test_iter_info[3])
    # Optional e-mail notification with the final summary.
    if ((args.emails is not None) and (len(args.emails) > 0)):
        for email_address in args.emails.split(','):
            send_msg(title=os.path.basename(args.save_dir), text=((((('Test: [Iter, Loss, F1, ACC] = %s\n' % str(best_test_iter_info)) + ('Valid: [Iter, Loss, F1, ACC] = %s\n' % str(best_valid_iter_info))) + ('Save Dir: %s\n' % args.save_dir)) + '\nConfig:\n') + ordered_dump()), dst_address=email_address)
    return |
class TextualResEncoder(nn.Module):
    """Residual image encoder with text attention for text-guided inpainting.

    Encodes a (masked) image through residual down-sampling blocks, attends
    word embeddings over the deepest feature map, and produces Gaussian
    distribution parameters from the concatenated sentence/word features.
    Training mode (img_c given) runs prior and posterior paths jointly.
    """
    def __init__(self, input_nc=3, ngf=32, z_nc=256, img_f=256, L=6, layers=5, norm='none', activation='ReLU', use_spect=True, use_coord=False, image_dim=256, text_dim=256, multi_peak=True, pool_attention='max'):
        super(TextualResEncoder, self).__init__()
        self.layers = layers  # number of down-sampling stages
        self.z_nc = z_nc      # latent channels (mu/std each get z_nc)
        self.L = L            # number of 'infer_prior' refinement blocks
        norm_layer = get_norm_layer(norm_type=norm)
        nonlinearity = get_nonlinearity_layer(activation_type=activation)
        self.block0 = ResBlockEncoderOptimized(input_nc, ngf, norm_layer, nonlinearity, use_spect, use_coord)
        self.word_attention = ImageTextAttention(idf=image_dim, cdf=text_dim, multi_peak=multi_peak, pooling=pool_attention)
        # Down-sampling encoder blocks; channel width doubles up to img_f.
        mult = 1
        for i in range((layers - 1)):
            mult_prev = mult
            mult = min((2 ** (i + 2)), (img_f // ngf))
            block = ResBlock((ngf * mult_prev), (ngf * mult), (ngf * mult_prev), norm_layer, nonlinearity, 'down', use_spect, use_coord)
            setattr(self, ('encoder' + str(i)), block)
        # Same-resolution refinement blocks used by one_path.
        for i in range(self.L):
            block = ResBlock((ngf * mult), (ngf * mult), (ngf * mult), norm_layer, nonlinearity, 'none', use_spect, use_coord)
            setattr(self, ('infer_prior' + str(i)), block)
        # Heads mapping text features to 2*z_nc channels (mu, pre-softplus std).
        self.posterior = ResBlock((2 * text_dim), (2 * z_nc), ((ngf * mult) * 2), norm_layer, nonlinearity, 'none', use_spect, use_coord)
        self.prior = ResBlock((2 * text_dim), (2 * z_nc), ((ngf * mult) * 2), norm_layer, nonlinearity, 'none', use_spect, use_coord)
    def forward(self, img_m, sentence_embedding, word_embeddings, text_mask, image_mask, img_c=None):
        """img_m: masked image; img_c (optional): complement image for training.

        Returns (distribution(s), encoder feature pyramid, text features).
        """
        # When the complement image is given, batch it with the masked image so
        # both share one encoder pass (two-path / training mode).
        if (type(img_c) != type(None)):
            img = torch.cat([img_m, img_c], dim=0)
        else:
            img = img_m
        out = self.block0(img)
        feature = [out]
        for i in range((self.layers - 1)):
            model = getattr(self, ('encoder' + str(i)))
            out = model(out)
            feature.append(out)
        # Resize the image mask to the deepest feature resolution.
        image_mask = task.scale_img(image_mask, size=[feature[(- 1)].size(2), feature[(- 1)].size(3)])
        if (image_mask.size(1) == 3):
            image_mask = image_mask.chunk(3, dim=1)[0]
        if (type(img_c) != type(None)):
            # Split the batched features back into generation/reconstruction halves
            # and attend words with complementary masks.
            (f_m_g, f_m_rec) = feature[(- 1)].chunk(2)
            img_mask_g = image_mask
            img_mask_rec = (1 - img_mask_g)
            weighted_word_embedding_rec = self.word_attention(f_m_rec, word_embeddings, mask=text_mask, image_mask=img_mask_rec, inverse_attention=False)
            weighted_word_embedding_g = self.word_attention(f_m_g, word_embeddings, mask=text_mask, image_mask=img_mask_g, inverse_attention=True)
            weighted_word_embedding = torch.cat([weighted_word_embedding_g, weighted_word_embedding_rec])
            (distribution, f_text) = self.two_paths(out, sentence_embedding, weighted_word_embedding)
            return (distribution, feature, f_text)
        else:
            # Inference mode: prior path only.
            f_m = feature[(- 1)]
            weighted_word_embedding = self.word_attention(f_m, word_embeddings, mask=text_mask, image_mask=image_mask, inverse_attention=True)
            (distribution, f_text) = self.one_path(out, sentence_embedding, weighted_word_embedding)
            return (distribution, feature, f_text)
    def one_path(self, f_in, sentence_embedding, weighted_word_embedding):
        """Prior path: refine features, fuse sentence+word text, emit [mu, std]."""
        f_m = f_in
        distribution = []
        for i in range(self.L):
            infer_prior = getattr(self, ('infer_prior' + str(i)))
            f_m = infer_prior(f_m)
        # Tile the sentence embedding to the spatial size of the feature map.
        (ix, iw) = (f_m.size(2), f_m.size(3))
        sentence_dim = sentence_embedding.size(1)
        sentence_embedding_replication = sentence_embedding.view((- 1), sentence_dim, 1, 1).repeat(1, 1, ix, iw)
        f_text = torch.cat([sentence_embedding_replication, weighted_word_embedding], dim=1)
        o = self.prior(f_text)
        (q_mu, q_std) = torch.split(o, self.z_nc, dim=1)
        # Softplus keeps the stddev positive.
        distribution.append([q_mu, F.softplus(q_std)])
        return (distribution, f_text)
    def two_paths(self, f_in, sentence_embedding, weighted_word_embedding):
        """Training path: posterior from the complement half, prior from the masked half."""
        (f_m, f_c) = f_in.chunk(2)
        (weighted_word_embedding_m, weighted_word_embedding_c) = weighted_word_embedding.chunk(2)
        distributions = []
        (ix, iw) = (f_c.size(2), f_c.size(3))
        sentence_dim = sentence_embedding.size(1)
        sentence_embedding_replication = sentence_embedding.view((- 1), sentence_dim, 1, 1).repeat(1, 1, ix, iw)
        f_text_c = torch.cat([sentence_embedding_replication, weighted_word_embedding_c], dim=1)
        o = self.posterior(f_text_c)
        (p_mu, p_std) = torch.split(o, self.z_nc, dim=1)
        (distribution, f_text_m) = self.one_path(f_m, sentence_embedding, weighted_word_embedding_m)
        # Entry layout: [posterior mu, posterior std, prior mu, prior std].
        distributions.append([p_mu, F.softplus(p_std), distribution[0][0], distribution[0][1]])
        return (distributions, torch.cat([f_text_m, f_text_c], dim=0)) |
# NOTE(review): the bare '_model' below appears to be the residue of a stripped
# decorator (presumably '@register_model' from timm) — confirm against upstream.
_model
def fbnetv3_d(pretrained=False, **kwargs):
    """FBNetV3-D model; delegates construction to the shared _gen_fbnetv3 factory."""
    model = _gen_fbnetv3('fbnetv3_d', pretrained=pretrained, **kwargs)
    return model |
def data_provider(dataset_name, train_data_paths, valid_data_paths, batch_size, img_width, is_training=True):
    """Create dataset iterator(s) for the named dataset.

    Returns (train_handle, test_handle) when is_training, else just test_handle.
    NOTE(review): img_width is accepted but never used in this function.
    NOTE(review): test_input_handle is only bound inside the 'mnist' branch, so
    any other dataset name registered in datasets_map would hit a NameError at
    the returns below — confirm whether 'mnist' is the only supported dataset.
    """
    if (dataset_name not in datasets_map):
        raise ValueError(('Name of dataset unknown %s' % dataset_name))
    train_data_list = train_data_paths.split(',')
    valid_data_list = valid_data_paths.split(',')
    if (dataset_name == 'mnist'):
        # NOTE(review): name becomes 'mnisttest iterator' (no space) while the
        # train iterator uses ' train iterator' — likely an upstream typo.
        test_input_param = {'paths': valid_data_list, 'minibatch_size': batch_size, 'input_data_type': 'float32', 'is_output_sequence': True, 'name': (dataset_name + 'test iterator')}
        test_input_handle = datasets_map[dataset_name].InputHandle(test_input_param)
        test_input_handle.begin(do_shuffle=False)
    if is_training:
        train_input_param = {'paths': train_data_list, 'minibatch_size': batch_size, 'input_data_type': 'float32', 'is_output_sequence': True, 'name': (dataset_name + ' train iterator')}
        train_input_handle = datasets_map[dataset_name].InputHandle(train_input_param)
        train_input_handle.begin(do_shuffle=True)
        return (train_input_handle, test_input_handle)
    else:
        return test_input_handle |
class TestMSAColumnGlobalAttention(unittest.TestCase):
    """Tests for OpenFold's MSAColumnGlobalAttention: shape preservation and
    numerical parity with the reference AlphaFold (Haiku/JAX) implementation."""

    def test_shape(self):
        """The module must return a tensor with the same shape as its input."""
        batch_size = consts.batch_size
        n_seq = consts.n_seq
        n_res = consts.n_res
        c_m = consts.c_m
        c = 44
        no_heads = 4
        msagca = MSAColumnGlobalAttention(c_m, c, no_heads)
        x = torch.rand((batch_size, n_seq, n_res, c_m))
        shape_before = x.shape
        x = msagca(x, chunk_size=None)
        shape_after = x.shape
        self.assertTrue((shape_before == shape_after))

    # Bug fix: the source had the decorator reduced to a bare statement
    # '_utils.skip_unless_alphafold_installed()', which raises NameError when
    # the class body executes; restored as a decorator on test_compare.
    @compare_utils.skip_unless_alphafold_installed()
    def test_compare(self):
        """Compare OpenFold output against the pretrained AlphaFold module."""
        def run_msa_col_global_att(msa_act, msa_mask):
            config = compare_utils.get_alphafold_config()
            c_e = config.model.embeddings_and_evoformer.evoformer
            msa_col = alphafold.model.modules.MSAColumnGlobalAttention(c_e.msa_column_attention, config.model.global_config, name='msa_column_global_attention')
            act = msa_col(msa_act=msa_act, msa_mask=msa_mask)
            return act
        f = hk.transform(run_msa_col_global_att)
        n_res = consts.n_res
        n_seq = consts.n_seq
        c_e = consts.c_e
        msa_act = np.random.rand(n_seq, n_res, c_e)
        msa_mask = np.random.randint(low=0, high=2, size=(n_seq, n_res))
        params = compare_utils.fetch_alphafold_module_weights(('alphafold/alphafold_iteration/evoformer/extra_msa_stack/' + 'msa_column_global_attention'))
        # Strip the leading replica axis from each weight leaf.
        params = tree_map((lambda n: n[0]), params, jax.numpy.DeviceArray)
        out_gt = f.apply(params, None, msa_act, msa_mask).block_until_ready()
        out_gt = torch.as_tensor(np.array(out_gt.block_until_ready()))
        model = compare_utils.get_global_pretrained_openfold()
        out_repro = model.extra_msa_stack.blocks[0].msa_att_col(torch.as_tensor(msa_act, dtype=torch.float32).cuda(), chunk_size=4, mask=torch.as_tensor(msa_mask, dtype=torch.float32).cuda()).cpu()
        # Bug fix: assert the MAXIMUM absolute error is below eps.  The original
        # wrote torch.max(torch.abs(diff) < eps), which is True as soon as ANY
        # single element is close — a vacuous test.
        self.assertTrue(torch.max(torch.abs(out_gt - out_repro)) < consts.eps)
class DDIMScheduler(SchedulerMixin, ConfigMixin):
    """Denoising Diffusion Implicit Models (DDIM) scheduler.

    Precomputes the beta/alpha noise schedule in ``__init__`` and exposes
    ``set_timesteps``/``step`` for deterministic (eta=0) or stochastic (eta>0)
    sampling, plus ``add_noise``/``get_velocity`` helpers used at training time.

    NOTE(review): methods read ``self.config.*`` (e.g. ``self.config.num_train_timesteps``),
    which is presumably populated by a ``register_to_config``-style decorator on
    ``__init__`` — see the stray ``_to_config`` token below; confirm upstream.
    """
    # Schedulers whose configs this scheduler can be loaded from.
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    # First-order solver: one model evaluation per step.
    order = 1
    # NOTE(review): bare expression — looks like the residue of a stripped
    # decorator (likely `@register_to_config` on __init__); restore upstream.
    _to_config
    def __init__(self, num_train_timesteps: int=1000, beta_start: float=0.0001, beta_end: float=0.02, beta_schedule: str='linear', trained_betas: Optional[Union[(np.ndarray, List[float])]]=None, clip_sample: bool=True, set_alpha_to_one: bool=True, steps_offset: int=0, prediction_type: str='epsilon', thresholding: bool=False, dynamic_thresholding_ratio: float=0.995, clip_sample_range: float=1.0, sample_max_value: float=1.0, timestep_spacing: str='leading', rescale_betas_zero_snr: bool=False):
        """Build the beta schedule and derived alpha products.

        Raises:
            NotImplementedError: if ``beta_schedule`` is not one of
                'linear', 'scaled_linear', 'squaredcos_cap_v2' (and no
                ``trained_betas`` were given).
        """
        # Explicit betas take precedence over any named schedule.
        if (trained_betas is not None):
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif (beta_schedule == 'linear'):
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif (beta_schedule == 'scaled_linear'):
            # Linear in sqrt(beta)-space, then squared.
            self.betas = (torch.linspace((beta_start ** 0.5), (beta_end ** 0.5), num_train_timesteps, dtype=torch.float32) ** 2)
        elif (beta_schedule == 'squaredcos_cap_v2'):
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            # NOTE(review): error message contains a typo ("does is") — kept
            # byte-identical here; fix the string in a dedicated change.
            raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}')
        if rescale_betas_zero_snr:
            # Rescale so the terminal step has zero signal-to-noise ratio.
            self.betas = rescale_zero_terminal_snr(self.betas)
        self.alphas = (1.0 - self.betas)
        # Cumulative product alpha_bar_t used throughout the sampling math.
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        # alpha_bar for the "previous" step of the final denoising step.
        self.final_alpha_cumprod = (torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0])
        # Initial latents are unit-variance for this scheduler.
        self.init_noise_sigma = 1.0
        self.num_inference_steps = None
        # Default (training) timestep grid: num_train_timesteps-1 .. 0.
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::(- 1)].copy().astype(np.int64))
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int]=None) -> torch.FloatTensor:
        """Return ``sample`` unchanged — DDIM needs no input scaling; kept for API parity."""
        return sample
    def _get_variance(self, timestep, prev_timestep):
        """Variance of the reverse transition between ``timestep`` and ``prev_timestep``."""
        alpha_prod_t = self.alphas_cumprod[timestep]
        # Fall back to final_alpha_cumprod when stepping past t=0.
        alpha_prod_t_prev = (self.alphas_cumprod[prev_timestep] if (prev_timestep >= 0) else self.final_alpha_cumprod)
        beta_prod_t = (1 - alpha_prod_t)
        beta_prod_t_prev = (1 - alpha_prod_t_prev)
        variance = ((beta_prod_t_prev / beta_prod_t) * (1 - (alpha_prod_t / alpha_prod_t_prev)))
        return variance
    def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
        """Dynamically threshold the predicted x0: clamp to the per-sample
        ``dynamic_thresholding_ratio`` quantile of |x| and rescale into [-s, s]."""
        dtype = sample.dtype
        (batch_size, channels, *remaining_dims) = sample.shape
        # torch.quantile requires float32/float64.
        if (dtype not in (torch.float32, torch.float64)):
            sample = sample.float()
        # Flatten all non-batch dims so the quantile is taken per sample.
        sample = sample.reshape(batch_size, (channels * np.prod(remaining_dims)))
        abs_sample = sample.abs()
        s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
        # Keep the threshold in [1, sample_max_value].
        s = torch.clamp(s, min=1, max=self.config.sample_max_value)
        s = s.unsqueeze(1)
        sample = (torch.clamp(sample, (- s), s) / s)
        sample = sample.reshape(batch_size, channels, *remaining_dims)
        sample = sample.to(dtype)
        return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[(str, torch.device)]=None):
        """Compute the discrete inference timestep grid per ``timestep_spacing``.

        Raises:
            ValueError: if ``num_inference_steps`` exceeds the training steps,
                or on an unknown ``timestep_spacing``.
        """
        if (num_inference_steps > self.config.num_train_timesteps):
            raise ValueError(f'`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`: {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle maximal {self.config.num_train_timesteps} timesteps.')
        self.num_inference_steps = num_inference_steps
        if (self.config.timestep_spacing == 'linspace'):
            # Evenly spread over [0, T-1], descending.
            timesteps = np.linspace(0, (self.config.num_train_timesteps - 1), num_inference_steps).round()[::(- 1)].copy().astype(np.int64)
        elif (self.config.timestep_spacing == 'leading'):
            # Integer stride from 0, then shifted by steps_offset.
            step_ratio = (self.config.num_train_timesteps // self.num_inference_steps)
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::(- 1)].copy().astype(np.int64)
            timesteps += self.config.steps_offset
        elif (self.config.timestep_spacing == 'trailing'):
            # Float stride counting down from T; minus 1 so the last step is T-1.
            step_ratio = (self.config.num_train_timesteps / self.num_inference_steps)
            timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, (- step_ratio))).astype(np.int64)
            timesteps -= 1
        else:
            # NOTE(review): message omits the supported 'linspace' option.
            raise ValueError(f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'.")
        self.timesteps = torch.from_numpy(timesteps).to(device)
    def step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float=0.0, use_clipped_model_output: bool=False, generator=None, variance_noise: Optional[torch.FloatTensor]=None, return_dict: bool=True) -> Union[(DDIMSchedulerOutput, Tuple)]:
        """Propagate ``sample`` one DDIM step backward in diffusion time.

        Args:
            model_output: direct model prediction (meaning depends on
                ``config.prediction_type``).
            timestep: current discrete timestep.
            sample: current noisy sample x_t.
            eta: stochasticity weight; 0 gives deterministic DDIM.
            use_clipped_model_output: recompute epsilon from the (possibly
                clipped/thresholded) x0 prediction.
            generator / variance_noise: mutually exclusive noise sources for
                the stochastic term.
            return_dict: return a ``DDIMSchedulerOutput`` instead of a tuple.

        Raises:
            ValueError: if ``set_timesteps`` has not been called, on an unknown
                prediction type, or when both generator and variance_noise are given.
        """
        if (self.num_inference_steps is None):
            raise ValueError("Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
        # Previous timestep on the inference grid (may go below 0 at the end).
        prev_timestep = (timestep - (self.config.num_train_timesteps // self.num_inference_steps))
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (self.alphas_cumprod[prev_timestep] if (prev_timestep >= 0) else self.final_alpha_cumprod)
        beta_prod_t = (1 - alpha_prod_t)
        # Recover x0 and epsilon according to the model parameterization.
        if (self.config.prediction_type == 'epsilon'):
            pred_original_sample = ((sample - ((beta_prod_t ** 0.5) * model_output)) / (alpha_prod_t ** 0.5))
            pred_epsilon = model_output
        elif (self.config.prediction_type == 'sample'):
            pred_original_sample = model_output
            pred_epsilon = ((sample - ((alpha_prod_t ** 0.5) * pred_original_sample)) / (beta_prod_t ** 0.5))
        elif (self.config.prediction_type == 'v_prediction'):
            pred_original_sample = (((alpha_prod_t ** 0.5) * sample) - ((beta_prod_t ** 0.5) * model_output))
            pred_epsilon = (((alpha_prod_t ** 0.5) * model_output) + ((beta_prod_t ** 0.5) * sample))
        else:
            raise ValueError(f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`')
        # Optionally constrain the x0 prediction (thresholding wins over clipping).
        if self.config.thresholding:
            pred_original_sample = self._threshold_sample(pred_original_sample)
        elif self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp((- self.config.clip_sample_range), self.config.clip_sample_range)
        variance = self._get_variance(timestep, prev_timestep)
        std_dev_t = (eta * (variance ** 0.5))
        if use_clipped_model_output:
            # Re-derive epsilon from the constrained x0 so the step stays consistent.
            pred_epsilon = ((sample - ((alpha_prod_t ** 0.5) * pred_original_sample)) / (beta_prod_t ** 0.5))
        # Deterministic "direction pointing to x_t" term of the DDIM update.
        pred_sample_direction = ((((1 - alpha_prod_t_prev) - (std_dev_t ** 2)) ** 0.5) * pred_epsilon)
        prev_sample = (((alpha_prod_t_prev ** 0.5) * pred_original_sample) + pred_sample_direction)
        if (eta > 0):
            if ((variance_noise is not None) and (generator is not None)):
                raise ValueError('Cannot pass both generator and variance_noise. Please make sure that either `generator` or `variance_noise` stays `None`.')
            if (variance_noise is None):
                variance_noise = randn_tensor(model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype)
            # Stochastic term scaled by eta * sqrt(variance).
            variance = (std_dev_t * variance_noise)
            prev_sample = (prev_sample + variance)
        if (not return_dict):
            return (prev_sample,)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor) -> torch.FloatTensor:
        """Forward-diffuse: x_t = sqrt(a_bar_t) * x_0 + sqrt(1 - a_bar_t) * noise."""
        # Match device/dtype of the samples before indexing by timestep.
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)
        sqrt_alpha_prod = (alphas_cumprod[timesteps] ** 0.5)
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        # Append singleton dims so the per-timestep scalars broadcast over samples.
        while (len(sqrt_alpha_prod.shape) < len(original_samples.shape)):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze((- 1))
        sqrt_one_minus_alpha_prod = ((1 - alphas_cumprod[timesteps]) ** 0.5)
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while (len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape)):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze((- 1))
        noisy_samples = ((sqrt_alpha_prod * original_samples) + (sqrt_one_minus_alpha_prod * noise))
        return noisy_samples
    def get_velocity(self, sample: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.IntTensor) -> torch.FloatTensor:
        """v-prediction target: v = sqrt(a_bar_t) * noise - sqrt(1 - a_bar_t) * x_0."""
        alphas_cumprod = self.alphas_cumprod.to(device=sample.device, dtype=sample.dtype)
        timesteps = timesteps.to(sample.device)
        sqrt_alpha_prod = (alphas_cumprod[timesteps] ** 0.5)
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        # Broadcast per-timestep factors over the sample dimensions.
        while (len(sqrt_alpha_prod.shape) < len(sample.shape)):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze((- 1))
        sqrt_one_minus_alpha_prod = ((1 - alphas_cumprod[timesteps]) ** 0.5)
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while (len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape)):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze((- 1))
        velocity = ((sqrt_alpha_prod * noise) - (sqrt_one_minus_alpha_prod * sample))
        return velocity
    def __len__(self):
        """Number of training timesteps."""
        return self.config.num_train_timesteps
def test_setr_up_head(capsys):
    """SETRUPHead sanity checks: invalid configs must raise AssertionError,
    base-class weight init must run, and the head must upsample feature maps
    by `up_scale` in both spatial dimensions."""
    # Unsupported kernel_size is rejected.
    with pytest.raises(AssertionError):
        SETRUPHead(num_classes=19, kernel_size=2)
    # in_channels must be an int, not a tuple.
    with pytest.raises(AssertionError):
        SETRUPHead(in_channels=(4, 4), channels=2, num_classes=19)
    # Base-class init_weights should run cleanly with an explicit init_cfg.
    head = SETRUPHead(in_channels=4, channels=2, norm_cfg=dict(type='SyncBN'), num_classes=19, init_cfg=dict(type='Kaiming'))
    super(SETRUPHead, head).init_weights()
    img_size = (4, 4)
    patch_size = 2
    head = SETRUPHead(in_channels=4, channels=2, num_classes=19, num_convs=1, up_scale=4, kernel_size=1, norm_cfg=dict(type='BN'))
    feat_h = img_size[0] // patch_size
    feat_w = img_size[1] // patch_size
    # Square input first, then a 2x-wider input: output is always 4x the input.
    for width_mult in (1, 2):
        inputs = [torch.randn(1, 4, feat_h, feat_w * width_mult)]
        if torch.cuda.is_available():
            (head, inputs) = to_cuda(head, inputs)
        out = head(inputs)
        assert out.shape == (1, head.num_classes, feat_h * 4, feat_w * width_mult * 4)
# NOTE(review): bare `_module()` call looks like the residue of a stripped
# registry decorator (e.g. `@HEADS.register_module()`) — confirm upstream.
_module()
class FCNHead(BaseDecodeHead):
    """Fully Convolutional Network decode head.

    Applies `num_convs` ConvModules to the transformed inputs and optionally
    concatenates the input features back in before classification.
    """
    def __init__(self, num_convs=2, kernel_size=3, concat_input=True, dilation=1, **kwargs):
        """Build the conv stack; with num_convs == 0 the head is an identity
        (which requires in_channels == channels)."""
        assert ((num_convs >= 0) and (dilation > 0) and isinstance(dilation, int))
        self.num_convs = num_convs
        self.concat_input = concat_input
        self.kernel_size = kernel_size
        super(FCNHead, self).__init__(**kwargs)
        if num_convs == 0:
            assert (self.in_channels == self.channels)
        conv_padding = ((kernel_size // 2) * dilation)
        # First conv maps in_channels -> channels; the rest are channels -> channels.
        conv_layers = [
            ConvModule(
                (self.in_channels if (idx == 0) else self.channels),
                self.channels,
                kernel_size=kernel_size,
                padding=conv_padding,
                dilation=dilation,
                conv_cfg=self.conv_cfg,
                norm_cfg=self.norm_cfg,
                act_cfg=self.act_cfg)
            for idx in range(num_convs)
        ]
        self.convs = nn.Sequential(*conv_layers) if conv_layers else nn.Identity()
        if self.concat_input:
            # Fuses the raw transformed input with the conv-stack output.
            self.conv_cat = ConvModule((self.in_channels + self.channels), self.channels, kernel_size=kernel_size, padding=(kernel_size // 2), conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg, act_cfg=self.act_cfg)
    def _forward_feature(self, inputs):
        """Run the conv stack (and optional input concatenation) on the inputs."""
        transformed = self._transform_inputs(inputs)
        feats = self.convs(transformed)
        if self.concat_input:
            feats = self.conv_cat(torch.cat([transformed, feats], dim=1))
        return feats
    def forward(self, inputs):
        """Compute per-pixel class logits."""
        return self.cls_seg(self._forward_feature(inputs))
def get_dataset_params(is_gcloud=False, tfrecord_dir=constants.NVIDIA_CELEBA_HQ_DATASET_PATH_GCLOUD, **kwargs):
    """Build CelebA-HQ dataset parameters.

    When `is_gcloud` is set, points the params at the GCS bucket and the given
    tfrecord directory; otherwise forwards `kwargs` unchanged.
    """
    if not is_gcloud:
        return CelebAHQDatasetParams(**kwargs)
    return CelebAHQDatasetParams(gcs_bucket=constants.GCLOUD_BUCKET, tfrecord_dir=tfrecord_dir, **kwargs)
def bg_white(seg, raw, blur_level=3, gaussian=81):
    """Composite `raw` onto a softened white background using mask `seg`.

    The segmentation mask is box-blurred, its inverse is Gaussian-blurred into
    a bright background, and foreground/background are alpha-blended in
    normalized [0, 1] space before converting back to uint8.
    """
    scale = 1 / 255
    seg = cv2.blur(seg, (blur_level, blur_level))
    # Inverted mask scaled to pixel range, then heavily smoothed.
    inverted = ((np.ones_like(seg) - seg) * 255)
    soft_bg = cv2.GaussianBlur(inverted, (gaussian, gaussian), 0)
    # 3-channel background weight derived from the inverted grayscale mask.
    gray = cv2.cvtColor(seg, cv2.COLOR_BGR2GRAY)
    background_mask = cv2.cvtColor((255 - gray), cv2.COLOR_GRAY2BGR)
    masked_fg = ((raw * scale) * (seg * scale))
    masked_bg = ((soft_bg * scale) * (background_mask * scale))
    return np.uint8((cv2.add(masked_bg, masked_fg) * 255))
def test_disaggregated_scores_are_determinstic():
    """Non-bootstrap (per-example) ROUGE must be deterministic: the rouge2
    f-measure mean must not change when additional keys (rougeL) are scored
    in the same call."""
    # bootstrap_aggregation=False returns per-example scores keyed by rouge key.
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['rouge2', 'rougeL'])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=['rouge2'])
    # rouge2 scores must be identical whether or not rougeL was also requested.
    assert (pd.DataFrame(no_aggregation['rouge2']).fmeasure.mean() == pd.DataFrame(no_aggregation_just_r2['rouge2']).fmeasure.mean())
def _find_my_group(grouped_ranks):
    """Return the rank group (from `grouped_ranks`) containing the current process."""
    return grouped_ranks[_find_my_group_index(grouped_ranks)]
# NOTE(review): bare `_module()` call looks like the residue of a stripped
# registry decorator (e.g. `@HEADS.register_module()`) — confirm upstream.
_module()
class OBBRetinaHead(OBBAnchorHead):
    """RetinaNet-style dense head predicting oriented bounding boxes.

    Two parallel stacks of `stacked_convs` ConvModules feed a classification
    conv and a regression conv; regression is 5-dim per anchor (reg_dim=5,
    bbox_type='obb').
    """
    def __init__(self, num_classes, in_channels, stacked_convs=4, conv_cfg=None, norm_cfg=None, anchor_generator=dict(type='AnchorGenerator', octave_base_scale=4, scales_per_octave=3, ratios=[0.5, 1.0, 2.0], strides=[8, 16, 32, 64, 128]), **kwargs):
        # NOTE(review): the dict default for `anchor_generator` is shared across
        # calls; safe only if never mutated downstream — confirm.
        self.stacked_convs = stacked_convs
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        super(OBBRetinaHead, self).__init__(num_classes, in_channels, bbox_type='obb', reg_dim=5, anchor_generator=anchor_generator, **kwargs)
    def _init_layers(self):
        """Create the shared cls/reg conv towers and the prediction convs."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        self.reg_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            # First tower layer maps in_channels -> feat_channels.
            chn = (self.in_channels if (i == 0) else self.feat_channels)
            self.cls_convs.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
            self.reg_convs.append(ConvModule(chn, self.feat_channels, 3, stride=1, padding=1, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg))
        self.retina_cls = nn.Conv2d(self.feat_channels, (self.num_anchors * self.cls_out_channels), 3, padding=1)
        self.retina_reg = nn.Conv2d(self.feat_channels, (self.num_anchors * self.reg_dim), 3, padding=1)
    def init_weights(self):
        """Normal-init all convs; classification conv gets a focal-loss prior bias."""
        for m in self.cls_convs:
            normal_init(m.conv, std=0.01)
        for m in self.reg_convs:
            normal_init(m.conv, std=0.01)
        bias_cls = bias_init_with_prob(0.01)
        normal_init(self.retina_cls, std=0.01, bias=bias_cls)
        normal_init(self.retina_reg, std=0.01)
    def forward_single(self, x):
        """Run one feature level through both towers; returns (cls_score, bbox_pred)."""
        cls_feat = x
        reg_feat = x
        for cls_conv in self.cls_convs:
            cls_feat = cls_conv(cls_feat)
        for reg_conv in self.reg_convs:
            reg_feat = reg_conv(reg_feat)
        cls_score = self.retina_cls(cls_feat)
        bbox_pred = self.retina_reg(reg_feat)
        return (cls_score, bbox_pred)
# NOTE(review): restored the `@dataclass` decorator — the annotated fields and
# the `field(default_factory=...)` usage show this was a dataclass whose
# decorator was stripped in this copy; without it the class has no __init__.
@dataclass
class NfCfg():
    """Architecture configuration for a Normalizer-Free (NF) network."""
    depths: Tuple[(int, int, int, int)]    # number of blocks per stage
    channels: Tuple[(int, int, int, int)]  # base channel count per stage
    alpha: float = 0.2                     # residual branch scaling
    gamma_in_act: bool = False             # fold gamma into the activation
    stem_type: str = '3x3'
    stem_chs: Optional[int] = None
    group_size: Optional[int] = 8
    attn_layer: Optional[str] = 'se'       # attention/SE layer name
    # default_factory so each instance gets its own kwargs dict
    attn_kwargs: dict = field(default_factory=(lambda : dict(reduction_ratio=0.5, divisor=8)))
    attn_gain: float = 2.0
    width_factor: float = 0.75
    bottle_ratio: float = 2.25
    efficient: bool = True                 # NFNet-style "efficient" variant
    num_features: int = 1280               # final feature dim before classifier
    ch_div: int = 8                        # round channels to a multiple of this
    skipinit: bool = False
    act_layer: str = 'silu'
class PositionSortLauncher():
    """Decides the order in which Flatland rail agents are allowed to depart.

    Agents are grouped into "cities" by initial-position proximity, ordered by
    city, and released gradually: a new city is unlocked each time enough
    agents from the current batch have finished or deadlocked.
    """
    def __init__(self):
        # State is created in reset(); the launcher is reusable across episodes.
        pass
    def _no_opposite(self, handle):
        """True if no agent is heading in the opposite direction of `handle`
        (delegates to the module-level `_has_opposite` helper)."""
        return (not _has_opposite(self.env, handle))
    def _by_cities(self):
        """Assign each agent a city index based on initial-position proximity.

        Two agents share a city when both coordinates differ by < 10 cells —
        a heuristic; confirm it matches the map's actual city layout.
        """
        self._city_n = ([(- 1)] * len(self.env.agents))
        timer = 0
        for (handle, agent) in enumerate(self.env.agents):
            (cur_x, cur_y) = agent.initial_position
            # Inherit the city of any earlier nearby agent.
            for prev in range(handle):
                (x, y) = self.env.agents[prev].initial_position
                if ((abs((x - cur_x)) < 10) and (abs((y - cur_y)) < 10)):
                    self._city_n[handle] = self._city_n[prev]
            if (self._city_n[handle] == (- 1)):
                # No neighbor found: open a new city index.
                self._city_n[handle] = timer
                timer += 1
    def reset(self, env):
        """Bind a fresh environment and rebuild the departure ordering."""
        self.env = env
        self.order = [i for i in range(len(self.env.agents))]
        self._by_cities()
        # Stable sort: agents grouped by city index.
        self.order.sort(key=(lambda handle: self._city_n[handle]))
        # 0 = not released, 1 = released, 2 = finished/deadlocked.
        self.ready_to_depart = ([0] * len(self.env.agents))
        self.cur_pos = 0
        self.send_more = 0
        self.max_city = (- 1)
    def update(self):
        """Advance the release schedule for the current step.

        Counts released agents that finished or deadlocked; once enough have
        (threshold depends on max_city — heuristic, see inline note), unlocks
        the next city and releases eligible agents in city order.
        """
        for handle in range(len(self.env.agents)):
            if (((self.env.agents[handle].status == RailAgentStatus.DONE_REMOVED) or self.env.obs_builder.deadlock_checker.is_deadlocked(handle)) and (self.ready_to_depart[handle] == 1)):
                self.ready_to_depart[handle] = 2
                self.send_more += 1
        # NOTE(review): threshold `(-max_city) * 2 - 2` shrinks as max_city
        # grows (max_city starts at -1) — intent unclear from here; confirm.
        if (self.send_more >= (((- self.max_city) * 2) - 2)):
            self.max_city += 1
            for pos in range(len(self.env.agents)):
                handle = self.order[pos]
                # Release agents from unlocked cities with no opposing traffic.
                if ((self.ready_to_depart[handle] == 0) and (self._city_n[handle] <= self.max_city) and self._no_opposite(handle)):
                    self.send_more -= 1
                    self.ready_to_depart[handle] = 1
    def is_ready(self, handle):
        """True once `handle` has been released (or has since finished)."""
        return (self.ready_to_depart[handle] != 0)
def m_dreg_looser(model, x, K=1):
    """DReG-style "looser" multi-modal bound estimator, computed in micro-batches.

    Splits each modality in `x` into micro-batches of size S, collects
    per-micro-batch log importance weights and latent samples from
    `_m_dreg_looser`, then applies detached self-normalized weights with a
    gradient hook implementing the doubly-reparameterized (DReG) correction.

    NOTE(review): the exact shapes of `lw`/`zss` (concatenated along dim 2,
    normalized over dim 1) come from `_m_dreg_looser`, which is not visible
    here — confirm against its implementation.
    """
    S = compute_microbatch_split(x, K)
    # Re-group the per-modality micro-batches so each zip item is one micro-batch.
    x_split = zip(*[_x.split(S) for _x in x])
    (lw, zss) = zip(*[_m_dreg_looser(model, _x, K) for _x in x_split])
    lw = torch.cat(lw, 2)
    zss = torch.cat(zss, 2)
    with torch.no_grad():
        # Self-normalized importance weights, detached from the graph.
        grad_wt = (lw - torch.logsumexp(lw, 1, keepdim=True)).exp()
        if zss.requires_grad:
            # DReG trick: rescale the latents' incoming gradient by the weights.
            zss.register_hook((lambda grad: (grad_wt.unsqueeze((- 1)) * grad)))
    return (grad_wt * lw).mean(0).sum()
def torch2numpy(input):
    """Convert a torch.Tensor to a NumPy array (detached and moved to CPU)."""
    assert isinstance(input, torch.Tensor), type(input)
    detached = input.detach().cpu()
    return detached.numpy()
def process_feature(feature: example_pb2.Feature, typename: str, typename_mapping: Dict, key: str) -> np.ndarray:
    """Decode a single tf.train.Feature field into a NumPy array.

    The wire typename is inferred from the feature's populated field; if a
    `typename` is requested, it is validated against the inferred one via
    `typename_mapping`. bytes_list becomes a uint8 buffer view, float_list a
    float32 array, int64_list an int64 array; anything else is returned raw.

    Raises:
        TypeError: when the requested typename does not match the stored one.
    """
    (descriptor, wrapped) = feature.ListFields()[0]
    inferred_typename = descriptor.name
    value = wrapped.value
    if (typename is not None):
        tf_typename = typename_mapping[typename]
        if (tf_typename != inferred_typename):
            reversed_mapping = {v: k for (k, v) in typename_mapping.items()}
            raise TypeError(f"Incompatible type '{typename}' for `{key}` (should be '{reversed_mapping[inferred_typename]}').")
    if (inferred_typename == 'bytes_list'):
        return np.frombuffer(value[0], dtype=np.uint8)
    if (inferred_typename == 'float_list'):
        return np.array(value, dtype=np.float32)
    if (inferred_typename == 'int64_list'):
        return np.array(value, dtype=np.int64)
    return value
def save_checkpoint(obj, directory, step_num, use_thread=False):
    """Serialize `obj` under `directory` twice: as the rolling "latest"
    checkpoint and as a step-numbered checkpoint.

    `use_thread` is accepted for API compatibility but saving is synchronous.
    """
    if use_thread:
        warnings.warn('use_threads set to True, but done synchronously still')
    os.makedirs(directory, exist_ok=True)
    # Latest checkpoint first, then the per-step snapshot.
    for target in (checkpoint_name(directory), checkpoint_name(directory, step_num)):
        torch.save(obj, target, pickle_module=pickle)
def prepare_t5(tokenizer, data_dir, max_input_length, max_output_length, lower=True):
    """Tokenize the train/dev/test splits under `data_dir` into T5 feature files.

    For each split `<data_dir>/<split>` that has a companion `<split>.source`
    file, processes it with `process_file_t5` and writes features to
    `<data_dir>/<split>_<max_input_length>_<max_output_length>.t5`. Splits
    without a `.source` file are silently skipped.

    Args:
        tokenizer: tokenizer passed through to feature building.
        data_dir: directory containing the `train`/`dev`/`test` split files.
        max_input_length: maximum encoder sequence length.
        max_output_length: maximum decoder sequence length.
        lower: lowercase text during processing.
    """
    # The original had three identical copy-pasted stanzas; this loop performs
    # the exact same calls, prints, and ordering (train, dev, test).
    for split in ('train', 'dev', 'test'):
        split_file = f'{data_dir}/{split}'
        out_file = f'{data_dir}/{split}_{max_input_length}_{max_output_length}.t5'
        if not path.exists((split_file + '.source')):
            continue
        print(f'prepare {out_file}')
        # The eval-side return value was never used by the original either.
        (examples, _eval_data) = process_file_t5(split_file, tokenizer, lower=lower)
        build_features_t5(examples, split, out_file, tokenizer, max_input_length=max_input_length, max_output_length=max_output_length)
class simpleMLP(nn.Module):
    """Three-layer MLP classifier over flattened 28x28 (MNIST-sized) inputs.

    Architecture: flatten -> Linear(784, 256) -> ReLU -> Linear(256, 128)
    -> ReLU -> Linear(128, n_c). Returns raw logits (no softmax).
    """
    def __init__(self, i_c=1, n_c=10):
        # NOTE(review): `i_c` is accepted but unused — the first linear layer
        # hard-codes a 28*28 input; confirm whether i_c should size it.
        super(simpleMLP, self).__init__()
        # Flattens (B, ...) to (B, features) via the project's Expression wrapper.
        self.flatten = Expression((lambda tensor: tensor.view(tensor.shape[0], (- 1))))
        self.fc1 = nn.Linear((28 * 28), 256, bias=True)
        self.fc2 = nn.Linear(256, 128, bias=True)
        self.fc3 = nn.Linear(128, n_c, bias=True)
    def forward(self, x_i, _eval=False):
        # `_eval` is accepted for interface compatibility but has no effect here.
        x_o = self.flatten(x_i)
        x_o = torch.relu(self.fc1(x_o))
        x_o = torch.relu(self.fc2(x_o))
        return self.fc3(x_o)
# NOTE(review): bare call below looks like the residue of a stripped decorator
# (likely `@register_features_generator('morgan_count')`) — confirm upstream.
_features_generator('morgan_count')
def morgan_counts_features_generator(mol: Molecule, radius: int=MORGAN_RADIUS, num_bits: int=MORGAN_NUM_BITS) -> np.ndarray:
    """Generate a counts-based Morgan fingerprint for a molecule.

    Args:
        mol: a molecule, given either as a SMILES string or an RDKit Mol.
        radius: Morgan fingerprint radius.
        num_bits: number of bits in the hashed fingerprint.

    Returns:
        A 1-D numpy array containing the counts-based Morgan fingerprint.
    """
    # isinstance (instead of `type(mol) == str`) also accepts str subclasses.
    mol = (Chem.MolFromSmiles(mol) if isinstance(mol, str) else mol)
    features_vec = AllChem.GetHashedMorganFingerprint(mol, radius, nBits=num_bits)
    features = np.zeros((1,))
    # ConvertToNumpyArray fills `features` in place (presumably resizing it to
    # num_bits — per RDKit behavior; confirm).
    DataStructs.ConvertToNumpyArray(features_vec, features)
    return features
def initgen(mesh_size, freq=3, boundary='Periodic', dtype=None, device=None, batch_size=1):
    """Generate a batch of initial fields via `_initgen`.

    Returns a single sample (no batch dimension) when batch_size == 1,
    otherwise a tensor stacked along a new leading batch dimension.
    """
    samples = [_initgen(mesh_size, freq=freq, boundary=boundary, dtype=dtype, device=device) for _ in range(batch_size)]
    stacked = torch.stack(samples, dim=0)
    return stacked[0] if (batch_size == 1) else stacked
class RankingAndFitnessSelection(Selection[(List[S], List[S])]):
    """Environmental selection combining non-dominated ranking with a
    hypervolume-based (HypE-style) fitness used to trim the last front
    admitted into the next population."""
    def __init__(self, max_population_size: int, reference_point: S, dominance_comparator: Comparator=DominanceComparator()):
        # NOTE(review): the `DominanceComparator()` default is created once at
        # def time and shared across instances; fine only if it is stateless.
        super(RankingAndFitnessSelection, self).__init__()
        self.max_population_size = max_population_size
        self.dominance_comparator = dominance_comparator
        self.reference_point = reference_point
    def hypesub(self, l, A, actDim, bounds, pvec, alpha, k):
        """Recursive hypervolume-contribution computation (HypE `hypesub`).

        Args:
            l: total population size (length of the returned contribution list).
            A: objective vectors under consideration.
            actDim: objective dimension currently being swept.
            bounds: reference point objectives.
            pvec: indices of the points in A within the full population.
            alpha: precomputed HypE weighting coefficients.
            k: number of points to be removed (truncation parameter).
        """
        h = [0 for _ in range(l)]
        # Sort points by the active objective dimension.
        Adim = [a[(actDim - 1)] for a in A]
        indices_sort = sorted(range(len(Adim)), key=Adim.__getitem__)
        S = [A[j] for j in indices_sort]
        pvec = [pvec[j] for j in indices_sort]
        for i in range(1, (len(S) + 1)):
            # Slab width between consecutive sorted points (or up to the bound).
            if (i < len(S)):
                extrusion = (S[i][(actDim - 1)] - S[(i - 1)][(actDim - 1)])
            else:
                extrusion = (bounds[(actDim - 1)] - S[(i - 1)][(actDim - 1)])
            if (actDim == 1):
                if (i > k):
                    break
                # NOTE(review): `all(alpha)` yields a bool, so `all(alpha) >= 0`
                # is always True; the intent was likely an element-wise
                # non-negativity check (e.g. alpha[i-1] >= 0). Kept as-is to
                # preserve behavior — confirm against the reference HypE code.
                if (all(alpha) >= 0):
                    for p in pvec[0:i]:
                        h[p] = (h[p] + (extrusion * alpha[(i - 1)]))
            elif (extrusion > 0):
                # Recurse into the next-lower objective dimension.
                h = [(h[j] + (extrusion * self.hypesub(l, S[0:i], (actDim - 1), bounds, pvec[0:i], alpha, k)[j])) for j in range(l)]
        return h
    def compute_hypervol_fitness_values(self, population: List[S], reference_point: S, k: int):
        """Attach a hypervolume-based 'fitness' attribute to every solution.

        Args:
            population: solutions of the front being trimmed.
            reference_point: solution providing the hypervolume bounds.
            k: truncation parameter (negative means "use population size").
        """
        points = [ind.objectives for ind in population]
        bounds = reference_point.objectives
        population_size = len(points)
        if (k < 0):
            k = population_size
        actDim = len(bounds)
        pvec = range(population_size)
        # HypE alpha coefficients: alpha_i = prod_{j<i} (k-j)/(n-j) / i.
        alpha = []
        for i in range(1, (k + 1)):
            alpha.append((np.prod([(float((k - j)) / (population_size - j)) for j in range(1, i)]) / i))
        f = self.hypesub(population_size, points, actDim, bounds, pvec, alpha, k)
        for i in range(len(population)):
            population[i].attributes['fitness'] = f[i]
        return population
    def execute(self, front: List[S]) -> List[S]:
        """Select `max_population_size` solutions from `front`.

        Whole non-dominated fronts are copied while they fit; the last,
        partially-fitting front is shrunk one solution at a time by dropping
        the lowest hypervolume-fitness member.

        Raises:
            Exception: if the front is None or empty.
        """
        if (front is None):
            raise Exception('The front is null')
        elif (len(front) == 0):
            raise Exception('The front is empty')
        ranking = FastNonDominatedRanking(self.dominance_comparator)
        ranking.compute_ranking(front)
        ranking_index = 0
        new_solution_list = []
        while (len(new_solution_list) < self.max_population_size):
            if (len(ranking.get_subfront(ranking_index)) < (self.max_population_size - len(new_solution_list))):
                # Whole subfront fits: take it and move on.
                subfront = ranking.get_subfront(ranking_index)
                new_solution_list = (new_solution_list + subfront)
                ranking_index += 1
            else:
                # Partial fit: iteratively drop the worst-fitness solution.
                subfront = ranking.get_subfront(ranking_index)
                parameter_K = (len(subfront) - (self.max_population_size - len(new_solution_list)))
                while (parameter_K > 0):
                    subfront = self.compute_hypervol_fitness_values(subfront, self.reference_point, parameter_K)
                    subfront = sorted(subfront, key=(lambda x: x.attributes['fitness']), reverse=True)
                    subfront = subfront[:(- 1)]
                    parameter_K = (parameter_K - 1)
                new_solution_list = (new_solution_list + subfront)
        return new_solution_list
    def get_name(self) -> str:
        """Human-readable operator name."""
        return 'Ranking and fitness selection'
def train_AugTune(args, io):
    """Train a point-cloud classifier (PointNet/DGCNN) on ModelNet40 with
    AugTune-style augmentation blending.

    Each training batch carries (origin, data, label): the original cloud and
    an augmented variant. Confidence scores of both versions drive a per-sample
    blend coefficient alpha, producing an adaptively-tuned training sample.
    Evaluates on the test split every epoch and checkpoints the best model.

    Args:
        args: experiment namespace (model, batch sizes, lr, epochs, l, ...).
        io: logger exposing cprint().
    """
    train_loader = DataLoader(ModelNet40(args, partition='train'), num_workers=8, batch_size=args.batch_size, shuffle=True, drop_last=True)
    test_loader = DataLoader(ModelNet40(args, partition='test'), num_workers=8, batch_size=args.test_batch_size, shuffle=True, drop_last=False)
    device = torch.device(('cuda' if args.cuda else 'cpu'))
    # Model selection.
    if (args.model == 'pointnet'):
        model = PointNet(args).to(device)
    elif (args.model == 'dgcnn'):
        model = DGCNN(args).to(device)
    else:
        raise Exception('Not implemented')
    print(str(model))
    model = nn.DataParallel(model)
    print("Let's use", torch.cuda.device_count(), 'GPUs!')
    # Optimizer: SGD uses a 100x base lr (common DGCNN recipe).
    if args.use_sgd:
        print('Use SGD')
        opt = optim.SGD(model.parameters(), lr=(args.lr * 100), momentum=args.momentum, weight_decay=0.0001)
    else:
        print('Use Adam')
        opt = optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.0001)
    scheduler = CosineAnnealingLR(opt, args.epochs, eta_min=args.lr)
    criterion = cal_loss
    best_test_acc = 0
    for epoch in range(args.epochs):
        train_loss = 0.0
        count = 0.0
        model.train()
        train_pred = []
        train_true = []
        for (origin, data, label) in train_loader:
            (origin, data, label) = (origin.to(device), data.to(device), label.to(device).squeeze())
            # Models expect channels-first point clouds: (B, C, N).
            origin = origin.permute(0, 2, 1)
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            # AugTune blending: score both versions without gradients, then
            # mix so the blended sample reaches a target confidence.
            with torch.no_grad():
                pred_origin = model(origin)
                pred_data = model(data)
                # Ground-truth class confidence of each version (logits are
                # presumably log-probabilities, hence .exp() — confirm).
                c_origin = (pred_origin.exp() * F.one_hot(label, pred_origin.shape[(- 1)])).sum(1)
                c_data = (pred_data.exp() * F.one_hot(label, pred_data.shape[(- 1)])).sum(1)
                # Target confidence: at least (1 - l) of the original's.
                c_target = torch.max(((1 - args.l) * c_origin), c_data)
                # Per-sample blend weight in [0, 1] (epsilon avoids div-by-zero).
                alpha = ((c_target - c_data) / ((c_origin - c_data) + 0.0001)).unsqueeze(1)
                alpha = torch.clamp(alpha, min=0, max=1).reshape((- 1), 1, 1)
                data = ((alpha * origin) + ((1 - alpha) * data))
                data = normalize_point_cloud_batch(data)
                data = translate_pointcloud_batch(data)
            opt.zero_grad()
            logits = model(data)
            loss = criterion(logits, label)
            loss.backward()
            opt.step()
            preds = logits.max(dim=1)[1]
            count += batch_size
            train_loss += (loss.item() * batch_size)
            train_true.append(label.cpu().numpy())
            train_pred.append(preds.detach().cpu().numpy())
        scheduler.step()
        train_true = np.concatenate(train_true)
        train_pred = np.concatenate(train_pred)
        outstr = ('Train %d, loss: %.6f, train acc: %.6f, train avg acc: %.6f' % (epoch, ((train_loss * 1.0) / count), metrics.accuracy_score(train_true, train_pred), metrics.balanced_accuracy_score(train_true, train_pred)))
        io.cprint(outstr)
        # Evaluation pass (NOTE(review): no torch.no_grad() here — gradients
        # are unused but still allocated; consider wrapping upstream).
        test_loss = 0.0
        count = 0.0
        model.eval()
        test_pred = []
        test_true = []
        for (data, label) in test_loader:
            (data, label) = (data.to(device), label.to(device).squeeze())
            data = data.permute(0, 2, 1)
            batch_size = data.size()[0]
            logits = model(data)
            loss = criterion(logits, label)
            preds = logits.max(dim=1)[1]
            count += batch_size
            test_loss += (loss.item() * batch_size)
            test_true.append(label.cpu().numpy())
            test_pred.append(preds.detach().cpu().numpy())
        test_true = np.concatenate(test_true)
        test_pred = np.concatenate(test_pred)
        test_acc = metrics.accuracy_score(test_true, test_pred)
        avg_per_class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
        outstr = ('Test %d, loss: %.6f, test acc: %.6f, test avg acc: %.6f' % (epoch, ((test_loss * 1.0) / count), test_acc, avg_per_class_acc))
        io.cprint(outstr)
        # Checkpoint whenever test accuracy matches or beats the best so far.
        if (test_acc >= best_test_acc):
            best_test_acc = test_acc
            torch.save(model.state_dict(), ('checkpoints/%s/models/model.t7' % args.exp_name))
def _prepare_output_docstrings(output_type, config_class):
    """Build the "Returns:" docstring section for a model output class.

    Strips everything up to and including the `Args:`/`Parameters:` header of
    `output_type.__doc__`, converts the remaining argument documentation, and
    prefixes the framework-appropriate (TF vs PT) return-type introduction.
    """
    docstrings = output_type.__doc__
    lines = docstrings.split('\n')
    idx = 0
    # Scan for the first "Args:"/"Parameters:" header line.
    while (idx < len(lines)) and (re.search('^\\s*(Args|Parameters):\\s*$', lines[idx]) is None):
        idx += 1
    if idx < len(lines):
        # Keep only what follows the header, reformatted for output docs.
        docstrings = _convert_output_args_doc('\n'.join(lines[(idx + 1):]))
    full_output_type = f'{output_type.__module__}.{output_type.__name__}'
    if output_type.__name__.startswith('TF'):
        intro = TF_RETURN_INTRODUCTION
    else:
        intro = PT_RETURN_INTRODUCTION
    return intro.format(full_output_type=full_output_type, config_class=config_class) + docstrings
class TestFoldBatchnorm(unittest.TestCase):
    """Checks that FoldBatchNormNodesOptimizer removes FusedBatchNormV3 nodes
    while leaving the graph's numeric outputs unchanged.

    NOTE(review): the fixture graph (conv + bias + batch norm) is built in the
    class body, so it executes once at import time, not per test.
    """
    tf.compat.v1.disable_eager_execution()
    x = tf.compat.v1.placeholder(tf.float32, [1, 224, 224, 3], name='input')
    conv_weights = tf.compat.v1.get_variable('weight', [3, 3, 3, 32], initializer=tf.compat.v1.random_normal_initializer())
    conv_bias = tf.compat.v1.get_variable('bias', [32], initializer=tf.compat.v1.random_normal_initializer())
    beta = tf.compat.v1.get_variable(name='beta', shape=[32], initializer=tf.compat.v1.random_normal_initializer())
    gamma = tf.compat.v1.get_variable(name='gamma', shape=[32], initializer=tf.compat.v1.random_normal_initializer())
    conv1 = tf.nn.conv2d(x, conv_weights, strides=[1, 1, 1, 1], padding='SAME')
    # Rebinds conv_bias to the biased conv output (the variable above is consumed here).
    conv_bias = tf.nn.bias_add(conv1, conv_bias)
    normed = tf.compat.v1.layers.batch_normalization(conv_bias)
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        # Freeze variables into constants so the optimizer can fold the BN.
        output_graph_def = graph_util.convert_variables_to_constants(sess=sess, input_graph_def=sess.graph_def, output_node_names=[normed.name.split(':')[0]])
        output_graph_def = QuantizeGraphHelper.remove_training_nodes(output_graph_def, protected_nodes=[normed.name.split(':')[0]])
        # Keep an untouched copy for the numeric comparison test.
        graph_def = copy.deepcopy(output_graph_def)
        fold_graph_def = FoldBatchNormNodesOptimizer(output_graph_def).do_transformation()
    def test_fold_output_values(self):
        """Folded and unfolded graphs must produce (nearly) identical outputs."""
        input_data = np.random.randn(1, 224, 224, 3)
        graph = tf.compat.v1.Graph()
        fold_graph = tf.compat.v1.Graph()
        # Run the original (unfolded) graph.
        with graph.as_default():
            tf.compat.v1.import_graph_def(self.graph_def, name='')
        with tf.compat.v1.Session(graph=graph) as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            x = graph.get_tensor_by_name('input:0')
            normed = graph.get_tensor_by_name('batch_normalization/FusedBatchNormV3:0')
            y = sess.run(normed, feed_dict={x: input_data})
        # Run the folded graph on the same input.
        with fold_graph.as_default():
            tf.compat.v1.import_graph_def(self.fold_graph_def, name='')
        with tf.compat.v1.Session(graph=fold_graph) as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            x = fold_graph.get_tensor_by_name('input:0')
            normed = fold_graph.get_tensor_by_name('batch_normalization/FusedBatchNormV3:0')
            y_fold = sess.run(normed, feed_dict={x: input_data})
        assert np.allclose(y, y_fold, rtol=1e-05, atol=1e-05)
    def test_do_transform(self):
        """The folded graph must contain no FusedBatchNormV3 nodes."""
        for node in self.fold_graph_def.node:
            assert (node.op not in ['FusedBatchNormV3'])
def get_wsi_loader(data_dir, batch_size=1, shuffle=False, num_threads=2, train_eval_test='val', splitter_path='./', device_id=0, num_gpus=1, seed=1, bag_size=1024, label_csv_path='./', split_num=0):
    """Build a DALI classification iterator over whole-slide-image bags.

    Wraps an ExternalInputCallable in a Train/Val pipeline depending on
    `train_eval_test` ('train', 'val', or anything else for test, which runs
    the Val pipeline on GPU) and returns a DALIClassificationIterator sized to
    the full dataset (bags * bag_size).
    """
    eii = ExternalInputCallable(data_dir=data_dir, batch_size=batch_size, split_num=split_num, splitter_path=splitter_path, shuffle=shuffle, device_id=device_id, num_gpus=num_gpus, train_eval_test=train_eval_test, bag_size=bag_size, label_csv_path=label_csv_path)
    img_size = 224
    # Arguments common to all three pipeline variants.
    pipe_kwargs = dict(batch_size=(batch_size * bag_size), eii=eii, num_threads=num_threads, device_id=device_id, seed=(seed + device_id), img_size=img_size)
    if train_eval_test == 'train':
        pipe = TrainPipeline(**pipe_kwargs)
    elif train_eval_test == 'val':
        pipe = ValPipeline(**pipe_kwargs)
    else:
        # Test split: validation pipeline, but decoded on GPU.
        pipe = ValPipeline(use_gpu=True, **pipe_kwargs)
    pipe.build()
    return DALIClassificationIterator(pipe, size=(eii.size * bag_size), auto_reset=True, last_batch_padded=True, prepare_first_batch=False)
class AlignTextModelTester():
    """Test helper that builds small AlignTextModel configs/inputs and checks
    output shapes; mirrors the transformers ModelTester pattern."""
    def __init__(self, parent, batch_size=12, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, scope=None):
        # `parent` is the unittest.TestCase providing assertion methods; all
        # other args are stored verbatim as small-model hyperparameters.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask) with random
        ids; mask/token-type tensors are None when the corresponding flag is off."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = self.get_config()
        return (config, input_ids, token_type_ids, input_mask)
    def get_config(self):
        """Build an AlignTextConfig from the stored hyperparameters (encoder-only)."""
        return AlignTextConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask):
        """Run the model in eval mode with several input combinations and
        assert the expected hidden-state and pooler output shapes."""
        model = AlignTextModel(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
            result = model(input_ids, token_type_ids=token_type_ids)
            result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() as (config, inputs_dict) for
        the shared common-test harness."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, token_type_ids, input_mask) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return (config, inputs_dict)
class TestCompletionDataset(unittest.TestCase):
    """Checks that completion-style preprocessing produces a tokenized DatasetDict."""
    @classmethod
    def setUpClass(cls):
        # BUG FIX: `setUpClass` was declared as a plain instance method taking
        # `self`; unittest invokes it once on the class, so it must be a
        # classmethod and store shared fixtures on `cls`.
        cls.tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
        class TestArgs():
            # Minimal stand-in for the data/training argument namespaces.
            train_on_inputs = False
            task = 'completion'
            max_seq_length = 512
            max_source_length = 256
            dataset_name = 'tatsu-lab/alpaca'
        cls.test_args = TestArgs()
        cls.sample_datasets = DatasetDict()
        raw_datasets = load_dataset('tatsu-lab/alpaca')
        # Keep the fixture small: only the first 100 training rows.
        cls.sample_datasets['train'] = raw_datasets['train'].select(range(100))
    def test_process(self):
        """preprocess_dataset + map must yield a tokenized DatasetDict."""
        (raw_datasets, preprocess_fn) = data_utils.preprocess_dataset(self.sample_datasets, self.tokenizer, self.test_args, self.test_args)
        column_names = list(raw_datasets['train'].features)
        tokenized_datasets = raw_datasets.map(preprocess_fn, batched=True, remove_columns=column_names)
        self.assertTrue(isinstance(tokenized_datasets, DatasetDict))
# NOTE(review): the line preceding this class originally read just
# `(frozen=True)` — the `@dataclass` decorator text was truncated during
# extraction; it is restored below. The file is expected to have
# `from dataclasses import dataclass` at the top — confirm when merging.
@dataclass(frozen=True)
class Task():
    """Immutable benchmark task record: identity/metadata plus its config,
    serialized assets and recorded measures."""

    id: Optional[int]
    name: str
    description: str
    version: str
    problem: str
    origin: str
    config: dict
    assets: List[Asset]
    measures: Dict[str, Measure]

    def measure(self, name: str) -> pd.DataFrame:
        """Return the full values frame of the named measure."""
        return self.measures[name].values

    def scalar_measure(self, name: str) -> Union[Number, str, dict]:
        """Return the first entry of the named measure's 'val' column."""
        return self.measures[name].values['val'].iloc[0]

    def data(self, aliases: Iterable[str]) -> List[object]:
        """Deserialize and return the embedded assets referenced by *aliases*.

        Aliases are resolved through config['aliases'] to asset names.
        """
        from powerlift.bench.store import BytesParser
        outputs = []
        alias_map = self.config['aliases']
        name_to_asset = {asset.name: asset for asset in self.assets}
        for alias in aliases:
            name = alias_map[alias]
            asset = name_to_asset[name]
            parsed = BytesParser.deserialize(asset.mimetype, asset.embedded)
            outputs.append(parsed)
        return outputs
def get_stat_in_paths(paths, dict_name, scalar_name):
    """Extract `scalar_name` values from `path[dict_name]` across all paths.

    If `path[dict_name]` is a dict (one info record per path), returns a flat
    list of scalars; if it is a sequence of per-step dicts, returns one list
    of scalars per path. Returns an empty 2-D array when `paths` is empty.
    """
    if not paths:
        return np.array([[]])
    # BUGFIX(review): use isinstance instead of `type(...) == dict` so dict
    # subclasses (OrderedDict, defaultdict) take the scalar branch as intended.
    if isinstance(paths[0][dict_name], dict):
        return [path[dict_name][scalar_name] for path in paths]
    return [[info[scalar_name] for info in path[dict_name]] for path in paths]
def read_csv(filename, loss_name='val/loss'):
    """Read a metrics CSV and collect validation losses per epoch.

    Returns {epoch: {'val_loss': value}} for every row whose `loss_name`
    column is non-empty; values are kept as the raw CSV strings.
    """
    import codecs
    import csv
    results = {}
    with codecs.open(filename, encoding='utf-8-sig') as handle:
        reader = csv.DictReader(handle, skipinitialspace=True)
        for record in reader:
            loss_value = record[loss_name]
            if loss_value:
                results[record['epoch']] = {'val_loss': loss_value}
    return results
class Lexicon(lazydict):
    """Lazily-populated word lexicon backed by a space-separated text file.

    Entries are filled in by load() — presumably triggered on first access by
    the lazydict base class (confirm against lazydict's contract).
    """

    def __init__(self, path=''):
        # Deliberately does not populate the dict here; only records the path.
        self._path = path

    def path(self):
        # NOTE(review): reads like it was a @property in the original source
        # (decorator possibly lost in extraction) — confirm whether callers
        # use `lexicon.path` or `lexicon.path()`.
        return self._path

    def load(self):
        # Each line with at least two space-separated fields contributes its
        # first two fields as a key/value pair. dict.update is called directly
        # to avoid re-triggering the lazy-loading machinery.
        dict.update(self, (x.split(' ')[:2] for x in _read(self._path) if (len(x.split(' ')) > 1)))
class CamembertConfig(RobertaConfig):
    """Configuration for CamemBERT; inherits everything from RobertaConfig and
    only overrides the model type and pretrained-config archive map."""

    # Map of pretrained model identifiers to their config file locations.
    pretrained_config_archive_map = CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP
    # Identifier used by the auto-classes to resolve this config type.
    model_type = 'camembert'
class MobileBertForQuestionAnswering(metaclass=DummyObject):
    """Placeholder class used when the torch backend is unavailable; any
    attempt to instantiate it raises via requires_backends (the pattern used
    for optional-dependency dummy objects)."""

    # Backends that must be installed for the real implementation.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def evaluate(model):
    """Evaluate/benchmark a compiled Keras model on the module-level
    `eval_dataloader`.

    Returns the top-k accuracy from the neural_compressor metric; per-batch
    latency is recorded and, in performance mode, latency/throughput are
    logged. Relies on module-level FLAGS, eval_dataloader and logger.
    """
    model.compile(run_eagerly=False)
    # Dataset labels are shifted by one relative to the model's outputs.
    postprocess = LabelShift(label_shift=1)
    from neural_compressor import METRICS
    metrics = METRICS('tensorflow')
    metric = metrics['topk']()
    latency_list = []
    def eval_func(dataloader, metric):
        """Run inference over dataloader, updating metric; returns per-sample latency."""
        warmup = 5  # first 5 iterations are excluded from the latency average
        iteration = None
        if (FLAGS.benchmark and (FLAGS.mode == 'performance')):
            iteration = FLAGS.iters
        for (idx, (inputs, labels)) in enumerate(dataloader):
            start = time.time()
            predictions = model.predict_on_batch(inputs)
            end = time.time()
            latency_list.append((end - start))
            (predictions, labels) = postprocess((predictions, labels))
            metric.update(predictions, labels)
            # In performance mode, stop after FLAGS.iters iterations.
            if (iteration and (idx >= iteration)):
                break
        # Mean post-warmup batch latency divided by batch size => per-sample latency.
        latency = (np.array(latency_list[warmup:]).mean() / eval_dataloader.batch_size)
        return latency
    latency = eval_func(eval_dataloader, metric)
    if FLAGS.benchmark:
        logger.info('\n{} mode benchmark result:'.format(FLAGS.mode))
        for (i, res) in enumerate(latency_list):
            logger.debug('Iteration {} result {}:'.format(i, res))
    if (FLAGS.benchmark and (FLAGS.mode == 'performance')):
        logger.info('Batch size = {}'.format(eval_dataloader.batch_size))
        logger.info('Latency: {:.3f} ms'.format((latency * 1000)))
        logger.info('Throughput: {:.3f} images/sec'.format((1.0 / latency)))
    acc = metric.result()
    return acc
def factors(n):
    """Return all positive divisors of n in ascending order.

    Returns [] for n < 1 (matching the original loop's behavior). Improved
    from O(n) trial division to O(sqrt(n)) by collecting divisor pairs.
    """
    small = []
    large = []
    i = 1
    while i * i <= n:
        if n % i == 0:
            small.append(i)
            partner = n // i
            if partner != i:  # avoid duplicating the square root of n
                large.append(partner)
        i += 1
    return small + large[::-1]
# NOTE(review): the original line here read `(t='double', spline='Spline',
# returns='double')` — the decorator name (apparently a typed-signature
# decorator such as @cython.header from this code base) was lost in
# extraction and must be restored when merging.
def scale_factor(t=-1):
    """Return the scale factor a(t).

    Returns 1 when Hubble expansion is disabled. t defaults to -1, meaning
    "use the current universal time" (universals.t). Aborts if the a(t)
    spline has not been tabulated yet.
    """
    if not enable_Hubble:
        return 1
    if t == -1:
        t = universals.t
    spline = temporal_splines.t_a
    if spline is None:
        abort('The function a(t) has not been tabulated. Have you called init_time?')
    return spline.eval(t)
def processed(f):
    """Decorator: run *f* in a separate non-daemon multiprocessing Process.

    The wrapper starts the process immediately and returns the Process
    object so the caller can join/terminate it.

    NOTE(review): the original had a bare `(f)` line where a decorator
    application was truncated; restored as functools.wraps(f) so the
    wrapper keeps f's name and docstring.
    """
    from functools import wraps

    @wraps(f)
    def wrapper(*args, **kwargs):
        proc = Process(target=f, args=args, kwargs=kwargs)
        proc.daemon = False
        proc.start()
        return proc
    return wrapper
def dataset_renderer_worker(log_ids: List[str], start_idx: int, end_idx: int, worker_id: int, kwargs: Mapping[str, Any]) -> None:
    """Render the half-open slice [start_idx, end_idx) of log_ids in one worker.

    Required kwargs keys: 'local_dataset_dir', 'config', 'dataloader'.
    Per-log failures are logged and skipped rather than propagated.
    """
    logging.info(f'Worker {worker_id} started...')
    local_dataset_dir = kwargs['local_dataset_dir']
    config = kwargs['config']
    dataloader = kwargs['dataloader']
    # A GPU is only needed for BEV rendering that recomputes segmentation or
    # uses ray tracing as its projection method.
    use_gpu = (isinstance(config, BevRenderingConfig) and (config.recompute_segmentation or (config.projection_method == 'ray_tracing')))
    if use_gpu:
        if (not torch.cuda.is_available()):
            raise RuntimeError('CUDA is not supported on your platform.')
        num_gpus = torch.cuda.device_count()
        # Spread workers evenly across GPUs; assumes num_processes is a
        # multiple of num_gpus — TODO confirm with the caller.
        gpu_id = (worker_id // (config.num_processes // num_gpus))
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
    logging.info('Creating Argoverse dataloader...')
    chunk_sz = (end_idx - start_idx)
    for idx in range(start_idx, end_idx):
        if ((idx % 10) == 0):
            pct_completed = (((idx - start_idx) / chunk_sz) * 100)
            logging.info(f'Completed {pct_completed:.2f}%')
        log_id = log_ids[idx]
        try:
            log_dir = ((Path(local_dataset_dir) / 'logs') / log_id)
            render_log_dataset(config=config, local_dataset_dir=local_dataset_dir, log_id=log_id, log_dir=log_dir, dataloader=dataloader)
        except Exception as e:
            # Best effort: keep rendering the remaining logs if one fails.
            logging.exception(f'Extraction failed for {log_id}')
class CiderScorer(object):
    """CIDEr scorer: accumulates cooked (test, references) n-gram counts and
    scores hypotheses by tf-idf-weighted n-gram cosine similarity with a
    gaussian sentence-length penalty.

    df_mode 'corpus' derives document frequencies from the added references;
    any other value loads pre-computed statistics from data/<df_mode>.p.
    """

    def copy(self):
        """Return a new scorer with shallow copies of the cooked test/ref lists."""
        new = CiderScorer(n=self.n)
        new.ctest = copy.copy(self.ctest)
        new.crefs = copy.copy(self.crefs)
        return new

    def copy_empty(self):
        """Return a new scorer sharing df statistics but holding no sentences."""
        new = CiderScorer(df_mode='corpus', n=self.n, sigma=self.sigma)
        new.df_mode = self.df_mode
        new.ref_len = self.ref_len
        new.document_frequency = self.document_frequency
        return new

    def __init__(self, df_mode='corpus', test=None, refs=None, n=4, sigma=6.0):
        """Initialize and optionally cook an initial (test, refs) pair.

        n is the maximum n-gram order; sigma controls the length penalty.
        """
        self.n = n
        self.sigma = sigma
        self.crefs = []
        self.ctest = []
        self.df_mode = df_mode
        self.ref_len = None
        if (self.df_mode != 'corpus'):
            # Pickle written under Python 2, hence the latin1 decoding on
            # Python 3. NOTE(review): the file handle is never closed.
            pkl_file = cPickle.load(open(os.path.join('data', (df_mode + '.p')), 'rb'), **(dict(encoding='latin1') if six.PY3 else {}))
            self.ref_len = np.log(float(pkl_file['ref_len']))
            self.document_frequency = pkl_file['document_frequency']
        self.cook_append(test, refs)

    def clear(self):
        """Drop all accumulated test/reference counts."""
        self.crefs = []
        self.ctest = []

    def cook_append(self, test, refs):
        """Cook and store one (test, refs) pair; a None test is stored as a
        placeholder so crefs and ctest stay aligned."""
        if (refs is not None):
            self.crefs.append(cook_refs(refs))
            if (test is not None):
                self.ctest.append(cook_test(test))
            else:
                self.ctest.append(None)

    def size(self):
        """Number of accumulated items (asserts test/ref lists are aligned)."""
        assert (len(self.crefs) == len(self.ctest)), ('refs/test mismatch! %d<>%d' % (len(self.crefs), len(self.ctest)))
        return len(self.crefs)

    def __iadd__(self, other):
        """Append a (test, refs) tuple, or merge another CiderScorer's contents."""
        if (type(other) is tuple):
            self.cook_append(other[0], other[1])
        else:
            self.ctest.extend(other.ctest)
            self.crefs.extend(other.crefs)
        return self

    def compute_doc_freq(self):
        """Count, for every n-gram, the number of images whose reference set
        contains it (each image counted at most once per n-gram)."""
        for refs in self.crefs:
            for ngram in set([ngram for ref in refs for (ngram, count) in ref.items()]):
                self.document_frequency[ngram] += 1

    def compute_cider(self):
        """Return the per-image CIDEr scores for all accumulated pairs."""
        def counts2vec(cnts):
            """Map n-gram counts to per-order tf-idf vectors, their norms and
            the unigram length of the sentence."""
            vec = [defaultdict(float) for _ in range(self.n)]
            length = 0
            norm = [0.0 for _ in range(self.n)]
            for (ngram, term_freq) in cnts.items():
                df = np.log(max(1.0, self.document_frequency[ngram]))
                n = (len(ngram) - 1)
                # ref_len is log(#documents); (ref_len - df) is idf in log space.
                vec[n][ngram] = (float(term_freq) * (self.ref_len - df))
                norm[n] += pow(vec[n][ngram], 2)
                if (n == 1):
                    length += term_freq
            norm = [np.sqrt(n) for n in norm]
            return (vec, norm, length)

        def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref):
            """Per-order clipped cosine similarity with gaussian length penalty."""
            delta = float((length_hyp - length_ref))
            val = np.array([0.0 for _ in range(self.n)])
            for n in range(self.n):
                for (ngram, count) in vec_hyp[n].items():
                    # Hypothesis term weight is clipped by the reference's.
                    val[n] += (min(vec_hyp[n][ngram], vec_ref[n][ngram]) * vec_ref[n][ngram])
                if ((norm_hyp[n] != 0) and (norm_ref[n] != 0)):
                    val[n] /= (norm_hyp[n] * norm_ref[n])
                assert (not math.isnan(val[n]))
                # Penalize sentence-length differences with a gaussian.
                val[n] *= (np.e ** ((- (delta ** 2)) / (2 * (self.sigma ** 2))))
            return val

        if (self.df_mode == 'corpus'):
            self.ref_len = np.log(float(len(self.crefs)))
        scores = []
        for (test, refs) in zip(self.ctest, self.crefs):
            (vec, norm, length) = counts2vec(test)
            score = np.array([0.0 for _ in range(self.n)])
            for ref in refs:
                (vec_ref, norm_ref, length_ref) = counts2vec(ref)
                score += sim(vec, vec_ref, norm, norm_ref, length, length_ref)
            # Average over n-gram orders and references; x10 by convention.
            score_avg = np.mean(score)
            score_avg /= len(refs)
            score_avg *= 10.0
            scores.append(score_avg)
        return scores

    def compute_score(self, option=None, verbose=0):
        """Compute df (in corpus mode) and return (mean score, per-image scores)."""
        if (self.df_mode == 'corpus'):
            self.document_frequency = defaultdict(float)
            self.compute_doc_freq()
            # Sanity: no n-gram can appear in more documents than exist.
            assert (len(self.ctest) >= max(self.document_frequency.values()))
        score = self.compute_cider()
        return (np.mean(np.array(score)), np.array(score))
class Config():
    """Global configuration for locating and loading the libclang shared
    library. The set_* hooks must be called before the library is loaded."""

    library_path = None
    library_file = None
    compatibility_check = False
    loaded = False

    @staticmethod
    def set_library_path(path):
        """Set the directory to search for libclang (before first load).

        NOTE(review): the @staticmethod decorators on the three setters were
        lost in extraction and are restored here; also fixed the doubled
        word 'before before' in the error messages.
        """
        if Config.loaded:
            raise Exception('library path must be set before using any other functionalities in libclang.')
        Config.library_path = path

    @staticmethod
    def set_library_file(filename):
        """Set the exact libclang file to load (before first load)."""
        if Config.loaded:
            raise Exception('library file must be set before using any other functionalities in libclang.')
        Config.library_file = filename

    @staticmethod
    def set_compatibility_check(check_status):
        """Enable/disable the function-registration compatibility check."""
        if Config.loaded:
            raise Exception('compatibility_check must be set before using any other functionalities in libclang.')
        Config.compatibility_check = check_status

    @property
    def lib(self):
        """The loaded libclang handle, loaded lazily and cached per instance.

        BUGFIX(review): `lib` lost its property/cached-property decorator in
        extraction; as a plain method, `getattr(self.lib, name)` in
        function_exists operated on the bound method object and always
        raised AttributeError.
        """
        cached = getattr(self, '_lib', None)
        if cached is None:
            cached = self.get_cindex_library()
            register_functions(cached, (not Config.compatibility_check))
            Config.loaded = True
            self._lib = cached
        return cached

    def get_filename(self):
        """Resolve the libclang filename: explicit file, else a platform
        default, optionally prefixed with the configured library path."""
        if Config.library_file:
            return Config.library_file
        import platform
        name = platform.system()
        if (name == 'Darwin'):
            file = 'libclang.dylib'
        elif (name == 'Windows'):
            file = 'libclang.dll'
        else:
            file = 'libclang.so'
        if Config.library_path:
            file = ((Config.library_path + '/') + file)
        return file

    def get_cindex_library(self):
        """Load the shared library via ctypes; wrap load failures with a hint."""
        try:
            library = cdll.LoadLibrary(self.get_filename())
        except OSError as e:
            msg = (str(e) + '. To provide a path to libclang use Config.set_library_path() or Config.set_library_file().')
            raise LibclangError(msg)
        return library

    def function_exists(self, name):
        """Return True if the loaded library exposes the named function."""
        try:
            getattr(self.lib, name)
        except AttributeError:
            return False
        return True
class AdaINorm2d(_AdaINorm):
    """Adaptive instance normalization over 4D (N, C, H, W) inputs."""

    def _check_input_dim(self, input):
        """Reject anything that is not a 4D tensor."""
        ndim = input.dim()
        if ndim != 4:
            raise ValueError('expected 4D input (got {}D input)'.format(ndim))
def remove_tmp_file(func):
    """Decorator: inject a temporary ONNX path via kwargs['onnx_file'] and
    remove the file after the wrapped call, even on failure.

    NOTE(review): the original had a bare `(func)` line where a decorator
    application was truncated; restored as functools.wraps(func) so the
    wrapper keeps func's name and docstring.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        onnx_file = 'tmp.onnx'
        kwargs['onnx_file'] = onnx_file
        try:
            result = func(*args, **kwargs)
        finally:
            # Clean up whether or not func raised.
            if os.path.exists(onnx_file):
                os.remove(onnx_file)
        return result
    return wrapper
def adjust_axes(r, t, fig, axes):
    """Stretch the x-axis upper limit so that text *t* fits in figure *fig*.

    The text's rendered width (in inches) is compared to the figure width and
    the x-limits are scaled by the resulting proportion.
    """
    extent = t.get_window_extent(renderer=r)
    text_width_inches = extent.width / fig.dpi  # pixels -> inches
    current_width = fig.get_figwidth()
    proportion = (current_width + text_width_inches) / current_width
    lower, upper = axes.get_xlim()
    axes.set_xlim([lower, upper * proportion])
# NOTE(review): the decorator line above this fixture was truncated to
# `(scope='module')` in the original; restored as a pytest module-scoped
# fixture. pytest is expected to be imported at the top of this test file —
# confirm when merging.
@pytest.fixture(scope='module')
def lapicque_hidden_reset_none_instance():
    """Module-scoped Lapicque neuron with hidden state and no reset mechanism."""
    return snn.Lapicque(beta=0.5, init_hidden=True, reset_mechanism='none')
def gen_voxel(cropped, com_2d, cube, voxel_len):
    """Convert a cropped depth patch into a binary occupancy voxel grid.

    Args:
        cropped: (H, W) depth crop.
        com_2d: center of mass; only com_2d[2] (reference depth) is used.
        cube: bounding-cube extents; only cube[2] (depth span) is used.
        voxel_len: voxels per side of the cubic output grid.

    Returns:
        (voxel_len, voxel_len, voxel_len) int64 array, 1 at occupied cells.
    """
    H, W = cropped.shape
    x = np.arange(H)
    y = np.arange(W)
    x, y = np.meshgrid(x, y, indexing='ij')
    z = cropped.copy()
    # Keep only pixels whose depth lies within the cube centred on com_2d[2].
    mask = np.bitwise_and((cropped >= (com_2d[2] - (cube[2] / 2.0))), (cropped < (com_2d[2] + (cube[2] / 2.0))))
    mask = mask.reshape(-1)
    x = x.reshape(-1)[mask]
    y = y.reshape(-1)[mask]
    z = z.reshape(-1)[mask]
    # Normalize all coordinates into [0, 1).
    x = x / H
    y = y / W
    z = ((z - com_2d[2]) + (cube[2] / 2)) / cube[2]
    # BUGFIX(review): np.int was removed in NumPy 1.24; use np.int64 instead.
    voxel = np.zeros([voxel_len, voxel_len, voxel_len], dtype=np.int64)
    x = (x * voxel_len).astype(np.int64)
    y = (y * voxel_len).astype(np.int64)
    z = (z * voxel_len).astype(np.int64)
    voxel[(x, y, z)] = 1
    return voxel
def eval_test(model, test_loader):
    """Evaluate *model* on *test_loader* (CUDA) and return (avg_loss, accuracy).

    Switches the model to eval mode, disables gradients, prints a summary.
    """
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for (data, target) in test_loader:
            (data, target) = (data.cuda(), target.cuda())
            output = model(data)
            # BUGFIX(review): size_average=False was removed from torch;
            # reduction='sum' is the supported equivalent (sum, normalized below).
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    test_loss /= len(test_loader.dataset)
    print('Test: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(test_loss, correct, len(test_loader.dataset), ((100.0 * correct) / len(test_loader.dataset))))
    test_accuracy = (correct / len(test_loader.dataset))
    return (test_loss, test_accuracy)
def get_flat_arcs(index, sen):
    """Flatten every scoped (head, label) pair of sentence *sen* into FlatArc
    rows, stripping any 'IN:' label prefix."""
    sid = sen.id
    flat = []
    for position, token in enumerate(sen):
        if not token.scope:
            continue
        for _head, label in token.scope:
            if label.startswith('IN:'):
                label = label[3:]
            flat.append(FlatArc(index, sid, token.form, token.lemma, token.upos, token.xpos, token.deprel, position, label))
    return flat
class OneInstanceLauncher(Launcher):
    """Latency-mode launcher that sweeps batch sizes (and, with
    instance_num='auto', core counts) for a single-instance run and records
    the configuration with the lowest measured latency."""

    def launch(self, args, memory_prefix_list):
        """Run the benchmark sweep described by *args*.

        args.instance_num must be '1' (fixed core count) or 'auto' (sweep
        cores from half a socket up to a full socket); anything else only
        prints a usage message. Results are appended to per-run log files
        via latency_mode_grab_log.
        """
        import shlex  # hoisted out of the loops (original imported per iteration)
        processes = []
        cmd = []
        cmd_for_print = []
        tmp_log_path = ''
        cores = 1
        current_path = os.path.abspath(os.getcwd())
        if args.batch_size == 'auto':
            batch_size_list = '1,2,4,8,16,32,64,128'.split(',')
        else:
            batch_size_list = args.batch_size.split(',')
        first_write = True
        instance = 1
        if args.instance_num == '1':
            cores = self.cores_per_socket
            core_list = np.arange(0, cores)
            # BUGFIX(review): the original line was truncated to `min_latency =`
            # (a syntax error); seed with +inf so the first measurement wins.
            min_latency = float('inf')
            min_config = Configs(1, instance, cores, 'disabled', 'default', 'cycle_buffer', '', args.mode)
            for batch_str in batch_size_list:
                batch_size = int(batch_str)
                for mp_list_idx, mp_list_item in enumerate(memory_prefix_list):
                    cmd_prefix = get_cmd_prefix(core_list)
                    cmd_prefix = replace_instance_num(memory_prefix_list[mp_list_idx], instance) + cmd_prefix
                    cmd.clear()
                    cmd_for_print.clear()
                    set_cmd_prefix(cmd, core_list)
                    cmd_for_print.append(cmd_prefix)
                    cmd.append(sys.executable)
                    cmd.append('-u')
                    cmd_for_print.append(sys.executable)
                    cmd_for_print.append('-u')
                    weight_sharing = get_weight_sharing(cmd_prefix)
                    memory_allocator = get_memory_allocator(cmd_prefix)
                    memory_planning = get_memory_planning(cmd_prefix)
                    cmd.append(shlex.quote(args.program))
                    cmd_for_print.append(args.program)
                    batch_size = replace_batch(cmd, args, batch_size)
                    replace_batch(cmd_for_print, args, batch_size)
                    tmp_config = Configs(batch_size, instance, cores, weight_sharing, memory_allocator, memory_planning, memory_prefix_list[mp_list_idx], args.mode)
                    tmp_log_path = get_tmp_log_path(tmp_config, current_path, 0)
                    if not os.path.exists(current_path + '/all_latency'):
                        os.mkdir(current_path + '/all_latency')
                    cmd.append('--log_file=' + tmp_log_path)
                    cmd_for_print.append('--log_file=' + tmp_log_path)
                    cmd_s = ' '.join(cmd_for_print)
                    tmp_config.set_cmds(cmd_s)
                    env_cmd = self.launcher_env
                    set_numactl_env(env_cmd, core_list)
                    set_jemalloc_env(env_cmd, memory_allocator, self.project_path)
                    set_unified_buffer_env(env_cmd, memory_planning)
                    set_weight_sharing(env_cmd, weight_sharing)
                    set_instance_num(env_cmd, instance)
                    process = subprocess.Popen(cmd, env=env_cmd, shell=False, stdout=subprocess.PIPE)
                    processes.append(process)
                    # NOTE(review): `processes` accumulates across iterations, so
                    # earlier (already-finished) runs are re-waited; kept as-is.
                    for process in processes:
                        process.wait()
                        if process.returncode != 0:
                            raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd_s)
                    output_log_name = get_formal_log_path(args.output_file, current_path)
                    tmp_latency = latency_mode_grab_log(current_path, output_log_name, tmp_config, False, first_write, 0)
                    first_write = False
                    if tmp_latency < min_latency:
                        min_latency = tmp_latency
                        min_config.set_batch(batch_size)
                        min_config.set_instance(instance)
                        min_config.set_cores_per_instance(cores)
                        min_config.set_weight_sharing(weight_sharing)
                        min_config.set_memory_allocator(memory_allocator)
                        min_config.set_memory_planning(memory_planning)
                        min_config.set_cmds(cmd_s)
            # Final summary entry for the best configuration found.
            latency_mode_grab_log(current_path, output_log_name, min_config, True, first_write, 0)
        elif args.instance_num == 'auto':
            cores = self.cores_per_socket
            # BUGFIX(review): same truncated `min_latency =` as above.
            min_latency = float('inf')
            min_config = Configs(1, instance, cores, 'disabled', 'default', 'cycle_buffer', '', args.mode)
            for batch_str in batch_size_list:
                batch_size = int(batch_str)
                cores_iterator = int(cores / 2)
                while cores_iterator <= cores:
                    core_list = np.arange(0, cores_iterator)
                    base_cmd_prefix = get_cmd_prefix(core_list)
                    for mp_list_idx, mp_list_item in enumerate(memory_prefix_list):
                        # BUGFIX(review): derive from the base prefix each
                        # iteration; the original re-used `cmd_prefix` and so
                        # accumulated prefixes across memory-prefix items.
                        cmd_prefix = replace_instance_num(memory_prefix_list[mp_list_idx], instance) + base_cmd_prefix
                        cmd.clear()
                        cmd_for_print.clear()
                        set_cmd_prefix(cmd, core_list)
                        cmd_for_print.append(cmd_prefix)
                        cmd.append(sys.executable)
                        cmd.append('-u')
                        cmd_for_print.append(sys.executable)
                        cmd_for_print.append('-u')
                        weight_sharing = get_weight_sharing(cmd_prefix)
                        memory_allocator = get_memory_allocator(cmd_prefix)
                        memory_planning = get_memory_planning(cmd_prefix)
                        cmd.append(shlex.quote(args.program))
                        cmd_for_print.append(args.program)
                        batch_size = replace_batch(cmd, args, batch_size)
                        replace_batch(cmd_for_print, args, batch_size)
                        tmp_config = Configs(batch_size, instance, cores_iterator, weight_sharing, memory_allocator, memory_planning, memory_prefix_list[mp_list_idx], args.mode)
                        tmp_log_path = get_tmp_log_path(tmp_config, current_path, 0)
                        if not os.path.exists(current_path + '/all_latency'):
                            os.mkdir(current_path + '/all_latency')
                        log_file_arg = '--log_file=' + tmp_log_path
                        # BUGFIX(review): the original appended --log_file to
                        # `cmd` twice, passing the flag to the program twice.
                        cmd.append(log_file_arg)
                        cmd_for_print.append(log_file_arg)
                        cmd_s = ' '.join(cmd)
                        tmp_config.set_cmds(cmd_s)
                        env_cmd = self.launcher_env
                        set_numactl_env(env_cmd, core_list)
                        set_jemalloc_env(env_cmd, memory_allocator, self.project_path)
                        set_unified_buffer_env(env_cmd, memory_planning)
                        set_weight_sharing(env_cmd, weight_sharing)
                        set_instance_num(env_cmd, instance)
                        process = subprocess.Popen(cmd, env=env_cmd, shell=False, stdout=subprocess.PIPE)
                        processes.append(process)
                        for process in processes:
                            process.wait()
                            if process.returncode != 0:
                                # Consistency: report the printable command as branch '1' does.
                                raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd_s)
                        output_log_name = get_formal_log_path(args.output_file, current_path)
                        latency_tmp = latency_mode_grab_log(current_path, output_log_name, tmp_config, False, first_write, 0)
                        first_write = False
                        if latency_tmp < min_latency:
                            min_latency = latency_tmp
                            min_config.set_batch(batch_size)
                            min_config.set_instance(instance)
                            min_config.set_cores_per_instance(cores_iterator)
                            min_config.set_weight_sharing(weight_sharing)
                            min_config.set_memory_allocator(memory_allocator)
                            min_config.set_memory_planning(memory_planning)
                            min_config.set_cmds(tmp_config.get_cmds())
                    cores_iterator += 1
            output_log_name = get_min_latency_output_log_path(min_config, current_path)
            if args.output_file != '':
                output_log_name = args.output_file
            latency_mode_grab_log(current_path, output_log_name, min_config, True, first_write, 0)
        else:
            print('Latency mode only support instance=auto or instance=1 !!!')
# NOTE(review): `_module()` below is the tail of a truncated decorator —
# almost certainly `@DATASETS.register_module()` from mmsegmentation's
# registry; confirm and restore when merging.
_module()
class ADE20KDataset(CustomDataset):
    """ADE20K semantic segmentation dataset (150 classes, zero label reduced).

    Images use the '.jpg' suffix and segmentation maps '.png'; label 0 is
    treated as 'ignore' via reduce_zero_label, so saved predictions are
    shifted back by +1 in results2img.
    """

    # The 150 ADE20K class names (note: 'bed ' keeps its original trailing space).
    CLASSES = ('wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ', 'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth', 'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car', 'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug', 'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe', 'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column', 'signboard', 'chest of drawers', 'counter', 'sand', 'sink', 'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path', 'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door', 'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table', 'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove', 'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar', 'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower', 'chandelier', 'awning', 'streetlight', 'booth', 'television receiver', 'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister', 'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van', 'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything', 'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent', 'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank', 'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake', 'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce', 'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen', 'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass', 'clock', 'flag')
    # RGB palette, one color per class, in the same order as CLASSES.
    PALETTE = [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50], [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255], [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7], [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82], [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3], [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255], [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220], [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224], [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255], [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7], [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153], [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255], [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0], [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255], [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255], [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255], [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0], [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0], [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255], [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255], [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20], [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255], [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255], [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255], [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0], [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0], [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255], [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112], [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160], [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163], [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0], [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0], [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255], [255, 255, 0], [0, 153, 255], 
    [0, 41, 255], [0, 255, 204], [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255], [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255], [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194], [102, 255, 0], [92, 0, 255]]

    def __init__(self, **kwargs):
        # Fix the ADE20K file-suffix conventions and enable zero-label reduction.
        super(ADE20KDataset, self).__init__(img_suffix='.jpg', seg_map_suffix='.png', reduce_zero_label=True, **kwargs)

    def results2img(self, results, imgfile_prefix, to_label_id):
        """Save each per-image result as '<basename>.png' under imgfile_prefix.

        Returns the list of written file paths. `to_label_id` is accepted for
        API compatibility but unused here.
        """
        mmcv.mkdir_or_exist(imgfile_prefix)
        result_files = []
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.img_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            png_filename = osp.join(imgfile_prefix, f'{basename}.png')
            # Undo reduce_zero_label: stored labels are the predictions + 1.
            result = (result + 1)
            output = Image.fromarray(result.astype(np.uint8))
            output.save(png_filename)
            result_files.append(png_filename)
            prog_bar.update()
        return result_files

    def format_results(self, results, imgfile_prefix=None, to_label_id=True):
        """Write results to PNGs and return (result_files, tmp_dir).

        When imgfile_prefix is None a TemporaryDirectory is created; the
        caller must keep tmp_dir alive while using the files and clean it up.
        """
        assert isinstance(results, list), 'results must be a list'
        assert (len(results) == len(self)), f'The length of results is not equal to the dataset len: {len(results)} != {len(self)}'
        if (imgfile_prefix is None):
            tmp_dir = tempfile.TemporaryDirectory()
            imgfile_prefix = tmp_dir.name
        else:
            tmp_dir = None
        result_files = self.results2img(results, imgfile_prefix, to_label_id)
        return (result_files, tmp_dir)
def get_mean_std(exp_name):
    """Aggregate result.csv over three repeated runs of an experiment.

    Reads `<root>/<exp_name>-r{1,2,3}/result.csv`, scales values to percent,
    and returns (mean0, mean3, mean6, std0, std3, std6) taken from row 24
    (columns 0, 3 and 6 of the per-epoch results).
    """
    root_path = '/data/sls/scratch/yuangong/avbyol/egs/vggsound/exp/'
    runs = []
    for suffix in ['-r1', '-r2', '-r3']:
        csv_path = root_path + exp_name + suffix + '/result.csv'
        runs.append(np.loadtxt(csv_path, delimiter=',') * 100)
    stacked = np.stack(runs)
    run_mean = np.mean(stacked, axis=0)
    run_std = np.std(stacked, axis=0)
    max_idx = 24
    run_mean = run_mean[max_idx, [0, 3, 6]]
    run_std = run_std[max_idx, [0, 3, 6]]
    return (run_mean[0], run_mean[1], run_mean[2], run_std[0], run_std[1], run_std[2])
# NOTE(review): `_config` below looks like the tail of a truncated decorator
# (e.g. sacred's `@ex.config` or a project `@register_config`) — confirm
# against the original source when merging.
_config
def pnn_rigidity():
    """PNN (progressive neural network) experiment configuration.

    Sacred-style config function: the framework harvests local bindings,
    hence no return statement.
    """
    cfg = {'learner': {'model': 'LifelongSidetuneNetwork', 'model_kwargs': {'use_baked_encoding': False, 'base_class': 'TaskonomyEncoderWithCache', 'side_class': 'FCN5ProgressiveH', 'pnn': True, 'dense': False}}}
class VisionTextDualEncoderProcessor(ProcessorMixin):
    """Combines an image processor and a tokenizer into a single processor
    for vision-text dual encoder models."""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'AutoImageProcessor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Both components are required; `feature_extractor` is accepted as a
        deprecated alias for `image_processor`."""
        if ('feature_extractor' in kwargs):
            warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
            feature_extractor = kwargs.pop('feature_extractor')
            image_processor = (image_processor if (image_processor is not None) else feature_extractor)
        if (image_processor is None):
            raise ValueError('You have to specify an image_processor.')
        if (tokenizer is None):
            raise ValueError('You have to specify a tokenizer.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`.

        With both, returns the text encoding augmented with 'pixel_values';
        with text only, the text encoding; with images only, a BatchEncoding
        of the image features. Extra kwargs are forwarded to both components.
        """
        if ((text is None) and (images is None)):
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if (text is not None):
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if (images is not None):
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if ((text is not None) and (images is not None)):
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif (text is not None):
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # BUGFIX(review): restored the @property decorator lost in extraction;
        # callers read model_input_names as an attribute, not a method.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # Preserve order while de-duplicating.
        return list(dict.fromkeys((tokenizer_input_names + image_processor_input_names)))

    @property
    def feature_extractor_class(self):
        # BUGFIX(review): restored @property (deprecated accessor).
        warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # BUGFIX(review): restored @property (deprecated accessor).
        warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
        return self.image_processor
def generate_regnet_parameters(w_a, w_0, w_m, d, q=8):
    """Generate per-block widths and the stage layout for a RegNet.

    w_a is the width slope, w_0 the initial width, w_m the width multiplier
    between stages, d the network depth and q the width quantization step.
    Returns (stage_widths, stage_depths, num_stages, total_stages,
    per_block_widths, continuous_widths).
    """
    assert w_a >= 0 and w_0 > 0 and w_m > 1 and w_0 % q == 0
    # Continuous linear width schedule, then snapped to powers of w_m.
    widths_cont = w_0 + np.arange(d) * w_a
    exponents = np.round(np.log(widths_cont / w_0) / np.log(w_m))
    widths_all = w_0 * np.power(w_m, exponents)
    # Quantize to multiples of q.
    widths_all = np.round(np.divide(widths_all, q)).astype(int) * q
    stage_widths, stage_depths = np.unique(widths_all, return_counts=True)
    num_stages = len(stage_widths)
    total_stages = exponents.max() + 1
    stage_widths, stage_depths, widths_all, widths_cont = (arr.tolist() for arr in (stage_widths, stage_depths, widths_all, widths_cont))
    return (stage_widths, stage_depths, num_stages, total_stages, widths_all, widths_cont)
class MotionEncoderBiGRUCo(nn.Module):
    """Motion encoder: linear embedding -> bidirectional GRU -> MLP head.

    The final states of both GRU directions are concatenated and projected
    to `output_size`. The initial hidden state is a learned parameter.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(MotionEncoderBiGRUCo, self).__init__()
        self.input_emb = nn.Linear(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size, batch_first=True, bidirectional=True)
        self.output_net = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.LayerNorm(hidden_size),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(hidden_size, output_size),
        )
        self.hidden_size = hidden_size
        # Learned initial hidden state, replicated across the batch in forward.
        self.hidden = nn.Parameter(torch.randn((2, 1, self.hidden_size), requires_grad=True))

    def forward(self, inputs, m_lens):
        """Encode (batch, seq, input_size) motions with per-sample lengths m_lens."""
        batch = inputs.shape[0]
        embedded = self.input_emb(inputs)
        init_hidden = self.hidden.repeat(1, batch, 1)
        lengths = m_lens.data.tolist()
        packed = pack_padded_sequence(embedded, lengths, batch_first=True)
        _, final_hidden = self.gru(packed, init_hidden)
        combined = torch.cat([final_hidden[0], final_hidden[1]], dim=-1)
        return self.output_net(combined)
def prepare_minibatch(egs_file, minibatch_size):
    """Load chain examples from *egs_file*, shuffle them in place, and merge
    them into minibatches of the requested size."""
    examples = load_egs(egs_file)
    random.shuffle(examples)
    return kaldi.chain.MergeChainEgs(examples, str(minibatch_size))
class TensorboardManager():
    """Thin wrapper around tensorboardX.SummaryWriter that logs scalar
    metrics under '<split>_<name>' tags."""

    def __init__(self, path):
        self.writer = tensorboardX.SummaryWriter(path)

    def update(self, split, step, vals):
        """Log every metric in *vals* at *step*, tagged with the split name."""
        for name, value in vals.items():
            tag = '%s_%s' % (split, name)
            self.writer.add_scalar(tag, value, step)

    def close(self):
        """Flush pending events and release the writer."""
        self.writer.flush()
        self.writer.close()
class TrendBlock(Block):
    """N-BEATS trend block: shares theta weights between backcast and forecast
    and maps them through the trend basis."""

    def __init__(self, units, thetas_dim, past_seq_len=10, future_seq_len=5, nb_harmonics=None):
        # nb_harmonics is accepted for signature parity with the seasonality
        # block but is unused here.
        super(TrendBlock, self).__init__(units, thetas_dim, past_seq_len, future_seq_len, share_thetas=True)

    def forward(self, x):
        hidden = super(TrendBlock, self).forward(x)
        backcast = trend_model(self.theta_b_fc(hidden), self.backcast_linspace)
        forecast = trend_model(self.theta_f_fc(hidden), self.forecast_linspace)
        return (backcast, forecast)
class TestStochasticSwap(QiskitTestCase):
def test_multiple_registers_with_layout_adjust(self):
    """Two quantum registers with an explicit layout: routing leaves the
    already-compatible DAG unchanged."""
    coupling = CouplingMap([[0, 1], [1, 2]])
    qreg = QuantumRegister(2, 'q')
    areg = QuantumRegister(1, 'a')
    creg = ClassicalRegister(3, 'c')
    circ = QuantumCircuit(qreg, areg, creg)
    circ.cx(qreg[0], areg[0])
    circ.cx(qreg[1], areg[0])
    circ.measure(qreg[0], creg[0])
    circ.measure(qreg[1], creg[1])
    circ.measure(areg[0], creg[2])
    original_dag = circuit_to_dag(circ)
    layout = Layout({qreg[0]: 0, qreg[1]: 1, areg[0]: 2})
    routed = StochasticSwap(coupling, layout, 20, 13).run(original_dag)
    self.assertEqual(original_dag, routed)
def test_multiple_registers_with_good_layout(self):
    """A layout that already satisfies the coupling map requires no swaps."""
    coupling = CouplingMap([[0, 1], [1, 2]])
    qreg = QuantumRegister(2, 'q')
    areg = QuantumRegister(1, 'a')
    creg = ClassicalRegister(3, 'c')
    circ = QuantumCircuit(qreg, areg, creg)
    circ.cx(qreg[0], areg[0])
    circ.cx(qreg[1], areg[0])
    circ.measure(qreg[0], creg[0])
    circ.measure(qreg[1], creg[1])
    circ.measure(areg[0], creg[2])
    original_dag = circuit_to_dag(circ)
    layout = Layout({qreg[0]: 0, areg[0]: 1, qreg[1]: 2})
    routed = StochasticSwap(coupling, layout, 20, 13).run(original_dag)
    self.assertEqual(original_dag, routed)
def test_multiple_registers_with_default_layout(self):
    """With no explicit layout the trivial layout suffices: DAG unchanged."""
    coupling = CouplingMap([[0, 1], [1, 2]])
    qreg = QuantumRegister(2, 'q')
    areg = QuantumRegister(1, 'a')
    creg = ClassicalRegister(3, 'c')
    circ = QuantumCircuit(qreg, areg, creg)
    circ.cx(qreg[0], areg[0])
    circ.cx(qreg[1], areg[0])
    circ.measure(qreg[0], creg[0])
    circ.measure(qreg[1], creg[1])
    circ.measure(areg[0], creg[2])
    original_dag = circuit_to_dag(circ)
    routed = StochasticSwap(coupling, None, 20, 13).run(original_dag)
    self.assertEqual(original_dag, routed)
def test_trivial_case(self):
    """A circuit that already fits the coupling map passes through unchanged."""
    coupling = CouplingMap([[0, 1], [0, 2]])
    qreg = QuantumRegister(3, 'q')
    circ = QuantumCircuit(qreg)
    circ.cx(qreg[0], qreg[1])
    circ.h(qreg[0])
    circ.cx(qreg[0], qreg[2])
    original_dag = circuit_to_dag(circ)
    routed = StochasticSwap(coupling, None, 20, 13).run(original_dag)
    self.assertEqual(original_dag, routed)
def test_trivial_in_same_layer(self):
    """Two parallel CXs in the same layer that both fit need no routing."""
    coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
    qreg = QuantumRegister(4, 'q')
    circ = QuantumCircuit(qreg)
    circ.cx(qreg[2], qreg[3])
    circ.cx(qreg[0], qreg[1])
    original_dag = circuit_to_dag(circ)
    routed = StochasticSwap(coupling, None, 20, 13).run(original_dag)
    self.assertEqual(original_dag, routed)
def test_permute_wires_1(self):
    """A single CX on uncoupled wires: the stochastic router (seed 13) keeps
    the DAG unchanged here."""
    coupling = CouplingMap([[0, 1], [0, 2]])
    qreg = QuantumRegister(3, 'q')
    circ = QuantumCircuit(qreg)
    circ.cx(qreg[1], qreg[2])
    original_dag = circuit_to_dag(circ)
    routed = StochasticSwap(coupling, None, 20, 13).run(original_dag)
    self.assertEqual(original_dag, routed)
def test_permute_wires_2(self):
    """CX across the line followed by an H: routing (seed 13) leaves the DAG
    unchanged."""
    coupling = CouplingMap([[1, 0], [1, 2]])
    qreg = QuantumRegister(3, 'q')
    circ = QuantumCircuit(qreg)
    circ.cx(qreg[0], qreg[2])
    circ.h(qreg[0])
    original_dag = circuit_to_dag(circ)
    routed = StochasticSwap(coupling, None, 20, 13).run(original_dag)
    self.assertEqual(original_dag, routed)
def test_permute_wires_3(self):
    """Back-to-back CX between the two line endpoints: DAG expected unchanged."""
    coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
    qr = QuantumRegister(4, 'q')
    circuit = QuantumCircuit(qr)
    circuit.cx(qr[0], qr[3])
    circuit.cx(qr[3], qr[0])
    dag = circuit_to_dag(circuit)
    pass_ = StochasticSwap(coupling, None, 20, 13)
    after = pass_.run(dag)
    self.assertEqual(dag, after)
def test_permute_wires_4(self):
    """CX between non-adjacent line endpoints forces swap insertion.

    With seed 13 the pass is expected to produce exactly the `expected`
    circuit (two swaps routing the CX onto coupled qubits).
    """
    coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
    qr = QuantumRegister(4, 'q')
    circuit = QuantumCircuit(qr)
    circuit.h(qr[3])
    circuit.cx(qr[3], qr[0])
    dag = circuit_to_dag(circuit)
    expected = QuantumCircuit(qr)
    expected.h(qr[3])
    expected.swap(qr[2], qr[3])
    expected.swap(qr[0], qr[1])
    expected.cx(qr[2], qr[1])
    pass_ = StochasticSwap(coupling, None, 20, 13)
    after = pass_.run(dag)
    self.assertEqual(circuit_to_dag(expected), after)
def test_permute_wires_5(self):
    """Same gates as test_permute_wires_4 in reverse order: DAG expected unchanged."""
    coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
    qr = QuantumRegister(4, 'q')
    circuit = QuantumCircuit(qr)
    circuit.cx(qr[3], qr[0])
    circuit.h(qr[3])
    dag = circuit_to_dag(circuit)
    pass_ = StochasticSwap(coupling, None, 20, 13)
    after = pass_.run(dag)
    self.assertEqual(dag, after)
def test_permute_wires_6(self):
    """Two endpoint CX gates with an H in between: DAG expected unchanged."""
    coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
    qr = QuantumRegister(4, 'q')
    circuit = QuantumCircuit(qr)
    circuit.cx(qr[3], qr[0])
    circuit.h(qr[3])
    circuit.cx(qr[0], qr[3])
    dag = circuit_to_dag(circuit)
    pass_ = StochasticSwap(coupling, None, 20, 13)
    after = pass_.run(dag)
    self.assertEqual(dag, after)
def test_overoptimization_case(self):
    """Check the pass does not over-optimize: full expected circuit comparison.

    Builds a 4-qubit circuit whose CX gates need routing on a T-shaped
    coupling map and compares against the exact swapped/remapped circuit
    (including the permuted measurement-to-clbit assignment).
    """
    coupling = CouplingMap([[0, 2], [1, 2], [2, 3]])
    qr = QuantumRegister(4, 'q')
    cr = ClassicalRegister(4, 'c')
    circuit = QuantumCircuit(qr, cr)
    circuit.x(qr[0])
    circuit.y(qr[1])
    circuit.z(qr[2])
    circuit.cx(qr[0], qr[1])
    circuit.cx(qr[2], qr[3])
    circuit.s(qr[1])
    circuit.t(qr[2])
    circuit.h(qr[3])
    circuit.cx(qr[1], qr[2])
    circuit.measure(qr[0], cr[0])
    circuit.measure(qr[1], cr[1])
    circuit.measure(qr[2], cr[2])
    circuit.measure(qr[3], cr[3])
    dag = circuit_to_dag(circuit)
    # Expected output for trials=20, seed=13; note the measurements land on
    # permuted qubits because of the inserted swaps.
    expected = QuantumCircuit(qr, cr)
    expected.z(qr[2])
    expected.y(qr[1])
    expected.x(qr[0])
    expected.swap(qr[1], qr[2])
    expected.cx(qr[0], qr[2])
    expected.swap(qr[2], qr[3])
    expected.cx(qr[1], qr[2])
    expected.s(qr[3])
    expected.t(qr[1])
    expected.h(qr[2])
    expected.measure(qr[0], cr[0])
    expected.swap(qr[1], qr[2])
    expected.cx(qr[3], qr[2])
    expected.measure(qr[1], cr[3])
    expected.measure(qr[3], cr[1])
    expected.measure(qr[2], cr[2])
    expected_dag = circuit_to_dag(expected)
    pass_ = StochasticSwap(coupling, None, 20, 13)
    after = pass_.run(dag)
    self.assertEqual(expected_dag, after)
def test_already_mapped(self):
    """A 16-qubit circuit already conforming to the device map is unchanged."""
    # NOTE(review): the edge [15, 0] appears twice in this list -- harmless
    # for CouplingMap construction, but likely a typo; confirm against the
    # intended device topology.
    coupling = CouplingMap([[1, 0], [1, 2], [2, 3], [3, 4], [3, 14], [5, 4], [6, 5], [6, 7], [6, 11], [7, 10], [8, 7], [9, 8], [9, 10], [11, 10], [12, 5], [12, 11], [12, 13], [13, 4], [13, 14], [15, 0], [15, 0], [15, 2], [15, 14]])
    qr = QuantumRegister(16, 'q')
    cr = ClassicalRegister(16, 'c')
    circ = QuantumCircuit(qr, cr)
    circ.cx(qr[3], qr[14])
    circ.cx(qr[5], qr[4])
    circ.h(qr[9])
    circ.cx(qr[9], qr[8])
    circ.x(qr[11])
    circ.cx(qr[3], qr[4])
    circ.cx(qr[12], qr[11])
    circ.cx(qr[13], qr[4])
    for j in range(16):
        circ.measure(qr[j], cr[j])
    dag = circuit_to_dag(circ)
    pass_ = StochasticSwap(coupling, None, 20, 13)
    after = pass_.run(dag)
    self.assertEqual(circuit_to_dag(circ), after)
def test_map_with_layout(self):
    """An explicit trivial layout over two registers: DAG expected unchanged."""
    coupling = CouplingMap([[0, 1], [1, 2]])
    qra = QuantumRegister(2, 'qa')
    qrb = QuantumRegister(1, 'qb')
    cr = ClassicalRegister(3, 'r')
    circ = QuantumCircuit(qra, qrb, cr)
    circ.cx(qra[0], qrb[0])
    circ.measure(qra[0], cr[0])
    circ.measure(qra[1], cr[1])
    circ.measure(qrb[0], cr[2])
    dag = circuit_to_dag(circ)
    layout = Layout({qra[0]: 0, qra[1]: 1, qrb[0]: 2})
    pass_ = StochasticSwap(coupling, layout, 20, 13)
    after = pass_.run(dag)
    self.assertEqual(dag, after)
def test_congestion(self):
    """Congested circuit where several CX gates compete for the same qubits.

    Compares against the exact expected circuit (swaps plus the permuted
    measurement assignment) for trials=20, seed=13.
    """
    coupling = CouplingMap([[0, 1], [1, 2], [1, 3]])
    qr = QuantumRegister(2, 'q')
    ar = QuantumRegister(2, 'a')
    cr = ClassicalRegister(4, 'c')
    circ = QuantumCircuit(qr, ar, cr)
    circ.cx(qr[1], ar[0])
    circ.cx(qr[0], ar[1])
    circ.measure(qr[0], cr[0])
    circ.h(qr)
    circ.h(ar)
    circ.cx(qr[0], qr[1])
    circ.cx(ar[0], ar[1])
    circ.measure(qr[0], cr[0])
    circ.measure(qr[1], cr[1])
    circ.measure(ar[0], cr[2])
    circ.measure(ar[1], cr[3])
    dag = circuit_to_dag(circ)
    expected = QuantumCircuit(qr, ar, cr)
    expected.cx(qr[1], ar[0])
    expected.swap(qr[0], qr[1])
    expected.cx(qr[1], ar[1])
    expected.h(ar[1])
    expected.h(ar[0])
    expected.measure(qr[1], cr[0])
    expected.h(qr[0])
    expected.swap(qr[1], ar[1])
    expected.h(ar[1])
    expected.cx(ar[0], qr[1])
    expected.measure(ar[0], cr[2])
    expected.swap(qr[1], ar[1])
    expected.measure(ar[1], cr[3])
    expected.cx(qr[1], qr[0])
    expected.measure(qr[1], cr[0])
    expected.measure(qr[0], cr[1])
    expected_dag = circuit_to_dag(expected)
    layout = Layout({qr[0]: 0, qr[1]: 1, ar[0]: 2, ar[1]: 3})
    pass_ = StochasticSwap(coupling, layout, 20, 13)
    after = pass_.run(dag)
    self.assertEqual(expected_dag, after)
def test_all_single_qubit(self):
    """Circuit with only single-qubit gates and measurements needs no swaps."""
    coupling = CouplingMap([[0, 1], [1, 2], [1, 3]])
    qr = QuantumRegister(2, 'q')
    ar = QuantumRegister(2, 'a')
    cr = ClassicalRegister(4, 'c')
    circ = QuantumCircuit(qr, ar, cr)
    circ.h(qr)
    circ.h(ar)
    circ.s(qr)
    circ.s(ar)
    circ.t(qr)
    circ.t(ar)
    # NOTE(review): measure(qr[0], cr[0]) is issued twice in a row -- the
    # duplicate is harmless for this identity check but looks unintended;
    # confirm.
    circ.measure(qr[0], cr[0])
    circ.measure(qr[0], cr[0])
    circ.measure(qr[1], cr[1])
    circ.measure(ar[0], cr[2])
    circ.measure(ar[1], cr[3])
    dag = circuit_to_dag(circ)
    layout = Layout({qr[0]: 0, qr[1]: 1, ar[0]: 2, ar[1]: 3})
    pass_ = StochasticSwap(coupling, layout, 20, 13)
    after = pass_.run(dag)
    self.assertEqual(dag, after)
def test_only_output_cx_and_swaps_in_coupling_map(self):
    """After routing, every two-qubit gate must act on a coupled pair."""
    coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
    qr = QuantumRegister(4, 'q')
    cr = ClassicalRegister(4, 'c')
    circuit = QuantumCircuit(qr, cr)
    circuit.h(qr[0])
    circuit.cx(qr[0], qr[1])
    circuit.cx(qr[0], qr[2])
    circuit.cx(qr[0], qr[3])
    circuit.measure(qr, cr)
    dag = circuit_to_dag(circuit)
    layout = Layout({qr[0]: 0, qr[1]: 1, qr[2]: 2, qr[3]: 3})
    pass_ = StochasticSwap(coupling, layout, 20, 5)
    after = pass_.run(dag)
    # Undirected comparison: each coupling edge as an unordered pair.
    valid_couplings = [set([layout[a], layout[b]]) for (a, b) in coupling.get_edges()]
    for _2q_gate in after.twoQ_gates():
        self.assertIn(set(_2q_gate.qargs), valid_couplings)
def test_len_coupling_vs_dag(self):
    """A coupling map larger than the circuit must raise TranspilerError."""
    coupling = CouplingMap([[0, 1], [1, 2], [2, 3], [3, 4]])
    qr = QuantumRegister(4, 'q')
    cr = ClassicalRegister(4, 'c')
    circuit = QuantumCircuit(qr, cr)
    circuit.h(qr[0])
    circuit.cx(qr[0], qr[1])
    circuit.cx(qr[0], qr[2])
    circuit.cx(qr[0], qr[3])
    circuit.measure(qr, cr)
    dag = circuit_to_dag(circuit)
    pass_ = StochasticSwap(coupling)
    with self.assertRaises(TranspilerError):
        _ = pass_.run(dag)
def test_len_layout_vs_dag(self):
    """A layout covering fewer qubits than the circuit must raise TranspilerError."""
    coupling = CouplingMap([[0, 1], [1, 2], [2, 3]])
    qr = QuantumRegister(4, 'q')
    cr = ClassicalRegister(4, 'c')
    circuit = QuantumCircuit(qr, cr)
    circuit.h(qr[0])
    circuit.cx(qr[0], qr[1])
    circuit.cx(qr[0], qr[2])
    circuit.cx(qr[0], qr[3])
    circuit.measure(qr, cr)
    dag = circuit_to_dag(circuit)
    # qr[3] is deliberately left out of the layout.
    layout = Layout({qr[0]: 0, qr[1]: 1, qr[2]: 2})
    pass_ = StochasticSwap(coupling, layout)
    with self.assertRaises(TranspilerError):
        _ = pass_.run(dag)
class ImageMirror(ImagePreprocessing):
    """Preprocessing step that mirrors images.

    Thin wrapper over the backend ImagePreprocessing; the actual flip is
    performed by the BigDL backend (presumably horizontal -- confirm
    against the backend implementation).
    """

    def __init__(self, bigdl_type='float'):
        # bigdl_type: numeric precision tag forwarded to the BigDL backend.
        super(ImageMirror, self).__init__(bigdl_type)
def get_available_gpus(session_config=None):
    """Return the names of all GPU devices visible to TensorFlow.

    When no session config is provided, the config of the current Keras
    session is used.
    """
    if session_config is None:
        session_config = get_session()._config
    from tensorflow.python.client import device_lib
    gpu_names = []
    for device in device_lib.list_local_devices(session_config):
        if device.device_type == 'GPU':
            gpu_names.append(device.name)
    return gpu_names
class LayoutLMv3Processor(ProcessorMixin):
    """Combines a LayoutLMv3 image processor and tokenizer into one processor.

    The image processor optionally runs OCR (apply_ocr) to extract words and
    bounding boxes; otherwise the caller supplies text/boxes explicitly.
    """
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Backward compatibility: accept the deprecated `feature_extractor`
        # kwarg as an alias for `image_processor`.
        if ('feature_extractor' in kwargs):
            warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = (image_processor if (image_processor is not None) else feature_extractor)
        if (image_processor is None):
            raise ValueError('You need to specify an `image_processor`.')
        if (tokenizer is None):
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text: Union[(TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput])]=None, text_pair: Optional[Union[(PreTokenizedInput, List[PreTokenizedInput])]]=None, boxes: Union[(List[List[int]], List[List[List[int]]])]=None, word_labels: Optional[Union[(List[int], List[List[int]])]]=None, add_special_tokens: bool=True, padding: Union[(bool, str, PaddingStrategy)]=False, truncation: Union[(bool, str, TruncationStrategy)]=None, max_length: Optional[int]=None, stride: int=0, pad_to_multiple_of: Optional[int]=None, return_token_type_ids: Optional[bool]=None, return_attention_mask: Optional[bool]=None, return_overflowing_tokens: bool=False, return_special_tokens_mask: bool=False, return_offsets_mapping: bool=False, return_length: bool=False, verbose: bool=True, return_tensors: Optional[Union[(str, TensorType)]]=None, **kwargs) -> BatchEncoding:
        """Run the image processor, then the tokenizer, and merge the outputs.

        When apply_ocr is enabled, user-supplied boxes/word_labels are
        rejected and the OCR words/boxes from the image processor are used
        instead. Pixel values are attached to the tokenizer output, and are
        duplicated per overflow sample when return_overflowing_tokens=True.
        """
        if (self.image_processor.apply_ocr and (boxes is not None)):
            raise ValueError('You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.')
        if (self.image_processor.apply_ocr and (word_labels is not None)):
            raise ValueError('You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # If the user supplied only a question-style `text`, pair it with the
        # OCR words as the second sequence.
        if ((text is not None) and self.image_processor.apply_ocr and (text_pair is None)):
            if isinstance(text, str):
                text = [text]
            text_pair = features['words']
        encoded_inputs = self.tokenizer(text=(text if (text is not None) else features['words']), text_pair=(text_pair if (text_pair is not None) else None), boxes=(boxes if (boxes is not None) else features['boxes']), word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        images = features.pop('pixel_values')
        if (return_overflowing_tokens is True):
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['pixel_values'] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image once per overflow chunk produced from its sample."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if (len(images_with_overflow) != len(overflow_to_sample_mapping)):
            raise ValueError(f'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')
        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    # NOTE(review): in upstream transformers the following three are
    # @property accessors; the decorators appear to have been lost in
    # extraction -- confirm before relying on attribute-style access.
    def model_input_names(self):
        """Names of the model inputs this processor produces."""
        return ['input_ids', 'bbox', 'attention_mask', 'pixel_values']

    def feature_extractor_class(self):
        """Deprecated alias for image_processor_class."""
        warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
        return self.image_processor_class

    def feature_extractor(self):
        """Deprecated alias for image_processor."""
        warnings.warn('`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning)
        return self.image_processor
class ROIAlign3d(nn.Module):
    """Module wrapper binding fixed parameters to the roi_align_3d op."""

    def __init__(self, output_size, spatial_scale, sampling_ratio):
        super(ROIAlign3d, self).__init__()
        self.output_size = output_size
        self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    def forward(self, input, rois):
        """Apply 3D ROI-align to `input` at the given `rois`."""
        return roi_align_3d(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio)

    def __repr__(self):
        # Same text as the original concatenation-based repr, via f-string.
        cls_name = self.__class__.__name__
        return (f'{cls_name}(output_size={self.output_size}'
                f', spatial_scale={self.spatial_scale}'
                f', sampling_ratio={self.sampling_ratio})')
class StableDiffusionOnnxPipeline(metaclass=DummyObject):
    """Import-time placeholder used when torch/transformers/onnx are missing.

    Any attempt to construct or load it raises via requires_backends,
    telling the user which extras to install.
    """
    _backends = ['torch', 'transformers', 'onnx']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch', 'transformers', 'onnx'])

    # NOTE(review): in the upstream dummy-object pattern the two loaders
    # below are @classmethods; the decorators appear to have been lost in
    # extraction -- confirm.
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])

    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['torch', 'transformers', 'onnx'])
def preprocess_org_min_date(path=DATA_PATH, file=DATA_FILE, path_proc=DATA_PATH_PROCESSED, min_item_support=MIN_ITEM_SUPPORT, min_session_length=MIN_SESSION_LENGTH, min_date=MIN_DATE):
    """Load the raw session data, filter it, and write the train/test split.

    Filtering is applied in three stages: minimum item support, minimum
    session length, and a minimum-date cutoff. Output goes to
    `path_proc + file` via split_data_org.
    """
    source_path = path + file
    events = load_data(source_path)
    events = filter_data(events, min_item_support, min_session_length)
    events = filter_min_date(events, min_date)
    split_data_org(events, path_proc + file)
class DropdownSelectionWidget(Widget):
    """Dropdown whose selection drives a parameter-selection accordion.

    Selecting an option swaps in the parameter set registered for that
    option in `parameter_dict`.
    """

    def __init__(self, options, value, description, parameter_dict, **kwargs):
        super().__init__(**kwargs)
        self.drop_down = widgets.Dropdown(options=options, value=value, description=description, disabled=False)
        self.parameter_dict = parameter_dict
        self.drop_down.observe(self.on_change, names='value')
        self.parameter_accordion = ParameterSelectionWidget(self.parameter_dict[self.drop_down.value])
        self.widget = widgets.VBox([self.drop_down, self.parameter_accordion.parameter_accordion])

    def get_value(self):
        """Return the currently selected dropdown option."""
        return self.drop_down.value

    def on_change(self, change):
        """Refresh the accordion when the dropdown's value changes."""
        is_value_change = (change['type'] == 'change') and (change['name'] == 'value')
        if is_value_change:
            self.parameter_accordion.update(self.parameter_dict[change['new']])

    def get_parameter_values(self):
        """Return the parameters currently selected in the accordion."""
        return self.parameter_accordion.get_currently_selected_parameters()
def apk(actual, predicted, k=10):
    """Compute average precision at k (AP@k).

    Args:
        actual: collection of ground-truth (relevant) items; order ignored.
        predicted: ranked list of predicted items, best first.
        k: maximum number of predictions to score.

    Returns:
        AP@k in [0, 1]. Returns 0.0 when `actual` is empty. Duplicate
        predictions count only on their first occurrence.
    """
    if not actual:
        # No relevant items: return 0 by convention (the original reached
        # this check only after looping over every prediction).
        return 0.0
    if len(predicted) > k:
        predicted = predicted[:k]
    score = 0.0
    num_hits = 0.0
    for i, p in enumerate(predicted):
        # Score only the first occurrence of each predicted item.
        if p in actual and p not in predicted[:i]:
            num_hits += 1.0
            score += num_hits / (i + 1.0)
    return score / min(len(actual), k)
# NOTE(review): the original source had a bare '.slow' line here, which is
# not valid Python; it is almost certainly a slow-test marker decorator
# whose prefix was lost in extraction (e.g. '@pytest.mark.slow') -- restore
# it once the test framework in use is confirmed.
def test_factorized_antisymmetry_can_be_evaluated():
    """Smoke-test that every factorized antisymmetry model evaluates cleanly."""
    (key, init_pos, slog_psis) = _make_factorized_antisymmetries()
    # Plain loop: the original built a throwaway list purely for the
    # evaluation side effects.
    for slog_psi in slog_psis:
        _jit_eval_model_and_verify_output_shape(key, init_pos, slog_psi)
class HeteroDotProductPredictor(nn.Module):
    """Scores edges of one type by the dot product of endpoint features."""

    def forward(self, graph, h, etype):
        """Return one dot-product score per edge of `etype`.

        Args:
            graph: heterogeneous graph.
            h: node features assigned to graph.ndata['h'].
            etype: edge type whose edges are scored.
        """
        # local_scope keeps the temporary 'h'/'score' features from leaking
        # onto the caller's graph.
        with graph.local_scope():
            graph.ndata['h'] = h
            graph.apply_edges(fn.u_dot_v('h', 'h', 'score'), etype=etype)
            return graph.edges[etype].data['score']
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float=0.5, last_epoch: int=(- 1), min_lr_ratio: float=0.0):
    """Build a LambdaLR wrapping the cosine-with-warmup lambda helper.

    Args:
        optimizer: optimizer whose learning rate is scheduled.
        num_warmup_steps: number of warmup steps.
        num_training_steps: total number of training steps.
        num_cycles: number of cosine cycles passed to the lambda helper.
        last_epoch: index of the last epoch when resuming (-1 = fresh start).
        min_lr_ratio: lower bound of the schedule as a fraction of the base
            LR (forwarded to the lambda helper).
    """
    lr_lambda = partial(_get_cosine_schedule_with_warmup_lr_lambda, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, min_lr_ratio=min_lr_ratio)
    return LambdaLR(optimizer, lr_lambda, last_epoch)
class XNet_sb(nn.Module):
    """Single-branch U-Net-style encoder/decoder built from basic blocks.

    Five resolution levels (64..1024 channels); skip connections concatenate
    encoder features with upsampled decoder features at each level.
    """

    def __init__(self, in_channels, num_classes):
        super(XNet_sb, self).__init__()
        # Channel widths per level.
        (l1c, l2c, l3c, l4c, l5c) = (64, 128, 256, 512, 1024)
        # Level 1: stem, down-transition, decoder fusion, and classifier head.
        self.b1_1_1 = nn.Sequential(conv3x3(in_channels, l1c), conv3x3(l1c, l1c), BasicBlock(l1c, l1c))
        self.b1_1_2_down = down_conv(l1c, l2c)
        self.b1_1_3 = DoubleBasicBlock((l1c + l1c), l1c, nn.Sequential(conv1x1(in_planes=(l1c + l1c), out_planes=l1c), BatchNorm2d(l1c, momentum=BN_MOMENTUM)))
        self.b1_1_4 = nn.Conv2d(l1c, num_classes, kernel_size=1, stride=1, padding=0)
        # Level 2.
        self.b1_2_1 = DoubleBasicBlock(l2c, l2c)
        self.b1_2_2_down = down_conv(l2c, l3c)
        self.b1_2_3 = DoubleBasicBlock((l2c + l2c), l2c, nn.Sequential(conv1x1(in_planes=(l2c + l2c), out_planes=l2c), BatchNorm2d(l2c, momentum=BN_MOMENTUM)))
        self.b1_2_4_up = up_conv(l2c, l1c)
        # Level 3.
        self.b1_3_1 = DoubleBasicBlock(l3c, l3c)
        self.b1_3_2_down = down_conv(l3c, l4c)
        self.b1_3_3 = DoubleBasicBlock((l3c + l3c), l3c, nn.Sequential(conv1x1(in_planes=(l3c + l3c), out_planes=l3c), BatchNorm2d(l3c, momentum=BN_MOMENTUM)))
        self.b1_3_4_up = up_conv(l3c, l2c)
        # Level 4.
        self.b1_4_1 = DoubleBasicBlock(l4c, l4c)
        self.b1_4_2_down = down_conv(l4c, l5c)
        self.b1_4_2 = DoubleBasicBlock(l4c, l4c)
        self.b1_4_5 = DoubleBasicBlock(l4c, l4c)
        self.b1_4_6 = DoubleBasicBlock((l4c + l4c), l4c, nn.Sequential(conv1x1(in_planes=(l4c + l4c), out_planes=l4c), BatchNorm2d(l4c, momentum=BN_MOMENTUM)))
        self.b1_4_7_up = up_conv(l4c, l3c)
        # Level 5 (bottleneck).
        self.b1_5_1 = DoubleBasicBlock(l5c, l5c)
        self.b1_5_4 = DoubleBasicBlock(l5c, l5c)
        self.b1_5_5_up = up_conv(l5c, l4c)
        # He initialization for convs, unit-gamma for BN, small-normal for
        # linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.001)
                if (m.bias is not None):
                    nn.init.constant_(m.bias, 0)

    def forward(self, input1):
        # ---- encoder ----
        x1_1 = self.b1_1_1(input1)
        x1_2 = self.b1_1_2_down(x1_1)
        x1_2 = self.b1_2_1(x1_2)
        x1_3 = self.b1_2_2_down(x1_2)
        x1_3 = self.b1_3_1(x1_3)
        x1_4_1 = self.b1_3_2_down(x1_3)
        x1_4_1 = self.b1_4_1(x1_4_1)
        x1_4_2 = self.b1_4_2(x1_4_1)
        x1_4_2 = self.b1_4_5(x1_4_2)
        # NOTE(review): the level-5 branch descends from x1_4_1 (before
        # b1_4_2/b1_4_5), so those two blocks form a parallel side path --
        # confirm this is the intended topology.
        x1_5_1 = self.b1_4_2_down(x1_4_1)
        x1_5_1 = self.b1_5_1(x1_5_1)
        x1_5_1 = self.b1_5_4(x1_5_1)
        # ---- decoder with skip concatenations ----
        x1_5_1 = self.b1_5_5_up(x1_5_1)
        x1_4_2 = torch.cat((x1_4_2, x1_5_1), dim=1)
        x1_4_2 = self.b1_4_6(x1_4_2)
        x1_4_2 = self.b1_4_7_up(x1_4_2)
        x1_3 = torch.cat((x1_3, x1_4_2), dim=1)
        x1_3 = self.b1_3_3(x1_3)
        x1_3 = self.b1_3_4_up(x1_3)
        x1_2 = torch.cat((x1_2, x1_3), dim=1)
        x1_2 = self.b1_2_3(x1_2)
        x1_2 = self.b1_2_4_up(x1_2)
        x1_1 = torch.cat((x1_1, x1_2), dim=1)
        x1_1 = self.b1_1_3(x1_1)
        x1_1 = self.b1_1_4(x1_1)
        return x1_1
def dump_fold_into_csv_CUB(lsamples, outpath, tag):
    """Write one fold of CUB samples to a CSV file.

    Each row: id, image path, mask path, image label, tag.

    Args:
        lsamples: iterable of (img_path, mask_path, img_label, idcnt) tuples.
        outpath: destination CSV file path.
        tag: sample tag; must be the integer constants.L.
    """
    msg = "'tag' must be an integer. Found {}.".format(tag)
    assert isinstance(tag, int), msg
    msg = "'tag' = {} is unknown. Please see constants.samples_tags = {}.".format(tag, constants.samples_tags)
    assert (tag in constants.samples_tags), msg
    assert (tag == constants.L)
    # BUGFIX: newline='' is required when handing a file to csv.writer;
    # without it the writer emits doubled row terminators on Windows.
    with open(outpath, 'w', newline='') as fcsv:
        filewriter = csv.writer(fcsv, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for (img_path, mask_path, img_label, idcnt) in lsamples:
            filewriter.writerow([str(int(idcnt)), img_path, mask_path, img_label, tag])
def cifar_model_large(conv_layer, linear_layer, init_type, **kwargs):
    """Build the 'large' CIFAR conv net from injected layer constructors.

    Architecture: 4 conv layers (32/32/64/64 channels, strided downsampling)
    followed by three fully connected layers ending in 10 logits.

    Args:
        conv_layer: constructor compatible with nn.Conv2d's signature.
        linear_layer: constructor compatible with nn.Linear's signature.
        init_type: must be 'kaiming_normal'; only this init is implemented.
        **kwargs: accepted for interface compatibility; unused here.
    """
    assert (init_type == 'kaiming_normal'), 'only supporting kaiming_normal init'
    model = nn.Sequential(conv_layer(3, 32, 3, stride=1, padding=1), nn.ReLU(), conv_layer(32, 32, 4, stride=2, padding=1), nn.ReLU(), conv_layer(32, 64, 3, stride=1, padding=1), nn.ReLU(), conv_layer(64, 64, 4, stride=2, padding=1), nn.ReLU(), Flatten(), linear_layer(((64 * 8) * 8), 512), nn.ReLU(), linear_layer(512, 512), nn.ReLU(), linear_layer(512, 10))
    # Manual He-normal initialization over fan-out for the conv weights;
    # biases are zeroed.
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
            m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            m.bias.data.zero_()
    return model
def init(model_s, model_t, init_modules, criterion, train_loader, logger, opt):
    """Pre-train auxiliary distillation modules before the main KD stage.

    Supports the 'abound', 'factor' and 'fsp' schemes. The student and
    teacher backbones stay in eval mode; only `init_modules` parameters are
    optimized with SGD for opt.init_epochs epochs.

    Args:
        model_s: student network (called with is_feat=True).
        model_t: teacher network (features detached, no gradients).
        init_modules: container of the trainable connector modules.
        criterion: init-stage loss for the chosen opt.distill scheme.
        train_loader: training data loader.
        logger: logger exposing add_scalar(name, value, step).
        opt: options namespace (distill, init_epochs, learning_rate, ...).

    Raises:
        NotImplementedError: if opt.distill is not an init-supported scheme.
    """
    model_t.eval()
    model_s.eval()
    init_modules.train()
    if torch.cuda.is_available():
        model_s.cuda()
        model_t.cuda()
        init_modules.cuda()
        cudnn.benchmark = True
    # Small ResNet/WRN students use a reduced LR for the 'factor' scheme.
    if ((opt.model_s in ['resnet8', 'resnet14', 'resnet20', 'resnet32', 'resnet44', 'resnet56', 'resnet110', 'resnet8x4', 'resnet32x4', 'wrn_16_1', 'wrn_16_2', 'wrn_40_1', 'wrn_40_2']) and (opt.distill == 'factor')):
        lr = 0.01
    else:
        lr = opt.learning_rate
    optimizer = optim.SGD(init_modules.parameters(), lr=lr, momentum=opt.momentum, weight_decay=opt.weight_decay)
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    for epoch in range(1, (opt.init_epochs + 1)):
        batch_time.reset()
        data_time.reset()
        losses.reset()
        end = time.time()
        for (idx, data) in enumerate(train_loader):
            # CRD batches carry an extra contrast index.
            if (opt.distill in ['crd']):
                (input, target, index, contrast_idx) = data
            else:
                (input, target, index) = data
            data_time.update((time.time() - end))
            input = input.float()
            if torch.cuda.is_available():
                input = input.cuda()
                target = target.cuda()
                index = index.cuda()
                if (opt.distill in ['crd']):
                    contrast_idx = contrast_idx.cuda()
            # 'abound' needs pre-activation features.
            preact = (opt.distill == 'abound')
            (feat_s, _) = model_s(input, is_feat=True, preact=preact)
            with torch.no_grad():
                (feat_t, _) = model_t(input, is_feat=True, preact=preact)
                feat_t = [f.detach() for f in feat_t]
            if (opt.distill == 'abound'):
                g_s = init_modules[0](feat_s[1:(- 1)])
                g_t = feat_t[1:(- 1)]
                loss_group = criterion(g_s, g_t)
                loss = sum(loss_group)
            elif (opt.distill == 'factor'):
                f_t = feat_t[(- 2)]
                (_, f_t_rec) = init_modules[0](f_t)
                loss = criterion(f_t_rec, f_t)
            elif (opt.distill == 'fsp'):
                loss_group = criterion(feat_s[:(- 1)], feat_t[:(- 1)])
                loss = sum(loss_group)
            else:
                # BUGFIX: the original raised `NotImplemented(...)`, which is a
                # constant (not an exception class) and fails with a TypeError;
                # NotImplementedError is the intended exception.
                raise NotImplementedError('Not supported in init training: {}'.format(opt.distill))
            losses.update(loss.item(), input.size(0))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            batch_time.update((time.time() - end))
            end = time.time()
        logger.add_scalar('init_train_loss', losses.avg, epoch)
        print('Epoch: [{0}/{1}]\tTime {batch_time.val:.3f} ({batch_time.avg:.3f})\tlosses: {losses.val:.3f} ({losses.avg:.3f})'.format(epoch, opt.init_epochs, batch_time=batch_time, losses=losses))
        sys.stdout.flush()
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, dilation=(1, 1), residual=True, BatchNorm=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = BatchNorm(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=dilation[1], bias=False, dilation=dilation[1])
self.bn2 = BatchNorm(planes)
self.conv3 = nn.Conv2d(planes, (planes * 4), kernel_size=1, bias=False)
self.bn3 = BatchNorm((planes * 4))
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if (self.downsample is not None):
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out |
class Net(torch.nn.Module):
    """Three-layer GAT with a parallel linear skip connection at each layer."""

    def __init__(self, train_dataset):
        super(Net, self).__init__()
        # Each GATConv is paired with a Linear layer of matching output width
        # so their outputs can be summed.
        self.conv1 = GATConv(train_dataset.num_features, 256, heads=4)
        self.lin1 = torch.nn.Linear(train_dataset.num_features, (4 * 256))
        self.conv2 = GATConv((4 * 256), 256, heads=4)
        self.lin2 = torch.nn.Linear((4 * 256), (4 * 256))
        # Final layer averages heads (concat=False) to produce class logits.
        self.conv3 = GATConv((4 * 256), train_dataset.num_classes, heads=6, concat=False)
        self.lin3 = torch.nn.Linear((4 * 256), train_dataset.num_classes)

    def forward(self, x, edge_index):
        """Return per-node class logits for node features x and edges edge_index."""
        x = F.elu((self.conv1(x, edge_index) + self.lin1(x)))
        x = F.elu((self.conv2(x, edge_index) + self.lin2(x)))
        x = (self.conv3(x, edge_index) + self.lin3(x))
        return x
def test(args, model, device, test_loader, logger):
    """Evaluate `model` on `test_loader`, logging per-batch loss and accuracy.

    With a logger, running averages are accumulated under 'val.*' keys
    (batch time is tracked only after the first few batches, presumably to
    skip warm-up -- confirm); without one, per-batch stats are printed.
    """
    model.eval()
    with torch.no_grad():
        for (data, target) in test_loader:
            # Assumes `time` is the function `time.time` (from-import) --
            # confirm against the file's imports.
            start = time()
            (data, target) = (data.to(device), target.to(device))
            predictions = model(data)
            loss = F.cross_entropy(predictions, target)
            pred = predictions.max(1, keepdim=True)[1]
            accuracy = pred.eq(target.view_as(pred)).double().mean()
            stats = {'val.loss': loss.item(), 'val.accuracy': accuracy.item()}
            if logger:
                logger.update_average(stats)
                if (logger.avg_count['val.loss'] > 3):
                    logger.update_average({'val.time': (time() - start)})
            else:
                print(stats)
    if logger:
        logger.print(prefix='val')
class Adjective_Rate(object):
    """Computes the fraction of words that are adjectives across sentences."""

    def __init__(self, sentence_objs):
        # sentence_objs: objects exposing pos_tag_counter and num_words().
        self.sentence_objs = sentence_objs

    def handle(self):
        """Return total adjectives / total words, or 0.0 when there are no words.

        The zero-word guard avoids the ZeroDivisionError the original raised
        on an empty sentence list (or all-empty sentences).
        """
        (tot_num_adjs, tot_num_words) = (0, 0)
        for so in self.sentence_objs:
            tot_num_adjs += so.pos_tag_counter.get_pos_tag_count(ADJECTIVE)
            tot_num_words += so.num_words()
        if tot_num_words == 0:
            return 0.0
        return (tot_num_adjs / tot_num_words)
def clear_dir(directory):
    """Delete every file and subdirectory inside `directory` (best effort).

    The directory itself is kept. Individual deletion failures are printed
    and skipped, preserving the original best-effort behavior.

    Raises:
        Exception: if `directory` is not a string, is not an existing
            directory, or looks like the current/parent/root directory.
    """
    # BUGFIX: validate the type before touching the filesystem -- the
    # original called os.path.isdir() first, so the type check was only
    # reachable for arguments isdir() happened to tolerate.
    if not isinstance(directory, str):
        raise Exception('string type required for directory: %s' % directory)
    if not os.path.isdir(directory):
        raise Exception('%s is not a directory' % directory)
    # Guard against obviously catastrophic targets.
    if directory in ['..', '.', '', '/', './', '../', '*']:
        raise Exception('trying to delete current directory, probably bad idea?!')
    for f in os.listdir(directory):
        path = os.path.join(directory, f)
        try:
            if os.path.isfile(path):
                os.remove(path)
            elif os.path.isdir(path):
                shutil.rmtree(path)
        except Exception as e:
            # Best effort: report and continue with the remaining entries.
            print(e)
def get_config(num_targets):
    """Build a moog environment config: find the prey among covered targets.

    Targets are placed on a ring, briefly shown, covered, rotated, and the
    agent must touch the cover hiding the 'prey' target (cued by color).

    Args:
        num_targets: positive integer number of targets on the ring.

    Returns:
        Config dict (state initializer, physics, task, action space,
        observers, game rules, meta-state initializer).

    Raises:
        ValueError: if num_targets is not a positive integer.
    """
    # BUGFIX: the original only rejected num_targets == 0, so negative
    # integers slipped past despite the 'must be a positive integer' message.
    if ((not isinstance(num_targets, int)) or (num_targets <= 0)):
        raise ValueError(f'num_targets is {num_targets}, but must be a positive integer')
    screen = sprite.Sprite(x=0.5, y=0.5, shape='square', scale=2.0, c0=0.6, c1=0.7, c2=0.7)
    target_factor_distrib = distribs.Product([distribs.Continuous('c0', 0.0, 1.0)], shape='circle', scale=0.085, c1=1.0, c2=1.0)
    cover_factors = dict(mass=0.0, shape='circle', scale=0.1, c0=0.0, c1=0.0, c2=0.5, opacity=0)
    def state_initializer():
        # Targets sit on a ring of radius 0.35 around the center.
        sprite_positions = (0.5 + (0.35 * _get_polygon(num_targets, 0.7)))
        target_factors = [target_factor_distrib.sample() for _ in range(num_targets)]
        targets = [sprite.Sprite(x=pos[0], y=pos[1], **factors) for (pos, factors) in zip(sprite_positions, target_factors)]
        covers = [sprite.Sprite(x=pos[0], y=pos[1], **cover_factors) for pos in sprite_positions]
        # The first cover hides the prey.
        for (i, s) in enumerate(covers):
            if (i == 0):
                s.metadata = {'prey': True}
            else:
                s.metadata = {'prey': False}
        # The cue looks like the prey target, slightly smaller.
        cue_factors = copy.deepcopy(target_factors[0])
        cue_factors['scale'] = (0.7 * target_factors[0]['scale'])
        cue = sprite.Sprite(x=0.5, y=0.501, opacity=0, mass=np.inf, **cue_factors)
        agent = sprite.Sprite(x=0.5, y=0.5, shape='circle', scale=0.1, c0=0.4, c1=0.0, c2=1.0, mass=np.inf)
        annulus_verts = shapes.annulus_vertices(0.34, 0.36)
        annulus = sprite.Sprite(x=0.5, y=0.5, shape=annulus_verts, scale=1.0, c0=0.0, c1=0.0, c2=0.3)
        state = collections.OrderedDict([('annulus', [annulus]), ('targets', targets), ('covers', covers), ('agent', [agent]), ('cue', [cue]), ('screen', [screen])])
        return state
    drag = (physics_lib.Drag(coeff_friction=0.25), ['agent', 'cue'])
    tether_covers = physics_lib.TetherZippedLayers(('targets', 'covers'), anchor=np.array([0.5, 0.5]))
    physics = physics_lib.Physics(drag, updates_per_env_step=1, corrective_physics=[tether_covers])
    # +1 for touching the prey cover, -1 for any other cover.
    contact_task = tasks.ContactReward(reward_fn=(lambda _, s: (1 if s.metadata['prey'] else (- 1))), layers_0='agent', layers_1='covers')
    def _should_reset(state, meta_state):
        should_reset = ((state['covers'][0].opacity == 0) and (meta_state['phase'] == 'response'))
        return should_reset
    reset_task = tasks.Reset(condition=_should_reset, steps_after_condition=15)
    task = tasks.CompositeTask(contact_task, reset_task, timeout_steps=800)
    action_space = action_spaces.Joystick(scaling_factor=0.01, action_layers=['agent', 'cue'])
    _polygon_modifier = observers.polygon_modifiers.FirstPersonAgent(agent_layer='agent')
    observer = observers.PILRenderer(image_size=(64, 64), anti_aliasing=1, color_to_rgb='hsv_to_rgb', polygon_modifier=_polygon_modifier)
    def _make_opaque(s):
        s.opacity = 255
    def _make_transparent(s):
        s.opacity = 0
    # Phase sequence: blank screen -> targets visible -> covered motion ->
    # response (cue shown, agent unglued).
    screen_phase = gr.Phase(duration=1, name='screen')
    disappear_screen = gr.ModifySprites('screen', _make_transparent)
    visible_phase = gr.Phase(one_time_rules=disappear_screen, duration=2, name='visible')
    # NOTE(review): _move is defined but never referenced below -- dead code
    # or a game rule lost upstream; confirm.
    def _move(s):
        s.velocity = np.random.uniform((- 0.25), 0.25, size=(2,))
    cover_targets = gr.ModifySprites('covers', _make_opaque)
    begin_motion = BeginMotion(angle_vel_range=(0.1, 0.3))
    motion_phase = gr.Phase(one_time_rules=[cover_targets, begin_motion], duration=100, name='motion')
    def _stop(s):
        s.angle_vel = 0.0
        s.velocity = np.zeros(2)
    def _unglue(s):
        s.mass = 1.0
    appear_cue = gr.ModifySprites('cue', _make_opaque)
    stop_targets = gr.ModifySprites(('targets', 'covers'), _stop)
    unglue_agent = gr.ModifySprites(('agent', 'cue'), _unglue)
    make_targets_discoverable = gr.ModifyOnContact(layers_0='agent', layers_1='covers', modifier_1=_make_transparent)
    response_phase = gr.Phase(one_time_rules=[appear_cue, stop_targets, unglue_agent], continual_rules=make_targets_discoverable, name='response')
    phase_sequence = gr.PhaseSequence(screen_phase, visible_phase, motion_phase, response_phase, meta_state_phase_name_key='phase')
    config = {'state_initializer': state_initializer, 'physics': physics, 'task': task, 'action_space': action_space, 'observers': {'image': observer}, 'game_rules': (phase_sequence,), 'meta_state_initializer': (lambda : {'phase': ''})}
    return config
def plot_alignment_to_numpy(alignment, info=None):
    """Render an attention alignment matrix to an RGB numpy image.

    Matplotlib is imported lazily and forced onto the 'Agg' backend on first
    use so the function works in headless environments.

    Args:
        alignment: 2D array (encoder steps x decoder steps) to display.
        info: optional extra text appended to the x-axis label.

    Returns:
        uint8 array of shape (height, width, 3) with the rendered figure.
    """
    global MATPLOTLIB_FLAG
    if (not MATPLOTLIB_FLAG):
        import matplotlib
        matplotlib.use('Agg')
        MATPLOTLIB_FLAG = True
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)
    import matplotlib.pylab as plt
    import numpy as np
    (fig, ax) = plt.subplots(figsize=(6, 4))
    im = ax.imshow(alignment, aspect='auto', origin='lower', interpolation='none')
    fig.colorbar(im, ax=ax)
    xlabel = 'Decoder timestep'
    if (info is not None):
        xlabel += ('\n\n' + info)
    plt.xlabel(xlabel)
    plt.ylabel('Encoder timestep')
    plt.tight_layout()
    fig.canvas.draw()
    # BUGFIX: np.fromstring on binary data is deprecated and removed in
    # NumPy 2; np.frombuffer is the replacement. .copy() keeps the returned
    # array writable, matching the original fromstring behavior.
    # NOTE(review): canvas.tostring_rgb() is deprecated in Matplotlib >= 3.8
    # (buffer_rgba is the replacement, but it changes the channel count) --
    # confirm the target Matplotlib version before migrating that call.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).copy()
    data = data.reshape((fig.canvas.get_width_height()[::(- 1)] + (3,)))
    plt.close()
    return data
def parse_dict_args(**kwargs):
    """Convert keyword arguments into CLI-style flags and parse them.

    Single-letter keys become short options ('-k'); longer keys become long
    options with underscores turned into dashes ('--my-key'). Values are
    stringified and passed as the option argument.

    Returns:
        The namespace produced by create_parser().parse_args().
    """
    def to_cmdline_kwarg(key, value):
        # '-k' for single-letter keys, '--long-key' otherwise.
        if (len(key) == 1):
            key = '-{}'.format(key)
        else:
            key = '--{}'.format(re.sub('_', '-', key))
        value = str(value)
        return (key, value)
    kwargs_pairs = (to_cmdline_kwarg(key, value) for (key, value) in kwargs.items())
    # Flatten the (key, value) pairs; the original used sum(pairs, ()),
    # which concatenates tuples quadratically.
    cmdline_args = [arg for pair in kwargs_pairs for arg in pair]
    LOG.info('Using these command line args: %s', ' '.join(cmdline_args))
    return create_parser().parse_args(cmdline_args)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.