code stringlengths 101 5.91M |
|---|
@torch.no_grad()
def evaluation(model, data_loader, tokenizer, device, config):
    """Generate VQA test results by ranking a fixed candidate answer list.

    Runs the model in eval mode under no_grad (the original decorator was
    mangled to the bare expression `_grad()` — restored here) and, for each
    question, keeps the highest-probability candidate among the model's
    top-k answers.

    Args:
        model: VQA model whose forward(..., train=False, k=...) returns
            (topk_ids, topk_probs) per question.
        data_loader: yields (image, question, question_id) batches; its
            dataset exposes the candidate `answer_list`.
        tokenizer: HuggingFace-style tokenizer (used for questions and answers).
        device: torch device to run on.
        config: dict with 'eos' (answer terminator string) and 'k_test'
            (number of candidates to rank).

    Returns:
        list of {'question_id': int, 'answer': str} records.
    """
    model.eval()
    metric_logger = utils.MetricLogger(delimiter=' ')
    header = 'Generate VQA test result:'
    print_freq = 50
    result = []
    # Tokenize all candidate answers once up front; each is terminated with EOS.
    answer_list = [(answer + config['eos']) for answer in data_loader.dataset.answer_list]
    answer_input = tokenizer(answer_list, padding='longest', return_tensors='pt').to(device)
    for (n, (image, question, question_id)) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
        image = image.to(device, non_blocking=True)
        question_input = tokenizer(question, padding='longest', return_tensors='pt').to(device)
        (topk_ids, topk_probs) = model(image, question_input, answer_input, train=False, k=config['k_test'])
        for (ques_id, topk_id, topk_prob) in zip(question_id, topk_ids, topk_probs):
            ques_id = int(ques_id.item())
            # Keep the single best candidate out of the model's top-k.
            (_, pred) = topk_prob.max(dim=0)
            result.append({'question_id': ques_id, 'answer': data_loader.dataset.answer_list[topk_id[pred]]})
    return result
class AnchorGenerator(object, metaclass=ABCMeta):
    """Abstract base class for anchor generators.

    Subclasses implement _generate(); generate() wraps it with optional
    sanity checks that the number of produced anchors matches the
    feature-map geometry.

    Fixes vs. original:
      * Python-2 style `__metaclass__ = ABCMeta` was a no-op attribute in
        Python 3; the metaclass is now declared properly.
      * `check_num_anchors` is a method, but the original tested the bound
        method object itself for truthiness (always True); it is now called.
    """

    def name_scope(self):
        """Return the TF name scope under which anchors are generated."""
        pass

    def check_num_anchors(self):
        """Whether generate() should assert the produced anchor count."""
        return True

    def num_anchors_per_location(self):
        """Return a list with the anchor count per spatial location, one entry per feature map."""
        pass

    def generate(self, feature_map_shape_list, **params):
        """Generate anchors for the given feature-map shapes.

        Args:
            feature_map_shape_list: list of (height, width) pairs, one per feature map.
            **params: forwarded to the subclass _generate().

        Returns:
            The anchors produced by _generate() (a BoxList-style object with
            get()/set()/num_boxes()).

        Raises:
            ValueError: if the number of feature maps disagrees with
                num_anchors_per_location().
        """
        if (self.check_num_anchors() and (len(feature_map_shape_list) != len(self.num_anchors_per_location()))):
            raise ValueError('Number of feature maps is expected to equal the length of `num_anchors_per_location`.')
        with tf.name_scope(self.name_scope()):
            anchors = self._generate(feature_map_shape_list, **params)
            if self.check_num_anchors():
                # Tie an identity op to the assert so the check executes in graph mode.
                with tf.control_dependencies([self._assert_correct_number_of_anchors(anchors, feature_map_shape_list)]):
                    anchors.set(tf.identity(anchors.get()))
            return anchors

    def _generate(self, feature_map_shape_list, **params):
        """Subclass hook that actually builds the anchors."""
        pass

    def _assert_correct_number_of_anchors(self, anchors, feature_map_shape_list):
        """Return a TF assert op checking anchors.num_boxes() equals the expected total."""
        expected_num_anchors = 0
        # Each feature map contributes anchors_per_location * height * width anchors.
        for (num_anchors_per_location, feature_map_shape) in zip(self.num_anchors_per_location(), feature_map_shape_list):
            expected_num_anchors += ((num_anchors_per_location * feature_map_shape[0]) * feature_map_shape[1])
        return tf.assert_equal(expected_num_anchors, anchors.num_boxes())
def main():
    """Train ResNet-50 on ImageNet with BigDL/Orca on a YARN cluster.

    Parses CLI arguments, builds the standard ImageNet train/val DataLoaders,
    wraps the PyTorch model/loss/optimizer into their BigDL equivalents
    (TorchModel/TorchLoss/SGD) and runs Estimator.train_minibatch with
    per-epoch checkpointing and top-1/top-5 validation.
    """
    parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
    # --- standard ImageNet training options ---
    parser.add_argument('data', metavar='DIR', help='path to dataset')
    parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18', choices=model_names, help=(('model architecture: ' + ' | '.join(model_names)) + ' (default: resnet18)'))
    parser.add_argument('--epochs', default=90, type=int, metavar='N', help='number of total epochs to run')
    parser.add_argument('--max_epochs', default=90, type=int, metavar='N', help='number of max epochs to run')
    parser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')
    parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N', help='mini-batch size (default: 256), this is the total batch size of all GPUs on the current node when using Data Parallel or Distributed Data Parallel')
    parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR', help='initial learning rate', dest='lr')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=0.0001, type=float, metavar='W', help='weight decay (default: 1e-4)', dest='weight_decay')
    parser.add_argument('-p', '--print-freq', default=10, type=int, metavar='N', help='print frequency (default: 10)')
    parser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
    parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
    parser.add_argument('--pretrained', dest='pretrained', action='store_true', help='use pre-trained model')
    parser.add_argument('--world-size', default=(- 1), type=int, help='number of nodes for distributed training')
    parser.add_argument('--rank', default=(- 1), type=int, help='node rank for distributed training')
    parser.add_argument('--seed', default=None, type=int, help='seed for initializing training. ')
    # --- cluster/resource options for Orca on YARN ---
    parser.add_argument('--cores', default=4, type=int, help='num of CPUs to use.')
    parser.add_argument('--nodes', default=1, type=int, help='num of nodes to use.')
    parser.add_argument('--executor_memory', default='20g', type=str, help='size of executor memory.')
    parser.add_argument('--driver_memory', default='20g', type=str, help='size of driver memory.')
    parser.add_argument('--driver_cores', default=1, type=int, help='num of driver cores to use.')
    parser.add_argument('--num_executors', type=int, default=16, help='number of executors')
    parser.add_argument('--deploy_mode', type=str, default='yarn-client', help='yarn deploy mode, yarn-client or yarn-cluster')
    args = parser.parse_args()
    # YARN modes need HADOOP_CONF_DIR to locate the cluster configuration.
    hadoop_conf = os.environ.get('HADOOP_CONF_DIR')
    invalidInputError((hadoop_conf is not None), 'Directory path to hadoop conf not found for yarn-client mode. Please set the environment variable HADOOP_CONF_DIR')
    sc = init_orca_context(cluster_mode=args.deploy_mode, hadoop_conf=hadoop_conf, conf={'spark.executor.memory': args.executor_memory, 'spark.executor.cores': args.cores, 'spark.executor.instances': args.num_executors})
    # Standard ImageNet preprocessing: random crop/flip for train, center crop for val.
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(traindir, transforms.Compose([transforms.RandomResizedCrop(224), transforms.RandomHorizontalFlip(), transforms.ToTensor(), normalize]))
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
    # NOTE(review): --arch is parsed but a ResNet-50 is always instantiated here.
    model = torchvision.models.resnet50()
    val_loader = torch.utils.data.DataLoader(datasets.ImageFolder(valdir, transforms.Compose([transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize])), batch_size=args.batch_size, shuffle=False)
    # 1281167 = ImageNet-1k train image count; LR is decayed 10x every 30 epochs.
    iterationPerEpoch = int(math.ceil((float(1281167) / args.batch_size)))
    step = Step((iterationPerEpoch * 30), 0.1)
    zooOptimizer = SGD(args.lr, momentum=args.momentum, dampening=0.0, leaningrate_schedule=step, weightdecay=args.weight_decay)
    # Wrap the PyTorch model/criterion into BigDL equivalents for the Estimator.
    zooModel = TorchModel.from_pytorch(model)
    criterion = torch.nn.CrossEntropyLoss()
    zooCriterion = TorchLoss.from_pytorch(criterion)
    estimator = Estimator(zooModel, optim_methods=zooOptimizer)
    train_featureSet = FeatureSet.pytorch_dataloader(train_loader)
    test_featureSet = FeatureSet.pytorch_dataloader(val_loader)
    estimator.train_minibatch(train_featureSet, zooCriterion, end_trigger=MaxEpoch(args.max_epochs), checkpoint_trigger=EveryEpoch(), validation_set=test_featureSet, validation_method=[Accuracy(), Top5Accuracy()])
def fill_parameters(parameters, default_parameters, name='unknown function'):
    """Recursively complete *parameters* with entries from *default_parameters*.

    Keys whose name contains 'param' or 'opt' and whose default is a dict are
    filled recursively; during the recursion the sub-dict's 'verbose' entry is
    overridden by the parent's verbosity and restored afterwards. A default
    value of None marks a mandatory field and raises if it is absent.

    Returns the updated *parameters* dict (mutated in place; note that
    *default_parameters* may gain a 'verbose' entry as well).
    """
    message = ''
    # Ensure a verbosity level exists before anything else.
    if 'verbose' not in parameters:
        default_parameters.setdefault('verbose', 0)
        parameters['verbose'] = default_parameters['verbose']
    verbose = parameters['verbose']
    for key in default_parameters:
        nested = ('param' in key) or ('opt' in key)
        # A key needs filling when absent, or when it names a nested
        # parameter group (which must be completed recursively).
        needs_fill = (key not in parameters) or (nested and isinstance(parameters[key], dict))
        if not needs_fill:
            continue
        default_value = default_parameters[key]
        if default_value is None:
            # None defaults mark required fields with no sensible fallback.
            raise Exception('In ' + name + ', field "' + key + '" is required.')
        if isinstance(default_value, dict) and nested:
            sub = parameters.setdefault(key, {})
            # Temporarily propagate the parent's verbosity into the sub-dict,
            # remembering whatever was there so it can be restored.
            saved_verbose = sub.get('verbose', 0)
            sub['verbose'] = verbose
            parameters[key] = fill_parameters(sub, default_value, name + ('\'s subfield "%s"' % key))
            parameters[key]['verbose'] = saved_verbose
        else:
            parameters[key] = default_value
        if verbose:
            message = 'In ' + name + ', field "' + key + '" set to default value.'
            # Append the value itself for simple scalar / string defaults.
            if type(default_value) in (tuple, float, bool, int):
                message = message[:-1] + (': %s.' % default_value)
            elif isinstance(default_value, str):
                message = message[:-1] + (': "%s".' % default_value)
            print(message)
    return parameters
class _DeformConv(Function):
def forward(ctx, input, offset, weight, stride=1, padding=0, dilation=1, groups=1, deformable_groups=1, im2col_step=64):
if ((input is not None) and (input.dim() != 4)):
raise ValueError('Expected 4D tensor as input, got {}D tensor instead.'.format(input.dim()))
ctx.stride = _pair(stride)
ctx.padding = _pair(padding)
ctx.dilation = _pair(dilation)
ctx.groups = groups
ctx.deformable_groups = deformable_groups
ctx.im2col_step = im2col_step
ctx.save_for_backward(input, offset, weight)
output = input.new_empty(_DeformConv._output_size(input, weight, ctx.padding, ctx.dilation, ctx.stride))
ctx.bufs_ = [input.new_empty(0), input.new_empty(0)]
if (not input.is_cuda):
raise NotImplementedError
else:
cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
assert ((input.shape[0] % cur_im2col_step) == 0), 'im2col step must divide batchsize'
_C.deform_conv_forward(input, weight, offset, output, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step)
return output
_differentiable
def backward(ctx, grad_output):
(input, offset, weight) = ctx.saved_tensors
grad_input = grad_offset = grad_weight = None
if (not grad_output.is_cuda):
raise NotImplementedError
else:
cur_im2col_step = _DeformConv._cal_im2col_step(input.shape[0], ctx.im2col_step)
assert ((input.shape[0] % cur_im2col_step) == 0), 'im2col step must divide batchsize'
if (ctx.needs_input_grad[0] or ctx.needs_input_grad[1]):
grad_input = torch.zeros_like(input)
grad_offset = torch.zeros_like(offset)
_C.deform_conv_backward_input(input, offset, grad_output, grad_input, grad_offset, weight, ctx.bufs_[0], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, cur_im2col_step)
if ctx.needs_input_grad[2]:
grad_weight = torch.zeros_like(weight)
_C.deform_conv_backward_filter(input, offset, grad_output, grad_weight, ctx.bufs_[0], ctx.bufs_[1], weight.size(3), weight.size(2), ctx.stride[1], ctx.stride[0], ctx.padding[1], ctx.padding[0], ctx.dilation[1], ctx.dilation[0], ctx.groups, ctx.deformable_groups, 1, cur_im2col_step)
return (grad_input, grad_offset, grad_weight, None, None, None, None, None, None)
def _output_size(input, weight, padding, dilation, stride):
channels = weight.size(0)
output_size = (input.size(0), channels)
for d in range((input.dim() - 2)):
in_size = input.size((d + 2))
pad = padding[d]
kernel = ((dilation[d] * (weight.size((d + 2)) - 1)) + 1)
stride_ = stride[d]
output_size += (((((in_size + (2 * pad)) - kernel) // stride_) + 1),)
if (not all(map((lambda s: (s > 0)), output_size))):
raise ValueError('convolution input is too small (output would be {})'.format('x'.join(map(str, output_size))))
return output_size
_cache(maxsize=128)
def _cal_im2col_step(input_size, default_size):
if (input_size <= default_size):
return input_size
best_step = 1
for step in range(2, min((int(math.sqrt(input_size)) + 1), default_size)):
if ((input_size % step) == 0):
if ((input_size // step) <= default_size):
return (input_size // step)
best_step = step
return best_step |
class _NCEGeneratorState(object):
    """Process-shared cursor over a document dataset for NCE batch generation.

    Tracks which document (_doc_id) and which position inside it
    (_in_doc_pos) the next batch should start from. State lives in
    multiprocessing RawValues so it is shared across worker processes,
    guarded by an explicit lock.
    """

    def __init__(self, context_size):
        # 'i' = shared C int; RawValue has no internal lock, we lock manually.
        self._doc_id = multiprocessing.RawValue('i', 0)
        # Start past the left context of the first document.
        self._in_doc_pos = multiprocessing.RawValue('i', context_size)
        self._lock = multiprocessing.Lock()

    def update_state(self, dataset, batch_size, context_size, num_examples_in_doc):
        """Atomically read the current cursor and advance it past one batch.

        Args:
            dataset: sequence of documents (each exposing a .text sequence).
            batch_size: number of examples the caller will consume.
            context_size: left-context length; also the reset position.
            num_examples_in_doc: callable(doc[, pos]) -> number of examples
                available in the document (from *pos* when given).

        Returns:
            (doc_id, in_doc_pos) valid for the caller's batch.
        """
        with self._lock:
            doc_id = self._doc_id.value
            in_doc_pos = self._in_doc_pos.value
            self._advance_indices(dataset, batch_size, context_size, num_examples_in_doc)
        return (doc_id, in_doc_pos)

    def _advance_indices(self, dataset, batch_size, context_size, num_examples_in_doc):
        # Examples still available from the current position in the current doc.
        num_examples = num_examples_in_doc(dataset[self._doc_id.value], self._in_doc_pos.value)
        if (num_examples > batch_size):
            # Batch fits inside the current document: just move the position.
            self._in_doc_pos.value += batch_size
            return
        if (num_examples == batch_size):
            # Batch exactly exhausts the document: advance to the next one,
            # wrapping to the first document at the end of the dataset.
            if (self._doc_id.value < (len(dataset) - 1)):
                self._doc_id.value += 1
            else:
                self._doc_id.value = 0
            self._in_doc_pos.value = context_size
            return
        while (num_examples < batch_size):
            # Batch spans multiple documents: keep consuming whole documents.
            if (self._doc_id.value == (len(dataset) - 1)):
                # Ran off the end of the dataset: wrap to the beginning.
                self._doc_id.value = 0
                self._in_doc_pos.value = context_size
                return
            self._doc_id.value += 1
            num_examples += num_examples_in_doc(dataset[self._doc_id.value])
        # Position inside the final document such that exactly batch_size
        # examples were handed out ((num_examples - batch_size) remain).
        self._in_doc_pos.value = ((len(dataset[self._doc_id.value].text) - context_size) - (num_examples - batch_size))
def successors(ctree, cerrors, gold):
    """Yield candidate single-edit successor trees for error-driven parse repair.

    Given the current tree *ctree* and its bracketing errors relative to the
    gold tree (cerrors.missing / cerrors.extra; each error tuple's [1] is the
    span and [2] the label), yields trees produced by: relabeling a span,
    adding a missing span, removing an extra span, or moving a contiguous run
    of children under a different parent.
    """
    # 1) Relabel: a missing and an extra bracket over the same span means
    # only the label differs.
    for merror in cerrors.missing:
        for eerror in cerrors.extra:
            if (merror[1] == eerror[1]):
                (yield gen_different_label_successor(ctree, eerror[1], eerror[2], merror[2]))
    # 2) Add each missing bracket.
    for error in cerrors.missing:
        (yield gen_missing_successor(ctree, error))
    # 3) Remove each extra bracket.
    for error in cerrors.extra:
        (yield gen_extra_successor(ctree, error, gold))
    # 4) Move: for every contiguous child run [left, right] of every span,
    # collect candidate new parents and yield one move per candidate.
    for source_span in ctree:
        for left in range(len(source_span.subtrees)):
            for right in range(left, len(source_span.subtrees)):
                if ((left == 0) and (right == (len(source_span.subtrees) - 1))):
                    # Moving *all* children is not a meaningful move; skip.
                    continue
                new_parents = []
                if (left != 0):
                    # Walk down the rightmost spine of the left sibling; any
                    # extra node there could absorb the moved run.
                    new_parent = source_span.subtrees[(left - 1)]
                    while (not new_parent.is_terminal()):
                        if cerrors.is_extra(new_parent):
                            new_parents.append(new_parent)
                        new_parent = new_parent.subtrees[(- 1)]
                if (right != (len(source_span.subtrees) - 1)):
                    # Symmetrically, walk down the leftmost spine of the right sibling.
                    new_parent = source_span.subtrees[(right + 1)]
                    while (not new_parent.is_terminal()):
                        if cerrors.is_extra(new_parent):
                            new_parents.append(new_parent)
                        new_parent = new_parent.subtrees[0]
                if (cerrors.is_extra(source_span) and ((left == 0) or (right == (len(source_span.subtrees) - 1)))):
                    # The run touches an edge of an extra span: extra nodes
                    # elsewhere in the tree adjacent to that edge are candidates.
                    if (left == 0):
                        if (source_span.subtrees[left].span[0] > 0):
                            for new_parent in ctree.get_nodes('all', end=source_span.subtrees[left].span[0]):
                                if cerrors.is_extra(new_parent):
                                    new_parents.append(new_parent)
                    if (right == (len(source_span.subtrees) - 1)):
                        if (source_span.subtrees[right].span[1] < ctree.span[1]):
                            for new_parent in ctree.get_nodes('all', start=source_span.subtrees[right].span[1]):
                                if cerrors.is_extra(new_parent):
                                    new_parents.append(new_parent)
                if (left == 0):
                    # Left-edge run: ancestors are candidates, stopping once an
                    # ancestor extends beyond this span on the left.
                    new_parent = source_span.parent
                    while (not (new_parent.parent is None)):
                        new_parents.append(new_parent)
                        if (new_parent.parent.span[0] < source_span.span[0]):
                            break
                        new_parent = new_parent.parent
                if (right == (len(source_span.subtrees) - 1)):
                    # Symmetric ancestor walk for a right-edge run.
                    new_parent = source_span.parent
                    while (not (new_parent.parent is None)):
                        new_parents.append(new_parent)
                        if (new_parent.parent.span[1] > source_span.span[1]):
                            break
                        new_parent = new_parent.parent
                for new_parent in new_parents:
                    (yield gen_move_successor(source_span, left, right, new_parent, cerrors, gold))
def test_masked_softmax_nll():
    """Check torch_utils.masked_softmax_nll against a reference built from
    nn.CrossEntropyLoss plus a correction term for the masked-out mass.
    """
    rng = np.random.RandomState(9823174)
    n_data = 22
    data_dim = 45
    logits_np = (4 * rng.randn(n_data, data_dim))
    # Random boolean mask; redraw until every row keeps at least one entry.
    mask_np = (rng.randn(n_data, data_dim) > 0.0)
    while (not np.all(np.sum((mask_np == 1), axis=1))):
        mask_np = (rng.randn(n_data, data_dim) > 0.0)
    # Labels: argmax of noisy logits restricted to the unmasked positions.
    noisy = (logits_np + (2 * rng.randn(n_data, data_dim)))
    noisy[(~ mask_np)] = (- np.inf)
    labels_np = np.argmax(noisy, axis=1)
    logits = torch.tensor(logits_np)
    mask = torch.tensor(mask_np)
    labels = torch.tensor(labels_np, dtype=torch.int64)
    nll_from_func = torch_utils.masked_softmax_nll(logits, labels, mask).numpy()
    # Reference: per-sample cross-entropy plus log of the in-mask probability
    # mass, which renormalizes the softmax over unmasked entries only.
    probs = nn.Softmax(dim=1)(logits)
    prob_adjustment_factor = torch.log(torch.sum((probs * mask.float()), dim=1))
    per_sample_loss = nn.CrossEntropyLoss(reduction='none')(logits, labels)
    per_sample_loss = (per_sample_loss + prob_adjustment_factor)
    np.testing.assert_array_almost_equal(nll_from_func, per_sample_loss.numpy())
class LongformerForSequenceClassification(metaclass=DummyObject):
    """Import-time placeholder used when the torch backend is unavailable.

    Instantiation calls requires_backends, which points the user at the
    missing 'torch' dependency instead of failing with an ImportError later.
    """
    # Backends that must be installed for the real class to be usable.
    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
class ResNetSharingClassifier(SharingClassifier):
    """ResNet-style classifier expert with feature sharing across experts.

    Each new expert reuses the features of all previous experts: at every
    stage the last precursor's (detached) feature map is concatenated with
    this expert's own, so layer inputs are sized by nf_cat while this expert
    only contributes nf new channels per stage.
    """
    # Class defaults; num_blocks / norm_layer may be overridden in __init__
    # by the MODELS_NDPM_CLASSIFIER_* configuration constants.
    block = BasicBlock
    num_blocks = [2, 2, 2, 2]
    norm_layer = nn.InstanceNorm2d

    def __init__(self, params, experts):
        super().__init__(params, experts)
        # Classifiers (.d) of all previously-created experts act as precursors.
        self.precursors = [expert.d for expert in self.experts[1:]]
        first = (len(self.precursors) == 0)
        if (MODELS_NDPM_CLASSIFIER_NUM_BLOCKS is not None):
            num_blocks = MODELS_NDPM_CLASSIFIER_NUM_BLOCKS
        else:
            num_blocks = self.num_blocks
        if (MODELS_NDPM_CLASSIFIER_NORM_LAYER is not None):
            self.norm_layer = getattr(nn, MODELS_NDPM_CLASSIFIER_NORM_LAYER)
        else:
            # NOTE(review): replaces the class default InstanceNorm2d with
            # BatchNorm2d when no norm layer is configured — confirm intended.
            self.norm_layer = nn.BatchNorm2d
        num_classes = n_classes[params.data]
        # nf: channels this expert adds per stage; nf_cat: total channels after
        # concatenating all precursors' features with this expert's.
        nf = (MODELS_NDPM_CLASSIFIER_CLS_NF_BASE if first else MODELS_NDPM_CLASSIFIER_CLS_NF_EXT)
        nf_cat = (MODELS_NDPM_CLASSIFIER_CLS_NF_BASE + (len(self.precursors) * MODELS_NDPM_CLASSIFIER_CLS_NF_EXT))
        self.nf = (MODELS_NDPM_CLASSIFIER_CLS_NF_BASE if first else MODELS_NDPM_CLASSIFIER_CLS_NF_EXT)
        self.nf_cat = nf_cat
        # Stem + four ResNet stages; stage inputs are nf_cat-sized to accept
        # the concatenated precursor features.
        self.layer0 = nn.Sequential(nn.Conv2d(3, (nf * 1), kernel_size=3, stride=1, padding=1, bias=False), self.norm_layer((nf * 1)), nn.ReLU())
        self.layer1 = self._make_layer((nf_cat * 1), (nf * 1), num_blocks[0], stride=1)
        self.layer2 = self._make_layer((nf_cat * 1), (nf * 2), num_blocks[1], stride=2)
        self.layer3 = self._make_layer((nf_cat * 2), (nf * 4), num_blocks[2], stride=2)
        self.layer4 = self._make_layer((nf_cat * 4), (nf * 8), num_blocks[3], stride=2)
        self.predict = nn.Sequential(nn.Linear((nf_cat * 8), num_classes), nn.LogSoftmax(dim=1))
        self.setup_optimizer()

    def _make_layer(self, nf_in, nf_out, num_blocks, stride):
        """Build one ResNet stage of *num_blocks* blocks; the first block may
        change resolution/width and then gets a 1x1 downsample shortcut."""
        norm_layer = self.norm_layer
        block = self.block
        downsample = None
        if ((stride != 1) or (nf_in != nf_out)):
            downsample = nn.Sequential(conv1x1(nf_in, nf_out, stride), norm_layer(nf_out))
        layers = [block(nf_in, nf_out, stride, downsample=downsample, norm_layer=norm_layer)]
        for _ in range(1, num_blocks):
            layers.append(block(nf_out, nf_out, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def forward(self, x, collect=False):
        """Classify x (log-probabilities via the LogSoftmax head).

        When collect=True, also return detached per-stage feature maps so a
        later expert can concatenate them with its own.
        """
        x = maybe_cuda(x)
        if (len(self.precursors) == 0):
            # First expert: a plain ResNet forward pass, no feature sharing.
            h1 = self.layer0(x)
            h2 = self.layer1(h1)
            h3 = self.layer2(h2)
            h4 = self.layer3(h3)
            h5 = self.layer4(h4)
            h5 = F.avg_pool2d(h5, h5.size(2)).view(h5.size(0), (- 1))
            pred = self.predict(h5)
            if collect:
                return ([pred], [h1.detach(), h2.detach(), h3.detach(), h4.detach(), h5.detach()])
            else:
                return pred
        # Later expert: the last precursor yields its stage features (already
        # containing all earlier experts' features), which are concatenated
        # channel-wise with this expert's at every stage.
        (preds, features) = self.precursors[(- 1)](x, collect=True)
        h1 = self.layer0(x)
        h1_cat = torch.cat([features[0], h1], dim=1)
        h2 = self.layer1(h1_cat)
        h2_cat = torch.cat([features[1], h2], dim=1)
        h3 = self.layer2(h2_cat)
        h3_cat = torch.cat([features[2], h3], dim=1)
        h4 = self.layer3(h3_cat)
        h4_cat = torch.cat([features[3], h4], dim=1)
        h5 = self.layer4(h4_cat)
        h5 = F.avg_pool2d(h5, h5.size(2)).view(h5.size(0), (- 1))
        h5_cat = torch.cat([features[4], h5], dim=1)
        pred = self.predict(h5_cat)
        if collect:
            preds.append(pred)
            return (preds, [h1_cat.detach(), h2_cat.detach(), h3_cat.detach(), h4_cat.detach(), h5_cat.detach()])
        else:
            return pred
def write_to_ray(idx, partition, redis_address, redis_password, partition_store_names):
    """Upload one partition's shards into a Ray partition-store actor.

    Prefers a store actor co-located on this node (name ends with the local
    IP); otherwise picks one at random. Empty partitions are still registered
    so downstream consumers see them.

    Returns:
        [(idx, node_ip_suffix_of_store_name, store_name)] for the partition.
    """
    init_ray_if_not(redis_address, redis_password)
    node_ip = ray._private.services.get_node_ip_address()
    # Prefer a store whose name ends with this node's IP (co-located store).
    local_store_name = next((candidate for candidate in partition_store_names if candidate.endswith(node_ip)), None)
    if local_store_name is None:
        local_store_name = random.choice(partition_store_names)
    local_store = ray.get_actor(local_store_name)
    upload_futures = []
    for shard_id, shard in enumerate(partition):
        # Make the store actor the owner so the object survives this worker.
        shard_ref = ray.put(shard, _owner=local_store)
        upload_futures.append(local_store.upload_shards.remote((idx, shard_id), [shard_ref]))
    if not upload_futures:
        # No shards: register an explicitly empty partition.
        empty_ref = ray.put([], _owner=local_store)
        upload_futures.append(local_store.upload_partition.remote(idx, [empty_ref]))
        logger.warning(f'Partition {idx} is empty.')
    # Block until every upload has completed.
    ray.get(upload_futures)
    return [(idx, local_store_name.split(':')[(- 1)], local_store_name)]
class TransFuse_L(nn.Module):
    """TransFuse-L segmentation network: a ResNet-50 CNN branch and a DeiT
    transformer branch fused at three scales by BiFusion blocks.

    forward() returns three prediction maps (fused head, transformer-only
    head, refined fused head), each upsampled toward input resolution.
    """

    def __init__(self, num_classes=1, drop_rate=0.2, normal_init=True, pretrained=False):
        super(TransFuse_L, self).__init__()
        # CNN branch: ResNet-50 truncated after layer3 (fc and layer4 replaced
        # with Identity below).
        self.resnet = resnet50()
        if pretrained:
            # NOTE(review): hard-coded absolute checkpoint path — breaks on
            # any other machine; should come from config or torch.hub.
            self.resnet.load_state_dict(torch.load('/home/chenfei/.cache/torch/hub/checkpoints/resnet50-19c8e357.pth'))
        self.resnet.fc = nn.Identity()
        self.resnet.layer4 = nn.Identity()
        # Transformer branch (DeiT-base variant).
        self.transformer = deit_b(pretrained=pretrained)
        # Transformer-branch decoder upsampling stages.
        self.up1 = Up(in_ch1=768, out_ch=512)
        self.up2 = Up(512, 256)
        # Three prediction heads: fused (x), transformer-only (1), refined (2).
        self.final_x = nn.Sequential(Conv(1024, 256, 1, bn=True, relu=True), Conv(256, 256, 3, bn=True, relu=True), Conv(256, num_classes, 3, bn=False, relu=False))
        self.final_1 = nn.Sequential(Conv(256, 256, 3, bn=True, relu=True), Conv(256, num_classes, 3, bn=False, relu=False))
        self.final_2 = nn.Sequential(Conv(256, 256, 3, bn=True, relu=True), Conv(256, num_classes, 3, bn=False, relu=False))
        # BiFusion blocks merging CNN and transformer features per scale.
        self.up_c = BiFusion_block(ch_1=1024, ch_2=768, r_2=4, ch_int=1024, ch_out=1024, drop_rate=(drop_rate / 2))
        self.up_c_1_1 = BiFusion_block(ch_1=512, ch_2=512, r_2=2, ch_int=512, ch_out=512, drop_rate=(drop_rate / 2))
        self.up_c_1_2 = Up(in_ch1=1024, out_ch=512, in_ch2=512, attn=True)
        self.up_c_2_1 = BiFusion_block(ch_1=256, ch_2=256, r_2=1, ch_int=256, ch_out=256, drop_rate=(drop_rate / 2))
        self.up_c_2_2 = Up(512, 256, 256, attn=True)
        self.drop = nn.Dropout2d(drop_rate)
        if normal_init:
            self.init_weights()

    def forward(self, imgs, labels=None):
        # Transformer branch: token sequence reshaped to a (B, C, 32, 32)
        # feature map (assumes a 32x32 patch grid — TODO confirm input size).
        x_b = self.transformer(imgs)
        x_b = torch.transpose(x_b, 1, 2)
        x_b = x_b.view(x_b.shape[0], (- 1), 32, 32)
        x_b = self.drop(x_b)
        x_b_1 = self.up1(x_b)
        x_b_1 = self.drop(x_b_1)
        x_b_2 = self.up2(x_b_1)
        x_b_2 = self.drop(x_b_2)
        # CNN branch: ResNet stem then layer1..layer3 (coarser each stage).
        x_u = self.resnet.conv1(imgs)
        x_u = self.resnet.bn1(x_u)
        x_u = self.resnet.relu(x_u)
        x_u = self.resnet.maxpool(x_u)
        x_u_2 = self.resnet.layer1(x_u)
        x_u_2 = self.drop(x_u_2)
        x_u_1 = self.resnet.layer2(x_u_2)
        x_u_1 = self.drop(x_u_1)
        x_u = self.resnet.layer3(x_u_1)
        x_u = self.drop(x_u)
        # Fuse the two branches scale by scale, coarsest first, then refine
        # the fused features through attention-gated upsampling.
        x_c = self.up_c(x_u, x_b)
        x_c_1_1 = self.up_c_1_1(x_u_1, x_b_1)
        x_c_1 = self.up_c_1_2(x_c, x_c_1_1)
        x_c_2_1 = self.up_c_2_1(x_u_2, x_b_2)
        x_c_2 = self.up_c_2_2(x_c_1, x_c_2_1)
        # Heads, upsampled back toward the input resolution.
        map_x = F.interpolate(self.final_x(x_c), scale_factor=16, mode='bilinear')
        map_1 = F.interpolate(self.final_1(x_b_2), scale_factor=4, mode='bilinear')
        map_2 = F.interpolate(self.final_2(x_c_2), scale_factor=4, mode='bilinear')
        return (map_x, map_1, map_2)

    def init_weights(self):
        # Initialize only the newly-added modules; the two backbones keep
        # their own (possibly pre-trained) weights.
        self.up1.apply(init_weights)
        self.up2.apply(init_weights)
        self.final_x.apply(init_weights)
        self.final_1.apply(init_weights)
        self.final_2.apply(init_weights)
        self.up_c.apply(init_weights)
        self.up_c_1_1.apply(init_weights)
        self.up_c_1_2.apply(init_weights)
        self.up_c_2_1.apply(init_weights)
        self.up_c_2_2.apply(init_weights)
def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback watching `val_<metric>`.

    Loss-like metrics are minimized; everything else is maximized.
    """
    monitor_mode = 'min' if 'loss' in metric else 'max'
    return EarlyStopping(
        monitor=f'val_{metric}',
        mode=monitor_mode,
        patience=patience,
        verbose=True,
    )
def predict_compression(predictor, inp_batch_json, margin=0):
    """Run a sentence-compression tagger over a batch of sentences.

    A token is retained when its deletion score does not exceed its
    retention score plus *margin* (logit[0] = delete, logit[1] = retain).

    Args:
        predictor: object with predict_batch_json() returning per-sentence
            dicts containing 'tag_logits'.
        inp_batch_json: list of {'sentence': [token, ...]} where each token
            exposes a .text attribute.
        margin: slack added to the retention score before comparing.

    Returns:
        (compressed_sentences, compression_rates): space-joined compressed
        strings and compressed/original length ratios, batch-aligned.
    """
    predictions = predictor.predict_batch_json(inp_batch_json)
    compressed_sentences = []
    compression_rates = []
    for idx, prediction in enumerate(predictions):
        tokens = inp_batch_json[idx]['sentence']
        kept = [
            token.text
            for logit, token in zip(prediction['tag_logits'], tokens)
            if logit[0] <= (logit[1] + margin)
        ]
        compression_rates.append(len(kept) / len(tokens))
        compressed_sentences.append(' '.join(kept))
    return (compressed_sentences, compression_rates)
def get_declarative_equations(model, question, prompt, max_tokens, stop_token, temperature):
    """Query an OpenAI completion model for declarative equations of a word problem.

    The prompt template is filled with *question*; equations are expected in
    the completion wrapped as [[...]]. When found, they are reformatted via
    the peano helpers; otherwise the raw completion text is returned.
    """
    prompt = prompt.format(question=question)
    # NOTE(review): uses the legacy (pre-1.0 SDK) openai.Completion API.
    response = openai.Completion.create(model=model, prompt=prompt, max_tokens=max_tokens, stop=stop_token, temperature=temperature, top_p=1)
    result = response['choices'][0]['text']
    # Non-greedy match on every [[ ... ]] span the model emitted.
    eq_list = re.findall('\\[\\[.*?\\]\\]', result)
    if (len(eq_list) > 0):
        return reformat_equations_from_peano(reformat_incre_equations(eq_list))
    else:
        # No equation markers found: print the raw completion and fall back to it.
        print()
        print(response['choices'][0]['text'])
        return response['choices'][0]['text']
class dataset():
    """Minimal in-memory train/test dataset backed by numpy files on disk.

    Expects `<root>/{train,test}_data` and `<root>/{train,test}_labels` in
    .npy format. Items are (image_tensor, int_label, weight); weights come
    from *example_weight* for training and are an empty list for testing.

    Fix vs. original: numpy.load was fed bare open() handles that were never
    closed; files are now read inside context managers.
    """

    def __init__(self, root=None, train=True, example_weight=None):
        self.root = root
        self.train = train
        # NOTE(review): this transform is never applied in __getitem__.
        self.transform = transforms.ToTensor()
        if self.train:
            train_data_path = os.path.join(root, 'train_data')
            train_labels_path = os.path.join(root, 'train_labels')
            with open(train_data_path, 'rb') as data_file:
                self.train_data = numpy.load(data_file)
            self.train_data = torch.from_numpy(self.train_data.astype('float32'))
            with open(train_labels_path, 'rb') as labels_file:
                self.train_labels = numpy.load(labels_file).astype('int')
            # Per-example training weights, indexed like train_data.
            self.example_weight = example_weight
        else:
            test_data_path = os.path.join(root, 'test_data')
            test_labels_path = os.path.join(root, 'test_labels')
            with open(test_data_path, 'rb') as data_file:
                self.test_data = numpy.load(data_file)
            self.test_data = torch.from_numpy(self.test_data.astype('float32'))
            with open(test_labels_path, 'rb') as labels_file:
                self.test_labels = numpy.load(labels_file).astype('int')

    def __len__(self):
        if self.train:
            return len(self.train_data)
        else:
            return len(self.test_data)

    def __getitem__(self, index):
        if self.train:
            (img, target, weight) = (self.train_data[index], self.train_labels[index], self.example_weight[index])
        else:
            # Test items carry no weight; keep the 3-tuple shape with [].
            (img, target, weight) = (self.test_data[index], self.test_labels[index], [])
        return (img, target, weight)
def convert_cityscapes_instance_only(data_dir, out_dir):
    """Convert Cityscapes gtFine polygon annotations to COCO-style
    instances-only JSON files (one per split) under *out_dir*.

    Only the eight instance classes (person, rider, car, truck, bus, train,
    motorcycle, bicycle) are kept; empty or degenerate contours are skipped.

    Fixes vs. original: the per-image polygon file was read through a leaked
    bare open() (now a context manager), and the repeatedly re-computed
    filename suffix is hoisted out of the directory walk.
    """
    sets = ['gtFine_val', 'gtFine_train', 'gtFine_test']
    ann_dirs = ['gtFine_trainvaltest/gtFine/val', 'gtFine_trainvaltest/gtFine/train', 'gtFine_trainvaltest/gtFine/test']
    json_name = 'instancesonly_filtered_%s.json'
    ends_in = '%s_polygons.json'
    # Image/annotation/category IDs are global across splits so they never collide.
    img_id = 0
    ann_id = 0
    cat_id = 1
    category_dict = {}
    category_instancesonly = ['person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle']
    for (data_set, ann_dir) in zip(sets, ann_dirs):
        print(('Starting %s' % data_set))
        ann_dict = {}
        images = []
        annotations = []
        ann_dir = os.path.join(data_dir, ann_dir)
        # e.g. 'gtFine_polygons.json'; computed once per split instead of per file.
        suffix = (ends_in % data_set.split('_')[0])
        for (root, _, files) in os.walk(ann_dir):
            for filename in files:
                if filename.endswith(suffix):
                    if ((len(images) % 50) == 0):
                        print(('Processed %s images, %s annotations' % (len(images), len(annotations))))
                    # Close the polygon file deterministically.
                    with open(os.path.join(root, filename)) as ann_file:
                        json_ann = json.load(ann_file)
                    image = {}
                    image['id'] = img_id
                    img_id += 1
                    image['width'] = json_ann['imgWidth']
                    image['height'] = json_ann['imgHeight']
                    image['file_name'] = (filename[:(- len(suffix))] + 'leftImg8bit.png')
                    image['seg_file_name'] = (filename[:(- len(suffix))] + ('%s_instanceIds.png' % data_set.split('_')[0]))
                    images.append(image)
                    fullname = os.path.join(root, image['seg_file_name'])
                    objects = cs.instances2dict_with_polygons([fullname], verbose=False)[fullname]
                    for object_cls in objects:
                        if (object_cls not in category_instancesonly):
                            # Only the eight instance classes are exported.
                            continue
                        for obj in objects[object_cls]:
                            if (obj['contours'] == []):
                                print('Warning: empty contours.')
                                continue
                            len_p = [len(p) for p in obj['contours']]
                            if (min(len_p) <= 4):
                                print('Warning: invalid contours.')
                                continue
                            ann = {}
                            ann['id'] = ann_id
                            ann_id += 1
                            ann['image_id'] = image['id']
                            ann['segmentation'] = obj['contours']
                            if (object_cls not in category_dict):
                                # First occurrence of this class: assign the next category id.
                                category_dict[object_cls] = cat_id
                                cat_id += 1
                            ann['category_id'] = category_dict[object_cls]
                            ann['iscrowd'] = 0
                            ann['area'] = obj['pixelCount']
                            ann['bbox'] = bboxs_util.xyxy_to_xywh(segms_util.polys_to_boxes([ann['segmentation']])).tolist()[0]
                            annotations.append(ann)
        ann_dict['images'] = images
        categories = [{'id': category_dict[name], 'name': name} for name in category_dict]
        ann_dict['categories'] = categories
        ann_dict['annotations'] = annotations
        print(('Num categories: %s' % len(categories)))
        print(('Num images: %s' % len(images)))
        print(('Num annotations: %s' % len(annotations)))
        with open(os.path.join(out_dir, (json_name % data_set)), 'w') as outfile:
            outfile.write(json.dumps(ann_dict))
def get_tasks(args, task_names, max_seq_len):
    """Instantiate the task objects named in *task_names*.

    Each NAME2INFO entry maps a task name to (task_class, data_path); the
    class is called as task_class(args, data_path, max_seq_len, name).

    Raises:
        ValueError: if a requested name is not registered in NAME2INFO.
            (The original used `assert`, which is silently stripped when
            Python runs with -O; an explicit exception is safer and names
            the offending task.)
    """
    tasks = []
    for name in task_names:
        if name not in NAME2INFO:
            raise ValueError('Task not found: %s' % name)
        task = NAME2INFO[name][0](args, NAME2INFO[name][1], max_seq_len, name)
        tasks.append(task)
    logging.info('\tFinished loading tasks: %s.', ' '.join([task.name for task in tasks]))
    return tasks
def EpochModelCheckpoint(*args, **kwargs):
    """Build a Lightning ModelCheckpoint that saves at train-epoch end
    instead of validation end.

    The stock callback checkpoints in on_validation_end; here that hook is
    disabled and its original logic is re-routed to on_train_epoch_end.
    on_save_checkpoint is additionally wrapped to dump the module's config
    (when present) next to the checkpoints.
    """
    callback = pl.callbacks.ModelCheckpoint(*args, **kwargs)
    # Keep references to the original hooks before monkey-patching them.
    _on_validation_end = callback.on_validation_end
    _on_save_checkpoint = callback.on_save_checkpoint

    def on_validation_end(*args, **kwargs):
        # Disabled: checkpointing now happens at train-epoch end instead.
        return

    def on_save_checkpoint(trainer, module, *args):
        # Persist the module's config alongside the checkpoints, then defer
        # to the original hook.
        if hasattr(module, 'config'):
            with fsspec.open(os.path.join(callback.dirpath, 'config.json'), 'w+') as f:
                json.dump(module.config.asdict(), f)
                f.flush()
        return _on_save_checkpoint(trainer, module, *args)

    def on_train_epoch_end(trainer, module, _):
        # Run the original validation-end logic (which performs the save).
        _on_validation_end(trainer, module)
    callback.on_save_checkpoint = on_save_checkpoint
    callback.on_validation_end = on_validation_end
    callback.on_train_epoch_end = on_train_epoch_end
    return callback
def conv2d(in_channels, out_channels, kernel_size=3, stride=1, dilation=1, groups=1):
    """Conv-BatchNorm-LeakyReLU(0.2) block with 'same'-style padding.

    The padding is derived from the kernel size and dilation so that a
    stride-1 block preserves spatial dimensions for any odd kernel size.
    The original hard-coded `padding=dilation`, which is only correct for
    kernel_size=3; for kernel_size=3 the formula below yields the identical
    value, so existing callers are unaffected.

    Args:
        in_channels: input channel count.
        out_channels: output channel count.
        kernel_size: square kernel edge (odd sizes give exact 'same' padding).
        stride: convolution stride.
        dilation: convolution dilation.
        groups: grouped-convolution group count.

    Returns:
        nn.Sequential(Conv2d(bias=False), BatchNorm2d, LeakyReLU(0.2)).
    """
    # 'same' padding for odd kernels: half the dilated kernel extent.
    padding = dilation * (kernel_size - 1) // 2
    return nn.Sequential(
        nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
                  padding=padding, dilation=dilation, bias=False, groups=groups),
        nn.BatchNorm2d(out_channels),
        nn.LeakyReLU(0.2, inplace=True),
    )
class IP(nn.Module):
    """Identity-preserving loss: MSE between LightCNN face-recognition
    outputs (logits and features) of a super-resolved image and its
    high-resolution target.
    """

    def __init__(self, args):
        super(IP, self).__init__()
        # Pre-trained LightCNN-29 v2 recognizer used as a fixed feature extractor.
        self.model_recognition = LightCNN_29Layers_v2(num_classes=346)
        self.model_recognition = torch.nn.DataParallel(self.model_recognition).cuda()
        # NOTE(review): checkpoint path is relative to the CWD — fragile.
        checkpoint = torch.load('lightCNN_pretrain.pth.tar')
        self.model_recognition.load_state_dict(checkpoint['state_dict'])
        self.submean = common.MeanShift(args.rgb_range)
        # Freeze everything: this module only supplies a loss signal.
        for p in self.parameters():
            p.requires_grad = False

    def forward(self, sr, hr):
        """Return MSE(features) + MSE(logits) between SR and HR recognitions.

        Args:
            sr: sequence whose first element is the super-resolved batch.
            hr: sequence whose first element is the high-resolution target batch.
        """
        def _forward(x):
            # Mean-shift into the recognizer's input range, then run LightCNN,
            # which returns a (logits, features) pair.
            x = self.submean(x)
            x = self.model_recognition(x)
            return x
        (out_sr, feat_sr) = _forward(sr[0])
        # The HR target is a constant reference: no gradients through it.
        with torch.no_grad():
            (out_hr, feat_hr) = _forward(hr[0].detach())
        loss = (F.mse_loss(feat_sr, feat_hr) + F.mse_loss(out_sr, out_hr))
        return loss
def analyse_obs_spaces(env_obs_space, model_obs_space):
    """Print a diagnostic report comparing two observation-space dicts.

    Lists the keys present only in the env space, only in the model space,
    and the keys present in both whose values disagree. Purely informational;
    returns None.
    """
    env_only = [key for key in env_obs_space if key not in model_obs_space]
    mismatched = [
        (key, model_obs_space[key], env_obs_space[key])
        for key in env_obs_space
        if key in model_obs_space and model_obs_space[key] != env_obs_space[key]
    ]
    model_only = [key for key in model_obs_space if key not in env_obs_space]
    separator = '#' * 15
    print('[Error] Your model failed to load due to incompatible observation spaces!')
    print('This is a list of all elements that were only found in the env observation space:')
    print(separator)
    for key in env_only:
        print(key)
    print(separator)
    print("This is a list of all elements that were only found in the model's observation space:")
    print(separator)
    for key in model_only:
        print(key)
    print(separator)
    print('This is a list of all elements that were found in both, but have mismatching definitions:')
    print(separator)
    for key, model_value, env_value in mismatched:
        print('Entry: ', key)
        print('Value in model: ', model_value)
        print('Value in env: ', env_value)
    print(separator)
class BufferList(torch.nn.Module):
    """Holds tensors as registered buffers (so they follow .to()/.cuda() and
    appear in the state_dict) while exposing list-style indexed access.
    """

    def __init__(self, buffers):
        super(BufferList, self).__init__()
        # NOTE(review): this attribute shadows nn.Module.buffers(); callers of
        # module.buffers() on this class get this list, not the usual iterator.
        self.buffers = []
        for (i, b) in enumerate(buffers):
            # Register each tensor under a stable name, then keep a direct
            # reference for cheap indexed access.
            name = '_buffer_{}'.format(i)
            self.register_buffer(name, b)
            self.buffers.append(getattr(self, name))

    def __repr__(self):
        return str(self.buffers)

    def __getitem__(self, i):
        return self.buffers[i]

    def dtype(self):
        # dtype of the first buffer; presumably all buffers share one dtype —
        # TODO confirm at the call sites.
        return self.buffers[0].dtype
def plot_acc(history):
    """Plot training vs. validation accuracy curves from a Keras History object."""
    for series in ('acc', 'val_acc'):
        plt.plot(history.history[series])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc=0)
def structure_encoding(atoms, size=55):
    """Count atoms by atomic number into a fixed-length histogram.

    Generalization vs. original: the histogram length was hard-coded to 55;
    it is now a parameter (default unchanged, so existing callers are
    unaffected) so molecules with heavier elements can be encoded.

    Args:
        atoms: iterable of RDKit-style atoms exposing GetAtomicNum().
        size: length of the returned histogram (default 55).

    Returns:
        list[int] of length *size* where index z holds the count of atoms
        with atomic number z.

    Raises:
        IndexError: if an atom's atomic number is >= size (same behavior as
            the original for numbers >= 55).
    """
    counts = [0] * size
    for atom in atoms:
        counts[atom.GetAtomicNum()] += 1
    return counts
class VKITTI(Dataset):
    """Virtual KITTI depth-estimation test set.

    Loads RGB images from `<root>/test_color/*.png` and depth maps from the
    parallel `test_depth` directory (re-read with OpenCV to keep the raw
    depth values).

    Fixes vs. original: the `do_kb_crop` constructor argument was ignored
    (hard-coded to True), and the debug print had a typo ('dpeth').
    """

    def __init__(self, data_dir_root, do_kb_crop=True):
        import glob
        self.image_files = glob.glob(os.path.join(data_dir_root, 'test_color', '*.png'))
        self.depth_files = [r.replace('test_color', 'test_depth') for r in self.image_files]
        # FIX: honor the constructor argument instead of hard-coding True.
        self.do_kb_crop = do_kb_crop
        self.transform = ToTensor()

    def __getitem__(self, idx):
        image_path = self.image_files[idx]
        depth_path = self.depth_files[idx]
        image = Image.open(image_path)
        # Depth is re-read with OpenCV so the raw (any-depth) values survive.
        depth = Image.open(depth_path)
        depth = cv2.imread(depth_path, (cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH))
        print('depth min max', depth.min(), depth.max())
        # NOTE: deliberately disabled via 'and False' — at this point `depth`
        # is a numpy array with no .crop(), so enabling the branch as written
        # would crash; kept verbatim to preserve behavior.
        if (self.do_kb_crop and False):
            height = image.height
            width = image.width
            top_margin = int((height - 352))
            left_margin = int(((width - 1216) / 2))
            depth = depth.crop((left_margin, top_margin, (left_margin + 1216), (top_margin + 352)))
            image = image.crop((left_margin, top_margin, (left_margin + 1216), (top_margin + 352)))
        image = (np.asarray(image, dtype=np.float32) / 255.0)
        # Add a trailing channel axis so depth is HxWx1.
        depth = depth[(..., None)]
        sample = dict(image=image, depth=depth)
        sample = self.transform(sample)
        if (idx == 0):
            print(sample['image'].shape)
        return sample

    def __len__(self):
        return len(self.image_files)
class DAPPM(nn.Module):
    """Deep Aggregation Pyramid Pooling Module (DDRNet-style).

    The input is pooled at several scales, each pooled branch is reduced to
    ``branch_planes`` channels, upsampled back, refined against the previous
    branch with a 3x3 conv, and the concatenation is fused with a shortcut.
    """

    def __init__(self, inplanes, branch_planes, outplanes, BatchNorm=nn.BatchNorm2d):
        super(DAPPM, self).__init__()
        bn_mom = 0.1

        def bn_act_conv(cin, cout, kernel=1, pad=0):
            # shared BN -> ReLU -> conv building block
            return [BatchNorm(cin, momentum=bn_mom),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(cin, cout, kernel_size=kernel, padding=pad, bias=False)]

        self.scale1 = nn.Sequential(nn.AvgPool2d(kernel_size=5, stride=2, padding=2), *bn_act_conv(inplanes, branch_planes))
        self.scale2 = nn.Sequential(nn.AvgPool2d(kernel_size=9, stride=4, padding=4), *bn_act_conv(inplanes, branch_planes))
        self.scale3 = nn.Sequential(nn.AvgPool2d(kernel_size=17, stride=8, padding=8), *bn_act_conv(inplanes, branch_planes))
        self.scale4 = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)), *bn_act_conv(inplanes, branch_planes))
        self.scale0 = nn.Sequential(*bn_act_conv(inplanes, branch_planes))
        self.process1 = nn.Sequential(*bn_act_conv(branch_planes, branch_planes, kernel=3, pad=1))
        self.process2 = nn.Sequential(*bn_act_conv(branch_planes, branch_planes, kernel=3, pad=1))
        self.process3 = nn.Sequential(*bn_act_conv(branch_planes, branch_planes, kernel=3, pad=1))
        self.process4 = nn.Sequential(*bn_act_conv(branch_planes, branch_planes, kernel=3, pad=1))
        self.compression = nn.Sequential(*bn_act_conv(branch_planes * 5, outplanes))
        self.shortcut = nn.Sequential(*bn_act_conv(inplanes, outplanes))

    def forward(self, x):
        height = x.shape[-2]
        width = x.shape[-1]
        size = [height, width]

        def up(t):
            # upsample a pooled branch back to the input resolution
            return F.interpolate(t, size=size, mode='bilinear', align_corners=algc)

        feats = [self.scale0(x)]
        scales = (self.scale1, self.scale2, self.scale3, self.scale4)
        procs = (self.process1, self.process2, self.process3, self.process4)
        for scale, process in zip(scales, procs):
            feats.append(process(up(scale(x)) + feats[-1]))
        return self.compression(torch.cat(feats, 1)) + self.shortcut(x)
def get_tl_line_values_gt(line, LTRB=True, withTranscription=False, withConfidence=False, imWidth=0, imHeight=0):
    """Parse one ground-truth line into ``(points, confidence, transcription)``.

    Only the polygon-with-transcription format (``x1,y1,...,####text``) is
    implemented; every other field combination raises.

    Args:
        line: raw GT line, e.g. ``"0,0,10,0,10,10,0,10,####word"``.
        LTRB: if True, expect LTRB boxes (not implemented here).
        withTranscription / withConfidence: which optional fields to parse.
        imWidth, imHeight: if both positive, validate points are in bounds.

    Returns:
        Tuple ``(points, confidence, transcription)`` where ``points`` is a
        flat ``[x1, y1, x2, y2, ...]`` float list.

    Raises:
        Exception: for the LTRB format.
        NotImplementedError: for unsupported field combinations (bug fix:
            previously a plain string was raised, which is a TypeError in
            Python 3).
    """
    confidence = 0.0
    transcription = ''
    points = []
    if LTRB:
        raise Exception('Not implemented.')
    else:
        if withTranscription and withConfidence:
            raise NotImplementedError('not implemented')
        elif withConfidence:
            raise NotImplementedError('not implemented')
        elif withTranscription:
            ptr = line.strip().split(',####')
            cors = ptr[0].split(',')
            recs = ptr[1].strip()
            assert (len(cors) % 2) == 0, 'num cors should be even.'
            points = [float(ic) for ic in cors]
        else:
            raise NotImplementedError('not implemented')
    validate_clockwise_points(points)
    if (imWidth > 0) and (imHeight > 0):
        for ip in range(0, len(points), 2):
            validate_point_inside_bounds(points[ip], points[ip + 1], imWidth, imHeight)
    if withConfidence:
        # unreachable given the raises above; kept for parity with callers
        try:
            confidence = 1.0
        except ValueError:
            raise Exception('Confidence value must be a float')
    if withTranscription:
        transcription = recs
        # strip surrounding quotes and unescape \\ and \"
        m2 = re.match('^\\s*\\"(.*)\\"\\s*$', transcription)
        if m2 is not None:
            transcription = m2.group(1).replace('\\\\', '\\').replace('\\"', '"')
    return (points, confidence, transcription)
@torch.no_grad()
def concat_all_gather(tensor):
    """Gather ``tensor`` from every distributed worker and concatenate.

    Uses ``torch.distributed`` when horovod is not initialized, otherwise
    horovod's allgather. No gradient flows through the gathered result
    (the mangled ``_grad()`` line was the residue of this decorator).

    Args:
        tensor: tensor with identical shape on every rank.

    Returns:
        Tensor of shape ``(world_size * batch, ...)``; the input itself
        when running single-process.
    """
    if get_mpi_size() == 1:
        return tensor
    if not is_hvd_initialized():
        tensors_gather = [torch.ones_like(tensor) for _ in range(get_mpi_size())]
        torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
        output = torch.cat(tensors_gather, dim=0)
        return output
    else:
        # bug fix: ``import horovod as hvd`` does not import the ``torch``
        # submodule, so ``hvd.torch`` could AttributeError; import it directly.
        import horovod.torch as hvd
        output = hvd.allgather(tensor)
        return output
class LinearWarmUpScheduler(LRScheduler):
    """Linear warmup followed by linear decay to zero.

    ``warmup`` is the fraction of ``total_steps`` spent ramping the LR from
    0 up to the base value; afterwards the LR decays linearly, reaching 0
    at ``total_steps``.
    """

    def __init__(self, optimizer, warmup, total_steps, last_epoch=(- 1)):
        self.warmup = warmup
        self.total_steps = total_steps
        super(LinearWarmUpScheduler, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        frac = self.last_epoch / self.total_steps
        if frac < self.warmup:
            scale = frac / self.warmup
        else:
            # linear decay: 1 at frac == warmup, 0 at frac == 1, floored at 0
            scale = max((frac - 1.0) / (self.warmup - 1.0), 0.0)
        return [base_lr * scale for base_lr in self.base_lrs]
class DesiTileFileHandler(DesiDataFileHandler):
    """Reader for DESI tile-based spectra FITS files.

    Matches each file's (TILEID, PETAL_LOC, night) against the catalogue and
    formats the B/R/Z spectra for the downstream analysis.
    """

    def __init__(self, analysis_type, use_non_coadded_spectra, logger, input_directory):
        # input_directory is inspected for 'cumulative' to decide how the
        # night is encoded in filenames and which catalogue column to match
        self.input_directory = input_directory
        super().__init__(analysis_type, use_non_coadded_spectra, logger)

    def read_file(self, filename, catalogue):
        """Read one tile file.

        Args:
            filename: path of the FITS file to read.
            catalogue: table with TILEID / PETAL_LOC / (LAST)NIGHT columns.

        Returns:
            ``(forests_by_targetid, num_data)``; empty dict and 0 when the
            file cannot be read.

        Raises:
            DataError: if 'PK 1D' analysis is requested but a band has no
                RESOLUTION HDU.
        """
        try:
            hdul = fitsio.FITS(filename)
        except IOError:
            # bug fix: restore the filename in the warning (was a corrupted
            # '(unknown)' placeholder)
            self.logger.warning(f'Error reading file {filename}. Ignoring file')
            return ({}, 0)
        fibermap = hdul['FIBERMAP'].read()
        ra = fibermap['TARGET_RA']
        dec = fibermap['TARGET_DEC']
        tile_spec = fibermap['TILEID'][0]
        try:
            if ('cumulative' in self.input_directory):
                night_spec = int(filename.split('thru')[(- 1)].split('.')[0])
            else:
                night_spec = int(filename.split('-')[(- 1)].split('.')[0])
        except ValueError:
            self.logger.warning(f'In file {filename}, error reading night. Ignoring file')
            return ({}, 0)
        colors = ['B', 'R', 'Z']
        ra = np.radians(ra)
        dec = np.radians(dec)
        petal_spec = fibermap['PETAL_LOC'][0]
        spectrographs_data = {}
        for color in colors:
            try:
                spec = {}
                spec['WAVELENGTH'] = hdul[f'{color}_WAVELENGTH'].read()
                spec['FLUX'] = hdul[f'{color}_FLUX'].read()
                spec['IVAR'] = (hdul[f'{color}_IVAR'].read() * (hdul[f'{color}_MASK'].read() == 0))
                if (self.analysis_type == 'PK 1D'):
                    if (f'{color}_RESOLUTION' in hdul):
                        spec['RESO'] = hdul[f'{color}_RESOLUTION'].read()
                    else:
                        raise DataError(f"Error while reading {color} band from {filename}. Analysis type is 'PK 1D', but file does not contain HDU '{color}_RESOLUTION' ")
                # zero-out NaN pixels so they carry no weight downstream
                w = (np.isnan(spec['FLUX']) | np.isnan(spec['IVAR']))
                for key in ['FLUX', 'IVAR']:
                    spec[key][w] = 0.0
                spectrographs_data[color] = spec
            except OSError:
                self.logger.warning(f'Error while reading {color} band from {filename}. Ignoring color.')
        hdul.close()
        if ('cumulative' in self.input_directory):
            select = (((catalogue['TILEID'] == tile_spec) & (catalogue['PETAL_LOC'] == petal_spec)) & (catalogue['LASTNIGHT'] == night_spec))
        else:
            select = (((catalogue['TILEID'] == tile_spec) & (catalogue['PETAL_LOC'] == petal_spec)) & (catalogue['NIGHT'] == night_spec))
        self.logger.progress(f'This is tile {tile_spec}, petal {petal_spec}, night {night_spec}')
        (forests_by_targetid, num_data) = self.format_data(catalogue[select], spectrographs_data, fibermap['TARGETID'])
        return (forests_by_targetid, num_data)
def seed_all(seed):
    """Seed python, numpy and torch (all CUDA devices) RNGs for reproducibility."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
def clip_norm(g, c, n):
    """Rescale gradient ``g`` to norm ``c`` whenever its norm ``n`` reaches ``c``.

    A non-positive ``c`` disables clipping. Symbolic (Theano) version: the
    comparison is embedded in the graph via ``T.switch``.
    """
    if c <= 0:
        return g
    return T.switch(T.ge(n, c), g * c / n, g)
def sim_gcn(reps):
    """Pairwise cosine-similarity matrix of the rows of ``reps``.

    Args:
        reps: (n, d) array of node representations.

    Returns:
        (n, n) cosine-similarity matrix.
    """
    # dead code removed: an all-zero (n, n) matrix was allocated and discarded
    return cosine_similarity(reps, reps)
class OpWeights(JsonSerializer):
    """Serializable view of an op's weight quantization settings."""

    def __init__(self, weights_data: dict):
        super().__init__()
        # missing fields default to None
        self.dtype: str = weights_data.get('dtype')
        self.granularity: str = weights_data.get('granularity')
def ProcessLineForNonScoredWords(a):
    """Relabel one ctm-edits line so non-scored words do not count as errors.

    ``a`` is a split ctm-edits line of exactly 8 fields whose last four are
    duration, hyp_word, ..., ref_word, edit_type. Insertions and
    substitutions involving only non-scored words are rewritten as 'fix';
    zero-duration deletions of non-scored words are dropped. Updates the
    module-level counters ``num_lines`` / ``num_correct_lines`` and the
    ``ref_change_stats`` tallies.

    Returns:
        The (possibly modified) 8-field list, or [] when the line should be
        removed entirely.

    Raises:
        RuntimeError: if the line is malformed (bug fix: previously raised
            bare, losing the message and the original cause).
    """
    global num_lines, num_correct_lines, ref_change_stats
    try:
        assert len(a) == 8
        num_lines += 1
        duration = a[3]
        hyp_word = a[4]
        ref_word = a[6]
        edit_type = a[7]
        if edit_type == 'ins':
            assert ref_word == '<eps>'
            if hyp_word in non_scored_words:
                ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
                ref_word = hyp_word
                edit_type = 'fix'
        elif edit_type == 'del':
            assert hyp_word == '<eps>' and float(duration) == 0.0
            if ref_word in non_scored_words:
                ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
                return []
        elif edit_type == 'sub':
            assert hyp_word != '<eps>'
            if hyp_word in non_scored_words and ref_word in non_scored_words:
                ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
                ref_word = hyp_word
                edit_type = 'fix'
        else:
            assert edit_type == 'cor' or edit_type == 'sil'
            num_correct_lines += 1
        a[4] = hyp_word
        a[6] = ref_word
        a[7] = edit_type
        return a
    except Exception as err:
        logger.error('bad line in ctm-edits input: {0}'.format(a))
        # carry the message and chain the original failure for debuggability
        raise RuntimeError('bad line in ctm-edits input: {0}'.format(a)) from err
class ImbalanceSVHN(torchvision.datasets.SVHN):
    """SVHN variant with an artificially imbalanced (long-tailed) class split.

    Per-class sample counts follow an exponential profile ('exp'), a
    two-level 'step' profile, or stay balanced for any other imb_type.
    """

    cls_num = 10

    def __init__(self, root, imb_type='exp', imb_factor=0.01, rand_number=0, split='train', transform=None, target_transform=None, download=False):
        super(ImbalanceSVHN, self).__init__(root, split, transform, target_transform, download)
        # fixed seed so the subsampled dataset is reproducible
        np.random.seed(rand_number)
        counts = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
        self.gen_imbalanced_data(counts)

    def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
        """Return the target number of images for each of ``cls_num`` classes."""
        img_max = 1000
        if imb_type == 'exp':
            return [int(img_max * (imb_factor ** (i / (cls_num - 1.0)))) for i in range(cls_num)]
        if imb_type == 'step':
            head = [int(img_max)] * (cls_num // 2)
            tail = [int(img_max * imb_factor)] * (cls_num // 2)
            return head + tail
        return [int(img_max)] * cls_num

    def gen_imbalanced_data(self, img_num_per_cls):
        """Subsample ``self.data`` / ``self.labels`` to the requested counts in place."""
        new_data = []
        new_targets = []
        targets_np = np.array(self.labels, dtype=np.int64)
        classes = np.unique(targets_np)
        # rotate so the first label is processed last (SVHN label layout quirk)
        classes = np.concatenate([classes[1:], classes[:1]], axis=0)
        self.num_per_cls_dict = dict()
        for the_class, the_img_num in zip(classes, img_num_per_cls):
            self.num_per_cls_dict[the_class] = the_img_num
            idx = np.where(targets_np == the_class)[0]
            print(f'Class {the_class}: {len(idx)}')
            np.random.shuffle(idx)
            chosen = idx[:the_img_num]
            new_data.append(self.data[chosen, ...])
            new_targets.extend([the_class] * the_img_num)
        new_data = np.vstack(new_data)
        self.data = new_data
        self.labels = new_targets
        assert new_data.shape[0] == len(new_targets), 'Length of data & labels do not match!'

    def get_cls_num_list(self):
        """Per-class sample counts, ordered by class index 0..cls_num-1."""
        return [self.num_per_cls_dict[i] for i in range(self.cls_num)]
class VideoRenderer(mp.Process):
    """Writes rendered frames to a video file and/or a display window.

    Can run inline or as a separate process (``separate_process=True``), in
    which case work items arrive over a multiprocessing queue: a 4-item list
    starts a task, a list of tensors is a frame batch, and a bool finalizes
    the current task.

    With ``verbose == 0`` the rendered crop is composited back into the
    original full frames, which are read in lockstep from the input video.
    """

    def __init__(self, display=False, verbose=0, verbose_size=None, output_crop=False, resolution=256, crop_scale=1.2, encoder_codec='mp4v', separate_process=False):
        super(VideoRenderer, self).__init__()
        self._display = display            # show each frame via cv2.imshow
        self._verbose = verbose            # 0: composite into the full frame
        self._verbose_size = verbose_size  # explicit (w, h) for verbose output
        self._output_crop = output_crop    # write the crop instead of the full frame
        self._resolution = resolution      # square crop resolution
        self._crop_scale = crop_scale      # bbox scale factor for cropping
        self._running = True
        self._input_queue = mp.Queue()
        self._reply_queue = mp.Queue()
        self._fourcc = cv2.VideoWriter_fourcc(*encoder_codec)
        self._separate_process = separate_process
        # per-task state, populated by _init_task and cleared by _finalize_task
        self._in_vid = None
        self._out_vid = None
        self._seq = None
        self._in_vid_path = None
        self._total_frames = None
        self._frame_count = 0

    def init(self, in_vid_path, seq, out_vid_path=None, **kwargs):
        """Start a rendering task (queued when running as a separate process)."""
        if self._separate_process:
            self._input_queue.put([in_vid_path, seq, out_vid_path, kwargs])
        else:
            self._init_task(in_vid_path, seq, out_vid_path, kwargs)

    def write(self, *args):
        """Submit a batch of tensors to render; tensors are moved to CPU first."""
        if self._separate_process:
            self._input_queue.put([a.cpu() for a in args])
        else:
            self._write_batch([a.cpu() for a in args])

    def finalize(self):
        """Signal the end of the current task (True sentinel in process mode)."""
        if self._separate_process:
            self._input_queue.put(True)
        else:
            self._finalize_task()

    def wait_until_finished(self):
        """Block until the worker acknowledges finalization (inline: no-op)."""
        if self._separate_process:
            return self._reply_queue.get()
        else:
            return True

    def on_render(self, *args):
        """Hook: turn the first tensor of a batch item into a BGR frame."""
        return tensor2bgr(args[0])

    def start(self):
        # only actually fork when operating as a separate process
        if self._separate_process:
            super(VideoRenderer, self).start()

    def kill(self):
        if self._separate_process:
            super(VideoRenderer, self).kill()

    def run(self):
        """Worker loop: consume init / batch / finalize messages from the queue."""
        while self._running:
            task = self._input_queue.get()
            # NOTE(review): this branch runs before the bool check below, so a
            # finalize sentinel arriving with no active task would be treated
            # as an init task — presumably callers never do that; confirm.
            if (self._in_vid is None):
                self._init_task(*task[:3], task[3])
                continue
            if isinstance(task, bool):
                self._finalize_task()
                self._reply_queue.put(True)
                continue
            self._write_batch(task)

    def _render(self, render_bgr, full_frame_bgr=None, bbox=None):
        """Composite (optional), write and/or display a single BGR frame."""
        if ((self._verbose == 0) and (not self._output_crop) and (full_frame_bgr is not None)):
            render_bgr = crop2img(full_frame_bgr, render_bgr, bbox)
        if (self._out_vid is not None):
            self._out_vid.write(render_bgr)
        if self._display:
            cv2.imshow('render', render_bgr)
            # allow quitting the display loop with 'q'
            if ((cv2.waitKey(1) & 255) == ord('q')):
                self._running = False

    def _init_task(self, in_vid_path, seq, out_vid_path, additional_attributes):
        """Open the input/output videos and pass through untouched leading frames."""
        (self._in_vid_path, self._seq) = (in_vid_path, seq)
        self._frame_count = 0
        # attach any extra keyword attributes supplied via init(**kwargs)
        for (attr_name, attr_val) in additional_attributes.items():
            setattr(self, attr_name, attr_val)
        self._in_vid = cv2.VideoCapture(self._in_vid_path)
        assert self._in_vid.isOpened(), f'Failed to open video: "{self._in_vid_path}"'
        in_total_frames = int(self._in_vid.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = self._in_vid.get(cv2.CAP_PROP_FPS)
        in_vid_width = int(self._in_vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        in_vid_height = int(self._in_vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self._total_frames = (in_total_frames if (self._verbose == 0) else len(self._seq))
        if (out_vid_path is not None):
            # pick the output size according to the rendering mode
            out_size = (in_vid_width, in_vid_height)
            if ((self._verbose <= 0) and self._output_crop):
                out_size = (self._resolution, self._resolution)
            elif (self._verbose_size is not None):
                out_size = self._verbose_size
            self._out_vid = cv2.VideoWriter(out_vid_path, self._fourcc, fps, out_size)
        if (self._verbose == 0):
            # copy the frames preceding the detected sequence unchanged
            for i in range(self._seq.start_index):
                (ret, frame_bgr) = self._in_vid.read()
                assert (frame_bgr is not None), f'Failed to read frame {i} from input video: "{self._in_vid_path}"'
                self._render(frame_bgr)
                self._frame_count += 1

    def _write_batch(self, tensors):
        """Render every item of a tensor batch, compositing into full frames if needed."""
        batch_size = tensors[0].shape[0]
        for b in range(batch_size):
            (full_frame_bgr, bbox) = (None, None)
            if ((self._verbose == 0) and (not self._output_crop)):
                (ret, full_frame_bgr) = self._in_vid.read()
                assert (full_frame_bgr is not None), f'Failed to read frame {self._frame_count} from input video: "{self._in_vid_path}"'
                # detection for this frame, converted from corners to (x, y, w, h)
                det = self._seq[(self._frame_count - self._seq.start_index)]
                bbox = np.concatenate((det[:2], (det[2:] - det[:2])))
                bbox = scale_bbox(bbox, self._crop_scale)
            render_bgr = self.on_render(*[t[b] for t in tensors])
            self._render(render_bgr, full_frame_bgr, bbox)
            self._frame_count += 1

    def _finalize_task(self):
        """Flush trailing frames, release the videos and reset per-task state."""
        if ((self._verbose == 0) and (self._frame_count >= (self._seq.start_index + len(self._seq)))):
            # copy the frames following the detected sequence unchanged
            for i in range((self._seq.start_index + len(self._seq)), self._total_frames):
                (ret, frame_bgr) = self._in_vid.read()
                assert (frame_bgr is not None), f'Failed to read frame {i} from input video: "{self._in_vid_path}"'
                self._render(frame_bgr)
                self._frame_count += 1
        self._in_vid.release()
        self._out_vid.release()
        self._in_vid = None
        self._out_vid = None
        self._seq = None
        self._in_vid_path = None
        self._total_frames = None
        self._frame_count = 0
def sample_ddpg_params(trial):
    """Draw one DDPG hyper-parameter configuration from an optuna trial.

    Returns a dict ready for the DDPG constructor; noise settings go under
    'param_noise' or 'action_noise' depending on the sampled noise type.
    """
    gamma = trial.suggest_categorical('gamma', [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])
    learning_rate = trial.suggest_loguniform('lr', 1e-05, 1)
    batch_size = trial.suggest_categorical('batch_size', [16, 32, 64, 128, 256])
    buffer_size = trial.suggest_categorical('memory_limit', [int(10000.0), int(100000.0), int(1000000.0)])
    noise_type = trial.suggest_categorical('noise_type', ['ornstein-uhlenbeck', 'normal', 'adaptive-param'])
    noise_std = trial.suggest_uniform('noise_std', 0, 1)
    normalize_observations = trial.suggest_categorical('normalize_observations', [True, False])
    normalize_returns = trial.suggest_categorical('normalize_returns', [True, False])
    hyperparams = {
        'gamma': gamma,
        'actor_lr': learning_rate,   # actor and critic share the sampled LR
        'critic_lr': learning_rate,
        'batch_size': batch_size,
        'memory_limit': buffer_size,
        'normalize_observations': normalize_observations,
        'normalize_returns': normalize_returns,
    }
    if noise_type == 'adaptive-param':
        hyperparams['param_noise'] = AdaptiveParamNoiseSpec(initial_stddev=noise_std, desired_action_stddev=noise_std)
        # adaptive parameter noise requires layer normalization in the policy
        hyperparams['policy_kwargs'] = dict(layer_norm=True)
    else:
        mu = np.zeros(trial.n_actions)
        sigma = noise_std * np.ones(trial.n_actions)
        if noise_type == 'normal':
            hyperparams['action_noise'] = NormalActionNoise(mean=mu, sigma=sigma)
        elif noise_type == 'ornstein-uhlenbeck':
            hyperparams['action_noise'] = OrnsteinUhlenbeckActionNoise(mean=mu, sigma=sigma)
    return hyperparams
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    """Multi-step LR decay with an initial warmup phase.

    During the first ``warmup_iters`` epochs the base LR is scaled by a
    warmup factor; afterwards it is multiplied by ``gamma`` once per
    milestone passed.
    """

    def __init__(self, optimizer, milestones, gamma=0.1, warmup_factor=(1.0 / 3), warmup_iters=500, warmup_method='linear', last_epoch=(- 1)):
        if list(milestones) != sorted(milestones):
            raise ValueError('Milestones should be a list of increasing integers. Got {}', milestones)
        if warmup_method not in ('constant', 'linear', 'none'):
            raise ValueError("Only 'constant' or 'linear' warmup_method acceptedgot {}".format(warmup_method))
        self.milestones = milestones
        self.gamma = gamma
        # NOTE(review): warmup_factor is stored but unused by the 'linear'
        # branch below — presumably intentional in this variant; confirm.
        self.warmup_factor = warmup_factor
        self.warmup_iters = warmup_iters
        self.warmup_method = warmup_method
        super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        scale = 1
        if self.last_epoch < self.warmup_iters:
            if self.warmup_method == 'linear':
                scale = (1 + self.last_epoch) / self.warmup_iters
            elif self.warmup_method == 'constant':
                scale = 1
        decay = self.gamma ** bisect_right(self.milestones, self.last_epoch)
        return [base_lr * scale * decay for base_lr in self.base_lrs]
def _gen_mnasnet_b1(variant, channel_multiplier=1.0, pretrained=False, **kwargs):
    """Build a MnasNet-B1 style model from its block-argument string spec.

    Each stage string encodes block type, repeats, kernel, stride,
    expansion and channels (depthwise-separable stem stage followed by
    inverted-residual stages).
    """
    arch_def = [
        ['ds_r1_k3_s1_c16_noskip'],
        ['ir_r3_k3_s2_e3_c24'],
        ['ir_r3_k5_s2_e3_c40'],
        ['ir_r3_k5_s2_e6_c80'],
        ['ir_r2_k3_s1_e6_c96'],
        ['ir_r4_k5_s2_e6_c192'],
        ['ir_r1_k3_s1_e6_c320_noskip'],
    ]
    model_kwargs = dict(
        block_args=decode_arch_def(arch_def),
        stem_size=32,
        channel_multiplier=channel_multiplier,
        norm_kwargs=resolve_bn_args(kwargs),
        **kwargs,
    )
    return _create_effnet(variant, pretrained, **model_kwargs)
def dump_results(results):
    """Pickle ``results`` (converted to a plain dict) to the module-level ``result_file``."""
    payload = dict(results)
    with open(result_file, 'wb') as handle:
        pickle.dump(payload, handle)
class TemperatureTanh(nn.Module):
    """Computes ``tanh(x / T)`` for a fixed, nonzero temperature ``T``."""

    def __init__(self, temperature: float = 1.0) -> None:
        super().__init__()
        assert temperature != 0.0, 'temperature must be nonzero.'
        self._T = temperature
        self.tanh = torch.nn.Tanh()

    def forward(self, x: Tensor) -> Tensor:
        scaled = x / self._T
        return self.tanh(scaled)
class Args(Tap):
    """Typed command-line arguments (typed-argument-parser turns the
    annotations below into CLI flags).

    Attributes:
        ckpts_dirs: directories containing model checkpoints to process.
        split_type: dataset split strategy, 'random' or 'scaffold'.
        num_folds: number of cross-validation folds (default 10).
    """
    ckpts_dirs: List[str]
    split_type: Literal[('random', 'scaffold')]
    num_folds: int = 10
# NOTE(review): the bare tuple below looks like the argument-type list of a
# stripped multiple-dispatch decorator (e.g. @dispatch(...)); left untouched.
(kernels.Kernel, TensorLike, TensorLike, TensorLike)
def _exact_fallback(kern: kernels.Kernel, Z: TensorLike, u: TensorLike, f: TensorLike, *, L: TensorLike=None, diag: TensorLike=None, basis: AbstractBasis=None, **kwargs):
    """Exact pathwise update: solve for the weights of a kernel basis.

    Computes weights^T = (K(Z, Z) + diag*I)^{-1} (u - f - eps) via a
    Cholesky solve and wraps them in a DenseSampler over a kernel basis
    centered at ``Z``.

    Args:
        kern: covariance kernel.
        Z: inducing inputs (array-like or InducingVariables).
        u: target function values at Z; last dimension must be 1.
        f: prior function values at Z, trailing shape matching ``u``.
        L: optional precomputed Cholesky factor of K + diag*I.
        diag: variance added to the diagonal (defaults to the global
            jitter); scalar floats are broadcast along the last axis.
        basis: kernel basis; defaults to one centered at ``Z``.

    Returns:
        DenseSampler carrying ``basis`` and the computed weights.
    """
    u_shape = tuple(u.shape)
    f_shape = tuple(f.shape)
    assert (u_shape[(- 1)] == 1), 'Recieved multiple output features'
    assert (u_shape == f_shape[(- len(u_shape)):]), 'Incompatible shapes detected'
    if (basis is None):
        basis = kernel_basis(kern, centers=Z)
    if (diag is None):
        diag = default_jitter()
    if isinstance(diag, float):
        diag = tf.convert_to_tensor(diag, dtype=f.dtype)
    diag = tf.expand_dims(diag, axis=(- 1))
    err = (u - f)
    # perturb the residual with noise matching the diagonal term
    err -= (tf.sqrt(diag) * tf.random.normal(err.shape, dtype=err.dtype))
    if (L is None):
        if isinstance(Z, inducing_variables.InducingVariables):
            K = covariances.Kuu(Z, kern, jitter=0.0)
        else:
            K = kern(Z, full_cov=True)
        # add the diagonal term before factorizing
        K = tf.linalg.set_diag(K, (tf.linalg.diag_part(K) + diag[(..., 0)]))
        L = tf.linalg.cholesky(K)
    weights = tf.linalg.adjoint(tf.linalg.cholesky_solve(L, err))
    return DenseSampler(basis=basis, weights=weights, **kwargs)
class CompositeAudioTransform(AudioTransform):
    """A chain of audio transforms applied in sequence."""

    def _from_config_dict(cls, transform_type, get_audio_transform, composite_cls, config=None, return_empty=False):
        """Build a composite from a config dict keyed by '<type>_transforms'.

        Returns None when no transforms are configured, unless
        ``return_empty`` requests an empty composite instead.
        NOTE(review): takes ``cls`` but carries no @classmethod decorator
        here — presumably applied by the subclass/caller; confirm.
        """
        _config = {} if config is None else config
        names = _config.get(f'{transform_type}_transforms')
        if names is None:
            if not return_empty:
                return None
            names = []
        built = [get_audio_transform(name).from_config_dict(_config.get(name)) for name in names]
        return composite_cls(built)

    def __init__(self, transforms):
        # drop None entries so __call__ can apply every element unconditionally
        self.transforms = [t for t in transforms if t is not None]

    def __call__(self, x):
        for transform in self.transforms:
            x = transform(x)
        return x

    def __repr__(self):
        lines = [self.__class__.__name__ + '(']
        lines += [f' {t!r}' for t in self.transforms]
        lines.append(')')
        return '\n'.join(lines)
class ColumnSample():
    """One dataset column plus the featurization settings to apply to it."""

    # default base directory, resolved once at import time
    basedir = prev_dir(os.getcwd())

    def __init__(self, sampletype, column, directory, settings):
        self.sampletype = sampletype
        self.column = column
        self.directory = directory
        self.settings = settings
        # bug fix: bare ``basedir`` raised NameError here (class attributes
        # are not in scope inside methods); reference the class attribute.
        self.basedir = ColumnSample.basedir

    def featurize(self):
        """Dispatch to the featurizer matching ``self.sampletype``.

        Sets ``self.features`` and ``self.labels``.

        Raises:
            ValueError: for an unrecognized sampletype (previously this
                crashed later with an UnboundLocalError).
        """
        print(self.sampletype)
        if self.sampletype == 'audio':
            (features_, labels) = audio_featurize_columns(self.column, self.directory, self.settings, self.basedir)
        elif self.sampletype == 'text':
            (features_, labels) = text_featurize_columns(self.column, self.directory, self.settings, self.basedir)
        elif self.sampletype == 'image':
            (features_, labels) = image_featurize_columns(self.column, self.directory, self.settings, self.basedir)
        elif self.sampletype == 'video':
            (features_, labels) = video_featurize_columns(self.column, self.directory, self.settings, self.basedir)
        elif self.sampletype == 'categorical':
            (features_, labels) = category_featurize_columns(self.column, self.directory, self.settings, self.basedir)
        elif self.sampletype == 'typedtext':
            (features_, labels) = typedtext_featurize_columns(self.column, self.directory, self.settings, self.basedir)
        elif self.sampletype == 'numerical':
            (features_, labels) = numerical_featurize_columns(self.column, self.directory, self.settings, self.basedir)
        else:
            raise ValueError(f'unknown sampletype: {self.sampletype}')
        self.features = features_
        self.labels = labels
class XLNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLNet.

    Text is normalized (whitespace collapsing, quote normalization,
    optional accent stripping and lower-casing), encoded to subword
    pieces, and pieces ending in a digit+comma are re-split. Includes
    legacy Python 2 (``six.PY2``) unicode handling.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    # NOTE(review): the mutable default for additional_special_tokens is
    # shared across calls; left unchanged to keep the signature identical.
    def __init__(self, vocab_file, max_len=None, do_lower_case=False, remove_space=True, keep_accents=False, bos_token='<s>', eos_token='</s>', unk_token='<unk>', sep_token='<sep>', pad_token='<pad>', cls_token='<cls>', mask_token='<mask>', additional_special_tokens=['<eop>', '<eod>'], **kwargs):
        super(XLNetTokenizer, self).__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)
        try:
            import sentencepiece as spm
        except ImportError:
            # only warns; the spm usage below then fails with NameError
            logger.warning('You need to install SentencePiece to use XLNetTokenizer: install sentencepiece')
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(vocab_file)

    # NOTE(review): upstream transformers exposes this as a @property; here
    # it is a plain method and must be *called* — confirm against callers.
    def vocab_size(self):
        return len(self.sp_model)

    def __getstate__(self):
        # the SentencePiece processor is not picklable; drop it and reload
        # from self.vocab_file on unpickle
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning('You need to install SentencePiece to use XLNetTokenizer: install sentencepiece')
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text before SentencePiece encoding."""
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace("''", '"')
        if (six.PY2 and isinstance(outputs, str)):
            outputs = outputs.decode('utf-8')
        if (not self.keep_accents):
            # strip combining marks after NFKD decomposition
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if (not unicodedata.combining(c))])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text, return_unicode=True, sample=False):
        """Tokenize text into SentencePiece subword pieces.

        ``sample=True`` uses stochastic sampling (nbest=64, alpha=0.1).
        """
        text = self.preprocess_text(text)
        if (six.PY2 and isinstance(text, unicode)):
            text = text.encode('utf-8')
        if (not sample):
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1)
        new_pieces = []
        for piece in pieces:
            # pieces like '9,' are re-encoded without the trailing comma so
            # the digits and the comma become separate pieces
            if ((len(piece) > 1) and (piece[(- 1)] == ',') and piece[(- 2)].isdigit()):
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:(- 1)].replace(SPIECE_UNDERLINE, ''))
                if ((piece[0] != SPIECE_UNDERLINE) and (cur_pieces[0][0] == SPIECE_UNDERLINE)):
                    # drop the spurious leading underline re-added by spm
                    if (len(cur_pieces[0]) == 1):
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[(- 1)])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        if (six.PY2 and return_unicode):
            ret_pieces = []
            for piece in new_pieces:
                if isinstance(piece, str):
                    piece = piece.decode('utf-8')
                ret_pieces.append(piece)
            new_pieces = ret_pieces
        return new_pieces

    def _convert_token_to_id(self, token):
        """Map a subword piece to its vocabulary id."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index, return_unicode=True):
        """Map a vocabulary id back to its subword piece."""
        token = self.sp_model.IdToPiece(index)
        if (six.PY2 and return_unicode and isinstance(token, str)):
            token = token.decode('utf-8')
        return token

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into text, restoring spaces from the underline marker."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def add_special_tokens_single_sentence(self, token_ids):
        logger.warning('No method was defined for special tokens and single sentence streams in XLNet. Returning token_ids')
        return token_ids

    def add_special_tokens_sentences_pair(self, *token_ids):
        """XLNet pair format: A <sep> B <sep> <cls> (CLS goes at the end)."""
        sep = [self._convert_token_to_id(self.sep_token)]
        cls = [self._convert_token_to_id(self.cls_token)]
        return ((((token_ids[0] + sep) + token_ids[1]) + sep) + cls)

    def save_vocabulary(self, save_directory):
        """Copy the SentencePiece model file into ``save_directory``."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
        # avoid copying a file onto itself
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
def load_config(args=None, config_file=None, overwrite_fairseq=False):
    """Load the task YAML config and reconcile it with fairseq arguments.

    Args:
        args: parsed fairseq args; supplies ``taskconfig`` and may be
            mutated in place when ``overwrite_fairseq`` is True.
        config_file: explicit config path (ignored when ``args`` is given).
        overwrite_fairseq: copy every ``config.fairseq`` field onto ``args``.

    Returns:
        The loaded (and possibly adjusted) config object.

    Raises:
        ValueError: outside test mode when no fairseq save_dir is set.
    """
    if (args is not None):
        config_file = args.taskconfig
    config = recursive_config(config_file)
    # shrink the batch per subsampling factor so the effective data volume
    # per batch stays constant
    if (config.dataset.subsampling is not None):
        batch_size = (config.fairseq.dataset.batch_size // config.dataset.subsampling)
        print('adjusting batch_size to {} due to subsampling {}.'.format(batch_size, config.dataset.subsampling))
        config.fairseq.dataset.batch_size = batch_size
    is_test = ((config.dataset.split is not None) and (config.dataset.split == 'test'))
    if (not is_test):
        if ((config.fairseq.checkpoint is None) or (config.fairseq.checkpoint.save_dir is None)):
            raise ValueError('fairseq save_dir or save_path must be specified.')
        save_dir = config.fairseq.checkpoint.save_dir
        os.makedirs(save_dir, exist_ok=True)
        # point tensorboard at a run-specific subdirectory of save_dir
        if (config.fairseq.common.tensorboard_logdir is not None):
            tb_run_dir = suffix_rundir(save_dir, config.fairseq.common.tensorboard_logdir)
            config.fairseq.common.tensorboard_logdir = tb_run_dir
            print('update tensorboard_logdir as', config.fairseq.common.tensorboard_logdir)
        os.makedirs(save_dir, exist_ok=True)
        # snapshot the resolved config alongside the checkpoints
        OmegaConf.save(config=config, f=os.path.join(save_dir, 'config.yaml'))
    if (overwrite_fairseq and (config.fairseq is not None) and (args is not None)):
        # push every config.fairseq field onto the argparse namespace
        for group in config.fairseq:
            for field in config.fairseq[group]:
                print(('overwrite args.' + field), 'as', config.fairseq[group][field])
                setattr(args, field, config.fairseq[group][field])
    return config
def prune_outside_window(keypoints, window, scope=None):
    """Replace keypoints lying outside ``window`` with NaN coordinates.

    Args:
        keypoints: [batch, num_keypoints, 2] tensor of (y, x) coordinates.
        window: length-4 tensor [y_min, x_min, y_max, x_max].
        scope: optional TF name scope.

    Returns:
        Tensor of the same shape with out-of-window points set to NaN.
    """
    with tf.name_scope(scope, 'PruneOutsideWindow'):
        (y, x) = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
        (win_y_min, win_x_min, win_y_max, win_x_max) = tf.unstack(window)
        inside_y = tf.logical_and(y >= win_y_min, y <= win_y_max)
        inside_x = tf.logical_and(x >= win_x_min, x <= win_x_max)
        valid_indices = tf.logical_and(inside_y, inside_x)
        pruned_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y))
        pruned_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x))
        return tf.concat([pruned_y, pruned_x], 2)
def print_results(results_path):
    """Pretty-print a saved results JSON: flags, hparams and the per-step table rows.

    Args:
        results_path: path to a JSON file with 'flags', 'hparams' and one
            entry per training step.
    """
    with open(results_path, 'r') as fp:
        results = json.load(fp)
    print('Flags:')
    for (k, v) in sorted(results['flags'].items()):
        print('\t{}: {}'.format(k, v))
    print('HParams:')
    for (k, v) in sorted(results['hparams'].items()):
        print('\t{}: {}'.format(k, v))
    t = setup_pretty_table(Namespace(**results['flags']))
    envs = datasets.get_environments(results['flags']['dataset'])
    # removed unused local: envs[test_env] was computed and never used
    # names of the in-split loss entries for every training environment
    train_names = [(str(env) + '_in_loss') for (i, env) in enumerate(envs) if (i != results['flags']['test_env'])]
    steps = [key for key in results.keys() if (key not in ['hparams', 'flags'])]
    for s in steps:
        row = [s]
        row += ['{:.2f} :: {:.2f}'.format(results[s][(str(e) + '_in_acc')], results[s][(str(e) + '_out_acc')]) for e in envs]
        row.append('{:.2f}'.format(np.average([results[s][name] for name in train_names])))
        row += ['.', '.', '.']  # placeholder columns
        t.add_row(row)
    # only print the last data row of the rendered table
    print('\n'.join(t.get_string().splitlines()[(- 2):(- 1)]))
def log_Logistic_256(x, mean, logvar, average=False, reduce=True, dim=None):
    """Negative log-likelihood of ``x`` under a discretized (256-bin) logistic.

    ``x`` is snapped to its 1/256 bin edge, standardized by ``mean`` and
    ``scale = exp(logvar)``, and the per-element NLL is the negative log of
    the probability mass inside the bin (with a 1e-7 floor).
    """
    bin_size = 1.0 / 256.0
    scale = torch.exp(logvar)
    standardized = (torch.floor(x / bin_size) * bin_size - mean) / scale
    # logistic CDF at the upper and lower bin edges
    cdf_upper = torch.sigmoid(standardized + bin_size / scale)
    cdf_lower = torch.sigmoid(standardized)
    nll = -torch.log(cdf_upper - cdf_lower + 1e-07)
    if not reduce:
        return nll
    return torch.mean(nll, dim) if average else torch.sum(nll, dim)
def get_net(input_shape, num_output_channels, net_config):
    """Factory building the network described by ``net_config['type']``.

    Supported types: 'mlp' (1-D input), 'resnet' and 'glow-cnn' (3-D
    input), 'constant' (fixed/learnable constant output) and 'identity'.
    """
    num_input_channels = input_shape[0]
    net_type = net_config['type']
    if net_type == 'mlp':
        assert len(input_shape) == 1
        return get_mlp(num_input_channels=num_input_channels, hidden_channels=net_config['hidden_channels'], num_output_channels=num_output_channels, activation=get_activation(net_config['activation']))
    if net_type == 'resnet':
        assert len(input_shape) == 3
        return get_resnet(num_input_channels=num_input_channels, hidden_channels=net_config['hidden_channels'], num_output_channels=num_output_channels)
    if net_type == 'glow-cnn':
        assert len(input_shape) == 3
        return get_glow_cnn(num_input_channels=num_input_channels, num_hidden_channels=net_config['num_hidden_channels'], num_output_channels=num_output_channels, zero_init_output=net_config['zero_init_output'])
    if net_type == 'constant':
        # constant tensor shaped like the input with swapped channel count
        value = torch.full((num_output_channels, *input_shape[1:]), net_config['value'], dtype=torch.get_default_dtype())
        return ConstantNetwork(value=value, fixed=net_config['fixed'])
    if net_type == 'identity':
        assert num_output_channels == num_input_channels
        return lambda x: x
    assert False, f"Invalid net type {net_config['type']}"
class ImageParameters(object):
    """Geometry and calibration of a synthetic survey image."""

    def __init__(self):
        self.width_px = 96
        self.height_px = 96
        self.arcsec_per_pixel = 0.396  # SDSS-like plate scale
        self.world_origin = WorldCoordinate(0, 0)
        # per-band flux calibration (electrons per nanomaggie), 5 bands
        self.band_nelec_per_nmgy = [1000.0] * 5

    def degrees_per_pixel(self):
        """Plate scale expressed in degrees per pixel."""
        return self.arcsec_per_pixel / ARCSEC_PER_DEGREE

    def get_image_center_world_coordinates(self):
        """World coordinate of the image center, offset from the origin."""
        scale = self.degrees_per_pixel()
        half_height_deg = self.height_px * scale / 2.0
        half_width_deg = self.width_px * scale / 2.0
        return self.world_origin.add(half_height_deg, half_width_deg)
def UsesColor(term, color_env_var, color_flag):
    """Runs the gtest binary under the given TERM/env/flag and reports whether
    it exercised colored output (non-zero exit, or did not exit cleanly)."""
    SetEnvVar('TERM', term)
    SetEnvVar(COLOR_ENV_VAR, color_env_var)
    # A None flag means "do not pass --gtest_color at all".
    args = [] if color_flag is None else [('--%s=%s' % (COLOR_FLAG, color_flag))]
    proc = gtest_test_utils.Subprocess([COMMAND] + args)
    return (not proc.exited) or proc.exit_code
def ResNet(stack_fn, preact, use_bias, model_name='resnet', include_top=False, weights='imagenet', input_tensor=None, input_shape=None, pooling=None, classes=1000, **kwargs):
    """Instantiates a ResNet-family architecture (keras-applications style).

    Args:
        stack_fn: callable appending the residual stacks to a tensor and
            returning the output tensor; this is what differentiates the
            ResNet variants.
        preact: True for pre-activation (ResNetV2) — the stem BN/ReLU is
            skipped below when this is set.
        use_bias: whether the stem conv uses a bias term.
        model_name: model name; also used to look up pretrained weight hashes.
        include_top: include the global-average-pool + softmax classifier.
        weights: None, 'imagenet', or a path to a weights file to load.
        input_tensor: optional existing tensor to build on.
        input_shape: optional input shape (validated/derived below).
        pooling: 'avg' or 'max' top pooling when include_top is False.
        classes: classifier size (must be 1000 for imagenet + include_top).

    Returns:
        A `models.Model`.

    Raises:
        ValueError: for an invalid `weights` / `classes` combination.
    """
    # Validate the weights argument before doing any graph construction.
    if (not ((weights in {'imagenet', None}) or os.path.exists(weights))):
        raise ValueError('The `weights` argument should be either `None` (random initialization), `imagenet` (pre-training on ImageNet), or the path to the weights file to be loaded.')
    if ((weights == 'imagenet') and include_top and (classes != 1000)):
        raise ValueError('If using `weights` as `"imagenet"` with `include_top` as true, `classes` should be 1000')
    input_shape = _obtain_input_shape(input_shape, default_size=224, min_size=32, data_format=backend.image_data_format(), require_flatten=include_top, weights=weights)
    # Resolve the input: fresh placeholder, wrapped tensor, or as given.
    if (input_tensor is None):
        img_input = layers.Input(shape=input_shape)
    elif (not backend.is_keras_tensor(input_tensor)):
        img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
        img_input = input_tensor
    # BatchNorm axis depends on channels-last vs channels-first layout.
    bn_axis = (3 if (backend.image_data_format() == 'channels_last') else 1)
    # Stem: 7x7/2 conv on a zero-padded input.
    x = layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(img_input)
    x = layers.Conv2D(64, 7, strides=2, use_bias=use_bias, name='conv1_conv')(x)
    # V1 networks normalize/activate in the stem; V2 (preact) defers it.
    if (preact is False):
        x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-05, name='conv1_bn')(x)
        x = layers.Activation('relu', name='conv1_relu')(x)
    x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
    x = layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
    # Residual stacks supplied by the caller.
    x = stack_fn(x)
    # Classification head or optional global pooling.
    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='probs')(x)
    elif (pooling == 'avg'):
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
    elif (pooling == 'max'):
        x = layers.GlobalMaxPooling2D(name='max_pool')(x)
    # If an external tensor was used, anchor the model at its true inputs.
    if (input_tensor is not None):
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    model = models.Model(inputs, x, name=model_name)
    # Load pretrained weights: imagenet (downloaded + hash-checked) or a path.
    if ((weights == 'imagenet') and (model_name in WEIGHTS_HASHES)):
        if include_top:
            file_name = (model_name + '_weights_tf_dim_ordering_tf_kernels.h5')
            file_hash = WEIGHTS_HASHES[model_name][0]
        else:
            file_name = (model_name + '_weights_tf_dim_ordering_tf_kernels_notop.h5')
            file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = keras_utils.get_file(file_name, (BASE_WEIGHTS_PATH + file_name), cache_subdir='models', file_hash=file_hash)
        model.load_weights(weights_path)
    elif (weights is not None):
        model.load_weights(weights)
    return model
def _is_torch_dtype(x):
import torch
if isinstance(x, str):
if hasattr(torch, x):
x = getattr(torch, x)
else:
return False
return isinstance(x, torch.dtype) |
def merge_models(shape_size, models_list, out_model_file, avg=True):
    """Build an ensemble of saved keras models, or reload it from disk.

    If `out_model_file` already exists it is loaded and returned directly.
    Otherwise every model in `models_list` is loaded, wired to one shared
    input, and (when `avg` is True) their outputs are averaged; the result is
    saved to `out_model_file`.
    """
    from keras.models import load_model, save_model, Model
    from keras.layers import Input, Average
    if os.path.isfile(out_model_file):
        # Cached from a previous call: no need to rebuild.
        return load_model(out_model_file)
    members = [load_model(path) for path in models_list]
    ensemble_input = Input(shape_size)
    branch_outputs = []
    for index, member in enumerate(members):
        # Suffix each member's name so layer names stay unique in the graph.
        member.name += '_{}'.format(index)
        branch_outputs.append(member(ensemble_input))
    outputs = Average()(branch_outputs) if (avg is True) else branch_outputs
    final_model = Model(inputs=ensemble_input, outputs=outputs)
    print(final_model.summary())
    print('Single model memory: {} GB'.format(get_model_memory_usage(1, final_model)))
    save_model(final_model, out_model_file)
    return final_model
class FeatureExtractor():
    """Behavioral feature extraction for review-spam detection.

    ``user_data`` / ``product_data`` map an id to a list of review tuples.
    From the indexing used below, a tuple is
    ``(counterpart_id, rating, <unused>, date_str)`` where ``date_str``
    follows ``date_time_format_str`` — TODO confirm against the data loader.

    Every feature has a batch variant (``MNR``, ``PR_NR``, ...) computed from
    one snapshot, and an incremental variant (``iMNR``, ``iPR_NR``, ...) that
    folds ``new_data`` into the previously seen ``data``.
    """

    # Timestamp format stored at index 3 of each review tuple.
    date_time_format_str = '%Y-%m-%d'

    def __init__(self):
        # Running maxima so MNR stays consistently normalized across updates.
        self.user_mnr_normalizer = 0
        self.prod_mnr_normalizer = 0
        # Per-product rating count/sum kept so avgRD can be updated online.
        self.product_num_ratings = {}
        self.product_sum_ratings = {}

    def MNR(self, data, data_type='user'):
        """Maximum number of reviews posted on a single day, normalized by
        the global maximum over all users (or products)."""
        feature = {}
        for i, d in data.items():
            frequency = {}
            for t in d:
                if t[3] not in frequency:
                    frequency[t[3]] = 1
                else:
                    frequency[t[3]] += 1
            feature[i] = max(frequency.values())
        if data_type == 'user':
            self.user_mnr_normalizer = max(feature.values())
            for k in feature.keys():
                feature[k] /= self.user_mnr_normalizer
        else:
            self.prod_mnr_normalizer = max(feature.values())
            for k in feature.keys():
                feature[k] /= self.prod_mnr_normalizer
        return feature

    def iMNR(self, data, new_data, data_type='user'):
        """Incremental MNR for the entities present in ``new_data``."""
        feature = {}
        for i, d in new_data.items():
            all_d = deepcopy(d)
            if i in data:
                all_d += data[i]
            frequency = {}
            for t in all_d:
                if t[3] not in frequency:
                    frequency[t[3]] = 1
                else:
                    frequency[t[3]] += 1
            feature[i] = max(frequency.values())
        # The normalizer only grows, keeping old features on a compatible scale.
        if data_type == 'user':
            self.user_mnr_normalizer = max(max(feature.values()), self.user_mnr_normalizer)
            for k in feature.keys():
                feature[k] /= self.user_mnr_normalizer
        else:
            self.prod_mnr_normalizer = max(max(feature.values()), self.prod_mnr_normalizer)
            for k in feature.keys():
                feature[k] /= self.prod_mnr_normalizer
        return feature

    def PR_NR(self, data):
        """(positive ratio, negative ratio): fraction of ratings >3 and <3."""
        feature = {}
        for i, d in data.items():
            positives = [1 for t in d if t[1] > 3]
            negatives = [1 for t in d if t[1] < 3]
            feature[i] = (float(len(positives)) / len(d), float(len(negatives)) / len(d))
        return feature

    def iPR_NR(self, data, new_data):
        """Incremental PR/NR over merged old + new reviews."""
        feature = {}
        for i, d in new_data.items():
            all_d = deepcopy(d)
            if i in data:
                all_d = all_d + data[i]
            positives = [1 for t in all_d if t[1] > 3]
            negatives = [1 for t in all_d if t[1] < 3]
            feature[i] = (float(len(positives)) / len(all_d), float(len(negatives)) / len(all_d))
        return feature

    def avgRD_user(self, user_data, product_data):
        """Average absolute deviation of each user's ratings from the rated
        product's mean rating.  Also (re)initializes the per-product
        count/sum accumulators used by the incremental variants."""
        p_avg = {}
        for i, d in product_data.items():
            self.product_num_ratings[i] = len(d)
            self.product_sum_ratings[i] = np.sum(np.array([t[1] for t in d]))
            p_avg[i] = np.mean(np.array([t[1] for t in d]))
        u_avgRD = {}
        for i, d in user_data.items():
            u_avgRD[i] = np.mean(np.array([abs(t[1] - p_avg[t[0]]) for t in d]))
        return u_avgRD

    def iavgRD_user(self, user_data, new_user_data, product_data, new_product_data):
        """Incremental avgRD for every user touched by the new reviews."""
        user_involved = set()
        # Fold the new reviews into the per-product accumulators and collect
        # every user whose feature must be recomputed.
        for i, d in new_product_data.items():
            if i in self.product_num_ratings:
                self.product_num_ratings[i] += len(d)
            else:
                self.product_num_ratings[i] = len(d)
            for t in d:
                if i in self.product_sum_ratings:
                    self.product_sum_ratings[i] += t[1]
                else:
                    self.product_sum_ratings[i] = t[1]
                user_id = t[0]
                user_involved.add(user_id)
            if i in product_data:
                for t in product_data[i]:
                    user_id = t[0]
                    user_involved.add(user_id)
        for i, d in new_user_data.items():
            assert i in user_involved
        p_avg = {}
        u_avgRD = {}
        for user_id in user_involved:
            all_d = []
            if user_id in new_user_data:
                all_d += new_user_data[user_id]
            if user_id in user_data:
                all_d += user_data[user_id]
            for r in all_d:
                product_id = r[0]
                if product_id not in p_avg:
                    # Product means derived from the running accumulators.
                    p_avg[product_id] = (self.product_sum_ratings[product_id] / self.product_num_ratings[product_id])
            u_avgRD[user_id] = np.mean(np.array([abs(r[1] - p_avg[r[0]]) for r in all_d]))
        return u_avgRD

    def avgRD_prod(self, product_data):
        """Average absolute deviation of a product's ratings from its mean."""
        p_avg = {}
        for i, d in product_data.items():
            self.product_num_ratings[i] = len(d)
            self.product_sum_ratings[i] = np.sum(np.array([t[1] for t in d]))
            p_avg[i] = np.mean(np.array([t[1] for t in d]))
        p_avgRD = {}
        for i, d in product_data.items():
            p_avgRD[i] = np.mean(np.array([abs(t[1] - p_avg[i]) for t in d]))
        return p_avgRD

    def iavgRD_prod(self, product_data, new_product_data):
        """Incremental avgRD for products present in ``new_product_data``."""
        for i, d in new_product_data.items():
            if i in self.product_num_ratings:
                self.product_num_ratings[i] += len(d)
            else:
                self.product_num_ratings[i] = len(d)
            for t in d:
                if i in self.product_sum_ratings:
                    self.product_sum_ratings[i] += t[1]
                else:
                    self.product_sum_ratings[i] = t[1]
        p_avg = {}
        p_avgRD = {}
        for i, d in new_product_data.items():
            all_d = deepcopy(d)
            if i in product_data:
                all_d += product_data[i]
            p_avg[i] = np.mean(np.array([t[1] for t in all_d]))
            p_avgRD[i] = np.mean(np.array([abs(t[1] - p_avg[i]) for t in all_d]))
        return p_avgRD

    def BST(self, user_data):
        """Burstiness: 1 - span/28 days; 0 once the posting span exceeds 28."""
        bst = {}
        tau = 28.0
        for i, d in user_data.items():
            post_dates = sorted([datetime.strptime(t[3], self.date_time_format_str) for t in d])
            delta_days = (post_dates[-1] - post_dates[0]).days
            if delta_days > tau:
                bst[i] = 0.0
            else:
                bst[i] = 1.0 - (delta_days / tau)
        return bst

    def iBST(self, user_data, new_user_data):
        """Incremental burstiness over merged old + new reviews."""
        bst = {}
        tau = 28.0
        for i, d in new_user_data.items():
            all_d = deepcopy(d)
            if i in user_data:
                all_d += user_data[i]
            post_dates = sorted([datetime.strptime(t[3], self.date_time_format_str) for t in all_d])
            delta_days = (post_dates[-1] - post_dates[0]).days
            if delta_days > tau:
                bst[i] = 0.0
            else:
                bst[i] = 1.0 - (delta_days / tau)
        return bst

    def ERD(self, data):
        """Shannon entropy of the rating distribution (histogram over 1..5)."""
        erd = {}
        for i, d in data.items():
            ratings = [t[1] for t in d]
            h, _ = np.histogram(ratings, bins=np.arange(1, 7))
            h = h / h.sum()
            h = h[np.nonzero(h)]
            erd[i] = ((-h) * np.log2(h)).sum()
        return erd

    def iERD(self, data, new_data):
        """Incremental rating-distribution entropy."""
        erd = {}
        for i, d in new_data.items():
            all_d = deepcopy(d)
            if i in data:
                all_d += data[i]
            ratings = [t[1] for t in all_d]
            h, _ = np.histogram(ratings, bins=np.arange(1, 7))
            h = h / h.sum()
            h = h[np.nonzero(h)]
            erd[i] = ((-h) * np.log2(h)).sum()
        return erd

    def ETG(self, data):
        """Entropy of inter-review time gaps, bucketed by ``edges`` (days);
        gaps of 33+ days are discarded."""
        etg = {}
        edges = [0, 0.5, 1, 4, 7, 13, 33]
        for i, d in data.items():
            if len(d) <= 1:
                etg[i] = 0
                continue
            posting_dates = sorted([datetime.strptime(t[3], self.date_time_format_str) for t in d])
            delta_days = [(posting_dates[k + 1] - posting_dates[k]).days for k in range(len(posting_dates) - 1)]
            delta_days = [delta for delta in delta_days if delta < 33]
            h = []
            for delta in delta_days:
                # Find the first bucket edge the gap does not exceed.
                j = 0
                while j < len(edges) and delta > edges[j]:
                    j += 1
                h.append(j)
            _, h = np.unique(h, return_counts=True)
            if h.sum() == 0:
                etg[i] = 0
                continue
            h = h / h.sum()
            h = h[np.nonzero(h)]
            etg[i] = np.sum((-h) * np.log2(h))
        return etg

    def iETG(self, data, new_data):
        """Incremental time-gap entropy over merged old + new reviews."""
        etg = {}
        edges = [0, 0.5, 1, 4, 7, 13, 33]
        for i, d in new_data.items():
            all_d = deepcopy(d)
            if i in data:
                all_d += data[i]
            if len(all_d) <= 1:
                etg[i] = 0
                continue
            posting_dates = sorted([datetime.strptime(t[3], self.date_time_format_str) for t in all_d])
            delta_days = [(posting_dates[k + 1] - posting_dates[k]).days for k in range(len(posting_dates) - 1)]
            delta_days = [delta for delta in delta_days if delta < 33]
            h = []
            for delta in delta_days:
                j = 0
                while j < len(edges) and delta > edges[j]:
                    j += 1
                h.append(j)
            _, h = np.unique(h, return_counts=True)
            if h.sum() == 0:
                etg[i] = 0
                continue
            h = h / h.sum()
            h = h[np.nonzero(h)]
            etg[i] = np.sum((-h) * np.log2(h))
        return etg

    def RD(self, product_data):
        """Per-review rating deviation from the product mean, keyed
        (reviewer id, product id)."""
        rd = {}
        for i, d in product_data.items():
            avg = np.mean(np.array([t[1] for t in d]))
            for t in d:
                rd[(t[0], i)] = abs(t[1] - avg)
        return rd

    def iRD(self, product_data, new_product_data):
        """Incremental per-review rating deviation."""
        rd = {}
        for i, d in new_product_data.items():
            all_d = deepcopy(d)
            if i in product_data:
                all_d = d + product_data[i]
            avg = np.mean(np.array([t[1] for t in all_d]))
            for t in all_d:
                rd[(t[0], i)] = abs(t[1] - avg)
        return rd

    def EXT(self, product_data):
        """1 if a review's rating is extreme (1 or 5), else 0."""
        ext = {}
        for i, d in product_data.items():
            for t in d:
                if int(t[1]) == 5 or int(t[1]) == 1:
                    ext[(t[0], i)] = 1
                else:
                    ext[(t[0], i)] = 0
        return ext

    def iEXT(self, product_data, new_product_data):
        """Incremental extreme-rating flag."""
        ext = {}
        for i, d in new_product_data.items():
            all_d = deepcopy(d)
            if i in product_data:
                all_d = d + product_data[i]
            for t in all_d:
                if int(t[1]) == 5 or int(t[1]) == 1:
                    ext[(t[0], i)] = 1
                else:
                    ext[(t[0], i)] = 0
        return ext

    def DEV(self, product_data):
        """1 if a review deviates from the product mean by more than beta_1
        after normalizing by the 4-point rating range, else 0."""
        beta_1 = 0.63
        dev = {}
        for i, d in product_data.items():
            p_avg_rating = np.mean(np.array([t[1] for t in d]))
            for t in d:
                u_id = t[0]
                # Consistency fix: use beta_1 instead of a duplicated 0.63
                # literal, so the threshold lives in one place.
                if (abs(p_avg_rating - t[1]) / 4.0) > beta_1:
                    dev[(u_id, i)] = 1
                else:
                    dev[(u_id, i)] = 0
        return dev

    def iDEV(self, product_data, new_product_data):
        """Incremental rating-deviation flag."""
        beta_1 = 0.63
        dev = {}
        for i, d in new_product_data.items():
            all_d = deepcopy(d)
            if i in product_data:
                all_d = d + product_data[i]
            p_avg_rating = np.mean(np.array([t[1] for t in all_d]))
            for t in all_d:
                u_id = t[0]
                # Same beta_1 consistency fix as DEV.
                if (abs(p_avg_rating - t[1]) / 4.0) > beta_1:
                    dev[(u_id, i)] = 1
                else:
                    dev[(u_id, i)] = 0
        return dev

    def ETF(self, product_data):
        """Early-time-frame flag: 1 if the user's latest review of a product
        landed early (within ~(1-beta_3)*210 days of the product's first
        review), else 0."""
        beta_3 = 0.69
        first_time_product = {}
        for i, d in product_data.items():
            for t in d:
                if i not in first_time_product:
                    first_time_product[i] = datetime.strptime(t[3], self.date_time_format_str)
                elif datetime.strptime(t[3], self.date_time_format_str) < first_time_product[i]:
                    first_time_product[i] = datetime.strptime(t[3], self.date_time_format_str)
        etf = {}
        for i, d in product_data.items():
            for t in d:
                td = datetime.strptime(t[3], self.date_time_format_str) - first_time_product[i]
                if (t[0], i) not in etf:
                    etf[(t[0], i)] = td
                elif td > etf[(t[0], i)]:
                    etf[(t[0], i)] = td
        for k, v in etf.items():
            if v.days > (7 * 30):
                etf[k] = 0
            elif (1.0 - (v.days / (7 * 30))) > beta_3:
                etf[k] = 1
            else:
                etf[k] = 0
        return etf

    def iETF(self, product_data, new_product_data):
        """Incremental early-time-frame flag over merged old + new reviews."""
        beta_3 = 0.69
        first_time_product = {}
        for i, d in new_product_data.items():
            all_d = deepcopy(d)
            if i in product_data:
                all_d = d + product_data[i]
            for t in all_d:
                if i not in first_time_product:
                    first_time_product[i] = datetime.strptime(t[3], self.date_time_format_str)
                elif datetime.strptime(t[3], self.date_time_format_str) < first_time_product[i]:
                    first_time_product[i] = datetime.strptime(t[3], self.date_time_format_str)
        etf = {}
        for i, d in new_product_data.items():
            all_d = deepcopy(d)
            if i in product_data:
                all_d = d + product_data[i]
            for t in all_d:
                td = datetime.strptime(t[3], self.date_time_format_str) - first_time_product[i]
                if (t[0], i) not in etf:
                    etf[(t[0], i)] = td
                elif td > etf[(t[0], i)]:
                    etf[(t[0], i)] = td
        for k, v in etf.items():
            if v.days > (7 * 30):
                etf[k] = 0
            elif (1.0 - (v.days / (7 * 30))) > beta_3:
                etf[k] = 1
            else:
                etf[k] = 0
        return etf

    def ISR(self, user_data):
        """1 if the review is its author's only review, else 0."""
        isr = {}
        for i, d in user_data.items():
            for t in d:
                if len(d) == 1:
                    isr[(i, t[0])] = 1
                else:
                    isr[(i, t[0])] = 0
        return isr

    def iISR(self, user_data, new_user_data):
        """Incremental single-review flag over merged old + new reviews."""
        isr = {}
        for i, d in new_user_data.items():
            all_d = deepcopy(d)
            if i in user_data:
                all_d = d + user_data[i]
            for t in all_d:
                if len(all_d) == 1:
                    isr[(i, t[0])] = 1
                else:
                    isr[(i, t[0])] = 0
        return isr

    def add_feature(self, existing_features, new_features, feature_names):
        """Merge ``new_features`` into ``existing_features`` under the given
        name(s); multi-name features expect an indexable value per key."""
        for k, v in new_features.items():
            if k not in existing_features:
                existing_features[k] = dict()
            for i in range(len(feature_names)):
                if len(feature_names) > 1:
                    existing_features[k][feature_names[i]] = v[i]
                else:
                    existing_features[k][feature_names[i]] = v

    def construct_all_features(self, user_data, prod_data):
        """Compute every user/product/review feature from one data snapshot.

        Returns (UserFeatures, ProdFeatures, ReviewFeatures), each mapping
        id -> {feature name -> value}.
        """
        UserFeatures = {}
        ProdFeatures = {}
        uf = self.MNR(user_data, data_type='user')
        self.add_feature(UserFeatures, uf, ['MNR'])
        pf = self.MNR(prod_data, data_type='prod')
        self.add_feature(ProdFeatures, pf, ['MNR'])
        uf = self.PR_NR(user_data)
        self.add_feature(UserFeatures, uf, ['PR', 'NR'])
        pf = self.PR_NR(prod_data)
        self.add_feature(ProdFeatures, pf, ['PR', 'NR'])
        uf = self.avgRD_user(user_data, prod_data)
        self.add_feature(UserFeatures, uf, ['avgRD'])
        pf = self.avgRD_prod(prod_data)
        self.add_feature(ProdFeatures, pf, ['avgRD'])
        uf = self.BST(user_data)
        self.add_feature(UserFeatures, uf, ['BST'])
        uf = self.ERD(user_data)
        self.add_feature(UserFeatures, uf, ['ERD'])
        pf = self.ERD(prod_data)
        self.add_feature(ProdFeatures, pf, ['ERD'])
        uf = self.ETG(user_data)
        self.add_feature(UserFeatures, uf, ['ETG'])
        pf = self.ETG(prod_data)
        self.add_feature(ProdFeatures, pf, ['ETG'])
        ReviewFeatures = {}
        rf = self.RD(prod_data)
        self.add_feature(ReviewFeatures, rf, ['RD'])
        rf = self.EXT(prod_data)
        self.add_feature(ReviewFeatures, rf, ['EXT'])
        rf = self.DEV(prod_data)
        self.add_feature(ReviewFeatures, rf, ['DEV'])
        rf = self.ETF(prod_data)
        self.add_feature(ReviewFeatures, rf, ['ETF'])
        rf = self.ISR(user_data)
        self.add_feature(ReviewFeatures, rf, ['ISR'])
        return (UserFeatures, ProdFeatures, ReviewFeatures)

    def update_all_features(self, user_data, new_user_data, prod_data, new_product_data, UserFeatures, ProdFeatures, ReviewFeatures):
        """Incrementally refresh all features for entities touched by new data."""
        uf = self.iMNR(user_data, new_user_data, data_type='user')
        self.add_feature(UserFeatures, uf, ['MNR'])
        pf = self.iMNR(prod_data, new_product_data, data_type='prod')
        self.add_feature(ProdFeatures, pf, ['MNR'])
        uf = self.iPR_NR(user_data, new_user_data)
        self.add_feature(UserFeatures, uf, ['PR', 'NR'])
        pf = self.iPR_NR(prod_data, new_product_data)
        self.add_feature(ProdFeatures, pf, ['PR', 'NR'])
        uf = self.iavgRD_user(user_data, new_user_data, prod_data, new_product_data)
        self.add_feature(UserFeatures, uf, ['avgRD'])
        pf = self.iavgRD_prod(prod_data, new_product_data)
        self.add_feature(ProdFeatures, pf, ['avgRD'])
        uf = self.iBST(user_data, new_user_data)
        self.add_feature(UserFeatures, uf, ['BST'])
        uf = self.iERD(user_data, new_user_data)
        self.add_feature(UserFeatures, uf, ['ERD'])
        pf = self.iERD(prod_data, new_product_data)
        self.add_feature(ProdFeatures, pf, ['ERD'])
        uf = self.iETG(user_data, new_user_data)
        self.add_feature(UserFeatures, uf, ['ETG'])
        pf = self.iETG(prod_data, new_product_data)
        self.add_feature(ProdFeatures, pf, ['ETG'])
        rf = self.iRD(prod_data, new_product_data)
        self.add_feature(ReviewFeatures, rf, ['RD'])
        rf = self.iEXT(prod_data, new_product_data)
        self.add_feature(ReviewFeatures, rf, ['EXT'])
        rf = self.iDEV(prod_data, new_product_data)
        self.add_feature(ReviewFeatures, rf, ['DEV'])
        rf = self.iETF(prod_data, new_product_data)
        self.add_feature(ReviewFeatures, rf, ['ETF'])
        rf = self.iISR(user_data, new_user_data)
        self.add_feature(ReviewFeatures, rf, ['ISR'])
        return (UserFeatures, ProdFeatures, ReviewFeatures)

    def calculateNodePriors(self, feature_names, features_py, when_suspicious):
        """Rank-based suspicion prior per node, combining features via an RMS
        average; missing features contribute a neutral 0.5.  Result is
        clamped to [0.001, 0.999]."""
        priors = {}
        for node_id, v in features_py.items():
            priors[node_id] = 0
        for fn in feature_names:
            fv_py = []
            for node_id, v in features_py.items():
                if fn not in v:
                    # -1 is a sentinel value sorting below all real features.
                    fv_py.append((node_id, -1))
                else:
                    fv_py.append((node_id, v[fn]))
            fv_py = sorted(fv_py, key=lambda x: x[1])
            i = 0
            while i < len(fv_py):
                # Group ties so equal values receive the same rank score.
                start = i
                end = i + 1
                while end < len(fv_py) and fv_py[start][1] == fv_py[end][1]:
                    end += 1
                i = end
                for j in range(start, end):
                    node_id = fv_py[j][0]
                    # BUG FIX: the original compared the node id fv_py[j][0]
                    # against -1; the sentinel lives in the value slot.
                    if fv_py[j][1] == -1:
                        priors[node_id] += pow(0.5, 2)
                        continue
                    if when_suspicious[fn] == '+1':
                        priors[node_id] += pow(1.0 - (float(start + 1) / len(fv_py)), 2)
                    else:
                        priors[node_id] += pow(float(end) / len(fv_py), 2)
        for node_id, v in features_py.items():
            priors[node_id] = 1.0 - math.sqrt(priors[node_id] / len(feature_names))
            if priors[node_id] > 0.999:
                priors[node_id] = 0.999
            elif priors[node_id] < 0.001:
                priors[node_id] = 0.001
        return priors

    def calNewNodePriors(self, feature_names, features_py, when_suspicious):
        """Like calculateNodePriors, but combines per-feature suspicion scores
        with min() instead of an RMS average."""
        priors = {}
        for node_id, v in features_py.items():
            priors[node_id] = []
        for fn in feature_names:
            fv_py = []
            for node_id, v in features_py.items():
                if fn not in v:
                    fv_py.append((node_id, -1))
                else:
                    fv_py.append((node_id, v[fn]))
            fv_py = sorted(fv_py, key=lambda x: x[1])
            i = 0
            while i < len(fv_py):
                start = i
                end = i + 1
                while end < len(fv_py) and fv_py[start][1] == fv_py[end][1]:
                    end += 1
                i = end
                for j in range(start, end):
                    node_id = fv_py[j][0]
                    # BUG FIX: (a) compare the value slot against the -1
                    # sentinel (the original compared the node id), and
                    # (b) append the neutral 0.5 prior — the original did
                    # `priors[node_id] += 0.25` on a list, a TypeError.
                    if fv_py[j][1] == -1:
                        priors[node_id].append(0.5)
                        continue
                    if when_suspicious[fn] == '+1':
                        priors[node_id].append(1.0 - (float(start + 1) / len(fv_py)))
                    else:
                        priors[node_id].append(float(end) / len(fv_py))
        # (Removed leftover debug prints for node '201'.)
        for node_id, v in features_py.items():
            # The single most suspicious feature wins; clamp away 0/1.
            priors[node_id] = min(priors[node_id])
            if priors[node_id] > 0.999:
                priors[node_id] = 0.999
            elif priors[node_id] < 0.001:
                priors[node_id] = 0.001
        return priors
def bootstrap_confidence(true, pred, n=10000, confidence=0.9):
Rs = []
for _ in range(n):
indice = np.random.randint(0, len(pred), len(pred))
t = [true[i] for i in indice]
p = [pred[i] for i in indice]
(a, b, R, _, std_err) = stats.linregress(t, p)
Rs.append(R)
Rs = np.array(Rs)
return stats.t.interval(confidence, (len(Rs) - 1), loc=np.mean(Rs), scale=np.std(Rs)) |
class Conv2d(_ConvBase):
    """2D convolution block: thin wrapper over `_ConvBase` that fixes the
    convolution to `nn.Conv2d` and the normalization to `BatchNorm2d`.

    All remaining options (activation, init, bias, pre-activation ordering,
    naming) are forwarded unchanged to the base class.
    """

    def __init__(self, in_size: int, out_size: int, *, kernel_size: Tuple[(int, int)]=(1, 1), stride: Tuple[(int, int)]=(1, 1), padding: Tuple[(int, int)]=(0, 0), activation=nn.ReLU(inplace=True), bn: bool=False, init=nn.init.kaiming_normal_, bias: bool=True, preact: bool=False, name: str=''):
        # Only `conv`/`batch_norm` are specialized here; everything else is
        # a pass-through of the caller's arguments.
        super().__init__(in_size, out_size, kernel_size, stride, padding, activation, bn, init, conv=nn.Conv2d, batch_norm=BatchNorm2d, bias=bias, preact=preact, name=name)
def main():
    """Trains a WINNet denoiser on CUDA.

    Relies on module-level globals: `opt` (CLI options), `Dataset`,
    `WINNetklvl`, and `prepare_data`.  Saves a checkpoint per epoch to
    `opt.outf` and rolls back to the previous checkpoint when the loss goes
    NaN/Inf.
    """
    if (not os.path.exists(opt.outf)):
        os.makedirs(opt.outf)
    print('Loading dataset ...\n')
    dataset_train = Dataset(train=True)
    loader_train = DataLoader(dataset=dataset_train, num_workers=4, batch_size=opt.batchSize, shuffle=True, pin_memory=True)
    print(('# of training samples: %d\n' % int(len(dataset_train))))
    net = WINNetklvl(steps=opt.num_of_steps, layers=opt.num_of_layers, channels=opt.num_of_channels, klvl=opt.lvl, mode=opt.split, dnlayers=opt.dnlayers)
    # NOTE(review): `size_average=False` is deprecated in modern PyTorch;
    # the equivalent is `reduction='sum'`.
    criterion = nn.MSELoss(size_average=False).cuda()
    device_ids = [0]
    model = nn.DataParallel(net, device_ids=device_ids).cuda()
    torch.backends.cudnn.benchmark = True
    pytorch_total_params = sum((p.numel() for p in model.parameters() if p.requires_grad))
    print('Total Number of Parameters: ', pytorch_total_params)
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    # Noise-level range for blind ('B') training.
    noiseL_B = [0, 55]
    start_epoch = opt.start_epoch
    if (start_epoch > 0):
        # Resuming: reload the checkpoint and replay the LR decay schedule.
        print('Start Epoch: ', start_epoch)
        model.load_state_dict(torch.load(os.path.join(opt.outf, 'net_WINNet_epoch_{}.pth'.format(start_epoch))))
        for param_group in optimizer.param_groups:
            current_lr = param_group['lr']
            current_lr = (current_lr * (opt.decay_rate ** (start_epoch // opt.milestone)))
            param_group['lr'] = current_lr
            print('Learning rate: ', current_lr)
    else:
        # Save the untrained weights as epoch-0 so NaN rollback always works.
        torch.save(model.state_dict(), os.path.join(opt.outf, 'net_WINNet_epoch_{}.pth'.format(start_epoch)))
    epoch = (start_epoch + 1)
    torch.cuda.synchronize()
    t1 = time.time()
    print('Epoch: ', epoch)
    while (epoch <= opt.epochs):
        with tqdm(loader_train, unit='batch') as tepoch:
            i = 0
            for data in tepoch:
                tepoch.set_description(f'Epoch {epoch}')
                model.train()
                model.zero_grad()
                optimizer.zero_grad()
                img_train = data.cuda()
                # 'S' = fixed noise level, 'B' = blind (sampled per batch).
                if (opt.mode == 'S'):
                    stdN = opt.noiseL
                if (opt.mode == 'B'):
                    stdRD = np.random.uniform(noiseL_B[0], noiseL_B[1], size=1)
                    stdN = stdRD[0]
                # NOTE(review): `Variable` is a no-op since PyTorch 0.4.
                stdNv = Variable((stdN * torch.ones(1, device=torch.device('cuda:0'))))
                noise = torch.cuda.FloatTensor(img_train.size()).normal_(mean=0, std=(stdN / 255.0))
                imgn_train = (img_train + noise)
                (img_train, imgn_train) = (Variable(img_train), Variable(imgn_train))
                (outn_train, oloss) = model(imgn_train, stdNv)
                loss = (criterion(outn_train, img_train) / (imgn_train.size()[0] * 2))
                # oloss is the model's orthogonality penalty term.
                loss = (loss + oloss)
                Loss = loss.detach()
                # Every 10th batch, add the update/predict linearity penalty.
                if ((i % 10) == 0):
                    norm_PU = net.linear()
                    loss = (loss + ((1 * 0.1) * norm_PU))
                loss.backward()
                nn.utils.clip_grad_norm_(model.parameters(), max_norm=1, norm_type=2)
                optimizer.step()
                tepoch.set_postfix(loss=Loss)
                time.sleep(0.0001)
                i += 1
        torch.cuda.synchronize()
        t2 = time.time()
        print(f'Time:{((t2 - t1) / 60): .3e} mins')
        # NOTE(review): oloss/norm_PU/Loss are leftovers from the last batch;
        # they would be unbound if the loader were empty.
        print('Orthogonal Loss: ', oloss)
        print('Norm PU', norm_PU)
        for param_group in optimizer.param_groups:
            current_lr = param_group['lr']
            print('Learning rate: ', current_lr)
        if (torch.isnan(Loss) or torch.isinf(Loss)):
            # Diverged: roll back to the previous checkpoint (or reinitialize)
            # and shrink the learning rate before retrying this epoch.
            if (epoch > 0):
                print('Load Model Epoch: ', (epoch - 1))
                model.load_state_dict(torch.load(os.path.join(opt.outf, 'net_WINNet_epoch_{}.pth'.format((epoch - 1)))))
            else:
                net = WINNetklvl(steps=opt.num_of_steps, layers=opt.num_of_layers, channels=opt.num_of_channels, klvl=opt.lvl, mode=opt.modeSM, dnlayers=opt.dnlayers)
                model = nn.DataParallel(net, device_ids=device_ids).cuda()
            for param_group in optimizer.param_groups:
                current_lr = param_group['lr']
                current_lr = (current_lr * 0.8)
                param_group['lr'] = current_lr
                print('Learning rate: ', current_lr)
            continue
        torch.cuda.synchronize()
        t1 = time.time()
        print(('Save Model Epoch: %d' % epoch))
        print('\n')
        torch.save(model.state_dict(), os.path.join(opt.outf, 'net_WINNet_epoch_{}.pth'.format(epoch)))
        epoch += 1
        print('Epoch: ', epoch)
        # Step-decay the learning rate every `milestone` epochs.
        if ((epoch > 0) and ((epoch % opt.milestone) == 0)):
            for param_group in optimizer.param_groups:
                current_lr = param_group['lr']
                current_lr = (current_lr * opt.decay_rate)
                param_group['lr'] = current_lr
                print('Learning rate: ', current_lr)
        # Regenerate the training patches for the next epoch; patch size and
        # augmentation differ between fixed ('S') and blind ('B') training.
        if (opt.mode == 'S'):
            prepare_data(data_path='data', patch_size=40, stride=10, aug_times=1)
        if (opt.mode == 'B'):
            prepare_data(data_path='data', patch_size=50, stride=10, aug_times=2)
class PaddedAdditiveSelfAttention(snt.AbstractModule):
    """Additive self-attention over variable-size graphs, implemented by
    padding every graph in the batch to `max_n_node` nodes.

    TF1 graph-mode sonnet module: the `tf.Print` calls are debug taps that
    dump tensors for the first batch only.
    """

    def __init__(self, v_dim, attn_mlp_fn, attn_output_dim, gnn_mlp_fn, max_n_node, train_batch_size, node_embedding_dim, scaling=True, name='padded_additive_self_attention'):
        """Stores configuration and builds the value-projection layer.

        Args:
            v_dim: width of the value projection.
            attn_mlp_fn: zero-arg factory for the additive-attention MLP.
            attn_output_dim: output width of the attention MLP (dotted away).
            gnn_mlp_fn: zero-arg factory for the post-attention node MLP.
            max_n_node: padded node count per graph.
            train_batch_size: number of graphs per batch (static).
            node_embedding_dim: width of the pairwise-concat features.
            scaling: stored but not read in `_build` below.
            name: sonnet module name.
        """
        super(PaddedAdditiveSelfAttention, self).__init__(name=name)
        self.v_dim = v_dim
        self.attn_mlp = attn_mlp_fn()
        self.gnn_mlp = gnn_mlp_fn()
        self.attn_output_dim = attn_output_dim
        self.max_n_node = max_n_node
        self.scaling = scaling
        self.train_batch_size = train_batch_size
        self.node_embedding_dim = node_embedding_dim
        # Value projection (no bias), applied before padding.
        self.wv = tf.keras.layers.Dense(v_dim, use_bias=False)

    def pad(self, node_embeddings, n_node):
        """Split a flat [total_nodes, d] tensor per graph and zero-pad each
        graph to `max_n_node` rows; returns [batch, max_n_node, d]."""
        n_node_diffs = (self.max_n_node - n_node)
        graph_list = tf.split(node_embeddings, n_node, 0)
        padded_graphs = []
        # [[0, 1], [0, 0]] pads one row at the bottom; scaling by the per-graph
        # deficit pads exactly (max_n_node - n_node[i]) rows.
        padding = tf.constant([[0, 1], [0, 0]])
        for i in range(self.train_batch_size):
            padded_graphs.append(tf.pad(graph_list[i], (padding * n_node_diffs[i])))
        final_output = tf.stack(padded_graphs)
        return final_output

    def unpad(self, node_embeddings, n_node):
        """Inverse of `pad`: keep each graph's first n_node[i] rows and
        re-concatenate to a flat [total_nodes, d] tensor."""
        graph_list = tf.unstack(node_embeddings, axis=0)
        to_concat = []
        for i in range(self.train_batch_size):
            to_concat.append(graph_list[i][0:n_node[i]])
        final_output = tf.concat(to_concat, axis=0)
        return final_output

    def _build(self, graph):
        """Applies padded additive self-attention and returns the graph with
        updated node features."""
        node_embeddings = graph.nodes
        node_embeddings = tf.Print(node_embeddings, [node_embeddings], 'node_embeddings is: ', summarize=100, first_n=1)
        # Values: per-node linear projection, then pad to the dense layout.
        project_v = self.wv(node_embeddings)
        project_v = tf.Print(project_v, [project_v], 'project v is: ', summarize=100, first_n=1)
        padded_project_v = self.pad(project_v, graph.n_node)
        padded_project_v.set_shape([self.train_batch_size, self.max_n_node, self.v_dim])
        padded_project_v = tf.Print(padded_project_v, [padded_project_v], 'padded project v is ', summarize=100, first_n=1)
        padded_node_embeddings = self.pad(node_embeddings, graph.n_node)
        padded_node_embeddings = tf.Print(padded_node_embeddings, [padded_node_embeddings], 'padded_node_embeddings is: ', summarize=100, first_n=1)
        # Additive attention: score every (node, node) pair with an MLP,
        # then reduce the MLP output to a scalar logit via a learned vector.
        pairwise_concat = pairwise_concat_padded(padded_node_embeddings)
        pairwise_concat = tf.reshape(pairwise_concat, [(self.train_batch_size * (self.max_n_node ** 2)), self.node_embedding_dim])
        pairwise_concat.set_shape([None, self.node_embedding_dim])
        additive_attn_output = self.attn_mlp(pairwise_concat)
        dot_var = tf.get_variable('dot_var', shape=[self.attn_output_dim], dtype=tf.float32, initializer=tf.glorot_normal_initializer, trainable=True)
        d = tf.tensordot(dot_var, additive_attn_output, axes=[0, 1])
        logits = tf.reshape(d, [self.train_batch_size, self.max_n_node, self.max_n_node])
        logits.set_shape([self.train_batch_size, self.max_n_node, self.max_n_node])
        # Mask out padded positions with a large negative before the softmax.
        lm = loss_mask_padded(graph, self.max_n_node)
        logits = (logits - (100000 * (1 - lm)))
        attn_weights = tf.nn.softmax(logits, axis=(- 1))
        attended_nodes = tf.matmul(attn_weights, padded_project_v)
        # Back to the flat layout, concat with the originals, and transform.
        attended_nodes = self.unpad(attended_nodes, graph.n_node)
        concat_nodes = tf.concat([graph.nodes, attended_nodes], axis=(- 1))
        new_nodes = self.gnn_mlp(concat_nodes)
        return graph.replace(nodes=new_nodes)
def gen_dis_sample(train_pos_path, train_neg_path, vocab_path, n_dim=100, res_file='discriminator_train_data.npz'):
    """Build the discriminator training set from positive/negative text dirs.

    Every file in each directory is read, whitespace-tokenized (with a
    trailing '.' stripped), mapped to vocab ids (0 for OOV), and padded with
    1s / truncated to exactly `n_dim` ids.  The two id matrices are saved to
    `res_file` under `pos_summary_idx` / `neg_summary_idx`.
    """
    vocab_map = load_vocab(vocab_path)
    print('vocab length:', len(vocab_map))
    train_pos = _read_sample_dir(train_pos_path, vocab_map, n_dim)
    print('positive samples:', len(train_pos))
    train_neg = _read_sample_dir(train_neg_path, vocab_map, n_dim)
    print('negative samples:', len(train_neg))
    np.savez(res_file, pos_summary_idx=np.array(train_pos), neg_summary_idx=np.array(train_neg))
    print('file saved: ', res_file)


def _read_sample_dir(dir_path, vocab_map, n_dim):
    """Read every file under `dir_path` into a fixed-length id sequence.

    Extracted from gen_dis_sample, where this loop was duplicated verbatim
    for the positive and negative directories.
    """
    samples = []
    for file in os.listdir(dir_path):
        with open(os.path.join(dir_path, file), 'r') as rf:
            content = rf.read().strip('.').split()
        # 0 is the OOV id; 1 is the padding id.
        text = [(vocab_map[w] if w in vocab_map else 0) for w in content]
        if len(text) <= n_dim:
            # BUG FIX: `np.int` was removed in NumPy 1.24 (deprecated since
            # 1.20); the builtin `int` is the documented equivalent alias.
            sample = np.pad(text, (0, n_dim - len(text)), mode='constant', constant_values=1).astype(int)
        else:
            sample = text[:n_dim]
        samples.append(sample)
    return samples
_torch
_vision
class MgpstrProcessorTest(unittest.TestCase):
image_processing_class = (ViTImageProcessor if is_vision_available() else None)
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def setUp(self):
self.image_size = (3, 32, 128)
self.tmpdirname = tempfile.mkdtemp()
vocab = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
vocab_tokens = dict(zip(vocab, range(len(vocab))))
self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as fp:
fp.write((json.dumps(vocab_tokens) + '\n'))
image_processor_map = {'do_normalize': False, 'do_resize': True, 'feature_extractor_type': 'ViTFeatureExtractor', 'resample': 3, 'size': {'height': 32, 'width': 128}}
self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        """Load an MgpstrTokenizer from the temp dir written by setUp."""
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_image_processor(self, **kwargs):
        """Load a ViTImageProcessor from the temp dir written by setUp."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)
    def tearDown(self):
        """Delete the temp dir created in setUp."""
        shutil.rmtree(self.tmpdirname)
def prepare_image_inputs(self):
image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
image_input = Image.fromarray(np.moveaxis(image_input, 0, (- 1)))
return image_input
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
image_processor = self.get_image_processor()
processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
processor.save_pretrained(self.tmpdirname)
processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor.image_processor, ViTImageProcessor)
def test_save_load_pretrained_additional_features(self):
tokenizer = self.get_tokenizer()
image_processor = self.get_image_processor()
processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
processor = MgpstrProcessor.from_pretrained(self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=False, padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, ViTImageProcessor)
def test_image_processor(self):
    """processor(images=...) matches calling the image processor directly."""
    img_proc = self.get_image_processor()
    processor = MgpstrProcessor(tokenizer=self.get_tokenizer(), image_processor=img_proc)
    image = self.prepare_image_inputs()
    direct = img_proc(image, return_tensors='np')
    via_processor = processor(images=image, return_tensors='np')
    for key in direct.keys():
        # compare per-key tensor sums within a small tolerance
        self.assertAlmostEqual(direct[key].sum(), via_processor[key].sum(), delta=0.01)
def test_tokenizer(self):
    """processor(text=...) matches calling the char tokenizer directly."""
    char_tok = self.get_tokenizer()
    processor = MgpstrProcessor(tokenizer=char_tok, image_processor=self.get_image_processor())
    sample_text = 'test'
    via_processor = processor(text=sample_text)
    direct = char_tok(sample_text)
    for key in direct.keys():
        self.assertListEqual(direct[key], via_processor[key])
def test_processor(self):
    """Joint text+image call yields pixel_values/labels; empty call raises."""
    processor = MgpstrProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
    outputs = processor(text='test', images=self.prepare_image_inputs())
    self.assertListEqual(list(outputs.keys()), ['pixel_values', 'labels'])
    # calling with neither text nor images must be rejected
    with pytest.raises(ValueError):
        processor()
def test_tokenizer_decode(self):
    """char_decode equals batch_decode with the inserted spaces stripped."""
    char_tok = self.get_tokenizer()
    processor = MgpstrProcessor(tokenizer=char_tok, image_processor=self.get_image_processor())
    token_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
    expected = [text.replace(' ', '') for text in char_tok.batch_decode(token_ids)]
    self.assertListEqual(expected, processor.char_decode(token_ids))
def test_model_input_names(self):
    """An image-only call produces exactly processor.model_input_names."""
    processor = MgpstrProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
    outputs = processor(text=None, images=self.prepare_image_inputs())
    self.assertListEqual(list(outputs.keys()), processor.model_input_names)
def test_processor_batch_decode(self):
    """batch_decode over (char, bpe, wp) logits returns all expected keys."""
    processor = MgpstrProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
    # one random logits tensor per decoding head, vocab sizes: char/bpe/wp
    logits = [torch.randn(1, 27, vocab_size) for vocab_size in (38, 50257, 30522)]
    decoded = processor.batch_decode(logits)
    self.assertListEqual(list(decoded.keys()), ['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
class ConvLSTM(nn.Module):
    """Multi-layer convolutional LSTM.

    Stacks ``len(hidden_channels)`` ConvLSTMCell layers and unrolls them
    ``step`` times, feeding the same ``input`` tensor at every step.

    Args:
        input_channels: channel count of the input tensor.
        hidden_channels: list of hidden-channel counts, one per layer.
        kernel_size: convolution kernel size shared by all cells.
        step: number of unroll steps.
        effective_step: steps whose top-layer output is collected into the
            returned ``outputs`` list; defaults to ``[1]``.
    """

    def __init__(self, input_channels, hidden_channels, kernel_size, step=1, effective_step=None):
        super(ConvLSTM, self).__init__()
        # BUGFIX: the default used the mutable literal `[1]`, shared across all
        # instances; a None sentinel gives the same default without sharing.
        self.effective_step = [1] if effective_step is None else effective_step
        self.input_channels = ([input_channels] + hidden_channels)
        self.hidden_channels = hidden_channels
        self.kernel_size = kernel_size
        self.num_layers = len(hidden_channels)
        self.step = step
        self._all_layers = []
        for i in range(self.num_layers):
            name = 'cell{}'.format(i)
            cell = ConvLSTMCell(self.input_channels[i], self.hidden_channels[i], self.kernel_size)
            # setattr registers the cell as a submodule so params are tracked
            setattr(self, name, cell)
            self._all_layers.append(cell)

    def forward(self, input):
        """Unroll the stack; return (outputs, (h, c)).

        ``outputs`` collects the top-layer hidden state at each step listed in
        ``effective_step``; ``(h, c)`` is the final top-layer state.
        """
        internal_state = []
        outputs = []
        for step in range(self.step):
            x = input
            for i in range(self.num_layers):
                name = 'cell{}'.format(i)
                if (step == 0):
                    # lazily size the per-layer hidden state from the input
                    (bsize, _, height, width) = x.size()
                    (h, c) = getattr(self, name).init_hidden(batch_size=bsize, hidden=self.hidden_channels[i], shape=(height, width))
                    internal_state.append((h, c))
                (h, c) = internal_state[i]
                (x, new_c) = getattr(self, name)(x, h, c)
                internal_state[i] = (x, new_c)
            if (step in self.effective_step):
                outputs.append(x)
        return (outputs, (x, new_c))
def initialize_dataset_loader(data, stage, params, loader_default_params=None):
    """Build a DataLoader from a config dict.

    Consumes (pops) the keys 'transforms', 'mean_std', 'dataset' and optional
    'loader' from `params`; asserts nothing unexpected remains.
    """
    mean_std = params.pop('mean_std')
    transform = initialize_transforms(params.pop('transforms'), mean_std=mean_std)
    dataset = initialize_dataset(data, stage, transform, params.pop('dataset'))
    # later sources override earlier ones: global < caller < dataset < config
    loader_params = dict(LOADER_DEFAULT_PARAMS)
    loader_params.update(loader_default_params or {})
    loader_params.update(dataset.loader_params)
    loader_params.update(params.pop('loader', {}))
    assert 'batch_size' in loader_params
    assert not params, params.keys()
    return DataLoader(dataset, **loader_params)
def make_fast_softmax_attention(qkv_dim, renormalize_attention=True, numerical_stabilizer=1e-06, nb_features=256, ortho_features=True, ortho_scaling=0.0, redraw_features=True, unidirectional=False, nonnegative_features=True, lax_scan_unroll=1):
    """Construct a FAVOR-style fast softmax-attention function.

    Builds a random-feature matrix creator (orthogonal or unstructured
    Gaussian) and a kernel feature map (non-negative softmax features or
    sin/cos features), then wires them into a low-rank attention object and
    returns its ``dot_product_attention`` callable.
    """
    logging.info('Fast softmax attention: %s features and orthogonal=%s, renormalize=%s', nb_features, ortho_features, renormalize_attention)
    # Choose how the random projection matrix is sampled.
    if ortho_features:
        matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=ortho_scaling)
    else:
        matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim)
    # Choose the kernel feature map; both closures share the same signature.
    if nonnegative_features:
        def kernel_feature_creator(data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True):
            # non-negative features use the stabilizer and distinguish query/key
            return nonnegative_softmax_kernel_feature_creator(data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data, numerical_stabilizer)
    else:
        def kernel_feature_creator(data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True):
            # sin/cos features treat queries and keys identically
            del is_query
            return sincos_softmax_kernel_feature_creator(data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data)
    attention_fn = FastAttentionviaLowRankDecomposition(matrix_creator, kernel_feature_creator, renormalize_attention=renormalize_attention, numerical_stabilizer=numerical_stabilizer, redraw_features=redraw_features, unidirectional=unidirectional, lax_scan_unroll=lax_scan_unroll).dot_product_attention
    return attention_fn
class TrainingModule(LightningModule):
    """Lightning wrapper that drives a detectron2 model.

    Bridges detectron2's EventStorage / hooks / checkpointer machinery into
    the PyTorch Lightning train/validation lifecycle.
    """

    def __init__(self, cfg):
        super().__init__()
        if not logger.isEnabledFor(logging.INFO):
            setup_logger()
        # rescale dataloader workers the same way DefaultTrainer does
        self.cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
        self.storage: EventStorage = None  # created lazily on the first training step
        self.model = build_model(self.cfg)
        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER

    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        # persist the detectron2 iteration counter with Lightning state
        checkpoint['iteration'] = self.storage.iter

    def on_load_checkpoint(self, checkpointed_state: Dict[str, Any]) -> None:
        self.start_iter = checkpointed_state['iteration']
        self.storage.iter = self.start_iter

    def setup(self, stage: str):
        """Load pretrained weights (if configured) and init timing state."""
        if self.cfg.MODEL.WEIGHTS:
            self.checkpointer = DetectionCheckpointer(self.model, self.cfg.OUTPUT_DIR)
            logger.info(f'Load model weights from checkpoint: {self.cfg.MODEL.WEIGHTS}.')
            self.checkpointer.load(self.cfg.MODEL.WEIGHTS)
        self.iteration_timer = hooks.IterationTimer()
        self.iteration_timer.before_train()
        self.data_start = time.perf_counter()
        self.writers = None

    def training_step(self, batch, batch_idx):
        data_time = time.perf_counter() - self.data_start
        # First step: open the EventStorage context and create metric writers.
        if self.storage is None:
            self.storage = EventStorage(0)
            self.storage.__enter__()
            self.iteration_timer.trainer = weakref.proxy(self)
            self.iteration_timer.before_step()
            self.writers = (default_writers(self.cfg.OUTPUT_DIR, self.max_iter) if comm.is_main_process() else {})
        loss_dict = self.model(batch)
        SimpleTrainer.write_metrics(loss_dict, data_time)
        opt = self.optimizers()
        self.storage.put_scalar('lr', opt.param_groups[self._best_param_group_id]['lr'], smoothing_hint=False)
        self.iteration_timer.after_step()
        self.storage.step()
        # start timing the next iteration
        self.iteration_timer.before_step()
        if (self.storage.iter % 20) == 0:
            for writer in self.writers:
                writer.write()
        return sum(loss_dict.values())

    def training_step_end(self, training_step_outpus):
        self.data_start = time.perf_counter()
        return training_step_outpus

    def training_epoch_end(self, training_step_outputs):
        self.iteration_timer.after_train()
        if comm.is_main_process():
            self.checkpointer.save('model_final')
        for writer in self.writers:
            writer.write()
            writer.close()
        self.storage.__exit__(None, None, None)

    def _process_dataset_evaluation_results(self) -> OrderedDict:
        """Collect evaluator results per test dataset; unwrap if only one."""
        results = OrderedDict()
        for idx, dataset_name in enumerate(self.cfg.DATASETS.TEST):
            results[dataset_name] = self._evaluators[idx].evaluate()
            if comm.is_main_process():
                print_csv_format(results[dataset_name])
        if len(results) == 1:
            results = list(results.values())[0]
        return results

    def _reset_dataset_evaluators(self):
        """Build and reset one evaluator per configured test dataset."""
        self._evaluators = []
        for dataset_name in self.cfg.DATASETS.TEST:
            evaluator = build_evaluator(self.cfg, dataset_name)
            evaluator.reset()
            self._evaluators.append(evaluator)

    def on_validation_epoch_start(self, _outputs=None):
        # BUGFIX: Lightning calls this hook with no arguments; the original
        # required `_outputs` and would raise TypeError. Defaulting it keeps
        # both call styles working.
        self._reset_dataset_evaluators()

    def validation_epoch_end(self, _outputs):
        # BUGFIX: _process_dataset_evaluation_results takes no arguments; the
        # original passed `_outputs` and would raise TypeError at runtime.
        results = self._process_dataset_evaluation_results()
        flattened_results = flatten_results_dict(results)
        for k, v in flattened_results.items():
            try:
                v = float(v)
            except Exception as e:
                raise ValueError("[EvalHook] eval_function should return a nested dict of float. Got '{}: {}' instead.".format(k, v)) from e
        self.storage.put_scalars(**flattened_results, smoothing_hint=False)

    def validation_step(self, batch, batch_idx: int, dataloader_idx: int = 0) -> None:
        if not isinstance(batch, List):
            batch = [batch]
        outputs = self.model(batch)
        self._evaluators[dataloader_idx].process(batch, outputs)

    def configure_optimizers(self):
        optimizer = build_optimizer(self.cfg, self.model)
        self._best_param_group_id = hooks.LRScheduler.get_best_param_group_id(optimizer)
        scheduler = build_lr_scheduler(self.cfg, optimizer)
        return ([optimizer], [{'scheduler': scheduler, 'interval': 'step'}])
def plot_numerical_error():
    """Load pickled numerical-benchmark results and render all plots."""
    print(f'generating numerical benchmark plots. reading data from {TEST_RESULTS_PICKLE}...')
    try:
        with open(TEST_RESULTS_PICKLE, 'rb') as fh:
            data = pickle.load(fh)
    except FileNotFoundError:
        print('no data of numerical errors found. run the numerical tests first')
        return
    for render in (plot_num_err_heatmap, plot_num_error_growth_comparison, plot_num_coeffs2num_ops):
        render(data)
    print('plotting numerical benchmarks done.')
def sum_concat_then_mlp_gnn(make_mlp_fn):
    """Build a NodeBlockGNN whose node block sum-aggregates then applies an MLP."""
    block = ConcatThenMLPBlock(tf.unsorted_segment_sum, make_mlp_fn)
    return NodeBlockGNN(block)
_loss('bce_kl_combined')
class CombinedLoss(nn.Module):
    """Weighted sum of a softmax-KL term and a BCE-with-logits term."""

    def __init__(self, weight_softmax):
        super().__init__()
        self.weight_softmax = weight_softmax

    def forward(self, sample_list, model_output):
        scores = model_output['scores']
        targets = sample_list['targets']
        # normalize targets to a distribution; guard all-zero rows
        target_sum = torch.sum(targets, dim=1, keepdim=True)
        target_sum.masked_fill_(torch.eq(target_sum, 0), 1e-06)
        normalized_targets = targets / target_sum
        log_probs = F.log_softmax(scores, dim=1)
        softmax_loss = kl_div(log_probs, normalized_targets)
        softmax_loss = torch.sum(softmax_loss) / softmax_loss.size(0)
        bce_loss = F.binary_cross_entropy_with_logits(scores, targets, reduction='mean')
        # rescale the mean BCE back to a per-sample sum over classes
        bce_loss *= targets.size(1)
        return (self.weight_softmax * softmax_loss) + bce_loss
class DeadReckoningNodeMultiRobot(DeadReckoningNode):
    """Dead-reckoning localization node for a multi-robot setup.

    Fuses IMU (or IMU + gyro), DVL and depth messages into an odometry
    estimate, publishing the pose as Odometry and broadcasting the
    odom -> base_link TF, with frame ids namespaced by ``self.rov_id``.
    """

    def __init__(self) -> None:
        super().__init__()

    def init_node(self, ns='~') -> None:
        """Read parameters, set up subscribers/publishers and synchronizers.

        ``ns`` is the parameter namespace prefix (default: private ``~``).
        """
        # static IMU mounting pose relative to the vehicle
        self.imu_pose = rospy.get_param((ns + 'imu_pose'))
        self.imu_pose = n2g(self.imu_pose, 'Pose3')
        self.imu_rot = self.imu_pose.rotation()
        # DVL velocity readings above this magnitude are presumably rejected
        # downstream — the threshold is only stored here
        self.dvl_max_velocity = rospy.get_param((ns + 'dvl_max_velocity'))
        # keyframe creation thresholds (time / translation / rotation)
        self.keyframe_duration = rospy.get_param((ns + 'keyframe_duration'))
        self.keyframe_translation = rospy.get_param((ns + 'keyframe_translation'))
        self.keyframe_rotation = rospy.get_param((ns + 'keyframe_rotation'))
        self.dvl_sub = Subscriber(DVL_TOPIC, DVL)
        self.gyro_sub = Subscriber(GYRO_INTEGRATION_TOPIC, Odometry)
        self.depth_sub = Subscriber(DEPTH_TOPIC, Depth)
        # keep only the most recent depth message
        self.depth_cache = Cache(self.depth_sub, 1)
        # hardware revision selects the IMU topic
        if (rospy.get_param((ns + 'imu_version')) == 1):
            self.imu_sub = Subscriber(IMU_TOPIC, Imu)
        elif (rospy.get_param((ns + 'imu_version')) == 2):
            self.imu_sub = Subscriber(IMU_TOPIC_MK_II, Imu)
        self.traj_pub = rospy.Publisher('traj_dead_reck', PointCloud2, queue_size=10)
        self.odom_pub = rospy.Publisher(LOCALIZATION_ODOM_TOPIC, Odometry, queue_size=10)
        self.use_gyro = rospy.get_param((ns + 'use_gyro'))
        # approximate-time sync: IMU+DVL(+gyro) within a 0.1 s slop window
        if self.use_gyro:
            self.ts = ApproximateTimeSynchronizer([self.imu_sub, self.dvl_sub, self.gyro_sub], 300, 0.1)
            self.ts.registerCallback(self.callback_with_gyro)
        else:
            self.ts = ApproximateTimeSynchronizer([self.imu_sub, self.dvl_sub], 200, 0.1)
            self.ts.registerCallback(self.callback)
        self.tf = tf.TransformBroadcaster()
        # NOTE(review): debug print of the initial pose — consider rospy.logdebug
        print(self.pose)
        rospy.loginfo('Localization node is initialized')

    def publish_pose(self, flag) -> None:
        """Publish the current pose as Odometry and broadcast its TF.

        ``flag`` is accepted but unused here (kept for the base-class
        signature). No-op until a pose estimate exists.
        """
        if (self.pose is None):
            return
        header = rospy.Header()
        header.stamp = self.prev_time
        header.frame_id = (self.rov_id + '_odom')
        odom_msg = Odometry()
        odom_msg.header = header
        odom_msg.pose.pose = g2r(self.pose)
        odom_msg.child_frame_id = (self.rov_id + '_base_link')
        # twist is intentionally zeroed — only the pose is estimated here
        odom_msg.twist.twist.linear.x = 0
        odom_msg.twist.twist.linear.y = 0
        odom_msg.twist.twist.linear.z = 0
        odom_msg.twist.twist.angular.x = 0
        odom_msg.twist.twist.angular.y = 0
        odom_msg.twist.twist.angular.z = 0
        self.odom_pub.publish(odom_msg)
        p = odom_msg.pose.pose.position
        q = odom_msg.pose.pose.orientation
        # broadcast odom -> base_link with the same stamp as the odometry
        self.tf.sendTransform((p.x, p.y, p.z), (q.x, q.y, q.z, q.w), header.stamp, (self.rov_id + '_base_link'), (self.rov_id + '_odom'))
class PerceputalLoss(nn.modules.loss._Loss):
    """VGG feature-matching (perceptual) loss.

    (Class-name typo kept for backward compatibility with existing callers.)

    Args:
        input_range: 'sigmoid' for inputs in [0, 1] or 'tanh' for [-1, 1].
        net_type: which pretrained backbone to use.
        input_preprocessing: 'corresponding' selects the preprocessing that
            matches `net_type`; otherwise the named preprocessing is used.
        match: matcher options, one dict per matcher.
    """

    def __init__(self, input_range='sigmoid', net_type='vgg_torch', input_preprocessing='corresponding', match=None):
        # BUGFIX: _Loss.__init__ was never called.
        super().__init__()
        # BUGFIX: the default was a mutable list literal; use a None sentinel.
        if match is None:
            match = [{'layers': [11, 20, 29], 'what': 'features'}]
        if input_range not in ['sigmoid', 'tanh']:
            # BUGFIX: was `assert False` — stripped under -O; raise instead.
            raise ValueError("input_range must be 'sigmoid' or 'tanh'")
        # BUGFIX: input_range was never stored, but preprocess_input reads it.
        self.input_range = input_range
        self.net = get_pretrained_net(net_type).cuda()
        self.matchers = [get_matcher(self.net, match_opts) for match_opts in match]
        preprocessing_correspondence = {'vgg19_torch': vgg_preprocess_caffe, 'vgg16_torch': vgg_preprocess_caffe, 'vgg19_pytorch': vgg_preprocess_pytorch, 'vgg19_pytorch_modified': vgg_preprocess_pytorch}
        # BUGFIX: the original stored the chosen function under two different
        # names (clobbering the preprocess_input *method* in one branch and
        # the unused `self.preprocessing` in the other) while the method body
        # called the undefined `self.preprocess`. Store it once, consistently.
        if input_preprocessing == 'corresponding':
            self.preprocess = preprocessing_correspondence[net_type]
        else:
            self.preprocess = preprocessing_correspondence[input_preprocessing]

    def preprocess_input(self, x):
        """Shift tanh-range inputs to [0, 1], then apply network preprocessing."""
        if self.input_range == 'tanh':
            x = (x + 1.0) / 2.0
        return self.preprocess(x)

    def __call__(self, x, y):
        """Return the summed matcher losses between x (prediction) and y (target)."""
        # BUGFIX: the original toggled the undefined `self.matcher_content`;
        # every matcher's mode must be switched.
        for matcher in self.matchers:
            matcher.mode = 'store'
        self.net(self.preprocess_input(y))
        for matcher in self.matchers:
            matcher.mode = 'match'
        self.net(self.preprocess_input(x))
        return sum(sum(matcher.losses.values()) for matcher in self.matchers)
def isect_segments__naive(segments):
    """Brute-force O(n^2) pairwise segment intersection.

    Returns the list of intersection points between all segment pairs,
    skipping pairs that share an endpoint.
    """
    isect = []
    # Normalize each segment so its "smaller" endpoint comes first.
    if (Real is float):
        # float mode: order endpoints by X coordinate only
        segments = [((s[0], s[1]) if (s[0][X] <= s[1][X]) else (s[1], s[0])) for s in segments]
    else:
        # exact mode: convert every coordinate to Real, ordering by the full
        # endpoint tuple. NOTE(review): this key (lexicographic on (x, y))
        # differs from the float branch (X only) — confirm intentional.
        segments = [(((Real(s[0][0]), Real(s[0][1])), (Real(s[1][0]), Real(s[1][1]))) if (s[0] <= s[1]) else ((Real(s[1][0]), Real(s[1][1])), (Real(s[0][0]), Real(s[0][1])))) for s in segments]
    n = len(segments)
    for i in range(n):
        (a0, a1) = segments[i]
        for j in range((i + 1), n):
            (b0, b1) = segments[j]
            # skip pairs sharing an endpoint (touching, not crossing)
            if ((a0 not in (b0, b1)) and (a1 not in (b0, b1))):
                ix = isect_seg_seg_v2_point(a0, a1, b0, b1)
                if (ix is not None):
                    isect.append(ix)
    return isect
def ts2xy(ts, xaxis, yaxis):
    """Convert a monitor frame to (x, y) arrays for plotting.

    xaxis selects cumulative timesteps, episode index, or wall-clock hours;
    yaxis selects episode reward or episode length. Unknown axis constants
    raise NotImplementedError.
    """
    if xaxis == X_TIMESTEPS:
        x_vals = np.cumsum(ts.l.values)
    elif xaxis == X_EPISODES:
        x_vals = np.arange(len(ts))
    elif xaxis == X_WALLTIME:
        # seconds -> hours
        x_vals = ts.t.values / 3600.0
    else:
        raise NotImplementedError
    if yaxis == Y_REWARD:
        y_vals = ts.r.values
    elif yaxis == Y_TIMESTEPS:
        y_vals = ts.l.values
    else:
        raise NotImplementedError
    return x_vals, y_vals
def test_statcast_pitchers_expected_stats() -> None:
    """Smoke-test the 2019 pitcher expected-stats query."""
    minimum_pa = 100
    frame: pd.DataFrame = statcast_pitcher_expected_stats(2019, minimum_pa)
    assert frame is not None
    assert not frame.empty
    assert len(frame.columns) == 18
    assert len(frame) > 0
    # every returned pitcher must meet the PA floor
    assert (frame['pa'] < minimum_pa).sum() == 0
def setup_orderdict():
    """Register a YAML representer so OrderedDicts keep their key order."""
    from collections import OrderedDict
    yaml.add_representer(OrderedDict, represent_dictionary_order)
_model_architecture('dual_input_wav_transformer', 'dualinputs2twavtransformer_base')
def dualinputs2twavtransformer_base(args):
    """Fill in the base architecture defaults for the dual-input wav transformer.

    Mutates `args` in place: every attribute keeps an explicitly-set value and
    falls back to the base default otherwise.
    """
    args.dropout_input = getattr(args, 'dropout_input', 0)
    args.dropout_features = getattr(args, 'dropout_features', 0)
    args.speech_mask_length = getattr(args, 'speech_mask_length', 10)
    args.speech_mask_prob = getattr(args, 'speech_mask_prob', 0.65)
    args.speech_mask_selection = getattr(args, 'speech_mask_selection', 'static')
    args.speech_mask_other = getattr(args, 'speech_mask_other', 0)
    args.speech_mask_min_space = getattr(args, 'speech_mask_min_space', 1)
    args.speech_no_mask_overlap = getattr(args, 'speech_no_mask_overlap', False)
    args.speech_conv_bias = getattr(args, 'speech_conv_bias', False)
    args.speech_extractor_mode = getattr(args, 'speech_extractor_mode', 'default')
    args.no_strict_check_pretrain_model = getattr(args, 'no_strict_check_pretrain_model', False)
    args.speech_mask_channel_length = getattr(args, 'speech_mask_channel_length', 10)
    args.speech_mask_channel_prob = getattr(args, 'speech_mask_channel_prob', 0.0)
    args.speech_mask_channel_selection = getattr(args, 'speech_mask_channel_selection', 'static')
    args.speech_mask_channel_other = getattr(args, 'speech_mask_channel_other', 0)
    args.speech_mask_channel_min_space = getattr(args, 'speech_mask_channel_min_space', 1)
    args.speech_no_mask_channel_overlap = getattr(args, 'speech_no_mask_channel_overlap', False)
    # BUGFIX: the original read getattr(args, '', False) — an empty attribute
    # name — so a user-provided no_scale_feature was always ignored.
    args.no_scale_feature = getattr(args, 'no_scale_feature', False)
    args.feature_grad_mult = getattr(args, 'feature_grad_mult', 0.0)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', (args.encoder_embed_dim * 4))
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 12)
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
    args.encoder_layerdrop = getattr(args, 'encoder_layerdrop', 0.1)
    args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False)
    # decoder dimensions default to their encoder counterparts
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', args.encoder_attention_heads)
    args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
    args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', False)
    args.dropout = getattr(args, 'dropout', 0.1)
    args.attention_dropout = getattr(args, 'attention_dropout', 0)
    args.activation_dropout = getattr(args, 'activation_dropout', args.dropout)
    args.activation_fn = getattr(args, 'activation_fn', 'relu')
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
    args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
    args.tie_adaptive_weights = getattr(args, 'tie_adaptive_weights', False)
    args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False)
    args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False)
    args.adaptive_input = getattr(args, 'adaptive_input', False)
    args.decoder_layerdrop = getattr(args, 'decoder_layerdrop', 0.0)
    args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
    args.layernorm_embedding = getattr(args, 'layernorm_embedding', False)
    args.no_scale_embedding = getattr(args, 'no_scale_embedding', False)
    args.quant_noise_pq = getattr(args, 'quant_noise_pq', 0)
    args.speech_encoder_layers = getattr(args, 'speech_encoder_layers', 12)
    args.text_encoder_layers = getattr(args, 'text_encoder_layers', 6)
    args.encoder_shared_text_layers_from_begin = getattr(args, 'encoder_shared_text_layers_from_begin', 6)
    args.decoder_layers = getattr(args, 'decoder_layers', 6)
def plot_roc_curve(human_scores, gpt_scores):
    """Plot a ROC curve (GPT = positive class) and report TPR near 1% FPR."""
    scores = human_scores + gpt_scores
    labels = [0] * len(human_scores) + [1] * len(gpt_scores)
    fpr, tpr, thresholds = roc_curve(labels, scores)
    area = auc(fpr, tpr)
    plt.figure()
    plt.plot(fpr, tpr, color='darkorange', lw=2, label=('ROC curve (area = %0.4f)' % area))
    # chance diagonal for reference
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC curve: Open-gen w/ GPT3.5-Reddit w prompts')
    plt.legend(loc='lower right')
    plt.show()
    # report the TPR at the first point whose FPR exceeds 1%
    for idx, rate in enumerate(fpr):
        if rate > 0.01:
            print(f'TPR at 1% FPR: {tpr[idx]:.4f}')
            break
def make_mmcif_features(mmcif_object: mmcif_parsing.MmcifObject, chain_id: str) -> FeatureDict:
    """Assemble model input features for one chain of a parsed mmCIF file."""
    seq = mmcif_object.chain_to_seqres[chain_id]
    feats = {}
    feats.update(make_sequence_features(sequence=seq, description='_'.join([mmcif_object.file_id, chain_id]), num_res=len(seq)))
    coords, atom_mask = mmcif_parsing.get_atom_coords(mmcif_object=mmcif_object, chain_id=chain_id)
    feats['all_atom_positions'] = coords
    feats['all_atom_mask'] = atom_mask
    feats['resolution'] = np.array([mmcif_object.header['resolution']], dtype=np.float32)
    feats['release_date'] = np.array([mmcif_object.header['release_date'].encode('utf-8')], dtype=np.object_)
    # experimental structure, not a distillation target
    feats['is_distillation'] = np.array(0.0, dtype=np.float32)
    return feats
def incorrect_edges_per_graph(true_adj, pred_adj, n_node, abs_tol=0.5):
    """Count mis-predicted undirected edges per graph in a batch.

    An entry counts as incorrect when |true - pred| exceeds `abs_tol`;
    the diagonal (self-loops) is excluded and the final count is halved
    because each undirected edge appears twice in the adjacency.
    `n_node` holds the node count per graph, used to segment the batch.
    """
    # element-wise absolute error with the diagonal zeroed out
    diff = remove_diag(tf.math.abs((true_adj - pred_adj)))
    # 1 where the error exceeds the tolerance, 0 elsewhere
    num_incorrect = tf.where(tf.greater(diff, abs_tol), tf.ones_like(diff), tf.zeros_like(diff))
    # per-node incorrect counts (sum over the first adjacency axis)
    num_incorrect = tf.reduce_sum(num_incorrect, axis=0)
    # graph index for every node, e.g. n_node=[2,3] -> [0,0,1,1,1]
    indices = repeat_1d(tf.range(tf.shape(n_node)[0]), n_node)
    num_incorrect = tf.segment_sum(num_incorrect, indices)
    # halve: each undirected edge was counted once per direction
    return tf.cast((num_incorrect / 2), dtype=tf.int32)
def get_caseIDs_from_splitted_dataset_folder(folder):
    """Return the unique case identifiers found in a split dataset folder."""
    nii_files = subfiles(folder, suffix='.nii.gz', join=False)
    # drop the trailing 12 characters ('_XXXX.nii.gz' modality suffix)
    case_ids = np.unique([name[:-12] for name in nii_files])
    return case_ids
def velocity_of_P_given_A(vel: T2value, omega: float, vec_ap: T2value) -> T2value:
    """Rigid-body kinematics: velocity of point P given point A's velocity.

    v_P = v_A + omega x r_AP; in 2D the angular cross product reduces to
    omega times the 90-degree rotation of r_AP.
    """
    # BUGFIX: the original read `(_rot90 vec_ap)` — a syntax error; the
    # intended expression is the function call `_rot90(vec_ap)`.
    return vel + (omega * _rot90(vec_ap))
class Wide_ResNet(nn.Module):
    """Wide-ResNet (depth = 6n+4, width multiplier k) for 32x32 inputs."""

    def __init__(self, depth, widen_factor, dropout_rate, num_classes):
        super(Wide_ResNet, self).__init__()
        self.in_planes = 16
        assert (((depth - 4) % 6) == 0), 'Wide-resnet depth should be 6n+4'
        # integer division (the original used `/` and patched with int() casts)
        n = (depth - 4) // 6
        k = widen_factor
        nStages = [16, (16 * k), (32 * k), (64 * k)]
        self.conv1 = conv3x3(3, nStages[0])
        self.layer1 = self._wide_layer(wide_basic, nStages[1], n, dropout_rate, stride=1)
        self.layer2 = self._wide_layer(wide_basic, nStages[2], n, dropout_rate, stride=2)
        self.layer3 = self._wide_layer(wide_basic, nStages[3], n, dropout_rate, stride=2)
        self.bn1 = nn.BatchNorm2d(nStages[3], momentum=0.9)
        self.linear = nn.Linear(nStages[3], num_classes)

    def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
        """Stack num_blocks residual blocks; only the first downsamples."""
        strides = [stride] + [1] * (num_blocks - 1)
        # BUGFIX: the original named every non-downsampling block 'n_block',
        # so duplicate OrderedDict keys silently dropped all but the last one
        # whenever num_blocks > 2. Number the names to keep every block.
        names = ['ds_block'] + ['n_block{}'.format(i) for i in range(num_blocks - 1)]
        layers = OrderedDict()
        for (i, blk_stride) in enumerate(strides):
            layers[names[i]] = block(self.in_planes, planes, dropout_rate, blk_stride)
            self.in_planes = planes
        return nn.Sequential(layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = F.relu(self.bn1(out))
        # global 8x8 average pool then flatten for the classifier head
        out = F.avg_pool2d(out, 8)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out
def _make_scratch_ccm(scratch, in_channels, cout, expand=False):
out_channels = ([cout, (cout * 2), (cout * 4), (cout * 8)] if expand else ([cout] * 4))
scratch.layer0_ccm = nn.Conv2d(in_channels[0], out_channels[0], kernel_size=1, stride=1, padding=0, bias=True)
scratch.layer1_ccm = nn.Conv2d(in_channels[1], out_channels[1], kernel_size=1, stride=1, padding=0, bias=True)
scratch.layer2_ccm = nn.Conv2d(in_channels[2], out_channels[2], kernel_size=1, stride=1, padding=0, bias=True)
scratch.layer3_ccm = nn.Conv2d(in_channels[3], out_channels[3], kernel_size=1, stride=1, padding=0, bias=True)
scratch.CHANNELS = out_channels
return scratch |
def test(testloader, model, criterion, epoch, use_cuda, optimizer=None):
    """Evaluate `model` on `testloader`; return (avg loss, avg top-1 accuracy).

    Handles two output layouts: plain logits (<=2 dims) and an extra third
    dimension carrying separate "live"/"dead" logit channels, which are
    tracked individually and also summed for the combined accuracy.
    """
    global best_acc
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    # per-channel accuracies when outputs have a live/dead dimension
    live_top1 = AverageMeter()
    live_top5 = AverageMeter()
    dead_top1 = AverageMeter()
    dead_top5 = AverageMeter()
    # NOTE(review): `optimizer.eval()` is unusual — presumably a project
    # optimizer wrapper with train/eval modes; confirm before reuse.
    if (optimizer is not None):
        optimizer.eval()
    model.eval()
    global_avg_pool = nr.GlobalAvgPool2d(1)
    torch.set_grad_enabled(False)
    end = time.time()
    bar = Bar('Processing', max=len(testloader))
    for (batch_idx, (inputs, targets)) in enumerate(testloader):
        data_time.update((time.time() - end))
        if use_cuda:
            (inputs, targets) = (inputs.cuda(), targets.cuda())
        (inputs, targets) = (torch.autograd.Variable(inputs), torch.autograd.Variable(targets))
        # model emits conv feature maps; pool to logits
        conv_outputs = model(inputs)
        outputs = global_avg_pool(conv_outputs)
        loss = criterion(outputs, targets)
        losses.update(loss.data.item(), inputs.size(0))
        if (len(outputs.size()) <= 2):
            # plain (batch, classes) logits
            (prec1, prec5) = accuracy(outputs.data, targets.data, topk=(1, 5))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
        elif (outputs.data.size(2) > 1):
            # channel 0 = "live", channel 1 = "dead"; track each separately
            (prec1, prec5) = accuracy(outputs.data.narrow(2, 0, 1).squeeze(), targets.data, topk=(1, 5))
            live_top1.update(prec1.item(), inputs.size(0))
            live_top5.update(prec5.item(), inputs.size(0))
            (prec1, prec5) = accuracy(outputs.data.narrow(2, 1, 1).squeeze(), targets.data, topk=(1, 5))
            dead_top1.update(prec1.item(), inputs.size(0))
            dead_top5.update(prec5.item(), inputs.size(0))
            # combined accuracy uses the summed channels
            outputs = (outputs.data.narrow(2, 0, 1) + outputs.data.narrow(2, 1, 1))
            (prec1, prec5) = accuracy(outputs.squeeze(), targets.data, topk=(1, 5))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
        else:
            # single extra channel: evaluate it directly
            (prec1, prec5) = accuracy(outputs.data.narrow(2, 0, 1).squeeze(), targets.data, topk=(1, 5))
            top1.update(prec1.item(), inputs.size(0))
            top5.update(prec5.item(), inputs.size(0))
        batch_time.update((time.time() - end))
        end = time.time()
        bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f} | live_top1: {ltop1: .4f} | dead_top1: {dtop1: .4f}'.format(batch=(batch_idx + 1), size=len(testloader), data=data_time.avg, bt=batch_time.avg, total=bar.elapsed_td, eta=bar.eta_td, loss=losses.avg, top1=top1.avg, top5=top5.avg, ltop1=live_top1.avg, dtop1=dead_top1.avg)
        bar.next()
    bar.finish()
    return (losses.avg, top1.avg)
def difficulty_judgement_ratings(df):
    """Print per-evaluator and averaged question-difficulty ratings (0-4 scale)."""
    ev1 = df['Question Difficulty_EV_1'].dropna().apply(map_difficulty)
    ev2 = df['Question Difficulty_EV_2'].dropna().apply(map_difficulty)
    # element-wise sum (index-aligned); averaged below by halving the mean
    combined = ev1 + ev2
    print(f'EV1 Difficulty: {ev1.mean()} / 4')
    print(f'EV2 Difficulty: {ev2.mean()} / 4')
    print(f'Average Difficulty: {(combined.mean() / 2)} / 4')
class PD_Stats(object):
    """Append-only experiment statistics backed by a pickled DataFrame."""

    def __init__(self, path, columns):
        self.path = path
        if os.path.isfile(self.path):
            # resume from an existing file; the schema must match exactly
            self.stats = pd.read_pickle(self.path)
            assert list(self.stats.columns) == list(columns)
        else:
            self.stats = pd.DataFrame(columns=columns)

    def update(self, row, save=True):
        """Append one row of values; optionally persist to disk."""
        self.stats.loc[len(self.stats.index)] = row
        if save:
            self.stats.to_pickle(self.path)
class EncDecBaseConfig(FairseqDataclass):
    """Shared encoder/decoder hyper-parameters for a transformer model.

    Each field's `metadata['help']` doubles as its CLI help string; defaults
    follow the base transformer configuration (512-dim embeddings, 2048-dim
    FFN, 6 layers, 8 heads).
    """

    embed_path: Optional[str] = field(default=None, metadata={'help': 'path to pre-trained embedding'})
    embed_dim: Optional[int] = field(default=512, metadata={'help': 'embedding dimension'})
    ffn_embed_dim: int = field(default=2048, metadata={'help': 'embedding dimension for FFN'})
    layers: int = field(default=6, metadata={'help': 'number of layers'})
    attention_heads: int = field(default=8, metadata={'help': 'number of attention heads'})
    normalize_before: bool = field(default=False, metadata={'help': 'apply layernorm before each block'})
    learned_pos: bool = field(default=False, metadata={'help': 'use learned positional embeddings'})
    layerdrop: float = field(default=0, metadata={'help': 'LayerDrop probability'})
    layers_to_keep: Optional[List[int]] = field(default=None, metadata={'help': 'which layers to *keep* when pruning'})
    xformers_att_config: Optional[str] = field(default=None, metadata={'help': 'config for xFormers attention, defined in xformers.components.attention.AttentionConfig'})
.parametrize('device', list_devices())
def test_tensormap_modify(device):
    """Exercise TensorMap aliasing semantics for attribute tensors.

    Each scenario below builds a fresh map with key 'a', grabs an alias, and
    checks whether in-place writes vs rebinding are visible through the map.
    """
    # Scenario 1: in-place write through the alias is visible in the map.
    tm = o3d.t.geometry.TensorMap('positions')
    tm.a = o3c.Tensor([100], device=device)
    a_alias = tm.a
    a_alias[:] = o3c.Tensor([200], device=device)
    np.testing.assert_equal(a_alias.cpu().numpy(), [200])
    np.testing.assert_equal(tm.a.cpu().numpy(), [200])
    # Scenario 2: in-place write through the map is visible via the alias.
    tm = o3d.t.geometry.TensorMap('positions')
    tm.a = o3c.Tensor([100], device=device)
    a_alias = tm.a
    tm.a[:] = o3c.Tensor([200], device=device)
    np.testing.assert_equal(a_alias.cpu().numpy(), [200])
    np.testing.assert_equal(tm.a.cpu().numpy(), [200])
    # Scenario 3: rebinding the alias variable does NOT touch the map entry.
    tm = o3d.t.geometry.TensorMap('positions')
    tm.a = o3c.Tensor([100], device=device)
    a_alias = tm.a
    a_alias = o3c.Tensor([200], device=device)
    np.testing.assert_equal(a_alias.cpu().numpy(), [200])
    np.testing.assert_equal(tm.a.cpu().numpy(), [100])
    # Scenario 4: reassigning the map entry does NOT change the old alias.
    tm = o3d.t.geometry.TensorMap('positions')
    tm.a = o3c.Tensor([100], device=device)
    a_alias = tm.a
    tm.a = o3c.Tensor([200], device=device)
    np.testing.assert_equal(a_alias.cpu().numpy(), [100])
    np.testing.assert_equal(tm.a.cpu().numpy(), [200])
    # Scenario 5: each attribute access returns a distinct Python wrapper.
    tm = o3d.t.geometry.TensorMap('positions')
    tm.a = o3c.Tensor([100], device=device)
    a_alias = tm.a
    assert (id(a_alias) != id(tm.a))
    # Scenario 6: deleting the map entry leaves the alias alive and writable.
    tm = o3d.t.geometry.TensorMap('positions')
    tm.a = o3c.Tensor([100], device=device)
    a_alias = tm.a
    assert (len(tm) == 1)
    del tm.a
    assert (len(tm) == 0)
    np.testing.assert_equal(a_alias.cpu().numpy(), [100])
    a_alias[:] = 200
    np.testing.assert_equal(a_alias.cpu().numpy(), [200])
    # Scenario 7: swapping two entries rebinds them; old aliases keep the
    # original tensors.
    tm = o3d.t.geometry.TensorMap('positions')
    tm.a = o3c.Tensor([100], device=device)
    tm.b = o3c.Tensor([200], device=device)
    a_alias = tm.a
    b_alias = tm.b
    (tm.a, tm.b) = (tm.b, tm.a)
    np.testing.assert_equal(a_alias.cpu().numpy(), [100])
    np.testing.assert_equal(b_alias.cpu().numpy(), [200])
    np.testing.assert_equal(tm.a.cpu().numpy(), [200])
    np.testing.assert_equal(tm.b.cpu().numpy(), [100])
def accuracy(dataset, model):
    """Return the mean custom loss of `model` over (features, target) pairs."""
    total_loss = 0
    for sample in dataset:
        features = torch.tensor(sample[0], dtype=torch.float)
        prediction = model(features)
        total_loss += custom_loss(prediction, sample[1], model.name)
    return total_loss / len(dataset)
class QNet(BaseModule):
    """Two-layer MLP head mapping concatenated class vectors to class logits."""

    def __init__(self, n_units, n_classes):
        super(QNet, self).__init__()
        self.model = nn.Sequential(
            nn.Linear((2 * n_classes), n_units),
            nn.ReLU(True),
            nn.Linear(n_units, n_classes),
        )

    def forward(self, zcat):
        return self.model(zcat)
def _valid_accuracy_field(key, scope, error):
assert (bool(('relative' in scope['accuracy_criterion'])) != bool(('absolute' in scope['accuracy_criterion']))) |
.skipif((not hasattr(m, 'has_exp_optional')), reason='no <experimental/optional>')
def test_exp_optional():
    """Check bindings built on <experimental/optional> (pybind11 test module `m`)."""
    # optional argument: None maps to an empty optional
    assert (m.double_or_zero_exp(None) == 0)
    assert (m.double_or_zero_exp(42) == 84)
    pytest.raises(TypeError, m.double_or_zero_exp, 'foo')
    # optional return: empty optional maps back to None
    assert (m.half_or_none_exp(0) is None)
    assert (m.half_or_none_exp(42) == 21)
    pytest.raises(TypeError, m.half_or_none_exp, 'foo')
    # nullopt default argument behaves like an omitted/None argument
    assert (m.test_nullopt_exp() == 42)
    assert (m.test_nullopt_exp(None) == 42)
    assert (m.test_nullopt_exp(42) == 42)
    assert (m.test_nullopt_exp(43) == 43)
    # optionals holding a non-copy-assignable type
    assert (m.test_no_assign_exp() == 42)
    assert (m.test_no_assign_exp(None) == 42)
    assert (m.test_no_assign_exp(m.NoAssign(43)) == 43)
    pytest.raises(TypeError, m.test_no_assign_exp, 43)
    # optional members are initialized and reachable through the holder
    holder = m.OptionalExpHolder()
    mvalue = holder.member
    assert mvalue.initialized
    assert holder.member_initialized()
    # optional properties accessed by reference and by copy
    props = m.OptionalExpProperties()
    assert (int(props.access_by_ref) == 42)
    assert (int(props.access_by_copy) == 42)
def get_gcc_version(run_lambda):
    """Return the version string reported by `gcc --version` (or None-style
    result from the parser if unavailable)."""
    command = 'gcc --version'
    pattern = 'gcc (.*)'
    return run_and_parse_first_match(run_lambda, command, pattern)
class BalancedDataParallel(DataParallel):
    """DataParallel variant that assigns a custom (usually smaller) batch
    share to GPU 0, which also hosts gradients and optimizer state.

    gpu0_bsz == 0 removes GPU 0 from the compute set entirely.
    """

    def __init__(self, gpu0_bsz, *args, **kwargs):
        self.gpu0_bsz = gpu0_bsz
        super().__init__(*args, **kwargs)

    def forward(self, *inputs, **kwargs):
        if (not self.device_ids):
            return self.module(*inputs, **kwargs)
        # drop GPU 0 from compute when it gets no batch share
        if (self.gpu0_bsz == 0):
            device_ids = self.device_ids[1:]
        else:
            device_ids = self.device_ids
        (inputs, kwargs) = self.scatter(inputs, kwargs, device_ids)
        if (len(self.device_ids) == 1):
            return self.module(*inputs[0], **kwargs[0])
        replicas = self.replicate(self.module, self.device_ids)
        if (self.gpu0_bsz == 0):
            replicas = replicas[1:]
        outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs)
        return self.gather(outputs, self.output_device)

    def parallel_apply(self, replicas, device_ids, inputs, kwargs):
        return parallel_apply(replicas, inputs, kwargs, device_ids)

    def scatter(self, inputs, kwargs, device_ids):
        """Split the batch with gpu0_bsz samples on GPU 0 and the remainder
        spread evenly (plus remainder) over the other devices."""
        bsz = inputs[0].size(self.dim)
        num_dev = len(self.device_ids)
        gpu0_bsz = self.gpu0_bsz
        # BUGFIX: with a single device the original divided by (num_dev - 1)
        # and raised ZeroDivisionError; fall back to the default scatter.
        if (num_dev <= 1):
            return super().scatter(inputs, kwargs, device_ids)
        bsz_unit = ((bsz - gpu0_bsz) // (num_dev - 1))
        if (gpu0_bsz < bsz_unit):
            chunk_sizes = ([gpu0_bsz] + ([bsz_unit] * (num_dev - 1)))
            # distribute the integer-division remainder over the non-GPU0 chunks
            delta = (bsz - sum(chunk_sizes))
            for i in range(delta):
                chunk_sizes[(i + 1)] += 1
            if (gpu0_bsz == 0):
                chunk_sizes = chunk_sizes[1:]
        else:
            # GPU 0's share is not smaller than the others: even split is fine
            return super().scatter(inputs, kwargs, device_ids)
        return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
def TreeGeneration(ArgSet, depth: int, equiv: bool, rarity: bool):
    """Enumerate arithmetic expression trees of the given depth over ArgSet
    and register the unique ones in TreeDB.

    ArgSet is a bitmask of available arguments; every split of it into a
    left/right subset pair and every left/right depth split is combined with
    each binary operator, filtered through Unique(), and stored.
    """
    # PERF: the operator set is loop-invariant — it was previously rebuilt
    # inside the innermost Rtree loop on every iteration.
    valid_op_set = ('+', '-', '*', '/', '%', 'min', 'max')
    for LArgSet in range(0, ArgSet + 1):
        # Keep only LArgSet values that are bit-subsets of ArgSet.
        if (ArgSet & LArgSet) == LArgSet:
            RArgSet = LArgSet ^ ArgSet  # complement of LArgSet within ArgSet
            for ldepth in range(0, depth):
                rdepth = (depth - 1) - ldepth
                for Ltree in TreeDB.getTreeSet(ldepth, LArgSet):
                    for Rtree in TreeDB.getTreeSet(rdepth, RArgSet):
                        for op in valid_op_set:
                            if Unique(op, Ltree, Rtree, equiv, rarity):
                                new_tree = ArithExpTree(op=op, lson=Ltree, rson=Rtree, ArgSet=ArgSet)
                                TreeDB.Add(depth, ArgSet, new_tree)
class Dataset(data.Dataset):
    """Folder-of-images dataset yielding (image, class-index) pairs.

    Images and targets are discovered by find_images_and_targets(); an
    optional class_map file pins the class-name -> index mapping. With
    load_bytes=True the raw file bytes are returned instead of a decoded
    PIL image.
    """

    def __init__(self, root, load_bytes=False, transform=None, class_map=''):
        class_to_idx = None
        if class_map:
            # Optional explicit class-name -> index mapping file.
            class_to_idx = load_class_map(class_map, root)
        (images, class_to_idx) = find_images_and_targets(root, class_to_idx=class_to_idx)
        if len(images) == 0:
            raise RuntimeError((('Found 0 images in subfolders of: ' + root)
                                + '\nSupported image extensions are: ') + ','.join(IMG_EXTENSIONS))
        self.root = root
        self.samples = images
        self.imgs = self.samples  # torchvision ImageFolder compat alias
        self.class_to_idx = class_to_idx
        self.load_bytes = load_bytes
        self.transform = transform

    def __getitem__(self, index):
        (path, target) = self.samples[index]
        if self.load_bytes:
            # BUGFIX: close the file handle instead of leaking it via
            # open(path, 'rb').read().
            with open(path, 'rb') as f:
                img = f.read()
        else:
            img = Image.open(path).convert('RGB')
        if self.transform is not None:
            img = self.transform(img)
        if target is None:
            # Unlabeled sample: placeholder zero label tensor.
            target = torch.zeros(1).long()
        return (img, target)

    def __len__(self):
        return len(self.imgs)

    def filenames(self, indices=None, basename=False):
        """Return sample file paths (or basenames) for `indices`, or for
        every sample when `indices` is falsy.
        """
        # BUGFIX: was a mutable default argument (indices=[]); None behaves
        # identically for all callers since both are falsy.
        if indices:
            if basename:
                return [os.path.basename(self.samples[i][0]) for i in indices]
            return [self.samples[i][0] for i in indices]
        if basename:
            return [os.path.basename(x[0]) for x in self.samples]
        return [x[0] for x in self.samples]
def t_integ_attr_(b):
    """Numerically integrate t_attr from 0 up to each value in b's first row.

    Returns an array shaped like b whose first row holds the integrals;
    every other entry stays zero. Assumes b is 2-D — TODO confirm with
    callers.
    """
    out = np.zeros(b.shape)
    # points=[0] marks the origin as a point of interest for the adaptive
    # quadrature (e.g. a potential singularity of t_attr there).
    for col, upper in enumerate(b[0]):
        out[0][col] = integrate.quad(t_attr, 0, upper, points=[0])[0]
    return out
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.