code stringlengths 101 5.91M |
|---|
def dense_layer(inp: int, out: int, activation: str, p: float, bn: bool, linear_first: bool):
    """Build a fully-connected block: Linear + activation with optional
    BatchNorm1d and Dropout.

    When ``linear_first`` is True the order is [Linear, act, BN, Dropout];
    otherwise [BN, Dropout, Linear, act]. The Linear layer drops its bias
    whenever BatchNorm is enabled (BN supplies the shift).
    """
    act_fn = get_activation_fn(activation)
    extras = []
    if bn:
        # BN normalizes the linear output when it comes first, otherwise the
        # incoming features.
        extras.append(nn.BatchNorm1d(out if linear_first else inp))
    if p != 0:
        extras.append(nn.Dropout(p))
    core = [nn.Linear(inp, out, bias=not bn), act_fn]
    ordered = core + extras if linear_first else extras + core
    return nn.Sequential(*ordered)
def shared_convl1_lrelu(shape, nb_filters, kernel, stride=(1, 1), **kwargs):
    """Conv2D (L1-regularized, 'same' padding, he_uniform init) followed by
    a LeakyReLU, wrapped in a Sequential.

    Args:
        shape: input shape for the first layer.
        nb_filters: number of convolution filters.
        kernel: kernel size.
        stride: int or (h, w) tuple of strides.
        **kwargs: forwarded to ``Sequential``.

    Returns:
        A ``Sequential`` of [Convolution2D, LeakyReLU].
    """
    # BUG FIX: the original passed strides=(stride, stride), which re-wrapped
    # the tuple default into the invalid value ((1, 1), (1, 1)). Normalize an
    # int to a pair and pass tuples through unchanged.
    strides = (stride, stride) if isinstance(stride, int) else tuple(stride)
    conv = Convolution2D(
        nb_filters,
        kernel,
        padding='same',
        kernel_initializer='he_uniform',
        kernel_regularizer=l1(0.01),
        strides=strides,
        input_shape=shape,
    )
    return Sequential([conv, LeakyReLU()], **kwargs)
class MostVisitedPositiveExtract(AbstractExtract):
    """Extract the root-to-leaf path obtained by repeatedly descending into
    the most-visited child, restricted to positive-average-reward children
    whenever at least one exists."""

    def __call__(self, node):
        path = [node]
        while not node.terminal and len(node.children) > 0:
            # Restrict candidates to positive-reward children if possible.
            has_positive = max(child.avg_reward for child in node.children) > 0
            candidates = [
                (idx, child.n_visits)
                for idx, child in enumerate(node.children)
                if (not has_positive) or child.avg_reward > 0
            ]
            best_idx, _ = max(candidates, key=operator.itemgetter(1))
            node = node.children[best_idx]
            path.append(node)
        return path
def parse_args():
    """Parse the TangoBERT command-line arguments and return the namespace."""
    parser = argparse.ArgumentParser(description='TangoBERT')
    parser.add_argument(
        '--task_name',
        type=str,
        help='Name of the GLUE task.',
        choices=list(task_to_keys.keys()),
        required=True,
    )
    parser.add_argument(
        '--small_model_name_or_path',
        type=str,
        help='Path to the small pretrained model or model identifier from huggingface.co/models.',
        required=True,
    )
    parser.add_argument(
        '--big_model_name_or_path',
        type=str,
        help='Path to the big pretrained model or model identifier from huggingface.co/models.',
        required=True,
    )
    parser.add_argument(
        '--device_small',
        default=0,
        help='Defines the device (e.g., "cpu", "cuda:1", "mps", or a GPU ordinal rank like 1) on which small model pipeline will be allocated. ',
    )
    parser.add_argument(
        '--device_big',
        default=0,
        help='Defines the device (e.g., "cpu", "cuda:1", "mps", or a GPU ordinal rank like 1) on which this big model pipeline will be allocated. ',
    )
    parser.add_argument(
        '--per_device_eval_batch_size-small',
        type=int,
        default=32,
        help='Batch size (per device) for small model inference.',
    )
    parser.add_argument(
        '--per_device_eval_batch_size-big',
        type=int,
        default=32,
        help='Batch size (per device) for big model inference.',
    )
    parser.add_argument(
        '--confidence_threshold',
        type=float,
        default=0.85,
        action=range_action(0.5, 1.0),
        help='Confidence threshold for small model prediction (must be in range 0.5..1.0).',
    )
    return parser.parse_args()
def y_pred_header(outcome, underscore=False):
    """Column header for first-model predictions.

    Returns '<outcome>_y_pred1' when ``underscore`` is true, otherwise
    '<outcome>-y_pred1'.
    """
    sep = '_' if underscore else '-'
    return f'{outcome}{sep}y_pred1'
class HypergraphConv(MessagePassing):
    """Hypergraph convolution operator built on PyG's MessagePassing.

    Features flow node -> hyperedge -> node via two propagate() calls,
    normalized by inverse node degree D and inverse hyperedge degree B
    (or symmetrically by D^{-1/2} on both sides when ``symdegnorm``).
    Optional multi-head attention weights each node/hyperedge incidence.
    """

    def __init__(self, in_channels, out_channels, symdegnorm=False, use_attention=False, heads=1, concat=True, negative_slope=0.2, dropout=0, bias=True, **kwargs):
        kwargs.setdefault('aggr', 'add')
        super(HypergraphConv, self).__init__(node_dim=0, **kwargs)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_attention = use_attention
        self.symdegnorm = symdegnorm
        if self.use_attention:
            self.heads = heads
            self.concat = concat
            self.negative_slope = negative_slope
            self.dropout = dropout
            # Per-head projection, plus an attention vector over the
            # concatenated (node, hyperedge) feature pair.
            self.weight = Parameter(torch.Tensor(in_channels, (heads * out_channels)))
            self.att = Parameter(torch.Tensor(1, heads, (2 * out_channels)))
        else:
            # Without attention the layer behaves as single-head.
            self.heads = 1
            self.concat = True
            self.weight = Parameter(torch.Tensor(in_channels, out_channels))
        if (bias and concat):
            self.bias = Parameter(torch.Tensor((heads * out_channels)))
        elif (bias and (not concat)):
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        # Glorot for weights/attention, zeros for bias (PyG convention).
        glorot(self.weight)
        if self.use_attention:
            glorot(self.att)
        zeros(self.bias)

    def forward(self, x: Tensor, hyperedge_index: Tensor, hyperedge_weight: Optional[Tensor]=None) -> Tensor:
        """Compute node embeddings.

        Args:
            x: node features of shape (num_nodes, in_channels).
            hyperedge_index: incidence pairs in COO form; row 0 holds node
                indices, row 1 the hyperedge indices.
            hyperedge_weight: optional per-hyperedge weights (defaults to 1).
        """
        (num_nodes, num_edges) = (x.size(0), 0)
        if (hyperedge_index.numel() > 0):
            num_edges = (int(hyperedge_index[1].max()) + 1)
        if (hyperedge_weight is None):
            hyperedge_weight = x.new_ones(num_edges)
        x = torch.matmul(x, self.weight)
        alpha = None
        if self.use_attention:
            # NOTE(review): this assert is a tautology and can never fire.
            # Since x_j below indexes the node-feature matrix with hyperedge
            # ids, the intended invariant was presumably
            # `num_edges <= num_nodes` -- confirm against upstream.
            assert (num_edges <= num_edges)
            x = x.view((- 1), self.heads, self.out_channels)
            (x_i, x_j) = (x[hyperedge_index[0]], x[hyperedge_index[1]])
            alpha = (torch.cat([x_i, x_j], dim=(- 1)) * self.att).sum(dim=(- 1))
            alpha = F.leaky_relu(alpha, self.negative_slope)
            # Softmax-normalize attention per node.
            alpha = softmax(alpha, hyperedge_index[0], num_nodes=x.size(0))
            alpha = F.dropout(alpha, p=self.dropout, training=self.training)
        if (not self.symdegnorm):
            # D: inverse (weighted) node degree; B: inverse hyperedge degree.
            D = scatter_add(hyperedge_weight[hyperedge_index[1]], hyperedge_index[0], dim=0, dim_size=num_nodes)
            D = (1.0 / D)
            D[(D == float('inf'))] = 0
            B = scatter_add(x.new_ones(hyperedge_index.size(1)), hyperedge_index[1], dim=0, dim_size=num_edges)
            B = (1.0 / B)
            B[(B == float('inf'))] = 0
            # Stage 1: aggregate nodes into hyperedges (normalized by B).
            self.flow = 'source_to_target'
            out = self.propagate(hyperedge_index, x=x, norm=B, alpha=alpha, size=(num_nodes, num_edges))
            # Stage 2: scatter hyperedge features back to nodes (by D).
            self.flow = 'target_to_source'
            out = self.propagate(hyperedge_index, x=out, norm=D, alpha=alpha, size=(num_edges, num_nodes))
        else:
            # Symmetric normalization: scale inputs by D^{-1/2} and apply
            # D^{-1/2} again on the way back.
            D = scatter_add(hyperedge_weight[hyperedge_index[1]], hyperedge_index[0], dim=0, dim_size=num_nodes)
            D = (1.0 / (D ** 0.5))
            D[(D == float('inf'))] = 0
            B = scatter_add(x.new_ones(hyperedge_index.size(1)), hyperedge_index[1], dim=0, dim_size=num_edges)
            B = (1.0 / B)
            B[(B == float('inf'))] = 0
            x = (D.unsqueeze((- 1)) * x)
            self.flow = 'source_to_target'
            out = self.propagate(hyperedge_index, x=x, norm=B, alpha=alpha, size=(num_nodes, num_edges))
            self.flow = 'target_to_source'
            out = self.propagate(hyperedge_index, x=out, norm=D, alpha=alpha, size=(num_edges, num_nodes))
        if (self.concat is True):
            # Concatenate heads: (N, heads * out_channels).
            out = out.view((- 1), (self.heads * self.out_channels))
        else:
            # Average heads: (N, out_channels).
            out = out.mean(dim=1)
        if (self.bias is not None):
            out = (out + self.bias)
        return out

    def message(self, x_j: Tensor, norm_i: Tensor, alpha: Tensor) -> Tensor:
        # norm_i carries the target-side normalization coefficient; alpha
        # (when attention is on) applies per-head incidence weights.
        # NOTE: local F shadows torch.nn.functional within this method.
        (H, F) = (self.heads, self.out_channels)
        out = (norm_i.view((- 1), 1, 1) * x_j.view((- 1), H, F))
        if (alpha is not None):
            out = (alpha.view((- 1), self.heads, 1) * out)
        return out

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__, self.in_channels, self.out_channels)
def train_epoch(encoder, classifier, classifiers, batch_trains, dev, test, optimizer_encoder, optimizer_classifier, start, I):
    """Run one multi-source training epoch: a shared encoder plus one
    classifier per source domain, trained with a multi-task (MTL)
    cross-entropy loss on the in-domain predictions only.

    Returns (encoder, classifier, optimizer_encoder, optimizer_classifier)
    so the caller can continue training.

    NOTE(review): `classifier`, `dev`, `test` and `start` are accepted but
    never used; `moe_criterion`, `loss_moe`, `domain_encs` and the
    all_time/dev_time/all_tagged/this_tagged/this_loss counters are
    initialized but never consumed -- presumably leftovers from a
    mixture-of-experts variant of this loop.
    """
    all_time = dev_time = all_tagged = this_tagged = this_loss = 0
    mtl_criterion = nn.CrossEntropyLoss()
    moe_criterion = nn.NLLLoss()
    domain_encs = None
    for (ind, batch) in enumerate(batch_trains):
        optimizer_encoder.zero_grad()
        optimizer_classifier.zero_grad()
        loss_mtl = []
        loss_moe = []
        ms_outputs = []
        hiddens = []
        train_labels = []
        # `batch` holds one sub-batch per source domain.
        for (source_ind, s) in enumerate(batch):
            # I counts (source, batch) steps across the whole run.
            I += 1
            (words, golds) = zip(*s)
            hidden = encoder(words)
            outputs = []
            hiddens.append(hidden)
            # Score this source's encoding with every domain classifier.
            for sthi in classifiers:
                output = sthi(hidden)
                outputs.append(output)
            ms_outputs.append(outputs)
            # Gold tags mapped to label ids through the encoder's tag vocab.
            train_labels.append(encoder.get_var(torch.LongTensor([encoder.vt.w2i[t] for t in golds])))
            # Only the in-domain classifier's prediction is trained on.
            preds = ms_outputs[source_ind][source_ind]
            loss = mtl_criterion(preds, train_labels[(- 1)])
            loss_mtl.append(loss)
        # Sum the per-source losses and take one optimization step per batch.
        loss_mtl = sum(loss_mtl)
        loss = loss_mtl
        loss.backward()
        optimizer_encoder.step()
        optimizer_classifier.step()
    print('\n\nEnded last epoch.\n')
    print(I)
    return (encoder, classifier, optimizer_encoder, optimizer_classifier)
def densenet161(num_classes=1000, pretrained='imagenet'):
    """Construct DenseNet-161, optionally loading pretrained weights.

    ``pretrained`` names a key in ``pretrained_settings['densenet161']``
    (e.g. 'imagenet'); pass None to skip weight loading. The network is
    always post-processed by ``modify_densenets``.
    """
    net = models.densenet161(pretrained=False)
    if pretrained is not None:
        cfg = pretrained_settings['densenet161'][pretrained]
        net = load_pretrained(net, num_classes, cfg)
    return modify_densenets(net)
class EffiInvResUnit(nn.Module):
    """EfficientNet inverted-residual unit.

    Pipeline: 1x1 expansion -> depthwise conv (3x3 or 5x5) -> squeeze-and-
    excitation -> 1x1 projection, with an identity shortcut when neither the
    channel count nor the spatial size changes.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride,
                 expansion_factor, bn_eps, activation, tf_mode):
        super(EffiInvResUnit, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride
        self.tf_mode = tf_mode
        # Shortcut is valid only when the block neither reshapes channels
        # nor downsamples.
        self.residual = (in_channels == out_channels) and (stride == 1)
        mid_channels = in_channels * expansion_factor
        if kernel_size == 3:
            dwconv_block_fn = dwconv3x3_block
        elif kernel_size == 5:
            dwconv_block_fn = dwconv5x5_block
        else:
            dwconv_block_fn = None
        self.conv1 = conv1x1_block(in_channels=in_channels,
                                   out_channels=mid_channels,
                                   bn_eps=bn_eps,
                                   activation=activation)
        # In TF-compatibility mode padding is applied explicitly in forward().
        self.conv2 = dwconv_block_fn(in_channels=mid_channels,
                                     out_channels=mid_channels,
                                     stride=stride,
                                     padding=(0 if tf_mode else kernel_size // 2),
                                     bn_eps=bn_eps,
                                     activation=activation)
        self.se = SEBlock(channels=mid_channels, reduction=24,
                          activation=activation)
        # Linear (no activation) projection back down.
        self.conv3 = conv1x1_block(in_channels=mid_channels,
                                   out_channels=out_channels,
                                   bn_eps=bn_eps,
                                   activation=None)

    def forward(self, x):
        identity = x if self.residual else None
        x = self.conv1(x)
        if self.tf_mode:
            x = F.pad(x, pad=calc_tf_padding(x, kernel_size=self.kernel_size,
                                             stride=self.stride))
        x = self.conv2(x)
        x = self.se(x)
        x = self.conv3(x)
        if identity is not None:
            x = x + identity
        return x
def matching_cascade(distance_metric, max_distance, cascade_depth, tracks, detections, track_indices=None, detection_indices=None):
    """Run the DeepSORT matching cascade.

    Tracks are matched level by level in order of track age (frames since
    last update), so recently-updated tracks get first pick of detections.

    Returns:
        (matches, unmatched_tracks, unmatched_detections).
    """
    if track_indices is None:
        track_indices = list(range(len(tracks)))
    if detection_indices is None:
        detection_indices = list(range(len(detections)))
    matches = []
    unmatched_detections = detection_indices
    for level in range(cascade_depth):
        if not unmatched_detections:
            break
        # Tracks last updated (level + 1) frames ago belong to this level.
        level_tracks = [
            idx for idx in track_indices
            if tracks[idx].time_since_update == level + 1
        ]
        if not level_tracks:
            continue
        matches_l, _, unmatched_detections = min_cost_matching(
            distance_metric, max_distance, tracks, detections,
            level_tracks, unmatched_detections)
        matches.extend(matches_l)
    matched_track_ids = {k for k, _ in matches}
    unmatched_tracks = list(set(track_indices) - matched_track_ids)
    return (matches, unmatched_tracks, unmatched_detections)
def convert_tokens_seq(eval_file, qa_id, symbols, probs, id2word, map_to_orig):
    """Convert predicted symbol-id sequences into scored answer strings.

    For each question id, every beam's symbols are truncated at the [SEP] id
    (102), mapped back to words (vocabulary ids via ``id2word``, larger ids
    as copies from the question's paragraph), and scored by length-normalized
    probability minus a repetition penalty.

    Returns:
        dict mapping str(qid) -> list of (score, answer_string).
    """
    def _get_penalty(syms):
        # Penalize degenerate generations: any trigram occurring twice.
        trigrams = [tuple(syms[i:i + 3]) for i in range(len(syms) - 2)]
        return any(count > 1 for count in Counter(trigrams).values())

    answer_dict = {}
    for qid, prob, bsyms in zip(qa_id, probs, zip(*symbols)):
        answers = []
        # BUG FIX: the original used a bare `except:`, which also swallows
        # KeyboardInterrupt/SystemExit. zip(*bsyms) fails (at construction)
        # only when an element is not iterable, so catch TypeError.
        try:
            bsyms = zip(*bsyms)
        except TypeError:
            bsyms = [bsyms]
        for p, syms in zip(prob, bsyms):
            context_tokens = eval_file[str(qid)]['paragraph']
            if 102 in syms:
                # 102 is the [SEP] id -- truncate the prediction there.
                syms = syms[:syms.index(102)]
            # Ids beyond the vocabulary index into the paragraph (copy mech).
            syms = [id2word[sym] if sym in id2word
                    else context_tokens[sym - len(id2word)]
                    for sym in syms]
            tokens = convert_to_words(map_to_orig, syms)
            answer = u' '.join(tokens)
            lp, penalty = len(tokens) + 1, 0.0
            if _get_penalty(tokens):
                penalty = 1.0
            # Length-normalized score minus the repetition penalty.
            answers.append((p / lp - penalty, answer))
        answer_dict[str(qid)] = answers
    return answer_dict
def test_hash():
    """__hash__ must be forwarded for hashable objects, and hashing a type
    that disables __hash__ must raise TypeError."""
    class WithHash(object):
        def __init__(self, value):
            self.value = value

        def __hash__(self):
            return self.value

    class NoHash(object):
        __hash__ = None

    assert m.hash_function(WithHash(42)) == 42
    with pytest.raises(TypeError):
        m.hash_function(NoHash())
def build_batchers(data_dir, batch_size):
    """Build the RL training and validation data loaders.

    Each batch of raw (article, abstract) pairs is collated into two lists
    of tokenized sentences, dropping any empty tokenizations. The train
    loader is wrapped in ``cycle`` so it never exhausts.
    """
    def coll(batch):
        art_batch, abs_batch = unzip(batch)
        art_sents = [sents for sents in map(tokenize(None), art_batch) if sents]
        abs_sents = [sents for sents in map(tokenize(None), abs_batch) if sents]
        return (art_sents, abs_sents)

    train_loader = DataLoader(
        RLDataset('train', data_dir), batch_size=batch_size,
        shuffle=True, num_workers=4, collate_fn=coll)
    val_loader = DataLoader(
        RLDataset('val', data_dir), batch_size=batch_size,
        shuffle=False, num_workers=4, collate_fn=coll)
    return (cycle(train_loader), val_loader)
def parser_sample(parser):
    """Register the 'sample' task's command-line options on ``parser`` and
    return it (only --input_img is required)."""
    parser.add_argument(
        '-in', '--input_img', type=str, required=True,
        help='path of input image')
    parser.add_argument(
        '--sigma', type=float, default=0.75, required=False,
        help='noise level to adjust the variatonality of the new sample. default is 0.75 (float)')
    parser.add_argument(
        '--alpha', type=float, default=0.005, required=False,
        help='alpha parameter of the normalizing distance matrix. small alpha encourages completeness. default is 0.005 (float)')
    parser.add_argument('--task', type=str, default='random_sample')
    return parser
def fit(model, loss, opt, train_dataset, epochs, train_batch_size, max_steps=None):
    """Train ``model`` for one pass over ``train_dataset``.

    Args:
        model: model called on a batch of input tensors; its output must
            expose ``.logits``.
        loss: callable ``loss(targets, logits)``.
        opt: optimizer providing ``apply_gradients``.
        train_dataset: iterable of (inputs, targets) batches.
        epochs, train_batch_size: unused here; kept for interface
            compatibility with existing callers.
        max_steps: optional cap on the number of batches.

    Returns:
        dict with the final batch loss under 'loss'.
    """
    pbar = tqdm(train_dataset)
    for i, batch in enumerate(pbar):
        with tf.GradientTape() as tape:
            inputs, targets = batch
            # BUG FIX: the original called model(batch), feeding the whole
            # (inputs, targets) tuple to the forward pass; only the inputs
            # belong there (`inputs` was unpacked but never used).
            outputs = model(inputs)
            loss_value = loss(targets, outputs.logits)
        if SDP_ENABLED:
            tape = sdp.DistributedGradientTape(tape, sparse_as_dense=True)
        grads = tape.gradient(loss_value, model.trainable_variables)
        opt.apply_gradients(zip(grads, model.trainable_variables))
        pbar.set_description(f'Loss: {loss_value:.4f}')
        if SDP_ENABLED and i == 0:
            # Sync initial weights/optimizer state from rank 0 after the
            # first step so all workers proceed identically.
            sdp.broadcast_variables(model.variables, root_rank=0)
            sdp.broadcast_variables(opt.variables(), root_rank=0)
        if max_steps and i >= max_steps:
            break
    # NOTE(review): loss_value is unbound if train_dataset is empty -- the
    # caller is expected to pass a non-empty dataset.
    train_results = {'loss': loss_value.numpy()}
    return train_results
class FormatterNoInfo(logging.Formatter):
    """Formatter that strips the level prefix from INFO records.

    INFO messages are emitted verbatim; every other level is rendered with
    the configured format string (default ``'%(levelname)s: %(message)s'``).
    """

    def __init__(self, fmt='%(levelname)s: %(message)s'):
        logging.Formatter.__init__(self, fmt)

    def format(self, record):
        # INFO is the "plain output" level: no 'INFO:' prefix.
        if record.levelno != logging.INFO:
            return logging.Formatter.format(self, record)
        return str(record.getMessage())
def test_nasfcos_fpn():
    """NASFCOS_FPN accepts consistent start/end levels (including the -1
    sentinel) and rejects num_outs values inconsistent with the level range."""
    # Valid configurations must construct without error.
    NASFCOS_FPN(in_channels=[8, 16, 32, 64], out_channels=8,
                start_level=0, end_level=3, num_outs=4)
    NASFCOS_FPN(in_channels=[8, 16, 32, 64], out_channels=8,
                start_level=0, end_level=-1, num_outs=4)
    # Inconsistent output counts must trip the constructor's assertions.
    with pytest.raises(AssertionError):
        NASFCOS_FPN(in_channels=[8, 16, 32, 64], out_channels=8,
                    start_level=1, end_level=4, num_outs=2)
    with pytest.raises(AssertionError):
        NASFCOS_FPN(in_channels=[8, 16, 32, 64], out_channels=8,
                    start_level=1, end_level=2, num_outs=3)
def generate_configs(base_config, dest_dir, embeddings, grid, refresh, ckpts_path, target):
    """Expand a base JSON config into one file per (embedding model, grid
    point) and write them under ``dest_dir``.

    Args:
        base_config: path to the JSON config template.
        dest_dir: output directory for the generated configs.
        embeddings: model names of the form '<...>-<type>-<dim>d[-dve]'.
        grid: dict mapping hparam name ('smax'|'lr'|'bs'|'upsample'|'annos')
            to the list of values to sweep.
        refresh: overwrite existing config files when True.
        ckpts_path: JSON of {ckpt_name: {'epoch': ..., 'timestamp': ...}}.
        target: experiment target; '-ft-keypoints' targets look up their
            checkpoint under a prefix derived from the target name.
    """
    with open(base_config, 'r') as f:
        base = json.load(f)
    with open(ckpts_path, 'r') as f:
        ckpts = json.load(f)
    # Per-architecture preprocessing settings and constructor class names.
    model_family = {'smallnet': {'preproc': {'crop': 15, 'imwidth': 100}, 'name': 'SmallNet'}, 'hourglass': {'preproc': {'crop': 20, 'imwidth': 136}, 'name': 'HourglassNet'}}
    for model_name in embeddings:
        # Names end in '-<model_type>-<dim>d' with an optional '-dve'
        # suffix; strip the suffix, then parse type and embedding size.
        tokens = model_name.split('-')
        if (tokens[(- 1)] == 'dve'):
            tokens.pop()
        (model_type, embedding_dim) = (tokens[(- 2)], int(tokens[(- 1)][:(- 1)]))
        preproc_kwargs = model_family[model_type]['preproc']
        # Cartesian product over all hyper-parameter values.
        hparam_vals = [x for x in grid.values()]
        grid_vals = list(itertools.product(*hparam_vals))
        hparams = list(grid.keys())
        if ('-ft-keypoints' in target):
            # Fine-tuned keypoint targets store checkpoints under a prefix
            # derived from the target name (modifier suffixes removed).
            prefix = target.replace('-keypoints', '')
            prefix = prefix.replace('-limit-annos', '')
            prefix = prefix.replace('-no-aug', '')
            ckpt_name = f'{prefix}-{model_name}'
        else:
            ckpt_name = model_name
        epoch = ckpts[ckpt_name]['epoch']
        for cfg_vals in grid_vals:
            config = copy.deepcopy(base)
            # Apply this grid point's value for each hyper-parameter.
            for (hparam, val) in zip(hparams, cfg_vals):
                if (hparam == 'smax'):
                    config['keypoint_regressor']['softmaxarg_mul'] = val
                elif (hparam == 'lr'):
                    config['optimizer']['args']['lr'] = val
                elif (hparam == 'bs'):
                    val = int(val)
                    config['batch_size'] = val
                elif (hparam == 'upsample'):
                    val = bool(val)
                    config['keypoint_regressor_upsample'] = val
                elif (hparam == 'annos'):
                    config['restrict_annos'] = int(val)
                else:
                    raise ValueError(f'unknown hparam: {hparam}')
            # Resolve the checkpoint to fine-tune from.
            ckpt = f'checkpoint-epoch{epoch}.pth'
            timestamp = ckpts[ckpt_name]['timestamp']
            ckpt_path = (((Path('data/saved/models') / ckpt_name) / timestamp) / ckpt)
            config['arch']['type'] = model_family[model_type]['name']
            config['arch']['args']['num_output_channels'] = embedding_dim
            config['dataset']['args'].update(preproc_kwargs)
            config['finetune_from'] = str(ckpt_path)
            if (('-ft' in str(dest_dir)) and ('-keypoints' not in str(dest_dir))):
                # Non-keypoint fine-tuning uses the dense correlation loss
                # (DVE variant when the model was trained with DVE).
                loss = 'dense_correlation_loss'
                if ('dve' in model_name):
                    loss += '_dve'
                config['loss'] = loss
                if ('hourglass' in model_name):
                    config['batch_size'] = 16
            # Encode distinguishing hyper-parameters into the filename.
            if ('annos' in grid):
                model_name_ = f"{config['restrict_annos']}-annos-{model_name}"
            else:
                model_name_ = model_name
            if (len(grid['lr']) > 1):
                model_name_ = f"{model_name_}-lr{config['optimizer']['args']['lr']}"
            if (len(grid['bs']) > 1):
                model_name_ = f"{model_name_}-bs{config['batch_size']}"
            dest_path = (Path(dest_dir) / f'{model_name_}.json')
            dest_path.parent.mkdir(exist_ok=True, parents=True)
            # Never clobber an existing config unless refresh was requested.
            if ((not dest_path.exists()) or refresh):
                with open(str(dest_path), 'w') as f:
                    json.dump(config, f, indent=4, sort_keys=False)
            else:
                print(f'config file at {str(dest_path)} exists, skipping....')
    print(f'Wrote {len(grid_vals)} configs to disk')
def test_funcall_kwarg():
    """A value computed from `x` via a keyword argument must be reported as
    stale once `x` is reassigned."""
    # Define a function, bind z = f(y=x), then invalidate x.
    run_cell('\n def f(y):\n return 2 * y + 8\n ')
    run_cell('x = 7')
    run_cell('z = f(y=x)')
    run_cell('x = 8')
    # Using z now depends on the stale value of x.
    run_cell('logging.info(z)')
    assert_detected('`z` depends on stale `x`')
def check_models_are_tested(module, test_file):
    """Check that every model class defined in ``module`` is covered by
    ``all_model_classes`` in ``test_file``.

    Returns:
        A list of human-readable failure messages, or None when the test
        file is explicitly allowed to have no common tests.
    """
    defined_models = get_models(module)
    tested_models = find_tested_models(test_file)
    if tested_models is None:
        if test_file in TEST_FILES_WITH_NO_COMMON_TESTS:
            return
        return [
            f'{test_file} should define `all_model_classes` to apply common tests to the models it tests. '
            + 'If this intentional, add the test filename to `TEST_FILES_WITH_NO_COMMON_TESTS` in the file '
            + '`utils/check_repo.py`.'
        ]
    failures = []
    for model_name, _ in defined_models:
        if model_name not in tested_models and model_name not in IGNORE_NON_TESTED:
            # BUG FIX: the original concatenation lacked separating spaces,
            # producing garbled text ("that file.If common", and
            # "`IGNORE_NON_TESTED`in the file").
            failures.append(
                f'{model_name} is defined in {module.__name__} but is not tested in '
                + f'{os.path.join(PATH_TO_TESTS, test_file)}. Add it to the all_model_classes in that file. '
                + 'If common tests should not applied to that model, add its name to `IGNORE_NON_TESTED` '
                + 'in the file `utils/check_repo.py`.'
            )
    return failures
class InfoGen(nn.Module):
    """Upsampling generator that expands a (N, t_emb, H, W) embedding into an
    (N, output_size, H', W') feature map through four BN+ReLU transposed
    convolutions (the last stage upsamples along height only)."""

    def __init__(self, t_emb, output_size):
        super(InfoGen, self).__init__()
        self.tconv1 = nn.ConvTranspose2d(t_emb, 512, 3, 2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(512)
        self.tconv2 = nn.ConvTranspose2d(512, 128, 3, 2, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(128)
        self.tconv3 = nn.ConvTranspose2d(128, 64, 3, 2, padding=1, bias=False)
        self.bn3 = nn.BatchNorm2d(64)
        # Final stage: stride (2, 1) upsamples height but not width.
        self.tconv4 = nn.ConvTranspose2d(64, output_size, 3, (2, 1), padding=(1, 0), bias=False)
        self.bn4 = nn.BatchNorm2d(output_size)

    def forward(self, t_embedding):
        feat = t_embedding
        # Each stage: transposed conv -> batch norm -> ReLU.
        stages = ((self.tconv1, self.bn1), (self.tconv2, self.bn2),
                  (self.tconv3, self.bn3), (self.tconv4, self.bn4))
        for tconv, bn in stages:
            feat = F.relu(bn(tconv(feat)))
        return feat
class Hadron(Ion):
    """An Ion that is guaranteed to be a hadron.

    Construction raises AssertionError when the underlying particle does
    not satisfy ``is_hadron()``.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # NOTE: assert is stripped under `python -O`; validation vanishes then.
        assert self.is_hadron()
def get_datasets():
    """Collect the 'datasets' entries of every configuration group into one
    flat list (in group iteration order)."""
    return [dataset
            for group in CONFIG_GROUPS.values()
            for dataset in group['datasets']]
# BUG FIX: the source contained a bare `.parametrize(...)` fragment -- the
# `@pytest.mark` prefix of the decorator had been lost, which is a syntax
# error. Restored below.
@pytest.mark.parametrize('spin, spin_angle, launch_angle, launch_direction_angle, expected', [(0, 1, 1, 1, (0, 0, 0))])
def test_spin_components(spin, spin_angle, launch_angle, launch_direction_angle, expected):
    """Zero spin must yield zero angular-velocity components regardless of
    the launch geometry."""
    wx, wy, wz = spin_components(spin, spin_angle, launch_angle, launch_direction_angle)
    for actual, expect in zip((wx, wy, wz), expected):
        assert actual == pytest.approx(expect)
def imagenet_vgg16_pretrained(output_dim):
    """ImageNet-pretrained VGG-16 with its final classifier replaced so the
    network emits ``output_dim`` outputs."""
    backbone = torchvision.models.vgg16(pretrained=True)
    return _vgg_replace_fc(backbone, output_dim)
class InitiateNewTrainStage(BaseCallback):
    """Stable-Baselines callback that advances or regresses the curriculum
    training stage based on evaluation reward ('rew') or success rate
    ('succ').

    When the monitored metric reaches ``upper_threshold`` the next stage is
    triggered via ROS topics; dropping to ``lower_threshold`` triggers the
    previous stage. Only active when ``task_mode == 'staged'``.
    """

    def __init__(self, n_envs: int=1, treshhold_type: str='succ', upper_threshold: float=0, lower_threshold: float=0, task_mode: str='staged', verbose=0):
        super(InitiateNewTrainStage, self).__init__(verbose=verbose)
        self.n_envs = n_envs
        self.threshhold_type = treshhold_type
        assert ((self.threshhold_type == 'rew') or (self.threshhold_type == 'succ')), "given theshhold type neither 'rew' or 'succ'"
        # An upper_threshold of 0 selects per-metric defaults.
        if ((self.threshhold_type == 'rew') and (upper_threshold == 0)):
            self.upper_threshold = 13
            self.lower_threshold = 7
        elif ((self.threshhold_type == 'succ') and (upper_threshold == 0)):
            self.upper_threshold = 0.85
            self.lower_threshold = 0.6
        else:
            self.upper_threshold = upper_threshold
            self.lower_threshold = lower_threshold
        assert (self.upper_threshold > self.lower_threshold), 'upper threshold has to be bigger than lower threshold'
        assert ((self.upper_threshold >= 0) and (self.lower_threshold >= 0)), 'upper/lower threshold have to be positive numbers'
        if (self.threshhold_type == 'succ'):
            assert ((self.upper_threshold <= 1) and (self.lower_threshold >= 0)), 'succ thresholds have to be between [1.0, 0.0]'
        self.verbose = verbose
        self.activated = bool((task_mode == 'staged'))
        if self.activated:
            rospy.set_param('/last_stage_reached', False)
            self._instantiate_publishers()
            # Bool message published on the stage-change topics.
            self._trigger = Bool()
            self._trigger.data = True

    def _instantiate_publishers(self):
        # One publisher pair for the eval sim plus one pair per training env.
        self._publishers_next = []
        self._publishers_previous = []
        self._publishers_next.append(rospy.Publisher(f'/eval_sim/next_stage', Bool, queue_size=1))
        self._publishers_previous.append(rospy.Publisher(f'/eval_sim/previous_stage', Bool, queue_size=1))
        for env_num in range(self.n_envs):
            self._publishers_next.append(rospy.Publisher(f'/sim_{(env_num + 1)}/next_stage', Bool, queue_size=1))
            self._publishers_previous.append(rospy.Publisher(f'/sim_{(env_num + 1)}/previous_stage', Bool, queue_size=1))

    def _on_step(self, EvalObject: EvalCallback) -> bool:
        """Compare eval metrics to the thresholds and publish transitions."""
        assert isinstance(EvalObject, EvalCallback), f'InitiateNewTrainStage must be called within EvalCallback'
        if self.activated:
            if (EvalObject.n_eval_episodes < 20):
                warnings.warn(('Only %d evaluation episodes considered for threshold monitoring,results might not represent agent performance well' % EvalObject.n_eval_episodes))
            # Metric at or below the lower threshold: go back one stage.
            if (((self.threshhold_type == 'rew') and (EvalObject.best_mean_reward <= self.lower_threshold)) or ((self.threshhold_type == 'succ') and (EvalObject.last_success_rate <= self.lower_threshold))):
                for (i, pub) in enumerate(self._publishers_previous):
                    pub.publish(self._trigger)
                    # Log only once per transition (first publisher).
                    if (i == 0):
                        self.log_curr_stage(EvalObject.logger)
            # Metric at or above the upper threshold: advance one stage and
            # reset the eval bests so the new stage is judged afresh.
            if (((self.threshhold_type == 'rew') and (EvalObject.best_mean_reward >= self.upper_threshold)) or ((self.threshhold_type == 'succ') and (EvalObject.last_success_rate >= self.upper_threshold))):
                if (not rospy.get_param('/last_stage_reached')):
                    EvalObject.best_mean_reward = (- np.inf)
                    EvalObject.last_success_rate = (- np.inf)
                    for (i, pub) in enumerate(self._publishers_next):
                        pub.publish(self._trigger)
                        if (i == 0):
                            self.log_curr_stage(EvalObject.logger)

    def log_curr_stage(self, logger):
        # Give ROS a moment to apply the stage switch before reading it back.
        time.sleep(1)
        curr_stage = rospy.get_param('/curr_stage', (- 1))
        logger.record('train_stage/stage_idx', curr_stage)
def ChunkedSourceIterator(source_items: List, num_instances: int=1, instance_rank: int=0):
    """Split ``source_items`` into ``num_instances`` contiguous chunks and
    return a checkpointable iterator over the chunk owned by
    ``instance_rank`` (the final chunk may be shorter)."""
    chunk_size = math.ceil(len(source_items) / num_instances)
    begin = instance_rank * chunk_size
    return NativeCheckpointableIterator(source_items[begin:begin + chunk_size])
class keypoint_outputs(nn.Module):
    """Mask R-CNN keypoint head outputs: optional deconv, per-keypoint
    heatmap prediction, and optional bilinear upsampling of the heatmaps."""

    def __init__(self, dim_in):
        super().__init__()
        # Upsample only when the config asks for more than 1x.
        self.upsample_heatmap = (cfg.KRCNN.UP_SCALE > 1)
        if cfg.KRCNN.USE_DECONV:
            # Intermediate stride-2 deconvolution before classification.
            self.deconv = nn.ConvTranspose2d(dim_in, cfg.KRCNN.DECONV_DIM, cfg.KRCNN.DECONV_KERNEL, 2, padding=(int((cfg.KRCNN.DECONV_KERNEL / 2)) - 1))
            dim_in = cfg.KRCNN.DECONV_DIM
        if cfg.KRCNN.USE_DECONV_OUTPUT:
            # Predict keypoint heatmaps with a stride-2 deconvolution ...
            self.classify = nn.ConvTranspose2d(dim_in, cfg.KRCNN.NUM_KEYPOINTS, cfg.KRCNN.DECONV_KERNEL, 2, padding=int(((cfg.KRCNN.DECONV_KERNEL / 2) - 1)))
        else:
            # ... or with a plain 1x1 convolution.
            self.classify = nn.Conv2d(dim_in, cfg.KRCNN.NUM_KEYPOINTS, 1, 1, padding=0)
        if self.upsample_heatmap:
            self.upsample = mynn.BilinearInterpolation2d(cfg.KRCNN.NUM_KEYPOINTS, cfg.KRCNN.NUM_KEYPOINTS, cfg.KRCNN.UP_SCALE)
        self._init_weights()

    def _init_weights(self):
        # Detectron-style initialization for deconv and classifier layers.
        if cfg.KRCNN.USE_DECONV:
            init.normal_(self.deconv.weight, std=0.01)
            init.constant_(self.deconv.bias, 0)
        if (cfg.KRCNN.CONV_INIT == 'GaussianFill'):
            init.normal_(self.classify.weight, std=0.001)
        elif (cfg.KRCNN.CONV_INIT == 'MSRAFill'):
            mynn.init.MSRAFill(self.classify.weight)
        else:
            raise ValueError(cfg.KRCNN.CONV_INIT)
        init.constant_(self.classify.bias, 0)

    def detectron_weight_mapping(self):
        """Map this module's parameter names to Detectron blob names
        (None marks parameters with no Detectron counterpart)."""
        detectron_weight_mapping = {}
        if cfg.KRCNN.USE_DECONV:
            detectron_weight_mapping.update({'deconv.weight': 'kps_deconv_w', 'deconv.bias': 'kps_deconv_b'})
        if self.upsample_heatmap:
            # With upsampling the raw classifier output is the low-res score.
            blob_name = 'kps_score_lowres'
            detectron_weight_mapping.update({'upsample.upconv.weight': None, 'upsample.upconv.bias': None})
        else:
            blob_name = 'kps_score'
        detectron_weight_mapping.update({'classify.weight': (blob_name + '_w'), 'classify.bias': (blob_name + '_b')})
        return (detectron_weight_mapping, [])

    def forward(self, x):
        if cfg.KRCNN.USE_DECONV:
            x = F.relu(self.deconv(x), inplace=True)
        x = self.classify(x)
        if self.upsample_heatmap:
            x = self.upsample(x)
        return x
class TestClassAssociationRule(unittest.TestCase):
    """Ordering, transaction coverage and length of ClassAssocationRule."""

    def test_compare(self):
        transaction1 = Transaction([1, 1, 0], ['A', 'B', 'C'], ('Class', 0))
        item1 = Item('A', 1)
        item2 = Item('B', 1)
        item3 = Item('C', 0)
        item4 = Item('B', 5)
        cons3 = Consequent('Y', 3)
        car1 = ClassAssocationRule(Antecedent([item1, item2]), Consequent('Y', 1), 0.5, 0.9)
        car2 = ClassAssocationRule(Antecedent([item2]), Consequent('Y', 2), 0.5, 0.9)
        car3 = ClassAssocationRule(Antecedent([item3]), cons3, 0.5, 0.9)
        car4 = ClassAssocationRule(Antecedent([item4]), cons3, 0.5, 1)
        ranked = sorted([car1, car2, car3, car4], reverse=True)
        # Pairwise ordering of rules.
        assert car1 < car2
        assert car2 > car3
        assert car3 < car2
        assert car4 > car3
        # Antecedent containment within the transaction ('B'=5 is absent).
        assert car1.antecedent <= transaction1
        assert car2.antecedent <= transaction1
        assert car3.antecedent <= transaction1
        assert not (car4.antecedent <= transaction1)
        # The highest-confidence rule sorts first in descending order.
        assert ranked[0] == car4

    def test_len(self):
        rule = ClassAssocationRule(
            Antecedent([Item('A', 1), Item('B', 1)]), Consequent('Y', 1), 0.5, 0.9)
        # Length counts the antecedent items plus the consequent.
        assert len(rule) == 3
class SparseMM(torch.autograd.Function):
    """Matrix multiply with manually defined gradients (sparse-friendly).

    NOTE(review): this uses the legacy (pre-0.4) autograd.Function API --
    instance methods and save_for_backward on self. Modern PyTorch requires
    @staticmethod forward/backward taking a ctx argument; keep in mind when
    upgrading torch.
    """

    def forward(self, matrix1, matrix2):
        # Stash the operands; both are needed to form the gradients.
        self.save_for_backward(matrix1, matrix2)
        return torch.mm(matrix1, matrix2)

    def backward(self, grad_output):
        (matrix1, matrix2) = self.saved_tensors
        grad_matrix1 = grad_matrix2 = None
        # Only compute the gradients autograd actually requires.
        if self.needs_input_grad[0]:
            grad_matrix1 = torch.mm(grad_output, matrix2.t())
        if self.needs_input_grad[1]:
            grad_matrix2 = torch.mm(matrix1.t(), grad_output)
        return (grad_matrix1, grad_matrix2)
def test_chroma_cqt(waveform):
    """chroma_cqt() must produce a NumPy array."""
    result = waveform.chroma_cqt()
    assert isinstance(result, np.ndarray)
def train(model):
    """Train ``model`` on synthetic batches with Adam and L1 loss on CUDA.

    Runs 500 steps of batch size 1024 (max sequence length 10) and returns
    the per-step loss history.
    """
    model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
    criterion = nn.L1Loss().cuda()
    history = []
    for _ in range(500):
        inputs, targets = gen_data(batch_size=2 ** 10, max_length=10)
        inputs = torch.from_numpy(inputs).float().cuda()
        targets = torch.from_numpy(targets).float().cuda()
        step_loss = criterion(model(inputs), targets)
        optimizer.zero_grad()
        step_loss.backward()
        optimizer.step()
        history.append(step_loss.item())
    return history
class AttrDict(dict):
    """Dictionary whose entries are also reachable as attributes, with an
    immutability switch.

    Real instance attributes (stored in __dict__) shadow dict entries on
    both read and write. While immutable, every attribute write raises
    AttributeError; the flag propagates recursively into nested AttrDicts.
    """

    IMMUTABLE = '__immutable__'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__dict__[AttrDict.IMMUTABLE] = False

    def __getattr__(self, name):
        # Prefer real instance attributes, then fall back to dict entries.
        if name in self.__dict__:
            return self.__dict__[name]
        if name in self:
            return self[name]
        raise AttributeError(name)

    def __setattr__(self, name, value):
        if self.__dict__[AttrDict.IMMUTABLE]:
            raise AttributeError('Attempted to set "{}" to "{}", but AttrDict is immutable'.format(name, value))
        # Writes land on an existing real attribute, otherwise in the dict.
        if name in self.__dict__:
            self.__dict__[name] = value
        else:
            self[name] = value

    def immutable(self, is_immutable):
        """Set this AttrDict's immutability and propagate it to any nested
        AttrDict values (both attributes and dict entries)."""
        self.__dict__[AttrDict.IMMUTABLE] = is_immutable
        for attr_value in self.__dict__.values():
            if isinstance(attr_value, AttrDict):
                attr_value.immutable(is_immutable)
        for entry_value in self.values():
            if isinstance(entry_value, AttrDict):
                entry_value.immutable(is_immutable)

    def is_immutable(self):
        return self.__dict__[AttrDict.IMMUTABLE]
class SingleLinearClassifier(nn.Module):
    """A single affine projection from hidden states to label logits."""

    def __init__(self, hidden_size, num_label):
        super(SingleLinearClassifier, self).__init__()
        self.num_label = num_label
        self.classifier = nn.Linear(hidden_size, num_label)

    def forward(self, input_features):
        # (..., hidden_size) -> (..., num_label)
        return self.classifier(input_features)
def _groupByClip(dict_text: Dict[(str, str)]):
    """Merge utterance-level transcripts into clip-level transcripts.

    Utterance ids are visited in natural-sort order; the first 11 characters
    of each id identify the clip. Newlines within each utterance are
    replaced by spaces before concatenation.
    """
    grouped: Dict[str, str] = {}
    for utt_id in sorted(dict_text.keys(), key=natural_keys):
        clip_id = utt_id[:11]
        text = dict_text[utt_id].replace('\n', ' ')
        grouped[clip_id] = grouped.get(clip_id, '') + text
    return grouped
class ConcatLayer(MergeLayer):
    """Lasagne merge layer that concatenates its inputs along ``axis``.

    ``cropping`` (if given) follows lasagne's autocrop specification; the
    concatenation axis itself is never cropped.
    """

    def __init__(self, incomings, axis=1, cropping=None, **kwargs):
        super(ConcatLayer, self).__init__(incomings, **kwargs)
        self.axis = axis
        if (cropping is not None):
            # Disable cropping along the concatenation axis.
            cropping = list(cropping)
            cropping[axis] = None
        self.cropping = cropping

    def get_output_shape_for(self, input_shapes):
        input_shapes = autocrop_array_shapes(input_shapes, self.cropping)
        # Per dimension, take the first known (non-None) size as reference.
        output_shape = [next((s for s in sizes if (s is not None)), None) for sizes in zip(*input_shapes)]

        def match(shape1, shape2):
            # Shapes agree when equal in every dimension except the concat
            # axis; None (unknown) matches anything.
            return ((len(shape1) == len(shape2)) and all((((i == self.axis) or (s1 is None) or (s2 is None) or (s1 == s2)) for (i, (s1, s2)) in enumerate(zip(shape1, shape2)))))
        if (not all((match(shape, output_shape) for shape in input_shapes))):
            raise ValueError('Mismatch: input shapes must be the same except in the concatenation axis')
        # Concat size is the sum, or unknown if any input size is unknown.
        sizes = [input_shape[self.axis] for input_shape in input_shapes]
        concat_size = (None if any(((s is None) for s in sizes)) else sum(sizes))
        output_shape[self.axis] = concat_size
        return tuple(output_shape)

    def get_output_for(self, inputs, **kwargs):
        inputs = autocrop(inputs, self.cropping)
        return T.concatenate(inputs, axis=self.axis)
class BasicBlock(nn.Module):
    """ResNet basic block (two 3x3 convolutions) with configurable BatchNorm
    affine-ness and a switchable identity shortcut."""

    expansion = 1

    def __init__(self, inplanes, planes, shortcut, bn_aff, stride=1,
                 downsample=None, groups=1, base_width=64, dilation=1,
                 norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError('Dilation > 1 not supported in BasicBlock')
        self.bn_aff = bn_aff
        self.shortcut = shortcut
        # Only the first conv may downsample; both convs are followed by
        # (optionally non-affine) batch norm.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes, affine=self.bn_aff)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes, affine=self.bn_aff)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # The identity path is downsampled when spatial/channel dims change.
        identity = self.downsample(x) if self.downsample is not None else x
        if self.shortcut:
            out += identity
        out = self.relu(out)
        return out
# BUG FIX: the source contained a bare "(allow_output_mutation=True)"
# fragment here, which is a syntax error -- almost certainly the remains of
# Streamlit's "@st.cache(allow_output_mutation=True)" decorator with its
# "@st.cache" prefix lost. Removed so the module parses; restore the full
# decorator once the `st` import is confirmed at the top of the file.
def load_models():
    """Load the retrieval and generation models for the ELI5 demo.

    Returns:
        (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model), where the
        retrieval pair is (None, None) unless LOAD_DENSE_INDEX is set, and
        the seq2seq pair is BART (with fine-tuned weights loaded from disk)
        or a T5-small model depending on MODEL_TYPE.
    """
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained('yjernite/retribert-base-uncased')
        qar_model = AutoModel.from_pretrained('yjernite/retribert-base-uncased').to('cuda:0')
        _ = qar_model.eval()
    else:
        (qar_tokenizer, qar_model) = (None, None)
    if (MODEL_TYPE == 'bart'):
        s2s_tokenizer = AutoTokenizer.from_pretrained('yjernite/bart_eli5')
        s2s_model = AutoModelForSeq2SeqLM.from_pretrained('yjernite/bart_eli5').to('cuda:0')
        # Overwrite the hub weights with the locally fine-tuned checkpoint.
        save_dict = torch.load('seq2seq_models/eli5_bart_model_blm_2.pth')
        s2s_model.load_state_dict(save_dict['model'])
        _ = s2s_model.eval()
    else:
        (s2s_tokenizer, s2s_model) = make_qa_s2s_model(model_name='t5-small', from_file='seq2seq_models/eli5_t5_model_1024_4.pth', device='cuda:0')
    return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model)
class DPTImageProcessingTester(unittest.TestCase):
    """Holds the configuration used by DPT image-processor tests and exposes
    it as the kwargs dict the processor under test is constructed from."""

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18,
                 min_resolution=30, max_resolution=400, do_resize=True,
                 size=None, do_normalize=True, image_mean=None, image_std=None):
        # BUG FIX: image_mean/image_std used mutable list defaults, which
        # are shared across calls; use the None-sentinel pattern already
        # applied to `size`.
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor."""
        return {'image_mean': self.image_mean, 'image_std': self.image_std,
                'do_normalize': self.do_normalize, 'do_resize': self.do_resize,
                'size': self.size}
def adb_devices():
    """Return the serial numbers of all attached devices reported by
    ``adb devices``."""
    pattern = re.compile(r'(\S+)\s+device')
    matches = (pattern.match(line) for line in split_stdout(sh.adb('devices')))
    return [hit.group(1) for hit in matches if hit]
class PrefixConstrainedLogitsProcessor(metaclass=DummyObject):
    """Placeholder class emitted when torch is not installed: any attempt to
    instantiate it raises a helpful error through ``requires_backends``."""
    # backends that must be installed for the real implementation
    _backends = ['torch']
    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def get_view_a2j_parser():
    """Build the command-line parser for view-based A2J training/evaluation.

    Bug fix: several flags were declared with ``type=bool``, but
    ``bool("False")`` is ``True`` (any non-empty string is truthy), so e.g.
    ``--resume_training False`` silently enabled resuming.  Those flags now
    use ``_str2bool``, which accepts true/false/yes/no/1/0 (case-insensitive)
    and rejects anything else; defaults are unchanged.
    ``--learning_decay_rate`` also gained ``type=float`` so an explicitly
    passed value is no longer left as a string.
    """
    def _str2bool(value):
        # argparse-friendly boolean conversion
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('true', 't', 'yes', 'y', '1'):
            return True
        if lowered in ('false', 'f', 'no', 'n', '0'):
            return False
        raise argparse.ArgumentTypeError('Boolean value expected, got %r' % value)
    parser = argparse.ArgumentParser()
    parser.add_argument('--seed', default=0, type=int)
    parser.add_argument('--dataset', default='nyu')
    parser.add_argument('--phase', default='train')
    parser.add_argument('--split', default=1, type=int, help='Divide the train dataset into s parts, each as an epoch.')
    parser.add_argument('--batch_size', default=16, type=int)
    parser.add_argument('--num_epoch', default=20, type=int)
    parser.add_argument('--gpus', type=int, nargs='+')
    parser.add_argument('--optim', default='Adam', help='Must be Adam or SGD.')
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--model_saved_path', default='./checkpoint/base')
    parser.add_argument('--pre_a2j', default=None)
    parser.add_argument('--pre_model_path', default=None)
    parser.add_argument('--log_dir', default='./logs/base')
    parser.add_argument('--resume_training', default=False, type=_str2bool, help='If resume_training is False, log dir will not be remove.')
    parser.add_argument('--learning_decay_rate', default=0.8, type=float, help='Learning decay rate each epoch.')
    parser.add_argument('--reg_weight', default=0.001, type=float, help='Regularization weight.')
    parser.add_argument('--level', default=3, type=int, help='Specify the number of virtual views. Levels 1, 2, 3, 4 have 3, 9, 15 and 25 views, respectively.')
    parser.add_argument('--n_head', default=1, type=int)
    parser.add_argument('--d_attn', default=256, type=int)
    parser.add_argument('--d_k', default=64, type=int)
    parser.add_argument('--d_v', default=64, type=int)
    parser.add_argument('--d_inner', default=256, type=int)
    parser.add_argument('--dropout_rate', default=0.5, type=float)
    parser.add_argument('--num_select', default=3, type=int)
    parser.add_argument('--alpha', default=0.5, type=float)
    parser.add_argument('--conf_factor', default=100.0, type=float)
    parser.add_argument('--random_select', default=False, type=_str2bool)
    parser.add_argument('--num_worker', default=4, type=int, help='Number worker of Dataloader.')
    parser.add_argument('--max_jitter', default=10.0, type=float, help='Sigma of jittering center of mass.')
    parser.add_argument('--depth_sigma', default=1.0, type=float, help='Sigma of jittering depth of pixel.')
    parser.add_argument('--cube_len', default=None, type=float)
    parser.add_argument('--min_scale', default=1.0, type=float)
    parser.add_argument('--max_scale', default=1.0, type=float)
    parser.add_argument('--offset', default=20.0, type=float, help='Offset of bounding box.')
    parser.add_argument('--hand_thickness', default=20.0, type=float)
    parser.add_argument('--random_flip', default=False, type=_str2bool, help='Whether flip image randomly.')
    parser.add_argument('--save_result', default=False, type=_str2bool)
    parser.add_argument('--use_joint', default=False, type=_str2bool)
    parser.add_argument('--adjust_cube', default=False, type=_str2bool)
    parser.add_argument('--config', default='./config/nyu/train_a2j.yaml')
    return parser
_model
def nfnet_f7s(pretrained=False, **kwargs):
    """NFNet-F7 (small variant) constructor.

    NOTE(review): the bare ``_model`` line above is almost certainly the
    remnant of a stripped registration decorator (e.g. ``@register_model``);
    as written it is just a no-op name lookup -- restore the decorator.
    """
    return _create_normfreenet('nfnet_f7s', pretrained=pretrained, **kwargs)
def main():
    """Apply a precomputed PCA projection (A, b) to a memory-mapped feature
    matrix in batches, writing the transformed features and copies of the
    accompanying metadata files (.tsv/.lengths/.phn/.wrd) to the save dir."""
    args = get_parser().parse_args()
    source_path = osp.join(args.source, args.split)
    data_path = source_path + '_unfiltered' if args.unfiltered else source_path
    print(f'data path: {data_path}')
    features = np.load(data_path + '.npy', mmap_mode='r')
    pca_A = torch.from_numpy(np.load(args.pca_path + '_A.npy')).cuda()
    pca_b = torch.from_numpy(np.load(args.pca_path + '_b.npy')).cuda()
    os.makedirs(args.save_dir, exist_ok=True)
    save_path = osp.join(args.save_dir, args.split)
    copyfile(source_path + '.tsv', save_path + '.tsv')
    copyfile(data_path + '.lengths', save_path + '.lengths')
    # optional label files are copied only when present
    for ext in ('.phn', '.wrd'):
        if osp.exists(source_path + ext):
            copyfile(source_path + ext, save_path + ext)
    # NpyAppendArray appends, so remove any stale output first
    if osp.exists(save_path + '.npy'):
        os.remove(save_path + '.npy')
    writer = NpyAppendArray(save_path + '.npy')
    num_batches = math.ceil(features.shape[0] / args.batch_size)
    with torch.no_grad():
        for batch_idx in tqdm.trange(num_batches):
            lo = batch_idx * args.batch_size
            hi = lo + args.batch_size
            chunk = torch.from_numpy(features[lo:hi]).cuda()
            chunk = torch.matmul(chunk, pca_A) + pca_b
            writer.append(chunk.cpu().numpy())
def write_uchars(fd, values, fmt='>{:d}B'):
    """Pack ``values`` as big-endian unsigned bytes and write them to ``fd``.

    Returns the number of bytes written (one per value).
    """
    packed = struct.pack(fmt.format(len(values)), *values)
    fd.write(packed)
    return len(values)
def print_stats(args, epoch, num_samples, trainloader, metrics):
    """Periodically print running training metrics.

    Prints once every ``args.log_interval`` samples (when the sample counter
    is 1 modulo the interval), pulling running averages from ``metrics``.

    Fix: the sensitivity field was missing its ':' separator
    ('...\\tsensitivity{:.3f}'), making the log line inconsistent with the
    other fields and hard to parse.
    """
    if num_samples % args.log_interval == 1:
        print('Epoch:{:2d}\tSample:{:5d}/{:5d}\tLoss:{:.4f}\tAccuracy:{:.2f}\tPPV:{:.3f}\tsensitivity:{:.3f}'.format(epoch, num_samples, len(trainloader) * args.batch_size, metrics.avg('loss'), metrics.avg('accuracy'), metrics.avg('ppv'), metrics.avg('sensitivity')))
class DataLoader(object):
    """Batched reader over a tokenized validation split.

    Each item is ``([input_ids, attention_mask], labels)`` as int32 numpy
    arrays shaped (batch, -1); the final batch may be shorter than
    ``batch_size``.
    """

    def __init__(self, batch_size, seq_len, dataset_name, task_name, data_dir, tokenizer_dir):
        self.batch_size = batch_size
        raw = load_dataset(dataset_name, cache_dir=data_dir, split='validation')
        tokenizer = AutoTokenizer.from_pretrained(tokenizer_dir)
        # tokenize up-front so __getitem__ only needs to slice
        self.dataset = raw.map(lambda ex: tokenizer(ex['text'], truncation=True, padding='max_length', max_length=seq_len), batched=True)

    def __getitem__(self, idx):
        lo = idx * self.batch_size
        hi = lo + self.batch_size
        # the final batch may run past the end of the dataset
        window = slice(lo, None) if hi > len(self.dataset) else slice(lo, hi)
        batch = self.dataset[window]
        ids = batch['input_ids']
        mask = batch['attention_mask']
        labels = batch['label']
        # a single example comes back unwrapped rather than as a list
        n = len(ids) if isinstance(ids, list) else 1
        to_i32 = lambda a: np.array(a).reshape(n, (- 1)).astype('int32')
        return ([to_i32(ids), to_i32(mask)], to_i32(labels))

    def __len__(self):
        return math.ceil(len(self.dataset) / self.batch_size)
def get_confusion_matrix(correct_seg, segmentation, elements=None):
    """Compute a nested-dict confusion matrix for two label images.

    Args:
        correct_seg: array of ground-truth labels.
        segmentation: array of predicted labels, same shape.
        elements: optional iterable of labels to include; defaults to the
            union of labels present in either image.

    Returns:
        dict ``cm[truth][pred] -> pixel count``.
    """
    if elements is None:
        elements = set(np.unique(correct_seg)) | set(np.unique(segmentation))
        logging.debug('elements parameter not given to get_confusion_matrix')
        logging.debug(" assume '%s'", elements)
    confusion_matrix = {truth: {pred: 0 for pred in elements} for truth in elements}
    # Single pass over the raveled arrays instead of a per-pixel double loop
    # with repeated fancy indexing -- same counts, far less Python overhead.
    for truth, pred in zip(correct_seg.ravel(), segmentation.ravel()):
        confusion_matrix[truth][pred] += 1
    return confusion_matrix
def get_l32_config():
    """Return the ViT-L/16 config with the patch size changed to 32x32."""
    config = get_l16_config()
    config.patches.size = (32, 32)
    return config
_LAYERS.register_module()
class ExampleConv(nn.Module):
    """Toy conv module used to exercise the layer registry.

    NOTE(review): the bare ``_LAYERS.register_module()`` call above is almost
    certainly a decorator whose ``@`` was stripped; as written the class is
    never actually registered.
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True, norm_cfg=None):
        super(ExampleConv, self).__init__()
        # Conv-layer-like attributes kept for interface compatibility; only
        # conv0 below actually affects the computation.
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.bias = bias
        self.norm_cfg = norm_cfg
        self.output_padding = (0, 0, 0)
        self.transposed = False
        self.conv0 = nn.Conv2d(in_channels, out_channels, kernel_size)
        self.init_weights()
    def forward(self, x):
        # single plain convolution; stride/padding/etc. above are not applied
        x = self.conv0(x)
        return x
    def init_weights(self):
        """Zero-initialize conv0's weights."""
        nn.init.constant_(self.conv0.weight, 0)
def at_hat_loss(model, x, y, optimizer, step_size=0.007, epsilon=0.031, perturb_steps=10, h=3.5, beta=1.0, gamma=1.0, attack='linf-pgd', hr_model=None, label_smoothing=0.1):
    """One adversarial-training step with a helper-model (HAT-style) loss.

    Combines (1) label-smoothed cross-entropy on clean inputs, (2)
    cross-entropy on PGD adversarial examples, and (3) cross-entropy on
    "helper" examples extrapolated beyond the adversarial perturbation and
    labelled by the fixed helper network ``hr_model``.

    Returns:
        (loss, batch_metrics) -- the scalar objective and a dict with the
        loss plus clean/adversarial/helper accuracies.

    NOTE(review): ``hr_model`` defaults to None but is dereferenced
    unconditionally below -- callers must always supply it.
    """
    criterion_ce_smooth = SmoothCrossEntropyLoss(reduction='mean', smoothing=label_smoothing)
    criterion_ce = nn.CrossEntropyLoss()
    model.train()
    # freeze BN running statistics while crafting the attack
    track_bn_stats(model, False)
    attack = create_attack(model, criterion_ce, attack, epsilon, perturb_steps, step_size)
    with ctx_noparamgrad_and_eval(model):
        (x_adv, _) = attack.perturb(x, y)
    model.train()
    # helper examples: extrapolate h times along the adversarial direction
    x_hr = (x + (h * (x_adv - x)))
    with ctx_noparamgrad_and_eval(hr_model):
        y_hr = hr_model(x_adv).argmax(dim=1)
    optimizer.zero_grad()
    track_bn_stats(model, True)
    (out_clean, out_adv, out_help) = (model(x), model(x_adv), model(x_hr))
    loss_clean = criterion_ce_smooth(out_clean, y)
    loss_adv = criterion_ce(out_adv, y)
    loss_help = F.cross_entropy(out_help, y_hr, reduction='mean')
    # total objective: clean + beta * adversarial + gamma * helper
    loss = ((loss_clean + (beta * loss_adv)) + (gamma * loss_help))
    batch_metrics = {'loss': loss.item()}
    batch_metrics.update({'adversarial_acc': accuracy(y, out_adv.detach()), 'helper_acc': accuracy(y_hr, out_help.detach())})
    batch_metrics.update({'clean_acc': accuracy(y, out_clean.detach())})
    return (loss, batch_metrics)
.mujoco
.no_cover
.timeout(30)
def test_maml_vpg():
    """Smoke-test the MAML-VPG half-cheetah example script for one tiny epoch.

    NOTE(review): the three leading ``.mujoco`` / ``.no_cover`` /
    ``.timeout(30)`` lines are stripped decorators (likely
    ``@pytest.mark.mujoco`` etc.); as written they are syntax errors --
    restore the decorators.
    """
    assert (subprocess.run([str((EXAMPLES_ROOT_DIR / 'torch/maml_vpg_half_cheetah_dir.py')), '--epochs', '1', '--rollouts_per_task', '1', '--meta_batch_size', '1'], check=False).returncode == 0)
class Linear(torch.nn.Linear):
    """``nn.Linear`` that tolerates empty-batch inputs on old torch versions
    where the builtin linear op could not handle zero-element tensors."""

    def forward(self, x):
        # Normal path: non-empty input, or a torch new enough to handle empty
        # tensors natively.
        if x.numel() != 0 or not obsolete_torch_version(TORCH_VERSION, (1, 5)):
            return super().forward(x)
        # Old-torch empty-input workaround: fabricate an empty output of the
        # correct shape.
        out_shape = [x.shape[0], self.out_features]
        empty = NewEmptyTensorOp.apply(x, out_shape)
        if not self.training:
            return empty
        # Keep autograd connected to the parameters with a zero-valued term.
        dummy = sum(p.view((- 1))[0] for p in self.parameters()) * 0.0
        return empty + dummy
class WideBasic(nn.Module):
    """Pre-activation wide-resnet basic block: BN-ReLU-Conv (with dropout
    after the first conv) twice, plus a 1x1-conv shortcut whenever the
    stride or channel count changes."""

    def __init__(self, in_planes, planes, stride=1):
        super(WideBasic, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_planes)
        # P is a module-level dropout probability defined elsewhere in the file
        self.dropout = nn.Dropout(p=P)
        self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
        needs_projection = stride != 1 or in_planes != planes
        if needs_projection:
            self.shortcut = nn.Sequential(nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True))
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        hidden = self.conv1(F.relu(self.bn1(x)))
        hidden = self.dropout(hidden)
        hidden = self.conv2(F.relu(self.bn2(hidden)))
        return hidden + self.shortcut(x)
def circle_path(obs_all, radius, k):
    """Hand-coded controller steering an agent onto a circular orbit around
    its landmark.

    Args:
        obs_all: nested per-env, per-agent observation vectors; from the use
            below, indices 0-1 hold heading components, 2-3 agent position and
            4-5 the landmark position relative to the agent.
        radius: target circle radius (divided by 1000 below, so presumably in
            mm -- TODO confirm units).
        k: step counter; k == 1 resets the module-level controller state.

    Returns:
        np.array of shape (1, 1, 1) with the chosen steering angle.

    NOTE(review): only the angle of the LAST agent of the LAST env survives
    the loops, and ``actions`` built at the top is never used -- this looks
    written for a single-agent setting; confirm.
    NOTE(review): ``old_distance`` is declared global but never read/written.
    """
    global tracked
    global direction
    global old_distance
    if (k == 1):
        # first step of an episode: reset controller state
        tracked = False
        direction = (- 1)
    actions = np.array([[[]]])
    for obs_env in obs_all:
        for obs in obs_env:
            agent_pos = np.matrix([obs[2], obs[3]]).T
            # heading angle normalized to [0, 2*pi)
            agent_ang = np.arctan2(obs.item(1), obs.item(0))
            if (agent_ang < 0.0):
                agent_ang += (2 * np.pi)
            landmark_pos = np.matrix([obs[4], obs[5]]).T
            # bearing from agent to landmark, also in [0, 2*pi)
            angle_agent_landmark = np.arctan2(landmark_pos.item(1), landmark_pos.item(0))
            if (angle_agent_landmark < 0.0):
                angle_agent_landmark += (2 * np.pi)
            distance = np.sqrt(((landmark_pos.item(0) ** 2) + (landmark_pos.item(1) ** 2)))
            # empirical steering gain as a function of the radius
            alpha_radius = (64.181 * (radius ** (- 0.872)))
            radius_circ = (radius / 1000.0)
            if ((distance > (radius_circ * 2)) or (k < 4)):
                # far from the circle (or warming up): turn toward the
                # landmark, choosing the rotation sign along the shorter arc
                const = 0.5
                if (((agent_ang - angle_agent_landmark) > np.pi) and (agent_ang > angle_agent_landmark)):
                    direction = 1.0
                    angle = direction
                elif (((agent_ang - angle_agent_landmark) < np.pi) and (agent_ang > angle_agent_landmark)):
                    direction = (- 1.0)
                    if ((agent_ang - angle_agent_landmark) < ((np.pi / 2.0) / 2.0)):
                        angle = 0.0
                    else:
                        angle = direction
                elif (((angle_agent_landmark - agent_ang) > np.pi) and (agent_ang < angle_agent_landmark)):
                    direction = (- 1.0)
                    angle = direction
                elif (((angle_agent_landmark - agent_ang) < np.pi) and (agent_ang < angle_agent_landmark)):
                    direction = 1.0
                    if ((angle_agent_landmark - agent_ang) < ((np.pi / 2.0) / 2.0)):
                        angle = 0.0
                    else:
                        angle = direction
                else:
                    angle = 0.0
            else:
                # near the circle: steer proportionally to the angle between
                # the heading and the landmark bearing to hold the orbit
                aux = abs((agent_ang - angle_agent_landmark))
                vector_1 = np.array([obs.item(1), obs.item(0)])
                vector_2 = np.array([landmark_pos.item(1), landmark_pos.item(0)])
                unit_vector_1 = (vector_1 / np.linalg.norm(vector_1))
                unit_vector_2 = (vector_2 / np.linalg.norm(vector_2))
                dot_product = np.dot(unit_vector_1, unit_vector_2)
                aux = np.arccos(dot_product)
                angle = ((0.4 * (aux + 0.7)) * direction)
                angle -= (1 - alpha_radius)
            if (k < 2):
                # very first step: force a fixed turn
                angle = (- 1.0)
    return np.array([[[angle]]])
def evaluate(features, support, labels, mask, placeholders):
    """Run one evaluation pass of the (module-level) GCN model.

    Returns (loss, accuracy, elapsed_seconds).
    """
    started = time.time()
    feed = construct_feed_dict(features, support, labels, mask, placeholders)
    loss_val, acc_val = sess.run([model.loss, model.accuracy], feed_dict=feed)
    return (loss_val, acc_val, time.time() - started)
def frame_generator(frame_duration_ms, audio, sample_rate):
    """Yield successive ``Frame``s of ``frame_duration_ms`` from raw audio
    bytes, each tagged with its start timestamp in seconds.

    The ``* 2`` factor assumes two bytes per sample (16-bit PCM).  Any
    trailing partial frame is dropped.
    """
    frame_bytes = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
    frame_seconds = (float(frame_bytes) / sample_rate) / 2.0
    position = 0
    stamp = 0.0
    while position + frame_bytes < len(audio):
        yield Frame(audio[position:position + frame_bytes], stamp, frame_seconds)
        stamp += frame_seconds
        position += frame_bytes
def _run_process(children, report_queue, global_t, is_stopped, is_paused, env_fn):
    """Worker entry point: run ``children.process`` in a training loop until
    the shared stop flag is set, forwarding every step result (or the
    'paused' / 'error' sentinels) through ``report_queue``.

    Args:
        children: the agent/worker object; must expose ``process`` and may
            expose ``create_env`` / ``initialize``.
        global_t: shared multiprocessing.Value with the global step counter.
        is_stopped / is_paused: shared multiprocessing.Value control flags.
        env_fn: factory used to build the worker's environment lazily.
    """
    try:
        # build the environment lazily, once per worker
        if ((not hasattr(children, 'create_env')) or (children.create_env is None)):
            children.create_env = env_fn(children)
        if hasattr(children, 'initialize'):
            children.initialize()
        def _process(process, *args, **kwargs):
            # forward each step's result to the parent before returning it
            result = process(*args, **kwargs)
            report_queue.put(result)
            return result
        def _run(process, **kwargs):
            children._global_t = 0
            children._is_stopped = False
            while (not children._is_stopped):
                # resync with the shared global step before each iteration
                children._global_t = global_t.value
                (tdiff, _, _) = process(mode='train', context=dict())
                children._global_t += tdiff
                if is_paused.value:
                    report_queue.put('paused')
                    # poll until unpaused or stopped
                    while (is_paused.value and (not is_stopped.value)):
                        sleep(0.005)
                    children._global_t = global_t.value
                children._is_stopped = is_stopped.value
            return None
        return _run(process=partial(_process, process=children.process))
    except Exception as e:
        # notify the parent before propagating so it can tear down cleanly
        report_queue.put('error')
        raise e
class IfTimeout():
    """Tracks a wall-clock deadline, with the ability to exclude ("ignore")
    stretches of time from the accounting.

    Attributes:
        interval: the elapsed non-ignored time measured when the deadline was
            first observed as exceeded; None until then.
    """

    def __init__(self, timeout):
        self.start_time = time.time()
        self.ignored_time = 0.0
        # timeout=None means "never time out"
        self.target_time = None if timeout is None else self.start_time + timeout
        self.interval = None

    def is_timeout(self):
        """Return True once the (ignored-time-adjusted) deadline has passed."""
        if self.target_time is None:
            return False
        now = time.time()
        if (now - self.target_time) - self.ignored_time <= 0:
            return False
        if self.interval is None:
            # record how long we effectively ran when the timeout first fired
            self.interval = (now - self.start_time) - self.ignored_time
        return True

    def add_ignored_time(self, time_amount):
        """Exclude ``time_amount`` seconds from the timeout accounting."""
        self.ignored_time += time_amount
def convert_examples_to_features(examples, label2id, max_seq_length, tokenizer, special_tokens, mode='text'):
    """Convert relation-extraction examples into BERT ``InputFeatures``.

    Entity spans are marked either with special boundary tokens
    (``mode='text'`` / ``'text_ner'``) or by replacing the entity text with
    its NER-type token (other modes); the ``*_ner`` / ``ner_*`` variants
    append extra entity information after [SEP] tokens.

    Args:
        examples: objects exposing .sentence, .span1, .span2, .ner1, .ner2,
            .label and .guid (inferred from attribute use below).
        label2id: mapping from label string to integer id.
        special_tokens: dict mapping marker names to '[unusedN]' tokens;
            mutated in place as new markers are encountered.
        mode: 'text', 'text_ner', or an NER-substitution mode such as
            'ner_text' (prefix-matched via startswith).

    Returns:
        list of ``InputFeatures`` padded/truncated to ``max_seq_length``.
    """
    def get_special_token(w):
        # allocate the next free [unusedN] slot on first sight of a marker
        if (w not in special_tokens):
            special_tokens[w] = ('[unused%d]' % (len(special_tokens) + 1))
        return special_tokens[w]
    num_tokens = 0
    num_fit_examples = 0
    num_shown_examples = 0
    features = []
    for (ex_index, example) in enumerate(examples):
        if ((ex_index % 10000) == 0):
            logger.info(('Writing example %d of %d' % (ex_index, len(examples))))
        tokens = [CLS]
        SUBJECT_START = get_special_token('SUBJ_START')
        SUBJECT_END = get_special_token('SUBJ_END')
        OBJECT_START = get_special_token('OBJ_START')
        OBJECT_END = get_special_token('OBJ_END')
        SUBJECT_NER = get_special_token(('SUBJ=%s' % example.ner1))
        OBJECT_NER = get_special_token(('OBJ=%s' % example.ner2))
        if mode.startswith('text'):
            # keep the full sentence text, bracketing entities with markers
            for (i, token) in enumerate(example.sentence):
                if (i == example.span1[0]):
                    tokens.append(SUBJECT_START)
                if (i == example.span2[0]):
                    tokens.append(OBJECT_START)
                for sub_token in tokenizer.tokenize(token):
                    tokens.append(sub_token)
                if (i == example.span1[1]):
                    tokens.append(SUBJECT_END)
                if (i == example.span2[1]):
                    tokens.append(OBJECT_END)
            if (mode == 'text_ner'):
                # additionally append the NER types of both entities
                tokens = (tokens + [SEP, SUBJECT_NER, SEP, OBJECT_NER, SEP])
            else:
                tokens.append(SEP)
        else:
            # replace each entity's surface text with its NER-type token
            subj_tokens = []
            obj_tokens = []
            for (i, token) in enumerate(example.sentence):
                if (i == example.span1[0]):
                    tokens.append(SUBJECT_NER)
                if (i == example.span2[0]):
                    tokens.append(OBJECT_NER)
                if ((i >= example.span1[0]) and (i <= example.span1[1])):
                    for sub_token in tokenizer.tokenize(token):
                        subj_tokens.append(sub_token)
                elif ((i >= example.span2[0]) and (i <= example.span2[1])):
                    for sub_token in tokenizer.tokenize(token):
                        obj_tokens.append(sub_token)
                else:
                    for sub_token in tokenizer.tokenize(token):
                        tokens.append(sub_token)
            if (mode == 'ner_text'):
                # append the original entity text after the sentence
                tokens.append(SEP)
                for sub_token in subj_tokens:
                    tokens.append(sub_token)
                tokens.append(SEP)
                for sub_token in obj_tokens:
                    tokens.append(sub_token)
            tokens.append(SEP)
        num_tokens += len(tokens)
        # truncate (tracking how many examples fit) and build padded arrays
        if (len(tokens) > max_seq_length):
            tokens = tokens[:max_seq_length]
        else:
            num_fit_examples += 1
        segment_ids = ([0] * len(tokens))
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        input_mask = ([1] * len(input_ids))
        padding = ([0] * (max_seq_length - len(input_ids)))
        input_ids += padding
        input_mask += padding
        segment_ids += padding
        label_id = label2id[example.label]
        assert (len(input_ids) == max_seq_length)
        assert (len(input_mask) == max_seq_length)
        assert (len(segment_ids) == max_seq_length)
        # log a handful of examples (preferring positive labels) for sanity
        if (num_shown_examples < 20):
            if ((ex_index < 5) or (label_id > 0)):
                num_shown_examples += 1
                logger.info('*** Example ***')
                logger.info(('guid: %s' % example.guid))
                logger.info(('tokens: %s' % ' '.join([str(x) for x in tokens])))
                logger.info(('input_ids: %s' % ' '.join([str(x) for x in input_ids])))
                logger.info(('input_mask: %s' % ' '.join([str(x) for x in input_mask])))
                logger.info(('segment_ids: %s' % ' '.join([str(x) for x in segment_ids])))
                logger.info(('label: %s (id = %d)' % (example.label, label_id)))
        features.append(InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id))
    logger.info(('Average #tokens: %.2f' % ((num_tokens * 1.0) / len(examples))))
    logger.info(('%d (%.2f %%) examples can fit max_seq_length = %d' % (num_fit_examples, ((num_fit_examples * 100.0) / len(examples)), max_seq_length)))
    return features
class ModelArguments():
    """Arguments for selecting/loading models, adapters, and soft prompts.

    NOTE(review): every attribute uses ``dataclasses.field`` metadata, which
    only takes effect under a ``@dataclass``-style decorator (e.g. for
    HfArgumentParser); the decorator appears to have been stripped from above
    this class -- confirm and restore it.
    """
    # --- adapter handling ---
    save_adapter_weights: bool = field(default=True, metadata={'help': 'Save the weights for the task-specific adapter.'})
    load_adapter_weights: bool = field(default=False, metadata={'help': 'Load the weights used to task-sepcific adapters.'})
    adapter_dir: str = field(default=None, metadata={'help': 'Path to load task-specific adapters'})
    # --- base model / tokenizer selection ---
    model_name_or_path: str = field(default=None, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})
    # --- prefix/prompt tuning and ATTEMPT attention options ---
    load_prefix_embeddings: bool = field(default=False, metadata={'help': 'load prefix embeddings or not'})
    save_prefix_only: bool = field(default=False, metadata={'help': 'save prefix embeddings only'})
    prompt_embedding_path: Optional[List[str]] = field(default=None, metadata={'help': 'A list of the paths to prefix embeddings'})
    target_prompt_embedding_path: Optional[str] = field(default=None, metadata={'help': 'a path to the target prompt embedding'})
    attn_prefix_tuning: bool = field(default=False, metadata={'help': 'Set true if you try ATTEMPT.'})
    attn_method: Optional[str] = field(default='sub', metadata={'help': 'Attention model for attn_prefix. We currently support the following methods: linear, sub (our main method), and constant (gives the constant and equal weights to all of the prompts.)'})
    shared_attn: bool = field(default=False, metadata={'help': 'shared attention'})
    load_attention: bool = field(default=False, metadata={'help': 'Set true if you want to load pre-trained attention weights'})
    attn_path: Optional[str] = field(default=None, metadata={'help': 'path to attention weights (linear attentions). '})
    attn_path_sub: Optional[List[str]] = field(default=None, metadata={'help': 'list of the path to attention weights (sub attentions). [path_to_down_projection_weights, path_to_up_projection_weights]'})
    ignore_target: bool = field(default=False, metadata={'help': 'Whether to ignore the new target tokens. Mainly for ablation.'})
    fix_attention: bool = field(default=False, metadata={'help': 'this will make the attention weights frozen during training. Mainly for ablation.'})
    temperature: float = field(default=2000, metadata={'help': 'set the soft max temperature of ATTEMPT.'})
    attn_learning_rate: float = field(default=None, metadata={'help': 'set the learning rate for the attention modules.'})
    load_layer_norm: bool = field(default=False, metadata={'help': 'Set true if you want to load pre-trained layer-norm weight and biases.'})
    layer_norm_dir: Optional[List[str]] = field(default=None, metadata={'help': 'Layer norm dir. [path_to_layer_norm_weight.pt, path_to_layer_norm_bias.pt]'})
    prefix_num: Optional[int] = field(default=1, metadata={'help': 'the number of prefix'})
class DIV2K(multiscalesrdata.SRData):
    """DIV2K super-resolution dataset: restricts the parent's file scan to
    the configured [begin, end] index range and points the HR/LR directories
    at the DIV2K layout."""

    def __init__(self, args, name='DIV2K', train=True, benchmark=False):
        super(DIV2K, self).__init__(args, name=name, train=train, benchmark=benchmark)

    def _scan(self):
        """Scan files via the parent, then keep only the configured range."""
        names_hr, names_lr = super(DIV2K, self)._scan()
        keep = slice(self.begin - 1, self.end)
        return (names_hr[keep], [per_scale[keep] for per_scale in names_lr])

    def _set_filesystem(self, dir_data):
        """Set up paths, then override the HR/LR subdirectories for DIV2K."""
        super(DIV2K, self)._set_filesystem(dir_data)
        self.dir_hr = os.path.join(self.apath, 'HR')
        self.dir_lr = os.path.join(self.apath, 'LR_bicubic')
class NCBIDataset(BaseDataset):
    """Thin wrapper marking a dataset as the NCBI corpus; all behavior comes
    from ``BaseDataset``."""
    def __init__(self, dataset):
        super().__init__(dataset=dataset)
class UnrolledAdam(UnrolledOptimizer):
    """The Adam optimizer expressed as pure TF ops over explicit state, so
    the optimization itself can be unrolled and differentiated through.

    NOTE(review): the original docstring was truncated to the unterminated
    string "'The Adam optimizer defined in" (a syntax error in the source);
    it has been rewritten here.
    """
    # optimizer state: step counter t plus first (m) and second (u) moments
    _State = collections.namedtuple('State', ['t', 'm', 'u'])
    def __init__(self, lr=0.1, lr_fn=None, beta1=0.9, beta2=0.999, epsilon=1e-09, colocate_gradients_with_ops=False):
        super(UnrolledAdam, self).__init__(colocate_gradients_with_ops=colocate_gradients_with_ops)
        self._lr = lr
        # lr_fn(lr, step) lets callers schedule the rate; default is constant
        self._lr_fn = ((lambda l, i: l) if (lr_fn is None) else lr_fn)
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
    def init_state(self, x):
        """Return zero-initialized Adam state matching the variables in ``x``."""
        return self._State(t=tf.constant(0, dtype=tf.float32), m=[tf.zeros_like(v) for v in x], u=[tf.zeros_like(v) for v in x])
    def _apply_gradients(self, grads, x, optim_state):
        """Apply one Adam update; returns (new_x, new_state) functionally."""
        lr = self._lr_fn(self._lr, optim_state.t)
        new_optim_state = self._State(t=(optim_state.t + 1.0), m=([None] * len(x)), u=([None] * len(x)))
        t = new_optim_state.t
        new_x = ([None] * len(x))
        for i in range(len(x)):
            g = grads[i]
            m_old = optim_state.m[i]
            u_old = optim_state.u[i]
            # exponential moving averages of the gradient and its square
            new_optim_state.m[i] = ((self._beta1 * m_old) + ((1.0 - self._beta1) * g))
            new_optim_state.u[i] = ((self._beta2 * u_old) + (((1.0 - self._beta2) * g) * g))
            # bias-corrected estimates, as in the Adam paper
            m_hat = (new_optim_state.m[i] / (1.0 - tf.pow(self._beta1, t)))
            u_hat = (new_optim_state.u[i] / (1.0 - tf.pow(self._beta2, t)))
            new_x[i] = (x[i] - ((lr * m_hat) / (tf.sqrt(u_hat) + self._epsilon)))
        return (new_x, new_optim_state)
    def minimize(self, loss, x, optim_state):
        """Differentiate ``loss`` w.r.t. ``x`` and apply one Adam step."""
        grads = tf.gradients(loss, x, colocate_gradients_with_ops=self._colocate_gradients_with_ops)
        return self._apply_gradients(grads, x, optim_state)
_methods
class Model(HPOMixin, tf.keras.Model):
    """Lazily-constructed Keras functional model for HPO: the inputs/outputs
    are stored as lazy objects and materialized per-trial via ``CallCache``.

    NOTE(review): the bare ``_methods`` line above looks like a stripped
    decorator (its ``@`` and argument list lost) -- restore it.
    """
    def __init__(self, **kwargs):
        super().__init__()
        self.model_class = tf.keras.Model
        self.kwargs = kwargs
        # lazy graph endpoints, resolved only when a trial is executed
        self.lazyinputs_ = kwargs.get('inputs', None)
        self.lazyoutputs_ = kwargs.get('outputs', None)
    def _model_init_args(self, trial):
        """Materialize the lazy inputs/outputs for ``trial``; return kwargs."""
        (in_tensors, out_tensors) = CallCache.execute(self.lazyinputs_, self.lazyoutputs_, trial, self.backend)
        self.kwargs['inputs'] = in_tensors
        self.kwargs['outputs'] = out_tensors
        return self.kwargs
    def _get_model_init_args_func_kwargs(self):
        # everything _model_init_args_func needs, for out-of-process execution
        return {'lazyinputs': self.lazyinputs_, 'lazyoutputs': self.lazyoutputs_, 'kwargs': self.kwargs, 'backend': self.backend}
    def _model_init_args_func(trial, lazyinputs, lazyoutputs, kwargs, backend):
        # NOTE(review): defined without ``self`` or @staticmethod -- it is
        # apparently only ever invoked as a plain function together with the
        # kwargs dict built above; confirm the call site.
        (in_tensors, out_tensors) = CallCache.execute(lazyinputs, lazyoutputs, trial, backend)
        kwargs['inputs'] = in_tensors
        kwargs['outputs'] = out_tensors
        return kwargs
def _get_nerf_inner(hparams: Namespace, appearance_count: int, layer_dim: int, xyz_dim: int, weight_key: str) -> nn.Module:
    """Build the NeRF module described by ``hparams`` and optionally load
    checkpoint weights.

    Args:
        xyz_dim: 3 selects the foreground model, 4 the background variant.
        weight_key: the state-dict entry name inside the checkpoint file.
    """
    if (hparams.container_path is not None):
        # fully merged MegaNeRF container exported via torch.jit
        container = torch.jit.load(hparams.container_path, map_location='cpu')
        if (xyz_dim == 3):
            return MegaNeRF([getattr(container, 'sub_module_{}'.format(i)) for i in range(len(container.centroids))], container.centroids, hparams.boundary_margin, False, container.cluster_2d)
        else:
            return MegaNeRF([getattr(container, 'bg_sub_module_{}'.format(i)) for i in range(len(container.centroids))], container.centroids, hparams.boundary_margin, True, container.cluster_2d)
    elif hparams.use_cascade:
        # NOTE(review): both arms of the inner conditional yield ``layer_dim``
        # -- the xyz_dim == 4 case was probably meant to use a different
        # width; confirm against the original configuration.
        nerf = Cascade(_get_single_nerf_inner(hparams, appearance_count, (layer_dim if (xyz_dim == 4) else layer_dim), xyz_dim), _get_single_nerf_inner(hparams, appearance_count, layer_dim, xyz_dim))
    elif (hparams.train_mega_nerf is not None):
        centroid_metadata = torch.load(hparams.train_mega_nerf, map_location='cpu')
        centroids = centroid_metadata['centroids']
        nerf = MegaNeRF([_get_single_nerf_inner(hparams, appearance_count, layer_dim, xyz_dim) for _ in range(len(centroids))], centroids, 1, (xyz_dim == 4), centroid_metadata['cluster_2d'], True)
    else:
        nerf = _get_single_nerf_inner(hparams, appearance_count, layer_dim, xyz_dim)
    if (hparams.ckpt_path is not None):
        state_dict = torch.load(hparams.ckpt_path, map_location='cpu')[weight_key]
        # checkpoints saved under DistributedDataParallel carry a 'module.' prefix
        consume_prefix_in_state_dict_if_present(state_dict, prefix='module.')
        # merge so keys absent from the checkpoint keep initialized values
        model_dict = nerf.state_dict()
        model_dict.update(state_dict)
        nerf.load_state_dict(model_dict)
    return nerf
def make_cosine_lr_schedule(init_lr, total_steps):
    """Build a cosine learning-rate schedule.

    The returned callable maps a step index to a learning rate that decays
    smoothly from ``init_lr`` at step 0 to 0 at ``total_steps``.
    """
    def schedule(step):
        fraction_done = step / total_steps
        return (0.5 * init_lr) * (1 + jnp.cos(fraction_done * onp.pi))
    return schedule
def reduce_tensor(tensor, n_gpus):
    """All-reduce ``tensor`` across ranks and average by ``n_gpus``.

    Returns a new tensor; the caller's tensor is not modified.

    Fix: ``dist.reduce_op`` is a long-deprecated alias that has been removed
    from recent PyTorch releases -- use ``dist.ReduceOp`` (identical
    semantics) instead.
    """
    rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)
    rt /= n_gpus
    return rt
def eval_nmi(pred, gold):
    """Return {'nmi': normalized mutual information between pred and gold}."""
    from sklearn.metrics import normalized_mutual_info_score
    return {'nmi': normalized_mutual_info_score(pred, gold)}
def get_chamfer_average(model_id, pre_sampled=True, cat_desc=None, **kwargs):
    """Return the mean chamfer distance for ``model_id``, computing and
    persisting the values on first use.

    Args:
        cat_desc: optional category description (or collection of them) used
            to restrict the recomputed average to matching categories.
    """
    import os
    from shapenet.core import cat_desc_to_id
    manager = get_chamfer_manager(model_id, pre_sampled, **kwargs)
    values = None
    if os.path.isfile(manager.path):
        with manager.get_saving_dataset('r') as ds:
            values = np.array(tuple(ds.values()))
    if ((values is None) or (len(values) == 0)):
        # nothing cached yet: compute and persist all chamfer values
        manager.save_all()
        with manager.get_saving_dataset('r') as ds:
            if (cat_desc is not None):
                if (not isinstance(cat_desc, (list, tuple, set))):
                    cat_desc = [cat_desc]
                # NOTE(review): assumes cat_desc_to_id accepts a list and
                # yields category ids, and that dataset keys start with the
                # category id -- confirm against the dataset implementation.
                cat_id = set(cat_desc_to_id(cat_desc))
                ds = ds.filter_keys((lambda key: (key[0] in cat_id)))
            values = np.array(tuple(ds.values()))
    return np.mean(values)
class WideResNet(nn.Module):
    """Wide ResNet (WRN-depth-widen_factor) built from injectable conv/linear
    layer factories, so masked or otherwise customized layers can be swapped
    in without changing the architecture."""
    def __init__(self, conv_layer, linear_layer, depth=34, num_classes=10, widen_factor=10, dropRate=0.0):
        super(WideResNet, self).__init__()
        nChannels = [16, (16 * widen_factor), (32 * widen_factor), (64 * widen_factor)]
        assert (((depth - 4) % 6) == 0)
        # blocks per group; NOTE(review): true division yields a float here
        # (e.g. 5.0) -- NetworkBlock presumably casts it to int; '//' would
        # be safer if so.
        n = ((depth - 4) / 6)
        block = BasicBlock
        self.conv1 = conv_layer(3, nChannels[0], kernel_size=3, stride=1, padding=1, bias=False)
        self.block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, conv_layer, 1, dropRate)
        # NOTE(review): sub_block1 mirrors block1 but is never used in
        # forward() -- presumably kept for checkpoint or attack-wrapper
        # compatibility; confirm before removing.
        self.sub_block1 = NetworkBlock(n, nChannels[0], nChannels[1], block, conv_layer, 1, dropRate)
        self.block2 = NetworkBlock(n, nChannels[1], nChannels[2], block, conv_layer, 2, dropRate)
        self.block3 = NetworkBlock(n, nChannels[2], nChannels[3], block, conv_layer, 2, dropRate)
        self.bn1 = nn.BatchNorm2d(nChannels[3])
        self.relu = nn.ReLU(inplace=True)
        self.fc = linear_layer(nChannels[3], num_classes)
        self.nChannels = nChannels[3]
        # He init for convs, unit/zero init for BN, zero bias for linear
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = ((m.kernel_size[0] * m.kernel_size[1]) * m.out_channels)
                m.weight.data.normal_(0, math.sqrt((2.0 / n)))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, linear_layer):
                m.bias.data.zero_()
    def forward(self, x):
        out = self.conv1(x)
        out = self.block1(out)
        out = self.block2(out)
        out = self.block3(out)
        out = self.relu(self.bn1(out))
        # 8x8 average pool (an 8x8 feature map is what 32x32 inputs produce
        # here), then flatten and classify
        out = F.avg_pool2d(out, 8)
        out = out.view((- 1), self.nChannels)
        return self.fc(out)
def DistributedFairseqModel(args, model):
    """Wrap ``model`` for distributed data-parallel training.

    Selects torch's c10d DistributedDataParallel or fairseq's legacy
    implementation based on ``args.ddp_backend``, and returns an instance of
    a subclass that transparently forwards attribute access to the wrapped
    module.

    Raises:
        ValueError: for an unknown ``--ddp-backend`` value.

    Fix: ``inspect.getargspec`` was deprecated and removed in Python 3.11;
    ``inspect.getfullargspec`` returns the same positional-argument names as
    its first element.  The call is also hoisted so introspection runs once
    instead of twice.
    """
    assert isinstance(model, BaseFairseqModel)
    if args.ddp_backend == 'c10d':
        ddp_class = parallel.DistributedDataParallel
        init_kwargs = dict(module=model, device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=False, bucket_cap_mb=args.bucket_cap_mb)
        # only pass flags the installed torch version actually understands
        ddp_arg_names = inspect.getfullargspec(ddp_class)[0]
        if 'check_reduction' in ddp_arg_names:
            init_kwargs['check_reduction'] = True
        if 'find_unused_parameters' in ddp_arg_names:
            init_kwargs['find_unused_parameters'] = args.find_unused_parameters
    elif args.ddp_backend == 'no_c10d':
        ddp_class = LegacyDistributedDataParallel
        init_kwargs = dict(module=model, world_size=args.distributed_world_size, buffer_size=(2 ** 28))
    else:
        raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend)

    class _DistributedFairseqModel(ddp_class):
        """DDP wrapper that falls back to the wrapped module's attributes."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def __getattr__(self, name):
            wrapped_module = super().__getattr__('module')
            if hasattr(wrapped_module, name):
                return getattr(wrapped_module, name)
            return super().__getattr__(name)

    return _DistributedFairseqModel(**init_kwargs)
def make_position_amplitude_gaussian_proposal(model_apply: ModelApply[P], get_std_move: Callable[([PositionAmplitudeData], chex.Scalar)]) -> Callable[([P, PositionAmplitudeData, PRNGKey], Tuple[(PositionAmplitudeData, PRNGKey)])]:
    """Create a Metropolis proposal that jitters walker positions with
    Gaussian noise and re-evaluates the model amplitude at the new positions.

    Args:
        model_apply: function (params, position) -> amplitude.
        get_std_move: maps the current data to the Gaussian step width,
            enabling adaptive step sizes.

    Returns:
        proposal_fn(params, data, key) -> (proposed_data, new_key); the
        walker's move_metadata is carried over unchanged.
    """
    def proposal_fn(params: P, data: PositionAmplitudeData, key: PRNGKey):
        std_move = get_std_move(data)
        (proposed_position, key) = metropolis.gaussian_proposal(data['walker_data']['position'], std_move, key)
        proposed_amplitude = model_apply(params, proposed_position)
        return (make_position_amplitude_data(proposed_position, proposed_amplitude, data['move_metadata']), key)
    return proposal_fn
def data_to_extract(username, args):
    """Assemble plot labels and metric-key metadata for PPI link prediction.

    Metric-name prefixes use 'Local' or 'Global' depending on ``args.local``;
    the experiment key/name lists are only attached for user 'joeybose'.
    """
    scope = 'Local' if args.local else 'Global'
    graph_suffix = scope + '_Graph_'
    labels = {
        'title': 'PPI Link Prediction',
        'x_label': 'Iterations',
        'y_label': 'Percent',
        'train_metric_auc': 'Train_' + graph_suffix,
        'train_metric_ap': 'Train_' + graph_suffix,
        'test_metric_auc': 'Test_' + graph_suffix,
        'test_metric_ap': 'Test_' + graph_suffix,
    }
    if username == 'joeybose':
        labels['experiments_key'] = [[args.one_maml], [args.two_maml], [args.random_exp], [args.no_finetune], [args.finetune], [args.adamic_adar]]
        if args.local:
            labels['experiments_name'] = ['1-MAML', '2-MAML', 'NoFinetune', 'Finetune', 'Adamic-Adar']
        else:
            labels['experiments_name'] = ['1-MAML', '2-MAML', 'Random', 'NoFinetune', 'Finetune']
    return labels
def shufflenet_v2_mpncov_x2_0(pretrained=False, progress=True, **kwargs):
    """ShuffleNetV2-MPNCOV at 2.0x width: stage repeats [4, 8, 4] and
    channel plan [24, 244, 488, 976, 2048]."""
    return _shufflenetv2_mpncov('shufflenetv2_mpncov_x2.0', pretrained, progress, [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs)
def season_game_logs(season):
    """Download Retrosheet game logs for ``season`` as a DataFrame.

    Raises:
        ValueError: if the chadwickbureau/retrosheet repository has no
            game-log file for that season.
    """
    gh = Github(os.getenv('GH_TOKEN', ''))
    repo = gh.get_repo('chadwickbureau/retrosheet')
    # keep only the basename of each file under gamelog/
    available = [entry.path[entry.path.rfind('/') + 1:] for entry in repo.get_contents('gamelog')]
    file_name = f'GL{season}.TXT'
    if file_name not in available:
        raise ValueError(f'Season game logs not available for {season}')
    raw_text = get_text_file(gamelog_url.format(season))
    table = pd.read_csv(StringIO(raw_text), header=None, sep=',', quotechar='"')
    table.columns = gamelog_columns
    return table
def find_average_velocity(df):
    """Add a 'vbar' column combining the three average-velocity components.

    Mutates ``df`` in place and also returns it.
    NOTE(review): presumably ``three_comp_average`` computes the vector
    magnitude sqrt(vx^2 + vy^2 + vz^2) -- confirm against its definition.
    """
    df['vbar'] = three_comp_average(df['vxbar'], df['vybar'], df['vzbar'])
    return df
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''):
    """Run one evaluation pass over `loader` and return averaged metrics.

    Args:
        model: network to evaluate (switched to eval mode here).
        loader: iterable yielding (input, target) batches.
        loss_fn: criterion applied to (output, target).
        args: config namespace; reads prefetcher, channels_last, tta,
            distributed, world_size, local_rank and log_interval.
        amp_autocast: context manager used around the forward pass; defaults
            to contextlib.suppress, i.e. a no-op context.
        log_suffix: string appended to the 'Test' tag in log lines.

    Returns:
        OrderedDict with averaged 'loss', 'top1' and 'top5'.

    NOTE(review): CUDA is assumed available -- tensors are moved with .cuda()
    and torch.cuda.synchronize() is called unconditionally; confirm callers
    never run this on CPU.
    """
    batch_time_m = AverageMeter()
    losses_m = AverageMeter()
    top1_m = AverageMeter()
    top5_m = AverageMeter()
    model.eval()
    end = time.time()
    last_idx = (len(loader) - 1)  # index of the final batch, for last-batch logging
    with torch.no_grad():
        for (batch_idx, (input, target)) in enumerate(loader):
            last_batch = (batch_idx == last_idx)
            # A prefetching loader already delivers CUDA tensors; otherwise
            # move the batch to the GPU here.
            if (not args.prefetcher):
                input = input.cuda()
                target = target.cuda()
            if args.channels_last:
                input = input.contiguous(memory_format=torch.channels_last)
            with amp_autocast():
                output = model(input)
            # Some models return (logits, aux, ...); keep only the main output.
            if isinstance(output, (tuple, list)):
                output = output[0]
            # Test-time augmentation: average predictions over each group of
            # `tta` augmented views and keep one target per group.
            reduce_factor = args.tta
            if (reduce_factor > 1):
                output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
                target = target[0:target.size(0):reduce_factor]
            loss = loss_fn(output, target)
            (acc1, acc5) = accuracy(output, target, topk=(1, 5))
            # In distributed mode, average loss/accuracy across all ranks.
            if args.distributed:
                reduced_loss = reduce_tensor(loss.data, args.world_size)
                acc1 = reduce_tensor(acc1, args.world_size)
                acc5 = reduce_tensor(acc5, args.world_size)
            else:
                reduced_loss = loss.data
            torch.cuda.synchronize()
            # Loss is weighted by input batch size; accuracy by output batch
            # size (these differ when TTA reduction was applied above).
            losses_m.update(reduced_loss.item(), input.size(0))
            top1_m.update(acc1.item(), output.size(0))
            top5_m.update(acc5.item(), output.size(0))
            batch_time_m.update((time.time() - end))
            end = time.time()
            # Log from rank 0 only, on the last batch or every log_interval.
            # NOTE(review): the format string has bare ':' where metric labels
            # (e.g. 'Acc@1'/'Acc@5') appear to have been lost -- confirm
            # against the upstream source.
            if ((args.local_rank == 0) and (last_batch or ((batch_idx % args.log_interval) == 0))):
                log_name = ('Test' + log_suffix)
                _logger.info('{0}: [{1:>4d}/{2}] Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) : {top1.val:>7.4f} ({top1.avg:>7.4f}) : {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(log_name, batch_idx, last_idx, batch_time=batch_time_m, loss=losses_m, top1=top1_m, top5=top5_m))
    metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)])
    return metrics
class CustomJSONEncoder(JSONEncoder):
    """JSONEncoder that also serializes datetimes, dates and arbitrary iterables."""

    def default(self, obj):
        # datetime must be checked before date (datetime subclasses date).
        if isinstance(obj, datetime):
            return obj.strftime('%Y-%m-%d %H:%M:%S')
        if isinstance(obj, date):
            return obj.strftime('%Y-%m-%d')
        try:
            iterator = iter(obj)
        except TypeError:
            # Not iterable either: defer to the base class (raises TypeError).
            return JSONEncoder.default(self, obj)
        return list(iterator)
class BaseEncoder(nn.Module):
    """Abstract encoder interface: subclasses implement `forward` (training)
    and `inference` (decoding with an optional cache)."""

    def __init__(self):
        super().__init__()

    def forward(self, inputs, inputs_mask, **kwargs):
        """Encode `inputs` under `inputs_mask`; must be overridden."""
        raise NotImplementedError

    def inference(self, inputs, inputs_mask, cache=None, **kwargs):
        """Incremental encoding entry point; must be overridden."""
        raise NotImplementedError
class IBNConvBlock(nn.Module):
    """Conv2d followed by a normalization layer (IBN or BatchNorm2d) and an
    optional in-place ReLU.

    Attribute names (`conv`, `ibn`/`bn`, `activ`) are kept stable so existing
    checkpoints load unchanged.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding,
                 dilation=1, groups=1, bias=False, use_ibn=False, activate=True):
        super(IBNConvBlock, self).__init__()
        self.activate = activate
        self.use_ibn = use_ibn
        self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                              kernel_size=kernel_size, stride=stride, padding=padding,
                              dilation=dilation, groups=groups, bias=bias)
        # Instance-batch norm vs. plain batch norm, chosen at construction.
        if use_ibn:
            self.ibn = IBN(channels=out_channels)
        else:
            self.bn = nn.BatchNorm2d(num_features=out_channels)
        if activate:
            self.activ = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.conv(x)
        norm = self.ibn if self.use_ibn else self.bn
        out = norm(out)
        if self.activate:
            out = self.activ(out)
        return out
def _renorm(x, dim=0, inplace=False, eps=1e-12):
    """Divide `x` by the norm computed by `_norm_exclude_dim` over `dim`.

    When `inplace` is False, the input is left untouched and a normalized
    clone is returned; otherwise `x` itself is modified and returned.

    NOTE(review): `eps` is currently unused -- confirm whether it was meant
    to guard the denominator.
    """
    target = x if inplace else x.clone()
    return target.div_(_norm_exclude_dim(target, dim, keepdim=True))
def plot_acc(acc, val_acc, epochs, val_epochs, save_path, plot_name):
    """Plot training/validation accuracy curves, save them to
    ``<save_path><plot_name>.png`` and display the figure.

    `epochs`/`val_epochs` are the x-coordinates matching `acc`/`val_acc`;
    the y-axis is fixed to [0, 100] (percent accuracy).
    """
    plt.figure()
    plt.plot(epochs, acc, label='training')
    plt.plot(val_epochs, val_acc, label='validation')
    plt.title('Training and validation acc')
    plt.ylabel('acc')
    plt.xlabel('epoch')
    plt.xlim(min(epochs), max(epochs))
    plt.ylim(0, 100)
    plt.legend(loc='upper right')
    # Save before show: some backends clear the figure on show().
    plt.savefig(f'{save_path}{plot_name}.png')
    plt.show()
def data_type_dict():
    """Map dtype-name strings to the corresponding torch dtypes."""
    names = ('float16', 'float32', 'float64', 'uint8', 'int8',
             'int16', 'int32', 'int64', 'bool')
    return {name: getattr(torch, name) for name in names}
class KNNSearch(torch.nn.Module):
    """K-nearest-neighbor search module (thin wrapper over ``ops.knn_search``).

    Parameters
    ----------
    metric : distance metric name, e.g. ``'L2'`` (default) or ``'L1'``.
    ignore_query_point : if True, each query point is excluded from its own
        neighborhood.
    return_distances : if True, the op also returns neighbor distances.
    index_dtype : dtype of the returned neighbor indices; must be
        ``torch.int32`` or ``torch.int64``.
    **kwargs : accepted for interface compatibility; unused here.
    """

    def __init__(self, metric='L2', ignore_query_point=False, return_distances=False, index_dtype=torch.int32, **kwargs):
        # Initialize nn.Module *before* assigning any attributes: setting
        # attributes on an uninitialized Module is fragile (tensor- or
        # module-valued attributes would raise before super().__init__()).
        super().__init__()
        if index_dtype not in (torch.int32, torch.int64):
            # Explicit error instead of `assert`, which is stripped under -O.
            raise ValueError('index_dtype must be torch.int32 or torch.int64, got {}'.format(index_dtype))
        self.metric = metric
        self.ignore_query_point = ignore_query_point
        self.return_distances = return_distances
        self.index_dtype = index_dtype

    def forward(self, points, queries, k, points_row_splits=None, queries_row_splits=None):
        """Find the k nearest neighbors in `points` for each point in `queries`.

        Row-splits default to a single batch covering all points/queries.
        """
        if points_row_splits is None:
            points_row_splits = torch.LongTensor([0, points.shape[0]])
        if queries_row_splits is None:
            queries_row_splits = torch.LongTensor([0, queries.shape[0]])
        result = ops.knn_search(ignore_query_point=self.ignore_query_point, return_distances=self.return_distances, metric=self.metric, points=points, queries=queries, k=k, points_row_splits=points_row_splits, queries_row_splits=queries_row_splits, index_dtype=self.index_dtype)
        return result
def pyramidnet110_a270_cifar100(num_classes=100, **kwargs):
    """PyramidNet-110 (widening factor alpha=270, basic blocks) for CIFAR-100."""
    depth = 110
    widening = 270
    return get_pyramidnet_cifar(num_classes=num_classes, blocks=depth, alpha=widening, bottleneck=False, model_name='pyramidnet110_a270_cifar100', **kwargs)
class DebertaTokenizeTransform(TokenizeTransform):
    """Tokenize transform backed by the DeBERTa-v3 base tokenizer."""

    def __init__(self, train):
        # `train` is accepted for interface compatibility but unused.
        del train
        tokenizer = DebertaV2Tokenizer.from_pretrained('microsoft/deberta-v3-base')
        super().__init__(tokenizer=tokenizer)
def make_mirror(src_module, dst_module):
    """Make `dst_module` share parameters with `src_module`.

    Every parameter object of `src_module` is assigned onto `dst_module` at
    the same dotted path, so the two modules reference the *same* tensors.
    """

    def _assign(module, dotted_name, value):
        # Walk down the dotted attribute path and set the leaf attribute.
        head, _, rest = dotted_name.partition('.')
        if rest:
            _assign(getattr(module, head), rest, value)
        else:
            setattr(module, head, value)

    for name, param in src_module.named_parameters():
        _assign(dst_module, name, param)
def _add_file_handler(logger, path, level='INFO'):
    """Attach a FileHandler writing to `path` (idempotent).

    If the logger already has a FileHandler for the same absolute path, this
    is a no-op. Warns when the log file already exists, creates the parent
    directory if needed, and appends rather than truncating.
    """
    abs_path = os.path.abspath(path)
    # Idempotence: FileHandler records its target as `baseFilename`.
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler) and handler.baseFilename == abs_path:
            return
    if os.path.exists(path):
        assert os.path.isfile(path)
        warnings.warn('log already exists in {}'.format(path))
    os.makedirs(os.path.dirname(abs_path), exist_ok=True)
    handler = logging.FileHandler(path, mode='a')
    handler.setLevel(_get_level(level))
    formatter = logging.Formatter(fmt='%(asctime)s - %(module)s - [%(levelname)s] - %(message)s', datefmt='%Y/%m/%d %H:%M:%S')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
class SampleCountingLoader:
    """Wrap a dataloader and log per-dataset instance counts for every batch
    to the event storage (scalar key ``batch/<dataset_name>``)."""

    def __init__(self, loader):
        self.loader = loader

    def __iter__(self):
        storage = get_event_storage()
        for batch in self.loader:
            # Tally instances per source dataset within this batch.
            counts = {}
            for sample in batch:
                name = sample['dataset']
                counts[name] = counts.get(name, 0) + len(sample['instances'])
            for name, count in counts.items():
                storage.put_scalar(f'batch/{name}', count)
            yield batch
def test_run_emits_events_if_successful(run):
    """A successful run fires started/heartbeat/completed and never
    interrupted/failed on its first observer."""
    run()
    observer = run.observers[0]
    expected_fired = (observer.started_event, observer.heartbeat_event, observer.completed_event)
    expected_silent = (observer.interrupted_event, observer.failed_event)
    for event in expected_fired:
        assert event.called
    for event in expected_silent:
        assert not event.called
def torch_nn_conv1d(self, input):
    """Shape-inference stand-in for ``nn.Conv1d.forward``: returns an empty
    tensor with the layer's output shape without running the convolution.

    `input` is assumed to be (..., C_in, L_in) -- TODO confirm against callers.

    Fix: the original computed the numeric output-length formula even when
    ``self.padding == 'same'``, indexing into the string ('same'[0]) and
    crashing with a TypeError. 'same' padding preserves the spatial length
    (torch only allows it with stride 1), so the formula is now skipped.
    """
    shape = list(input.shape)
    padding = self.padding
    if padding == 'valid':
        # 'valid' is equivalent to zero padding.
        padding = (0, 0)
    if padding != 'same':
        l_in = input.shape[-1]
        # Standard Conv1d output-length formula (see torch.nn.Conv1d docs).
        l_out = math.floor((((l_in + (2 * padding[0])) - (self.dilation[0] * (self.kernel_size[0] - 1))) - 1) / self.stride[0] + 1)
        shape[-1] = l_out
    shape[-2] = self.out_channels
    return torch.empty(shape, device=_DEVICE)
def create_model(model_type: str, deterministic: bool, enc_blocks: int, conv_type: str, dataset_type: str, decoder: str, r: float, temperature: float, *args: Any, **kwargs: Any):
    """Instantiate the model matching `dataset_type` and `model_type`.

    Image-style datasets get a feed-forward VAE; graph datasets get a VGAE
    built around an encoder. `model_type == 'euclidean'` selects the
    Euclidean variants; anything else selects the hyperboloid variants.

    Raises:
        ValueError: for an unrecognized `dataset_type`.
    """
    vae_datasets = {'mnist', 'bdp', 'pbt'}
    graph_datasets = {'cora', 'pubmed', 'ppi', 'disease_lp', 'lobster', 'grid', 'prufer', 'csphd', 'phylo', 'diseases', 'wordnet-noun', 'wordnet-mammal'}
    euclidean = (model_type == 'euclidean')
    if dataset_type in vae_datasets:
        factory = FeedForwardVAE if euclidean else FeedForwardHyperboloidVAE
        return factory(*args, **kwargs)
    if dataset_type in graph_datasets:
        if euclidean:
            encoder = Encoder(deterministic, enc_blocks, conv_type, *args, **kwargs)
            return VGAE(encoder, decoder, r, temperature)
        encoder = HyperboloidEncoder(deterministic, enc_blocks, conv_type, *args, **kwargs)
        return HyperboloidVGAE(encoder, decoder, r, temperature)
    raise ValueError(f"Unknown dataset type: '{dataset_type}'.")
def get_input_data(input_file, seq_length, batch_size, num_labels):
    """Build an infinite, batched TF1 input pipeline from a TFRecord file.

    Returns the (input_ids, input_mask, segment_ids, labels) tensors of a
    one-shot iterator over the parsed, repeated, batched dataset.
    """
    feature_spec = {
        'input_ids': tf.FixedLenFeature([seq_length], tf.int64),
        'input_mask': tf.FixedLenFeature([seq_length], tf.int64),
        'segment_ids': tf.FixedLenFeature([seq_length], tf.int64),
        'label_ids': tf.FixedLenFeature([num_labels], tf.int64),
    }

    def _parse(record):
        example = tf.parse_single_example(record, features=feature_spec)
        return (example['input_ids'], example['input_mask'], example['segment_ids'], example['label_ids'])

    dataset = tf.data.TFRecordDataset(input_file)
    # NOTE(review): shuffle() after batch() shuffles whole batches, not
    # individual examples -- confirm this ordering is intended.
    dataset = dataset.map(_parse).repeat().batch(batch_size).shuffle(buffer_size=1000)
    iterator = dataset.make_one_shot_iterator()
    return iterator.get_next()
class SpatialEncoder(nn.Module):
    """Spatial image encoder built on a torchvision ResNet backbone.

    Extracts activations from the first `num_layers` ResNet stages, upsamples
    them to a common resolution, concatenates them channel-wise, and reduces
    the result to `cfg.embed_size` channels with a 1x1 conv.

    NOTE(review): the constructor overrides `pretrained` and (depending on
    cfg.img_feat_size) `num_layers` from the module-level `cfg` object, so
    the corresponding arguments are effectively ignored -- confirm intended.
    """
    def __init__(self, backbone='resnet18', pretrained=True, num_layers=3, index_interp='bilinear', index_padding='zeros', upsample_interp='bilinear', feature_scale=1.0, use_first_pool=True, norm_type='batch'):
        super().__init__()
        # Non-batch norm layers are incompatible with torchvision's
        # pretrained (batch-norm) weights.
        if (norm_type != 'batch'):
            assert (not pretrained)
        self.use_custom_resnet = (backbone == 'custom')
        self.feature_scale = feature_scale  # input rescale factor applied before encoding
        self.use_first_pool = use_first_pool  # apply the stem max-pool before layer1
        norm_layer = get_norm_layer(norm_type)
        pretrained = cfg.pretrained  # overrides the `pretrained` argument from global cfg
        print('Using torchvision', backbone, 'encoder')
        print(('Pretrained: ' + str(pretrained)))
        self.model = getattr(torchvision.models, backbone)(pretrained=pretrained, norm_layer=norm_layer)
        # Strip the classification head; only convolutional features are used.
        self.model.fc = nn.Sequential()
        self.model.avgpool = nn.Sequential()
        # Total channel count of the concatenated pyramid for each num_layers.
        self.latent_size = [0, 64, 128, 256, 512, 1024][num_layers]
        # num_layers is re-derived from cfg.img_feat_size, overriding the argument.
        if (cfg.img_feat_size == 128):
            num_layers = 2
        elif (cfg.img_feat_size == 256):
            num_layers = 3
        elif (cfg.img_feat_size == 512):
            num_layers = 4
        # 1x1 conv mapping the concatenated pyramid to the embedding dimension.
        self.reduction_layer = nn.Conv2d(cfg.img_feat_size, cfg.embed_size, 1)
        self.num_layers = num_layers
        self.index_interp = index_interp
        self.index_padding = index_padding
        self.upsample_interp = upsample_interp
    def forward(self, x):
        """Encode an image batch `x`.

        Returns (holder_feat_map, holder_feat_scale, pixel_feat_map,
        pixel_feat_scale): the reduced embedding map, the concatenated raw
        pyramid, and their (width, height) normalization scale factors.
        """
        # Optionally rescale the input: bilinear when enlarging, area when shrinking.
        if (self.feature_scale != 1.0):
            x = F.interpolate(x, scale_factor=self.feature_scale, mode=('bilinear' if (self.feature_scale > 1.0) else 'area'), align_corners=(True if (self.feature_scale > 1.0) else None), recompute_scale_factor=True)
        pixel_feat_map = None
        holder_feat_map = None
        if self.use_custom_resnet:
            self.latent = self.model(x)
        else:
            # ResNet stem.
            x = self.model.conv1(x)
            x = self.model.bn1(x)
            x = self.model.relu(x)
            latents = [x]
            # Collect intermediate activations from the first num_layers stages.
            if (self.num_layers > 1):
                if self.use_first_pool:
                    x = self.model.maxpool(x)
                x = self.model.layer1(x)
                latents.append(x)
            if (self.num_layers > 2):
                x = self.model.layer2(x)
                latents.append(x)
            if (self.num_layers > 3):
                x = self.model.layer3(x)
                latents.append(x)
            if (self.num_layers > 4):
                x = self.model.layer4(x)
                latents.append(x)
            # NOTE(review): 'nearest ' carries a trailing space (present in
            # upstream pixelNeRF too); with the default interps this makes
            # align_corners True -- confirm intended.
            align_corners = (None if (self.index_interp == 'nearest ') else True)
            # Upsample every level to the resolution of the first (largest) one.
            latent_sz = latents[0].shape[(- 2):]
            for i in range(len(latents)):
                latents[i] = F.interpolate(latents[i], latent_sz, mode=self.upsample_interp, align_corners=align_corners)
            pixel_feat_map = torch.cat(latents, dim=1)
        # (w, h) scale factors used to normalize pixel coordinates for sampling.
        pixel_feat_scale = np.array([pixel_feat_map.shape[(- 1)], pixel_feat_map.shape[(- 2)]])
        pixel_feat_scale = ((pixel_feat_scale / (pixel_feat_scale - 1)) * 2.0)
        holder_feat_map = self.reduction_layer(pixel_feat_map)
        holder_feat_scale = np.array([holder_feat_map.shape[(- 1)], holder_feat_map.shape[(- 2)]])
        holder_feat_scale = ((holder_feat_scale / (holder_feat_scale - 1)) * 2.0)
        return (holder_feat_map, holder_feat_scale, pixel_feat_map, pixel_feat_scale)
    def from_conf(cls, conf):
        # NOTE(review): written as an alternate constructor taking `cls`, but
        # no @classmethod decorator is visible here -- confirm one exists
        # upstream; as-is, `cls` would receive the instance.
        return cls(conf.get_string('backbone'), pretrained=conf.get_bool('pretrained', True), num_layers=conf.get_int('num_layers', 4), index_interp=conf.get_string('index_interp', 'bilinear'), index_padding=conf.get_string('index_padding', 'border'), upsample_interp=conf.get_string('upsample_interp', 'bilinear'), feature_scale=conf.get_float('feature_scale', 1.0), use_first_pool=conf.get_bool('use_first_pool', True))
def proxylessnas_cpu(**kwargs):
    """ProxylessNAS variant searched/optimized for CPU inference."""
    variant = 'cpu'
    return get_proxylessnas(version=variant, model_name='proxylessnas_' + variant, **kwargs)
def get_site():
    """Return the site name parsed from the host FQDN.

    Matches hosts of the form ``<site>.brainpp.cn`` and returns ``<site>``;
    returns None when the FQDN does not match.
    """
    match = re.search('([^.]+)\\.brainpp\\.cn$', socket.getfqdn())
    return match.group(1) if match else None
def GetService(name, config_class):
    """Block until the ROS service `name` is available, then return a proxy.

    Retries proxy creation while it yields None (normally a single pass,
    since ServiceProxy returns an object immediately).
    """
    while True:
        rospy.wait_for_service(name)
        proxy = rospy.ServiceProxy(name, config_class)
        if proxy is not None:
            return proxy
def board8x8() -> Board:
    """Return the fixed 8x8 starting board layout as a JAX array."""
    rows = [
        [2, 1, 1, 0, 2, 1, 1, 0],
        [2, 3, 1, 2, 2, 1, 1, 0],
        [2, 3, 1, 2, 2, 1, 1, 0],
        [2, 3, 0, 0, 2, 1, 1, 0],
        [1, 1, 0, 0, 0, 1, 1, 2],
        [0, 1, 1, 2, 2, 3, 0, 0],
        [1, 0, 0, 2, 1, 2, 3, 5],
        [2, 0, 0, 0, 2, 1, 1, 5],
    ]
    return jnp.array(rows)
def inception_v2(inputs, num_classes=1000, is_training=True, dropout_keep_prob=0.8, min_depth=16, depth_multiplier=1.0, prediction_fn=slim.softmax, spatial_squeeze=True, reuse=None, scope='InceptionV2'):
    """Inception v2 classification model (TF-Slim).

    Builds the Inception v2 base network and appends average pooling, dropout
    and a 1x1 conv producing `num_classes` logits.

    Args:
        inputs: input image batch tensor.
        num_classes: number of output classes.
        is_training: controls batch-norm and dropout behavior.
        dropout_keep_prob: keep probability for the pre-logits dropout.
        min_depth: minimum channel depth passed to the base network.
        depth_multiplier: multiplier for channel depths; must be > 0.
        prediction_fn: maps logits to predictions (default: slim.softmax).
        spatial_squeeze: if True, squeeze the logits' spatial dims to [N, C].
        reuse: variable-scope reuse flag.
        scope: variable-scope name.

    Returns:
        (logits, end_points); end_points also contains 'Logits' and
        'Predictions'.

    Raises:
        ValueError: if depth_multiplier <= 0.
    """
    if (depth_multiplier <= 0):
        raise ValueError('depth_multiplier is not greater than zero.')
    with tf.variable_scope(scope, 'InceptionV2', [inputs, num_classes], reuse=reuse) as scope:
        with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):
            (net, end_points) = inception_v2_base(inputs, scope=scope, min_depth=min_depth, depth_multiplier=depth_multiplier)
            with tf.variable_scope('Logits'):
                # Shrink the pooling kernel when the feature map is smaller than 7x7.
                kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
                net = slim.avg_pool2d(net, kernel_size, padding='VALID', scope='AvgPool_1a_{}x{}'.format(*kernel_size))
                net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
                logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None, normalizer_fn=None, scope='Conv2d_1c_1x1')
                if spatial_squeeze:
                    logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
            end_points['Logits'] = logits
            end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
    return (logits, end_points)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.