code stringlengths 101 5.91M |
|---|
def get_shard_range(tot, nshard, rank):
    """Return the half-open [start, end) slice of `tot` items owned by shard `rank` of `nshard`.

    Boundaries are computed by rounding `tot / nshard * rank`, so shard sizes
    differ by at most one item and together the shards cover exactly [0, tot).
    """
    assert 0 <= rank < nshard, f'invalid rank/nshard {rank}/{nshard}'  # fixed typo: "invaid"
    start = round(tot / nshard * rank)
    end = round(tot / nshard * (rank + 1))
    # An empty shard means tot < nshard; surface that as a caller error.
    assert start < end, f'start={start}, end={end}'
    logger.info(f'rank {rank} of {nshard}, process {end - start} ({start}-{end}) out of {tot}')
    return (start, end)
def logsumexp_2d(tensor):
    """Numerically stable log-sum-exp over all trailing dims, per (batch, channel) row."""
    batch, channels = tensor.size(0), tensor.size(1)
    flat = tensor.view(batch, channels, -1)
    # Subtract the per-row max before exponentiating to avoid overflow.
    peak, _ = torch.max(flat, dim=2, keepdim=True)
    return peak + (flat - peak).exp().sum(dim=2, keepdim=True).log()
class ResNetBottleNeckLayer(nn.Module):
    """Classic ResNet bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, plus a (possibly projected) skip."""

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = 'relu', reduction: int = 4):
        super().__init__()
        needs_projection = in_channels != out_channels or stride != 1
        squeezed = out_channels // reduction
        # Project the residual only when channel count or stride would otherwise mismatch.
        if needs_projection:
            self.shortcut = ResNetShortCut(in_channels, out_channels, stride=stride)
        else:
            self.shortcut = nn.Identity()
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, squeezed, kernel_size=1),
            ResNetConvLayer(squeezed, squeezed, stride=stride),
            ResNetConvLayer(squeezed, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = self.shortcut(hidden_state)
        hidden_state = self.layer(hidden_state)
        hidden_state += residual
        return self.activation(hidden_state)
def bidirectional_rnn(cell_fw, cell_bw, inputs, initial_state_fw=None, initial_state_bw=None, dtype=None, sequence_length=None, scope=None):
    """Run a forward and a backward RNN over `inputs` and concatenate their outputs.

    Args:
        cell_fw, cell_bw: BaseCell instances for the forward/backward passes.
        inputs: non-empty list of per-timestep input tensors.
        initial_state_fw, initial_state_bw: optional initial states.
        dtype: dtype for initial states when none are provided.
        sequence_length: optional per-example lengths; used so that only the
            valid portion of each sequence is reversed for the backward pass.
        scope: variable-scope name prefix (defaults to 'BiRNN').

    Returns:
        (outputs, output_state_fw, output_state_bw), where `outputs` is a list
        of per-timestep concatenations of forward and backward outputs.

    Raises:
        TypeError: if a cell is not a BaseCell or `inputs` is not a list.
        ValueError: if `inputs` is empty.
    """
    if (not isinstance(cell_fw, BaseCell)):
        raise TypeError('cell_fw must be an instance of RNNCell')
    if (not isinstance(cell_bw, BaseCell)):
        raise TypeError('cell_bw must be an instance of RNNCell')
    if (not isinstance(inputs, list)):
        raise TypeError('inputs must be a list')
    if (not inputs):
        raise ValueError('inputs must not be empty')
    name = (scope or 'BiRNN')
    # Forward direction: a plain rnn() pass under its own variable scope.
    with vs.variable_scope((name + '_FW')) as fw_scope:
        (output_fw, output_state_fw) = rnn(cell_fw, inputs, initial_state_fw, dtype, sequence_length, scope=fw_scope)
    # Backward direction: reverse the inputs, run rnn(), then reverse the
    # outputs back so they align timestep-for-timestep with the forward pass.
    with vs.variable_scope((name + '_BW')) as bw_scope:
        (tmp, output_state_bw) = rnn(cell_bw, _reverse_seq(inputs, sequence_length), initial_state_bw, dtype, sequence_length, scope=bw_scope)
    output_bw = _reverse_seq(tmp, sequence_length)
    # Concatenate along the feature axis (this legacy TF API takes the axis first).
    outputs = [array_ops.concat(1, [fw, bw]) for (fw, bw) in zip(output_fw, output_bw)]
    return (outputs, output_state_fw, output_state_bw)
class _BasePRank(BaseEstimator):
    """Shared scoring/metadata behaviour for PRank-style ordinal regressors."""

    def score(self, X, y):
        """Return the mean absolute error between `y` and the model's predictions."""
        predictions = self.predict(X)
        return np.mean(np.abs(y - predictions))

    def classes_(self):
        """Class labels seen during fit, as recorded by the internal label encoder."""
        return self._label_encoder.classes_
def test_BBPSSW_phi_plus_psi_plus():
    """Purify phi+/psi+ pairs 100x: memories reset, measurements disagree, ~50% success rate."""
    success_count = 0
    empty_memory = {'node_id': None, 'memo_id': None}
    for trial in range(100):
        (tl, kept1, kept2, meas1, meas2, ep1, ep2) = create_scenario(phi_plus, psi_plus, trial)
        assert kept1.entangled_memory == empty_memory
        assert kept2.entangled_memory == empty_memory
        assert ep1.meas_res != ep2.meas_res
        state1 = tl.quantum_manager.get(kept1.qstate_key)
        state2 = tl.quantum_manager.get(kept2.qstate_key)
        # The kept qubits must end up in distinct single-qubit states.
        assert state1 is not state2
        assert len(state1.keys) == 1
        assert len(state2.keys) == 1
        if ep1.meas_res == 0:
            success_count += 1
    # Outcome 0 should occur roughly half the time over 100 trials.
    assert abs(success_count - 50) < 10
def unscaled_dropout(inputs, keep_prob, noise_shape=None):
    """Dropout without the usual 1/keep_prob rescaling of the surviving units."""
    if isinstance(noise_shape, (list, tuple)):
        noise_shape = tf.stack(noise_shape)
    dropped = tf.nn.dropout(inputs, keep_prob=keep_prob, noise_shape=noise_shape)
    # tf.nn.dropout scales kept values by 1/keep_prob; multiply that back out.
    return dropped * keep_prob
class BasicBlock(nn.Module):
    """TResNet-style basic residual block: two 3x3 conv+IABN stages with optional SE and AA downsampling."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, use_se=True, aa_layer=None):
        super(BasicBlock, self).__init__()
        # The stride-2 path either convolves with stride 2 directly, or uses a
        # stride-1 conv followed by an anti-aliasing (blur) downsample layer.
        if stride == 1:
            self.conv1 = conv2d_iabn(inplanes, planes, stride=1, act_param=0.001)
        elif aa_layer is None:
            self.conv1 = conv2d_iabn(inplanes, planes, stride=2, act_param=0.001)
        else:
            self.conv1 = nn.Sequential(
                conv2d_iabn(inplanes, planes, stride=1, act_param=0.001),
                aa_layer(channels=planes, filt_size=3, stride=2),
            )
        self.conv2 = conv2d_iabn(planes, planes, stride=1, act_layer='identity')
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        # Squeeze-excite bottleneck width, floored at 64 channels.
        se_planes = max((planes * self.expansion) // 4, 64)
        self.se = FastSEModule(planes * self.expansion, se_planes) if use_se else None

    def forward(self, x):
        residual = x if self.downsample is None else self.downsample(x)
        out = self.conv2(self.conv1(x))
        if self.se is not None:
            out = self.se(out)
        out += residual
        return self.relu(out)
class DiscriminatorMLP(nn.Module):
    """MLP discriminator scoring flattened (n_timesteps x input_dim) trajectories.

    `n_timesteps` previously came only from the module-level `args` object;
    it is now an optional parameter (defaulting to `args.n_timesteps` for
    backward compatibility) so the class can be constructed without the
    global CLI namespace.
    """

    def __init__(self, input_dim, n_timesteps=None):
        super(DiscriminatorMLP, self).__init__()
        if n_timesteps is None:
            # Fall back to the global CLI args, matching the original behaviour.
            n_timesteps = args.n_timesteps
        self.model = nn.Sequential(
            nn.Linear(n_timesteps * input_dim, 1024),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(1024, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
        )

    def forward(self, x):
        """Flatten everything past the batch dimension and return one logit per sample."""
        batch_size = x.shape[0]
        return self.model(x.view(batch_size, -1))
class SawyerDoorCloseEnvV2(SawyerDoorEnvV2):
    """Door-closing variant of the Sawyer door environment (Meta-World V2).

    Reuses the parent door env but re-targets init/goal positions so the task
    is to push the door shut rather than pull it open.
    """

    def __init__(self):
        # Narrow goal box around the closed-door handle position.
        goal_low = (0.2, 0.65, 0.1499)
        goal_high = (0.3, 0.75, 0.1501)
        super().__init__()
        self.init_config = {'obj_init_angle': 0.3, 'obj_init_pos': np.array([0.1, 0.95, 0.15], dtype=np.float32), 'hand_init_pos': np.array([0, 0.6, 0.2], dtype=np.float32), 'door_init_pos': 1.3}
        self.goal = np.array([0.2, 0.8, 0.15])
        self.obj_init_pos = self.init_config['obj_init_pos']
        self.obj_init_angle = self.init_config['obj_init_angle']
        self.hand_init_pos = self.init_config['hand_init_pos']
        # NOTE: set after super().__init__() so it overrides the parent's goal space.
        self.goal_space = Box(np.array(goal_low), np.array(goal_high))

    def reset_model(self):
        """Reset hand, door and goal; returns the initial observation."""
        self._reset_hand()
        self._target_pos = self.goal.copy()
        self.objHeight = self.data.get_geom_xpos('handle')[2]
        if self.random_init:
            # Randomize the door body and derive the goal from it with a fixed offset.
            obj_pos = self._get_state_rand_vec()
            self.obj_init_pos = obj_pos
            goal_pos = (obj_pos.copy() + np.array([0.2, (- 0.2), 0.0]))
            self._target_pos = goal_pos
        self.sim.model.body_pos[self.model.body_name2id('door')] = self.obj_init_pos
        self.sim.model.site_pos[self.model.site_name2id('goal')] = self._target_pos
        # Start with the door swung open (joint angle from init_config).
        self._set_obj_xyz(self.init_config['door_init_pos'])
        # Max distance used to normalize the pull reward; z is dropped via [:-1].
        self.maxPullDist = np.linalg.norm((self.data.get_geom_xpos('handle')[:(- 1)] - self._target_pos[:(- 1)]))
        self.target_reward = ((1000 * self.maxPullDist) + (1000 * 2))
        return self._get_obs()

    def compute_reward(self, actions, obs):
        """Return [reward, reachDist, pullDist] for the given observation (actions unused)."""
        del actions
        objPos = obs[3:6]
        (rightFinger, leftFinger) = (self._get_site_pos('rightEndEffector'), self._get_site_pos('leftEndEffector'))
        fingerCOM = ((rightFinger + leftFinger) / 2)
        pullGoal = self._target_pos
        # Planar (x, y) distances only; z is excluded by [:-1].
        pullDist = np.linalg.norm((objPos[:(- 1)] - pullGoal[:(- 1)]))
        reachDist = np.linalg.norm((objPos - fingerCOM))
        reachRew = (- reachDist)
        self.reachCompleted = (reachDist < 0.05)

        def pullReward():
            # c2/c3 shape two exponential bonuses of different sharpness around the goal.
            c1 = 1000
            c2 = 0.01
            c3 = 0.001
            if self.reachCompleted:
                pullRew = ((1000 * (self.maxPullDist - pullDist)) + (c1 * (np.exp(((- (pullDist ** 2)) / c2)) + np.exp(((- (pullDist ** 2)) / c3)))))
                pullRew = max(pullRew, 0)
                return pullRew
            else:
                # No pulling reward until the gripper has reached the handle.
                return 0
        pullRew = pullReward()
        reward = (reachRew + pullRew)
        return [reward, reachDist, pullDist]
# NOTE(review): the decorator below appeared as a bare ".parametrize(...)" —
# the "@pytest.mark." prefix was evidently lost; restored so the cases run.
@pytest.mark.parametrize('context, action, reward, pscore, description', invalid_input_of_nn_policy_learner_fit)
def test_nn_policy_learner_fit_using_invalid_inputs(context, action, reward, pscore, description):
    """Each invalid fit() input combination must raise ValueError with the expected message."""
    with pytest.raises(ValueError, match=f'{description}*'):
        dim_context = 2
        pg_method = 'dpg'
        learner = ContinuousNNPolicyLearner(dim_context=dim_context, pg_method=pg_method)
        learner.fit(context=context, action=action, reward=reward, pscore=pscore)
class Swish_SENet(nn.Module):
    """SENet backbone for 32x32 inputs that applies Swish (not ReLU) after the stem."""

    def __init__(self, block, num_blocks, num_classes=100):
        super(Swish_SENet, self).__init__()
        self.in_planes = 64
        # 3x3 stem at stride 1 (CIFAR-style), then four stages doubling the width.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
        self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
        self.linear = nn.Linear(512, num_classes)
        self.swish = Swish()

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the first block of a stage downsamples; the rest keep stride 1.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(self.in_planes, planes, s))
            self.in_planes = planes
        return nn.Sequential(*layers)

    def forward(self, x):
        h = self.swish(self.bn1(self.conv1(x)))
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            h = stage(h)
        h = F.avg_pool2d(h, 4)
        h = h.view(h.size(0), -1)
        return self.linear(h)
def have_compatible_glibc(required_major, minimum_minor):
    """True iff the runtime links a glibc of at least required_major.minimum_minor."""
    version = glibc_version_string()
    # A missing version string means we are not running against glibc at all.
    if version is None:
        return False
    return check_glibc_version(version, required_major, minimum_minor)
class A083216(RecurrenceSequence2):
    """OEIS A083216: a(n) = a(n-1) + a(n-2) with a(0) = 20, a(1) = 14."""

    def __init__(self):
        SloaneSequence.__init__(self, offset=0)
        self._b = []
        # NOTE(review): the original line was syntactically invalid:
        # `self._params = (, , 1, 1)` — the first two tuple entries were lost.
        # They are the initial terms; OEIS A083216 starts 20, 14, ... so they
        # are restored as (20, 14) — TODO confirm against the upstream source.
        self._params = (20, 14, 1, 1)
        self._precompute(2)

    def _repr_(self):
        return 'Second-order linear recurrence sequence with a(n) = a(n-1) + a(n-2).'
def plot_tensor(tensor):
    """Render a 2-D tensor as an image (with colorbar) and return the pixels as a numpy array."""
    plt.style.use('default')
    fig, ax = plt.subplots(figsize=(12, 3))
    image = ax.imshow(tensor, aspect='auto', origin='lower', interpolation='none')
    plt.colorbar(image, ax=ax)
    plt.tight_layout()
    # Force a draw so the canvas buffer is populated before capture.
    fig.canvas.draw()
    pixels = save_figure_to_numpy(fig)
    plt.close()
    return pixels
# NOTE(review): the decorator below appeared as a bare ".parametrize(...)" —
# the "@pytest.mark." prefix was evidently lost; restored so both cases run.
@pytest.mark.parametrize('cv', [5, 'split'])
def test_check_cv_same_split_no_random_state(cv: BaseCrossValidator) -> None:
    """check_cv without a random_state must yield identical splits on repeated iteration."""
    cv = check_cv(cv, random_state=None)
    (train_indices_1, train_indices_2) = ([], [])
    for (train_index, _) in cv.split(X):
        train_indices_1.append(train_index)
    for (train_index, _) in cv.split(X):
        train_indices_2.append(train_index)
    for i in range(cv.get_n_splits()):
        np.testing.assert_allclose(train_indices_1[i], train_indices_2[i])
class Speaker(nn.Module):
    """Embeds integer speaker labels (0 = padding) into `speaker_dim`-dim vectors with dropout."""

    def __init__(self, speaker_dim=20):
        super().__init__()
        embedding = nn.Embedding(3, speaker_dim, padding_idx=0)
        self.embeds = nn.Sequential(embedding, nn.Dropout(0.2))

    def forward(self, speaker_labels):
        labels = to_cuda(torch.tensor(speaker_labels))
        return self.embeds(labels)
def binary_eps_search(eps_lower_bound, eps_upper_bound, bab_function, quantization=0.001, mode='LB'):
    """Bisect epsilon between the bounds using `bab_function`, snapping the result to `quantization`.

    In 'LB' mode a timeout also counts as "verified" (conservative upper-bound
    shrink) and the final value is rounded up from the upper bound; in 'UB'
    mode only an explicit 'True' shrinks, and the result rounds down from the
    lower bound.
    """
    assert mode in ('LB', 'UB')
    print(f'Starting epsilon bounds: LB: {eps_lower_bound}, UB: {eps_upper_bound}')
    # Statuses that shrink the upper bound (invariant across iterations).
    shrink_statuses = ['True'] if mode == 'UB' else ['True', 'timeout']
    while eps_upper_bound - eps_lower_bound > quantization:
        midpoint = (eps_upper_bound + eps_lower_bound) / 2
        bab_status, bab_runtime = bab_function(midpoint)
        print(f'BaB status {bab_status}, BaB runtime {bab_runtime}')
        if bab_status in shrink_statuses:
            eps_upper_bound = midpoint
        else:
            eps_lower_bound = midpoint
        print(f'Current epsilon bounds: LB: {eps_lower_bound}, UB: {eps_upper_bound}')
    if mode == 'UB':
        return math.floor(eps_lower_bound / quantization) * quantization
    return math.ceil(eps_upper_bound / quantization) * quantization
def main():
    """Fine-tune a RoBERTa binary-entailment classifier on MNLI (3-way labels collapsed to 2).

    Parses CLI arguments, sets up (optionally distributed) CUDA devices and
    seeding, converts MNLI training examples to binary entailment, trains for
    `num_train_epochs`, and saves a checkpoint after each epoch.
    """
    # --- CLI arguments -----------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--task_name', default=None, type=str, required=True, help='The name of the task to train.')
    parser.add_argument('--cache_dir', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--data_label', default='', type=str, help='Where do you want to store the pre-trained models downloaded from s3')
    parser.add_argument('--max_seq_length', default=128, type=int, help='The maximum total input sequence length after WordPiece tokenization. \nSequences longer than this will be truncated, and sequences shorter \nthan this will be padded.')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--train_batch_size', default=16, type=int, help='Total batch size for training.')
    parser.add_argument('--eval_batch_size', default=64, type=int, help='Total batch size for eval.')
    parser.add_argument('--learning_rate', default=1e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--num_train_epochs', default=3.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% of training.')
    parser.add_argument('--no_cuda', action='store_true', help='Whether not to use CUDA when available')
    parser.add_argument('--local_rank', type=int, default=(- 1), help='local_rank for distributed training on gpus')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit float precision instead of 32-bit')
    parser.add_argument('--loss_scale', type=float, default=0, help='Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n0 (default value): dynamic loss scaling.\nPositive power of 2: static loss scaling value.\n')
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    processors = {'rte': RteProcessor}
    output_modes = {'rte': 'classification'}
    # --- Device / distributed setup ---------------------------------------
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        n_gpu = torch.cuda.device_count()
    else:
        # Distributed mode: one process per GPU under NCCL.
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        n_gpu = 1
        torch.distributed.init_process_group(backend='nccl')
    logger.info('device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}'.format(device, n_gpu, bool((args.local_rank != (- 1))), args.fp16))
    if (args.gradient_accumulation_steps < 1):
        raise ValueError('Invalid gradient_accumulation_steps parameter: {}, should be >= 1'.format(args.gradient_accumulation_steps))
    # Per-step batch size shrinks so the effective batch size stays constant.
    args.train_batch_size = (args.train_batch_size // args.gradient_accumulation_steps)
    # --- Seeding for reproducibility --------------------------------------
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if (n_gpu > 0):
        torch.cuda.manual_seed_all(args.seed)
    if ((not args.do_train) and (not args.do_eval)):
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')
    task_name = args.task_name.lower()
    if (task_name not in processors):
        raise ValueError(('Task not found: %s' % task_name))
    processor = processors[task_name]()
    output_mode = output_modes[task_name]
    print('args.data_label:', args.data_label)
    # --- Data: collapse MNLI's 3-way labels into binary entailment --------
    # NOTE(review): dataset paths are hard-coded to a specific machine.
    (threeway_train_examples, threeway_dev_examples) = processor.get_MNLI_train_and_dev('/export/home/Dataset/glue_data/MNLI/train.tsv', ['/export/home/Dataset/glue_data/MNLI/dev_mismatched.tsv', '/export/home/Dataset/glue_data/MNLI/dev_matched.tsv'])
    train_examples = []
    for ex in threeway_train_examples:
        if ((ex.label == 'neutral') or (ex.label == 'contradiction')):
            ex.label = 'not_entailment'
        train_examples.append(ex)
    label_list = ['entailment', 'not_entailment']
    num_labels = len(label_list)
    print('num_labels:', num_labels, 'training size:', len(train_examples))
    num_train_optimization_steps = None
    num_train_optimization_steps = (int(((len(train_examples) / args.train_batch_size) / args.gradient_accumulation_steps)) * args.num_train_epochs)
    if (args.local_rank != (- 1)):
        num_train_optimization_steps = (num_train_optimization_steps // torch.distributed.get_world_size())
    # --- Model and optimizer ----------------------------------------------
    model = RobertaForSequenceClassification(num_labels)
    tokenizer = RobertaTokenizer.from_pretrained(pretrain_model_dir, do_lower_case=args.do_lower_case)
    model.to(device)
    # Standard BERT-style weight decay: skip biases and LayerNorm parameters.
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in param_optimizer if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01}, {'params': [p for (n, p) in param_optimizer if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
    global_step = 0
    nb_tr_steps = 0
    tr_loss = 0
    max_test_acc = 0.0
    max_dev_acc = 0.0
    # --- Training loop ----------------------------------------------------
    if args.do_train:
        train_features = convert_examples_to_features(train_examples, label_list, args.max_seq_length, tokenizer, output_mode, cls_token_at_end=False, cls_token=tokenizer.cls_token, cls_token_segment_id=0, sep_token=tokenizer.sep_token, sep_token_extra=True, pad_on_left=False, pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0], pad_token_segment_id=0)
        logger.info('***** Running training *****')
        logger.info(' Num examples = %d', len(train_examples))
        logger.info(' Batch size = %d', args.train_batch_size)
        logger.info(' Num steps = %d', num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        train_sampler = RandomSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)
        iter_co = 0
        final_test_performance = 0.0
        for epoch_i in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            (nb_tr_examples, nb_tr_steps) = (0, 0)
            for (step, batch) in enumerate(tqdm(train_dataloader, desc='Iteration')):
                model.train()
                batch = tuple((t.to(device) for t in batch))
                (input_ids, input_mask, segment_ids, label_ids) = batch
                # NOTE(review): segment_ids are loaded but not passed to the model
                # (RoBERTa does not use token-type ids).
                logits = model(input_ids, input_mask)
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view((- 1), num_labels), label_ids.view((- 1)))
                if (n_gpu > 1):
                    loss = loss.mean()
                if (args.gradient_accumulation_steps > 1):
                    loss = (loss / args.gradient_accumulation_steps)
                loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                # NOTE(review): optimizer.step() runs every iteration, so
                # gradient accumulation only rescales the loss here — confirm
                # whether a step-skipping guard was intended.
                optimizer.step()
                optimizer.zero_grad()
                global_step += 1
                iter_co += 1
            '\n start evaluate on dev set after this epoch\n '
            # Save a checkpoint at the end of every epoch.
            model.eval()
            model_to_save = (model.module if hasattr(model, 'module') else model)
            store_transformers_models(model_to_save, tokenizer, '/export/home/Dataset/BERT_pretrained_mine/paragraph_entail/2021', (('MNLI_binary_epoch_' + str(epoch_i)) + '.pt'))
class TokenizerSettings():
    """Marker tokens used to tag query/document inputs for the tokenizer.

    NOTE(review): these annotated attributes wrapped in ``DefaultVal`` read
    like a dataclass body; a ``@dataclass``-style decorator may have been lost
    upstream — confirm against the original source.
    """
    # Vocabulary [unusedN] slots repurposed as query/document marker token ids.
    query_token_id: str = DefaultVal('[unused0]')
    doc_token_id: str = DefaultVal('[unused1]')
    # Human-readable marker strings prepended to queries/documents.
    query_token: str = DefaultVal('[Q]')
    doc_token: str = DefaultVal('[D]')
def add_arguments_lipschitz(parser):
    """Register the Lipschitz-constraint CLI flags on `parser`."""
    # Both flags are plain booleans, off by default.
    flag_specs = [
        ('--lip', {'action': 'store_true', 'help': '1-lipschitz network'}),
        ('--global-lip', {'action': 'store_true'}),
    ]
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)
class BiFpn():
    """EfficientDet-style bidirectional feature pyramid (TF/Keras port).

    Builds resample layers to extend the backbone features up to
    ``config.num_levels`` levels, then stacks ``fpn_cell_repeats`` BiFPN cells.
    """

    def __init__(self, config, feature_info, name):
        self.num_levels = config.num_levels
        norm_layer = (config.norm_layer or tf.keras.layers.BatchNormalization)
        # Translate the torch-style 'eps' kwarg into Keras's 'epsilon'.
        norm_kwargs = {**config.norm_kwargs}
        norm_kwargs['epsilon'] = norm_kwargs.pop('eps', 0.001)
        if config.norm_kwargs:
            norm_layer = partial(norm_layer, **norm_kwargs)
        act_layer = (get_act_layer(config.act_type) or _ACT_LAYER)
        fpn_config = (config.fpn_config or get_fpn_config(config.fpn_name, min_level=config.min_level, max_level=config.max_level))
        feat_sizes = get_feat_sizes(config.image_size, max_level=config.max_level)
        prev_feat_size = feat_sizes[config.min_level]
        self.resample = []
        for level in range(config.num_levels):
            feat_size = feat_sizes[(level + config.min_level)]
            if (level < len(feature_info)):
                # Level supplied by the backbone: record its channel count and size.
                in_chs = feature_info[level]['num_chs']
                feature_info[level]['size'] = feat_size
            else:
                # Level beyond the backbone: downsample from the previous level.
                # NOTE(review): `in_chs` is only bound by the branch above, so this
                # assumes the backbone provides at least one level — confirm.
                self.resample.append(ResampleFeatureMap(in_channels=in_chs, out_channels=config.fpn_channels, input_size=prev_feat_size, output_size=feat_size, pad_type=config.pad_type, downsample=config.downsample_type, upsample=config.upsample_type, norm_layer=norm_layer, apply_bn=config.apply_resample_bn, redundant_bias=config.redundant_bias, name=(name + f'/resample/{level}')))
                in_chs = config.fpn_channels
                feature_info.append(dict(num_chs=in_chs, size=feat_size))
            prev_feat_size = feat_size
        self.cell = []
        for rep in range(config.fpn_cell_repeats):
            logging.debug('building cell {}'.format(rep))
            fpn_layer = BiFpnLayer(feature_info=feature_info, feat_sizes=feat_sizes, fpn_config=fpn_config, fpn_channels=config.fpn_channels, num_levels=config.num_levels, pad_type=config.pad_type, downsample=config.downsample_type, upsample=config.upsample_type, norm_layer=norm_layer, act_layer=act_layer, separable_conv=config.separable_conv, apply_resample_bn=config.apply_resample_bn, pre_act=(not config.conv_bn_relu_pattern), redundant_bias=config.redundant_bias, name=(name + f'/cell/{rep}'))
            self.cell.append(fpn_layer)
            # Each cell rewrites the feature metadata consumed by the next one.
            feature_info = fpn_layer.feature_info

    def __call__(self, x: List[tf.Tensor]):
        """Extend `x` with resampled levels, then run every BiFPN cell in sequence."""
        for resample in self.resample:
            x.append(resample(x[(- 1)]))
        for _cell in self.cell:
            x = _cell(x)
        return x
class SKUp(nn.Module):
    """Upsample x1, pad it to x2's spatial size, concatenate, then apply a selective-kernel conv."""

    def __init__(self, kernel_size, padding, bias, reduction, in_channels, out_channels, bilinear=True):
        super().__init__()
        # Bilinear interpolation is parameter-free; the alternative is a learned transposed conv.
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            half = in_channels // 2
            self.up = nn.ConvTranspose2d(half, half, kernel_size=2, stride=2)
        self.conv = SelectiveConv(kernel_size, padding, bias, reduction, in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Pad x1 so its H/W match x2 before concatenation (handles odd sizes).
        diff_h = torch.tensor([x2.size()[2] - x1.size()[2]])
        diff_w = torch.tensor([x2.size()[3] - x1.size()[3]])
        x1 = F.pad(x1, [diff_w // 2, diff_w - diff_w // 2, diff_h // 2, diff_h - diff_h // 2])
        merged = torch.cat([x2, x1], dim=1)
        return self.conv(merged)
def test_recall_macro_2d_list():
    """Macro-averaged recall on nested-list inputs, checked in both argument orders."""
    labels = [[1, 2], [1, 2]]
    predictions = [[1, 5, 6, 7], [1, 2, 3, 4]]
    assert recall(labels, predictions, 'macro') == 0.75
    # Swapping the arguments changes the denominator, hence a different score.
    assert recall(predictions, labels, 'macro') == 0.375
def getcolumns(stream):
    """Run `stream` through a single-stage pipeline that selects its columns."""
    pipeline = Pipeline()
    pipeline.append(ColumnsSelect())
    return pipeline(stream)
class _Root():
def parent(n=1):
return _root_fb.root.parent(n)
def _loop_range():
return _root_fb.root._loop_range()
def _get_children():
return _root_fb.root._get_children()
def deactivate_all():
warning("'ti.root.deactivate_all()' would deactivate all finalized snodes.")
deactivate_all_snodes()
def shape(self):
return _root_fb.root.shape
def _id(self):
return _root_fb.root._id
def __getattr__(self, item):
return getattr(_root_fb, item)
def __repr__(self):
return 'ti.root' |
class DistributedGivenIterationSampler(Sampler):
    """Sampler that pre-builds exactly ``total_iter * batch_size`` indices for one rank.

    Every rank generates the identical seeded global shuffle and then takes its
    own contiguous slice, so shards are disjoint; ``last_iter`` lets a resumed
    job skip batches it already consumed. The sampler may only be iterated once.
    """

    def __init__(self, dataset, total_iter, batch_size, world_size=None, rank=None, last_iter=(- 1)):
        if world_size is None:
            world_size = dist.get_world_size()
        if rank is None:
            rank = dist.get_rank()
        assert rank < world_size
        self.dataset = dataset
        self.total_iter = total_iter
        self.batch_size = batch_size
        self.world_size = world_size
        self.rank = rank
        self.last_iter = last_iter
        self.total_size = self.total_iter * self.batch_size
        self.indices = self.gen_new_list()
        self.call = 0

    def __iter__(self):
        if self.call != 0:
            raise RuntimeError('this sampler is not designed to be called more than once!!')
        self.call = 1
        # Skip whatever a resumed run already consumed.
        return iter(self.indices[(self.last_iter + 1) * self.batch_size:])

    def gen_new_list(self):
        # Fixed seed: all ranks must produce the identical global permutation.
        np.random.seed(0)
        all_size = self.total_size * self.world_size
        indices = np.arange(len(self.dataset))
        indices = indices[:all_size]
        # Repeat the dataset enough times to cover every rank, then trim.
        num_repeat = (all_size - 1) // indices.shape[0] + 1
        indices = np.tile(indices, num_repeat)
        indices = indices[:all_size]
        np.random.shuffle(indices)
        # Carve out this rank's contiguous slice of the global shuffle.
        begin = self.total_size * self.rank
        indices = indices[begin:begin + self.total_size]
        assert len(indices) == self.total_size
        return indices

    def __len__(self):
        return self.total_size
class DataBundle():
    """Container pairing named DataSets with named Vocabularies.

    Most mutators return ``self`` so calls can be chained; field-targeting
    operations accept ``ignore_miss_dataset`` to either skip or raise on
    datasets that lack the field.

    NOTE(review): ``num_dataset``/``num_vocab`` are restored as properties —
    decorators were evidently stripped from this file, and ``__repr__``
    formats ``self.num_dataset`` without calling it, which printed a bound
    method object rather than the count.
    """

    def __init__(self, vocabs: dict=None, datasets: dict=None):
        # Default to fresh dicts; never share a mutable default between instances.
        self.vocabs = (vocabs or {})
        self.datasets = (datasets or {})

    def set_vocab(self, vocab, field_name):
        """Register `vocab` under `field_name`. Returns self for chaining."""
        assert isinstance(vocab, Vocabulary), 'Only fastNLP.Vocabulary supports.'
        self.vocabs[field_name] = vocab
        return self

    def set_dataset(self, dataset, name: str):
        """Register `dataset` under `name`. Returns self for chaining."""
        assert isinstance(dataset, DataSet), 'Only fastNLP.DataSet supports.'
        self.datasets[name] = dataset
        return self

    def get_dataset(self, name: str) -> DataSet:
        """Return the dataset registered as `name`; raise KeyError (after logging) if absent."""
        if (name in self.datasets.keys()):
            return self.datasets[name]
        else:
            error_msg = f'DataBundle do NOT have DataSet named {name}. It should be one of {self.datasets.keys()}.'
            logger.error(error_msg)
            raise KeyError(error_msg)

    def delete_dataset(self, name: str):
        """Remove the dataset `name` if present (no-op otherwise). Returns self."""
        self.datasets.pop(name, None)
        return self

    def get_vocab(self, field_name: str) -> Vocabulary:
        """Return the vocabulary for `field_name`; raise KeyError (after logging) if absent."""
        if (field_name in self.vocabs.keys()):
            return self.vocabs[field_name]
        else:
            error_msg = f'DataBundle do NOT have Vocabulary named {field_name}. It should be one of {self.vocabs.keys()}.'
            logger.error(error_msg)
            raise KeyError(error_msg)

    def delete_vocab(self, field_name: str):
        """Remove the vocabulary for `field_name` if present. Returns self."""
        self.vocabs.pop(field_name, None)
        return self

    @property
    def num_dataset(self):
        """Number of datasets held in the bundle."""
        return len(self.datasets)

    @property
    def num_vocab(self):
        """Number of vocabularies held in the bundle."""
        return len(self.vocabs)

    def set_input(self, *field_names, flag=True, use_1st_ins_infer_dim_type=True, ignore_miss_dataset=True):
        """Mark `field_names` as input fields in every dataset. Returns self."""
        for field_name in field_names:
            for (name, dataset) in self.datasets.items():
                if ((not ignore_miss_dataset) and (not dataset.has_field(field_name))):
                    raise KeyError(f'Field:{field_name} was not found in DataSet:{name}')
                if (not dataset.has_field(field_name)):
                    continue
                else:
                    dataset.set_input(field_name, flag=flag, use_1st_ins_infer_dim_type=use_1st_ins_infer_dim_type)
        return self

    def set_target(self, *field_names, flag=True, use_1st_ins_infer_dim_type=True, ignore_miss_dataset=True):
        """Mark `field_names` as target fields in every dataset. Returns self."""
        for field_name in field_names:
            for (name, dataset) in self.datasets.items():
                if ((not ignore_miss_dataset) and (not dataset.has_field(field_name))):
                    raise KeyError(f'Field:{field_name} was not found in DataSet:{name}')
                if (not dataset.has_field(field_name)):
                    continue
                else:
                    dataset.set_target(field_name, flag=flag, use_1st_ins_infer_dim_type=use_1st_ins_infer_dim_type)
        return self

    def set_pad_val(self, field_name, pad_val, ignore_miss_dataset=True):
        """Set the padding value of `field_name` in every dataset that has it. Returns self."""
        for (name, dataset) in self.datasets.items():
            if dataset.has_field(field_name=field_name):
                dataset.set_pad_val(field_name=field_name, pad_val=pad_val)
            elif (not ignore_miss_dataset):
                raise KeyError(f'{field_name} not found DataSet:{name}.')
        return self

    def set_ignore_type(self, *field_names, flag=True, ignore_miss_dataset=True):
        """Toggle type inference for `field_names` in every dataset. Returns self."""
        for (name, dataset) in self.datasets.items():
            for field_name in field_names:
                if dataset.has_field(field_name=field_name):
                    dataset.set_ignore_type(field_name, flag=flag)
                elif (not ignore_miss_dataset):
                    raise KeyError(f'{field_name} not found DataSet:{name}.')
        return self

    def copy_field(self, field_name, new_field_name, ignore_miss_dataset=True):
        """Copy `field_name` to `new_field_name` in every dataset that has it. Returns self."""
        for (name, dataset) in self.datasets.items():
            if dataset.has_field(field_name=field_name):
                dataset.copy_field(field_name=field_name, new_field_name=new_field_name)
            elif (not ignore_miss_dataset):
                raise KeyError(f'{field_name} not found DataSet:{name}.')
        return self

    def rename_field(self, field_name, new_field_name, ignore_miss_dataset=True, rename_vocab=True):
        """Rename a field in every dataset (and, optionally, its vocabulary key). Returns self."""
        for (name, dataset) in self.datasets.items():
            if dataset.has_field(field_name=field_name):
                dataset.rename_field(field_name=field_name, new_field_name=new_field_name)
            elif (not ignore_miss_dataset):
                raise KeyError(f'{field_name} not found DataSet:{name}.')
        if rename_vocab:
            if (field_name in self.vocabs):
                self.vocabs[new_field_name] = self.vocabs.pop(field_name)
        return self

    def delete_field(self, field_name, ignore_miss_dataset=True, delete_vocab=True):
        """Delete a field from every dataset (and, optionally, its vocabulary). Returns self."""
        for (name, dataset) in self.datasets.items():
            if dataset.has_field(field_name=field_name):
                dataset.delete_field(field_name=field_name)
            elif (not ignore_miss_dataset):
                raise KeyError(f'{field_name} not found DataSet:{name}.')
        if delete_vocab:
            if (field_name in self.vocabs):
                self.vocabs.pop(field_name)
        return self

    def iter_datasets(self) -> Union[(str, DataSet)]:
        """Yield (name, dataset) pairs."""
        for (name, dataset) in self.datasets.items():
            (yield (name, dataset))

    def get_dataset_names(self) -> List[str]:
        """Return the registered dataset names."""
        return list(self.datasets.keys())

    def get_vocab_names(self) -> List[str]:
        """Return the registered vocabulary field names."""
        return list(self.vocabs.keys())

    def iter_vocabs(self) -> Union[(str, Vocabulary)]:
        """Yield (field_name, vocab) pairs."""
        for (field_name, vocab) in self.vocabs.items():
            (yield (field_name, vocab))

    def apply_field(self, func, field_name: str, new_field_name: str, ignore_miss_dataset=True, **kwargs):
        """Apply `func` to `field_name` in every dataset, storing the result. Returns self."""
        for (name, dataset) in self.datasets.items():
            if dataset.has_field(field_name=field_name):
                dataset.apply_field(func=func, field_name=field_name, new_field_name=new_field_name, **kwargs)
            elif (not ignore_miss_dataset):
                raise KeyError(f'{field_name} not found DataSet:{name}.')
        return self

    def apply_field_more(self, func, field_name, modify_fields=True, ignore_miss_dataset=True, **kwargs):
        """Like apply_field but `func` may produce several fields; returns per-dataset results."""
        res = {}
        for (name, dataset) in self.datasets.items():
            if dataset.has_field(field_name=field_name):
                res[name] = dataset.apply_field_more(func=func, field_name=field_name, modify_fields=modify_fields, **kwargs)
            elif (not ignore_miss_dataset):
                raise KeyError(f'{field_name} not found DataSet:{name} .')
        return res

    def apply(self, func, new_field_name: str, **kwargs):
        """Apply `func` to whole instances in every dataset. Returns self."""
        for (name, dataset) in self.datasets.items():
            dataset.apply(func, new_field_name=new_field_name, **kwargs)
        return self

    def apply_more(self, func, modify_fields=True, **kwargs):
        """Instance-level multi-field apply; returns per-dataset results."""
        res = {}
        for (name, dataset) in self.datasets.items():
            res[name] = dataset.apply_more(func, modify_fields=modify_fields, **kwargs)
        return res

    def add_collate_fn(self, fn, name=None):
        """Attach collate function `fn` (under `name`) to every dataset."""
        for (_, dataset) in self.datasets.items():
            dataset.add_collate_fn(fn=fn, name=name)

    def delete_collate_fn(self, name=None):
        """Remove the collate function `name` from every dataset."""
        for (_, dataset) in self.datasets.items():
            dataset.delete_collate_fn(name=name)

    def __repr__(self):
        _str = ''
        if len(self.datasets):
            _str += 'In total {} datasets:\n'.format(self.num_dataset)
            for (name, dataset) in self.datasets.items():
                _str += '\t{} has {} instances.\n'.format(name, len(dataset))
        if len(self.vocabs):
            _str += 'In total {} vocabs:\n'.format(self.num_vocab)
            for (name, vocab) in self.vocabs.items():
                _str += '\t{} has {} entries.\n'.format(name, len(vocab))
        return _str
def binary_loss_array(recon_x, x, z_mu, z_var, z_0, z_k, ldj, beta=1.0):
    """Per-sample binary VAE-with-flows loss: reconstruction BCE + beta * (KL term - log-det-Jacobian)."""
    batch_size = x.size(0)
    # Collapse any extra log-det-Jacobian dims down to one value per sample.
    if len(ldj.size()) > 1:
        ldj = ldj.view(ldj.size(0), -1).sum(-1)
    flat_x = x.view(batch_size, -1)
    flat_recon = recon_x.view(batch_size, -1)
    bce = -log_bernoulli(flat_x, flat_recon, dim=1)
    log_p_zk = log_normal_standard(z_k, dim=1)
    log_q_z0 = log_normal_diag(z_0, mean=z_mu, log_var=z_var.log(), dim=1)
    kl_term = log_q_z0 - log_p_zk
    return bce + beta * (kl_term - ldj)
def main_canonical_360(opt):
    """Render 360-degree turntable views of a canonical-pose NeRF avatar.

    Loads an instant-NSR network from ``opt.weights_path`` and renders two
    circular camera paths ('body' and 'head'), saving per-frame PNGs, an
    animated GIF per path and, with ``opt.log_extra``, depth maps plus the
    camera intrinsic/extrinsic matrices as pickles.
    """
    nerf = instant_nsr.NeRFNetwork()
    nerf.load_state_dict(torch.load(opt.weights_path))
    # Body path orbits the origin; head path orbits a point shifted up by
    # CAN_HEAD_OFFSET along +Y, with its own (closer) camera distance.
    (center, up) = (np.array([0.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
    (body_poses, _) = render_utils.default_360_path(center, up, CANONICAL_CAMERA_DIST_VAL, opt.trajectory_resolution)
    head_offset = np.array([0.0, 1.0, 0.0]).astype(np.float32)
    head_offset = (head_offset * CAN_HEAD_OFFSET)
    (head_poses, _) = render_utils.default_360_path((center + head_offset), up, CAN_HEAD_CAMERA_DIST, opt.trajectory_resolution)
    for (pose_name, render_poses) in zip(['body', 'head'], [body_poses, head_poses]):
        log_extrinsics = []
        log_imgs = []
        log_depths = []
        for (i, rp) in enumerate(render_poses):
            # Pinhole camera at the canonical zoom, resized to opt.render_size.
            can_cap = ResizedPinholeCapture(PinholeCamera(opt.render_w, opt.render_h, (CANONICAL_ZOOM_FACTOR * opt.render_w), (CANONICAL_ZOOM_FACTOR * opt.render_h), (opt.render_w / 2.0), (opt.render_h / 2.0)), rp, tgt_size=opt.render_size)
            if (opt.implicit_model == 'instant_nsr'):
                (rays_o, rays_d) = render_utils.cap2rays(can_cap)
                (rays_o, rays_d) = (rays_o.cuda(), rays_d.cuda())
                nerf = nerf.cuda().eval()
                (out, _, extra_out) = render_utils.render_instantnsr_naive(nerf, rays_o, rays_d, opt.batch_size, requires_grad=False, bkg_key=(WHITE_BKG if opt.white_bkg else BLACK_BKG), return_torch=True, perturb=False, return_raw=True, render_can=True)
                out = out.detach().cpu().numpy()
                # Rays come back flattened; reshape to an (H, W, C) image.
                out = einops.repeat(out, '(h w) c -> h w c', h=opt.render_h, w=opt.render_w)
            if opt.log_extra:
                depth = extra_out['depth'].detach().cpu().numpy()
                depth = einops.repeat(depth, '(h w) 1 -> h w 1', h=opt.render_h, w=opt.render_w)
                # Near-camera (likely background/invalid) depths are clamped
                # before normalization and blacked out after colormapping.
                mask = (depth < 0.4)
                depth[mask] = 0.45
                depth = ((depth - depth.min()) / (depth.max() - depth.min()))
                depth = (depth * 255)
                depth = depth.astype(np.uint8)
                depth = cv2.applyColorMap(depth, cv2.COLORMAP_JET)
                depth[mask.repeat(3, axis=2)] = 0
                log_depths.append(depth)
                save_path_depth = os.path.join('./demo', 'canonical_360', opt.exp_name, f'{opt.exp_name}_{pose_name}_can_{str(i).zfill(4)}_depth.png')
                if (not os.path.isdir(os.path.dirname(save_path_depth))):
                    os.makedirs(os.path.dirname(save_path_depth))
                imageio.imsave(save_path_depth, depth)
                log_extrinsics.append(can_cap.cam_pose.camera_to_world)
            save_path = os.path.join('./demo', 'canonical_360', opt.exp_name, f'{opt.exp_name}_{pose_name}_can_{str(i).zfill(4)}.png')
            if (not os.path.isdir(os.path.dirname(save_path))):
                os.makedirs(os.path.dirname(save_path))
            out = utils.integerify_img(out)
            log_imgs.append(out)
            imageio.imsave(save_path, out)
            print(f'image saved: {save_path}')
        imageio.mimsave(os.path.join('./demo', f'canonical_360', opt.exp_name, f'{opt.exp_name}_{pose_name}_can.gif'), log_imgs, fps=15)
        print(f"gif saved: {os.path.join('./demo', 'canonical_360', opt.exp_name, f'{opt.exp_name}_{pose_name}_can.gif')}")
        if opt.log_extra:
            # Intrinsics are constant across the path, so only the last
            # capture's matrix is saved; extrinsics are stacked per frame.
            intrisic_path = os.path.join('./demo', 'canonical_360', opt.exp_name, f'{opt.exp_name}_{pose_name}_intrinsic.pkl')
            with open(intrisic_path, 'wb') as f:
                pickle.dump(can_cap.intrinsic_matrix, f)
            extrinsic_path = os.path.join('./demo', 'canonical_360', opt.exp_name, f'{opt.exp_name}_{pose_name}_extrinsic.pkl')
            all_extrinsics = np.stack(log_extrinsics, axis=0)
            with open(extrinsic_path, 'wb') as f:
                pickle.dump(all_extrinsics, f)
def indent_level(code, level=0):
    """Return ``code`` prefixed with one space per indentation ``level``.

    Replaces the original character-by-character concatenation loop with a
    single string multiplication: same result, without building ``level``
    intermediate strings.
    """
    return (' ' * level) + code
def build_model(input_shape, n_cl_out=1, use_upsampling=False, dropout=0.2, print_summary=True, seed=816, depth=5, dropout_at=(2, 3), initial_filters=16, batch_norm=True, **kwargs):
    """Build a 3D U-Net for BraTS MR segmentation (sigmoid output).

    Args:
        input_shape: spatial+channel shape of the input volume; the first
            spatial dimension must be divisible by 2**depth so pooling and
            transposed convs line up.
        n_cl_out: number of output channels (classes).
        use_upsampling: UpSampling3D instead of Conv3DTranspose on the way up.
        dropout: dropout rate applied at the encoder levels in ``dropout_at``.
        print_summary: accepted but unused here.  # NOTE(review): dead parameter
        seed: seed for the he_uniform kernel initializer.
        depth: number of encoder levels.
        dropout_at: encoder level indices that get a Dropout layer.
        initial_filters: filters at the first level; doubled per level.
        batch_norm: add BatchNormalization after each level's second conv.

    Returns:
        tf.keras.Model mapping the input volume to a sigmoid prediction map.
    """
    if ((input_shape[0] % (2 ** depth)) > 0):
        raise ValueError(f'Crop dimension must be a multiple of 2^(depth of U-Net) = {(2 ** depth)}')
    inputs = tf.keras.layers.Input(input_shape, name='brats_mr_image')
    activation = tf.keras.activations.relu
    params = {'kernel_size': (3, 3, 3), 'activation': activation, 'padding': 'same', 'kernel_initializer': tf.keras.initializers.he_uniform(seed=seed)}
    convb_layers = {}  # per-level second-conv outputs, keyed by name, for skip connections
    net = inputs
    filters = initial_filters
    # Encoder: conv-conv(-BN) blocks with max pooling between levels.
    for i in range(depth):
        name = f'conv{(i + 1)}a'
        net = tf.keras.layers.Conv3D(name=name, filters=filters, **params)(net)
        if (i in dropout_at):
            net = tf.keras.layers.Dropout(dropout)(net)
        name = f'conv{(i + 1)}b'
        net = tf.keras.layers.Conv3D(name=name, filters=filters, **params)(net)
        if batch_norm:
            net = tf.keras.layers.BatchNormalization()(net)
        convb_layers[name] = net
        if (i != (depth - 1)):
            name = f'pool{(i + 1)}'
            net = tf.keras.layers.MaxPooling3D(name=name, pool_size=(2, 2, 2))(net)
        filters *= 2
    # Undo the final doubling: the bottleneck level keeps its filter count.
    filters //= 2
    # Decoder: upsample/transpose-conv, concatenate the matching encoder
    # output (skip connection), then two convs; filters halve per level.
    for i in range((depth - 1)):
        if use_upsampling:
            up = tf.keras.layers.UpSampling3D(name=f'up{((depth + i) + 1)}', size=(2, 2, 2))(net)
        else:
            up = tf.keras.layers.Conv3DTranspose(name=f'transConv{((depth + i) + 1)}', filters=filters, kernel_size=(2, 2, 2), strides=(2, 2, 2), padding='same')(net)
        net = tf.keras.layers.concatenate([up, convb_layers[f'conv{((depth - i) - 1)}b']], axis=(- 1))
        net = tf.keras.layers.Conv3D(name=f'conv{((depth + i) + 1)}a', filters=filters, **params)(net)
        net = tf.keras.layers.Conv3D(name=f'conv{((depth + i) + 1)}b', filters=filters, **params)(net)
        filters //= 2
    # 1x1x1 conv head with sigmoid for per-voxel class probabilities.
    net = tf.keras.layers.Conv3D(name='prediction', filters=n_cl_out, kernel_size=(1, 1, 1), activation='sigmoid')(net)
    model = tf.keras.models.Model(inputs=[inputs], outputs=[net])
    return model
def __scale_width(img, target_width, crop_width, method=Image.BICUBIC):
    """Resize ``img`` so its width equals ``target_width``, preserving aspect
    ratio.  Returns the image unchanged when it already has that width and is
    at least ``crop_width`` tall (so a later crop still fits)."""
    ow, oh = img.size
    if ow == target_width and oh >= crop_width:
        return img
    new_h = int((target_width * oh) / ow)
    return img.resize((target_width, new_h), method)
def check(args):
    """Estimate win rate for one fixed landlord hand over many random deals.

    Shuffles the module-level ``deck`` once to fix the landlord's 17 cards,
    then generates 10*args.games random arrangements of the remaining cards,
    evaluating in 10 cumulative chunks.  After each chunk the file at
    ``args.eval_data`` is overwritten with that chunk's deals and the running
    win rate is appended to ``dic`` (printed at the end).
    """
    # `deck` and `evaluate` are module-level; copy so the global deck is untouched.
    _deck = deck.copy()
    res = []
    np.random.shuffle(_deck)
    landlord = _deck[:17]
    landlord.sort()
    other = _deck[17:]
    dic = {tuple(landlord): []}  # fixed hand -> cumulative win rates per chunk
    for _ in range((10 * args.games)):
        np.random.shuffle(other)
        # First three of `other` double as the face-up landlord cards.
        card_play_data = {'landlord': (landlord + other[:3]), 'landlord_up': other[3:20], 'landlord_down': other[20:37], 'three_landlord_cards': other[:3]}
        for key in card_play_data:
            card_play_data[key].sort()
        res.append(card_play_data)
    output_pickle = args.eval_data
    print('saving pickle file...')
    last = 0
    win_bynow = 0
    # Evaluate in 10 chunks of args.games deals; win rate is cumulative.
    for num in [(args.games * i) for i in range(1, 11)]:
        with open(output_pickle, 'wb') as g:
            pickle.dump(res[last:num], g, pickle.HIGHEST_PROTOCOL)
        win_nums = evaluate(args.landlord, args.landlord_up, args.landlord_down, args.eval_data, args.num_workers)
        win_bynow += win_nums
        win_rate = (win_bynow / num)
        last = num
        dic[tuple(landlord)].append(win_rate)
    print(dic)
class InputExample(object):
    """Plain container for one labelled example: id, raw text, target span, labels."""

    def __init__(self, id_, text, span, labels):
        # `id_` avoids shadowing the builtin `id`; stored as `.id`.
        self.id = id_
        self.text = text
        self.span = span
        self.labels = labels
((device_cc() < 80), 'Device compute capability is insufficient for SM80 tests.')
class GemmF32nF32nF32nTensorOpF32Sm80(unittest.TestCase):
    """SM80 tensor-op f32 GEMM tests: each builds a GemmOperationUniversal
    with a specific math instruction / tile shape and runs test_all_gemm.

    NOTE(review): the line above this class looks like a mangled
    ``@unittest.skipIf(device_cc() < 80, ...)`` decorator — confirm upstream.
    """

    def test_SM80_Device_Gemm_f32t_f32n_f32t_tensor_op_bf16_f32_128x128x32_64x64x32(self):
        """f32 GEMM using the fast-bf16 multiply-add path, 128x128x32 tile."""
        math_inst = MathInstruction(instruction_shape=[16, 8, 8], element_a=cutlass.float32, element_b=cutlass.float32, element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp, math_operation=MathOperation.multiply_add_fast_bf16)
        tile_description = TileDescription(threadblock_shape=[128, 128, 32], stages=3, warp_count=[2, 2, 1], math_instruction=math_inst)
        A = TensorDescription(element=cutlass.float32, layout=cutlass.RowMajor, alignment=4)
        B = TensorDescription(element=cutlass.float32, layout=cutlass.ColumnMajor, alignment=4)
        C = TensorDescription(element=cutlass.float32, layout=cutlass.RowMajor, alignment=4)
        element_epilogue = cutlass.float32
        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
        swizzling_functor = cutlass.IdentitySwizzle1
        operation = GemmOperationUniversal(arch=80, tile_description=tile_description, A=A, B=B, C=C, epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
        self.assertTrue(test_all_gemm(operation, 'universal'))

    def test_SM80_Device_Gemm_f32n_f32n_f32t_tensor_op_f32_128x128x32_64x64x32(self):
        """f32 GEMM with the plain multiply-add path, 128x128x32 tile."""
        math_inst = MathInstruction(instruction_shape=[16, 8, 8], element_a=cutlass.float32, element_b=cutlass.float32, element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp, math_operation=MathOperation.multiply_add)
        tile_description = TileDescription(threadblock_shape=[128, 128, 32], stages=3, warp_count=[2, 2, 1], math_instruction=math_inst)
        A = TensorDescription(element=cutlass.float32, layout=cutlass.ColumnMajor, alignment=4)
        B = TensorDescription(element=cutlass.float32, layout=cutlass.ColumnMajor, alignment=4)
        C = TensorDescription(element=cutlass.float32, layout=cutlass.RowMajor, alignment=4)
        element_epilogue = cutlass.float32
        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
        swizzling_functor = cutlass.IdentitySwizzle1
        operation = GemmOperationUniversal(arch=80, tile_description=tile_description, A=A, B=B, C=C, epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
        self.assertTrue(test_all_gemm(operation, 'universal'))

    def test_SM80_Device_Gemm_f32n_f32n_f32t_tensor_op_fast_accurate_f32_64x64x32_32x32x32(self):
        """f32 GEMM with the fast-accurate-f32 multiply-add path, 64x64x32 tile."""
        math_inst = MathInstruction(instruction_shape=[16, 8, 8], element_a=cutlass.float32, element_b=cutlass.float32, element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp, math_operation=MathOperation.multiply_add_fast_f32)
        tile_description = TileDescription(threadblock_shape=[64, 64, 32], stages=3, warp_count=[2, 2, 1], math_instruction=math_inst)
        A = TensorDescription(element=cutlass.float32, layout=cutlass.ColumnMajor, alignment=4)
        B = TensorDescription(element=cutlass.float32, layout=cutlass.ColumnMajor, alignment=4)
        C = TensorDescription(element=cutlass.float32, layout=cutlass.RowMajor, alignment=4)
        element_epilogue = cutlass.float32
        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, element_epilogue)
        swizzling_functor = cutlass.IdentitySwizzle1
        operation = GemmOperationUniversal(arch=80, tile_description=tile_description, A=A, B=B, C=C, epilogue_functor=epilogue_functor, swizzling_functor=swizzling_functor)
        self.assertTrue(test_all_gemm(operation, 'universal'))
def warning(position, message, level=0):
    """Emit a compile warning unless ``level`` is below the global LEVEL.

    With ``Options.warning_errors`` set (and a position available) the
    warning is promoted to an error.  Otherwise the formatted warning is
    written to the listing/echo files when configured, and the
    CompileWarning object is returned.
    """
    if level < LEVEL:
        return
    if Options.warning_errors and position:
        return error(position, message)
    warn = CompileWarning(position, message)
    text = 'warning: %s\n' % warn
    for sink in (listing_file, echo_file):
        if sink:
            sink.write(text)
    return warn
class Field():
    """Descriptor for a prompt field with a prefix and a description.

    NOTE(review): the keyword-only ``input`` argument is accepted but never
    stored — preserved here as-is; confirm this is intentional upstream.
    """

    def __init__(self, *, prefix=None, desc=None, input, format=None):
        self.prefix = prefix
        self.desc = desc
        self.format = format

    def finalize(self, key, inferred_prefix):
        """Fill in defaults: derive the prefix from ``inferred_prefix`` and
        the description from ``key`` when they were not given explicitly."""
        if self.prefix is None:
            self.prefix = inferred_prefix + ':'
        if self.desc is None:
            self.desc = f'${{{key}}}'

    def __repr__(self):
        return f'{self.__class__.__name__}(prefix={self.prefix}, desc={self.desc})'

    def __eq__(self, __value: object) -> bool:
        # Structural equality over all stored attributes.
        return self.__dict__ == __value.__dict__
def sort_dict_keys_by_vals_with_conditions(d: Dict[int, float], condition_func: Callable[[Tuple[int, float]], bool]) -> List[int]:
    """Return the keys of ``d`` ordered by ascending value, keeping only
    the (key, value) pairs for which ``condition_func`` returns true.

    Fix: dropped the redundant ``list()`` wrapper around ``d.items()`` —
    ``sorted`` accepts any iterable directly (same result, one less copy).
    """
    sorted_items = sorted(d.items(), key=lambda pair: pair[1])
    return [pair[0] for pair in sorted_items if condition_func(pair)]
def train(configs):
    """Run each (output_mode, config_file_index) training job via subprocess,
    skipping jobs whose output HDF5 file already exists."""
    print('Configurations:', len(configs))
    for output_mode, config_file_index in configs:
        args, filename = get_args_and_hdf5_file(output_mode, config_file_index)
        if os.path.exists(filename):
            print('Skipping test', filename)
            continue
        print('\n\nRun', filename)
        # check=True propagates a failing job as CalledProcessError.
        subprocess.run(args, check=True)
    print('\n\nDONE!')
class PoseResNet(nn.Module):
    """ResNet backbone + three deconvolution layers producing upsampled
    feature maps for CenterNet-style heads.

    NOTE(review): ``__init__`` calls super().__init__ twice — first with
    ``(heads, head_convs, 1, 64)`` (which nn.Module's __init__ does not
    accept) and then again with no args.  This looks like a leftover from a
    different base class (e.g. a BaseModel); confirm the intended base.
    """

    def __init__(self, num_layers, heads, head_convs, _):
        # NOTE(review): see class docstring — this argful super call is suspect.
        super(PoseResNet, self).__init__(heads, head_convs, 1, 64)
        (block, layers) = resnet_spec[num_layers]
        self.inplanes = 64  # running channel count consumed by _make_layer/_make_deconv_layer
        self.deconv_with_bias = False
        self.heads = heads
        super(PoseResNet, self).__init__()
        # Standard ResNet stem: 7x7 stride-2 conv, BN, ReLU, 3x3 stride-2 pool.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # Three 4x4 stride-2 deconvs upsample the stride-32 features to stride-4.
        self.deconv_layers = self._make_deconv_layer(3, [256, 256, 256], [4, 4, 4])
        self.init_weights(num_layers, pretrained=True)

    def img2feats(self, x):
        """Run the backbone + deconv stack; returns a one-element feature list."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.deconv_layers(x)
        return [x]

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack ``blocks`` residual blocks; the first may downsample."""
        downsample = None
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # 1x1 conv to match the residual's shape/channels.
            downsample = nn.Sequential(nn.Conv2d(self.inplanes, (planes * block.expansion), kernel_size=1, stride=stride, bias=False), nn.BatchNorm2d((planes * block.expansion), momentum=BN_MOMENTUM))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = (planes * block.expansion)
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def _get_deconv_cfg(self, deconv_kernel, index):
        """Padding/output-padding pairs for kernel sizes 4, 3 and 2.

        NOTE(review): no ``else`` branch — any other kernel size raises
        UnboundLocalError on ``padding``; confirm only 2/3/4 are ever passed.
        """
        if (deconv_kernel == 4):
            padding = 1
            output_padding = 0
        elif (deconv_kernel == 3):
            padding = 1
            output_padding = 1
        elif (deconv_kernel == 2):
            padding = 0
            output_padding = 0
        return (deconv_kernel, padding, output_padding)

    def _make_deconv_layer(self, num_layers, num_filters, num_kernels):
        """Build ``num_layers`` (ConvTranspose2d, BN, ReLU) upsampling stages."""
        assert (num_layers == len(num_filters)), 'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        assert (num_layers == len(num_kernels)), 'ERROR: num_deconv_layers is different len(num_deconv_filters)'
        layers = []
        for i in range(num_layers):
            (kernel, padding, output_padding) = self._get_deconv_cfg(num_kernels[i], i)
            planes = num_filters[i]
            layers.append(nn.ConvTranspose2d(in_channels=self.inplanes, out_channels=planes, kernel_size=kernel, stride=2, padding=padding, output_padding=output_padding, bias=self.deconv_with_bias))
            layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
            layers.append(nn.ReLU(inplace=True))
            self.inplanes = planes
        return nn.Sequential(*layers)

    def init_weights(self, num_layers, pretrained=True):
        """Init deconv/head weights and load ImageNet-pretrained backbone weights."""
        if pretrained:
            for (_, m) in self.deconv_layers.named_modules():
                if isinstance(m, nn.ConvTranspose2d):
                    nn.init.normal_(m.weight, std=0.001)
                    if self.deconv_with_bias:
                        nn.init.constant_(m.bias, 0)
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
            for head in self.heads:
                final_layer = self.__getattr__(head)
                for (i, m) in enumerate(final_layer.modules()):
                    if isinstance(m, nn.Conv2d):
                        # Only the head's output conv (channels == head size) is re-initialized.
                        if (m.weight.shape[0] == self.heads[head]):
                            # Heatmap heads get the focal-loss prior bias (-2.19 ~= -log((1-0.1)/0.1)).
                            if ('hm' in head):
                                nn.init.constant_(m.bias, (- 2.19))
                            else:
                                nn.init.normal_(m.weight, std=0.001)
                                nn.init.constant_(m.bias, 0)
            url = model_urls['resnet{}'.format(num_layers)]
            pretrained_state_dict = model_zoo.load_url(url)
            print('=> loading pretrained model {}'.format(url))
            # strict=False: deconv/head params are absent from the ImageNet checkpoint.
            self.load_state_dict(pretrained_state_dict, strict=False)
        else:
            print('=> imagenet pretrained model dose not exist')
            print('=> please download it first')
            raise ValueError('imagenet pretrained model does not exist')
class GCT(nn.Module):
    """Gated Channel Transformation.

    Builds a per-channel embedding (l2 or l1 pooling over the spatial
    dimensions, scaled by ``alpha``), normalizes it across channels, and
    gates the input with ``1 + tanh(embedding * norm + beta)``.
    """

    def __init__(self, num_channels, epsilon=1e-05, mode='l2', after_relu=False):
        super(GCT, self).__init__()
        # alpha scales the embedding; gamma/beta control the gating.
        self.alpha = nn.Parameter(torch.ones(1, num_channels, 1, 1))
        self.gamma = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
        self.beta = nn.Parameter(torch.zeros(1, num_channels, 1, 1))
        self.epsilon = epsilon
        self.mode = mode
        self.after_relu = after_relu

    def forward(self, x):
        if self.mode == 'l2':
            embedding = (x.pow(2).sum((2, 3), keepdim=True) + self.epsilon).pow(0.5) * self.alpha
            norm = self.gamma / (embedding.pow(2).mean(dim=1, keepdim=True) + self.epsilon).pow(0.5)
        elif self.mode == 'l1':
            # After a ReLU the input is already non-negative, so abs() is skipped.
            _x = x if self.after_relu else torch.abs(x)
            embedding = _x.sum((2, 3), keepdim=True) * self.alpha
            norm = self.gamma / (torch.abs(embedding).mean(dim=1, keepdim=True) + self.epsilon)
        else:
            print('Unknown mode!')
            sys.exit()
        gate = 1.0 + torch.tanh(embedding * norm + self.beta)
        return x * gate
def DeclareList(sort):
    """Declare a Z3 algebraic list datatype specialized to ``sort``:
    ``cons(car: sort, cdr: List) | nil``."""
    list_dt = Datatype('List_of_%s' % sort.name())
    list_dt.declare('cons', ('car', sort), ('cdr', list_dt))
    list_dt.declare('nil')
    return list_dt.create()
class Encoder(tf.keras.layers.Layer):
    """Embedding + single-GRU encoder returning the full output sequence
    and the final hidden state."""

    def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
        super(Encoder, self).__init__()
        self.batch_sz = batch_sz
        self.enc_units = enc_units
        self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
        self.gru = tf.keras.layers.GRU(self.enc_units, return_sequences=True, return_state=True, recurrent_initializer='glorot_uniform')

    def call(self, x, hidden):
        """Embed token ids ``x`` and run the GRU starting from ``hidden``."""
        embedded = self.embedding(x)
        seq_output, final_state = self.gru(embedded, initial_state=hidden)
        return (seq_output, final_state)

    def initialize_hidden_state(self):
        """All-zero initial state of shape (batch_sz, enc_units)."""
        return tf.zeros((self.batch_sz, self.enc_units))
def component_points(component, width: int, height: int, num: int):
    """Convert a landmark component into pixel-space points.

    Returns ``(points, confidences)``: an (N, 3) array of
    ``[x * width, y * height, z]`` with all-ones confidence, or zero arrays
    of shape (num, 3) and (num,) when ``component`` is None.
    """
    if component is None:
        return (np.zeros((num, 3)), np.zeros(num))
    points = np.array([[p.x * width, p.y * height, p.z] for p in component.landmark])
    return (points, np.ones(num))
class LLamaEngine(CausalEngine):
    """Causal-LM engine wrapping the HF 'aleksickx/llama-7b-hf' checkpoint."""

    # Name under which this engine is registered/configured.
    config_name: str = 'llama_engine'

    def __init__(self, weights_path: Optional[Union[(str, Path)]]=None):
        model_name = 'aleksickx/llama-7b-hf'
        model = LlamaForCausalLM.from_pretrained(model_name, torch_dtype=DEFAULT_DTYPE)
        # add_bos_token=False: BOS handling is presumably done by the base
        # engine's prompt construction — TODO confirm against CausalEngine.
        tokenizer = LlamaTokenizer.from_pretrained(model_name, add_bos_token=False)
        # LLaMA has no pad token; reuse EOS for padding.
        tokenizer.pad_token = tokenizer.eos_token
        tokenizer.pad_token_id = tokenizer.eos_token_id
        super().__init__(weights_path=weights_path, model=model, tokenizer=tokenizer)

    def save(self, saving_path: Union[(str, Path)]):
        """Persist both model weights and tokenizer files to ``saving_path``."""
        self.model.save_pretrained(saving_path)
        self.tokenizer.save_pretrained(saving_path)
class DCProblemAnalyticTests_Dirichlet(unittest.TestCase):
    """Compare a 3D cell-centered DC resistivity simulation (Dirichlet BC)
    against the analytic wholespace pole-dipole solution."""

    def setUp(self):
        # Tensor mesh with padding cells expanding by factor 1.3 on each side.
        cs = 25.0
        hx = [(cs, 7, (- 1.3)), (cs, 21), (cs, 7, 1.3)]
        hy = [(cs, 7, (- 1.3)), (cs, 21), (cs, 7, 1.3)]
        hz = [(cs, 7, (- 1.3)), (cs, 20), (cs, 7, (- 1.3))]
        mesh = discretize.TensorMesh([hx, hy, hz], x0='CCC')
        # Homogeneous conductivity of 0.01 S/m.
        sigma = (np.ones(mesh.nC) * 0.01)
        # Receiver grid restricted to the uniform (non-padded) core region.
        x = mesh.cell_centers_x[((mesh.cell_centers_x > (- 155.0)) & (mesh.cell_centers_x < 155.0))]
        y = mesh.cell_centers_y[((mesh.cell_centers_y > (- 155.0)) & (mesh.cell_centers_y < 155.0))]
        # A-B dipole source on the surface, M-N dipole receivers.
        Aloc = np.r_[((- 200.0), 0.0, 0.0)]
        Bloc = np.r_[(200.0, 0.0, 0.0)]
        M = utils.ndgrid((x - 25.0), y, np.r_[0.0])
        N = utils.ndgrid((x + 25.0), y, np.r_[0.0])
        # Analytic dipole-dipole response via superposition of two pole-dipole solutions.
        phiA = analytics.DCAnalytic_Pole_Dipole(Aloc, [M, N], 0.01, earth_type='wholespace')
        phiB = analytics.DCAnalytic_Pole_Dipole(Bloc, [M, N], 0.01, earth_type='wholespace')
        data_ana = (phiA - phiB)
        rx = dc.receivers.Dipole(M, N)
        src = dc.sources.Dipole([rx], Aloc, Bloc)
        survey = dc.survey.Survey([src])
        self.survey = survey
        self.mesh = mesh
        self.sigma = sigma
        self.data_ana = data_ana

    def test_Simulation3DCellCentered_Dirichlet(self, tolerance=0.05):
        """Pass when the RMS relative misfit vs. the analytic data is < 5%."""
        simulation = dc.simulation.Simulation3DCellCentered(self.mesh, survey=self.survey, sigma=self.sigma, bc_type='Dirichlet', solver=Solver)
        data = simulation.dpred()
        err = np.sqrt(((((data - self.data_ana) / self.data_ana) ** 2).sum() / self.survey.nD))
        if (err < tolerance):
            print(err)
            passed = True
            print('>> DC analytic test for Simulation3DCellCentered_Dirchlet is passed')
        else:
            print(err)
            passed = False
            print('>> DC analytic test for Simulation3DCellCentered_Dirchlet is failed')
        self.assertTrue(passed)
def get_pars(dim, full=False):
    """Lame parameters for an isotropic material in ``dim`` dimensions.

    With ``full`` False, return the scalar pair ``(lam, mu) = (10, 1)``.
    With ``full`` True, return the sym x sym stiffness matrix
    ``lam * (o o^T) + mu * diag(o + 1)`` where ``sym = dim * (dim + 1) / 2``
    and ``o`` is 1 on the first ``dim`` (normal) components, 0 elsewhere.
    """
    import numpy as nm
    sym = (dim + 1) * dim // 2
    lam, mu = 10.0, 1.0
    if not full:
        return (lam, mu)
    o = nm.array([1.0] * dim + [0.0] * (sym - dim), dtype=nm.float64)
    return lam * nm.outer(o, o) + mu * nm.diag(o + 1.0)
def get_or_find_cached_data(dataset, data_cache_name, data_cache_base_path, **kwargs):
    """Load (or build and pickle-cache) stratified data for ``dataset``.

    When no explicit target is requested, caches the result of
    ``load_stratified_targets``; otherwise rewrites the kwargs for
    ``load_src_stratified_trg`` and returns ``(result, target_c)``.

    Fix: the original did ``del kwargs['target']`` after checking
    ``kwargs.get('target', None) is None`` — a KeyError whenever the key was
    simply absent.  ``pop`` with a default handles both "absent" and
    "explicitly None".
    """
    if kwargs.get('target', None) is None:
        kwargs.pop('target', None)  # key may be absent or explicitly None
        res = Pickler(data_cache_name, Path(data_cache_base_path)).find_or_create(
            (lambda: dataset.load_stratified_targets(**kwargs)))
    else:
        # Copy before rewriting so the caller's mapping semantics stay clear.
        kwargs = copy.copy(kwargs)
        kwargs['target_c'] = [kwargs['target']]
        del kwargs['target']
        del kwargs['n_target_c']
        del kwargs['return_target_c']
        res = Pickler(data_cache_name, Path(data_cache_base_path)).find_or_create(
            (lambda: dataset.load_src_stratified_trg(**kwargs)))
        res = (res, kwargs['target_c'])
    return res
def match_case(row):
    """Normalize framework names in ``row['bench']`` to their canonical
    casing, mutating and returning ``row``."""
    replacements = (
        ('pytorch', 'PyTorch'),
        ('tensorflow', 'TensorFlow'),
        ('lasagne', 'Lasagne'),
        ('keras', 'Keras'),
        ('theano', 'Theano'),
        ('cudnnLSTM', 'cuDNNLSTM'),
    )
    bench = row['bench']
    for old, new in replacements:
        bench = bench.replace(old, new)
    row['bench'] = bench
    return row
class BeneparComponent():
    """spaCy pipeline component that adds Berkeley Neural Parser constituency
    parses (and optionally POS tags) to a Doc."""

    name = 'benepar'

    def __init__(self, name, subbatch_max_tokens=500, disable_tagger=False, batch_size='ignored'):
        # ``batch_size`` is accepted for backward compatibility but unused.
        self._parser = load_trained_model(name)
        if torch.cuda.is_available():
            self._parser.cuda()
        self.subbatch_max_tokens = subbatch_max_tokens
        self.disable_tagger = disable_tagger
        # Build index -> label-tuple lookup; '::' separates nested labels
        # (e.g. unary chains), and the empty label maps to ().
        self._label_vocab = self._parser.config['label_vocab']
        label_vocab_size = (max(self._label_vocab.values()) + 1)
        self._label_from_index = ([()] * label_vocab_size)
        for (label, i) in self._label_vocab.items():
            if label:
                self._label_from_index[i] = tuple(label.split('::'))
            else:
                self._label_from_index[i] = ()
        self._label_from_index = tuple(self._label_from_index)
        if (not self.disable_tagger):
            # Same index -> string lookup for the parser's POS tag vocab.
            tag_vocab = self._parser.config['tag_vocab']
            tag_vocab_size = (max(tag_vocab.values()) + 1)
            self._tag_from_index = ([()] * tag_vocab_size)
            for (tag, i) in tag_vocab.items():
                self._tag_from_index[i] = tag
            self._tag_from_index = tuple(self._tag_from_index)
        else:
            self._tag_from_index = None

    def __call__(self, doc):
        """Parse each sentence of ``doc``; attach constituents (and tags)."""
        constituent_data = PartialConstituentData()
        wrapped_sents = [SentenceWrapper(sent) for sent in doc.sents]
        for (sent, parse) in zip(doc.sents, self._parser.parse(wrapped_sents, return_compressed=True, subbatch_max_tokens=self.subbatch_max_tokens)):
            # Parse spans are sentence-relative; shift by sent.start to get
            # document-level token offsets.
            constituent_data.starts.append((parse.starts + sent.start))
            constituent_data.ends.append((parse.ends + sent.start))
            constituent_data.labels.append(parse.labels)
            if ((parse.tags is not None) and (not self.disable_tagger)):
                for (i, tag_id) in enumerate(parse.tags):
                    sent[i].tag_ = self._tag_from_index[tag_id]
        doc._._constituent_data = constituent_data.finalize(doc, self._label_from_index)
        return doc
def load_protein0():
    """Binarized protein dataset: class 0 becomes the positive label (1) and
    every other class becomes 0, for both train and test splits."""
    X_train, y_train, X_test, y_test = load_protein()
    for labels in (y_train, y_test):
        is_zero = (labels == 0)
        labels[is_zero] = 1
        labels[~is_zero] = 0
    return (X_train, y_train, X_test, y_test)
class ShuffleV2Block(nn.Module):
    """ShuffleNetV2 basic unit.

    stride=1: channel-split (via channel_shuffle) into a passthrough half
    and a main-branch half, then concatenate.  stride=2: both a projection
    branch and the main branch process the full input, halving spatial size
    while growing channels to ``oup``.
    """

    def __init__(self, bn_norm, inp, oup, mid_channels, *, ksize, stride):
        super(ShuffleV2Block, self).__init__()
        self.stride = stride
        assert (stride in [1, 2])
        self.mid_channels = mid_channels
        self.ksize = ksize
        pad = (ksize // 2)
        self.pad = pad
        self.inp = inp
        # Main branch produces oup - inp channels so that after concatenating
        # with the inp passthrough/projection channels the output has oup.
        outputs = (oup - inp)
        # 1x1 pw -> depthwise ksize -> 1x1 pw (norm layers from get_norm).
        branch_main = [nn.Conv2d(inp, mid_channels, 1, 1, 0, bias=False), get_norm(bn_norm, mid_channels), nn.ReLU(inplace=True), nn.Conv2d(mid_channels, mid_channels, ksize, stride, pad, groups=mid_channels, bias=False), get_norm(bn_norm, mid_channels), nn.Conv2d(mid_channels, outputs, 1, 1, 0, bias=False), get_norm(bn_norm, outputs), nn.ReLU(inplace=True)]
        self.branch_main = nn.Sequential(*branch_main)
        if (stride == 2):
            # Projection branch: depthwise stride-2 conv + 1x1 conv.
            branch_proj = [nn.Conv2d(inp, inp, ksize, stride, pad, groups=inp, bias=False), get_norm(bn_norm, inp), nn.Conv2d(inp, inp, 1, 1, 0, bias=False), get_norm(bn_norm, inp), nn.ReLU(inplace=True)]
            self.branch_proj = nn.Sequential(*branch_proj)
        else:
            self.branch_proj = None

    def forward(self, old_x):
        if (self.stride == 1):
            (x_proj, x) = self.channel_shuffle(old_x)
            return torch.cat((x_proj, self.branch_main(x)), 1)
        elif (self.stride == 2):
            x_proj = old_x
            x = old_x
            return torch.cat((self.branch_proj(x_proj), self.branch_main(x)), 1)

    def channel_shuffle(self, x):
        """Shuffle channels and split the batch-flattened tensor into two
        halves of num_channels // 2 channels each."""
        (batchsize, num_channels, height, width) = x.data.size()
        assert ((num_channels % 4) == 0)
        # Interleave channels pairwise, then regroup into two half-channel tensors.
        x = x.reshape(((batchsize * num_channels) // 2), 2, (height * width))
        x = x.permute(1, 0, 2)
        x = x.reshape(2, (- 1), (num_channels // 2), height, width)
        return (x[0], x[1])
def import_statement_string(module, names, lazy):
    """Render an import statement for ``module``.

    ``names`` is a list of ``(name, alias)`` pairs; the pair ``(None, None)``
    requests importing the module itself.  With ``lazy`` True a
    ``lazy_import(...)`` call string is produced instead of plain import
    syntax (lazy module imports are not supported and raise ValueError).
    """
    if lazy:
        if len(names) == 1:
            name, alias = names[0]
            if name == alias:
                if name is None:
                    raise ValueError('cannot lazy import modules')
                return "lazy_import('%s', '%s')" % (module, name)
            return "lazy_import('%s', '%s', '%s')" % (module, name, alias)
        # Multiple names: pass parallel lists of names and aliases.
        quoted_names = ', '.join(("'" + n + "'") for (n, _) in names)
        quoted_aliases = ', '.join(("'" + a + "'") for (_, a) in names)
        return "lazy_import('%s', [%s], [%s])" % (module, quoted_names, quoted_aliases)
    wants_module = False
    plain_names = []
    for name, alias in names:
        if name != alias:
            plain_names.append('%s as %s' % (name, alias))
        elif name is None:
            wants_module = True
        else:
            plain_names.append(name)
    lines = []
    if wants_module:
        lines.append('import %s' % module)
    if plain_names:
        lines.append('from %s import %s' % (module, ', '.join(plain_names)))
    return '\n'.join(lines)
def get_scheduler(indicator, lr):
    """Build the LR schedule named by ``indicator``.

    Only 'warm-cos' is supported: a linear warmup (factor 0.001 over the
    first 5% of training) into a cosine decay from ``lr`` to ``lr * 0.001``.
    """
    if indicator != 'warm-cos':
        raise ValueError('Unknown indicator: {:}'.format(indicator))
    cosine = CosineParamScheduler(lr, lr * 0.001)
    return WarmupParamScheduler(cosine, warmup_factor=0.001, warmup_length=0.05, warmup_method='linear')
def test_multipart_form_open_api_3(assert_parameters, make_openapi_3_schema, user_jsonschema_with_file, open_api_3_user_with_file):
    """A required multipart/form-data request body in an OpenAPI 3 schema
    should be parsed into a single required OpenAPI30Body alternative whose
    JSON schema matches the user-with-file schema (fixtures supply the
    schema builders and expected values)."""
    schema = make_openapi_3_schema({'required': True, 'content': {'multipart/form-data': {'schema': open_api_3_user_with_file}}})
    assert_parameters(schema, PayloadAlternatives([OpenAPI30Body(definition={'schema': open_api_3_user_with_file}, media_type='multipart/form-data', required=True)]), [user_jsonschema_with_file])
def test_20news_length_consistency(fetch_20newsgroups_fxt):
    """Dict-style and attribute access on the Bunch must expose equally
    sized data, target and filenames containers."""
    data = fetch_20newsgroups_fxt(subset='all')
    for key in ('data', 'target', 'filenames'):
        assert len(data[key]) == len(getattr(data, key))
class CrossBatchMemory(ModuleWithRecords):
    """Cross-batch memory (XBM) wrapper around a metric-learning loss.

    Maintains a circular queue of the last ``memory_size`` embeddings and
    labels, and computes ``loss`` over the current batch together with the
    queue contents, optionally mining pairs/triplets with ``miner``.
    """

    def __init__(self, loss, embedding_size, memory_size=1024, miner=None, **kwargs):
        super().__init__(**kwargs)
        self.loss = loss
        self.miner = miner
        self.embedding_size = embedding_size
        self.memory_size = memory_size
        # Circular buffers; queue_idx is the next write position.
        self.embedding_memory = torch.zeros(self.memory_size, self.embedding_size)
        self.label_memory = torch.zeros(self.memory_size).long()
        self.has_been_filled = False
        self.queue_idx = 0
        self.add_to_recordable_attributes(list_of_names=['embedding_size', 'memory_size', 'queue_idx'], is_stat=False)

    def forward(self, embeddings, labels, indices_tuple=None, enqueue_idx=None):
        """Compute the loss of ``embeddings`` against the memory queue.

        When ``enqueue_idx`` is given, only those rows are written to the
        queue and the remaining rows form the query batch; otherwise the
        whole batch is enqueued and self-comparisons are removed.
        """
        if (enqueue_idx is not None):
            assert (len(enqueue_idx) <= len(self.embedding_memory))
            assert (len(enqueue_idx) < len(embeddings))
        else:
            assert (len(embeddings) <= len(self.embedding_memory))
        self.reset_stats()
        device = embeddings.device
        labels = c_f.to_device(labels, device=device)
        self.embedding_memory = c_f.to_device(self.embedding_memory, device=device, dtype=embeddings.dtype)
        self.label_memory = c_f.to_device(self.label_memory, device=device, dtype=labels.dtype)
        if (enqueue_idx is not None):
            # Split the batch into rows to enqueue and rows to query with.
            mask = torch.zeros(len(embeddings), device=device, dtype=torch.bool)
            mask[enqueue_idx] = True
            emb_for_queue = embeddings[mask]
            labels_for_queue = labels[mask]
            embeddings = embeddings[(~ mask)]
            labels = labels[(~ mask)]
            # Queries were not enqueued, so no self-comparisons can occur.
            do_remove_self_comparisons = False
        else:
            emb_for_queue = embeddings
            labels_for_queue = labels
            do_remove_self_comparisons = True
        batch_size = len(embeddings)
        queue_batch_size = len(emb_for_queue)
        self.add_to_memory(emb_for_queue, labels_for_queue, queue_batch_size)
        # Until the queue wraps once, only the filled prefix is usable.
        if (not self.has_been_filled):
            E_mem = self.embedding_memory[:self.queue_idx]
            L_mem = self.label_memory[:self.queue_idx]
        else:
            E_mem = self.embedding_memory
            L_mem = self.label_memory
        indices_tuple = self.create_indices_tuple(batch_size, embeddings, labels, E_mem, L_mem, indices_tuple, do_remove_self_comparisons)
        combined_embeddings = torch.cat([embeddings, E_mem], dim=0)
        combined_labels = torch.cat([labels, L_mem], dim=0)
        loss = self.loss(combined_embeddings, combined_labels, indices_tuple)
        return loss

    def add_to_memory(self, embeddings, labels, batch_size):
        """Write a batch into the circular buffers, advancing queue_idx."""
        self.curr_batch_idx = (torch.arange(self.queue_idx, (self.queue_idx + batch_size), device=labels.device) % self.memory_size)
        self.embedding_memory[self.curr_batch_idx] = embeddings.detach()
        self.label_memory[self.curr_batch_idx] = labels.detach()
        prev_queue_idx = self.queue_idx
        self.queue_idx = ((self.queue_idx + batch_size) % self.memory_size)
        # Wrap-around (index not strictly increasing) means the queue is full.
        if ((not self.has_been_filled) and (self.queue_idx <= prev_queue_idx)):
            self.has_been_filled = True

    def create_indices_tuple(self, batch_size, embeddings, labels, E_mem, L_mem, input_indices_tuple, do_remove_self_comparisons):
        """Build (mined or all-pairs) indices between batch and memory,
        optionally dropping self-comparisons and merging caller indices."""
        if self.miner:
            indices_tuple = self.miner(embeddings, labels, E_mem, L_mem)
        else:
            indices_tuple = lmu.get_all_pairs_indices(labels, L_mem)
        if do_remove_self_comparisons:
            indices_tuple = self.remove_self_comparisons(indices_tuple)
        # Shift memory-side indices past the batch rows in the concatenated tensor.
        indices_tuple = c_f.shift_indices_tuple(indices_tuple, batch_size)
        if (input_indices_tuple is not None):
            # Harmonize tuple arity (pairs=4, triplets=3) before concatenating.
            if ((len(input_indices_tuple) == 3) and (len(indices_tuple) == 4)):
                input_indices_tuple = lmu.convert_to_pairs(input_indices_tuple, labels)
            elif ((len(input_indices_tuple) == 4) and (len(indices_tuple) == 3)):
                input_indices_tuple = lmu.convert_to_triplets(input_indices_tuple, labels)
            indices_tuple = tuple([torch.cat([x, c_f.to_device(y, x)], dim=0) for (x, y) in zip(indices_tuple, input_indices_tuple)])
        return indices_tuple

    def remove_self_comparisons(self, indices_tuple):
        """Drop anchor/positive pairs where the positive is the anchor's own
        just-enqueued copy in memory."""
        assert (len(indices_tuple) in [3, 4])
        # s..e are the memory slots written by the current batch.
        (s, e) = (self.curr_batch_idx[0], self.curr_batch_idx[(- 1)])
        if (len(indices_tuple) == 3):
            (a, p, n) = indices_tuple
            keep_mask = self.not_self_comparisons(a, p, s, e)
            a = a[keep_mask]
            p = p[keep_mask]
            n = n[keep_mask]
            assert (len(a) == len(p) == len(n))
            return (a, p, n)
        elif (len(indices_tuple) == 4):
            (a1, p, a2, n) = indices_tuple
            keep_mask = self.not_self_comparisons(a1, p, s, e)
            a1 = a1[keep_mask]
            p = p[keep_mask]
            assert (len(a1) == len(p))
            assert (len(a2) == len(n))
            return (a1, p, a2, n)

    def not_self_comparisons(self, a, p, s, e):
        """Mask of (a, p) pairs that are NOT anchor-vs-own-memory-copy."""
        # Positives that live in the current batch's memory slots.
        curr_batch = torch.any((p.unsqueeze(1) == self.curr_batch_idx), dim=1)
        a_c = a[curr_batch]
        p_c = p[curr_batch]
        # Map memory slot back to batch row, accounting for wrap-around.
        p_c -= s
        if (e <= s):
            p_c[(p_c <= (e - s))] += self.memory_size
        without_self_comparisons = curr_batch.clone()
        without_self_comparisons[torch.where(curr_batch)[0][(a_c == p_c)]] = False
        return (without_self_comparisons | (~ curr_batch))
class SyncMaster(object):
    """Coordinator for master/slave synchronized computation (e.g. SyncBN).

    Slaves register and push intermediate messages through a shared queue;
    the master collects one message per slave, runs ``master_callback`` on
    the full set, and scatters each slave's result back through its future.
    """

    def __init__(self, master_callback):
        # master_callback: callable taking [(rank, msg), ...] and returning
        # [(rank, result), ...] with the master's entry (rank 0) first.
        self._master_callback = master_callback
        self._queue = queue.Queue()
        self._registry = collections.OrderedDict()
        self._activated = False

    def register_slave(self, identifier):
        """Register slave ``identifier``; returns its SlavePipe.

        A registration after an activated run resets the registry for the
        next round (the queue must already be drained).
        """
        if self._activated:
            assert self._queue.empty(), 'Queue is not clean before next initialization.'
            self._activated = False
            self._registry.clear()
        future = FutureResult()
        self._registry[identifier] = _MasterRegistry(future)
        return SlavePipe(identifier, self._queue, future)

    def run_master(self, master_msg):
        """Collect one message per slave, run the callback, scatter results.

        Returns the master's own result (the rank-0 entry).
        """
        self._activated = True
        intermediates = [(0, master_msg)]
        for i in range(self.nr_slaves):
            intermediates.append(self._queue.get())
        results = self._master_callback(intermediates)
        assert (results[0][0] == 0), 'The first result should belongs to the master.'
        for (i, res) in results:
            if (i == 0):
                continue
            self._registry[i].result.put(res)
        # Each slave acknowledges receipt by putting True back on the queue.
        for i in range(self.nr_slaves):
            assert (self._queue.get() is True)
        return results[0][1]

    @property
    def nr_slaves(self):
        """Number of registered slaves.

        Fix: declared as a property — ``run_master`` uses ``self.nr_slaves``
        as an integer (``range(self.nr_slaves)``), which raised TypeError
        while this was a plain method; the upstream Synchronized-BatchNorm
        implementation defines it as a property.
        """
        return len(self._registry)
.parametrize('precision_level', ['32b', '64b'])
def test_set_precision_by_string(precision_level):
    """Setting pyhf backend precision by string ('32b'/'64b') should work
    both via the keyword argument and via a backend constructed with that
    precision; the active precision is reported lowercased.

    NOTE(review): the line above this function looks like a mangled
    ``@pytest.mark.parametrize('precision_level', ['32b', '64b'])`` decorator.
    """
    pyhf.set_backend(pyhf.tensorlib.name, precision=precision_level)
    assert (pyhf.tensorlib.precision == precision_level.lower())
    pyhf.set_backend(pyhf.tensor.numpy_backend(precision=precision_level))
    assert (pyhf.tensorlib.precision == precision_level.lower())
_AA_and_QQbar
def _singular_normal(ideal):
    """Compute the normalization of ``ideal`` via Singular's normal.lib.

    Temporarily silences Singular (printlevel = -1), restores the previous
    printlevel afterwards, and returns the second component of Singular's
    ``normal`` output.
    """
    from sage.libs.singular.function import singular_function, lib
    lib('normal.lib')
    normal = singular_function('normal')
    execute = singular_function('execute')
    try:
        get_printlevel = singular_function('get_printlevel')
    except NameError:
        # Older Singular versions lack get_printlevel; define it on the fly.
        execute('proc get_printlevel {return (printlevel);}')
        get_printlevel = singular_function('get_printlevel')
    # Suppress Singular's chatter while computing, then restore the level.
    saved_printlevel = get_printlevel()
    execute('printlevel=-1')
    nor = normal(ideal)
    execute('printlevel={}'.format(saved_printlevel))
    return nor[1]
def main():
    """Evaluate a trained VideoNet lip-reading model on the LRS2 test split.

    Reads all configuration from the module-level `args` dict; prints the
    test loss / CER / WER and returns nothing.
    """
    # Seed everything and force deterministic cuDNN for reproducible runs.
    np.random.seed(args['SEED'])
    torch.manual_seed(args['SEED'])
    gpuAvailable = torch.cuda.is_available()
    device = torch.device(('cuda' if gpuAvailable else 'cpu'))
    # Extra DataLoader kwargs only make sense when feeding a GPU.
    kwargs = ({'num_workers': args['NUM_WORKERS'], 'pin_memory': True} if gpuAvailable else {})
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # Build the test dataset and loader.
    videoParams = {'videoFPS': args['VIDEO_FPS']}
    testData = LRS2Main('test', args['DATA_DIRECTORY'], args['MAIN_REQ_INPUT_LENGTH'], args['CHAR_TO_INDEX'], args['STEP_SIZE'], videoParams)
    # NOTE(review): shuffle=True on a test loader is unusual -- confirm this
    # is intended (metrics are order-independent, so results are unaffected).
    testLoader = DataLoader(testData, batch_size=args['BATCH_SIZE'], collate_fn=collate_fn, shuffle=True, **kwargs)
    if (args['TRAINED_MODEL_FILE'] is not None):
        print(('\nTrained Model File: %s' % args['TRAINED_MODEL_FILE']))
        # Restore the trained video-only transformer model.
        model = VideoNet(args['TX_NUM_FEATURES'], args['TX_ATTENTION_HEADS'], args['TX_NUM_LAYERS'], args['PE_MAX_LENGTH'], args['TX_FEEDFORWARD_DIM'], args['TX_DROPOUT'], args['NUM_CLASSES'])
        model.load_state_dict(torch.load((args['CODE_DIRECTORY'] + args['TRAINED_MODEL_FILE']), map_location=device))
        model.to(device)
        loss_function = nn.CTCLoss(blank=0, zero_infinity=False)
        # Character-level language model used by the beam-search decoder.
        lm = LRS2CharLM()
        lm.load_state_dict(torch.load(args['TRAINED_LM_FILE'], map_location=device))
        lm.to(device)
        if (not args['USE_LM']):
            lm = None
        print('\nTesting the trained model .... \n')
        beamSearchParams = {'beamWidth': args['BEAM_WIDTH'], 'alpha': args['LM_WEIGHT_ALPHA'], 'beta': args['LENGTH_PENALTY_BETA'], 'threshProb': args['THRESH_PROBABILITY']}
        testParams = {'decodeScheme': args['TEST_DEMO_DECODING'], 'beamSearchParams': beamSearchParams, 'spaceIx': args['CHAR_TO_INDEX'][' '], 'eosIx': args['CHAR_TO_INDEX']['<EOS>'], 'lm': lm}
        (testLoss, testCER, testWER) = evaluate(model, testLoader, loss_function, device, testParams)
        print(('Test Loss: %.6f || Test CER: %.3f || Test WER: %.3f' % (testLoss, testCER, testWER)))
        print('\nTesting Done.\n')
    else:
        print('Path to the trained model file not specified.\n')
    return
def kullback_leibler_divergence(p, q):
    """Kullback-Leibler divergence (base 2) between distributions `p` and `q`.

    Terms where either p_i or q_i is zero are skipped: 0*log(0) is treated
    as 0, and q_i == 0 entries are dropped rather than yielding infinity
    (matching the original implementation's behaviour).
    """
    p = np.asarray(p)
    q = np.asarray(q)
    support = (p != 0) & (q != 0)
    ps = p[support]
    qs = q[support]
    return np.sum(ps * np.log2(ps / qs))
def MkInfinitesimal(name='eps', ctx=None):
    """Return a fresh infinitesimal as an RCF numeral in the given Z3 context.

    NOTE(review): the `name` parameter is accepted but never passed to
    Z3_rcf_mk_infinitesimal -- confirm whether this binding should forward it.
    """
    ctx = z3.get_ctx(ctx)
    return RCFNum(Z3_rcf_mk_infinitesimal(ctx.ref()), ctx)
def transform_pet_eprstmt(example, label_normalize_dict=None, is_test=False, pattern_id=0):
    """Apply a PET prompt pattern to an EPRSTMT example, in place.

    Args:
        example: dict with at least 'sentence'; in train mode also 'label'.
        label_normalize_dict: maps raw labels to verbalizer words (train only).
        is_test: when True, only sets `label_length`; when False, also sets
            `text_label` from `label_normalize_dict`.
        pattern_id: which of the four prompt templates (0-3) to apply;
            unknown ids leave `sentence1` unset, as before.

    Returns:
        The same `example` dict, mutated with 'sentence1' plus either
        'label_length' (test) or 'text_label' (train).
    """
    # The prompt templates were previously copy-pasted in both the test and
    # train branches; only the label handling actually differs, so the
    # pattern application is shared here.
    if is_test:
        example['label_length'] = 1
    else:
        example['text_label'] = label_normalize_dict[example['label']]
    sentence = example['sentence']
    if (pattern_id == 0):
        example['sentence1'] = (u'<unk>!' + sentence)
    elif (pattern_id == 1):
        example['sentence1'] = (u'<unk>!,' + sentence)
    elif (pattern_id == 2):
        example['sentence1'] = (sentence + u'<unk>')
    elif (pattern_id == 3):
        example['sentence1'] = (sentence + u', <unk>')
    return example
def test_restriced(rpool):
    """Adding 20 int constants to a restricted pool keeps only the last five."""
    for value in range(20):
        rpool.add_constant(value)
    expected = OrderedSet([15, 16, 17, 18, 19])
    assert rpool.get_all_constants_for(int) == expected
def compute_features(eval_loader, model, args):
    """Run `model` over `eval_loader` in eval mode and gather per-sample
    features, summed across all distributed ranks.

    Each rank fills only the rows for the indices it sees; the all-reduce
    sum merges the partial tensors into the full feature matrix.
    """
    print('Computing features...')
    model.eval()
    feats = torch.zeros(len(eval_loader.dataset), args.low_dim).cuda()
    with torch.no_grad():
        for _, (images, index) in enumerate(tqdm(eval_loader)):
            images = images.cuda(non_blocking=True)
            feats[index] = model(images, is_eval=True)
    dist.barrier()
    dist.all_reduce(feats, op=dist.ReduceOp.SUM)
    return feats.cpu()
class ResNet(nn.Module):
    """Standard ResNet backbone (conv stem + 4 stages + avgpool + fc head).

    `block` is BasicBlock (ResNet-18/34) or Bottleneck (ResNet-50+);
    `layers` gives the number of blocks per stage.
    """

    def __init__(self, block: Type[Union[(BasicBlock, Bottleneck)]], layers: List[int], num_classes: int=1000, in_channels: int=3, zero_init_residual: bool=False, groups: int=1, width_per_group: int=64, replace_stride_with_dilation: Optional[List[bool]]=None, norm_layer: Optional[Callable[(..., nn.Module)]]=None) -> None:
        super(ResNet, self).__init__()
        if (norm_layer is None):
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer
        self.inplanes = 64
        self.dilation = 1
        if (replace_stride_with_dilation is None):
            # Each element says whether to replace the 2x2 stride of stages
            # 2-4 with a dilated convolution instead.
            replace_stride_with_dilation = [False, False, False]
        if (len(replace_stride_with_dilation) != 3):
            raise ValueError('replace_stride_with_dilation should be None or a 3-element tuple, got {}'.format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # 7x7/2 stem followed by 3x3/2 max-pool: input is downsampled 4x.
        self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear((512 * block.expansion), num_classes)
        # He init for convs; BN/GN scaled to identity.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
        if zero_init_residual:
            # Zero the last BN of each residual branch so blocks start as
            # identity mappings (helps training per He et al. 2019 bag of tricks).
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block: Type[Union[(BasicBlock, Bottleneck)]], planes: int, blocks: int, stride: int=1, dilate: bool=False) -> nn.Sequential:
        """Build one stage: a strided/downsampling block + (blocks-1) plain ones."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            # Trade the spatial stride for dilation (keeps resolution).
            self.dilation *= stride
            stride = 1
        if ((stride != 1) or (self.inplanes != (planes * block.expansion))):
            # 1x1 conv projection so the skip connection matches shape.
            downsample = nn.Sequential(conv1x1(self.inplanes, (planes * block.expansion), stride), norm_layer((planes * block.expansion)))
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer))
        self.inplanes = (planes * block.expansion)
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups, base_width=self.base_width, dilation=self.dilation, norm_layer=norm_layer))
        return nn.Sequential(*layers)

    def _forward_impl(self, x: Tensor) -> Tensor:
        """Forward pass: stem -> stages 1-4 -> global pool -> classifier logits."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)
        return x

    def forward(self, x: Tensor) -> Tensor:
        return self._forward_impl(x)
def _simple_validate_commit_rev(rev):
    """Cheap sanity check that `rev` looks like a commit hash.

    Asserts (note: stripped under `python -O`) that `rev` has at least
    `_MinNumHashDigits` characters and matches `_RevDigitsRe`.
    """
    assert ((len(rev) >= _MinNumHashDigits) and _RevDigitsRe.match(rev))
def epoch_wrapup(pl_module):
    """End-of-epoch bookkeeping for a Lightning module.

    For every enabled task in `loss_names` (weight >= 1), computes, logs and
    resets the matching torchmetrics objects stored as attributes on
    `pl_module`, optionally runs retrieval-recall evaluation, and logs an
    aggregate `the_metric` used for model selection.
    """
    phase = ('train' if pl_module.training else 'val')
    the_metric = 0
    # NOTE(review): `the_metric_qar` is initialized but never updated below
    # (`qar_value` is used instead) -- confirm it is dead.
    the_metric_qar = 0
    # Optional (expensive) image<->text retrieval recall, eval phase only;
    # logged on rank 0 to avoid duplicate tensorboard entries.
    if (pl_module.hparams.config['get_recall_metric'] and (not pl_module.training)):
        (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_irtr_recall(pl_module)
        if (torch.distributed.get_rank() == 0):
            print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step)
            pl_module.logger.experiment.add_scalar('recalls/ir_r1', ir_r1, pl_module.global_step)
            pl_module.logger.experiment.add_scalar('recalls/ir_r5', ir_r5, pl_module.global_step)
            pl_module.logger.experiment.add_scalar('recalls/ir_r10', ir_r10, pl_module.global_step)
            pl_module.logger.experiment.add_scalar('recalls/tr_r1', tr_r1, pl_module.global_step)
            pl_module.logger.experiment.add_scalar('recalls/tr_r5', tr_r5, pl_module.global_step)
            pl_module.logger.experiment.add_scalar('recalls/tr_r10', tr_r10, pl_module.global_step)
        the_metric += (ir_r1.item() + tr_r1.item())
    # Variant of the recall evaluation (no rank guard here, unlike above).
    if (pl_module.hparams.config['get_ind_recall_metric'] and (not pl_module.training)):
        (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_ind_irtr_recall(pl_module)
        print((ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10), pl_module.global_step)
        pl_module.logger.experiment.add_scalar('recalls/ir_r1', ir_r1, pl_module.global_step)
        pl_module.logger.experiment.add_scalar('recalls/ir_r5', ir_r5, pl_module.global_step)
        pl_module.logger.experiment.add_scalar('recalls/ir_r10', ir_r10, pl_module.global_step)
        pl_module.logger.experiment.add_scalar('recalls/tr_r1', tr_r1, pl_module.global_step)
        pl_module.logger.experiment.add_scalar('recalls/tr_r5', tr_r5, pl_module.global_step)
        pl_module.logger.experiment.add_scalar('recalls/tr_r10', tr_r10, pl_module.global_step)
        the_metric += (ir_r1 + tr_r1)
    for (loss_name, v) in pl_module.hparams.config['loss_names'].items():
        if (v < 1):
            # Task disabled by config weight.
            continue
        value = 0
        qar_value = 0
        if (loss_name == 'vqa'):
            # VQA uses a soft score metric rather than plain accuracy.
            value = getattr(pl_module, f'{phase}_{loss_name}_score').compute()
            pl_module.log(f'{loss_name}/{phase}/score_epoch', value)
            getattr(pl_module, f'{phase}_{loss_name}_score').reset()
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_loss').reset()
        elif (loss_name == 'vcr_q2a'):
            # VCR question->answer, plus the joint q->answer+rationale (qar)
            # metrics which are tracked under separate attributes.
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_loss').reset()
            value = getattr(pl_module, f'{phase}_{loss_name}_accuracy').compute()
            pl_module.log(f'{loss_name}/{phase}/accuracy_epoch', value)
            getattr(pl_module, f'{phase}_{loss_name}_accuracy').reset()
            pl_module.log(f'vcr_qar/{phase}/loss_epoch', getattr(pl_module, f'{phase}_vcr_qar_loss').compute())
            getattr(pl_module, f'{phase}_vcr_qar_loss').reset()
            qar_value = getattr(pl_module, f'{phase}_vcr_qar_accuracy').compute()
            pl_module.log(f'vcr_qar/{phase}/accuracy_epoch', qar_value)
            getattr(pl_module, f'{phase}_vcr_qar_accuracy').reset()
        elif (loss_name == 'mc_vqa'):
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_loss').reset()
            value = getattr(pl_module, f'{phase}_{loss_name}_accuracy').compute()
            pl_module.log(f'{loss_name}/{phase}/accuracy_epoch', value)
            getattr(pl_module, f'{phase}_{loss_name}_accuracy').reset()
        elif (loss_name == 'openend_vqa'):
            # Open-ended VQA shares the 'vqa' loss attribute but has its own
            # accuracy attribute.
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_vqa_loss').compute())
            getattr(pl_module, f'{phase}_vqa_loss').reset()
            value = getattr(pl_module, f'{phase}_{loss_name}_accuracy').compute()
            pl_module.log(f'{loss_name}/{phase}/accuracy_epoch', value)
            getattr(pl_module, f'{phase}_{loss_name}_accuracy').reset()
        elif (loss_name == 'multiple_choice'):
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_loss').reset()
            value = getattr(pl_module, f'{phase}_{loss_name}_accuracy').compute()
            pl_module.log(f'{loss_name}/{phase}/accuracy_epoch', value)
            getattr(pl_module, f'{phase}_{loss_name}_accuracy').reset()
        elif (loss_name == 'vcop'):
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_loss').reset()
            value = getattr(pl_module, f'{phase}_{loss_name}_accuracy').compute()
            pl_module.log(f'{loss_name}/{phase}/accuracy_epoch', value)
            getattr(pl_module, f'{phase}_{loss_name}_accuracy').reset()
        elif (loss_name == 'nlvr2'):
            # NLVR2 tracks separate dev/test metrics during evaluation.
            if (phase == 'train'):
                value = getattr(pl_module, f'train_{loss_name}_accuracy').compute()
                pl_module.log(f'{loss_name}/train/accuracy_epoch', value)
                getattr(pl_module, f'train_{loss_name}_accuracy').reset()
                pl_module.log(f'{loss_name}/train/loss_epoch', getattr(pl_module, f'train_{loss_name}_loss').compute())
                getattr(pl_module, f'train_{loss_name}_loss').reset()
            else:
                value = getattr(pl_module, f'dev_{loss_name}_accuracy').compute()
                pl_module.log(f'{loss_name}/dev/accuracy_epoch', value)
                getattr(pl_module, f'dev_{loss_name}_accuracy').reset()
                pl_module.log(f'{loss_name}/dev/loss_epoch', getattr(pl_module, f'dev_{loss_name}_loss').compute())
                getattr(pl_module, f'dev_{loss_name}_loss').reset()
                value = getattr(pl_module, f'test_{loss_name}_accuracy').compute()
                pl_module.log(f'{loss_name}/test/accuracy_epoch', value)
                getattr(pl_module, f'test_{loss_name}_accuracy').reset()
                pl_module.log(f'{loss_name}/test/loss_epoch', getattr(pl_module, f'test_{loss_name}_loss').compute())
                getattr(pl_module, f'test_{loss_name}_loss').reset()
        elif (loss_name == 'irtr'):
            pl_module.log(f'{loss_name}/{phase}/irtr_loss_epoch', getattr(pl_module, f'{phase}_irtr_loss').compute())
            getattr(pl_module, f'{phase}_irtr_loss').reset()
        elif ((loss_name == 'mppd') or (loss_name == 'mpfr')):
            # Masked-patch pretraining losses: loss only, no accuracy metric.
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_loss').reset()
        elif (loss_name == 'vtm'):
            # Video-text matching logs accuracy, the main loss, plus the
            # auxiliary word-patch-alignment and DINO losses.
            value = getattr(pl_module, f'{phase}_{loss_name}_accuracy').compute()
            pl_module.log(f'{loss_name}/{phase}/accuracy_epoch', value)
            getattr(pl_module, f'{phase}_{loss_name}_accuracy').reset()
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_loss').reset()
            pl_module.log(f'{loss_name}/{phase}/wpa_loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_wpa_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_wpa_loss').reset()
            pl_module.log(f'{loss_name}/{phase}/dino_loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_dino_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_dino_loss').reset()
        elif (loss_name == 'dino'):
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_loss').reset()
        elif (loss_name == 'vtc'):
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_loss').reset()
        else:
            # Default: generic accuracy + loss pair named after the task.
            value = getattr(pl_module, f'{phase}_{loss_name}_accuracy').compute()
            pl_module.log(f'{loss_name}/{phase}/accuracy_epoch', value)
            getattr(pl_module, f'{phase}_{loss_name}_accuracy').reset()
            pl_module.log(f'{loss_name}/{phase}/loss_epoch', getattr(pl_module, f'{phase}_{loss_name}_loss').compute())
            getattr(pl_module, f'{phase}_{loss_name}_loss').reset()
        if (loss_name == 'vcr_q2a'):
            # For VCR, average the q->a and q->ar accuracies.
            the_metric += ((qar_value / 2) + (value / 2))
        else:
            the_metric += value
    pl_module.log(f'{phase}/the_metric', the_metric)
class AutoTokenizer():
    """Factory that instantiates the correct tokenizer class for a checkpoint.

    Not meant to be instantiated directly -- use
    ``AutoTokenizer.from_pretrained(...)``.
    """

    def __init__(self):
        raise EnvironmentError('AutoTokenizer is designed to be instantiated using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method.')
    # NOTE(review): the call below looks like the residue of a stripped
    # decorator (e.g. `@replace_list_option_in_docstrings`); likewise
    # `from_pretrained` takes `cls` but shows no `@classmethod` -- confirm
    # against the original transformers source.
    _list_option_in_docstrings(TOKENIZER_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
        """Resolve and load the tokenizer for a model name/path.

        Resolution order: explicit `tokenizer_type` kwarg, the tokenizer
        config's `tokenizer_class` / `auto_map` (remote code), then the
        model config's type via TOKENIZER_MAPPING.
        """
        config = kwargs.pop('config', None)
        kwargs['_from_auto'] = True
        use_fast = kwargs.pop('use_fast', True)
        tokenizer_type = kwargs.pop('tokenizer_type', None)
        trust_remote_code = kwargs.pop('trust_remote_code', False)
        # Path 1: caller named the tokenizer type explicitly.
        if (tokenizer_type is not None):
            tokenizer_class = None
            tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(tokenizer_type, None)
            if (tokenizer_class_tuple is None):
                raise ValueError(f"Passed `tokenizer_type` {tokenizer_type} does not exist. `tokenizer_type` should be one of {', '.join((c for c in TOKENIZER_MAPPING_NAMES.keys()))}.")
            (tokenizer_class_name, tokenizer_fast_class_name) = tokenizer_class_tuple
            if use_fast:
                if (tokenizer_fast_class_name is not None):
                    tokenizer_class = tokenizer_class_from_name(tokenizer_fast_class_name)
                else:
                    logger.warning('`use_fast` is set to `True` but the tokenizer class does not have a fast version. Falling back to the slow version.')
            if (tokenizer_class is None):
                tokenizer_class = tokenizer_class_from_name(tokenizer_class_name)
            if (tokenizer_class is None):
                raise ValueError(f'Tokenizer class {tokenizer_class_name} is not currently imported.')
            return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        # Path 2: inspect tokenizer_config.json on the hub / local dir.
        tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
        if ('_commit_hash' in tokenizer_config):
            kwargs['_commit_hash'] = tokenizer_config['_commit_hash']
        config_tokenizer_class = tokenizer_config.get('tokenizer_class')
        tokenizer_auto_map = None
        if ('auto_map' in tokenizer_config):
            if isinstance(tokenizer_config['auto_map'], (tuple, list)):
                # Legacy format: bare (slow, fast) tuple.
                tokenizer_auto_map = tokenizer_config['auto_map']
            else:
                tokenizer_auto_map = tokenizer_config['auto_map'].get('AutoTokenizer', None)
        # Fall back to the model config when the tokenizer config is silent.
        if (config_tokenizer_class is None):
            if (not isinstance(config, PretrainedConfig)):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs)
            config_tokenizer_class = config.tokenizer_class
            if (hasattr(config, 'auto_map') and ('AutoTokenizer' in config.auto_map)):
                tokenizer_auto_map = config.auto_map['AutoTokenizer']
        if (config_tokenizer_class is not None):
            tokenizer_class = None
            if (tokenizer_auto_map is not None):
                # Remote (repo-defined) tokenizer code needs explicit opt-in.
                if (not trust_remote_code):
                    raise ValueError(f'Loading {pretrained_model_name_or_path} requires you to execute the tokenizer file in that repo on your local machine. Make sure you have read the code there to avoid malicious use, then set the option `trust_remote_code=True` to remove this error.')
                if (use_fast and (tokenizer_auto_map[1] is not None)):
                    class_ref = tokenizer_auto_map[1]
                else:
                    class_ref = tokenizer_auto_map[0]
                tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
            elif (use_fast and (not config_tokenizer_class.endswith('Fast'))):
                # Prefer the fast twin of the configured class when available.
                tokenizer_class_candidate = f'{config_tokenizer_class}Fast'
                tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
            if (tokenizer_class is None):
                tokenizer_class_candidate = config_tokenizer_class
                tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
            if (tokenizer_class is None):
                raise ValueError(f'Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported.')
            return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
        # Path 3: map the config class itself to a (slow, fast) tokenizer pair.
        if isinstance(config, EncoderDecoderConfig):
            if (type(config.decoder) is not type(config.encoder)):
                logger.warning(f'The encoder model config class: {config.encoder.__class__} is different from the decoder model config class: {config.decoder.__class__}. It is not recommended to use the `AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder specific tokenizer classes.')
            config = config.encoder
        model_type = config_class_to_model_type(type(config).__name__)
        if (model_type is not None):
            (tokenizer_class_py, tokenizer_class_fast) = TOKENIZER_MAPPING[type(config)]
            if (tokenizer_class_fast and (use_fast or (tokenizer_class_py is None))):
                return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
            elif (tokenizer_class_py is not None):
                return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
            else:
                raise ValueError('This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed in order to use this tokenizer.')
        raise ValueError(f'''Unrecognized configuration class {config.__class__} to build an AutoTokenizer.
Model type should be one of {', '.join((c.__name__ for c in TOKENIZER_MAPPING.keys()))}.''')

    # NOTE(review): no `self`/`cls` parameter -- presumably a stripped
    # `@staticmethod`; confirm against the original source.
    def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None):
        """Register a custom (slow, fast) tokenizer pair for a config class.

        Validates that the classes are passed in the right slots and are
        mutually consistent before adding them to TOKENIZER_MAPPING.
        """
        if ((slow_tokenizer_class is None) and (fast_tokenizer_class is None)):
            raise ValueError('You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class')
        if ((slow_tokenizer_class is not None) and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast)):
            raise ValueError('You passed a fast tokenizer in the `slow_tokenizer_class`.')
        if ((fast_tokenizer_class is not None) and issubclass(fast_tokenizer_class, PreTrainedTokenizer)):
            raise ValueError('You passed a slow tokenizer in the `fast_tokenizer_class`.')
        if ((slow_tokenizer_class is not None) and (fast_tokenizer_class is not None) and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast) and (fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class)):
            raise ValueError(f'The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not consistent with the slow tokenizer class you passed (fast tokenizer has {fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}. Fix one of those so they match!')
        # Preserve whichever half of an existing registration is not overridden.
        if (config_class in TOKENIZER_MAPPING._extra_content):
            (existing_slow, existing_fast) = TOKENIZER_MAPPING[config_class]
            if (slow_tokenizer_class is None):
                slow_tokenizer_class = existing_slow
            if (fast_tokenizer_class is None):
                fast_tokenizer_class = existing_fast
        TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class))
def run_episode(env, agent, optimisers, total_episodic_rewards, i_episode, max_steps_per_episode=1000):
    """Play one episode, updating the policy after every non-terminal step.

    Returns the (agent, optimisers, total_episodic_rewards) triple with the
    episode's reward accumulated into the running total, and logs the
    per-episode and running-average reward.
    """
    episode_reward = 0.0
    env.reset()
    agent.set_initial_state()
    for _ in range(max_steps_per_episode):
        obs = env.observe()
        chosen_action = agent.get_action(obs)
        obs = env.act(chosen_action)
        episode_reward += obs.reward
        if obs.is_episode_over:
            break
        # Policy is only updated on non-terminal transitions.
        optimisers = agent.update_policy(optimisers, observation=obs)
    total_episodic_rewards += episode_reward
    # `% 1` logs every episode; kept as in the original (acts as a knob for
    # less frequent logging).
    if i_episode % 1 == 0:
        write_reward_log(episode_number=i_episode, current_episodic_reward=episode_reward, average_episodic_reward=(total_episodic_rewards / i_episode), agent=agent.name, environment=env.name)
    return (agent, optimisers, total_episodic_rewards)
def mapper(x):
    """Map a difficulty label to its 1-5 numeric score.

    Unknown labels yield None, matching the original if/elif chain that had
    no final else.
    """
    scores = {
        'Very Difficult': 1.0,
        'Difficult': 2.0,
        'Neutral': 3.0,
        'Easy': 4.0,
        'Very Easy': 5.0,
    }
    return scores.get(x)
def register_Ns3Dot11sHwmpProtocol_methods(root_module, cls):
    """Register pybindgen bindings for ns3::dot11s::HwmpProtocol.

    Generated-style binding code: declares the constructor and each C++
    method's signature (name, return type, parameters, const/virtual/static
    flags) on the wrapped class.
    """
    cls.add_constructor([])
    cls.add_method('AssignStreams', 'int64_t', [param('int64_t', 'stream')])
    cls.add_method('DoDispose', 'void', [], is_virtual=True)
    cls.add_method('GetRoutingTable', 'ns3::Ptr< ns3::dot11s::HwmpRtable >', [], is_const=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Install', 'bool', [param('ns3::Ptr< ns3::MeshPointDevice >', 'arg0')])
    cls.add_method('PeerLinkStatus', 'void', [param('ns3::Mac48Address', 'meshPontAddress'), param('ns3::Mac48Address', 'peerAddress'), param('uint32_t', 'interface'), param('bool', 'status')])
    cls.add_method('RemoveRoutingStuff', 'bool', [param('uint32_t', 'fromIface'), param('ns3::Mac48Address const', 'source'), param('ns3::Mac48Address const', 'destination'), param('ns3::Ptr< ns3::Packet >', 'packet'), param('uint16_t &', 'protocolType')], is_virtual=True)
    cls.add_method('Report', 'void', [param('std::ostream &', 'arg0')], is_const=True)
    cls.add_method('RequestRoute', 'bool', [param('uint32_t', 'sourceIface'), param('ns3::Mac48Address const', 'source'), param('ns3::Mac48Address const', 'destination'), param('ns3::Ptr< ns3::Packet const >', 'packet'), param('uint16_t', 'protocolType'), param('ns3::Callback< void, bool, ns3::Ptr< ns3::Packet >, ns3::Mac48Address, ns3::Mac48Address, unsigned short, unsigned int, ns3::empty, ns3::empty, ns3::empty >', 'routeReply')], is_virtual=True)
    cls.add_method('ResetStats', 'void', [])
    cls.add_method('SetNeighboursCallback', 'void', [param('ns3::Callback< std::vector< ns3::Mac48Address >, unsigned int, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >', 'cb')])
    cls.add_method('SetRoot', 'void', [])
    cls.add_method('UnsetRoot', 'void', [])
    cls.add_method('DoInitialize', 'void', [], visibility='private', is_virtual=True)
    return
class ViTBase_pretrained(nn.Module):
    """Pretrained ViT-base adapted to single-channel 129x500 spectrogram-like
    inputs with a 2-class head."""

    def __init__(self):
        super().__init__()
        model_name = 'google/vit-base-patch16-224'
        config = transformers.ViTConfig.from_pretrained(model_name)
        # Re-target the config for 1-channel, non-square inputs.
        config.update({'num_channels': 1})
        config.update({'image_size': (129, 500)})
        # NOTE(review): config says patch_size (8, 35) while the replacement
        # projection below uses kernel/stride (8, 36) with padding (0, 2) --
        # confirm the intended patch geometry.
        config.update({'patch_size': (8, 35)})
        # ignore_mismatched_sizes lets the pretrained weights load despite the
        # changed channel count / image size.
        model = transformers.ViTForImageClassification.from_pretrained(model_name, config=config, ignore_mismatched_sizes=True)
        # Replace the patch projection with a 1-channel conv + batch norm.
        model.vit.embeddings.patch_embeddings.projection = nn.Sequential(torch.nn.Conv2d(1, 768, kernel_size=(8, 36), stride=(8, 36), padding=(0, 2)), nn.BatchNorm2d(768))
        # New 2-class classification head with dropout.
        model.classifier = torch.nn.Sequential(torch.nn.Linear(768, 1000, bias=True), torch.nn.Dropout(p=0.1), torch.nn.Linear(1000, 2, bias=True))
        self.ViT = model

    def forward(self, x):
        # Return raw logits (shape: batch x 2).
        x = self.ViT(x).logits
        return x
def load_soba_json(json_file, image_root, dataset_name=None):
    """Load a SOBA-format (COCO-like) json into Detectron2 dataset dicts.

    Args:
        json_file: path to the SOBA annotation json.
        image_root: directory containing the images.
        dataset_name: optional; when given, category/association metadata is
            stored on its MetadataCatalog entry and category ids are remapped
            to a contiguous range.

    Returns:
        list[dict]: one record per image with 'annotations' (instance annos)
        and 'association_anno' (object-shadow association annos).
    """
    from pysobatools.soba import SOBA
    timer = Timer()
    json_file = PathManager.get_local_path(json_file)
    # SOBA prints while loading; swallow its stdout.
    with contextlib.redirect_stdout(io.StringIO()):
        soba_api = SOBA(json_file)
    if (timer.seconds() > 1):
        logger.info('Loading {} takes {:.2f} seconds.'.format(json_file, timer.seconds()))
    id_map = None
    if (dataset_name is not None):
        meta = MetadataCatalog.get(dataset_name)
        cat_ids = sorted(soba_api.getCatIds())
        association_ids = soba_api.getAssoIds()
        cats = soba_api.loadCats(cat_ids)
        association = soba_api.loadAsso(association_ids)
        thing_classes = [c['name'] for c in sorted(cats, key=(lambda x: x['id']))]
        association_classes = [c['name'] for c in sorted(association, key=(lambda x: x['id']))]
        meta.association_classes = association_classes
        meta.thing_classes = thing_classes
        # Fixed two-point "keypoint" scheme linking an object to its shadow.
        meta.keypoint_names = ['Object', 'Shadow']
        meta.keypoint_flip_map = {'Object': 'Shadow'}
        meta.keypoint_connection_rules = [('Object', 'Shadow', (255, 255, 255))]
        if (not ((min(cat_ids) == 1) and (max(cat_ids) == len(cat_ids)))):
            if ('soba' not in dataset_name):
                logger.warning("\nCategory ids in annotations are not in [1, #categories]! We'll apply a mapping for you.\n")
        # Remap dataset ids to contiguous [0, #classes) ids.
        id_map = {v: i for (i, v) in enumerate(cat_ids)}
        association_id_map = {v: i for (i, v) in enumerate(association_ids)}
        meta.association_dataset_id_to_contiguous_id = association_id_map
        meta.thing_dataset_id_to_contiguous_id = id_map
    img_ids = sorted(list(soba_api.imgs.keys()))
    imgs = soba_api.loadImgs(img_ids)
    anns = [soba_api.imgToAnns[img_id] for img_id in img_ids]
    assoAnns = [soba_api.imgToAssoAnns[img_id] for img_id in img_ids]
    if ('minival' not in json_file):
        # minival jsons are known to contain duplicate ids; skip the check.
        # (The previous version also collected association ids here but never
        # checked them -- that dead computation has been removed.)
        ann_ids = [ann['id'] for anns_per_image in anns for ann in anns_per_image]
        assert (len(set(ann_ids)) == len(ann_ids)), "Annotation ids in '{}' are not unique!".format(json_file)
    imgs_anns = list(zip(imgs, anns))
    imgs_asso_anns = list(zip(imgs, assoAnns))
    logger.info('Loaded {} images in SOBA format from {}'.format(len(imgs_anns), json_file))
    dataset_dicts = []
    DENSEPOSE_KEYS = ['dp_x', 'dp_y', 'dp_I', 'dp_U', 'dp_V', 'dp_masks']
    num_instances_without_valid_segmentation = 0

    def _convert_annos(anno_dict_list, image_id, base_fields):
        # Shared per-image conversion for instance and association
        # annotations; the two previously duplicated ~25-line loops differed
        # only in the copied field list.
        nonlocal num_instances_without_valid_segmentation
        objs = []
        fields = base_fields + DENSEPOSE_KEYS
        for anno in anno_dict_list:
            assert (anno['image_id'] == image_id)
            assert (anno.get('ignore', 0) == 0)
            obj = {field: anno[field] for field in fields if (field in anno)}
            segm = anno.get('segmentation', None)
            if segm:
                if (not isinstance(segm, dict)):
                    # Filter out invalid polygons (< 3 points); drop the
                    # instance entirely if nothing valid remains.
                    segm = [poly for poly in segm if (((len(poly) % 2) == 0) and (len(poly) >= 6))]
                    if (len(segm) == 0):
                        num_instances_without_valid_segmentation += 1
                        continue
                obj['segmentation'] = segm
            keypts = anno.get('keypoints', None)
            if keypts:
                # Shift pixel-indexed keypoint coords to pixel centers
                # (visibility flags at every 3rd position are untouched).
                for (idx, v) in enumerate(keypts):
                    if ((idx % 3) != 2):
                        keypts[idx] = (v + 0.5)
                obj['keypoints'] = keypts
            obj['bbox_mode'] = BoxMode.XYWH_ABS
            if id_map:
                obj['category_id'] = id_map[obj['category_id']]
            objs.append(obj)
        return objs

    for ((img_dict, anno_dict_list), (_, asso_anno_dict_list)) in zip(imgs_anns, imgs_asso_anns):
        record = {}
        record['file_name'] = os.path.join(image_root, img_dict['file_name'])
        record['height'] = img_dict['height']
        record['width'] = img_dict['width']
        image_id = record['image_id'] = img_dict['id']
        record['annotations'] = _convert_annos(anno_dict_list, image_id, ['iscrowd', 'bbox', 'keypoints', 'category_id'])
        # Association annotations additionally carry the 'light' field.
        record['association_anno'] = _convert_annos(asso_anno_dict_list, image_id, ['iscrowd', 'bbox', 'light', 'keypoints', 'category_id'])
        dataset_dicts.append(record)
    if (num_instances_without_valid_segmentation > 0):
        logger.warn('Filtered out {} instances without valid segmentation. There might be issues in your dataset generation process.'.format(num_instances_without_valid_segmentation))
    return dataset_dicts
def MatchingPennies():
    """Build the classic zero-sum Matching Pennies normal-form game."""
    from sage.matrix.constructor import matrix
    payoff = matrix([[1, (- 1)], [(- 1), 1]])
    game = NormalFormGame([payoff])
    game.rename(('Matching pennies - ' + repr(game)))
    return game
def GetAllImageIds():
    """Fetch every image id from the paginated /api/v0/images/all endpoint."""
    ids = []
    page = 1
    while True:
        data = utils.RetrieveData('/api/v0/images/all?page=' + str(page))
        ids.extend(data['results'])
        # The API signals the last page with a null 'next' link.
        if data['next'] is None:
            return ids
        page += 1
def process_it_vit(paths, dataset_name, *args):
    """Dispatch conversion for the 'it_vit' dataset.

    Extra positional `args` are accepted (for a uniform dispatcher signature,
    presumably) but ignored.
    """
    assert (dataset_name == 'it_vit')
    convert_it_vit(paths, dataset_name)
def _flatten_sparse_tensors(tensors):
    """Flatten a sequence of sparse tensors into one (indices, values) pair.

    The indices of all tensors are flattened into a single dense buffer, and
    likewise for the values.
    """
    all_indices = [t._indices() for t in tensors]
    all_values = [t._values() for t in tensors]
    return (_flatten_dense_tensors(all_indices), _flatten_dense_tensors(all_values))
def run(task: Task, num_samples: int, num_simulations: int, num_observation: Optional[int]=None, observation: Optional[torch.Tensor]=None, num_rounds: int=10, neural_net: str='resnet', hidden_features: int=50, simulation_batch_size: int=1000, training_batch_size: int=10000, num_atoms: int=10, automatic_transforms_enabled: bool=True, mcmc_method: str='slice_np_vectorized', mcmc_parameters: Dict[(str, Any)]={'num_chains': 100, 'thin': 10, 'warmup_steps': 25, 'init_strategy': 'sir', 'init_strategy_parameters': {'num_candidate_samples': 10000}}, z_score_x: str='independent', z_score_theta: str='independent', variant: str='B', max_num_epochs: int=((2 ** 31) - 1)) -> Tuple[(torch.Tensor, int, Optional[torch.Tensor])]:
    """Run (Sequential) Neural Ratio Estimation on ``task`` and sample its posterior.

    Exactly one of ``num_observation`` / ``observation`` must be supplied.  The
    total budget ``num_simulations`` is split evenly across ``num_rounds``;
    with one round this is plain NRE, otherwise SNRE (variant 'A' or 'B').

    Returns:
        ``(samples, num_simulations_used, None)``.

    NOTE(review): ``mcmc_parameters`` is a mutable default argument; it is only
    read here, but callers must not mutate the shared default.
    """
    # Exactly one way of specifying the observation must be used.
    assert (not ((num_observation is None) and (observation is None)))
    assert (not ((num_observation is not None) and (observation is not None)))
    log = logging.getLogger(__name__)
    if (num_rounds == 1):
        log.info(f'Running NRE')
        num_simulations_per_round = num_simulations
    else:
        log.info(f'Running SNRE')
        num_simulations_per_round = math.floor((num_simulations / num_rounds))
    # Batch sizes can never exceed the per-round simulation budget.
    if (simulation_batch_size > num_simulations_per_round):
        simulation_batch_size = num_simulations_per_round
        log.warn('Reduced simulation_batch_size to num_simulation_per_round')
    if (training_batch_size > num_simulations_per_round):
        training_batch_size = num_simulations_per_round
        log.warn('Reduced training_batch_size to num_simulation_per_round')
    prior = task.get_prior_dist()
    if (observation is None):
        observation = task.get_observation(num_observation)
    # max_calls enforces the overall simulation budget across all rounds.
    simulator = task.get_simulator(max_calls=num_simulations)
    transforms = task._get_transforms(automatic_transforms_enabled)['parameters']
    if automatic_transforms_enabled:
        # Run inference in the unbounded transformed parameter space.
        prior = wrap_prior_dist(prior, transforms)
        simulator = wrap_simulator_fn(simulator, transforms)
    classifier = classifier_nn(model=neural_net.lower(), hidden_features=hidden_features, z_score_x=z_score_x, z_score_theta=z_score_theta)
    # Variant A/B select the SNRE-A / SNRE-B algorithms; only B uses atoms.
    if (variant == 'A'):
        inference_class = inference.SNRE_A
        training_kwargs = {}
    elif (variant == 'B'):
        inference_class = inference.SNRE_B
        training_kwargs = {'num_atoms': num_atoms}
    else:
        raise NotImplementedError
    inference_method = inference_class(classifier=classifier, prior=prior)
    posteriors = []
    proposal = prior
    for r in range(num_rounds):
        # Simulate from the current proposal, then refit the ratio estimator
        # on the accumulated simulations.
        (theta, x) = inference.simulate_for_sbi(simulator, proposal, num_simulations=num_simulations_per_round, simulation_batch_size=simulation_batch_size)
        ratio_estimator = inference_method.append_simulations(theta, x, from_round=r).train(training_batch_size=training_batch_size, retrain_from_scratch=False, discard_prior_samples=False, show_train_summary=True, max_num_epochs=max_num_epochs, **training_kwargs)
        (potential_fn, theta_transform) = inference.ratio_estimator_based_potential(ratio_estimator, prior, observation, enable_transform=(not automatic_transforms_enabled))
        posterior = inference.MCMCPosterior(potential_fn=potential_fn, proposal=prior, theta_transform=theta_transform, method=mcmc_method, **mcmc_parameters)
        # Warm-start MCMC from the previous round's chains.
        # NOTE(review): condition is r > 1, so round index 1 does not reuse
        # init params — confirm this is intended rather than r > 0.
        if (r > 1):
            posterior.init_strategy = 'latest_sample'
            posterior._mcmc_init_params = posteriors[(- 1)]._mcmc_init_params
        proposal = posterior.set_default_x(observation)
        posteriors.append(posterior)
    # Map the final posterior back into the original parameter space.
    posterior = wrap_posterior(posteriors[(- 1)], transforms)
    assert (simulator.num_simulations == num_simulations)
    samples = posterior.sample((num_samples,)).detach()
    return (samples, simulator.num_simulations, None)
def register_Ns3RngSeedManager_methods(root_module, cls):
    """Register constructors and static methods of ns3::RngSeedManager on ``cls``."""
    cls.add_constructor([])
    cls.add_constructor([param('ns3::RngSeedManager const &', 'arg0')])
    # All RngSeedManager methods are static; register them table-driven.
    static_methods = (
        ('GetNextStreamIndex', 'uint64_t', []),
        ('GetRun', 'uint64_t', []),
        ('GetSeed', 'uint32_t', []),
        ('SetRun', 'void', [param('uint64_t', 'run')]),
        ('SetSeed', 'void', [param('uint32_t', 'seed')]),
    )
    for method_name, return_type, arguments in static_methods:
        cls.add_method(method_name, return_type, arguments, is_static=True)
    return
class ComputeTime():
    """Benchmark metric reporting per-sample explanation compute time."""
    def __call__(self, explanation, name):
        # Normalize total compute time by the number of explained samples.
        per_sample_time = explanation.compute_time / explanation.shape[0]
        return BenchmarkResult('compute time', name, value=per_sample_time)
class InceptionTest(tf.test.TestCase):
    """Shape and structure tests for the slim InceptionResnetV2 model (TF1 API).

    Uses ``assertEqual`` instead of the deprecated ``assertEquals`` alias
    (removed in Python 3.12).
    """
    def testBuildLogits(self):
        """Logits op lives in the expected scope and has [batch, classes] shape."""
        batch_size = 5
        (height, width) = (299, 299)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = inception.inception_resnet_v2(inputs, num_classes)
            self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
    def testBuildEndPoints(self):
        """End-point dict exposes Logits, AuxLogits and the 8x8 PrePool map."""
        batch_size = 5
        (height, width) = (299, 299)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (_, end_points) = inception.inception_resnet_v2(inputs, num_classes)
            self.assertTrue(('Logits' in end_points))
            logits = end_points['Logits']
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
            self.assertTrue(('AuxLogits' in end_points))
            aux_logits = end_points['AuxLogits']
            self.assertListEqual(aux_logits.get_shape().as_list(), [batch_size, num_classes])
            pre_pool = end_points['PrePool']
            self.assertListEqual(pre_pool.get_shape().as_list(), [batch_size, 8, 8, 1536])
    def testVariablesSetDevice(self):
        """Variables are placed on the device active at construction time."""
        batch_size = 5
        (height, width) = (299, 299)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
                inception.inception_resnet_v2(inputs, num_classes)
            with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
                inception.inception_resnet_v2(inputs, num_classes)
            for v in tf.get_collection(tf.GraphKeys.VARIABLES, scope='on_cpu'):
                self.assertDeviceEqual(v.device, '/cpu:0')
            for v in tf.get_collection(tf.GraphKeys.VARIABLES, scope='on_gpu'):
                self.assertDeviceEqual(v.device, '/gpu:0')
    def testHalfSizeImages(self):
        """150x150 inputs still build; PrePool shrinks to 3x3 spatially."""
        batch_size = 5
        (height, width) = (150, 150)
        num_classes = 1000
        with self.test_session():
            inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, end_points) = inception.inception_resnet_v2(inputs, num_classes)
            self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
            pre_pool = end_points['PrePool']
            self.assertListEqual(pre_pool.get_shape().as_list(), [batch_size, 3, 3, 1536])
    def testUnknownBatchSize(self):
        """A None batch dimension at graph build resolves at run time."""
        batch_size = 1
        (height, width) = (299, 299)
        num_classes = 1000
        with self.test_session() as sess:
            inputs = tf.placeholder(tf.float32, (None, height, width, 3))
            (logits, _) = inception.inception_resnet_v2(inputs, num_classes)
            self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
            self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
            images = tf.random_uniform((batch_size, height, width, 3))
            sess.run(tf.initialize_all_variables())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEqual(output.shape, (batch_size, num_classes))
    def testEvaluation(self):
        """Inference-mode (is_training=False) graph produces per-sample argmax."""
        batch_size = 2
        (height, width) = (299, 299)
        num_classes = 1000
        with self.test_session() as sess:
            eval_inputs = tf.random_uniform((batch_size, height, width, 3))
            (logits, _) = inception.inception_resnet_v2(eval_inputs, num_classes, is_training=False)
            predictions = tf.argmax(logits, 1)
            sess.run(tf.initialize_all_variables())
            output = sess.run(predictions)
            self.assertEqual(output.shape, (batch_size,))
    def testTrainEvalWithReuse(self):
        """Eval graph with reuse=True shares variables with the train graph."""
        train_batch_size = 5
        eval_batch_size = 2
        (height, width) = (150, 150)
        num_classes = 1000
        with self.test_session() as sess:
            train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
            inception.inception_resnet_v2(train_inputs, num_classes)
            eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
            (logits, _) = inception.inception_resnet_v2(eval_inputs, num_classes, is_training=False, reuse=True)
            predictions = tf.argmax(logits, 1)
            sess.run(tf.initialize_all_variables())
            output = sess.run(predictions)
            self.assertEqual(output.shape, (eval_batch_size,))
def get_shape(val: object) -> typing.List[int]:
    """Return the shape of a TorchScript IR value as a list of ints.

    Complete tensors report their recorded sizes; int/float scalars are
    treated as shape ``[1]``.

    Raises:
        ValueError: if the value is neither a complete tensor nor an
            int/float scalar (the original raised a bare, message-less
            ``ValueError``, which made failures hard to diagnose).
    """
    if val.isCompleteTensor():
        shape = val.type().sizes()
        # A zero-dim (scalar) tensor reports an empty size list; normalize
        # to [1] so callers can always multiply over the dimensions.
        if (not shape):
            shape = [1]
        return shape
    elif (val.type().kind() in ('IntType', 'FloatType')):
        return [1]
    else:
        raise ValueError('cannot infer shape for value of type kind %r' % val.type().kind())
class RestPittSpec(DomainSpec):
    """Dialogue domain specification for Pittsburgh restaurant search.

    Declares the NLG templates, the slots a user can supply, the slots the
    system can answer about, and the synthetic database size.
    """
    # Domain identifier and system greeting line.
    name = 'rest_pitt'
    greet = 'I am an expert about Pittsburgh restaurant.'
    # NLG surface templates per slot, keyed by dialogue act
    # (inform / request / yn_question).  '%s' is filled with the slot value.
    nlg_spec = {'loc': {'inform': ['I am at %s.', '%s.', "I'm interested in food at %s.", 'At %s.', 'In %s.'], 'request': ['Which city are you interested in?', 'Which place?']}, 'food_pref': {'inform': ['I like %s food.', '%s food.', '%s restaurant.', '%s.'], 'request': ['What kind of food do you like?', 'What type of restaurant?']}, 'open': {'inform': ['The restaurant is %s.', 'It is %s right now.'], 'request': ['Tell me if the restaurant is open.', "What's the hours?"], 'yn_question': {'open': ['Is the restaurant open?'], 'closed': ['Is it closed?']}}, 'parking': {'inform': ['The restaurant has %s.', 'This place has %s.'], 'request': ['What kind of parking does it have?.', 'How easy is it to park?'], 'yn_question': {'street parking': ['Does it have street parking?'], 'valet parking': ['Does it have valet parking?']}}, 'price': {'inform': ['The restaurant serves %s food.', 'The price is %s.'], 'request': ["What's the average price?", 'How expensive it is?'], 'yn_question': {'expensive': ['Is it expensive?'], 'moderate': ['Does it have moderate price?'], 'cheap': ['Is it cheap?']}}, 'default': {'inform': ['Restaurant %s is a good choice.'], 'request': ['I need a restaurant.', 'I am looking for a restaurant.', 'Recommend me a place to eat.']}}
    # User-side constraint slots: (slot_name, description, possible values).
    usr_slots = [('loc', 'location city', ['Downtown', 'CMU', 'Forbes and Murray', 'Craig', 'Waterfront', 'Airport', 'U Pitt', 'Mellon Park', 'Lawrance', 'Monroveil', 'Shadyside', 'Squrill Hill']), ('food_pref', 'food preference', ['healthy', 'fried', 'panned', 'steamed', 'hot pot', 'grilled', 'salad', 'boiled', 'raw', 'stewed'])]
    # System-side informable slots with the same (name, description, values) shape.
    sys_slots = [('open', "if it's open now", ['open', 'going to start', 'going to close', 'closed']), ('price', 'average price per person', ['cheap', 'average', 'fancy']), ('parking', 'if it has parking', ['garage parking', 'street parking', 'no parking'])]
    # Number of entries in the synthetic restaurant database.
    db_size = 150
def read_json(input_filename):
    """Read a JSONL annotation export and collect (source, entities) pairs.

    Each input line is a JSON map.  A line whose only key is 'source', or
    whose annotation keys carry no 'entities', counts as unlabeled; a line
    without 'source' counts as blank.  Entity annotations live under any
    top-level key other than 'source' / '*metadata' as
    ``doc[k]['annotations']['entities']``.

    Returns:
        list of ``(source_text, entities)`` tuples with the raw entity dicts.

    Raises:
        ValueError: if a single line carries entity annotations under more
            than one top-level key.
    """
    docs = []
    blank = 0       # lines lacking a 'source' field
    unlabeled = 0   # lines with a source but no entity annotations
    broken = 0      # lines whose entities miss label/startOffset/endOffset
    with open(input_filename, encoding='utf-8') as fin:
        for (line_idx, line) in enumerate(fin):
            doc = json.loads(line)
            if (sorted(doc.keys()) == ['source']):
                unlabeled += 1
                continue
            if ('source' not in doc):
                blank += 1
                continue
            source = doc['source']
            entities = None
            for k in doc.keys():
                if ((k == 'source') or k.endswith('metadata')):
                    continue
                if ('annotations' not in doc[k]):
                    continue
                annotations = doc[k]['annotations']
                if ('entities' not in annotations):
                    continue
                # The guard above already ensured 'entities' is present, so the
                # original's second "'entities' in annotations" check was
                # redundant and has been removed.
                if (entities is not None):
                    raise ValueError(('Found a map with multiple annotations at line %d' % line_idx))
                entities = annotations['entities']
            if (entities is None):
                unlabeled += 1
                continue
            is_broken = any((any(((x not in entity) for x in ('label', 'startOffset', 'endOffset'))) for entity in entities))
            if is_broken:
                broken += 1
                # Report only the first broken line to keep output short;
                # broken documents are still kept in the result.
                if (broken == 1):
                    print('Found an entity which was missing either label, startOffset, or endOffset')
                    print(entities)
            docs.append((source, entities))
    print(('Found %d labeled lines. %d lines were blank, %d lines were broken, and %d lines were unlabeled' % (len(docs), blank, broken, unlabeled)))
    return docs
.environment
class cuTensor():
    """DaCe library environment describing the cuTENSOR dependency.

    Declares the CMake integration (link flags, headers) and the per-state
    handle field used by generated code.

    NOTE(review): the line above this class (`.environment`) looks like a
    truncated decorator — presumably `@dace.library.environment`; confirm
    against the original source.
    """
    cmake_minimum_version = None
    cmake_packages = ['CUDA']
    cmake_variables = {}
    cmake_includes = []
    cmake_libraries = ['cutensor']
    cmake_compile_flags = []
    cmake_link_flags = ['-L -lcutensor']
    cmake_files = []
    headers = {'frame': ['../include/dace_cutensor.h'], 'cuda': ['../include/dace_cutensor.h']}
    # Per-program state carries one cuTENSOR handle manager.
    state_fields = ['dace::linalg::CuTensorHandle cutensor_handle;']
    init_code = ''
    finalize_code = ''
    dependencies = []
    def handle_setup_code(node):
        """Emit C++ that fetches the cuTENSOR handle for the node's GPU.

        NOTE(review): defined without `self`/`@staticmethod` — presumably
        invoked unbound through the environment interface; confirm.
        """
        location = node.location
        # Default to device -1 (library default) when no GPU is specified.
        if ((not location) or ('gpu' not in node.location)):
            location = (- 1)
        else:
            try:
                location = int(location['gpu'])
            except ValueError:
                raise ValueError('Invalid GPU identifier: {}'.format(location))
        code = 'const int __dace_cuda_device = {location};\ncutensorHandle_t &__dace_cutensor_handle = __state->cutensor_handle.Get(__dace_cuda_device);\n// cutensorSetStream(__dace_cutensor_handle, __dace_current_stream);\n'
        return code.format(location=location)
def loadJson(dirname, epoch, rank):
    """Load the rollout JSON written for a given epoch and worker rank.

    Reads ``<dirname>/rollout_<epoch>_<rank>.txt`` and returns the parsed
    JSON object.

    The original bound the parsed result to a local named ``os``, shadowing
    the ``os`` module; renamed, and ``json.load`` replaces the manual
    ``loads(read())``.
    """
    filename = '/rollout_{0}_{1}.txt'.format(epoch, rank)
    with open((dirname + filename), 'r') as file:
        rollout = json.load(file)
    return rollout
class Transition(nn.Module):
    """DenseNet-style transition block: BN -> leaky ReLU -> 1x1 conv -> 2x2 avg pool.

    Reduces the channel count to ``out_planes`` and halves the spatial size.
    """
    def __init__(self, in_planes, out_planes):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_planes)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False)
    def forward(self, x):
        normalized = F.leaky_relu(self.bn(x))
        reduced = self.conv(normalized)
        return F.avg_pool2d(reduced, 2)
def _getEdgesIter(input_path, comparator):
    """Collect edges per topic from a file or from every file in a directory.

    The topic is derived from each file's basename by stripping everything
    from the first '-' onward.

    Args:
        input_path: path to an edge file or a directory of edge files.
        comparator: forwarded to ``getEdgesFromFile``.

    Returns:
        defaultdict mapping topic -> list of edges (empty if the path is missing).
    """
    logger.debug(('generate edges from: %s' % input_path))
    edges = defaultdict(list)
    if (not os.path.exists(input_path)):
        return edges
    if os.path.isfile(input_path):
        base_filename = os.path.basename(input_path)
        topic = re.sub('-.*$', '', base_filename)
        # BUG FIX: the original called list.extends(), which does not exist
        # and raised AttributeError on the single-file path.
        edges[topic].extend(getEdgesFromFile(input_path, comparator))
    elif os.path.isdir(input_path):
        for base_filename in os.listdir(input_path):
            topic = re.sub('-.*$', '', base_filename)
            input_file = os.path.join(input_path, base_filename)
            edges[topic].extend(getEdgesFromFile(input_file, comparator))
    return edges
def newsample(nnn, ratio):
    """Draw ``ratio`` items from ``nnn`` without replacement.

    When the pool is smaller than the request, the pool is tiled just enough
    times that a single sample of size ``ratio`` is possible.
    """
    if ratio > len(nnn):
        repeats = (ratio // len(nnn)) + 1
        return random.sample(nnn * repeats, ratio)
    return random.sample(nnn, ratio)
_quantizer(quantization_target=QuantizationTarget.Weights, quantization_method=[QuantizationMethod.POWER_OF_TWO, QuantizationMethod.SYMMETRIC], identifier=RoundingType.SoftQuantizer)
class SymmetricSoftRoundingGPTQ(BasePytorchGPTQTrainableQuantizer):
    """Trainable symmetric weights quantizer with soft rounding for GPTQ.

    Learns a per-weight rounding auxiliary variable (AdaRound-style rectified
    sigmoid) and, optionally, a per-channel scale correction.

    NOTE(review): the registration call above this class looks like a
    truncated decorator (missing the leading '@'); confirm against the
    original source.
    """
    def __init__(self, quantization_config: TrainableQuantizerWeightsConfig, quantization_parameter_learning: bool=False):
        """Cache quantization settings and the soft-rounding constants.

        Args:
            quantization_config: weights quantization settings (bit width,
                per-channel flag, thresholds, channel axis, method).
            quantization_parameter_learning: whether to additionally learn a
                multiplicative scale on top of the fixed threshold.
        """
        super().__init__(quantization_config)
        self.num_bits = quantization_config.weights_n_bits
        self.per_channel = quantization_config.weights_per_channel_threshold
        threshold_values = quantization_config.weights_quantization_params[THRESHOLD]
        self.threshold_shape = np.asarray(threshold_values).shape
        # Per-channel: flatten thresholds to a 1-D vector; per-tensor: scalar.
        self.threshold_values = (np.reshape(np.asarray(threshold_values), [(- 1)]) if self.per_channel else float(threshold_values))
        self.quantization_axis = quantization_config.weights_channels_axis
        self.power_of_two = (quantization_config.weights_quantization_method == QuantizationMethod.POWER_OF_TWO)
        self.quantization_parameter_learning = quantization_parameter_learning
        # Rectified-sigmoid stretch interval [gamma, zeta] for soft rounding.
        self.gamma = SOFT_ROUNDING_GAMMA
        self.zeta = SOFT_ROUNDING_ZETA
        self.quantizer_parameters = {}
    def initialize_quantization(self, tensor_shape: torch.Size, name: str, layer: PytorchQuantizationWrapper):
        """Register the threshold, rounding auxiliary, and optional scale on ``layer``.

        The auxiliary variable ``alpha`` is initialized as the inverse of the
        stretched sigmoid so that the initial soft target equals each weight's
        fractional remainder after quantization-grid normalization.
        """
        if self.per_channel:
            threshold_tensor = to_torch_tensor(self.threshold_values)
        else:
            threshold_tensor = torch.tensor(self.threshold_values)
        # The threshold itself is fixed during GPTQ (requires_grad=False).
        layer.register_parameter(f'{name}_{PTQ_THRESHOLD}', nn.Parameter(threshold_tensor, requires_grad=False))
        w = layer.layer.weight
        delta = qutils.calculate_delta(threshold_tensor.reshape(self.threshold_shape), self.num_bits, signed=True)
        # Normalize weights onto the signed integer grid and keep the
        # fractional remainder in [0, 1) for each weight.
        w_clipped_normed = torch.clip((w / delta), (- (2 ** (self.num_bits - 1))), ((2 ** (self.num_bits - 1)) - 1))
        rest = (w_clipped_normed - torch.floor(w_clipped_normed))
        # Inverse of sigmoid(alpha) * (zeta - gamma) + gamma == rest.
        alpha = (- torch.log((((self.zeta - self.gamma) / (rest - self.gamma)) - 1)))
        layer.register_parameter(f'{name}_{AUXVAR}', nn.Parameter(alpha, requires_grad=True))
        self.add_quantizer_variable(PTQ_THRESHOLD, layer.get_parameter(f'{name}_{PTQ_THRESHOLD}'), VariableGroup.QPARAMS)
        self.add_quantizer_variable(AUXVAR, layer.get_parameter(f'{name}_{AUXVAR}'), VariableGroup.WEIGHTS)
        if self.quantization_parameter_learning:
            # Learnable multiplicative scale, initialized to 1.
            if self.per_channel:
                layer.register_parameter(f'{name}_{SCALE_PTQ}', nn.Parameter(to_torch_tensor(torch.ones_like(torch.Tensor(self.threshold_values))), requires_grad=True))
            else:
                layer.register_parameter(f'{name}_{SCALE_PTQ}', nn.Parameter(to_torch_tensor(torch.tensor([1.0], requires_grad=True))))
            self.add_quantizer_variable(SCALE_PTQ, layer.get_parameter(f'{name}_{SCALE_PTQ}'), VariableGroup.QPARAMS)
    def get_soft_targets(self) -> torch.Tensor:
        """Map the auxiliary variable through the stretched sigmoid, clipped to [0, 1]."""
        scaled_sigmoid = ((torch.sigmoid(self.get_quantizer_variable(AUXVAR)) * (self.zeta - self.gamma)) + self.gamma)
        return torch.clip(scaled_sigmoid, min=0, max=1)
    def get_quant_config(self) -> Dict[(str, np.ndarray)]:
        """Return the final threshold (power-of-two rounded or scale-corrected)."""
        old_threshold = torch_tensor_to_numpy(self.get_quantizer_variable(PTQ_THRESHOLD))
        old_threshold = np.resize(old_threshold, self.threshold_shape)
        if self.power_of_two:
            old_threshold = max_power_of_two(old_threshold, MIN_THRESHOLD)
        elif self.quantization_parameter_learning:
            scale = torch.reshape(self.get_quantizer_variable(SCALE_PTQ), self.threshold_shape)
            # Guard against non-positive learned scales.
            scale = torch.where((scale <= 0), torch.tensor(MIN_THRESHOLD, device=scale.device), scale)
            old_threshold = (old_threshold * torch_tensor_to_numpy(scale))
            old_threshold = old_threshold.reshape(self.threshold_shape)
        return {THRESHOLD: old_threshold}
    def __call__(self, inputs: nn.Parameter, training: bool) -> torch.Tensor:
        """Quantize ``inputs`` with soft (training) or hard (eval) rounding targets."""
        auxvar = self.get_quantizer_variable(AUXVAR)
        ptq_threshold_tensor = self.get_quantizer_variable(PTQ_THRESHOLD)
        aux_var = self.get_soft_targets()
        # At inference time, harden the soft targets to exact 0/1 rounding.
        if (not training):
            aux_var = (aux_var >= 0.5).to(auxvar.dtype)
        if self.per_channel:
            # Broadcast thresholds along the quantization axis.
            reshape_shape = get_threshold_reshape_shape(inputs.shape, quant_axis=self.quantization_axis, quant_axis_dim=(- 1))
            ptq_threshold_tensor_hat = torch.reshape(ptq_threshold_tensor, reshape_shape)
            q_tensor = soft_rounding_symmetric_quantizer(input_tensor=inputs, auxvar_tensor=aux_var, threshold_tensor=ptq_threshold_tensor_hat, num_bits=self.num_bits, signed=True, power_of_two=self.power_of_two)
            if (self.quantization_parameter_learning and (not self.power_of_two)):
                scale = torch.reshape(self.get_quantizer_variable(SCALE_PTQ), reshape_shape)
                scale = torch.where((scale <= 0), torch.tensor(MIN_THRESHOLD, device=scale.device), scale)
                q_tensor *= scale
        else:
            q_tensor = soft_rounding_symmetric_quantizer(input_tensor=inputs, auxvar_tensor=aux_var, threshold_tensor=ptq_threshold_tensor, num_bits=self.num_bits, signed=True, power_of_two=self.power_of_two)
            if (self.quantization_parameter_learning and (not self.power_of_two)):
                scale = self.get_quantizer_variable(SCALE_PTQ)
                scale = torch.where((scale <= 0), torch.tensor(MIN_THRESHOLD, device=scale.device), scale)
                q_tensor *= scale
        return q_tensor
class StepFunc(Protocol):
    """Structural type for a per-step callback.

    Implementations are invoked with keyword-only ``model`` and
    ``extern_data`` and communicate via side effects (they return ``None``).
    """
    def __call__(self, *, model: rf.Module, extern_data: TensorDict) -> None:
        ...
def open_segmentation_mask(segmentation_filename, dataset_name):
    """Load a segmentation mask, resize it to 224x224, and scale to [0, 1].

    For the 'VOC' dataset every nonzero pixel is binarized to 1.
    """
    resize = transforms.Compose([transforms.Resize((224, 224))])
    grayscale = Image.open(segmentation_filename).convert('L')
    mask = np.array(resize(grayscale)) / 255.0
    if dataset_name == 'VOC':
        mask[mask > 0] = 1
    return mask
class ModelContextFusion(ModelTemplate):
    """Question-answering model: per-sentence encoding fused with the question.

    Encodes each context sentence independently, compares it against a
    question representation, and scores sentences via a context-fusion layer.
    """
    def __init__(self, token_emb_mat, glove_emb_mat, tds, tel, hn, scope):
        super(ModelContextFusion, self).__init__(token_emb_mat, glove_emb_mat, tds, tel, hn, scope)
        self.update_tensor_add_ema_and_opt()
    def build_network(self):
        """Build the TF1 graph and return per-sentence logits.

        Returns:
            logits tensor over context sentences, masked by
            ``self.context_sent_mask``.
        """
        (tds, tel, hn) = (self.tds, self.tel, self.hn)
        (bs, sn, sl, ql) = (self.bs, self.sn, self.sl, self.ql)
        # Shared token embeddings for context and question.
        with tf.variable_scope('emb'):
            token_emb_mat = generate_embedding_mat(tds, tel, init_mat=self.token_emb_mat, extra_mat=self.glove_emb_mat, scope='gene_token_emb_mat')
            c_emb = tf.nn.embedding_lookup(token_emb_mat, self.context_token)
            q_emb = tf.nn.embedding_lookup(token_emb_mat, self.question_token)
        # Compress the question into a single vector representation.
        with tf.variable_scope('prepro'):
            q_rep = multi_dimensional_attention(q_emb, self.question_token_mask, 'q2coding', cfg.dropout, self.is_train, cfg.wd, 'relu')
            q_rep_map = bn_dense_layer(q_rep, hn, True, 0.0, 'q_rep_map', 'relu', False, cfg.wd, cfg.dropout, self.is_train)
        # Encode each sentence independently by folding sentences into batch.
        with tf.variable_scope('sent_emb'):
            c_emb_rshp = tf.reshape(c_emb, [(bs * sn), sl, tel], 'c_emb_rshp')
            c_mask_rshp = tf.reshape(self.context_token_mask, [(bs * sn), sl], 'c_mask_rshp')
            sent_enc_rshp = sentence_encoding_models(c_emb_rshp, c_mask_rshp, cfg.context_fusion_method, 'relu', 'sent2enc', cfg.wd, self.is_train, cfg.dropout, hn, block_len=cfg.block_len)
            sent_enc = tf.reshape(sent_enc_rshp, [bs, sn, (2 * hn)])
            sent_enc_map = bn_dense_layer(sent_enc, hn, True, 0.0, 'sent_enc_map', 'relu', False, cfg.wd, cfg.dropout, self.is_train)
        # Fuse sentence and question via the standard [a, b, a-b, a*b] features.
        with tf.variable_scope('fusion'):
            q_rep_map_ex = tf.tile(tf.expand_dims(q_rep_map, 1), [1, sn, 1])
            fusion_rep = tf.concat([sent_enc_map, q_rep_map_ex, (sent_enc_map - q_rep_map_ex), (sent_enc_map * q_rep_map_ex)], (- 1))
        # Context fusion over sentences, then project to per-sentence logits.
        with tf.variable_scope('output'):
            out_cf = context_fusion_layers(fusion_rep, self.context_sent_mask, cfg.context_fusion_method, 'relu', 'out_cf', cfg.wd, self.is_train, cfg.dropout, hn, block_len=4)
            pre_output = bn_dense_layer(out_cf, hn, True, 0.0, 'pre_output', 'relu', False, cfg.wd, cfg.dropout, self.is_train)
            logits = get_logits(pre_output, None, True, 0.0, 'logits', self.context_sent_mask, cfg.wd, cfg.dropout, self.is_train, 'linear')
        return logits
def register_types_ns3_Hash(module):
    """Register ns3::Hash types and function-pointer aliases with pybindgen."""
    root_module = module.get_root()
    module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
    # Each hasher function-pointer typedef needs the base alias plus its
    # pointer ('*') and reference ('&') variants.
    alias_specs = (
        (u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr'),
        (u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr'),
    )
    for full_type, alias in alias_specs:
        typehandlers.add_type_alias(full_type, alias)
        typehandlers.add_type_alias((full_type + u'*'), (alias + u'*'))
        typehandlers.add_type_alias((full_type + u'&'), (alias + u'&'))
    nested_module = module.add_cpp_namespace('Function')
    register_types_ns3_Hash_Function(nested_module)
def test_channel(sol):
    """Regression-test the channel-flow solver under several configurations.

    Runs the same setup three times: default config, then with 3/2-rule
    dealiasing plus cython optimization, and finally with Chebyshev
    dealiasing, checkpointing, and result writing enabled.
    """
    config.update({'Re': 8000.0, 'nu': (1.0 / 8000.0), 'dt': 0.001, 'T': 0.01, 'L': [2, (2 * pi), ((4 * pi) / 3.0)], 'M': [7, 5, 2], 'eps': 1e-07}, 'channel')
    solver = get_solver(regression_test=regression_test, mesh='channel', parse_args=[sol])
    context = solver.get_context()
    initialize(solver, context)
    set_Source(**context)
    solve(solver, context)
    # Second pass: switch dealiasing/optimization and reload the solver module
    # so the new settings take effect.
    config.params.dealias = '3/2-rule'
    config.params.optimization = 'cython'
    importlib.reload(solver)
    initialize(solver, context)
    solve(solver, context)
    # Third pass: Chebyshev dealiasing with checkpoint/output intervals.
    config.params.dealias_cheb = True
    config.params.checkpoint = 5
    config.params.write_result = 2
    initialize(solver, context)
    solve(solver, context)
def convert_temperature(val, old_scale, new_scale):
    """Convert ``val`` between temperature scales, going through Kelvin.

    Supported scales (case-insensitive, full name or initial): Celsius,
    Kelvin, Fahrenheit, Rankine.  ``val`` may be a scalar or array-like.

    Raises:
        NotImplementedError: if either scale name is not recognized.
    """
    src = old_scale.lower()
    arr = _np.asanyarray(val)
    # Step 1: normalize the input to Kelvin.
    if src in ('celsius', 'c'):
        kelvin = arr + zero_Celsius
    elif src in ('kelvin', 'k'):
        kelvin = arr
    elif src in ('fahrenheit', 'f'):
        kelvin = (arr - 32.0) * 5.0 / 9.0 + zero_Celsius
    elif src in ('rankine', 'r'):
        kelvin = arr * 5.0 / 9.0
    else:
        raise NotImplementedError(('%s scale is unsupported: supported scales are Celsius, Kelvin, Fahrenheit and Rankine' % old_scale))
    # Step 2: convert Kelvin to the requested output scale.
    dst = new_scale.lower()
    if dst in ('celsius', 'c'):
        return kelvin - zero_Celsius
    if dst in ('kelvin', 'k'):
        return kelvin
    if dst in ('fahrenheit', 'f'):
        return (kelvin - zero_Celsius) * 9.0 / 5.0 + 32.0
    if dst in ('rankine', 'r'):
        return kelvin * 9.0 / 5.0
    raise NotImplementedError(("'%s' scale is unsupported: supported scales are 'Celsius', 'Kelvin', 'Fahrenheit' and 'Rankine'" % new_scale))
(repr=False)
class Check():
    """Outcome of one named check run against an API response.

    NOTE(review): the field layout and the preceding `(repr=False)` fragment
    suggest this class is meant to carry a truncated `@dataclass(repr=False)`
    decorator — confirm against the original source.
    """
    # Identifier of the check that was executed.
    name: str
    # Pass/fail status of the check.
    value: Status
    # Response being validated, if one was captured.
    response: (GenericResponse | None)
    # Wall-clock duration of the checked call, in seconds.
    elapsed: float
    # The generated test case that produced the request.
    example: Case
    # Human-readable failure message, when the check failed.
    message: (str | None) = None
    # Structured failure details, when available.
    context: (FailureContext | None) = None
    # The prepared HTTP request that was sent, if recorded.
    request: (requests.PreparedRequest | None) = None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.