code stringlengths 101 5.91M |
|---|
def mlp_gaussian_policy(x, act_dim, hidden, layers):
    """Gaussian policy head on top of an MLP trunk.

    Returns (mu, pi, logp_pi): the action mean, a reparameterized sample,
    and the log-likelihood of that sample under the Gaussian.
    """
    trunk = nn(x, ([hidden] * (layers + 1)))
    mu = tf.compat.v1.layers.dense(trunk, act_dim, activation=None)
    # tanh output in [-1, 1] is mapped affinely into [LOG_STD_MIN, LOG_STD_MAX]
    # to keep the std in a sane range.
    raw_log_std = tf.compat.v1.layers.dense(trunk, act_dim, activation=tf.tanh)
    log_std = LOG_STD_MIN + 0.5 * (LOG_STD_MAX - LOG_STD_MIN) * (raw_log_std + 1)
    std = tf.exp(log_std)
    # Reparameterization trick: sample = mu + eps * std, eps ~ N(0, I).
    pi = mu + tf.random.normal(tf.shape(input=mu)) * std
    logp_pi = gaussian_likelihood(pi, mu, log_std)
    return (mu, pi, logp_pi)
def run_training(model: nn.Module, optimizer: Optimizer, criterion: nn.Module, device: torch.device, train_loader: DataLoader, epochs: Tuple[(int, ...)], learning_rates: Tuple[(float, ...)], dev_loader: Optional[DataLoader]=None, test_loader: Optional[DataLoader]=None, batch_callback: Optional[Callable]=None, fp16: bool=False, run_dir: Optional[str]=None, checkpoint: str='last') -> Tuple[(nn.Module, AccuracySplits, TimeSplits)]:
    """Train *model* in phases, evaluating and checkpointing after each epoch.

    ``epochs`` and ``learning_rates`` are parallel tuples: for each pair, the
    optimizer's learning rate is set to that rate and the model is trained for
    that many epochs.  After every training epoch the model is evaluated on
    ``dev_loader`` and ``test_loader`` when they are provided.

    When ``run_dir`` is set, per-epoch metrics are written to CSV files there
    and checkpoints are saved: ``checkpoint='last'`` saves the final epoch of
    each phase, ``'all'`` saves every epoch, and the best model so far (ranked
    by dev accuracy when a dev loader exists, otherwise train accuracy) is
    always saved as ``checkpoint_best_model.t7``.

    Returns ``(model, AccuracySplits, TimeSplits)``: the trained model, the
    best accuracy per split, and accumulated wall-clock time per split.
    """
    assert (len(epochs) == len(learning_rates))
    start_epoch = 1
    global_step = 0
    # Output CSV paths; they stay None (and nothing is written) without run_dir.
    train_results_file: Optional[str] = None
    dev_results_file: Optional[str] = None
    test_results_file: Optional[str] = None
    results_file: Optional[str] = None
    if (run_dir is not None):
        train_results_file = os.path.join(run_dir, 'train_results.csv')
        if (dev_loader is not None):
            dev_results_file = os.path.join(run_dir, 'dev_results.csv')
        if (test_loader is not None):
            test_results_file = os.path.join(run_dir, 'test_results.csv')
        results_file = os.path.join(run_dir, 'results.csv')
    # -1 sentinel: any real accuracy beats it on the first epoch.
    best_accuracy = (- 1)
    best_train_accuracy = (- 1)
    best_dev_accuracy = (- 1)
    best_test_accuracy = (- 1)
    train_time = timedelta(0)
    dev_time = timedelta(0)
    test_time = timedelta(0)
    for (nepochs, learning_rate) in zip(epochs, learning_rates):
        end_epoch = (start_epoch + nepochs)
        # Apply this phase's learning rate to every parameter group.
        for group in optimizer.param_groups:
            group['lr'] = learning_rate
        _lr_optimizer = utils.get_learning_rate(optimizer)
        if (_lr_optimizer is not None):
            print(f'Learning rate set to {_lr_optimizer}')
            assert (_lr_optimizer == learning_rate)
        for epoch in range(start_epoch, end_epoch):
            # One full pass over the training data; advances global_step.
            (global_step, train_accs, epoch_train_time) = run_epoch(epoch, global_step, model, train_loader, device, criterion=criterion, optimizer=optimizer, output_file=train_results_file, train=True, batch_callback=batch_callback, fp16=fp16)
            if (results_file is not None):
                save_summary(epoch, global_step, train_accs, epoch_train_time, results_file, 'train')
            train_time += epoch_train_time
            # accs[0] is used as the primary accuracy meter throughout.
            if (train_accs[0].avg > best_train_accuracy):
                best_train_accuracy = train_accs[0].avg
            if (dev_loader is not None):
                (_, dev_accs, epoch_dev_time) = run_epoch(epoch, global_step, model, dev_loader, device, output_file=dev_results_file, train=False, fp16=fp16)
                if (results_file is not None):
                    save_summary(epoch, global_step, dev_accs, epoch_dev_time, results_file, 'dev')
                dev_time += epoch_dev_time
                if (dev_accs[0].avg > best_dev_accuracy):
                    best_dev_accuracy = dev_accs[0].avg
            if (test_loader is not None):
                (_, test_accs, epoch_test_time) = run_epoch(epoch, global_step, model, test_loader, device, output_file=test_results_file, train=False, label='(Test): ', fp16=fp16)
                if (results_file is not None):
                    save_summary(epoch, global_step, test_accs, epoch_test_time, results_file, 'test')
                test_time += epoch_test_time
                if (test_accs[0].avg > best_test_accuracy):
                    best_test_accuracy = test_accs[0].avg
            # Model-selection metric: dev accuracy when available, else train.
            current_accuracy = train_accs[0].avg
            accuracy_mode = 'train'
            if (dev_loader is not None):
                current_accuracy = dev_accs[0].avg
                accuracy_mode = 'dev'
            is_best = (current_accuracy > best_accuracy)
            if is_best:
                print(f'New best model! ({current_accuracy:0.2f})')
                best_accuracy = current_accuracy
            if (run_dir is not None):
                # end_epoch is exclusive, so this phase's last epoch is end_epoch - 1.
                last_epoch = (epoch == (end_epoch - 1))
                is_last_checkpoint = ((checkpoint == 'last') and last_epoch)
                should_checkpoint = (is_best or is_last_checkpoint)
                if (not should_checkpoint):
                    should_checkpoint |= (checkpoint == 'all')
                if should_checkpoint:
                    state = {'epoch': epoch, 'accuracy': current_accuracy, 'mode': accuracy_mode, 'optimizer': optimizer.state_dict()}
                    # Unwrap DataParallel so checkpoints load on single-GPU setups too.
                    if isinstance(model, nn.DataParallel):
                        state['model'] = model.module.state_dict()
                    else:
                        state['model'] = model.state_dict()
                    if (is_last_checkpoint or (checkpoint == 'all')):
                        checkpoint_path = os.path.join(run_dir, f'checkpoint_{epoch}.t7')
                        print(f'Saving checkpoint to {checkpoint_path}')
                        torch.save(state, checkpoint_path)
                    if is_best:
                        best_path = os.path.join(run_dir, 'checkpoint_best_model.t7')
                        torch.save(state, best_path)
        # The next phase continues the epoch numbering where this one stopped.
        start_epoch = end_epoch
    accuracy_splits = AccuracySplits(best_train_accuracy, best_dev_accuracy, best_test_accuracy)
    time_splits = TimeSplits(train_time, dev_time, test_time)
    return (model, accuracy_splits, time_splits)
def unpickle(file):
    """Load and return a pickled object from the path *file*.

    On Python 3, decodes with latin-1 so pickles written by Python 2
    (e.g. CIFAR batch files) load correctly: latin-1 maps bytes 0-255
    one-to-one, preserving Python 2 ``str`` payloads.

    Fixes: the file handle is now closed even if ``pickle.load`` raises,
    and ``data`` can no longer be referenced unbound when neither version
    branch matched.
    """
    with open(file, 'rb') as fp:
        if sys.version_info.major == 2:
            return pickle.load(fp)
        return pickle.load(fp, encoding='latin-1')
def test_double_fault_ones_zeros(example_diversity_ones_zeros):
    """double_fault must be all zeros when one predictor answers all ones
    and the other all zeros (they can never fail together)."""
    labels, pred_all_ones, pred_all_zeros = example_diversity_ones_zeros
    measure = double_fault(labels, pred_all_ones, pred_all_zeros)
    assert (measure == np.zeros(5)).all()
def get_dataset(args):
    """Build the ActivityNet train/validation DataLoaders.

    Returns ``(train_loader, valid_loader, text_proc, train_sampler)``;
    ``train_sampler`` is a DistributedSampler when running distributed on
    CUDA, otherwise None.
    """
    # Vocabulary and raw caption sentences come from the annotation file.
    (text_proc, raw_data) = get_vocab_and_sentences(args.dataset_file, args.max_sentence_len)
    train_dataset = ANetDataset(args.feature_root, args.train_data_folder, args.slide_window_size, args.dur_file, args.kernel_list, text_proc, raw_data, args.pos_thresh, args.neg_thresh, args.stride_factor, args.dataset, save_samplelist=args.save_train_samplelist, load_samplelist=args.load_train_samplelist, sample_listpath=args.train_samplelist_path)
    args.distributed = (args.world_size > 1)
    if (args.distributed and args.cuda):
        # Each worker process sees a disjoint shard of the training set.
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size)
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    # shuffle must be disabled when a sampler supplies the ordering.
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None), sampler=train_sampler, num_workers=args.num_workers, collate_fn=anet_collate_fn)
    valid_dataset = ANetDataset(args.feature_root, args.val_data_folder, args.slide_window_size, args.dur_file, args.kernel_list, text_proc, raw_data, args.pos_thresh, args.neg_thresh, args.stride_factor, args.dataset, save_samplelist=args.save_valid_samplelist, load_samplelist=args.load_valid_samplelist, sample_listpath=args.valid_samplelist_path)
    valid_loader = DataLoader(valid_dataset, batch_size=args.valid_batch_size, shuffle=False, num_workers=args.num_workers, collate_fn=anet_collate_fn)
    return (train_loader, valid_loader, text_proc, train_sampler)
class _ConstantPadNd(Module):
__constants__ = ['padding', 'value']
value: float
padding: Sequence[int]
def __init__(self, value: float) -> None:
super(_ConstantPadNd, self).__init__()
self.value = value
def forward(self, input: Tensor) -> Tensor:
return F.pad(input, self.padding, 'constant', self.value)
def extra_repr(self) -> str:
return 'padding={}, value={}'.format(self.padding, self.value) |
def run(keyword, title_matching=False):
    """Search 3D Warehouse for *keyword*; download, convert and index models.

    Paginates through all search results, skips models already ingested,
    optionally requires the keyword to appear in the model title, downloads
    and unzips each model, converts its files to .obj, records them in the
    database, and finally cleans up and renders preview images.
    """
    per_search = 100
    # First request is only used to learn the total result count.
    init_results = search(keyword, per_search, offset=0)
    total = init_results['total']
    total_search = (total // per_search)
    insert_search_log(keyword, total)
    output_dir = f'{dw_path}/{keyword}'
    make_dir(output_dir)
    keyword_id = get_keyword_id(keyword)
    print(f'{total} models found')
    for i in range((total_search + 1)):
        results = search(keyword, per_search, offset=(i * per_search))
        for item in tqdm(results['entries']):
            try:
                id = item['id']
                name = filter_escape_char(item['title'])
                # Skip models already present in the database.
                if is_model(id):
                    continue
                if (title_matching and (keyword not in item['title'].lower())):
                    continue
                zip_file = download(output_dir, item)
                if (not zip_file):
                    continue
                unzipped_dir = unzip_file(zip_file)
                files = filter_files(unzipped_dir)
                for file in files:
                    moved_file = move_file(join(unzipped_dir, file), output_dir)
                    obj_file = convert_to_obj(moved_file)
                    # 'bot_lt' is presumably the thumbnail variant -- confirm against the API.
                    image = item['binaries']['bot_lt']['contentUrl']
                    insert_dw_file(id, name, image, obj_file, keyword_id)
                shutil.rmtree(unzipped_dir)
            except Exception as e:
                # Best-effort scrape: log the failure and continue with the next item.
                logging.error(f'[{keyword}]:{e}')
    clean_dir(output_dir)
    create_image(output_dir)
_torch
class ScheduleInitTest(unittest.TestCase):
    """Tests that each LR scheduler factory produces the expected LR curve."""
    # Guarded so the module imports even when torch is unavailable.
    m = (nn.Linear(50, 50) if is_torch_available() else None)
    optimizer = (AdamW(m.parameters(), lr=10.0) if is_torch_available() else None)
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        """Element-wise almost-equal comparison of two equal-length lists."""
        self.assertEqual(len(list1), len(list2))
        for (a, b) in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        """Each scheduler yields its expected LRs, and survives save/reload."""
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # Maps factory -> (factory kwargs, expected LR at each of num_steps steps).
        scheds = {get_constant_schedule: ({}, ([10.0] * self.num_steps)), get_constant_schedule_with_warmup: ({'num_warmup_steps': 4}, [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0]), get_linear_schedule_with_warmup: ({**common_kwargs}, [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]), get_cosine_schedule_with_warmup: ({**common_kwargs}, [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38]), get_cosine_with_hard_restarts_schedule_with_warmup: ({**common_kwargs, 'num_cycles': 2}, [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46]), get_polynomial_decay_schedule_with_warmup: ({**common_kwargs, 'power': 2.0, 'lr_end': 1e-07}, [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156]), get_inverse_sqrt_schedule: ({'num_warmup_steps': 2}, [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714])}
        for (scheduler_func, data) in scheds.items():
            (kwargs, expected_learning_rates) = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(lrs_1, expected_learning_rates, tol=0.01, msg=f'failed for {scheduler_func} in normal scheduler')
            # A fresh scheduler must reproduce the same curve across save/reload.
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if (scheduler_func.__name__ != 'get_constant_schedule'):
                LambdaScheduleWrapper.wrap_scheduler(scheduler)
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f'failed for {scheduler_func} in save and reload')
def rel_positions_grid(grid_sizes):
    """Build a grid of relative positions in [-1, 1] for each size in *grid_sizes*.

    Returns a tensor of shape (len(grid_sizes), *grid_sizes) where channel i
    holds the normalized coordinate along dimension i.
    """
    axes = [torch.linspace(-1, 1, steps=n) for n in grid_sizes]
    # meshgrid expands each axis over the full grid; stacking puts the
    # coordinate channels first.
    return torch.stack(torch.meshgrid(*axes), dim=0)
class ParasolJSONEncoder(json.JSONEncoder):
    """JSON encoder aware of parasol Experiments and deepx graph nodes."""

    def default(self, obj):
        # Experiments know how to serialize themselves.
        if isinstance(obj, parasol.experiment.Experiment):
            return obj.to_dict()
        # deepx nodes are pickled and base64-wrapped, plus a human-readable echo.
        if isinstance(obj, deepx.core.Node):
            payload = base64.b64encode(pickle.dumps(obj)).decode('ascii')
            return {'__bytes__': payload, 'readable': str(obj)}
        # Anything else falls back to the stock encoder (raises TypeError).
        return super(ParasolJSONEncoder, self).default(obj)
class Aggregator(AggregatorBase):
    """FedAvg aggregator: incremental, example-weighted average of model updates."""

    def __init__(self, storage, server, modelservice, control):
        """Forward the FEDn plumbing to the base class and set the aggregator name."""
        super().__init__(storage, server, modelservice, control)
        self.name = 'fedavg'

    def combine_models(self, helper=None, time_window=180, max_nr_models=100, delete_models=True):
        """Drain the model-update queue and combine the updates into one model.

        Updates are merged via ``helper.increment_average`` weighted by each
        update's ``num_examples``; updates that fail to process are logged and
        skipped.  Returns ``(model, data)`` where ``data`` carries aggregation
        metadata and ``model`` is None when nothing was aggregated.
        """
        data = {}
        data['time_model_load'] = 0.0
        data['time_model_aggregation'] = 0.0
        model = None
        nr_aggregated_models = 0
        total_examples = 0
        logger.info('AGGREGATOR({}): Aggregating model updates... '.format(self.name))
        while (not self.model_updates.empty()):
            try:
                (model_next, metadata, model_id) = self.next_model_update(helper)
                logger.info('AGGREGATOR({}): Processing model update {}, metadata: {} '.format(self.name, model_id, metadata))
                total_examples += metadata['num_examples']
                if (nr_aggregated_models == 0):
                    model = model_next
                else:
                    # Running weighted mean, weighted by this update's example count.
                    model = helper.increment_average(model, model_next, metadata['num_examples'], total_examples)
                nr_aggregated_models += 1
                if delete_models:
                    self.modelservice.models.delete(model_id)
                    logger.info('AGGREGATOR({}): Deleted model update {} from storage.'.format(self.name, model_id))
                self.model_updates.task_done()
            except Exception as e:
                # Fixed typo in log message ('encoutered' -> 'encountered').
                logger.error('AGGREGATOR({}): Error encountered while processing model update {}, skipping this update.'.format(self.name, e))
                self.model_updates.task_done()
        data['nr_aggregated_models'] = nr_aggregated_models
        logger.info('AGGREGATOR({}): Aggregation completed, aggregated {} models.'.format(self.name, nr_aggregated_models))
        return (model, data)
def score_function(config, base_args, orig_dir=''):
    """Ray Tune trial function: train a MassFormer model for one config.

    Merges the tuned *config* over *base_args*, builds binned spectral
    datasets and loaders, constructs the model, and fits it with early
    stopping, reporting 'val_loss' to Tune via the callback.
    """
    # Tune runs trials in their own working dirs; restore the original one
    # so relative data paths resolve.
    os.chdir(orig_dir)
    kwargs = copy.deepcopy(base_args)
    kwargs.update(config)
    pl.utilities.seed.seed_everything(kwargs.get('seed'))
    dataset_name = kwargs['dataset_name']
    data_dir = (Path('data/spec_datasets') / dataset_name)
    labels = (data_dir / kwargs['dataset_labels'])
    split_file = ((data_dir / 'splits') / kwargs['split_name'])
    df = pd.read_csv(labels, sep='\t')
    spec_names = df['spec'].values
    (train_inds, val_inds, test_inds) = common.get_splits(spec_names, split_file)
    train_df = df.iloc[train_inds]
    val_df = df.iloc[val_inds]
    num_bins = kwargs.get('num_bins')
    num_workers = kwargs.get('num_workers', 0)
    upper_limit = kwargs.get('upper_limit', 1500)
    train_dataset = massformer_data.BinnedDataset(train_df, data_dir=data_dir, num_bins=num_bins, upper_limit=upper_limit, form_dir_name=kwargs['form_dir_name'])
    val_dataset = massformer_data.BinnedDataset(val_df, data_dir=data_dir, num_bins=num_bins, upper_limit=upper_limit, form_dir_name=kwargs['form_dir_name'])
    collate_fn = train_dataset.get_collate_fn()
    train_loader = DataLoader(train_dataset, num_workers=kwargs['num_workers'], collate_fn=collate_fn, shuffle=True, batch_size=kwargs['batch_size'])
    val_loader = DataLoader(val_dataset, num_workers=kwargs['num_workers'], collate_fn=collate_fn, shuffle=False, batch_size=kwargs['batch_size'])
    logging.info('Building model')
    model = massformer_model.MassFormer(mf_num_ff_num_layers=kwargs['mf_num_ff_num_layers'], mf_ff_h_dim=kwargs['mf_ff_h_dim'], mf_ff_skip=kwargs['mf_ff_skip'], mf_layer_type=kwargs['mf_layer_type'], mf_dropout=kwargs['mf_dropout'], use_reverse=kwargs['use_reverse'], embed_adduct=kwargs['embed_adduct'], gf_model_name=kwargs['gf_model_name'], gf_pretrain_name=kwargs['gf_pretrain_name'], gf_fix_num_pt_layers=kwargs['gf_fix_num_pt_layers'], gf_reinit_num_pt_layers=kwargs['gf_reinit_num_pt_layers'], gf_reinit_layernorm=kwargs['gf_reinit_layernorm'], learning_rate=kwargs['learning_rate'], lr_decay_rate=kwargs['lr_decay_rate'], output_dim=num_bins, upper_limit=upper_limit, weight_decay=kwargs['weight_decay'], loss_fn=kwargs['loss_fn'])
    # Log into the per-trial directory so Tune collects the TensorBoard files.
    tb_logger = pl_loggers.TensorBoardLogger(tune.get_trial_dir(), '', '.')
    tune_callback = nn_utils.TuneReportCallback(['val_loss'])
    val_check_interval = None
    check_val_every_n_epoch = 1
    monitor = 'val_loss'
    earlystop_callback = EarlyStopping(monitor=monitor, patience=10)
    callbacks = [earlystop_callback, tune_callback]
    logging.info('Starting train')
    trainer = pl.Trainer(logger=[tb_logger], accelerator=('gpu' if kwargs['gpu'] else None), devices=(1 if kwargs['gpu'] else None), callbacks=callbacks, gradient_clip_val=5, max_epochs=kwargs['max_epochs'], gradient_clip_algorithm='value', val_check_interval=val_check_interval, check_val_every_n_epoch=check_val_every_n_epoch, enable_checkpointing=False)
    trainer.fit(model, train_loader, val_loader)
class LossWrapper(torch.nn.Module):
    """BCE loss on a predicted 'trust' (confidence) map for depth estimates.

    The trust ground truth marks pixels whose depth estimate is within
    ``threshold`` of the ground-truth depth.  Confidently-correct predictions
    are masked out of the loss, and an optional DTU-specific filter discards
    over/under-exposed regions that also lack ground-truth depth.
    """

    def __init__(self, threshold, DTU_filter=False):
        super().__init__()
        # NOTE(review): hard-codes CUDA for the pooling layers -- confirm a GPU
        # is always available wherever this is constructed.
        gpu = torch.device('cuda')
        self.threshold = threshold
        self.loss = torch.nn.BCELoss()
        self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=1, padding=1).to(gpu)
        self.maxpool_L = torch.nn.MaxPool2d(kernel_size=9, stride=1, padding=4).to(gpu)
        self.DTU_filter = DTU_filter

    def open(self, data):
        # Morphological opening: erosion (min = -maxpool(-x)) then dilation.
        return self.maxpool((- self.maxpool((- data))))

    def close_L(self, data):
        # Large-kernel morphological closing: dilation then erosion.
        return (- self.maxpool_L((- self.maxpool_L(data))))

    def preprocess(self, data):
        # Opening removes isolated trusted pixels from the target map.
        return self.open(data)

    def forward(self, input, color, depth_gt):
        """BCE between the (masked) trust estimate and the derived trust target.

        ``input`` is a pair (depth_estimate, trust_estimate).
        """
        depth_estimate = input[0]
        trust_estimate = input[1]
        # A pixel is 'trustworthy' when its depth error is under the threshold.
        trust_gt = (depth_estimate - depth_gt).abs().lt(self.threshold).float()
        trust_gt = self.preprocess(trust_gt)
        # Already confident AND correct predictions contribute no gradient.
        ignore_mask = (((trust_estimate > 0.95) * (trust_gt == 1)) + ((trust_estimate < 0.05) * (trust_gt == 0)))
        if self.DTU_filter:
            gray_scale = color.mean(dim=1, keepdim=True)
            # Keep pixels that are neither over- nor under-exposed, or have depth GT.
            gt_well_defined = ((((gray_scale <= 250) * (gray_scale >= 24)) + (depth_gt > 0)) > 0).float()
            gt_well_defined = self.close_L(gt_well_defined)
            # Pixels the closing filled in but that still have no GT depth are unknown.
            gt_unknown = (gt_well_defined * (depth_gt <= 0).float())
            gt_well_defined = (gt_well_defined - gt_unknown)
            ignore_mask = ((ignore_mask + (gt_unknown > 0)) > 0)
            trust_gt = (trust_gt * gt_well_defined)
        ignore_mask = ignore_mask.float()
        # Zero out ignored pixels in both target and estimate before the loss.
        trust_gt = (trust_gt * (1 - ignore_mask))
        trust_estimate = (trust_estimate * (1 - ignore_mask))
        return self.loss(trust_estimate, trust_gt)
class FlaxRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject):
    """Import placeholder used when the Flax backend is not installed."""
    _backends = ['flax']

    def __init__(self, *args, **kwargs):
        # Raises an informative ImportError unless 'flax' is available.
        requires_backends(self, ['flax'])
class Options():
    """Mutable collection of positional flags and key/value options that
    renders to a command-line string.

    Mutators return ``self`` so calls can be chained, e.g.
    ``Options().set('train', lr=0.1)``.
    """

    def __init__(self):
        self.args = []        # positional arguments, emitted verbatim
        self.kvs = {}         # mapping rendered as "--key value"
        self.tag_str = None   # free-form label; not part of str()

    def set(self, *args, **kwargs):
        """Append positional args and merge keyword options; chainable."""
        self.args.extend(args)
        self.kvs.update(kwargs)
        return self

    def remove(self, *args):
        """Drop each name from both the positional list and the kv map."""
        for name in args:
            if name in self.args:
                self.args.remove(name)
            if name in self.kvs:
                del self.kvs[name]
        return self

    def update(self, opt):
        """Merge another Options instance into this one; chainable."""
        self.args += opt.args
        self.kvs.update(opt.kvs)
        return self

    def __str__(self):
        # Positional args first, then each kv pair as "--key value".
        rendered = [' '.join(self.args)]
        rendered += ['--{} {}'.format(k, v) for (k, v) in self.kvs.items()]
        return ' '.join(rendered)

    def clone(self):
        """Shallow copy: args/kvs copied one level deep, tag shared."""
        twin = Options()
        twin.args = list(self.args)
        twin.kvs = dict(self.kvs)
        twin.tag_str = self.tag_str
        return twin

    def specify(self, *args, **kwargs):
        """Return a clone with the extra args/kwargs applied."""
        return self.clone().set(*args, **kwargs)

    def tag(self, tag):
        """Attach a label to this instance; chainable."""
        self.tag_str = tag
        return self
class Inception(nn.Module):
    """GoogLeNet-style inception block with four parallel branches:
    1x1 conv, 1x1->3x3, 1x1->3x3->3x3 (5x5 substitute), and pool->1x1.
    Branch outputs are concatenated along the channel dimension.
    """

    def __init__(self, in_planes, n1x1, n3x3red, n3x3, n5x5red, n5x5, pool_planes):
        super(Inception, self).__init__()

        def conv_unit(cin, cout, ksize, pad=0):
            # The repeated conv -> batchnorm -> LeakyReLU building block.
            # NOTE(review): LeakyReLU(True) sets negative_slope=True (i.e. 1.0),
            # not inplace=True -- preserved as-is, confirm original intent.
            return [nn.Conv2d(cin, cout, kernel_size=ksize, padding=pad),
                    nn.BatchNorm2d(cout),
                    nn.LeakyReLU(True)]

        self.b1 = nn.Sequential(*conv_unit(in_planes, n1x1, 1))
        self.b2 = nn.Sequential(*(conv_unit(in_planes, n3x3red, 1)
                                  + conv_unit(n3x3red, n3x3, 3, pad=1)))
        self.b3 = nn.Sequential(*(conv_unit(in_planes, n5x5red, 1)
                                  + conv_unit(n5x5red, n5x5, 3, pad=1)
                                  + conv_unit(n5x5, n5x5, 3, pad=1)))
        self.b4 = nn.Sequential(nn.MaxPool2d(3, stride=1, padding=1),
                                *conv_unit(in_planes, pool_planes, 1))

    def forward(self, x):
        # Run all four branches on the same input and stack their channels.
        branches = (self.b1, self.b2, self.b3, self.b4)
        return torch.cat([branch(x) for branch in branches], 1)
class S2TTransformerEncoder(FairseqEncoder):
    """Speech-to-text Transformer encoder: Conv1d subsampling of input
    features followed by a stack of Transformer layers."""

    def __init__(self, args):
        super().__init__(None)
        # While num_updates < this, the encoder runs under torch.no_grad() (frozen).
        self.encoder_freezing_updates = args.encoder_freezing_updates
        self.num_updates = 0
        self.dropout_module = FairseqDropout(p=args.dropout, module_name=self.__class__.__name__)
        # Standard Transformer embedding scale sqrt(d_model), optionally disabled.
        self.embed_scale = math.sqrt(args.encoder_embed_dim)
        if args.no_scale_embedding:
            self.embed_scale = 1.0
        self.padding_idx = 1
        # Convolutional subsampler reduces the time resolution of the features.
        self.subsample = Conv1dSubsampler((args.input_feat_per_channel * args.input_channels), args.conv_channels, args.encoder_embed_dim, [int(k) for k in args.conv_kernel_sizes.split(',')])
        self.embed_positions = PositionalEmbedding(args.max_source_positions, args.encoder_embed_dim, self.padding_idx)
        self.transformer_layers = nn.ModuleList([TransformerEncoderLayer(args) for _ in range(args.encoder_layers)])
        if args.encoder_normalize_before:
            self.layer_norm = LayerNorm(args.encoder_embed_dim)
        else:
            self.layer_norm = None

    def _forward(self, src_tokens, src_lengths, return_all_hiddens=False):
        """Core forward pass; returns the fairseq encoder-out dictionary."""
        (x, input_lengths) = self.subsample(src_tokens, src_lengths)
        x = (self.embed_scale * x)
        encoder_padding_mask = lengths_to_padding_mask(input_lengths)
        positions = self.embed_positions(encoder_padding_mask).transpose(0, 1)
        x += positions
        x = self.dropout_module(x)
        encoder_states = []
        for layer in self.transformer_layers:
            x = layer(x, encoder_padding_mask)
            if return_all_hiddens:
                encoder_states.append(x)
        if (self.layer_norm is not None):
            x = self.layer_norm(x)
        # Empty padding-mask list signals "no padding" to downstream consumers.
        return {'encoder_out': [x], 'encoder_padding_mask': ([encoder_padding_mask] if encoder_padding_mask.any() else []), 'encoder_embedding': [], 'encoder_states': encoder_states, 'src_tokens': [], 'src_lengths': []}

    def forward(self, src_tokens, src_lengths, return_all_hiddens=False):
        # Freeze the encoder (no gradients) for the first
        # encoder_freezing_updates optimizer steps.
        if (self.num_updates < self.encoder_freezing_updates):
            with torch.no_grad():
                x = self._forward(src_tokens, src_lengths, return_all_hiddens=return_all_hiddens)
        else:
            x = self._forward(src_tokens, src_lengths, return_all_hiddens=return_all_hiddens)
        return x

    def reorder_encoder_out(self, encoder_out, new_order):
        """Reorder encoder output along the batch dim for beam search.

        Note the batch dim differs per tensor: dim 1 for time-major
        encoder_out/states, dim 0 for the padding mask and embeddings.
        """
        new_encoder_out = ([] if (len(encoder_out['encoder_out']) == 0) else [x.index_select(1, new_order) for x in encoder_out['encoder_out']])
        new_encoder_padding_mask = ([] if (len(encoder_out['encoder_padding_mask']) == 0) else [x.index_select(0, new_order) for x in encoder_out['encoder_padding_mask']])
        new_encoder_embedding = ([] if (len(encoder_out['encoder_embedding']) == 0) else [x.index_select(0, new_order) for x in encoder_out['encoder_embedding']])
        encoder_states = encoder_out['encoder_states']
        if (len(encoder_states) > 0):
            for (idx, state) in enumerate(encoder_states):
                encoder_states[idx] = state.index_select(1, new_order)
        return {'encoder_out': new_encoder_out, 'encoder_padding_mask': new_encoder_padding_mask, 'encoder_embedding': new_encoder_embedding, 'encoder_states': encoder_states, 'src_tokens': [], 'src_lengths': []}

    def set_num_updates(self, num_updates):
        """Track trainer updates so the freezing window can expire."""
        super().set_num_updates(num_updates)
        self.num_updates = num_updates
class Server(object):
    """Inference server for a genienlp model.

    Accepts JSON requests (one per line) over TCP or stdin, runs the model,
    and writes JSON responses.  Each request may override generation options
    and specify a task; results optionally include confidence scores from
    calibrator estimators.
    """

    def __init__(self, args, model, device, confidence_estimators, estimator_filenames, ned_model):
        self.args = args
        self.device = device
        self.numericalizer = model.numericalizer
        self.model = model
        self.confidence_estimators = confidence_estimators
        self.estimator_filenames = estimator_filenames
        self.ned_model = ned_model
        # task name -> task object, so repeated requests skip task construction
        self._cached_task_names = dict()

    def numericalize_examples(self, ex):
        """Convert Examples into a single collated batch on the target device."""
        all_features = NumericalizedExamples.from_examples(ex, self.numericalizer)
        return NumericalizedExamples.collate_batches(all_features, self.numericalizer, device=self.device)

    def _init_request(self, request):
        """Resolve per-request generation options and task.

        Returns ``(task, args)`` where *args* is a copy of the server args
        with any valid request-level overrides applied.
        """
        args = copy.deepcopy(self.args)
        generation_options = request.get('options', {})
        for (k, v) in generation_options.items():
            # Only whitelisted generation options may be overridden per request.
            if (k not in GENERATION_ARGUMENTS):
                logger.warning(f'{k} is not a generation option and cannot be overridden')
                continue
            setattr(args, k, v)
        if (hasattr(args, 'src_locale') and hasattr(args, 'tgt_locale')):
            (src_locale, tgt_locale) = adjust_language_code(self.model.config, self.args.pretrained_model, args.src_locale, args.tgt_locale)
            self.numericalizer.update_language_dependent_properties(src_locale, tgt_locale)
            self.model.update_language_dependent_configs(tgt_locale)
        task_name = (request['task'] if ('task' in request) else 'generic')
        task = list(get_tasks([task_name], args, self._cached_task_names).values())[0]
        if (task_name not in self._cached_task_names):
            self._cached_task_names[task_name] = task
        return (task, args)

    def _numericalize_request(self, request, task, args):
        """Turn a request's instances into a numericalized batch.

        A request without 'instances' is treated as a single instance built
        from its top-level context/question/answer fields.
        """
        if ('instances' not in request):
            request['instances'] = [{'example_id': request.get('example_id', ''), 'context': request['context'], 'question': request['question'], 'answer': request.get('answer', '')}]
        examples = []
        for instance in request['instances']:
            (example_id, context, question, answer) = (instance.get('example_id', ''), instance['context'], instance['question'], instance.get('answer', ''))
            # Fall back to the task's defaults for empty fields.
            if (not context):
                context = task.default_context
            if (not question):
                question = task.default_question
            ex = Example.from_raw(str(example_id), context, question, answer, preprocess=task.preprocess_field, lower=args.lower)
            examples.append(ex)
        # Optional named-entity disambiguation pass over the raw examples.
        if self.ned_model:
            self.ned_model.process_examples(examples, None, task.utterance_field)
        self.model.add_new_vocab_from_data([task])
        self.model.set_generation_output_options([task])
        return self.numericalize_examples(examples)

    def _predict_batch(self, batch, task, args):
        """Run the model on one batch and shape the response.

        With calibrators, each answer carries a per-estimator 'score' map;
        with multiple outputs per input, answers are wrapped in 'candidates'.
        """
        if (args.calibrator_paths is not None):
            output = self.model.validate([batch], task, output_predictions_only=True, confidence_estimators=self.confidence_estimators)
            response = []
            if (sum(args.num_outputs) > 1):
                for (idx, predictions) in enumerate(output.predictions):
                    candidates = []
                    for cand in predictions:
                        candidate = {'answer': cand, 'score': {}}
                        for (e_idx, estimator_scores) in enumerate(output.confidence_scores):
                            candidate['score'][self.estimator_filenames[e_idx]] = float(estimator_scores[idx])
                        candidates.append(candidate)
                    response.append({'candidates': candidates})
            else:
                for (idx, p) in enumerate(output.predictions):
                    instance = {'answer': p[0], 'score': {}}
                    for (e_idx, estimator_scores) in enumerate(output.confidence_scores):
                        instance['score'][self.estimator_filenames[e_idx]] = float(estimator_scores[idx])
                    response.append(instance)
        else:
            output = self.model.validate([batch], task, output_predictions_only=True)
            if (sum(args.num_outputs) > 1):
                response = []
                for (idx, predictions) in enumerate(output.predictions):
                    candidates = []
                    for cand in predictions:
                        candidates.append({'answer': cand})
                    response.append({'candidates': candidates})
            else:
                response = [{'answer': p[0]} for p in output.predictions]
        return response

    def handle_request(self, request):
        """Full pipeline for one parsed request; returns the response list."""
        try:
            with torch.no_grad():
                (task, args) = self._init_request(request)
                batch = self._numericalize_request(request, task, args)
                response = self._predict_batch(batch, task, args)
        except RuntimeError as e:
            # CUDA errors leave the process in an unusable state; exit with a
            # distinctive code so a supervisor can restart the server.
            if ('CUDA error' in str(e)):
                exit(100)
            else:
                raise e
        return response

    def handle_json_request(self, line: str) -> str:
        """Parse one JSON request line and return the JSON response line."""
        request = json.loads(line)
        if ('instances' in request):
            return (json.dumps({'id': request['id'], 'instances': self.handle_request(request)}) + '\n')
        else:
            # Single-instance requests get a flat response with the id inlined.
            response = self.handle_request(request)
            assert (len(response) == 1)
            response = response[0]
            response['id'] = request['id']
            return (json.dumps(response, ensure_ascii=False) + '\n')

    async def handle_client(self, client_reader, client_writer):
        """Serve one TCP client: one JSON request per line until EOF."""
        try:
            line = (await client_reader.readline())
            while line:
                client_writer.write(self.handle_json_request(line).encode('utf-8'))
                line = (await client_reader.readline())
        except IOError:
            logger.info('Connection to client_reader closed')
            try:
                client_writer.close()
            except IOError:
                pass

    def _run_tcp(self):
        """Run the asyncio TCP server until interrupted."""
        loop = asyncio.get_event_loop()
        server = loop.run_until_complete(asyncio.start_server(self.handle_client, port=self.args.port))
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        server.close()
        loop.run_until_complete(server.wait_closed())
        loop.close()

    def _run_stdin(self):
        """Serve requests from stdin, one JSON line per request."""
        try:
            while True:
                line = sys.stdin.readline()
                if (not line):
                    break
                sys.stdout.write(self.handle_json_request(line))
                sys.stdout.flush()
        except KeyboardInterrupt:
            pass

    def run(self):
        """Move the model to the device, switch to eval mode, and serve."""
        log_model_size(logger, self.model, self.args.model)
        self.model.to(self.device)
        self.model.eval()
        if self.args.stdin:
            self._run_stdin()
        else:
            self._run_tcp()
class LearningSwitch(object):
    """POX L2 learning switch for one OpenFlow connection.

    Learns source MAC -> port mappings from incoming packets, floods
    unknowns (after an optional hold-down delay), and installs exact-match
    flows once the destination port is known.
    """

    def __init__(self, connection, transparent):
        self.connection = connection
        # In transparent mode, LLDP and bridge-filtered traffic is not dropped.
        self.transparent = transparent
        # Learned MAC address -> switch port table.
        self.macToPort = {}
        connection.addListeners(self)
        # With no flood delay configured, flooding is allowed immediately.
        self.hold_down_expired = (_flood_delay == 0)
        log.debug('Initializing LearningSwitch, transparent=%s', str(self.transparent))

    def _handle_PacketIn(self, event):
        """Learn the source port, then forward, flood, or drop the packet."""
        packet = event.parsed

        def flood(message=None):
            # Flood the packet out all ports, unless still in the hold-down window.
            msg = of.ofp_packet_out()
            if ((time.time() - self.connection.connect_time) >= _flood_delay):
                if (self.hold_down_expired is False):
                    # Log the transition out of hold-down exactly once.
                    self.hold_down_expired = True
                    log.info('%s: Flood hold_down expired -- flooding', dpid_to_str(event.dpid))
                if (message is not None):
                    log.debug(message)
                log.debug(('%i: flood %s -> %s' % (event.dpid, packet.src, packet.dst)))
                action = of.ofp_action_output(port=of.OFPP_FLOOD)
                msg.actions.append(action)
            else:
                log.info(('Holding down flood for %s' % dpid_to_str(event.dpid)))
                pass
            msg.data = event.ofp
            msg.in_port = event.port
            self.connection.send(msg)

        def drop(duration=None):
            # Drop this packet; with a duration, also install a flow that
            # keeps dropping matching packets for (idle, hard) seconds.
            if (duration is not None):
                if (not isinstance(duration, tuple)):
                    duration = (duration, duration)
                msg = of.ofp_flow_mod()
                msg.match = of.ofp_match.from_packet(packet)
                msg.idle_timeout = duration[0]
                msg.hard_timeout = duration[1]
                msg.buffer_id = event.ofp.buffer_id
                msg.in_port = event.port
                self.connection.send(msg)
            elif (event.ofp.buffer_id is not None):
                # No flow: just tell the switch to discard the buffered packet.
                msg = of.ofp_packet_out()
                msg.buffer_id = event.ofp.buffer_id
                msg.in_port = event.port
                self.connection.send(msg)
        # Learn (or refresh) the port for the packet's source MAC.
        self.macToPort[packet.src] = event.port
        if (not self.transparent):
            if ((packet.type == packet.LLDP_TYPE) or packet.dst.isBridgeFiltered()):
                drop()
                return
        if packet.dst.is_multicast:
            flood()
        elif (packet.dst not in self.macToPort):
            flood(('Port from %s unknown -- flooding' % packet.dst))
        else:
            port = self.macToPort[packet.dst]
            if (port == event.port):
                # Destination apparently behind the input port: likely a loop
                # or stale entry; drop briefly rather than reflect the packet.
                log.warning(('Same port for packet from %s -> %s on %s.%s. Drop.' % (packet.src, packet.dst, dpid_to_str(event.dpid), port)))
                drop(10)
                return
            # Known destination on another port: install an exact-match flow.
            log.debug(('installing flow for %s.%i -> %s.%i' % (packet.src, event.port, packet.dst, port)))
            msg = of.ofp_flow_mod()
            msg.match = of.ofp_match.from_packet(packet, event.port)
            msg.idle_timeout = 10
            msg.hard_timeout = 30
            action = of.ofp_action_output(port=port)
            msg.actions.append(action)
            msg.data = event.ofp
            self.connection.send(msg)
def __create_source_from_ast(module_body: ast.stmt) -> str:
    """Render a single AST statement back into Python source text."""
    # Wrap the statement in a Module, repair missing line/col info, unparse.
    module = ast.Module(body=[module_body], type_ignores=[])
    return ast.unparse(ast.fix_missing_locations(module))
def keypoint_losses(kps_pred, keypoint_locations_int32, keypoint_weights, keypoint_loss_normalizer=None):
    """Weighted cross-entropy loss over keypoint heatmaps (Mask R-CNN style).

    Targets and weights arrive as numpy arrays and are moved to the same GPU
    as the predictions.
    """
    device_id = kps_pred.get_device()
    targets = Variable(torch.from_numpy(keypoint_locations_int32.astype('int64'))).cuda(device_id)
    weights = Variable(torch.from_numpy(keypoint_weights)).cuda(device_id)
    # Per-keypoint CE over the flattened heatmap, then a weighted mean.
    per_keypoint = F.cross_entropy(kps_pred.view(-1, cfg.KRCNN.HEATMAP_SIZE ** 2), targets, reduce=False)
    loss = torch.sum(per_keypoint * weights) / torch.sum(weights)
    loss *= cfg.KRCNN.LOSS_WEIGHT
    if not cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS:
        # Rescale by the precomputed normalizer when not averaging over visible kps.
        loss *= keypoint_loss_normalizer.item()
    return loss
class AutoContrast(BaseAugmentation):
    """Augmentation that maximizes image contrast via PIL's autocontrast."""

    def _augment(self, img):
        # Remaps the histogram so the darkest pixels become black and the
        # lightest become white.
        return ImageOps.autocontrast(img)
def seconds_to_tokens(sec, sr, prior, chunk_size):
    """Convert a duration in seconds to a token count for the given prior.

    The raw sample count ``sec * sr`` is divided by the prior's
    raw-samples-per-token ratio, then rounded up to a whole number of chunks.

    Fix: uses the ``sr`` parameter instead of the global ``hps.sr`` the
    parameter was shadowing.
    """
    tokens = (sec * sr) // prior.raw_to_tokens
    # Round up to the next multiple of chunk_size.
    tokens = ((tokens // chunk_size) + 1) * chunk_size
    assert tokens <= prior.n_ctx, 'Choose a shorter generation length to stay within the top prior context'
    return tokens
def main():
    """Render every */*.obj under the directory given as the last CLI argument,
    once per configured camera, using Blender's render pipeline."""
    # Fix: vis_dir was redundantly assigned twice.
    vis_dir = sys.argv[-1]
    print('visualizing {}'.format(vis_dir))
    obj_files = glob(os.path.join(vis_dir, '*/*.obj'))
    obj_files.sort()
    for obj_file in obj_files:
        print(obj_file)
        for (camera_id, camera) in enumerate(cameras):
            (obj_dir, obj_fn) = os.path.split(obj_file)
            # Output files are prefixed with the object name plus a zero-padded camera id.
            prefix = (obj_fn.split('.')[0] + '_{:0>4}_'.format(camera_id))
            output_path = os.path.join(obj_dir, prefix)
            initialize(camera, output_path)
            obj = load_obj(obj_file)
            bpy.ops.render.render(animation=True)
class JBluesDiluteBlackBody(ProcessingPlasmaProperty):
    """Mean intensity in the blue wing of each line, modelled as a diluted
    black body: J_blue = W * B_nu(T_rad)."""
    outputs = ('j_blues',)
    latex_name = 'J_{\\textrm{blue}}'

    # NOTE(review): signature lacks `self`; presumably the plasma framework
    # invokes calculate() with inputs resolved by name -- confirm against
    # ProcessingPlasmaProperty's calling convention.
    def calculate(lines, nu, t_rad, w):
        # Dilution factor w scales the Planck function, evaluated for every
        # line frequency (rows) across all radial zones (columns).
        j_blues = (w * intensity_black_body(nu.values[np.newaxis].T, t_rad))
        j_blues = pd.DataFrame(j_blues, index=lines.index, columns=np.arange(len(t_rad)))
        return j_blues
(resources={'machine': 1})
def ray_allgather(args_dict, notification_address, world_size, world_rank, object_size):
    """Benchmark an allgather over the Ray object store.

    Each rank puts one float32 array under a deterministic object id, waits
    at a barrier, then fetches every rank's object and reports the elapsed
    time before freeing the objects.
    """
    # Deterministic 20-byte object id derived from the seed and this rank.
    object_id = ray.ObjectID(str((args_dict['seed'] + world_rank)).encode().rjust(20, b'\x00'))
    # object_size is in bytes; float32 is 4 bytes per element.
    array = np.random.rand((object_size // 4)).astype(np.float32)
    ray.worker.global_worker.put_object(array, object_id=object_id)
    # NOTE(review): notification_port is not a parameter -- presumably a
    # module-level global; confirm it is defined where this runs.
    barrier(notification_address, notification_port, world_size)
    object_ids = []
    for i in range(0, world_size):
        object_ids.append(ray.ObjectID(str((args_dict['seed'] + i)).encode().rjust(20, b'\x00')))
    start = time.time()
    gather_result = []  # NOTE(review): never appended to; fetched values are discarded.
    for object_id in object_ids:
        ray.get(object_id)
    duration = (time.time() - start)
    hash_sum = 0  # NOTE(review): stays 0 -- the printed hash is a placeholder.
    print('Allgather completed, hash =', hash_sum, 'duration =', duration)
    ray.internal.free(object_ids)
_module()
class AOTBlockNeck(nn.Module):
    """Neck made of chained AOT blocks (AOT-GAN inpainting).

    Args:
        in_channels: channel count kept constant through the stack.
        dilation_rates: dilation rates used inside every AOTBlock.
        num_aotblock: number of AOTBlocks to chain.
        act_cfg: activation config forwarded to each block.
    """

    def __init__(self, in_channels=256, dilation_rates=(1, 2, 4, 8), num_aotblock=8, act_cfg=dict(type='ReLU'), **kwargs):
        super().__init__()
        self.dilation_rates = list(dilation_rates)
        blocks = [
            AOTBlock(in_channels=in_channels, dilation_rates=self.dilation_rates, act_cfg=act_cfg)
            for _ in range(num_aotblock)
        ]
        self.model = nn.Sequential(*blocks)

    def forward(self, x):
        # Plain feed-forward pass through the chained blocks.
        return self.model(x)
def _get_sumo_net(cfg_file):
    """Parse a SUMO config file and load the road network it references.

    Returns the parsed net object, or None when the config contains no
    ``<net-file>`` element.
    """
    cfg_file = os.path.join(os.getcwd(), cfg_file)
    tree = ET.parse(cfg_file)
    # './/net-file' searches all descendants; the previous '//net-file'
    # spelling is unsupported ElementTree XPath (emits a FutureWarning and
    # is interpreted as './/' anyway).
    tag = tree.find('.//net-file')
    if (tag is None):
        return None
    # The net file path is resolved relative to the config file's directory.
    net_file = os.path.join(os.path.dirname(cfg_file), tag.get('value'))
    logging.debug('Reading net file: %s', net_file)
    sumo_net = traci.sumolib.net.readNet(net_file)
    return sumo_net
def pretty_time(orig_seconds):
    """Format a duration in seconds as a compact string like ``1d2h3m4s``.

    Zero-valued units are omitted. When everything rounds away, an exact
    zero renders as ``0s`` and a small positive duration as ``<0s``.
    """
    total = round(orig_seconds)
    days, rem = divmod(total, 86400)
    hours, rem = divmod(rem, 3600)
    minutes, secs = divmod(rem, 60)
    parts = [
        f'{value}{suffix}'
        for value, suffix in ((days, 'd'), (hours, 'h'), (minutes, 'm'))
        if value > 0
    ]
    if secs > 0:
        parts.append(f'{secs}s')
    elif not parts:
        # Nothing rendered at all: distinguish an exact zero from a
        # sub-second duration that rounded down to zero.
        parts.append('0s' if orig_seconds == 0 else '<0s')
    return ''.join(parts)
def wrap_fp16_model(model):
    """Prepare *model* for fp16 execution on parrots or torch < 1.6.

    On those builds the weights are halved and norm layers patched back to
    fp32; on newer torch the weights are left untouched. Any module that
    exposes an ``fp16_enabled`` attribute has it switched on either way.
    """
    needs_manual_half = (
        (TORCH_VERSION == 'parrots')
        or (digit_version(TORCH_VERSION) < digit_version('1.6.0'))
    )
    if needs_manual_half:
        model.half()
        # Keep normalization layers in fp32 for numerical stability.
        patch_norm_fp32(model)
    for module in model.modules():
        if hasattr(module, 'fp16_enabled'):
            module.fp16_enabled = True
def kldiv(x, xp, k=3, base=2):
    """k-nearest-neighbour estimate of the KL divergence D(x || xp).

    x, xp: collections of d-dimensional samples from the two distributions
    (must share the same dimensionality).
    k: which neighbour distance to use; must be < len(x) and < len(xp).
    base: logarithm base of the returned divergence.
    """
    assert (k <= (len(x) - 1)), 'Set k smaller than num. samples - 1'
    assert (k <= (len(xp) - 1)), 'Set k smaller than num. samples - 1'
    assert (len(x[0]) == len(xp[0])), 'Two distributions must have same dim.'
    (x, xp) = to_np_array(x, xp)
    d = len(x[0])
    n = len(x)
    m = len(xp)
    # Sample-size bias correction term of the k-NN divergence estimator.
    const = (log(m) - log((n - 1)))
    tree = ss.cKDTree(x)
    treep = ss.cKDTree(xp)
    # Distance to the k-th neighbour of each x point within x itself; k+1 is
    # queried because the nearest hit is the query point (Chebyshev metric).
    nn = [tree.query(point, (k + 1), p=float('inf'))[0][k] for point in x]
    # Distance to the k-th neighbour of each x point within xp (no self-match).
    nnp = [treep.query(point, k, p=float('inf'))[0][(k - 1)] for point in x]
    return (((const + (d * np.mean(np.log(nnp)))) - (d * np.mean(np.log(nn)))) / log(base))
@pytest.mark.parametrize('knn_methods', knn_methods)
def test_lca(knn_methods):
    """Fit LCA with each kNN backend and check its test-set score.

    NOTE(review): the decorator line was mangled to ``.parametrize(...)``;
    ``@pytest.mark.parametrize`` is restored here. The expected score ``0.``
    also looks truncated — confirm the full constant against upstream.
    """
    (pool_classifiers, X_dsel, y_dsel, X_test, y_test) = setup_classifiers()
    lca = LCA(pool_classifiers, knn_classifier=knn_methods)
    lca.fit(X_dsel, y_dsel)
    assert np.isclose(lca.score(X_test, y_test), 0.)
def DistributedFairseqModel(args, model):
    """Wrap *model* for distributed data-parallel training.

    Chooses the DDP implementation from ``args.ddp_backend`` ('c10d' or
    'no_c10d') and returns a wrapper that transparently forwards attribute
    lookups to the wrapped module.

    Raises:
        ValueError: for an unknown ``--ddp-backend`` value.
    """
    assert isinstance(model, nn.Module)
    if (args.ddp_backend == 'c10d'):
        ddp_class = nn.parallel.DistributedDataParallel
        init_kwargs = dict(module=model, device_ids=[args.device_id], output_device=args.device_id, broadcast_buffers=False, bucket_cap_mb=args.bucket_cap_mb)
        # Only pass optional DDP kwargs this torch version supports.
        # inspect.getargspec was removed in Python 3.11; getfullargspec
        # returns the same positional-arg name list. Hoisted out of the two
        # membership tests since it is invariant.
        ddp_init_args = inspect.getfullargspec(ddp_class)[0]
        if ('check_reduction' in ddp_init_args):
            init_kwargs['check_reduction'] = True
        if ('find_unused_parameters' in ddp_init_args):
            init_kwargs['find_unused_parameters'] = args.find_unused_parameters
    elif (args.ddp_backend == 'no_c10d'):
        ddp_class = LegacyDistributedDataParallel
        init_kwargs = dict(module=model, world_size=args.distributed_world_size, buffer_size=(2 ** 28))
    else:
        raise ValueError(('Unknown --ddp-backend: ' + args.ddp_backend))

    class _DistributedFairseqModel(ddp_class):
        """DDP subclass that falls back to the wrapped module's attributes."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def __getattr__(self, name):
            wrapped_module = super().__getattr__('module')
            if hasattr(wrapped_module, name):
                return getattr(wrapped_module, name)
            return super().__getattr__(name)

    return _DistributedFairseqModel(**init_kwargs)
def process_line(line, data_folder, language, accented_letters):
    """Convert one tab-separated CommonVoice TSV record into a CVRow.

    line: TSV record (client_id, mp3 relative path, sentence, ...).
    data_folder: CommonVoice root containing the ``clips`` directory.
    language: ISO code selecting language-specific text preprocessing.
    accented_letters: keep accents when True, otherwise strip them.

    Returns a CVRow, or None when the clip is missing or the transcript is
    too short to be useful.
    """
    mp3_path = ((data_folder + '/clips/') + line.split('\t')[1])
    file_name = mp3_path.split('.')[(- 2)].split('/')[(- 1)]
    spk_id = line.split('\t')[0]
    snt_id = file_name
    # mp3 decoding requires torchaudio's sox_io backend.
    if (torchaudio.get_audio_backend() != 'sox_io'):
        logger.warning('This recipe needs the sox-io backend of torchaudio')
        logger.warning('The torchaudio backend is changed to sox_io')
        torchaudio.set_audio_backend('sox_io')
    if os.path.isfile(mp3_path):
        info = read_audio_info(mp3_path)
    else:
        # Fix: log the missing file itself; the original logged
        # len(file_name), printing a number instead of a name.
        msg = ('\tError loading: %s' % str(file_name))
        logger.info(msg)
        return None
    duration = (info.num_frames / info.sample_rate)
    words = line.split('\t')[2]
    words = unicode_normalisation(words)
    words = language_specific_preprocess(language, words)
    if (not accented_letters):
        words = strip_accents(words)
    # Map apostrophes (ASCII ' and U+2019) to spaces. The second pattern had
    # been mangled to the empty string, which would have space-separated
    # every character of the transcript.
    words = words.replace("'", ' ')
    words = words.replace('\u2019', ' ')
    words = re.sub(' +', ' ', words)
    words = words.lstrip().rstrip()
    chars = words.replace(' ', '_')
    chars = ' '.join([char for char in chars][:])
    # Discard transcripts that are too short: by characters for ja/ch,
    # by words otherwise.
    if (language in ['ja', 'ch']):
        if (len(chars) < 3):
            return None
    elif (len(words.split(' ')) < 3):
        return None
    return CVRow(snt_id, duration, mp3_path, spk_id, words)
def _replace_ref_nodes_with_names(model: models.Model, model_list: List[optplan.ProblemGraphNode]) -> None:
    """Replace every node-reference field in *model* with the node's name.

    String fields are assumed to already be names and are kept; model
    objects are located in *model_list* and swapped for their ``name``.
    """

    def process_field(model: models.Model, child_model: models.Model) -> str:
        # Already a name — nothing to resolve.
        if isinstance(child_model, str):
            return child_model
        position = model_list.index(child_model)
        return model_list[position].name

    _iter_optplan_fields(model, set(), process_field)
def generate_task23(dataset, output, sampling_rate):
    """Generate phase-window labels for tasks 2/3 and write them to CSV.

    For every pick in every trace that has enough clearance from the
    neighbouring picks, a window of ``10 * sampling_rate`` samples
    containing the pick is selected and recorded. Rows are written to
    ``output / 'task23.csv'``.
    """
    np.random.seed(42)
    windowlen = (10 * sampling_rate)
    labels = []
    for idx in tqdm(range(len(dataset)), total=len(dataset)):
        (waveforms, metadata) = dataset.get_sample(idx)
        if ('split' in metadata):
            trace_split = metadata['split']
        else:
            trace_split = ''

        def checkphase(metadata, phase, npts):
            # A usable pick: present, not NaN, and inside the waveform.
            return ((phase in metadata) and (not np.isnan(metadata[phase])) and (0 <= metadata[phase] < npts))

        # (onset sample, label, full phase name) sorted by onset time.
        arrivals = sorted([(metadata[phase], phase_label, phase.split('_')[1]) for (phase, phase_label) in phase_dict.items() if checkphase(metadata, phase, waveforms.shape[(- 1)])])
        if (len(arrivals) == 0):
            continue
        for (i, (onset, phase, full_phase)) in enumerate(arrivals):
            # Isolation interval bounded by neighbouring picks plus a
            # 0.5 s safety margin on each side.
            if (i == 0):
                onset_before = 0
            else:
                onset_before = (int(arrivals[(i - 1)][0]) + int((0.5 * sampling_rate)))
            if (i == (len(arrivals) - 1)):
                onset_after = np.inf
            else:
                onset_after = (int(arrivals[(i + 1)][0]) - int((0.5 * sampling_rate)))
            # Skip picks whose isolation interval cannot hold a full window
            # or does not actually contain the pick.
            if (((onset_after - onset_before) < windowlen) or (onset_before > onset) or (onset_after < onset)):
                continue
            else:
                onset_after = min(onset_after, waveforms.shape[(- 1)])
                (start_sample, end_sample) = select_window_containing((onset_after - onset_before), windowlen=windowlen, containing=(onset - onset_before), bounds=(50, 50))
                # Shift from interval-local back to trace coordinates.
                start_sample += onset_before
                end_sample += onset_before
                if ((end_sample - start_sample) <= windowlen):
                    sample = {'trace_name': metadata['trace_name'], 'trace_idx': idx, 'trace_split': trace_split, 'sampling_rate': sampling_rate, 'start_sample': start_sample, 'end_sample': end_sample, 'phase_label': phase, 'full_phase_label': full_phase, 'phase_onset': onset}
                    labels += [sample]
    labels = pd.DataFrame(labels)
    # Drop degenerate windows shorter than 100 samples.
    diff = (labels['end_sample'] - labels['start_sample'])
    labels = labels[(diff > 100)]
    labels.to_csv((output / 'task23.csv'), index=False)
def pad_same(x, k, s, d=(1, 1), value=0):
    """Pad the last two dims of *x* so a conv with kernel *k*, stride *s*
    and dilation *d* produces TF-style 'SAME' output (asymmetric padding,
    extra pixel goes to the bottom/right)."""
    ih, iw = x.size()[-2:]
    pad_h = get_same_padding(ih, k[0], s[0], d[0])
    pad_w = get_same_padding(iw, k[1], s[1], d[1])
    if pad_h > 0 or pad_w > 0:
        left = pad_w // 2
        top = pad_h // 2
        x = F.pad(x, [left, pad_w - left, top, pad_h - top], value=value)
    return x
def save_file(data, path, verbose=False):
    """Serialize *data* to *path*, creating parent directories as needed.

    The format is chosen by extension: '.pkl' (pickle protocol 2) or
    '.json' (pretty-printed, sorted keys, trailing newline). Any other
    extension is silently ignored, matching the original behavior.
    """
    parent = os.path.dirname(path)
    # A bare filename has no parent component; os.makedirs('') would raise.
    if parent:
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(parent, exist_ok=True)
    if verbose:
        print('Saving: {}'.format(path))
    (_, ext) = os.path.splitext(path)
    if (ext == '.pkl'):
        with open(path, 'wb') as f:
            pickle.dump(data, f, protocol=2)
    elif (ext == '.json'):
        with open(path, 'w') as f:
            json.dump(data, f, indent=4, separators=(',', ': '), sort_keys=True)
            f.write('\n')
def resnetv2sn152(**kwargs):
    """Construct a ResNetV2-SN-152 (bottleneck stage depths 3-8-36-3)."""
    return ResNetV2SN(Bottleneck, [3, 8, 36, 3], **kwargs)
(plot=False, auto=True)
def auto_td3_benchmarks():
    """Run the garage-TF TD3 benchmark on every MuJoCo-1M environment,
    with Reacher-v2 excluded."""
    env_ids = [eid for eid in MuJoCo1M_ENV_SET if eid != 'Reacher-v2']
    iterate_experiments(td3_garage_tf, env_ids)
class XLMProphetNetTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for XLM-ProphetNet.

    Ids 0-4 are the fairseq special tokens ([PAD], [CLS], [SEP], [UNK],
    [MASK]) and 5-14 are reserved ``[unusedN]`` slots; SentencePiece piece
    ids are shifted by ``fairseq_offset`` to coexist with them.
    """
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['attention_mask']

    def __init__(self, vocab_file, bos_token='[SEP]', eos_token='[SEP]', sep_token='[SEP]', unk_token='[UNK]', pad_token='[PAD]', cls_token='[CLS]', mask_token='[MASK]', **kwargs):
        # Fix: cls_token is now forwarded to the base class. It was accepted
        # but silently dropped before, even though get_special_tokens_mask
        # reads self.cls_token_id.
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning('You need to install SentencePiece to use XLMRobertaTokenizer: install sentencepiece')
            raise
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Fixed ids for the fairseq special tokens...
        self.fairseq_tokens_to_ids = {'[PAD]': 0, '[CLS]': 1, '[SEP]': 2, '[UNK]': 3, '[MASK]': 4}
        # ...followed by ten reserved "[unusedN]" placeholders (ids 5-14).
        for i in range(10):
            tok = '[unused{}]'.format(i)
            self.fairseq_tokens_to_ids[tok] = (5 + i)
        # Offset applied to SentencePiece piece ids.
        self.fairseq_offset = 12
        self.fairseq_ids_to_tokens = {v: k for (k, v) in self.fairseq_tokens_to_ids.items()}
        for k in self.fairseq_tokens_to_ids.keys():
            self.unique_no_split_tokens.append(k)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload
        # from self.vocab_file in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        try:
            import sentencepiece as spm
        except ImportError:
            logger.warning('You need to install SentencePiece to use XLMRobertaTokenizer: install sentencepiece')
            raise
        self.sp_model = spm.SentencePieceProcessor()
        self.sp_model.Load(self.vocab_file)

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None, already_has_special_tokens: bool=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if (token_ids_1 is not None):
                raise ValueError('You should not supply a second sequence if the provided sequence of ids is already formated with special tokens for the model.')
            return list(map((lambda x: (1 if (x in [self.sep_token_id, self.cls_token_id]) else 0)), token_ids_0))
        if (token_ids_1 is None):
            return (([0] * len(token_ids_0)) + [1])
        return (((([0] * len(token_ids_0)) + [1]) + ([0] * len(token_ids_1))) + [1])

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Token type ids are all zeros for this model (single segment)."""
        sep = [self.sep_token_id]
        if (token_ids_1 is None):
            return (len((token_ids_0 + sep)) * [0])
        return (len(((((token_ids_0 + sep) + sep) + token_ids_1) + sep)) * [0])

    @property
    def vocab_size(self):
        # Fix: @property restored — get_vocab() iterates range(self.vocab_size),
        # which requires an int, not a bound method.
        return (len(self.sp_model) + self.fairseq_offset)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.EncodeAsPieces(text)

    def _convert_token_to_id(self, token):
        """Map a token to its id, honoring the fairseq special tokens."""
        if (token in self.fairseq_tokens_to_ids):
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # SentencePiece returns 0 for unknown pieces.
        return ((spm_id + self.fairseq_offset) if spm_id else self.unk_token_id)

    def _convert_id_to_token(self, index):
        if (index in self.fairseq_ids_to_tokens):
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece((index - self.fairseq_offset))

    def convert_tokens_to_string(self, tokens):
        # Undo SentencePiece's word-boundary marker.
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str]=None) -> Tuple[str]:
        """Copy the SentencePiece model file into *save_directory*."""
        if (not os.path.isdir(save_directory)):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, (((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']))
        if (os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file)):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]]=None) -> List[int]:
        """Append [SEP] after each sequence: ``A [SEP]`` or ``A [SEP] B [SEP]``."""
        if (token_ids_1 is None):
            return (token_ids_0 + [self.sep_token_id])
        sep = [self.sep_token_id]
        return (((token_ids_0 + sep) + token_ids_1) + sep)
def test_nonzero_offset_fromarrow_ArrowRecordBatch_2():
    """Slicing a pyarrow RecordBatch before conversion must respect the
    nonzero offset in both the float and the list-typed column."""
    floats = pyarrow.array([1.1, 2.2, 3.3, 4.4, 5.5])
    lists = pyarrow.array([[1, 2, 3], [], [], [4, 5], [6]])
    batch = pyarrow.RecordBatch.from_arrays([floats, lists], ['a', 'b'])
    expected = [
        {'a': 3.3, 'b': []},
        {'a': 4.4, 'b': [4, 5]},
        {'a': 5.5, 'b': [6]},
    ]
    assert to_list(ak._connect.pyarrow.handle_arrow(batch[2:])) == expected
class AverageMeter():
    """Tracks a running (weighted) average of observed values."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear the accumulated sum and count."""
        self._sum = 0
        self._count = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times."""
        self._sum += val * n
        self._count += n

    def value(self):
        """Return the current average, or 0 when nothing was recorded."""
        return self._sum / self._count if self._count else 0
class Controller(object):
    """Coordinates a two-agent dialogue between two Session objects.

    Supports synchronous simulation (``simulate``) and incremental,
    lock-protected stepping (``step``) for server-driven chats. Subclasses
    must implement ``event_callback``, ``get_outcome``, ``game_over`` and
    ``complete``.
    """

    def __init__(self, scenario, sessions, chat_id=None, allow_cross_talk=False, session_names=(None, None)):
        self.lock = Lock()
        self.scenario = scenario
        self.sessions = sessions
        self.session_names = session_names
        self.chat_id = chat_id
        # Exactly two participants are supported.
        assert (len(self.sessions) == 2)
        self.events = []
        self.max_turns = None
        self.allow_cross_talk = allow_cross_talk
        # Per-agent turn state: an agent may only send after it 'received'.
        self.session_status = {agent: 'received' for (agent, _) in enumerate(self.sessions)}

    def describe_scenario(self):
        """Print each agent's knowledge base for debugging."""
        print(('=' * 50))
        for session in self.sessions:
            print('\nAGENT={}'.format(session.agent))
            session.kb.dump()
        print(('=' * 50))
        return True

    def event_callback(self, event):
        raise NotImplementedError

    def get_outcome(self):
        raise NotImplementedError

    def get_result(self, agent_idx):
        return None

    def simulate(self, max_turns=None, verbose=False):
        """Run a complete dialogue (bot-vs-bot) and return an Example."""
        self.events = []
        self.max_turns = max_turns
        time = 0
        num_turns = 0
        game_over = False
        self.describe_scenario()
        # Randomize who talks first.
        if (random.random() < 0.5):
            first_speaker = 0
        else:
            first_speaker = 1
        while (not game_over):
            for (agent, session) in enumerate(self.sessions):
                if ((num_turns == 0) and (agent != first_speaker)):
                    continue
                event = session.send()
                time += 1
                if (not event):
                    continue
                event.time = time
                self.event_callback(event)
                self.events.append(event)
                if verbose:
                    print(('agent=%s: session=%s, event=%s' % (agent, type(session).__name__, event.to_dict())))
                else:
                    action = event.action
                    data = event.data
                    event_output = (data if (action == 'message') else 'Action: {0}, Data: {1}'.format(action, data))
                    print(('agent=%s, event=%s' % (agent, event_output)))
                num_turns += 1
                if (self.game_over() or (max_turns and (num_turns >= max_turns))):
                    game_over = True
                    break
                # Deliver the event to the other participant.
                for (partner, other_session) in enumerate(self.sessions):
                    if (agent != partner):
                        other_session.receive(event)
        uuid = generate_uuid('E')
        outcome = self.get_outcome()
        if verbose:
            print(('outcome: %s' % outcome))
            print('')
        agent_names = {'0': self.session_names[0], '1': self.session_names[1]}
        # NOTE(review): uuid is passed twice to Example — confirm whether
        # the fifth argument should be something else (e.g. the chat id).
        return Example(self.scenario, uuid, self.events, outcome, uuid, agent_names)

    def step(self, backend=None):
        """Advance every active session by at most one event (thread-safe)."""
        with self.lock:
            for (agent, session) in enumerate(self.sessions):
                if (session is None):
                    continue
                # Unless cross-talk is allowed, wait for the reply before
                # letting this agent send again.
                if ((not self.allow_cross_talk) and (self.session_status[agent] != 'received')):
                    continue
                event = session.send()
                if (event is None):
                    continue
                if (not (event.action in Event.decorative_events)):
                    self.session_status[agent] = 'sent'
                self.event_callback(event)
                self.events.append(event)
                if (backend is not None):
                    backend.add_event_to_db(self.get_chat_id(), event)
                for (partner, other_session) in enumerate(self.sessions):
                    if (agent != partner):
                        other_session.receive(event)
                        if (not (event.action in Event.decorative_events)):
                            self.session_status[partner] = 'received'

    def inactive(self):
        """True when any participant's session has been torn down."""
        for s in self.sessions:
            if (s is None):
                return True
        return False

    def set_inactive(self, agents=()):
        """Deactivate sessions: all of them by default, the listed indices
        otherwise; None means deactivate nothing.

        Fix: the default was a mutable list literal; an immutable empty
        tuple preserves the same behavior without the shared-default trap.
        """
        with self.lock:
            if (agents is None):
                return
            elif (len(agents) == 0):
                self.sessions = ([None] * len(self.sessions))
            else:
                for idx in agents:
                    self.sessions[idx] = None

    def get_chat_id(self):
        return self.chat_id

    def game_over(self):
        raise NotImplementedError

    def complete(self):
        raise NotImplementedError
class MultiHeadAttentionV2(nn.Module):
    """Multi-head scaled-dot-product attention.

    Inputs are linearly projected to queries/keys/values, split into
    ``num_heads`` chunks along the feature dimension, attended per head,
    then concatenated and projected to ``output_size``.
    """

    def __init__(self, input_size, output_size, num_heads, weight_norm=False, groups=1, dropout=0, causal=False, add_bias_kv=False):
        super(MultiHeadAttentionV2, self).__init__()
        # Heads must evenly partition the feature dimension.
        assert ((input_size % num_heads) == 0)
        # Optionally wrap projections in weight normalization.
        wn_func = (wn if weight_norm else (lambda x: x))
        self.input_size = input_size
        self.output_size = output_size
        self.num_heads = num_heads
        self.linear_q = wn_func(Linear(input_size, input_size, bias=False, groups=groups))
        self.linear_k = wn_func(Linear(input_size, input_size, bias=add_bias_kv, groups=groups))
        self.linear_v = wn_func(Linear(input_size, input_size, bias=add_bias_kv, groups=groups))
        self.linear_out = wn_func(Linear(input_size, output_size, groups=groups))
        self.sdp_attention = SDPAttention(dropout=dropout, causal=causal)

    def set_mask_q(self, masked_tq):
        # Forwarded to the underlying scaled-dot-product attention.
        self.sdp_attention.set_mask_q(masked_tq)

    def set_mask_k(self, masked_tk):
        self.sdp_attention.set_mask_k(masked_tk)

    def forward(self, q, k, v):
        """Attend q over (k, v); returns (output, per-head attention scores)."""
        # Unpack (batch, time, dim) for each input.
        (b_q, t_q, dim_q) = list(q.size())
        (b_k, t_k, dim_k) = list(k.size())
        (b_v, t_v, dim_v) = list(v.size())
        qw = self.linear_q(q)
        kw = self.linear_k(k)
        vw = self.linear_v(v)
        # One chunk per head along the feature axis (dim 2).
        qw = qw.chunk(self.num_heads, 2)
        kw = kw.chunk(self.num_heads, 2)
        vw = vw.chunk(self.num_heads, 2)
        output = []
        attention_scores = []
        for i in range(self.num_heads):
            (out_h, score) = self.sdp_attention(qw[i], kw[i], vw[i])
            output.append(out_h)
            attention_scores.append(score)
        # Reassemble the heads along the feature axis.
        output = torch.cat(output, 2)
        return (self.linear_out(output), attention_scores)
class SuperRunMode(Enum):
    """Run modes for the super-network."""
    FullModel = 'fullmodel'
    Candidate = 'candidate'
    # Same value as FullModel, so Enum aliasing makes
    # SuperRunMode.Default is SuperRunMode.FullModel.
    Default = 'fullmodel'
@njit(**njit_dict_no_parallel)
def update_line_estimators(estimators, r_packet, cur_line_id, distance_trace, time_explosion):
    """Accumulate the j_blue and Edotlu Monte Carlo estimators for the line
    the packet is currently interacting with, in the packet's shell.

    NOTE(review): the decorator line was mangled to
    ``(**njit_dict_no_parallel)``; ``@njit`` is restored here.
    """
    # Comoving-frame packet energy at the interaction point; the
    # full-relativity branch needs no distance/time information.
    if (not nc.ENABLE_FULL_RELATIVITY):
        energy = calc_packet_energy(r_packet, distance_trace, time_explosion)
    else:
        energy = calc_packet_energy_full_relativity(r_packet)
    estimators.j_blue_estimator[(cur_line_id, r_packet.current_shell_id)] += (energy / r_packet.nu)
    estimators.Edotlu_estimator[(cur_line_id, r_packet.current_shell_id)] += energy
class PolynomialDecayLRScheduleConfig(FairseqDataclass):
    """Configuration for the polynomial-decay learning-rate schedule.

    NOTE(review): the ``field(...)`` defaults only take effect when the
    class is decorated with ``@dataclass``; the decorator appears to have
    been lost upstream of this snippet — confirm against the original.
    """
    warmup_updates: int = field(default=0, metadata={'help': 'warmup the learning rate linearly for the first N updates'})
    warmup_ratio: float = field(default=0, metadata={'help': 'warmup ratio'})
    force_anneal: Optional[int] = field(default=None, metadata={'help': 'force annealing at specified epoch'})
    end_learning_rate: float = field(default=0.0, metadata={'help': 'learning rate to decay to'})
    power: float = field(default=1.0, metadata={'help': 'decay exponent'})
    total_num_update: Optional[float] = field(default=1000000, metadata={'help': 'total number of updates over which to decay learning rate'})
    # II(...) interpolates the value from the global optimization config.
    lr: List[float] = II('optimization.lr')
def tf_efficientnet_lite3(pretrained=False, **kwargs):
    """EfficientNet-Lite3 with TF-compatible defaults (TF BN epsilon and
    'same' padding)."""
    kwargs['bn_eps'] = BN_EPS_TF_DEFAULT
    kwargs['pad_type'] = 'same'
    return _gen_efficientnet_lite(
        'tf_efficientnet_lite3',
        channel_multiplier=1.2,
        depth_multiplier=1.4,
        pretrained=pretrained,
        **kwargs,
    )
def wigner_9j(j_1, j_2, j_3, j_4, j_5, j_6, j_7, j_8, j_9, prec=None):
    """Evaluate the Wigner 9j symbol as a sum over products of three Racah W
    coefficients, optionally at floating-point precision *prec*."""
    kmax = int(min((j_1 + j_9), (j_2 + j_6), (j_4 + j_8)))
    total = 0
    for kk in range(0, kmax + 1):
        term = (2 * kk) + 1
        term = term * racah(j_1, j_2, j_9, j_6, j_3, kk, prec)
        term = term * racah(j_4, j_6, j_8, j_2, j_5, kk, prec)
        term = term * racah(j_1, j_4, j_9, j_8, j_7, kk, prec)
        total = total + term
    return total
def __filter_nodes(network, ip_address_hint, country_code_hint):
    """Select candidate nodes from *network* matching the given hints.

    Preference order: exact IP match, then country-code match, then all
    nodes; when an IP hint is available the remaining candidates are
    further narrowed to the ones whose address best matches the hint.
    """
    all_nodes = [{'id': node_id, **node} for (node_id, node) in network.nodes(data=True)]
    # Private/reserved hint addresses carry no location information.
    if ((ip_address_hint is not None) and (not ip_address_hint.is_global)):
        logging.debug(f'Ignoring non-global address {ip_address_hint}')
        ip_address_hint = None
    if (country_code_hint is not None):
        country_code_hint = country_code_hint.casefold()
    ip_match_found = any(((('ip_address' in node) and (IPv4Address(node['ip_address']) == ip_address_hint)) for node in all_nodes))
    if ip_match_found:
        candidate_nodes = [node for node in all_nodes if (('ip_address' in node) and (IPv4Address(node['ip_address']) == ip_address_hint))]
    else:
        candidate_nodes = [node for node in all_nodes if (('country_code' in node) and (node['country_code'].casefold() == country_code_hint))]
    if (len(candidate_nodes) == 0):
        candidate_nodes = [node for node in all_nodes]
    any_ip_found = any((('ip_address' in node) for node in candidate_nodes))
    if (any_ip_found and (ip_address_hint is not None)):
        candidate_nodes = [node for node in candidate_nodes if ('ip_address' in node)]

        def compute_prefix_match(ip_1, ip_2):
            # Bitwise agreement score between two IPv4 addresses as an
            # unsigned 32-bit int. Fix: the mask constant had been truncated
            # to a bare '&' (a syntax error); 0xFFFFFFFF restores the
            # 32-bit wrap of the complement.
            return ((~ (int(ip_1) ^ int(ip_2))) & 0xFFFFFFFF)

        # NOTE(review): this refinement is nested under the IP-hint guard —
        # the flattened original would crash on int(None) or a missing
        # 'ip_address' key when no usable hint exists; confirm structure.
        prefix_matches = [(node, compute_prefix_match(IPv4Address(node['ip_address']), ip_address_hint)) for node in candidate_nodes]
        max_prefix_match = max(prefix_matches, key=(lambda x: x[1]))[1]
        candidate_nodes = [node for (node, prefix_match) in prefix_matches if (prefix_match == max_prefix_match)]
        # All surviving candidates are expected to share one address.
        assert (len(set([IPv4Address(node['ip_address']) for node in candidate_nodes])) == 1)
    return candidate_nodes
def register_Ns3DsrDsrOptionPadn_methods(root_module, cls):
    """Register Python bindings for ns3::dsr::DsrOptionPadn (PadN option).

    pybindgen-style registration: copy constructor, default constructor,
    the virtual GetOptionNumber/Process hooks, the static GetTypeId, and
    the static OPT_NUMBER attribute.
    """
    cls.add_constructor([param('ns3::dsr::DsrOptionPadn const &', 'arg0')])
    cls.add_constructor([])
    cls.add_method('GetOptionNumber', 'uint8_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('Process', 'uint8_t', [param('ns3::Ptr< ns3::Packet >', 'packet'), param('ns3::Ptr< ns3::Packet >', 'dsrP'), param('ns3::Ipv4Address', 'ipv4Address'), param('ns3::Ipv4Address', 'source'), param('ns3::Ipv4Header const &', 'ipv4Header'), param('uint8_t', 'protocol'), param('bool &', 'isPromisc'), param('ns3::Ipv4Address', 'promiscSource')], is_virtual=True)
    cls.add_static_attribute('OPT_NUMBER', 'uint8_t const', is_const=True)
    return
def _test_vae(vae_trainer, epoch, replay_buffer, vae_save_period=1, uniform_dataset=None):
    """Run a VAE evaluation epoch and dump diagnostics.

    Reconstructions/samples are saved every *vae_save_period* epochs;
    skew-fit statistics are dumped only when the replay buffer prioritizes
    VAE samples and a uniform dataset is provided.
    """
    save_imgs = ((epoch % vae_save_period) == 0)
    log_fit_skew_stats = (replay_buffer._prioritize_vae_samples and (uniform_dataset is not None))
    if (uniform_dataset is not None):
        replay_buffer.log_loss_under_uniform(uniform_dataset, vae_trainer.batch_size, rl_logger=vae_trainer.vae_logger_stats_for_rl)
    vae_trainer.test_epoch(epoch, from_rl=True, save_reconstruction=save_imgs)
    if save_imgs:
        vae_trainer.dump_samples(epoch)
        if log_fit_skew_stats:
            replay_buffer.dump_best_reconstruction(epoch)
            replay_buffer.dump_worst_reconstruction(epoch)
            replay_buffer.dump_sampling_histogram(epoch, batch_size=vae_trainer.batch_size)
        if (uniform_dataset is not None):
            replay_buffer.dump_uniform_imgs_and_reconstructions(dataset=uniform_dataset, epoch=epoch)
def read_32(fobj, start_length, size):
    """Read a 32-bit image resource (icns-style) and return {'RGB': image}.

    start_length: (offset, byte length) of the resource within *fobj*.
    size: (width, height, scale); pixel dimensions are width*scale by
    height*scale.

    The payload is either raw interleaved RGB (when the length matches
    exactly 3 bytes/pixel) or three run-length-encoded channel planes.
    """
    (start, length) = start_length
    fobj.seek(start)
    pixel_size = ((size[0] * size[2]), (size[1] * size[2]))
    sizesq = (pixel_size[0] * pixel_size[1])
    if (length == (sizesq * 3)):
        # Uncompressed: exactly 3 bytes per pixel, interleaved RGB.
        indata = fobj.read(length)
        im = Image.frombuffer('RGB', pixel_size, indata, 'raw', 'RGB', 0, 1)
    else:
        # RLE-compressed: one plane per band, decoded sequentially.
        im = Image.new('RGB', pixel_size, None)
        for band_ix in range(3):
            data = []
            bytesleft = sizesq
            while (bytesleft > 0):
                byte = fobj.read(1)
                if (not byte):
                    # Unexpected EOF; the bytesleft check below reports it.
                    break
                byte = i8(byte)
                if (byte & 128):
                    # High bit set: a run of (byte - 125) copies of the
                    # next byte.
                    blocksize = (byte - 125)
                    byte = fobj.read(1)
                    for i in range(blocksize):
                        data.append(byte)
                else:
                    # Literal block of (byte + 1) raw bytes.
                    blocksize = (byte + 1)
                    data.append(fobj.read(blocksize))
                bytesleft -= blocksize
                if (bytesleft <= 0):
                    break
            if (bytesleft != 0):
                raise SyntaxError(('Error reading channel [%r left]' % bytesleft))
            band = Image.frombuffer('L', pixel_size, b''.join(data), 'raw', 'L', 0, 1)
            im.im.putband(band.im, band_ix)
    return {'RGB': im}
def _sample_indices(n_to_sample, n_available_tasks, with_replacement):
if with_replacement:
return np.random.randint(n_available_tasks, size=n_to_sample)
else:
blocks = []
for _ in range(math.ceil((n_to_sample / n_available_tasks))):
s = np.arange(n_available_tasks)
np.random.shuffle(s)
blocks.append(s)
return np.concatenate(blocks)[:n_to_sample] |
def stdize(data, eps=1e-06):
    """Column-wise standardization: subtract the per-column mean and divide
    by the per-column std (*eps* keeps the division finite for constant
    columns)."""
    mean = np.mean(data, axis=0)
    std = np.std(data, axis=0)
    return (data - mean) / (std + eps)
(message='scipy.misc.extend_notes_in_docstring is deprecated in Scipy 1.3.0')
def extend_notes_in_docstring(cls, notes):
    """Deprecated shim: forward to the private ``_ld`` doccer implementation."""
    return _ld.extend_notes_in_docstring(cls, notes)
def decode_arch_def(arch_def, depth_multiplier=1.0, depth_trunc='ceil', experts_multiplier=1, fix_first_last=False):
    """Decode per-stage block-definition strings into block kwargs.

    Each entry of *arch_def* is a list of block strings for one stage;
    repeat counts are scaled by *depth_multiplier* via _scale_stage_depth,
    except (optionally) for the first and last stages.
    ``experts_multiplier`` is accepted for interface compatibility but
    unused here.
    """
    arch_args = []
    last_stage = len(arch_def) - 1
    for stage_idx, block_strings in enumerate(arch_def):
        assert isinstance(block_strings, list)
        parsed = []
        repeats = []
        for block_str in block_strings:
            assert isinstance(block_str, str)
            block_type, block_args, rep = _decode_block_str(block_str)
            parsed.append((block_type, block_args))
            repeats.append(rep)
        keep_depth = fix_first_last and stage_idx in (0, last_stage)
        multiplier = 1.0 if keep_depth else depth_multiplier
        arch_args.append(_scale_stage_depth(parsed, repeats, multiplier, depth_trunc))
    return arch_args
def modularity_matrix(adj_matrix: np.ndarray) -> np.ndarray:
    """Return the normalized modularity matrix (A - k k^T / 2m) / 2m.

    k is the degree (row-sum) column vector of *adj_matrix* and
    2m = k.sum() is twice the edge count of an undirected graph.
    """
    degrees = np.expand_dims(adj_matrix.sum(axis=1), axis=1)
    norm = (1 / degrees.sum())
    # Expected edge weight under the configuration null model.
    expected = (norm * np.matmul(degrees, degrees.T))
    return (norm * (adj_matrix - expected))
class DoxygenTypeSub(supermod.DoxygenType):
    """Subclass of the generated DoxygenType adding a detail-lookup helper."""

    def __init__(self, version=None, compounddef=None):
        supermod.DoxygenType.__init__(self, version, compounddef)

    def find(self, details):
        # Delegate the search to the compounddef child.
        return self.compounddef.find(details)
class StatisticsAggregator():
    """Aggregates evaluation results per (label, metric) pair with a
    configurable set of statistics functions (defaults: MEAN and STD)."""

    def __init__(self, functions: dict=None):
        super().__init__()
        # Default statistics when none are supplied.
        self.functions = functions if functions is not None else {'MEAN': np.mean, 'STD': np.std}

    def calculate(self, results: typing.List[evaluator.Result]) -> typing.List[evaluator.Result]:
        """Return one Result per (function, label, metric) combination."""
        labels = sorted({res.label for res in results})
        metrics = sorted({res.metric for res in results})
        aggregated = []
        for label in labels:
            for metric in metrics:
                matching = [res.value for res in results if res.label == label and res.metric == metric]
                for fn_id, fn in self.functions.items():
                    aggregated.append(evaluator.Result(fn_id, label, metric, float(fn(matching))))
        return aggregated
def main(args):
    """Train and/or evaluate a KG reasoning model (GQE / Q2B / BetaE).

    Validates task/geometry compatibility, prepares logging and data
    iterators, builds KGReasoning, optionally restores a checkpoint, runs
    the training loop with periodic validation and checkpointing, and ends
    with a test-set evaluation.
    """
    set_global_seed(args.seed)
    tasks = args.tasks.split('.')
    for task in tasks:
        # Negation ('n') queries are only supported by BetaE.
        if (('n' in task) and (args.geo in ['box', 'vec'])):
            assert False, 'Q2B and GQE cannot handle queries with negation'
    if (args.evaluate_union == 'DM'):
        assert (args.geo == 'beta'), "only BetaE supports modeling union using De Morgan's Laws"
    cur_time = parse_time()
    if (args.prefix is None):
        prefix = 'logs'
    else:
        prefix = args.prefix
    print('overwritting args.save_path')
    args.save_path = os.path.join(prefix, args.data_path.split('/')[(- 1)], args.tasks, args.geo)
    # Geometry-specific run-directory suffix.
    if (args.geo in ['box']):
        tmp_str = 'g-{}-mode-{}'.format(args.gamma, args.box_mode)
    elif (args.geo in ['vec']):
        tmp_str = 'g-{}'.format(args.gamma)
    elif (args.geo == 'beta'):
        tmp_str = 'g-{}-mode-{}'.format(args.gamma, args.beta_mode)
    if (args.checkpoint_path is not None):
        args.save_path = args.checkpoint_path
    else:
        args.save_path = os.path.join(args.save_path, tmp_str, cur_time)
    if (not os.path.exists(args.save_path)):
        os.makedirs(args.save_path)
    print('logging to', args.save_path)
    if (not args.do_train):
        # No training: park tensorboard output in a throwaway location.
        writer = SummaryWriter('./logs-debug/unused-tb')
    else:
        writer = SummaryWriter(args.save_path)
    set_logger(args)
    # stats.txt lines end with the entity / relation counts.
    with open(('%s/stats.txt' % args.data_path)) as f:
        entrel = f.readlines()
        nentity = int(entrel[0].split(' ')[(- 1)])
        nrelation = int(entrel[1].split(' ')[(- 1)])
    args.nentity = nentity
    args.nrelation = nrelation
    # NOTE(review): the separator string here looks mangled (''*3 is empty).
    logging.info(('' * 3))
    logging.info(('Geo: %s' % args.geo))
    logging.info(('Data Path: %s' % args.data_path))
    logging.info(('#entity: %d' % nentity))
    logging.info(('#relation: %d' % nrelation))
    logging.info(('#max steps: %d' % args.max_steps))
    logging.info(('Evaluate unoins using: %s' % args.evaluate_union))
    (train_queries, train_answers, valid_queries, valid_hard_answers, valid_easy_answers, test_queries, test_hard_answers, test_easy_answers) = load_data(args, tasks)
    logging.info('Training info:')
    if args.do_train:
        for query_structure in train_queries:
            logging.info(((query_name_dict[query_structure] + ': ') + str(len(train_queries[query_structure]))))
        # Path queries (1p/2p/3p) and other structures get separate iterators.
        train_path_queries = defaultdict(set)
        train_other_queries = defaultdict(set)
        path_list = ['1p', '2p', '3p']
        for query_structure in train_queries:
            if (query_name_dict[query_structure] in path_list):
                train_path_queries[query_structure] = train_queries[query_structure]
            else:
                train_other_queries[query_structure] = train_queries[query_structure]
        train_path_queries = flatten_query(train_path_queries)
        train_path_iterator = SingledirectionalOneShotIterator(DataLoader(TrainDataset(train_path_queries, nentity, nrelation, args.negative_sample_size, train_answers), batch_size=args.batch_size, shuffle=True, num_workers=args.cpu_num, collate_fn=TrainDataset.collate_fn))
        if (len(train_other_queries) > 0):
            train_other_queries = flatten_query(train_other_queries)
            train_other_iterator = SingledirectionalOneShotIterator(DataLoader(TrainDataset(train_other_queries, nentity, nrelation, args.negative_sample_size, train_answers), batch_size=args.batch_size, shuffle=True, num_workers=args.cpu_num, collate_fn=TrainDataset.collate_fn))
        else:
            train_other_iterator = None
    logging.info('Validation info:')
    if args.do_valid:
        for query_structure in valid_queries:
            logging.info(((query_name_dict[query_structure] + ': ') + str(len(valid_queries[query_structure]))))
        valid_queries = flatten_query(valid_queries)
        valid_dataloader = DataLoader(TestDataset(valid_queries, args.nentity, args.nrelation), batch_size=args.test_batch_size, num_workers=args.cpu_num, collate_fn=TestDataset.collate_fn)
    logging.info('Test info:')
    if args.do_test:
        for query_structure in test_queries:
            logging.info(((query_name_dict[query_structure] + ': ') + str(len(test_queries[query_structure]))))
        test_queries = flatten_query(test_queries)
        test_dataloader = DataLoader(TestDataset(test_queries, args.nentity, args.nrelation), batch_size=args.test_batch_size, num_workers=args.cpu_num, collate_fn=TestDataset.collate_fn)
    model = KGReasoning(nentity=nentity, nrelation=nrelation, hidden_dim=args.hidden_dim, gamma=args.gamma, geo=args.geo, use_cuda=args.cuda, box_mode=eval_tuple(args.box_mode), beta_mode=eval_tuple(args.beta_mode), test_batch_size=args.test_batch_size, query_name_dict=query_name_dict)
    logging.info('Model Parameter Configuration:')
    num_params = 0
    for (name, param) in model.named_parameters():
        logging.info(('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad))))
        if param.requires_grad:
            num_params += np.prod(param.size())
    logging.info(('Parameter Number: %d' % num_params))
    if args.cuda:
        model = model.cuda()
    if args.do_train:
        current_learning_rate = args.learning_rate
        optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), model.parameters()), lr=current_learning_rate)
        # LR is decayed once training passes the halfway point (see below).
        warm_up_steps = (args.max_steps // 2)
    if (args.checkpoint_path is not None):
        logging.info(('Loading checkpoint %s...' % args.checkpoint_path))
        checkpoint = torch.load(os.path.join(args.checkpoint_path, 'checkpoint'))
        init_step = checkpoint['step']
        model.load_state_dict(checkpoint['model_state_dict'])
        if args.do_train:
            current_learning_rate = checkpoint['current_learning_rate']
            warm_up_steps = checkpoint['warm_up_steps']
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    else:
        logging.info(('Ramdomly Initializing %s Model...' % args.geo))
        init_step = 0
    step = init_step
    if (args.geo == 'box'):
        logging.info(('box mode = %s' % args.box_mode))
    elif (args.geo == 'beta'):
        logging.info(('beta mode = %s' % args.beta_mode))
    logging.info(('tasks = %s' % args.tasks))
    logging.info(('init_step = %d' % init_step))
    if args.do_train:
        logging.info('Start Training...')
        logging.info(('learning_rate = %d' % current_learning_rate))
    logging.info(('batch_size = %d' % args.batch_size))
    logging.info(('hidden_dim = %d' % args.hidden_dim))
    logging.info(('gamma = %f' % args.gamma))
    if args.do_train:
        training_logs = []
        for step in range(init_step, args.max_steps):
            # Evaluate less frequently in the last third of training.
            if (step == ((2 * args.max_steps) // 3)):
                args.valid_steps *= 4
            log = model.train_step(model, optimizer, train_path_iterator, args, step)
            for metric in log:
                writer.add_scalar(('path_' + metric), log[metric], step)
            if (train_other_iterator is not None):
                # Alternate: one step on the other structures, then another
                # path step so path queries get trained twice as often.
                log = model.train_step(model, optimizer, train_other_iterator, args, step)
                for metric in log:
                    writer.add_scalar(('other_' + metric), log[metric], step)
                log = model.train_step(model, optimizer, train_path_iterator, args, step)
            training_logs.append(log)
            if (step >= warm_up_steps):
                # Decay LR by 5x and push the next decay point out by 1.5x.
                current_learning_rate = (current_learning_rate / 5)
                logging.info(('Change learning_rate to %f at step %d' % (current_learning_rate, step)))
                optimizer = torch.optim.Adam(filter((lambda p: p.requires_grad), model.parameters()), lr=current_learning_rate)
                warm_up_steps = (warm_up_steps * 1.5)
            if ((step % args.save_checkpoint_steps) == 0):
                save_variable_list = {'step': step, 'current_learning_rate': current_learning_rate, 'warm_up_steps': warm_up_steps}
                save_model(model, optimizer, save_variable_list, args)
            if (((step % args.valid_steps) == 0) and (step > 0)):
                if args.do_valid:
                    logging.info('Evaluating on Valid Dataset...')
                    valid_all_metrics = evaluate(model, valid_easy_answers, valid_hard_answers, args, valid_dataloader, query_name_dict, 'Valid', step, writer)
                if args.do_test:
                    logging.info('Evaluating on Test Dataset...')
                    test_all_metrics = evaluate(model, test_easy_answers, test_hard_answers, args, test_dataloader, query_name_dict, 'Test', step, writer)
            if ((step % args.log_steps) == 0):
                metrics = {}
                for metric in training_logs[0].keys():
                    metrics[metric] = (sum([log[metric] for log in training_logs]) / len(training_logs))
                log_metrics('Training average', step, metrics)
                training_logs = []
        # Final checkpoint after the training loop.
        save_variable_list = {'step': step, 'current_learning_rate': current_learning_rate, 'warm_up_steps': warm_up_steps}
        save_model(model, optimizer, save_variable_list, args)
    # 'step' is unbound when training was skipped entirely.
    try:
        print(step)
    except:
        step = 0
    if args.do_test:
        logging.info('Evaluating on Test Dataset...')
        test_all_metrics = evaluate(model, test_easy_answers, test_hard_answers, args, test_dataloader, query_name_dict, 'Test', step, writer)
    logging.info('Training finished!!')
def mock_xml_file():
    """Write ``plain_text_str`` into a minimal ``<text>`` XML document on disk.

    Returns the path of a temporary ``.xml`` file. The file is created with
    ``delete=False``, so the caller is responsible for removing it.
    """
    element = ElementTree.Element('text')
    element.text = plain_text_str
    with tempfile.NamedTemporaryFile(mode='wb', delete=False, suffix='.xml') as tmp:
        ElementTree.ElementTree(element).write(tmp)
    return tmp.name
def get_edge_feature(point_cloud, nn_idx, k=20):
    """Build DGCNN-style edge features for each point and its k neighbours.

    Args:
      point_cloud: float tensor, assumed (batch_size, num_points, num_dims)
        -- TODO confirm against callers.
      nn_idx: int tensor of per-cloud neighbour indices, assumed
        (batch_size, num_points, k).
      k: number of neighbours per point.

    Returns:
      Tensor of shape (batch_size, num_points, k, 2 * num_dims) that
      concatenates each central point with (neighbour - central) offsets.

    NOTE(review): uses TF1-era APIs (``Dimension.value``); will not run on
    TF2 eager dimensions without adaptation.
    """
    og_batch_size = point_cloud.get_shape().as_list()[0]
    # squeeze drops all singleton dims; restore the batch dim if batch == 1.
    point_cloud = tf.squeeze(point_cloud)
    if (og_batch_size == 1):
        point_cloud = tf.expand_dims(point_cloud, 0)
    point_cloud_central = point_cloud
    point_cloud_shape = point_cloud.get_shape()
    batch_size = point_cloud_shape[0].value
    num_points = point_cloud_shape[1].value
    num_dims = point_cloud_shape[2].value
    # Offsets turn per-cloud neighbour indices into indices over the
    # flattened (batch*num_points, num_dims) point table below.
    idx_ = (tf.range(batch_size) * num_points)
    idx_ = tf.reshape(idx_, [batch_size, 1, 1])
    point_cloud_flat = tf.reshape(point_cloud, [(- 1), num_dims])
    point_cloud_neighbors = tf.gather(point_cloud_flat, (nn_idx + idx_))
    # Tile each central point k times so it pairs with each neighbour.
    point_cloud_central = tf.expand_dims(point_cloud_central, axis=(- 2))
    point_cloud_central = tf.tile(point_cloud_central, [1, 1, k, 1])
    edge_feature = tf.concat([point_cloud_central, (point_cloud_neighbors - point_cloud_central)], axis=(- 1))
    return edge_feature
def go(runs):
    """Run ``time_layer`` once per benchmark configuration.

    Each *run* is a dict of configuration values. HACK: every entry is
    injected into this module's global namespace via ``go.__globals__``
    because the config names (numEpochs, batchSize, ...) are read as
    module-level globals rather than passed explicitly.
    """
    for run in runs:
        for key in run.keys():
            # Inject the config value as a module-level global.
            go.__globals__[key] = run[key]
        print('')
        print('CONFIG: ', run)
        time_layer(numEpochs, batchSize, inputPlanes, inputSize, outputPlanes, filterSize)
def extract_acos(dloader, transform, save_path, split):
    """Extract acoustic features for every utterance in *dloader*.

    Applies *transform* to each (flattened) waveform and saves every
    feature tensor to ``<save_path>/<split>/<feature>/<utt_id>.<feature>``.
    """
    for bidx, batch in tqdm.tqdm(enumerate(dloader, start=1), total=len(dloader)):
        wav, uttname, _ = batch
        utt_id = os.path.splitext(os.path.basename(uttname[0]))[0]
        features = transform(wav.view(-1))
        for feat_name in features.keys():
            # Skip bookkeeping entries; only real feature tensors are saved.
            if 'uttname' in feat_name or 'raw' in feat_name or 'chunk' in feat_name:
                continue
            out_dir = os.path.join(save_path, split, feat_name)
            if not os.path.exists(out_dir):
                os.makedirs(out_dir)
            out_name = utt_id + '.{}'.format(feat_name)
            torch.save(features[feat_name], os.path.join(out_dir, out_name))
def test_case32():
    """NGSI9 register followed by NGSI10 update against the broker.

    Registers a context (subdata52), then updates it (subdata57) and expects
    HTTP 200 on the update.
    """
    # BUG FIX: Content-Type was misspelled 'appliction/json' (twice), which a
    # broker may reject or treat as an unknown media type.
    headers = {'Content-Type': 'application/json', 'fiware-service': 'openiot', 'fiware-servicepath': '/'}
    url = (brokerIp + '/NGSI9/registerContext')
    requests.post(url, data=json.dumps(data_ngsi10.subdata52), headers=headers)
    url = (brokerIp + '/ngsi10/updateContext')
    r = requests.post(url, data=json.dumps(data_ngsi10.subdata57), headers=headers)
    assert (r.status_code == 200)
def to_fast_pickable(l):
    """Serialize a list of PolyBoRi Boolean polynomials into a picklable form.

    Returns ``[roots, nodes]`` where ``nodes`` is a list of
    ``(variable, then_index, else_index)`` triples describing the shared
    decision-diagram graph, with indices 0 and 1 reserved for the zero and
    one terminal nodes, and ``roots`` holds the node index of each
    polynomial's diagram in ``l``.
    """
    if (not l):
        return [[], []]
    # All polynomials share one ring; take it from the first element.
    f = l[0]
    f = f.set()
    r = f.ring()
    one = r.one().navigation()
    zero = r.zero().navigation()
    nodes = set()
    def find_navs(nav):
        # Depth-first walk collecting every non-terminal node exactly once.
        if ((nav not in nodes) and (not nav.constant())):
            nodes.add(nav)
            find_navs(nav.then_branch())
            find_navs(nav.else_branch())
    for f in l:
        f_nav = f.set().navigation()
        find_navs(f_nav)
    # Sort by variable value so children always precede/follow deterministically.
    nodes_sorted = sorted(nodes, key=CCuddNavigator.value)
    # Terminals get the reserved indices; interior nodes start at 2.
    nodes2i = {one: 1, zero: 0}
    for (i, n) in enumerate(nodes_sorted):
        nodes2i[n] = (i + 2)
    # Rewrite each node as a (value, then, else) triple of plain ints.
    for i in range(len(nodes_sorted)):
        n = nodes_sorted[i]
        t = nodes2i[n.then_branch()]
        e = nodes2i[n.else_branch()]
        nodes_sorted[i] = (n.value(), t, e)
    return [[nodes2i[f.set().navigation()] for f in l], nodes_sorted]
def pred_fn(pred_rng, params, batch, model):
    """Greedy class prediction: eval-mode forward pass, argmax over the last axis.

    ``pred_rng`` is accepted for interface compatibility but unused.
    """
    logits = model.apply({'params': params}, batch['images'], training=False)
    return logits.argmax(axis=-1)
class DefaultQuant(QuantizeHandler):
    """Fallback quantize handler: quantizes a node's output using the
    activation post-process observed for that node."""

    def convert(self, quantizer, node):
        assert self.all_nodes
        graph = quantizer.quantized_graph
        root = quantizer.modules['']
        observer = quantizer.activation_post_process_map[node.name]
        return quantize_node(root, graph, node, observer)
def split_chinese_sentence(text: str) -> List[str]:
    """Split Chinese *text* into sentences on terminal punctuation.

    Tracks straight quote marks so a terminator inside a quotation only
    closes the sentence once the quote is closed.

    NOTE(review): several punctuation sets below contain empty strings --
    the original full-width CJK characters were almost certainly lost to an
    encoding mangle. A membership test against '' can never match a single
    character, so those branches are currently dead; restore the intended
    punctuation before relying on this function.
    """
    sentences = []
    quote_mark_count = 0  # parity of straight double-quotes seen so far
    sentence = ''
    for (i, c) in enumerate(text):
        sentence += c
        # Unconditional terminators (characters garbled -- see NOTE above).
        if (c in {'', ''}):
            sentences.append(sentence)
            sentence = ''
        # Terminators that only split when not followed by a closing quote.
        elif (c in {'', '!', '?', '!', '?'}):
            if ((i < (len(text) - 1)) and (text[(i + 1)] not in {'', '"', ''})):
                sentences.append(sentence)
                sentence = ''
        elif (c == '"'):
            quote_mark_count += 1
            # A closing quote ends the sentence if a terminator preceded it.
            if (((quote_mark_count % 2) == 0) and (len(sentence) > 2) and (sentence[(- 2)] in {'?', '!', '', '?', '!'})):
                sentences.append(sentence)
                sentence = ''
    if sentence:
        sentences.append(sentence)
    return sentences
class NRTRDataset_hdf5(Dataset):
    """Text-recognition dataset backed by an HDF5 file with 'image' and
    'label' datasets.

    The file is reopened on every access instead of being held open, which
    keeps the dataset picklable for multi-worker DataLoaders.
    """

    def __init__(self, hdf5_file, transform=None):
        self.data = dict()
        self._transform = transform
        self.hdf5_file = hdf5_file

    def __len__(self):
        with h5py.File(self.hdf5_file, 'r') as h5:
            return len(h5['label'])

    def __getitem__(self, idx):
        with h5py.File(self.hdf5_file, 'r') as h5:
            image = torch.from_numpy(h5['image'][idx]).to(torch.float32)
            target = torch.from_numpy(h5['label'][idx]).to(torch.int64)
            return (image, target)
class FourierMatDict(SpectralMatDict):
    """Dict of Fourier-matrix factories; builds and caches a factory on
    first lookup of a missing key."""

    def __missing__(self, key):
        # A 2-element key uses the default measure 1; otherwise key[2] is it.
        measure = key[2] if len(key) != 2 else 1
        factory = functools.partial(FourierMatrix, measure=measure)
        self[key] = factory
        return factory
class Net_mnist(nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel MNIST digits.

    Two conv/pool stages followed by two fully-connected layers producing
    10 class logits.
    """

    def __init__(self):
        super(Net_mnist, self).__init__()
        self.conv1 = nn.Conv2d(1, 20, 5, 1)
        self.conv2 = nn.Conv2d(20, 50, 5, 1)
        self.fc1 = nn.Linear(4 * 4 * 50, 500)
        self.fc2 = nn.Linear(500, 10)

    def forward(self, x):
        # conv -> relu -> 2x2 max-pool, twice: 28x28 -> 12x12 -> 4x4 spatial.
        out = F.max_pool2d(F.relu(self.conv1(x)), 2, 2)
        out = F.max_pool2d(F.relu(self.conv2(out)), 2, 2)
        out = out.view(-1, 4 * 4 * 50)
        return self.fc2(F.relu(self.fc1(out)))
class VGGBackbone(object):
    """Factory that builds a VGG feature extractor from configer settings."""

    def __init__(self, configer):
        self.configer = configer

    def __call__(self):
        """Instantiate the configured backbone; only VGG-19 variants are supported."""
        arch = self.configer.sub_arch
        if arch not in ('vgg19_bn', 'vgg19'):
            raise Exception('Architecture undefined!')
        return VGG(arch, self.configer.pretrained_backbone)
def _extract_set_features(model, loader, use_gpu, batch_time):
    """Extract one averaged feature vector per tracklet from *loader*.

    Each batch is a single tracklet of n clips (batch size must be 1); the
    n clip features are mean-pooled into one vector. Returns
    (features ndarray [N, D], pids ndarray, camids ndarray).
    """
    feats, all_pids, all_camids = [], [], []
    for batch_idx, (imgs, pids, camids) in enumerate(loader):
        if use_gpu:
            imgs = imgs.cuda()
        (b, n, s, c, h, w) = imgs.size()
        assert (b == 1)
        imgs = imgs.view((b * n), s, c, h, w)
        end = time.time()
        features = model(imgs)
        batch_time.update((time.time() - end))
        features = features.view(n, (- 1))
        features = torch.mean(features, 0)
        feats.append(features.cpu().numpy())
        all_pids.extend(pids.numpy())
        all_camids.extend(camids.numpy())
    return np.stack(feats, 0), np.asarray(all_pids), np.asarray(all_camids)


def test(model, queryloader, galleryloader, pool, use_gpu, ranks=[1, 5, 10, 20], return_distmat=False):
    """Evaluate *model* on the query/gallery loaders and report CMC/mAP/mINP.

    Returns the distance matrix when *return_distmat* is True, otherwise
    rank-1 accuracy. ``pool`` and ``ranks`` are kept for interface
    compatibility.
    """
    batch_time = AverageMeter()
    model.eval()
    with torch.no_grad():
        # Query and gallery extraction were duplicated verbatim; now shared.
        (qf, q_pids, q_camids) = _extract_set_features(model, queryloader, use_gpu, batch_time)
        print('Extracted features for query set, obtained {}-by-{} matrix'.format(qf.shape[0], qf.shape[1]))
        (gf, g_pids, g_camids) = _extract_set_features(model, galleryloader, use_gpu, batch_time)
        print('Extracted features for gallery set, obtained {}-by-{} matrix'.format(gf.shape[0], gf.shape[1]))
    print('==> BatchTime(s)/BatchSize(img): {:.3f}/{}'.format(batch_time.avg, (args.test_batch * args.seq_len)))
    # Squared Euclidean distances: |q|^2 + |g|^2 - 2 q.g, via broadcasting
    # (equivalent to the previous np.tile formulation, without the copies).
    distmat = ((qf ** 2).sum(axis=1, keepdims=True) + (gf ** 2).sum(axis=1, keepdims=True).transpose())
    distmat = (distmat - (2 * np.dot(qf, gf.transpose())))
    print('Computing CMC and mAP')
    (cmc, mAP, mINP) = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
    print('Results ')
    print('mINP: {:.1%} mAP: {:.1%} CMC curve {:.1%} {:.1%} {:.1%} {:.1%}'.format(mINP, mAP, cmc[(1 - 1)], cmc[(5 - 1)], cmc[(10 - 1)], cmc[(20 - 1)]))
    print('')
    if return_distmat:
        return distmat
    return cmc[0]
class CoefficientOfDetermination(NumpyArrayMetric):
    """R^2 metric: 1 - SSE / total variance of the flattened reference."""

    def __init__(self, metric: str = 'R2'):
        super().__init__(metric)

    def calculate(self):
        """Compute R^2 between ``self.reference`` and ``self.prediction``."""
        observed = self.reference.flatten()
        predicted = self.prediction.flatten()
        residual_sum = sum((observed - predicted) ** 2)
        # (n - 1) * unbiased variance == total sum of squares about the mean.
        total_sum = (len(observed) - 1) * np.var(observed, ddof=1)
        return 1 - (residual_sum / total_sum)
def _sympysage_polynomial_ring(self):
    """Convert a SymPy polynomial ring to the corresponding Sage ring."""
    sage_base = self.domain._sage_()
    var_names = ','.join(str(g) for g in self.gens)
    # Sage's bracket syntax constructs the polynomial ring over the base.
    return sage_base[var_names]
def _install_wheel(name, wheel_zip, wheel_path, scheme, pycompile=True, warn_script_location=True, direct_url=None, requested=False):
    """Install the unpacked wheel *wheel_zip* for distribution *name* into *scheme*.

    Extracts root- and ``.data``-scheme files, generates console/GUI
    scripts, optionally byte-compiles sources, and writes the INSTALLER,
    RECORD, direct_url.json and REQUESTED metadata files into the
    ``.dist-info`` directory.
    """
    # Local import: needed for the @contextmanager fix below without
    # touching the (unseen) module import header.
    from contextlib import contextmanager
    (info_dir, metadata) = parse_wheel(wheel_zip, name)
    if wheel_root_is_purelib(metadata):
        lib_dir = scheme.purelib
    else:
        lib_dir = scheme.platlib
    # Bookkeeping for the RECORD file: archive path -> installed path,
    # files whose content changed, and files pip itself generated.
    installed = {}
    changed = set()
    generated = []
    def record_installed(srcfile, destfile, modified=False):
        # Map archive RECORD paths to installation RECORD paths.
        newpath = _fs_to_record_path(destfile, lib_dir)
        installed[srcfile] = newpath
        if modified:
            changed.add(_fs_to_record_path(destfile))
    def all_paths():
        names = wheel_zip.namelist()
        decoded_names = map(ensure_text, names)
        for name in decoded_names:
            (yield cast('RecordPath', name))
    def is_dir_path(path):
        return path.endswith('/')
    def assert_no_path_traversal(dest_dir_path, target_path):
        # Defense against wheels with '..' entries escaping the target dir.
        if (not is_within_directory(dest_dir_path, target_path)):
            message = 'The wheel {!r} has a file {!r} trying to install outside the target directory {!r}'
            raise InstallationError(message.format(wheel_path, target_path, dest_dir_path))
    def root_scheme_file_maker(zip_file, dest):
        def make_root_scheme_file(record_path):
            normed_path = os.path.normpath(record_path)
            dest_path = os.path.join(dest, normed_path)
            assert_no_path_traversal(dest, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)
        return make_root_scheme_file
    def data_scheme_file_maker(zip_file, scheme):
        scheme_paths = {}
        for key in SCHEME_KEYS:
            encoded_key = ensure_text(key)
            scheme_paths[encoded_key] = ensure_text(getattr(scheme, key), encoding=sys.getfilesystemencoding())
        def make_data_scheme_file(record_path):
            # .data paths look like '<dist>.data/<scheme key>/<path>'.
            normed_path = os.path.normpath(record_path)
            try:
                (_, scheme_key, dest_subpath) = normed_path.split(os.path.sep, 2)
            except ValueError:
                message = "Unexpected file in {}: {!r}. .data directory contents should be named like: '<scheme key>/<path>'.".format(wheel_path, record_path)
                raise InstallationError(message)
            try:
                scheme_path = scheme_paths[scheme_key]
            except KeyError:
                valid_scheme_keys = ', '.join(sorted(scheme_paths))
                message = 'Unknown scheme key used in {}: {} (for file {!r}). .data directory contents should be in subdirectories named with a valid scheme key ({})'.format(wheel_path, scheme_key, record_path, valid_scheme_keys)
                raise InstallationError(message)
            dest_path = os.path.join(scheme_path, dest_subpath)
            assert_no_path_traversal(scheme_path, dest_path)
            return ZipBackedFile(record_path, dest_path, zip_file)
        return make_data_scheme_file
    def is_data_scheme_path(path):
        return path.split('/', 1)[0].endswith('.data')
    paths = all_paths()
    file_paths = filterfalse(is_dir_path, paths)
    (root_scheme_paths, data_scheme_paths) = partition(is_data_scheme_path, file_paths)
    make_root_scheme_file = root_scheme_file_maker(wheel_zip, ensure_text(lib_dir, encoding=sys.getfilesystemencoding()))
    files = map(make_root_scheme_file, root_scheme_paths)
    def is_script_scheme_path(path):
        parts = path.split('/', 2)
        return ((len(parts) > 2) and parts[0].endswith('.data') and (parts[1] == 'scripts'))
    (other_scheme_paths, script_scheme_paths) = partition(is_script_scheme_path, data_scheme_paths)
    make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme)
    other_scheme_files = map(make_data_scheme_file, other_scheme_paths)
    files = chain(files, other_scheme_files)
    distribution = pkg_resources_distribution_for_wheel(wheel_zip, name, wheel_path)
    (console, gui) = get_entrypoints(distribution)
    def is_entrypoint_wrapper(file):
        # Skip setuptools-generated wrappers; pip regenerates scripts itself.
        path = file.dest_path
        name = os.path.basename(path)
        if name.lower().endswith('.exe'):
            matchname = name[:(- 4)]
        elif name.lower().endswith('-script.py'):
            matchname = name[:(- 10)]
        elif name.lower().endswith('.pya'):
            matchname = name[:(- 4)]
        else:
            matchname = name
        return ((matchname in console) or (matchname in gui))
    script_scheme_files = map(make_data_scheme_file, script_scheme_paths)
    script_scheme_files = filterfalse(is_entrypoint_wrapper, script_scheme_files)
    script_scheme_files = map(ScriptFile, script_scheme_files)
    files = chain(files, script_scheme_files)
    for file in files:
        file.save()
        record_installed(file.src_record_path, file.dest_path, file.changed)
    def pyc_source_file_paths():
        # Yield installed .py files eligible for byte-compilation.
        for installed_path in sorted(set(installed.values())):
            full_installed_path = os.path.join(lib_dir, installed_path)
            if (not os.path.isfile(full_installed_path)):
                continue
            if (not full_installed_path.endswith('.py')):
                continue
            (yield full_installed_path)
    def pyc_output_path(path):
        if PY2:
            if sys.flags.optimize:
                return (path + 'o')
            else:
                return (path + 'c')
        else:
            return importlib.util.cache_from_source(path)
    if pycompile:
        with captured_stdout() as stdout:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                for path in pyc_source_file_paths():
                    path_arg = ensure_str(path, encoding=sys.getfilesystemencoding())
                    success = compileall.compile_file(path_arg, force=True, quiet=True)
                    if success:
                        pyc_path = pyc_output_path(path)
                        assert os.path.exists(pyc_path)
                        pyc_record_path = cast('RecordPath', pyc_path.replace(os.path.sep, '/'))
                        record_installed(pyc_record_path, pyc_path)
        logger.debug(stdout.getvalue())
    maker = PipScriptMaker(None, scheme.scripts)
    maker.clobber = True
    maker.variants = {''}
    maker.set_mode = True
    scripts_to_generate = get_console_script_specs(console)
    gui_scripts_to_generate = list(starmap('{} = {}'.format, gui.items()))
    generated_console_scripts = maker.make_multiple(scripts_to_generate)
    generated.extend(generated_console_scripts)
    generated.extend(maker.make_multiple(gui_scripts_to_generate, {'gui': True}))
    if warn_script_location:
        msg = message_about_scripts_not_on_PATH(generated_console_scripts)
        if (msg is not None):
            logger.warning(msg)
    generated_file_mode = (438 & (~ current_umask()))
    # BUG FIX: _generate_file is used below via `with ... as`, but a plain
    # generator function has no __enter__/__exit__ -- the @contextmanager
    # decorator (present in upstream pip) was missing here.
    @contextmanager
    def _generate_file(path, **kwargs):
        # Write atomically: fill a temp file, fix its mode, then rename.
        with adjacent_tmp_file(path, **kwargs) as f:
            (yield f)
        os.chmod(f.name, generated_file_mode)
        replace(f.name, path)
    dest_info_dir = os.path.join(lib_dir, info_dir)
    installer_path = os.path.join(dest_info_dir, 'INSTALLER')
    with _generate_file(installer_path) as installer_file:
        installer_file.write(b'pip\n')
    generated.append(installer_path)
    if (direct_url is not None):
        direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME)
        with _generate_file(direct_url_path) as direct_url_file:
            direct_url_file.write(direct_url.to_json().encode('utf-8'))
        generated.append(direct_url_path)
    if requested:
        requested_path = os.path.join(dest_info_dir, 'REQUESTED')
        with open(requested_path, 'w'):
            pass
        generated.append(requested_path)
    record_text = distribution.get_metadata('RECORD')
    record_rows = list(csv.reader(record_text.splitlines()))
    rows = get_csv_rows_for_installed(record_rows, installed=installed, changed=changed, generated=generated, lib_dir=lib_dir)
    record_path = os.path.join(dest_info_dir, 'RECORD')
    with _generate_file(record_path, **csv_io_kwargs('w')) as record_file:
        writer = csv.writer(cast('IO[str]', record_file))
        writer.writerows(_normalized_outrows(rows))
# NOTE(review): the next line appears to be a decorator whose name was
# stripped during extraction (likely garage's @benchmark(plot=False,
# auto=True)); as written it is a SyntaxError -- restore the decorator
# name before running.
(plot=False, auto=True)
def auto_ppo_benchmarks():
    """Run PPO benchmarks (garage PyTorch and TF) over the MuJoCo 1M env set."""
    iterate_experiments(ppo_garage_pytorch, MuJoCo1M_ENV_SET)
    iterate_experiments(ppo_garage_tf, MuJoCo1M_ENV_SET)
def register_Ns3FlowProbeFlowStats_methods(root_module, cls):
    """Register ns3::FlowProbe::FlowStats constructors and attributes for PyBindGen."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::FlowProbe::FlowStats const &', 'arg0')])
    cls.add_constructor([])
    cls.add_instance_attribute('bytes', 'uint64_t', is_const=False)
    cls.add_instance_attribute('bytesDropped', 'std::vector< unsigned long >', is_const=False)
    cls.add_instance_attribute('delayFromFirstProbeSum', 'ns3::Time', is_const=False)
    cls.add_instance_attribute('packets', 'uint32_t', is_const=False)
    cls.add_instance_attribute('packetsDropped', 'std::vector< unsigned int >', is_const=False)
    return
def aggregated_data_from_experiments(experiments, contains_err=False):
    """Reshape nested experiment results into plot-ready aggregated form.

    *experiments* maps experiment label -> protocol label -> metric dict.
    Metric labels are taken from the first experiment/protocol: every key
    containing 'mean' but not 'std'. When *contains_err* is true, the
    '<metric>_std' entries supply error bars; otherwise zeros are used.

    Returns [protocol_labels, experiment_labels, metric_labels, scores]
    where scores[metric][experiment] == (means_per_protocol, errs_per_protocol).
    """
    experiment_labels = list(experiments.keys())
    first_experiment = experiments[experiment_labels[0]]
    protocol_labels = list(first_experiment.keys())
    first_metrics = first_experiment[protocol_labels[0]]
    metric_labels = [m for m in first_metrics.keys() if ('mean' in m) and ('std' not in m)]
    scores = dict()
    for metric in metric_labels:
        per_experiment = dict()
        for exp_label in experiment_labels:
            means = list()
            errs = list()
            for protocol in protocol_labels:
                cell = experiments[exp_label][protocol]
                means.append(cell[metric])
                errs.append(cell[metric + '_std'] if contains_err else 0.0)
            per_experiment[exp_label] = (means, errs)
        scores[metric] = per_experiment
    return [protocol_labels, experiment_labels, metric_labels, scores]
def _normalize_H(H, level):
    """Canonicalize a list of unit generators modulo *level*.

    Each generator is reduced mod *level* and must be invertible there
    (otherwise ArithmeticError). Elements <= 1 are dropped, and each h is
    replaced by min(h, h^-1 mod level) so inverse pairs collapse to one
    canonical representative. Returns the sorted result.
    """
    reduced = [ZZ(h) % level for h in H]
    for h in reduced:
        if gcd(h, level) > 1:
            raise ArithmeticError('The generators %s must be units modulo %s' % (reduced, level))
    units = {u for u in reduced if u > 1}
    canonical = set()
    for h in units:
        inv_h = h.inverse_mod(level)
        canonical.add(inv_h if inv_h <= h else h)
    return sorted(canonical)
class AggregatorGRPCServer(aggregator_pb2_grpc.AggregatorServicer):
    """gRPC servicer exposing the federated-learning aggregator to collaborators.

    Serves task assignment, aggregated-tensor retrieval, and local-result
    upload, optionally over mutual-TLS.
    """

    def __init__(self, aggregator, agg_port, tls=True, disable_client_auth=False, root_certificate=None, certificate=None, private_key=None, **kwargs):
        """Store connection/TLS settings; the server itself is built lazily in get_server()."""
        self.aggregator = aggregator
        # Listen on all interfaces (IPv4 and IPv6) at the given port.
        self.uri = f'[::]:{agg_port}'
        self.tls = tls
        self.disable_client_auth = disable_client_auth
        self.root_certificate = root_certificate
        self.certificate = certificate
        self.private_key = private_key
        self.server = None
        self.server_credentials = None
        self.logger = logging.getLogger(__name__)

    def validate_collaborator(self, request, context):
        """Abort the RPC unless the client cert CN matches an authorized collaborator.

        No-op when TLS is disabled (there is no peer certificate to check).
        """
        if self.tls:
            common_name = context.auth_context()['x509_common_name'][0].decode('utf-8')
            collaborator_common_name = request.header.sender
            if (not self.aggregator.valid_collaborator_cn_and_id(common_name, collaborator_common_name)):
                # Random delay before rejecting -- presumably to slow down
                # brute-force identity probing; confirm intent with authors.
                sleep((5 * random()))
                context.abort(StatusCode.UNAUTHENTICATED, f'Invalid collaborator. CN: |{common_name}| collaborator_common_name: |{collaborator_common_name}|')

    def get_header(self, collaborator_name):
        """Build the response MessageHeader addressed to *collaborator_name*."""
        return aggregator_pb2.MessageHeader(sender=self.aggregator.uuid, receiver=collaborator_name, federation_uuid=self.aggregator.federation_uuid, single_col_cert_common_name=self.aggregator.single_col_cert_common_name)

    def check_request(self, request):
        """Validate an incoming header: authorized sender, correct receiver/federation/cert CN."""
        check_is_in(request.header.sender, self.aggregator.authorized_cols, self.logger)
        check_equal(request.header.receiver, self.aggregator.uuid, self.logger)
        check_equal(request.header.federation_uuid, self.aggregator.federation_uuid, self.logger)
        check_equal(request.header.single_col_cert_common_name, self.aggregator.single_col_cert_common_name, self.logger)

    def GetTasks(self, request, context):
        """RPC: return the collaborator's task list for the current round."""
        self.validate_collaborator(request, context)
        self.check_request(request)
        collaborator_name = request.header.sender
        (tasks, round_number, sleep_time, time_to_quit) = self.aggregator.get_tasks(request.header.sender)
        if tasks:
            # Tasks may be plain names (str) or structured task objects.
            if isinstance(tasks[0], str):
                tasks_proto = [aggregator_pb2.Task(name=task) for task in tasks]
            else:
                tasks_proto = [aggregator_pb2.Task(name=task.name, function_name=task.function_name, task_type=task.task_type, apply_local=task.apply_local) for task in tasks]
        else:
            tasks_proto = []
        return aggregator_pb2.GetTasksResponse(header=self.get_header(collaborator_name), round_number=round_number, tasks=tasks_proto, sleep_time=sleep_time, quit=time_to_quit)

    def GetAggregatedTensor(self, request, context):
        """RPC: return the requested aggregated tensor for a given round."""
        self.validate_collaborator(request, context)
        self.check_request(request)
        collaborator_name = request.header.sender
        tensor_name = request.tensor_name
        require_lossless = request.require_lossless
        round_number = request.round_number
        report = request.report
        tags = tuple(request.tags)
        named_tensor = self.aggregator.get_aggregated_tensor(collaborator_name, tensor_name, round_number, report, tags, require_lossless)
        return aggregator_pb2.GetAggregatedTensorResponse(header=self.get_header(collaborator_name), round_number=round_number, tensor=named_tensor)

    def SendLocalTaskResults(self, request, context):
        """RPC: receive a collaborator's streamed task results and hand them to the aggregator."""
        try:
            # The results arrive as a datastream; reassemble into a proto.
            proto = aggregator_pb2.TaskResults()
            proto = utils.datastream_to_proto(proto, request)
        except RuntimeError:
            raise RuntimeError('Empty stream message, reestablishing connection from client to resume training...')
        self.validate_collaborator(proto, context)
        self.check_request(proto)
        collaborator_name = proto.header.sender
        task_name = proto.task_name
        round_number = proto.round_number
        data_size = proto.data_size
        named_tensors = proto.tensors
        self.aggregator.send_local_task_results(collaborator_name, round_number, task_name, data_size, named_tensors)
        return aggregator_pb2.SendLocalTaskResultsResponse(header=self.get_header(collaborator_name))

    def get_server(self):
        """Build the gRPC server and bind it to the port (secure or insecure)."""
        self.server = server(ThreadPoolExecutor(max_workers=cpu_count()), options=channel_options)
        aggregator_pb2_grpc.add_AggregatorServicer_to_server(self, self.server)
        if (not self.tls):
            self.logger.warn('gRPC is running on insecure channel with TLS disabled.')
            port = self.server.add_insecure_port(self.uri)
            self.logger.info(f'Insecure port: {port}')
        else:
            with open(self.private_key, 'rb') as f:
                private_key_b = f.read()
            with open(self.certificate, 'rb') as f:
                certificate_b = f.read()
            with open(self.root_certificate, 'rb') as f:
                root_certificate_b = f.read()
            if self.disable_client_auth:
                self.logger.warn('Client-side authentication is disabled.')
            # Mutual TLS unless client auth is explicitly disabled.
            self.server_credentials = ssl_server_credentials(((private_key_b, certificate_b),), root_certificates=root_certificate_b, require_client_auth=(not self.disable_client_auth))
            self.server.add_secure_port(self.uri, self.server_credentials)
        return self.server

    def serve(self):
        """Start the server and block until all collaborators have been told to quit."""
        self.get_server()
        self.logger.info('Starting Aggregator gRPC Server')
        self.server.start()
        try:
            while (not self.aggregator.all_quit_jobs_sent()):
                sleep(5)
        except KeyboardInterrupt:
            pass
        self.server.stop(0)
# NOTE(review): the two leading lines look like decorator calls whose names
# were stripped during extraction (likely pytest-mark style decorators such
# as ...service(...) and ...openapi_version('3.0') from the schemathesis
# test suite); as written they are a SyntaxError -- restore the decorator
# names before running.
.service(data='Content-Type error', status=400, method='POST', path='/reports/upload/')
.openapi_version('3.0')
def test_unknown_error_on_upload(cli, schema_url, service, snapshot_cli):
    """CLI run with --report against a service that rejects the upload matches the snapshot."""
    assert (cli.run(schema_url, 'my-api', f'--schemathesis-io-token={service.token}', f'--schemathesis-io-url={service.base_url}', '--report') == snapshot_cli)
class MM(Enum):
    """Matrix-multiply kernel variants.

    The NN/NT/TT suffixes presumably encode operand transposition
    (normal/transposed) -- confirm against the kernel dispatch code.
    Values are part of the serialized contract; do not renumber.
    """
    MM_NORMAL = 1
    MM_WRQ = 2
    MM_WRQ_RELU = 3
    MM_NN = 4
    MM_NT = 5
    MM_TT = 6
    UNKNOWN = (- 1)
# NOTE(review): the decorator name was stripped in this file; restored as a
# Flask-style route -- confirm the application object's actual name.
@app.route('/register', methods=['POST'])
def register():
    """Create a user account from POST form fields 'username' and 'password'.

    Returns 'success' on creation, or 'username already exists'.
    """
    username = request.form['username']
    password = request.form['password']
    db = MySQLdb.connect(host='localhost', user='root', passwd='root', db='user')
    try:
        c = db.cursor()
        # SECURITY FIX: use parameterized queries instead of %-interpolated
        # SQL -- the original was injectable via the form fields.
        c.execute('SELECT username FROM user WHERE username = %s', (username,))
        rows = c.fetchall()
        if len(rows) == 0:
            # BUG FIX: hashlib.md5 requires bytes on Python 3 (the file uses
            # f-strings elsewhere, so it is Py3); encode before hashing.
            # NOTE(review): unsalted MD5 is not a safe password hash; kept
            # only for compatibility with existing rows -- migrate to
            # bcrypt/argon2.
            digest = hashlib.md5(password.encode('utf-8')).hexdigest()
            c.execute('INSERT INTO user (username, password) VALUES (%s, %s)', (username, digest))
            db.commit()
            return 'success'
        else:
            return 'username already exists'
    finally:
        # The original leaked the connection on every request.
        db.close()
class TruePositive(ConfusionMatrixMetric):
    """Metric reporting the true-positive count from the confusion matrix."""

    def __init__(self, metric: str = 'TP'):
        super().__init__(metric)

    def calculate(self):
        """Return the true-positive count."""
        matrix = self.confusion_matrix
        return matrix.tp
class BitEncode(Model):
    """Model layer that encodes real-valued inputs into *bit_size* binary planes."""

    def __init__(self, bit_size=1, *, output_shape=None, input_shape=None, name=None, bin_dtype=bb.DType.FP32, real_dtype=bb.DType.FP32, core_model=None):
        output_shape = [] if output_shape is None else output_shape
        if core_model is None:
            # Look up the core implementation matching the dtype pair and build it.
            factory = search_core_model('BitEncode', [bin_dtype, real_dtype]).create
            core_model = factory(bit_size, output_shape)
        super(BitEncode, self).__init__(core_model=core_model, input_shape=input_shape, name=name)
def _quota_exceeded(response: 'requests.models.Response') -> bool:
return ('Google Drive - Quota exceeded' in response.text) |
def register_Ns3GbrQosInformation_methods(root_module, cls):
    """Register ns3::GbrQosInformation constructors and attributes for PyBindGen."""
    # Copy constructor, then the default constructor.
    cls.add_constructor([param('ns3::GbrQosInformation const &', 'arg0')])
    cls.add_constructor([])
    # Guaranteed/maximum bit rates, downlink and uplink.
    cls.add_instance_attribute('gbrDl', 'uint64_t', is_const=False)
    cls.add_instance_attribute('gbrUl', 'uint64_t', is_const=False)
    cls.add_instance_attribute('mbrDl', 'uint64_t', is_const=False)
    cls.add_instance_attribute('mbrUl', 'uint64_t', is_const=False)
    return
class ReplicationPad1d(_ReplicationPadNd):
    """Pads a 1d input by replicating its border values.

    ``padding`` is normalized to a (left, right) pair via ``_pair``.
    """
    padding: _size_2_t

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)
def create_pipeline_configuration(DEBUG=False, batch_size=8):
    """Return the generated 16-stage pipeline-parallel config for a T5 model.

    The ``config`` literal below is auto-generated (stage classes, tensor
    names, shapes and producer/consumer wiring); do not hand-edit it. The
    recorded shapes assume batch size 8 — the loops at the bottom rewrite
    the batch dimension of every batched tensor to ``batch_size``.

    Args:
        DEBUG: when True every stage is placed on 'cpu' instead of its
            dedicated 'cuda:N' device.
        batch_size: batch size substituted into every batched shape.
    """
    config = {'batch_dim': 0, 'depth': 10000, 'basic_blocks': (StatelessEmbedding, T5LayerNorm, Dropout, T5Block, Linear), 'model_inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0, 8]}, 'decoder_attention_mask': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'input_ids': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [0]}, 'labels': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'is_batched': True, 'used_by': [15]}}, 'model_outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_780': {'shape': torch.Size([1]), 'dtype': torch.float32, 'is_batched': False, 'created_by': 15}}, 'stages': {0: {'stage_cls': Partition0, 'inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_attention_mask': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'decoder_input_ids': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'input_ids': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_1': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___64': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___66': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 
'used_by': [1]}, 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_8': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:0')], 'stage_depth': 15}, 1: {'stage_cls': Partition1, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_1': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___64': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___66': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_2': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___109': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___111': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [2]}}, 'devices': [('cpu' if DEBUG else 'cuda:1')], 'stage_depth': 14}, 2: {'stage_cls': Partition2, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_2': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 1}, 
'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___109': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___111': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 1}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_3': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___154': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___156': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [3]}}, 'devices': [('cpu' if DEBUG else 'cuda:2')], 'stage_depth': 13}, 3: {'stage_cls': Partition3, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_3': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___154': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___156': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 2}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_4': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___199': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 
'used_by': [4]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___201': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [4]}}, 'devices': [('cpu' if DEBUG else 'cuda:3')], 'stage_depth': 12}, 4: {'stage_cls': Partition4, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_4': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___199': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___201': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 3}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_5': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___244': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___246': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [5]}}, 'devices': [('cpu' if DEBUG else 'cuda:4')], 'stage_depth': 11}, 5: {'stage_cls': Partition5, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_5': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 4}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___244': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}, 
'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___246': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 4}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_6': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___289': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___291': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [6]}}, 'devices': [('cpu' if DEBUG else 'cuda:5')], 'stage_depth': 10}, 6: {'stage_cls': Partition6, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_6': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___289': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___291': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 5}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_7': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___334': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [7]}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___336': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 
'used_by': [7]}}, 'devices': [('cpu' if DEBUG else 'cuda:6')], 'stage_depth': 9}, 7: {'stage_cls': Partition7, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Tensor::__mul___19_7': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___334': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___336': {'shape': torch.Size([8, 32, 320, 320]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 6}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___379': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [8]}}, 'devices': [('cpu' if DEBUG else 'cuda:7')], 'stage_depth': 8}, 8: {'stage_cls': Partition8, 'inputs': {'attention_mask': {'shape': torch.Size([8, 320]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/tuple::__getitem___379': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 7}, 'T5ForConditionalGeneration/T5Stack[decoder]/StatelessEmbedding[embed_tokens]': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 0}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_8': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 0}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_9': {'shape': torch.Size([8, 1, 8, 8]), 
'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_9': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___486': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___488': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [9]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___490': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [9]}}, 'devices': [('cpu' if DEBUG else 'cuda:8')], 'stage_depth': 7}, 9: {'stage_cls': Partition9, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_9': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_9': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_9': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___486': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___488': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 8}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___490': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 
'req_grad': False, 'is_batched': True, 'created_by': 8}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_10': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_10': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___525': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___527': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [10]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___529': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [10]}}, 'devices': [('cpu' if DEBUG else 'cuda:9')], 'stage_depth': 6}, 10: {'stage_cls': Partition10, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_10': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_10': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_10': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___525': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 
'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___527': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 9}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___529': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 9}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_11': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_11': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___564': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___566': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [11]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___568': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [11]}}, 'devices': [('cpu' if DEBUG else 'cuda:10')], 'stage_depth': 5}, 11: {'stage_cls': Partition11, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_11': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_11': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 
'req_grad': False, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_11': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___564': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___566': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 10}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___568': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 10}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_12': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_12': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___603': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___605': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [12]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___607': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [12]}}, 'devices': [('cpu' if DEBUG else 'cuda:11')], 
'stage_depth': 4}, 12: {'stage_cls': Partition12, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_12': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_12': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_12': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___603': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___605': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 11}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___607': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 11}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_13': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_13': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___642': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 
'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___644': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [13]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___646': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [13]}}, 'devices': [('cpu' if DEBUG else 'cuda:12')], 'stage_depth': 3}, 13: {'stage_cls': Partition13, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_13': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_13': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_13': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___642': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___644': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 12}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___646': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 12}}, 'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_14': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [14]}, 
'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_14': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___681': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___683': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [14]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___685': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [14]}}, 'devices': [('cpu' if DEBUG else 'cuda:13')], 'stage_depth': 2}, 14: {'stage_cls': Partition14, 'inputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_14': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_14': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_14': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___681': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___683': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 13}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___685': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 13}}, 
'outputs': {'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_15': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_15': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___720': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___722': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'used_by': [15]}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___724': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'used_by': [15]}}, 'devices': [('cpu' if DEBUG else 'cuda:14')], 'stage_depth': 1}, 15: {'stage_cls': Partition15, 'inputs': {'labels': {'shape': torch.Size([8, 8]), 'dtype': torch.int64, 'req_grad': False, 'is_batched': True, 'created_by': (- 1)}, 'T5ForConditionalGeneration/T5Stack[encoder]/Dropout[dropout]_15': {'shape': torch.Size([8, 320, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___440_15': {'shape': torch.Size([8, 1, 8, 8]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/Tensor::__mul___448_15': {'shape': torch.Size([8, 1, 1, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 14}, 
'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___720': {'shape': torch.Size([8, 8, 1024]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___722': {'shape': torch.Size([8, 32, 8, 8]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': True, 'created_by': 14}, 'T5ForConditionalGeneration/T5Stack[decoder]/tuple::__getitem___724': {'shape': torch.Size([8, 32, 8, 320]), 'dtype': torch.float32, 'req_grad': False, 'is_batched': True, 'created_by': 14}}, 'outputs': {'T5ForConditionalGeneration/torch.nn.functional::cross_entropy_780': {'shape': torch.Size([1]), 'dtype': torch.float32, 'req_grad': True, 'is_batched': False, 'used_by': [(- 1)]}}, 'devices': [('cpu' if DEBUG else 'cuda:15')], 'stage_depth': 0}}}
    # Rewrite the batch dimension of every batched model input/output shape
    # from the generated default (8) to the requested batch_size.
    batch_dim = config['batch_dim']
    for d in chain(config['model_inputs'].values(), config['model_outputs'].values()):
        if d['is_batched']:
            shape = d['shape']
            d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    # Same substitution for every per-stage input/output tensor descriptor.
    for s in config['stages'].values():
        for d in chain(s['inputs'].values(), s['outputs'].values()):
            if d['is_batched']:
                shape = d['shape']
                d['shape'] = torch.Size(((shape[:batch_dim] + (batch_size,)) + shape[(batch_dim + 1):]))
    return config
class domainAdaptationDataSet(data.Dataset):
    """Base dataset for domain-adaptation semantic segmentation.

    When an image-list directory is given, image ids are read from
    ``<images_list_path>/<set>.txt``. Subclasses are expected to provide
    ``self.id_to_trainid`` (used by :meth:`convert_to_class_ids`).
    """

    def __init__(self, root, images_list_path, scale_factor, num_scales, curr_scale, set, get_image_label=False):
        self.root = root
        # `is not None` instead of `!= None` (identity test is the idiom).
        if images_list_path is not None:
            self.images_list_file = osp.join(images_list_path, ('%s.txt' % set))
            # Context manager closes the id-list file deterministically
            # (the original left the handle open for the GC to collect).
            with open(self.images_list_file) as ids_file:
                self.img_ids = [image_id.strip() for image_id in ids_file]
        self.scale_factor = scale_factor
        self.num_scales = num_scales
        self.curr_scale = curr_scale
        self.set = set
        self.trans = transforms.ToTensor()
        self.crop_size = IMG_CROP_SIZE_SEMSEG
        self.ignore_label = IGNORE_LABEL
        self.get_image_label = get_image_label

    def __len__(self):
        return len(self.img_ids)

    def SetEpochSize(self, epoch_size):
        """Repeat/truncate the id list so one epoch yields exactly epoch_size items."""
        if epoch_size > len(self.img_ids):
            self.img_ids = self.img_ids * int(np.ceil(float(epoch_size) / len(self.img_ids)))
        self.img_ids = self.img_ids[:epoch_size]

    def convert_to_class_ids(self, label_image):
        """Map raw label ids to train ids; unmapped pixels become ignore_label.

        Relies on ``self.id_to_trainid`` being defined by the subclass.
        """
        label = np.asarray(label_image, np.float32)
        label_copy = self.ignore_label * np.ones(label.shape, dtype=np.float32)
        for (k, v) in self.id_to_trainid.items():
            label_copy[label == k] = v
        return label_copy

    def GeneratePyramid(self, image, is_label=False):
        # Delegates to the module-level GeneratePyramid helper.
        scales_pyramid = GeneratePyramid(image, self.num_scales, self.curr_scale, self.scale_factor, is_label=is_label)
        return scales_pyramid
class LightHamHead(BaseSegHead):
    """Light segmentation head: squeeze concatenated multi-scale features,
    refine them with the Hamburger module, align channels, then classify.
    """

    # Per model-size presets; NOTE(review): tuple layout appears to be
    # (in_channels, ham_channels, channels, dropout) — confirm at call sites.
    cfg = {'t': ([64, 160, 256], 256, 256, 0.1),
           's': ([128, 320, 512], 256, 256, 0.1),
           'b': ([128, 320, 512], 512, 512, 0.1),
           'l': ([128, 320, 512], 1024, 1024, 0.1)}

    def __init__(self, ham_channels=256, dropout_ratio=0.1, ham_kwargs=dict(),
                 conv_cfg=None,
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                 act_cfg=dict(type='ReLU'), **kwargs):
        super(LightHamHead, self).__init__(**kwargs)
        self.ham_channels = ham_channels
        self.dropout_ratio = dropout_ratio
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.act_cfg = act_cfg
        # Submodules are created in the same order as before so parameter
        # registration (and state-dict layout) is unchanged.
        # 1x1 conv fusing the concatenated multi-scale inputs.
        self.squeeze = ConvModule(sum(self.in_channels), self.ham_channels, 1,
                                  conv_cfg=self.conv_cfg,
                                  norm_cfg=self.norm_cfg,
                                  act_cfg=self.act_cfg)
        self.hamburger = Hamburger(self.ham_channels, ham_kwargs, norm_cfg=norm_cfg)
        # 1x1 conv projecting ham_channels down to the head's output channels.
        self.align = ConvModule(self.ham_channels, self.channels, 1,
                                conv_cfg=self.conv_cfg,
                                norm_cfg=self.norm_cfg,
                                act_cfg=self.act_cfg)
        self.init_weight()

    def forward(self, inps):
        # Upsample every level to the finest (first) resolution before fusing.
        (h, w) = inps[0].shape[2:]
        resized = [
            F.interpolate(level, size=(h, w), mode='bilinear', align_corners=False)
            for level in inps
        ]
        fused = self.squeeze(torch.cat(resized, dim=1))
        fused = self.hamburger(fused)
        fused = self.align(fused)
        return self.classify(fused)

    def init_weight(self):
        # Kaiming init for convs; unit weight / zero bias for norms and linears.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight)
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, (nn.BatchNorm2d, nn.Linear)):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
@_builder('snli_ve')
class SNLIVisualEntailmentBuilder(BaseDatasetBuilder):
    """Dataset builder for the SNLI visual-entailment task.

    BUGFIX: the original called ``_builder('snli_ve')`` as a bare statement,
    discarding the decorator it returns, so this class was never registered
    under the ``'snli_ve'`` key; applying it as a decorator performs the
    registration.
    """

    # Both splits use the same dataset class (the upstream class name's
    # "Entialment" spelling is preserved as-is).
    train_dataset_cls = SNLIVisualEntialmentDataset
    eval_dataset_cls = SNLIVisualEntialmentDataset

    # Default YAML config describing where/how to load the dataset.
    DATASET_CONFIG_DICT = {'default': 'configs/datasets/snli_ve/defaults.yaml'}
class VietorisRipsComplex(SimplicialComplex):
    """Vietoris–Rips complex: points become vertices, pairs closer than
    ``epsilon`` become edges, and the maximal cliques of that neighborhood
    graph are imported as simplices.
    """

    def __init__(self, points, epsilon, labels=None, distfcn=distance.euclidean):
        """Args:
            points: iterable of point coordinates.
            epsilon: strict distance threshold for connecting two points.
            labels: optional per-point vertex labels; ignored (replaced by
                0..n-1 indices) when missing or of mismatched length.
            distfcn: pairwise distance function (default: Euclidean).
        """
        super(VietorisRipsComplex, self).__init__()
        self.pts = points
        if labels is None or len(labels) != len(self.pts):
            self.labels = list(range(len(self.pts)))
        else:
            self.labels = labels
        self.epsilon = epsilon
        self.distfcn = distfcn
        self.network = self.construct_network(self.pts, self.labels, self.epsilon, self.distfcn)
        # Maximal cliques of the neighborhood graph are the Rips simplices.
        self.import_simplices(map(tuple, nx.find_cliques(self.network)))

    def construct_network(self, points, labels, epsilon, distfcn):
        """Build the epsilon-neighborhood graph over the labeled points.

        Both orderings of each pair are examined (as in the original), so
        an asymmetric ``distfcn`` connects a pair if either direction is
        within ``epsilon``.
        """
        graph = nx.Graph()
        graph.add_nodes_from(labels)
        nodes = list(zip(points, labels))
        for point_a, label_a in nodes:
            for point_b, label_b in nodes:
                if label_a == label_b:
                    continue  # skip self-pairings
                if distfcn(point_a, point_b) < epsilon:
                    graph.add_edge(label_a, label_b)
        return graph
def _create_var(name: str, value_expr: TfExpression) -> TfExpression:
    """Create the persistent state for one autosummary and return the op
    that folds ``value_expr`` into it.

    The state is a 3-vector ``[count, sum(x), sum(x**2)]`` so that mean and
    variance can be recovered later. Non-finite sums are dropped (replaced
    by zeros) so a single NaN/Inf batch does not poison the accumulator.
    """
    assert not _finalized  # module must still accept new summaries
    name_id = name.replace('/', '_')
    value = tf.cast(value_expr, _dtype)

    if value.shape.is_fully_defined():
        size = np.prod(value.shape.as_list())
        size_expr = tf.constant(size, dtype=_dtype)
    else:
        # Element count only known at graph-execution time.
        size = None
        size_expr = tf.reduce_prod(tf.cast(tf.shape(value), _dtype))

    if size == 1:
        # Scalar fast path: no reductions needed.
        if value.shape.ndims != 0:
            value = tf.reshape(value, [])
        moments = [size_expr, value, tf.square(value)]
    else:
        moments = [size_expr, tf.reduce_sum(value), tf.reduce_sum(tf.square(value))]
    # Guard against NaN/Inf: contribute zeros instead of a poisoned update.
    moments = tf.cond(tf.math.is_finite(moments[1]),
                      lambda: tf.stack(moments),
                      lambda: tf.zeros(3, dtype=_dtype))

    with tfutil.absolute_name_scope('Autosummary/' + name_id), tf.control_dependencies(None):
        var = tf.Variable(tf.zeros(3, dtype=_dtype), trainable=False)
        # First update initializes the variable; later ones accumulate.
        update_op = tf.cond(tf.compat.v1.is_variable_initialized(var),
                            lambda: tf.compat.v1.assign_add(var, moments),
                            lambda: tf.compat.v1.assign(var, moments))

    _vars.setdefault(name, []).append(var)
    return update_op
class ShuffleBlock(nn.Module):
    """Channel shuffle (ShuffleNet): interleave channels across groups so
    that information can flow between the groups of grouped convolutions.
    """

    def __init__(self, groups):
        """Args:
            groups: number of channel groups; the input channel count C
                must be divisible by it.
        """
        super(ShuffleBlock, self).__init__()
        self.groups = groups

    def forward(self, x):
        """Shuffle the channel dimension of an (N, C, H, W) tensor.

        Returns a tensor of the same shape whose channels are reordered
        group-wise: [g0c0, g1c0, ..., g0c1, g1c1, ...].
        """
        N, C, H, W = x.size()
        g = self.groups
        # BUGFIX: the original used C / g, which is float division in
        # Python 3 — Tensor.view requires integer sizes, so forward()
        # raised a TypeError. Integer division restores the intent.
        return (x.view(N, g, C // g, H, W)
                 .permute(0, 2, 1, 3, 4)
                 .contiguous()
                 .view(N, C, H, W))
def stem_token(token, resources):
    """Return the stemmed form of ``token``, computing and caching it on
    the token object the first time.

    Normalization is also cached: ``token.normalized_value`` is filled in
    (via ``snips_nlu_utils.normalize``) if not already present, and the
    stem is derived from the normalized value using ``_stem``.
    """
    # Local import keeps the heavy dependency off the module import path.
    from snips_nlu_utils import normalize
    if not token.stemmed_value:
        if not token.normalized_value:
            token.normalized_value = normalize(token.value)
        token.stemmed_value = _stem(token.normalized_value, resources)
    return token.stemmed_value
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.