code stringlengths 101 5.91M |
|---|
class Reference(metaclass=ABCMeta):
    """Abstract base class for references to typed values in a test case.

    NOTE(review): the abstract methods below have lost their bodies and
    most likely their ``@abstractmethod`` decorators during extraction;
    docstrings are inserted here so the stubs read as declarations.
    """

    def __init__(self, typ: ProperType) -> None:
        # The (proper) type this reference points to.
        self._type = typ

    def type(self) -> ProperType:
        """Return the type of this reference.

        NOTE(review): callers below access ``self.type`` without calling it,
        so this was probably a ``@property`` whose decorator was lost — confirm.
        """
        return self._type

    def is_primitive(self) -> bool:
        # NOTE(review): only valid if ``type`` is a property; as written this
        # would call ``accept`` on a bound method object — confirm upstream.
        return self.type.accept(is_primitive_type)

    def is_none_type(self) -> bool:
        # True when the referenced type is None (see property caveat above).
        return isinstance(self.type, NoneType)

    def get_names(self, variable_names: ns.AbstractNamingScope, module_names: ns.AbstractNamingScope) -> list[str]:
        """Return the name components of this reference under the given scopes."""

    def clone(self, memo: dict[VariableReference, VariableReference]) -> Reference:
        """Deep-copy this reference, remapping variables through ``memo``."""

    def structural_eq(self, other: Any, memo: dict[VariableReference, VariableReference]) -> bool:
        """Structural equality that tolerates renamed variables via ``memo``."""

    def structural_hash(self, memo: dict[VariableReference, int]) -> int:
        """Hash consistent with ``structural_eq`` under the same ``memo``."""

    def get_variable_reference(self) -> (VariableReference | None):
        """Return the underlying variable reference, if any."""

    def replace_variable_reference(self, old: VariableReference, new: VariableReference) -> None:
        """Replace occurrences of ``old`` with ``new`` inside this reference."""
class resnet99_avg(nn.Module):
    """Small residual CNN for 103-channel inputs (e.g. hyperspectral patches).

    Two parallel stem convolutions are concatenated to 64 channels, followed
    by two residual stages, global average pooling, and a linear classifier
    emitting ``num_classes`` logits.
    """

    def __init__(self, num_classes=9):
        super().__init__()
        # Stem: two parallel 3x3 convs (no padding), concatenated to 64 channels.
        self.conv1a = nn.Conv2d(103, 32, kernel_size=3, stride=1, padding=0, groups=1)
        self.conv1b = nn.Conv2d(103, 32, kernel_size=3, stride=1, padding=0, groups=1)
        self.bn1 = nn.BatchNorm2d(64, eps=0.001, momentum=0.9)
        # Residual stage 1.
        self.conv2a = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, groups=1)
        self.conv2b = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, groups=1)
        self.bn2 = nn.BatchNorm2d(64, eps=0.001, momentum=0.9)
        # Residual stage 2.
        self.conv3a = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, groups=1)
        self.conv3b = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, groups=1)
        self.fc = nn.Linear(64, num_classes)

    def forward(self, x):
        # Stem: parallel convs joined along the channel axis.
        skip = torch.cat((self.conv1a(x), self.conv1b(x)), axis=1)
        # Residual stage 1: BN -> ReLU -> conv -> ReLU -> conv, added to skip.
        h = nn.ReLU()(self.bn1(skip))
        h = nn.ReLU()(self.conv2a(h))
        skip = skip + self.conv2b(h)
        # Residual stage 2: same pattern on the updated skip tensor.
        h = nn.ReLU()(self.bn2(skip))
        h = nn.ReLU()(self.conv3a(h))
        skip = skip + self.conv3b(h)
        # Global average pool, flatten, classify.
        pooled = nn.AdaptiveAvgPool2d((1, 1))(skip)
        flat = pooled.reshape(pooled.size(0), -1)
        return self.fc(flat)
def _get_RFCN_head(is_train, ft_map, rois, num_classes):
    """Build the R-FCN detection head symbols on top of a backbone feature map.

    Args:
        is_train: training flag (not used inside this head as written).
        ft_map: backbone feature-map symbol.
        rois: region-of-interest proposal symbol.
        num_classes: number of object classes (background included, presumably
            — confirm against the caller).

    Returns:
        Tuple of (cls_score, bbox_pred) symbols reshaped to (-1, num_classes)
        and (-1, 4 * num_classes).
    """
    num_rfcn_chn = 512  # width of the intermediate 1x1 adaptation conv
    S = 7  # position-sensitive grid size (S x S bins per RoI)
    # 1x1 conv + ReLU adapting backbone channels; lr_mult=3.0 trains the head faster.
    conv_new_1 = mx.sym.Convolution(data=ft_map, kernel=(1, 1), num_filter=num_rfcn_chn, name='conv_new_1', lr_mult=3.0)
    relu_new_1 = mx.sym.Activation(data=conv_new_1, act_type='relu', name='conv_new_1_relu')
    # Position-sensitive score maps: S*S bins per class / per bbox coordinate.
    rfcn_cls = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=((S * S) * num_classes), name='rfcn_cls', lr_mult=3.0)
    rfcn_bbox = mx.sym.Convolution(data=relu_new_1, kernel=(1, 1), num_filter=(((S * S) * 4) * num_classes), name='rfcn_bbox', lr_mult=3.0)
    # Pool each RoI into its S x S position-sensitive bins.
    psroipool5_cls = mx.contrib.sym.PSROIPooling(name='psroipool5_cls', data=rfcn_cls, rois=rois, group_size=S, pooled_size=S, output_dim=num_classes, spatial_scale=(1.0 / config.RCNN_FEAT_STRIDE))
    psroipool5_reg = mx.contrib.sym.PSROIPooling(name='psroipool5_reg', data=rfcn_bbox, rois=rois, group_size=S, pooled_size=S, output_dim=(num_classes * 4), spatial_scale=(1.0 / config.RCNN_FEAT_STRIDE))
    # Vote by averaging over the S x S bins, then flatten to per-RoI rows.
    cls_score = mx.symbol.Pooling(data=psroipool5_cls, global_pool=True, kernel=(S, S), pool_type='avg', name='cls_score')
    bbox_pred = mx.symbol.Pooling(data=psroipool5_reg, global_pool=True, kernel=(S, S), pool_type='avg', name='bbox_pred')
    cls_score = mx.sym.Reshape(name='cls_score_reshape', data=cls_score, shape=((- 1), num_classes))
    bbox_pred = mx.sym.Reshape(name='bbox_pred_reshape', data=bbox_pred, shape=((- 1), (4 * num_classes)))
    return (cls_score, bbox_pred)
def process_vlsp22(paths, dataset_name, *args):
    """Convert and split the VLSP 2022/2023 Vietnamese constituency treebank.

    Reads the raw VLSP files, converts them to bracketed trees, and writes
    train/dev(/test) splits under ``paths['CONSTITUENCY_DATA_DIR']``.

    Fix: corrected the user-facing "Procesing" typo in both progress messages.

    Args:
        paths: dict with at least CONSTITUENCY_BASE and CONSTITUENCY_DATA_DIR.
        dataset_name: 'vi_vlsp22' or 'vi_vlsp23'.
        *args: forwarded as ``list(*args)`` — callers must pass exactly one
            iterable of option strings (existing calling convention kept).

    Raises:
        FileNotFoundError: if the dataset dir, train files, or required
            official test files are missing.
    """
    assert ((dataset_name == 'vi_vlsp22') or (dataset_name == 'vi_vlsp23'))
    if (dataset_name == 'vi_vlsp22'):
        default_subdir = 'VLSP_2022'
        default_make_test_split = False
        updated_tagset = False
    elif (dataset_name == 'vi_vlsp23'):
        default_subdir = os.path.join('VLSP_2023', 'Trainingdataset')
        default_make_test_split = True
        updated_tagset = True
    parser = argparse.ArgumentParser()
    parser.add_argument('--subdir', default=default_subdir, type=str, help='Where to find the data - allows for using previous versions, if needed')
    parser.add_argument('--no_convert_brackets', default=True, action='store_false', dest='convert_brackets', help="Don't convert the VLSP parens RKBT & LKBT to PTB parens")
    parser.add_argument('--n_splits', default=None, type=int, help='Split the data into this many pieces. Relevant as there is no set training/dev split, so this allows for N models on N different dev sets')
    parser.add_argument('--test_split', default=default_make_test_split, action='store_true', help='Split 1/10th of the data as a test split as well. Useful for experimental results. Less relevant since there is now an official test set')
    parser.add_argument('--no_test_split', dest='test_split', action='store_false', help='Split 1/10th of the data as a test split as well. Useful for experimental results. Less relevant since there is now an official test set')
    args = parser.parse_args(args=list(*args))
    # Resolve the dataset dir: honor an existing path as-is, otherwise look
    # under the configured constituency base directory.
    if os.path.exists(args.subdir):
        vlsp_dir = args.subdir
    else:
        vlsp_dir = os.path.join(paths['CONSTITUENCY_BASE'], 'vietnamese', args.subdir)
        if (not os.path.exists(vlsp_dir)):
            raise FileNotFoundError('Could not find the {} dataset in the expected location of {} - CONSTITUENCY_BASE == {}'.format(dataset_name, vlsp_dir, paths['CONSTITUENCY_BASE']))
    vlsp_files = os.listdir(vlsp_dir)
    # Official test shards start with 'private'; training shards with 'file'.
    vlsp_test_files = [os.path.join(vlsp_dir, x) for x in vlsp_files if (x.startswith('private') and (not x.endswith('.zip')))]
    vlsp_train_files = [os.path.join(vlsp_dir, x) for x in vlsp_files if (x.startswith('file') and (not x.endswith('.zip')))]
    vlsp_train_files.sort()
    if (len(vlsp_train_files) == 0):
        raise FileNotFoundError("No train files (files starting with 'file') found in {}".format(vlsp_dir))
    # Official test files are only required when we are not carving our own split.
    if ((not args.test_split) and (len(vlsp_test_files) == 0)):
        raise FileNotFoundError('No test files found in {}'.format(vlsp_dir))
    print('Loading training files from {}'.format(vlsp_dir))
    print('Processing training files:\n {}'.format('\n '.join(vlsp_train_files)))
    with tempfile.TemporaryDirectory() as train_output_path:
        vtb_convert.convert_files(vlsp_train_files, train_output_path, verbose=True, fix_errors=True, convert_brackets=args.convert_brackets, updated_tagset=updated_tagset)
        if args.n_splits:
            # Produce N rotations of train/dev(/test) so N models can be
            # trained on N different dev sets.
            test_size = (0.1 if args.test_split else 0.0)
            dev_size = ((1.0 - test_size) / args.n_splits)
            train_size = ((1.0 - test_size) - dev_size)
            for rotation in range(args.n_splits):
                random.seed(1234)  # reseed so every rotation shuffles identically
                rotation_name = ('%s-%d-%d' % (dataset_name, rotation, args.n_splits))
                if args.test_split:
                    rotation_name = (rotation_name + 't')
                vtb_split.split_files(train_output_path, paths['CONSTITUENCY_DATA_DIR'], rotation_name, train_size=train_size, dev_size=dev_size, rotation=(rotation, args.n_splits))
        else:
            test_size = (0.1 if args.test_split else 0.0)
            dev_size = 0.1
            train_size = ((1.0 - test_size) - dev_size)
            if args.test_split:
                dataset_name = (dataset_name + 't')
            vtb_split.split_files(train_output_path, paths['CONSTITUENCY_DATA_DIR'], dataset_name, train_size=train_size, dev_size=dev_size)
    if (not args.test_split):
        # Use the official test files when no test split was carved above.
        print('Processing test files:\n {}'.format('\n '.join(vlsp_test_files)))
        with tempfile.TemporaryDirectory() as test_output_path:
            # NOTE(review): updated_tagset is not forwarded here, so VLSP-2023
            # test files would convert with the old tagset — confirm intent.
            vtb_convert.convert_files(vlsp_test_files, test_output_path, verbose=True, fix_errors=True, convert_brackets=args.convert_brackets)
            if args.n_splits:
                for rotation in range(args.n_splits):
                    rotation_name = ('%s-%d-%d' % (dataset_name, rotation, args.n_splits))
                    vtb_split.split_files(test_output_path, paths['CONSTITUENCY_DATA_DIR'], rotation_name, train_size=0, dev_size=0)
            else:
                vtb_split.split_files(test_output_path, paths['CONSTITUENCY_DATA_DIR'], dataset_name, train_size=0, dev_size=0)
def read_fasta_yield(f):
    """Lazily yield validated ``Seq`` records from an open FASTA file handle.

    Records are emitted only if they pass ``is_fasta``; ``count`` is the
    1-based index of the record's header line within the file.
    """
    name = ''
    seq = ''
    count = 0
    while True:
        line = f.readline()
        if not line:
            break
        if line[0] == '>':
            # Flush the previous record; skip the empty state before the
            # first header (count == 0 and no sequence accumulated).
            if count != 0 or seq != '':
                record = Seq(name, seq, count)
                if is_fasta(record):
                    yield record
            seq = ''
            name = line[1:].strip()
            count += 1
        else:
            # Continuation line: append to the current sequence.
            seq += line.strip()
    # Flush the final record after EOF.
    tail = Seq(name, seq, count)
    if is_fasta(tail):
        yield tail
def get_grammatical_function(attributes):
    """Classify a parse-tree node's grammatical function from its parent label.

    Returns 'SUBJECT' when the parent label starts with S or FRAG, 'OBJECT'
    when it starts with VP, and 'OTHER' otherwise (including root nodes).
    """
    parent = attributes['parse_tree'].parent()
    if parent is None:
        return 'OTHER'
    label = parent.label()
    if re.match('^(S|FRAG)', label):
        return 'SUBJECT'
    if re.match('VP', label):
        return 'OBJECT'
    return 'OTHER'
def hamming_calc(TP, POP):
    """Hamming loss: fraction of the population not covered by true positives.

    Args:
        TP: mapping of class -> true-positive count.
        POP: population size.

    Returns:
        (1/POP) * (POP - sum(TP)), or the string 'None' on any failure
        (e.g. POP == 0), matching the library's stringly-typed error style.
    """
    try:
        misses = POP - sum(TP.values())
        return (1 / POP) * misses
    except Exception:
        return 'None'
def train(model, loader, optimizer):
    """Run one training epoch over ``loader``, updating ``model`` in place.

    NOTE: relies on a module-level ``criterion`` loss function defined
    elsewhere in this file.
    """
    model.train()
    for (batch, *extras) in loader:
        batch = batch.to(model.device)
        optimizer.zero_grad()
        # Forward pass; any extra loader fields are forwarded to the model.
        logits = model(batch.x, batch.adj_t, *extras)
        # Restrict the loss to training nodes among the first logits.size(0) targets.
        mask = batch.train_mask[:logits.size(0)]
        loss = criterion(logits[mask], batch.y[:logits.size(0)][mask])
        loss.backward()
        optimizer.step()
def add_compare_with_cpu_command(subparsers):
    """Register the ``compare_with_cpu`` subcommand and its CLI options."""
    cmd = subparsers.add_parser('compare_with_cpu', help='Compare performance between two nntxt.')
    cmd.add_argument('-c', '--config', help='path to nntxt', required=True)
    cmd.add_argument('-c2', '--config2', help='path to cpu nntxt', required=True)
    cmd.add_argument('-o', '--outdir', help='output directory', required=True)
    # Dispatch to the handler function defined elsewhere in this module.
    cmd.set_defaults(func=compare_with_cpu_command)
def setup_args():
    """Build and parse the command-line arguments for this script."""
    parser = argparse.ArgumentParser()
    options = (
        ('--seed', int, 9, 'seed for reproducibility'),
        ('--base_dir', str, 'rule_classifier_data/val', 'base directory for the data'),
        ('--proj_name', str, 'rsbotownversion', 'name of the input repo'),
    )
    for flag, arg_type, default, message in options:
        parser.add_argument(flag, type=arg_type, default=default, help=message)
    return parser.parse_args()
class Mish_VGG(nn.Module):
    """VGG-style network using the Mish activation, configured by ``cfg[vgg_name]``.

    Relies on module-level ``cfg`` (layer specs) and ``Mish`` (activation)
    defined elsewhere in this file. Classifier is a 10-way linear head over
    512 flattened features.
    """

    def __init__(self, vgg_name):
        super().__init__()
        self.features = self._make_layers(cfg[vgg_name])
        self.classifier = nn.Linear(512, 10)

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        return self.classifier(flat)

    def _make_layers(self, cfg):
        """Translate a spec list into a Sequential: ints are conv widths, 'M' is max-pool."""
        blocks = []
        width = 3  # input channels (RGB)
        for spec in cfg:
            if spec == 'M':
                blocks.append(nn.MaxPool2d(kernel_size=2, stride=2))
            else:
                blocks.extend([
                    nn.Conv2d(width, spec, kernel_size=3, padding=1),
                    nn.BatchNorm2d(spec),
                    Mish(),
                ])
                width = spec
        # Trailing 1x1 average pool (identity-shaped), kept for parity with
        # the reference VGG implementation.
        blocks.append(nn.AvgPool2d(kernel_size=1, stride=1))
        return nn.Sequential(*blocks)
# NOTE(review): the lines below appear to be pytest decorators whose
# '@pytest.mark' prefixes were lost during extraction; as written they are
# not syntactically valid Python.
.parametrize('ctx, func_name', ctxs)
.parametrize('seed', [313])
.parametrize('prob', [0.7, 1.0])
.parametrize('area_ratios', [(0.02, 0.04)])
.parametrize('aspect_ratios', [(0.3, 3.3333)])
.parametrize('replacements', [(2.0, 2.0), (3.0, 4.0)])
.parametrize('n', [1, 3])
.parametrize('share', [True, False])
.parametrize('inplace', [False])
.parametrize('base_axis', [1])
.parametrize('func_seed', [412, (- 1)])
.parametrize('channel_last', [False, True])
def test_random_erase_forward(ctx, func_name, seed, prob, area_ratios, aspect_ratios, replacements, n, share, inplace, base_axis, func_seed, channel_last):
    """Forward test for NNabla's F.random_erase: with prob == 1.0 some pixels
    must be replaced, a fixed func_seed must reproduce the output, and a
    different seed must change it."""
    if (channel_last and (func_name == 'RandomErase')):
        pytest.skip('RandomErase with channel_last is only supported in CUDA.')
    lb = replacements[0]  # lower bound of the replacement value range
    rng = np.random.RandomState(seed)
    (b, c, h, w) = (4, 3, 32, 32)
    ishape = ([b, h, w, c] if channel_last else [b, c, h, w])
    # Input in [1, 2) so replaced pixels (>= lb, which is 2.0 or 3.0) are detectable.
    x = nn.Variable.from_numpy_array((rng.rand(*ishape) + 1.0))
    with nn.context_scope(ctx):
        y0 = F.random_erase(x, prob=prob, area_ratios=area_ratios, aspect_ratios=aspect_ratios, replacements=replacements, n=n, share=share, inplace=inplace, base_axis=base_axis, seed=func_seed, channel_last=channel_last)
    y0.forward()
    if (prob == 1.0):
        # Erasure is guaranteed: at least one value must reach the replacement range.
        assert np.any((y0.d >= lb))
    if (func_seed != (- 1)):
        # Same explicit seed -> bitwise-reproducible output.
        with nn.context_scope(ctx):
            y1 = F.random_erase(x, prob=prob, area_ratios=area_ratios, aspect_ratios=aspect_ratios, replacements=replacements, n=n, share=share, inplace=inplace, base_axis=base_axis, seed=func_seed, channel_last=channel_last)
        y1.forward()
        assert_allclose(y0.d, y1.d)
    # A shifted seed must produce a different erasure pattern.
    with nn.context_scope(ctx):
        y2 = F.random_erase(x, prob=prob, area_ratios=area_ratios, aspect_ratios=aspect_ratios, replacements=replacements, n=n, share=share, inplace=inplace, base_axis=base_axis, seed=(func_seed + 2), channel_last=channel_last)
    y2.forward()
    assert np.any((y0.d != y2.d))
# NOTE(review): the call below looks like a decorator that lost its
# '@register_kl' prefix in extraction (torch.distributions KL registry).
_kl(Beta, Normal)
def _kl_beta_normal(p, q):
    """Closed-form KL(Beta(c1, c0) || Normal(mu, sigma)).

    KL = -H(Beta) + 0.5*log(2*pi*sigma^2)
         + (0.5*E[X^2] - mu*E[X] + 0.5*mu^2) / sigma^2
    where E[X^2] = Var[X] + E[X]^2 for the Beta distribution.
    """
    E_beta = (p.concentration1 / (p.concentration1 + p.concentration0))  # E[X]
    var_normal = q.scale.pow(2)  # sigma^2
    t1 = (- p.entropy())  # -H(Beta)
    t2 = (0.5 * ((var_normal * 2) * math.pi).log())  # 0.5*log(2*pi*sigma^2)
    # 0.5*E[X^2], using Var[X] = E(1-E)/(c1+c0+1).
    t3 = ((((E_beta * (1 - E_beta)) / ((p.concentration1 + p.concentration0) + 1)) + E_beta.pow(2)) * 0.5)
    t4 = (q.loc * E_beta)  # mu*E[X]
    t5 = (q.loc.pow(2) * 0.5)  # 0.5*mu^2
    return ((t1 + t2) + (((t3 - t4) + t5) / var_normal))
class OfflineRLAlgorithm(object, metaclass=abc.ABCMeta):
    """Skeleton for offline RL training: per epoch, evaluate the current
    policy, run gradient updates from a fixed replay buffer, then snapshot
    and log. Subclasses must implement ``training_mode``.
    """

    def __init__(self, trainer, evaluation_policy, evaluation_env, evaluation_data_collector, replay_buffer, batch_size, max_path_length, num_epochs, num_eval_steps_per_epoch, num_trains_per_train_loop, num_train_loops_per_epoch=1, save_snapshot_freq=1000):
        self.trainer = trainer
        self.eval_policy = evaluation_policy
        self.eval_env = evaluation_env
        self.eval_data_collector = evaluation_data_collector
        self.replay_buffer = replay_buffer
        self.batch_size = batch_size
        self.max_path_length = max_path_length
        self.num_epochs = num_epochs
        self.num_eval_steps_per_epoch = num_eval_steps_per_epoch
        self.num_trains_per_train_loop = num_trains_per_train_loop
        self.num_train_loops_per_epoch = num_train_loops_per_epoch
        # Snapshot every N epochs (None disables snapshotting).
        self.save_snapshot_freq = save_snapshot_freq
        self._start_epoch = 0
        # Callbacks invoked as f(self, epoch) at the end of every epoch.
        self.post_epoch_funcs = []

    def _train(self):
        """Main loop: evaluate, then run buffered gradient updates, per epoch."""
        for epoch in gt.timed_for(range(self._start_epoch, self.num_epochs), save_itrs=True):
            # Pass the current entropy temperature to the collector when the
            # trainer exposes one (SAC-style trainers).
            if hasattr(self.trainer, 'log_alpha'):
                curr_alpha = self.trainer.log_alpha.exp()
            else:
                curr_alpha = None
            self.eval_data_collector.collect_new_paths(max_path_length=self.max_path_length, num_samples=self.num_eval_steps_per_epoch, discard_incomplete_paths=True, alpha=curr_alpha)
            gt.stamp('evaluation sampling')
            self.training_mode(True)
            for _ in range(self.num_train_loops_per_epoch):
                for _ in range(self.num_trains_per_train_loop):
                    # Offline setting: batches come from the fixed buffer only.
                    (train_data, indices) = self.replay_buffer.random_batch(self.batch_size, return_indices=True)
                    self.trainer.train(train_data, indices)
            self.training_mode(False)
            gt.stamp('training')
            self._end_epoch(epoch)

    def train(self, start_epoch=0):
        """Public entry point; resumes training from ``start_epoch``."""
        self._start_epoch = start_epoch
        self._train()

    def _end_epoch(self, epoch):
        """Snapshot (at the configured frequency), log, and run epoch-end hooks."""
        snapshot = self._get_snapshot()
        if ((self.save_snapshot_freq is not None) and (((epoch + 1) % self.save_snapshot_freq) == 0)):
            logger.save_itr_params((epoch + 1), snapshot, prefix='offline_itr')
        gt.stamp('saving', unique=False)
        self._log_stats(epoch)
        self._end_epochs(epoch)
        for post_epoch_func in self.post_epoch_funcs:
            post_epoch_func(self, epoch)

    def _get_snapshot(self):
        """Collect trainer state under the 'trainer/' prefix for checkpointing."""
        snapshot = {}
        for (k, v) in self.trainer.get_snapshot().items():
            snapshot[('trainer/' + k)] = v
        # NOTE(review): the bare string below is leftover commented-out code
        # (collector/buffer snapshotting), kept as-is.
        "\n        for k, v in self.eval_data_collector.get_snapshot().items():\n            snapshot['evaluation/' + k] = v\n        for k, v in self.replay_buffer.get_snapshot().items():\n            snapshot['replay_buffer/' + k] = v\n        "
        return snapshot

    def _end_epochs(self, epoch):
        """Propagate epoch end to collector, trainer, and (optionally) the policy."""
        self.eval_data_collector.end_epoch(epoch)
        self.trainer.end_epoch(epoch)
        if hasattr(self.eval_policy, 'end_epoch'):
            self.eval_policy.end_epoch(epoch)

    def _get_trainer_diagnostics(self):
        """Return the trainer's diagnostics dict."""
        return self.trainer.get_diagnostics()

    def _get_training_diagnostics_dict(self):
        """Return diagnostics grouped by logging prefix."""
        return {'policy_trainer': self._get_trainer_diagnostics()}

    def _log_stats(self, epoch):
        """Record buffer, trainer, evaluation, and timing stats for ``epoch``."""
        logger.log('Epoch {} finished'.format(epoch), with_timestamp=True)
        logger.record_dict(self.replay_buffer.get_diagnostics(), prefix='replay_buffer/')
        training_diagnostics = self._get_training_diagnostics_dict()
        for prefix in training_diagnostics:
            logger.record_dict(training_diagnostics[prefix], prefix=(prefix + '/'))
        # NOTE(review): bare string used as a section marker in the original.
        '\n        Evaluation\n        '
        if (self.num_eval_steps_per_epoch > 0):
            logger.record_dict(self.eval_data_collector.get_diagnostics(), prefix='evaluation/')
            eval_paths = self.eval_data_collector.get_epoch_paths()
            if hasattr(self.eval_env, 'get_diagnostics'):
                logger.record_dict(self.eval_env.get_diagnostics(eval_paths), prefix='evaluation/')
            logger.record_dict(eval_util.get_generic_path_information(eval_paths), prefix='evaluation/')
        '\n        Misc\n        '
        gt.stamp('logging', unique=False)
        logger.record_dict(_get_epoch_timings())
        logger.record_tabular('Epoch', epoch)
        logger.dump_tabular(with_prefix=False, with_timestamp=False)

    def training_mode(self, mode):
        """Switch networks between train/eval mode; subclasses override."""
        pass
def vgg19_bn(pretrained: bool=False, progress: bool=True, **kwargs: Any) -> VGG:
    """VGG-19 with batch normalization (torchvision configuration 'E').

    Args:
        pretrained: if True, load ImageNet-pretrained weights.
        progress: if True, show a download progress bar.
        **kwargs: forwarded to the VGG constructor.
    """
    return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
class BatchUpdateParameterServer(object):
    """RPC parameter server that accumulates gradients from
    ``batch_update_size`` workers, averages them, takes a single optimizer
    step, and hands the refreshed model back via a shared future.
    """

    def __init__(self, batch_update_size):
        # NOTE(review): in_features/out_features are module-level constants
        # defined elsewhere in this file.
        self.model = nn.Linear(in_features, out_features)
        self.lock = threading.Lock()
        # Workers await this future; it resolves to the updated model.
        self.future_model = torch.futures.Future()
        self.batch_update_size = batch_update_size
        self.curr_update_size = 0
        self.optimizer = optim.SGD(self.model.parameters(), lr=0.001, momentum=0.9)
        # Pre-allocate gradient buffers so workers can accumulate into them.
        for p in self.model.parameters():
            p.grad = torch.zeros_like(p)

    def get_model(self):
        """Return the current model (called over RPC by trainers)."""
        return self.model

    # NOTE(review): the line below looks like a decorator that lost its
    # '@rpc.functions.' prefix (and likely a preceding @staticmethod).
    .async_execution
    def update_and_fetch_model(ps_rref, grads):
        """Accumulate one worker's gradients; once all workers reported,
        average, step the optimizer, and resolve the shared future."""
        self = ps_rref.local_value()
        for (p, g) in zip(self.model.parameters(), grads):
            p.grad += g
        with self.lock:
            timed_log(f'PS got {self.curr_update_size}/{self.batch_update_size} updates')
            self.curr_update_size += 1
            fut = self.future_model
            if (self.curr_update_size >= self.batch_update_size):
                # All contributions in: average, step, reset for the next round.
                for p in self.model.parameters():
                    p.grad /= self.batch_update_size
                self.curr_update_size = 0
                self.optimizer.step()
                self.optimizer.zero_grad()
                fut.set_result(self.model)
                timed_log('PS updated model')
                self.future_model = torch.futures.Future()
        return fut
def get_extensions():
    """Collect C++/CUDA sources and build the ``maskrcnn._C`` extension spec.

    CUDA kernels are compiled only when torch reports an available GPU and
    CUDA_HOME is set; otherwise a CPU-only CppExtension is produced.
    """
    root = os.path.dirname(os.path.abspath(__file__))
    csrc = os.path.join(root, 'maskrcnn', 'csrc')

    cpp_main = glob.glob(os.path.join(csrc, '*.cpp'))
    cpp_cpu = glob.glob(os.path.join(csrc, 'cpu', '*.cpp'))
    cu_files = glob.glob(os.path.join(csrc, 'cuda', '*.cu'))

    sources = cpp_main + cpp_cpu
    ext_cls = CppExtension
    compile_args = {'cxx': []}
    macros = []
    if torch.cuda.is_available() and CUDA_HOME is not None:
        ext_cls = CUDAExtension
        sources += cu_files
        macros += [('WITH_CUDA', None)]
        compile_args['nvcc'] = [
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
    # Kept from the original: re-joining already-absolute glob results is a
    # no-op (os.path.join returns the absolute second argument unchanged).
    sources = [os.path.join(csrc, s) for s in sources]
    return [
        ext_cls(
            'maskrcnn._C',
            sources,
            include_dirs=[csrc],
            define_macros=macros,
            extra_compile_args=compile_args,
        )
    ]
class SpkIdBrain(sb.Brain):
    """SpeechBrain Brain for speaker-ID classification:
    waveform -> features -> embedding -> classifier posteriors.
    """

    def compute_forward(self, batch, stage):
        """Compute speaker posteriors for a batch at the given stage."""
        batch = batch.to(self.device)
        (feats, lens) = self.prepare_features(batch.sig, stage)
        embeddings = self.modules.embedding_model(feats, lens)
        predictions = self.modules.classifier(embeddings)
        return predictions

    def prepare_features(self, wavs, stage):
        """Extract normalized features; applies corruption/augmentation at train time.

        Returns:
            (feats, lens); lens is doubled when env_corrupt doubles the batch.
        """
        (wavs, lens) = wavs
        if (stage == sb.Stage.TRAIN):
            # Environmental corruption doubles the batch (clean + noisy copies).
            if hasattr(self.modules, 'env_corrupt'):
                wavs_noise = self.modules.env_corrupt(wavs, lens)
                wavs = torch.cat([wavs, wavs_noise], dim=0)
                lens = torch.cat([lens, lens])
            if hasattr(self.hparams, 'augmentation'):
                wavs = self.hparams.augmentation(wavs, lens)
        feats = self.modules.compute_features(wavs)
        feats = self.modules.mean_var_norm(feats, lens)
        return (feats, lens)

    def compute_objectives(self, predictions, batch, stage):
        """NLL loss against encoded speaker ids; tracks error metrics outside TRAIN."""
        (_, lens) = batch.sig
        (spkid, _) = batch.spk_id_encoded
        # Mirror the batch doubling performed by env_corrupt in prepare_features.
        if ((stage == sb.Stage.TRAIN) and hasattr(self.modules, 'env_corrupt')):
            spkid = torch.cat([spkid, spkid], dim=0)
            lens = torch.cat([lens, lens])
        loss = sb.nnet.losses.nll_loss(predictions, spkid, lens)
        self.loss_metric.append(batch.id, predictions, spkid, lens, reduction='batch')
        if (stage != sb.Stage.TRAIN):
            self.error_metrics.append(batch.id, predictions, spkid, lens)
        return loss

    def on_stage_start(self, stage, epoch=None):
        """Reset per-stage metric trackers."""
        self.loss_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.losses.nll_loss)
        if (stage != sb.Stage.TRAIN):
            self.error_metrics = self.hparams.error_stats()

    def on_stage_end(self, stage, stage_loss, epoch=None):
        """Log stage stats; on VALID also anneal LR, checkpoint, and report results."""
        if (stage == sb.Stage.TRAIN):
            self.train_loss = stage_loss
        else:
            stats = {'loss': stage_loss, 'error': self.error_metrics.summarize('average')}
        if (stage == sb.Stage.VALID):
            (old_lr, new_lr) = self.hparams.lr_annealing(epoch)
            sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
            self.hparams.train_logger.log_stats({'Epoch': epoch, 'lr': old_lr}, train_stats={'loss': self.train_loss}, valid_stats=stats)
            if self.hparams.ckpt_enable:
                # Keep only the best checkpoint by validation error.
                self.checkpointer.save_and_keep_only(meta=stats, min_keys=['error'])
            hp.report_result(stats)
        if (stage == sb.Stage.TEST):
            self.hparams.train_logger.log_stats({'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=stats)
class PyTestChromosomeToAstVisitor(cv.ChromosomeVisitor):
    """Transforms chromosomes into a pytest-style AST module of test functions.

    NOTE(review): several methods below take no ``self``/``cls`` and read like
    ``@staticmethod``s (and ``module_aliases``/``common_modules`` like
    ``@property``s); their decorators were likely lost in extraction.
    """

    def __init__(self) -> None:
        # Naming scope producing aliases for imported modules in generated code.
        self._module_aliases = ns.NamingScope('module')
        # Modules imported without an alias (e.g. pytest itself).
        self._common_modules: set[str] = set()
        self._conversion_results: list[_AstConversionResult] = []

    def module_aliases(self) -> ns.NamingScope:
        """Provide the module-alias naming scope."""
        return self._module_aliases

    def common_modules(self) -> set[str]:
        """Provide the set of commonly used (unaliased) modules."""
        return self._common_modules

    def visit_test_suite_chromosome(self, chromosome) -> None:
        """Visit each test-case chromosome contained in the suite."""
        for test_case_chromosome in chromosome.test_case_chromosomes:
            test_case_chromosome.accept(self)

    def visit_test_case_chromosome(self, chromosome) -> None:
        """Convert one test case to AST statements and record the result."""
        visitor = tc_to_ast.TestCaseToAstVisitor(module_aliases=self._module_aliases, common_modules=self._common_modules, exec_result=chromosome.get_last_execution_result())
        chromosome.test_case.accept(visitor)
        self._conversion_results.append(_AstConversionResult(visitor.test_case_ast, visitor.is_failing_test))

    def __create_ast_imports(module_aliases: ns.NamingScope, common_modules: (set[str] | None)=None) -> list[ast.stmt]:
        """Build the import statements for common and aliased modules."""
        imports: list[ast.stmt] = []
        if (common_modules is not None):
            imports.extend((ast.Import(names=[ast.alias(name=module, asname=None)]) for module in common_modules))
        for (module_name, alias) in module_aliases:
            imports.append(ast.Import(names=[ast.alias(name=module_name, asname=alias)]))
        return imports

    def __create_functions(results: list[_AstConversionResult], with_self_arg: bool) -> list[ast.stmt]:
        """Create one test-function node per conversion result."""
        functions: list[ast.stmt] = []
        for (i, result) in enumerate(results):
            nodes = result.test_case_ast_stmts
            function_name = f'case_{i}'
            # Empty test bodies still need a statement to be valid Python.
            if (len(nodes) == 0):
                nodes = [ast.Pass()]
            function_node = PyTestChromosomeToAstVisitor.__create_function_node(function_name, nodes, with_self_arg, result.exception_status)
            functions.append(function_node)
        return functions

    def __create_function_node(function_name: str, nodes: list[ast.stmt], with_self_arg: bool, is_failing: bool) -> ast.FunctionDef:
        """Build a ``test_<name>`` FunctionDef, optionally with a ``self`` arg.

        NOTE(review): ``ctx='Param'`` passes a string where an AST context
        node is usually expected — confirm against the pynguin original.
        """
        return ast.FunctionDef(name=f'test_{function_name}', args=ast.arguments(args=([ast.Name(id='self', ctx='Param')] if with_self_arg else []), defaults=[], vararg=None, kwarg=None, posonlyargs=[], kwonlyargs=[], kw_defaults=[]), body=nodes, decorator_list=PyTestChromosomeToAstVisitor.__create_decorator_list(is_failing), returns=None)

    def __create_decorator_list(is_failing: bool) -> list[ast.expr]:
        """Mark failing tests with ``@pytest.mark.xfail(strict=True)``."""
        if is_failing:
            return [ast.Call(func=ast.Attribute(value=ast.Attribute(value=ast.Name(id='pytest', ctx=ast.Load()), attr='mark', ctx=ast.Load()), attr='xfail', ctx=ast.Load()), args=[], keywords=[ast.keyword(arg='strict', value=ast.Constant(value=True))])]
        return []

    def to_module(self) -> ast.Module:
        """Assemble imports and generated test functions into one ast.Module."""
        import_nodes = PyTestChromosomeToAstVisitor.__create_ast_imports(self._module_aliases, self._common_modules)
        functions = self.__create_functions(self._conversion_results, False)
        return ast.Module(body=(import_nodes + functions), type_ignores=[])
class NewUsersSplitter(Splitter):
    """Splits interactions so the test set holds only "new" users: those whose
    first interaction is late enough that they cover ``test_size`` of all
    users. Supports both pandas and Spark dataframes.
    """

    _init_arg_names = ['test_size', 'drop_cold_items', 'query_column', 'item_column', 'timestamp_column', 'session_id_column', 'session_id_processing_strategy']

    def __init__(self, test_size: float, drop_cold_items: bool=False, query_column: str='query_id', item_column: Optional[str]='item_id', timestamp_column: Optional[str]='timestamp', session_id_column: Optional[str]=None, session_id_processing_strategy: str='test'):
        """Initialize the splitter.

        Raises:
            ValueError: if ``test_size`` lies outside [0, 1].
        """
        super().__init__(drop_cold_items=drop_cold_items, query_column=query_column, item_column=item_column, timestamp_column=timestamp_column, session_id_column=session_id_column, session_id_processing_strategy=session_id_processing_strategy)
        if ((test_size < 0) or (test_size > 1)):
            # NOTE(review): message reads 'must between' — likely meant 'must be between'.
            raise ValueError('test_size must between 0 and 1')
        self.test_size = test_size

    # NOTE(review): the Union[(A, B)] return annotations below likely meant
    # Tuple[A, B] — these methods return a (train, test) pair.
    def _core_split_pandas(self, interactions: PandasDataFrame, threshold: float) -> Union[(PandasDataFrame, PandasDataFrame)]:
        """Pandas implementation: pick a start-date cutoff covering ``threshold`` of users."""
        # First interaction timestamp per user.
        start_date_by_user = interactions.groupby(self.query_column).agg(_start_dt_by_user=(self.timestamp_column, 'min')).reset_index()
        # Count users per start date, newest first, with a running total.
        test_start_date = start_date_by_user.groupby('_start_dt_by_user').agg(_num_users_by_start_date=(self.query_column, 'count')).reset_index().sort_values(by='_start_dt_by_user', ascending=False)
        test_start_date['_cum_num_users_to_dt'] = test_start_date['_num_users_by_start_date'].cumsum()
        test_start_date['total'] = sum(test_start_date['_num_users_by_start_date'])
        # Earliest date at which the newest users reach the requested share.
        test_start_date = test_start_date[(test_start_date['_cum_num_users_to_dt'] >= (threshold * test_start_date['total']))]
        test_start = test_start_date['_start_dt_by_user'].max()
        # Train: everything strictly before the cutoff; test: all rows of users
        # whose first interaction is at/after the cutoff.
        train = interactions[(interactions[self.timestamp_column] < test_start)]
        test = interactions.merge(start_date_by_user[(start_date_by_user['_start_dt_by_user'] >= test_start)], how='inner', on=self.query_column).drop(columns=['_start_dt_by_user'])
        if self.session_id_column:
            # Re-assign whole sessions to one side according to the strategy.
            interactions['is_test'] = False
            interactions.loc[(test.index, 'is_test')] = True
            interactions = self._recalculate_with_session_id_column(interactions)
            train = interactions[(~ interactions['is_test'])].drop(columns=['is_test'])
            test = interactions[interactions['is_test']].drop(columns=['is_test'])
            interactions = interactions.drop(columns=['is_test'])
        return (train, test)

    def _core_split_spark(self, interactions: SparkDataFrame, threshold: float) -> Union[(SparkDataFrame, SparkDataFrame)]:
        """Spark implementation mirroring the pandas logic with window functions."""
        start_date_by_user = interactions.groupby(self.query_column).agg(sf.min(self.timestamp_column).alias('_start_dt_by_user'))
        # Cumulative user counts from the newest start date; pick the latest
        # date whose cumulative share reaches the threshold.
        test_start_date = start_date_by_user.groupby('_start_dt_by_user').agg(sf.count(self.query_column).alias('_num_users_by_start_date')).select('_start_dt_by_user', sf.sum('_num_users_by_start_date').over(Window.orderBy(sf.desc('_start_dt_by_user'))).alias('_cum_num_users_to_dt'), sf.sum('_num_users_by_start_date').over(Window.orderBy(sf.lit(1))).alias('total')).filter((sf.col('_cum_num_users_to_dt') >= (sf.col('total') * threshold))).agg(sf.max('_start_dt_by_user')).head()[0]
        train = interactions.filter((sf.col(self.timestamp_column) < test_start_date))
        test = interactions.join(start_date_by_user.filter((sf.col('_start_dt_by_user') >= test_start_date)), how='inner', on=self.query_column).drop('_start_dt_by_user')
        if self.session_id_column:
            # Re-assign whole sessions to one side according to the strategy.
            test = test.withColumn('is_test', sf.lit(True))
            interactions = interactions.join(test, on=interactions.schema.names, how='left').na.fill({'is_test': False})
            interactions = self._recalculate_with_session_id_column(interactions)
            train = interactions.filter((~ sf.col('is_test'))).drop('is_test')
            test = interactions.filter(sf.col('is_test')).drop('is_test')
        return (train, test)

    def _core_split(self, interactions: DataFrameLike) -> SplitterReturnType:
        """Dispatch to the pandas or Spark implementation by input type."""
        split_method = self._core_split_spark
        if isinstance(interactions, PandasDataFrame):
            split_method = self._core_split_pandas
        return split_method(interactions, self.test_size)
class PretrainedWav2VecModel(nn.Module):
    """Wraps a pretrained fairseq wav2vec checkpoint for frozen feature extraction."""

    def __init__(self, fname):
        """Load the checkpoint at ``fname`` onto CPU and put the model in eval mode."""
        super().__init__()
        device = torch.device('cpu')
        checkpoint = torch.load(fname, map_location=device)
        self.args = checkpoint['args']
        model = Wav2VecModel.build_model(self.args, None)
        model.load_state_dict(checkpoint['model'])
        model.eval()
        self.model = model

    def forward(self, x):
        """Return (z, c): encoder features and context-aggregated features, no grad."""
        with torch.no_grad():
            z = self.model.feature_extractor(x)
            # Some extractor variants return (features, extra); keep features only.
            if isinstance(z, tuple):
                z = z[0]
            c = self.model.feature_aggregator(z)
        return (z, c)
class StateFusion(transformation.MultiStateTransformation):
first_state = transformation.PatternNode(sdfg.SDFGState)
second_state = transformation.PatternNode(sdfg.SDFGState)
    def annotates_memlets():
        """This transformation does not annotate memlets.

        NOTE(review): takes no ``self``/``cls`` — likely lost a
        ``@staticmethod`` decorator in extraction.
        """
        return False
    def expressions(cls):
        """Match two SDFG states connected by a single edge.

        NOTE(review): takes ``cls`` — likely lost a ``@classmethod``
        decorator in extraction.
        """
        return [sdutil.node_path_graph(cls.first_state, cls.second_state)]
    def find_fused_components(first_cc_input, first_cc_output, second_cc_input, second_cc_output) -> List[CCDesc]:
        """Group connected components of both states that must be fused together.

        Builds a bipartite graph between first-state output components and
        second-state input components, linked when they share array names,
        then merges each weakly-connected group into one CCDesc.

        NOTE(review): takes no ``self`` — likely lost a ``@staticmethod``
        decorator. Also note the second-state node count uses
        ``len(second_cc_output)`` while edges index ``second_cc_input`` —
        presumably the lists are parallel; confirm.
        """
        g = nx.DiGraph()
        # One node per component: (0, i) for first-state, (1, i) for second-state.
        g.add_nodes_from(((0, i) for i in range(len(first_cc_output))))
        g.add_nodes_from(((1, i) for i in range(len(second_cc_output))))
        for (i, cc1) in enumerate(first_cc_output):
            outnames1 = {n.data for n in cc1}
            for (j, cc2) in enumerate(second_cc_input):
                inpnames2 = {n.data for n in cc2}
                # Link components when an array written by cc1 is read by cc2.
                if (len((outnames1 & inpnames2)) > 0):
                    g.add_edge((0, i), (1, j))
        result = []
        # Each weakly-connected group becomes a single fused-component descriptor.
        for cc in nx.weakly_connected_components(g):
            (input1, output1, input2, output2) = (set(), set(), set(), set())
            for (gind, cind) in cc:
                if (gind == 0):
                    input1 |= first_cc_input[cind]
                    output1 |= first_cc_output[cind]
                else:
                    input2 |= second_cc_input[cind]
                    output2 |= second_cc_output[cind]
            result.append(CCDesc(input1, output1, input2, output2))
        return result
    def memlets_intersect(graph_a: SDFGState, group_a: List[nodes.AccessNode], inputs_a: bool, graph_b: SDFGState, group_b: List[nodes.AccessNode], inputs_b: bool) -> bool:
        """Return True if any memlet subset of group_a may overlap one of group_b.

        ``inputs_*`` selects which side of the access nodes to inspect:
        outgoing edges (reads) when True, incoming edges (writes) when False.
        An undecidable intersection (None) is conservatively treated as True.

        NOTE(review): takes no ``self`` — likely lost a ``@staticmethod``
        decorator in extraction.
        """
        # Fall back to the opposite subset when one side is undefined.
        src_subset = (lambda e: (e.data.src_subset if (e.data.src_subset is not None) else e.data.dst_subset))
        dst_subset = (lambda e: (e.data.dst_subset if (e.data.dst_subset is not None) else e.data.src_subset))
        if inputs_a:
            edges_a = [e for n in group_a for e in graph_a.out_edges(n)]
            subset_a = src_subset
        else:
            edges_a = [e for n in group_a for e in graph_a.in_edges(n)]
            subset_a = dst_subset
        if inputs_b:
            edges_b = [e for n in group_b for e in graph_b.out_edges(n)]
            subset_b = src_subset
        else:
            edges_b = [e for n in group_b for e in graph_b.in_edges(n)]
            subset_b = dst_subset
        for ea in edges_a:
            for eb in edges_b:
                # None means "cannot prove disjoint" — treat as intersecting.
                result = subsets.intersects(subset_a(ea), subset_b(eb))
                if ((result is True) or (result is None)):
                    return True
        return False
    def has_path(self, first_state: SDFGState, second_state: SDFGState, match_nodes: Dict[(nodes.AccessNode, nodes.AccessNode)], node_a: nodes.Node, node_b: nodes.Node) -> bool:
        """Return True if ``node_a`` reaches ``node_b`` through any matched
        node pair bridging the two states."""
        for (match_a, match_b) in match_nodes.items():
            if (nx.has_path(first_state._nx, node_a, match_a) and nx.has_path(second_state._nx, match_b, node_b)):
                return True
        return False
    def _check_all_paths(self, first_state: SDFGState, second_state: SDFGState, match_nodes: Dict[(nodes.AccessNode, nodes.AccessNode)], nodes_first: List[nodes.AccessNode], nodes_second: List[nodes.AccessNode], first_read: bool, second_read: bool) -> bool:
        """Return True (fusion-safe) if some first-state node reaches every
        second-state node via all of its successors, or if the two groups'
        memlets provably do not intersect."""
        for node_a in nodes_first:
            succ_a = first_state.successors(node_a)
            for node_b in nodes_second:
                # All successors of node_a must reach node_b across the states.
                if all((self.has_path(first_state, second_state, match_nodes, sa, node_b) for sa in succ_a)):
                    return True
        # No covering path: safe only when the memlets cannot overlap.
        if StateFusion.memlets_intersect(first_state, nodes_first, first_read, second_state, nodes_second, second_read):
            return False
        return True
    def _check_paths(self, first_state: SDFGState, second_state: SDFGState, match_nodes: Dict[(nodes.AccessNode, nodes.AccessNode)], nodes_first: List[nodes.AccessNode], nodes_second: List[nodes.AccessNode], second_input: Set[nodes.AccessNode], first_read: bool, second_read: bool) -> bool:
        """Return True (fusion-safe) if every first-state node that reaches a
        matched node also reaches all second-state nodes through it; otherwise
        require that the two groups' memlets provably do not intersect."""
        fail = False
        path_found = False
        for match in match_nodes:
            for node in nodes_first:
                path_to = nx.has_path(first_state._nx, node, match)
                if (not path_to):
                    continue
                path_found = True
                # Corresponding second-state node carrying the same array.
                node2 = next((n for n in second_input if (n.data == match.data)))
                if (not all((nx.has_path(second_state._nx, node2, n) for n in nodes_second))):
                    fail = True
                    break
            if (fail or path_found):
                break
        # Without a covering path (or on failure), fall back to memlet disjointness.
        if (fail or (not path_found)):
            if StateFusion.memlets_intersect(first_state, nodes_first, first_read, second_state, nodes_second, second_read):
                return False
        return True
def can_be_applied(self, graph, expr_index, sdfg, permissive=False):
    """Feasibility check for fusing ``self.first_state`` into ``self.second_state``.

    Verifies control-flow preconditions (single unconditional inter-state
    edge, hoistable symbol assignments), then — unless ``permissive`` — runs
    the strict data-race analysis over the weakly-connected components of
    both states. Returns True iff fusion preserves semantics.
    """
    first_state: SDFGState = self.first_state
    second_state: SDFGState = self.second_state
    out_edges = graph.out_edges(first_state)
    in_edges = graph.in_edges(first_state)
    # The first state must have exactly one outgoing inter-state edge
    # (leading to the second state).
    if (len(out_edges) != 1):
        return False
    # If both states have multiple incoming edges, fusing would merge two
    # distinct control-flow join points.
    if ((len(in_edges) > 1) and (graph.in_degree(second_state) > 1)):
        return False
    # The connecting edge must carry no condition.
    if (not out_edges[0].data.is_unconditional()):
        return False
    # Assignments on the connecting edge may be hoisted onto the first
    # state's incoming edges, but only if that is provably safe.
    if out_edges[0].data.assignments:
        if (not in_edges):
            return False
        new_assignments = set(out_edges[0].data.assignments.keys())
        # No collision with assignments already on incoming edges.
        if any(((new_assignments & set(e.data.assignments.keys())) for e in in_edges)):
            return False
        # The assigned symbols must not be used inside the first state.
        if (len((new_assignments & first_state.free_symbols)) > 0):
            return False
        # The assignment expressions must not read data written in the
        # first state (hoisting would observe stale values).
        freesyms = out_edges[0].data.free_symbols
        if (freesyms and any(((n.data in freesyms) for n in first_state.nodes() if (isinstance(n, nodes.AccessNode) and (first_state.in_degree(n) > 0))))):
            return False
        # Incoming edges must neither assign to symbols the hoisted
        # expressions read, nor read the newly-assigned symbols.
        symbols_used = set(out_edges[0].data.free_symbols)
        for e in in_edges:
            if (e.data.assignments.keys() & symbols_used):
                return False
            if (new_assignments & set(e.data.free_symbols)):
                return False
    # No predecessor may branch to both the first and the second state.
    for (src, _, _) in in_edges:
        for (_, dst, _) in graph.out_edges(src):
            if (dst == second_state):
                return False
    if (not permissive):
        # Optionally refuse to fuse states containing callback state
        # ('__pystate') data nodes.
        if Config.get_bool('frontend', 'dont_fuse_callbacks'):
            for node in (first_state.data_nodes() + second_state.data_nodes()):
                if (node.data == '__pystate'):
                    return False
        # Never fuse across MPI Waitall library nodes (either state).
        try:
            next((node for node in first_state.nodes() if ((isinstance(node, nodes.LibraryNode) and (type(node).__name__ == 'Waitall')) or (node.label == '_Waitall_'))))
            return False
        except StopIteration:
            pass
        try:
            next((node for node in second_state.nodes() if ((isinstance(node, nodes.LibraryNode) and (type(node).__name__ == 'Waitall')) or (node.label == '_Waitall_'))))
            return False
        except StopIteration:
            pass
        first_in_edges = graph.in_edges(first_state)
        second_in_edges = graph.in_edges(second_state)
        # Non-trivial fusions require the second state to have exactly one
        # incoming edge.
        if (((not second_state.is_empty()) or (not first_state.is_empty()) or (len(first_in_edges) == 0)) and (len(second_in_edges) != 1)):
            return False
        # Weakly-connected components of each state's dataflow graph.
        first_cc = [cc_nodes for cc_nodes in nx.weakly_connected_components(first_state._nx)]
        second_cc = [cc_nodes for cc_nodes in nx.weakly_connected_components(second_state._nx)]
        # Inputs: source access nodes; outputs: top-level access nodes that
        # are not inputs.
        first_input = {node for node in first_state.source_nodes() if isinstance(node, nodes.AccessNode)}
        first_output = {node for node in first_state.scope_children()[None] if (isinstance(node, nodes.AccessNode) and (node not in first_input))}
        second_input = {node for node in second_state.source_nodes() if isinstance(node, nodes.AccessNode)}
        second_output = {node for node in second_state.scope_children()[None] if (isinstance(node, nodes.AccessNode) and (node not in second_input))}
        first_cc_input = [cc.intersection(first_input) for cc in first_cc]
        first_cc_output = [cc.intersection(first_output) for cc in first_cc]
        second_cc_input = [cc.intersection(second_input) for cc in second_cc]
        second_cc_output = [cc.intersection(second_output) for cc in second_cc]
        first_output_names = {node.data for node in first_output}
        second_input_names = {node.data for node in second_input}
        # Duplicate input names in the second state make the matching below
        # ambiguous.
        if (len(second_input) > len(second_input_names)):
            return False
        matches = (first_output_names & second_input_names)
        # Each matched output may appear in at most one component of the
        # first state.
        for match in matches:
            cc_appearances = 0
            for cc in first_cc_output:
                if (len([n for n in cc if (n.data == match)]) > 0):
                    cc_appearances += 1
            if (cc_appearances > 1):
                return False
        resulting_ccs: List[CCDesc] = StateFusion.find_fused_components(first_cc_input, first_cc_output, second_cc_input, second_cc_output)
        for fused_cc in resulting_ccs:
            # Write-write hazards: data written in both states that is not
            # also an input of the second state.
            write_write_candidates = ((fused_cc.first_outputs & fused_cc.second_outputs) - fused_cc.second_inputs)
            # Last writers in the first state, in reverse topological order.
            order = [x for x in reversed(list(nx.topological_sort(first_state._nx))) if (isinstance(x, nodes.AccessNode) and (x.data in fused_cc.first_outputs))]
            # Map last writer in first state -> reader in second state.
            match_nodes: Dict[nodes.AccessNode, nodes.AccessNode] = {next((n for n in order if (n.data == match))): next((n for n in fused_cc.second_input_nodes if (n.data == match))) for match in (fused_cc.first_outputs & fused_cc.second_inputs)}
            for cand in write_write_candidates:
                nodes_first = [n for n in first_output if (n.data == cand)]
                nodes_second = [n for n in second_output if (n.data == cand)]
                if (not self._check_paths(first_state, second_state, match_nodes, nodes_first, nodes_second, second_input, False, False)):
                    return False
            first_inout = (fused_cc.first_inputs | fused_cc.first_outputs)
            for other_cc in resulting_ccs:
                if (other_cc is fused_cc):
                    # In-component read/write-write hazards.
                    for d in first_inout:
                        if (d in other_cc.second_outputs):
                            nodes_second = [n for n in second_output if (n.data == d)]
                            if (d in fused_cc.first_inputs):
                                nodes_first = [n for n in first_input if (n.data == d)]
                            else:
                                nodes_first = []
                            # Writes in the second state that originate from
                            # first-state outputs must be ordered after all
                            # first-state reads.
                            for n2 in nodes_second:
                                for e in second_state.in_edges(n2):
                                    path = second_state.memlet_path(e)
                                    src = path[0].src
                                    if ((src in second_input) and (src.data in fused_cc.first_outputs)):
                                        for n1 in fused_cc.first_output_nodes:
                                            if (n1.data == src.data):
                                                for n0 in nodes_first:
                                                    if (not nx.has_path(first_state._nx, n0, n1)):
                                                        return False
                            if (not self._check_all_paths(first_state, second_state, match_nodes, nodes_first, nodes_second, True, False)):
                                return False
                    continue
                # Cross-component hazards: another component writing data
                # this component reads or writes.
                for d in first_inout:
                    if (d in other_cc.second_outputs):
                        nodes_second = [n for n in second_output if (n.data == d)]
                        if (d in fused_cc.first_inputs):
                            nodes_first = [n for n in first_input if (n.data == d)]
                            if StateFusion.memlets_intersect(first_state, nodes_first, True, second_state, nodes_second, False):
                                return False
                        if (d in fused_cc.first_outputs):
                            nodes_first = [n for n in first_output if (n.data == d)]
                            if StateFusion.memlets_intersect(first_state, nodes_first, False, second_state, nodes_second, False):
                                return False
            # Data used in the first state and overwritten in the second.
            second_inout = ((fused_cc.first_inputs | fused_cc.first_outputs) & fused_cc.second_outputs)
            for inout in second_inout:
                nodes_first = [n for n in match_nodes if (n.data == inout)]
                # Matched nodes must be sinks in the first state.
                if any(((first_state.out_degree(n) > 0) for n in nodes_first)):
                    return False
                nodes_first = {n for n in (fused_cc.first_input_nodes | fused_cc.first_output_nodes) if (n.data == inout)}
                nodes_second = {n for n in fused_cc.second_output_nodes if (n.data == inout)}
                if (not self._check_paths(first_state, second_state, match_nodes, nodes_first, nodes_second, second_input, True, False)):
                    return False
            # Multiple output nodes for the same data in the first state:
            # each second-state input must have an unambiguous (latest)
            # intersecting writer.
            if (len(fused_cc.first_output_nodes) > len(fused_cc.first_outputs)):
                for inpnode in fused_cc.second_input_nodes:
                    found = None
                    for outnode in fused_cc.first_output_nodes:
                        if (outnode.data != inpnode.data):
                            continue
                        if StateFusion.memlets_intersect(first_state, [outnode], False, second_state, [inpnode], True):
                            if (found is not None):
                                # Disambiguate via intra-state ordering.
                                if nx.has_path(first_state.nx, outnode, found):
                                    continue
                                elif nx.has_path(first_state.nx, found, outnode):
                                    found = outnode
                                else:
                                    return False
                            found = outnode
    # Never mix an FPGA kernel state with a non-FPGA state.
    if ((first_state.number_of_nodes() > 0) and (second_state.number_of_nodes() > 0) and (sdutil.is_fpga_kernel(sdfg, first_state) != sdutil.is_fpga_kernel(sdfg, second_state))):
        return False
    return True
def apply(self, _, sdfg):
    """Fuse ``self.second_state`` into ``self.first_state``.

    Moves any inter-state assignments onto the first state's incoming edges,
    handles the trivial cases of an empty first/second state, then copies all
    nodes/edges of the second state into the first and merges access nodes
    that refer to the same data.
    """
    first_state: SDFGState = self.first_state
    second_state: SDFGState = self.second_state
    # Remove the inter-state edge(s); hoist their assignments onto the
    # edges entering the first state.
    edges = sdfg.edges_between(first_state, second_state)
    for edge in edges:
        if edge.data.assignments:
            for (src, dst, other_data) in sdfg.in_edges(first_state):
                other_data.assignments.update(edge.data.assignments)
        sdfg.remove_edge(edge)
    # Special case: empty first state — redirect its predecessors to the
    # second state and drop it.
    if first_state.is_empty():
        sdutil.change_edge_dest(sdfg, first_state, second_state)
        sdfg.remove_node(first_state)
        if (sdfg.start_state == first_state):
            sdfg.start_state = sdfg.node_id(second_state)
        return
    # Special case: empty second state — absorb its connectivity into the
    # first state and drop it.
    if second_state.is_empty():
        sdutil.change_edge_src(sdfg, second_state, first_state)
        sdutil.change_edge_dest(sdfg, second_state, first_state)
        sdfg.remove_node(second_state)
        if (sdfg.start_state == second_state):
            sdfg.start_state = sdfg.node_id(first_state)
        return
    # General case. Classify the access nodes of both states.
    first_input = [node for node in first_state.source_nodes() if isinstance(node, nodes.AccessNode)]
    first_output = [node for node in first_state.sink_nodes() if isinstance(node, nodes.AccessNode)]
    second_input = [node for node in second_state.source_nodes() if isinstance(node, nodes.AccessNode)]
    top2 = top_level_nodes(second_state)
    # Keep only pure inputs (data not also written by the first state).
    first_input = [node for node in first_input if (next((x for x in first_output if (x.data == node.data)), None) is None)]
    # Second-state access nodes with outgoing edges (readers), excluding
    # views.
    second_mid = [x for x in list(nx.topological_sort(second_state._nx)) if (isinstance(x, nodes.AccessNode) and (second_state.out_degree(x) > 0) and (not isinstance(sdfg.arrays[x.data], dt.View)))]
    # Top-level first-state access nodes in reverse topological order, so
    # the *last* writer of each data is matched first.
    sdict = first_state.scope_dict()
    order = [x for x in reversed(list(nx.topological_sort(first_state._nx))) if (isinstance(x, nodes.AccessNode) and (sdict[x] is None))]
    # Move all second-state nodes and edges into the first state.
    for node in second_state.nodes():
        if isinstance(node, nodes.NestedSDFG):
            node.sdfg.parent = first_state
        first_state.add_node(node)
    for (src, src_conn, dst, dst_conn, data) in second_state.edges():
        first_state.add_edge(src, src_conn, dst, dst_conn, data)
    top = top_level_nodes(first_state)
    # Merge access nodes referring to the same data.
    merged_nodes = set()
    for node in second_mid:
        if (node not in top2):
            continue
        # Unmerged top-level first-state candidates with the same data.
        candidates = [x for x in order if ((x.data == node.data) and (x in top) and (x not in merged_nodes))]
        source_node = (first_state.in_degree(node) == 0)
        if (not source_node):
            # Node already has writers: only fold in candidates whose
            # memlets intersect and that do not precede this node.
            for cand in candidates:
                if StateFusion.memlets_intersect(first_state, [cand], False, second_state, [node], True):
                    if nx.has_path(first_state._nx, cand, node):
                        continue
                    sdutil.change_edge_src(first_state, cand, node)
                    sdutil.change_edge_dest(first_state, cand, node)
                    first_state.remove_node(cand)
            continue
        # Source node: merge it into one chosen candidate.
        if (len(candidates) == 0):
            continue
        elif (len(candidates) == 1):
            n = candidates[0]
        else:
            # Prefer a candidate whose memlets intersect this node's reads.
            for cand in candidates:
                if StateFusion.memlets_intersect(first_state, [cand], False, second_state, [node], True):
                    n = cand
                    break
            else:
                n = candidates[0]
        sdutil.change_edge_src(first_state, node, n)
        sdutil.change_edge_dest(first_state, node, n)
        first_state.remove_node(node)
        merged_nodes.add(n)
    # Redirect outgoing control flow and remove the (now empty) second
    # state.
    sdutil.change_edge_src(sdfg, second_state, first_state)
    sdfg.remove_node(second_state)
    if (sdfg.start_state == second_state):
        sdfg.start_state = sdfg.node_id(first_state)
class PlanarPoincareParticle(object):
    """A particle in canonical planar Poincare variables.

    Stores the specific (per unit mass) action ``sLambda`` and the Cartesian
    pair ``(sX, sY) = sqrt(2*sGamma) * (cos(gamma), sin(gamma))``; everything
    else (Lambda, Gamma, a, e, pomega, n, ...) is derived via properties.

    NOTE(review): the decompiled source had the @property/@setter decorators
    stripped (each accessor appeared as a duplicated plain method), and the
    X/Y setters referenced the undefined names ``X``/``Y`` instead of
    ``value``. Both are restored here; read-only attribute usage elsewhere in
    the class (e.g. ``self.a ** 3`` in ``n``, ``-self.gamma`` in ``pomega``)
    confirms these were properties. An ``sGamma`` setter was added so the
    ``Gamma`` setter (which assigns ``self.sGamma``) works — TODO confirm
    against upstream.
    """

    def __init__(self, m, M, l, gamma, G=1.0, sLambda=None, sGamma=None, Lambda=None, Gamma=None, a=None, e=None):
        """Initialize from exactly one of (sLambda, Lambda, a) and one of
        (sGamma, Gamma, e).

        m, M : particle and central mass
        l : mean longitude; gamma : angle conjugate to Gamma
        G : gravitational constant (default 1.0)
        """
        if not single_true([sLambda, Lambda, a]):
            raise AttributeError('Can only pass one of Lambda, sLambda (specific Lambda, i.e. per unit mass), or a (semimajor axis)')
        if not single_true([sGamma, Gamma, e]):
            raise AttributeError('Can only pass one of Gamma, sGamma (specific Gamma, i.e. per unit mass), or e (eccentricity)')
        if sLambda:
            self.sLambda = sLambda
        elif Lambda:
            try:
                self.sLambda = Lambda / m
            except (TypeError, ZeroDivisionError):
                # m is None or 0 for test particles; absolute actions are
                # then undefined.
                raise AttributeError('Need to pass specific actions (sLambda and sGamma) or a and e for test particles')
        elif a:
            self.sLambda = np.sqrt(G * M * a)
        if Gamma:
            try:
                sGamma = Gamma / m
            except (TypeError, ZeroDivisionError):
                raise AttributeError('Need to pass specific actions (sLambda and sGamma) or a and e for test particles')
        elif e:
            sGamma = self.sLambda * (1.0 - np.sqrt(1.0 - e ** 2))
        self.sX = np.sqrt(2.0 * sGamma) * np.cos(gamma)
        self.sY = np.sqrt(2.0 * sGamma) * np.sin(gamma)
        self.m = m
        self.M = M
        self.G = G
        self.l = l

    @property
    def X(self):
        """Mass-weighted canonical coordinate sqrt(m) * sX."""
        return np.sqrt(self.m) * self.sX

    @X.setter
    def X(self, value):
        # BUGFIX: original assigned X / sqrt(m) with `X` undefined.
        self.sX = value / np.sqrt(self.m)

    @property
    def Y(self):
        """Mass-weighted canonical coordinate sqrt(m) * sY."""
        return np.sqrt(self.m) * self.sY

    @Y.setter
    def Y(self, value):
        # BUGFIX: original assigned Y / sqrt(m) with `Y` undefined.
        self.sY = value / np.sqrt(self.m)

    @property
    def Lambda(self):
        """Absolute action m * sLambda."""
        return self.m * self.sLambda

    @Lambda.setter
    def Lambda(self, value):
        self.sLambda = value / self.m

    @property
    def Gamma(self):
        """Absolute action m * sGamma = m * (sX^2 + sY^2) / 2."""
        return (self.m * ((self.sX ** 2) + (self.sY ** 2))) / 2.0

    @Gamma.setter
    def Gamma(self, value):
        self.sGamma = value / self.m

    @property
    def sGamma(self):
        """Specific action (sX^2 + sY^2) / 2."""
        return ((self.sX ** 2) + (self.sY ** 2)) / 2.0

    @sGamma.setter
    def sGamma(self, value):
        # Rescale the action while preserving the current angle gamma.
        current_gamma = self.gamma
        self.sX = np.sqrt(2.0 * value) * np.cos(current_gamma)
        self.sY = np.sqrt(2.0 * value) * np.sin(current_gamma)

    @property
    def gamma(self):
        """Angle conjugate to Gamma (radians)."""
        return np.arctan2(self.sY, self.sX)

    @property
    def a(self):
        """Semimajor axis sLambda^2 / (G*M)."""
        return ((self.sLambda ** 2) / self.G) / self.M

    @property
    def e(self):
        """Eccentricity recovered from sGamma/sLambda; raises if the
        actions are unphysical (argument of the sqrt negative)."""
        GbyL = self.sGamma / self.sLambda
        if (1 - ((1.0 - GbyL) * (1.0 - GbyL))) < 0:
            raise AttributeError('sGamma:{0}, sLambda:{1}, GbyL:{2}, val:{3}'.format(self.sGamma, self.sLambda, GbyL, (1 - ((1.0 - GbyL) * (1.0 - GbyL)))))
        return np.sqrt(1 - ((1 - GbyL) * (1 - GbyL)))

    @property
    def pomega(self):
        """Longitude of pericenter, -gamma."""
        return -self.gamma

    @property
    def n(self):
        """Mean motion sqrt(G*M / a^3)."""
        return np.sqrt((self.G * self.M) / (self.a ** 3))
def test_archive_reuse_case_factory_get_chromosome_mutation_count():
    """With seeding probability 1.0, the factory must clone an archive
    solution and mutate the clone the configured number of times."""
    delegate_factory = MagicMock(tccf.TestCaseChromosomeFactory)
    mock_archive = MagicMock()
    archived_solution = MagicMock()
    archived_clone = MagicMock()
    archived_solution.clone.return_value = archived_clone
    mock_archive.solutions = [archived_solution]
    factory = tccf.ArchiveReuseTestCaseChromosomeFactory(delegate_factory, mock_archive)
    config.configuration.seeding.seed_from_archive_probability = 1.0
    config.configuration.seeding.seed_from_archive_mutations = 42
    produced = factory.get_chromosome()
    assert produced == archived_clone
    assert archived_clone.mutate.call_count == 42
class Vocabulary():
    """Mapping between tokens and integer ids, partitioned into namespaces.

    Padded namespaces reserve index 0 for the padding token (with the OOV
    token also present); namespaces matching a ``non_padded_namespaces``
    pattern start ids at 0 with no special tokens.

    NOTE(review): the decompiled source had the ``@classmethod`` decorators
    stripped from ``from_files`` / ``from_instances`` / ``from_params``
    (they take ``cls`` and are invoked as ``Vocabulary.from_files(...)``),
    and the newline-escape sentinel stripped from the serialization string
    literals (``line.replace('', '\\n')`` would insert a newline between
    every character). Both are restored here using the ``@@NEWLINE@@``
    round-trip convention.
    """
    default_implementation = 'default'

    def __init__(self, counter: Dict[str, Dict[str, int]] = None, min_count: Dict[str, int] = None, max_vocab_size: Union[int, Dict[str, int]] = None, non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES, pretrained_files: Optional[Dict[str, str]] = None, only_include_pretrained_words: bool = False, tokens_to_add: Dict[str, List[str]] = None, min_pretrained_embeddings: Dict[str, int] = None) -> None:
        self._padding_token = DEFAULT_PADDING_TOKEN
        self._oov_token = DEFAULT_OOV_TOKEN
        self._non_padded_namespaces = set(non_padded_namespaces)
        self._token_to_index = _TokenToIndexDefaultDict(self._non_padded_namespaces, self._padding_token, self._oov_token)
        self._index_to_token = _IndexToTokenDefaultDict(self._non_padded_namespaces, self._padding_token, self._oov_token)
        # Counter kept only so print_statistics() can report frequencies.
        self._retained_counter: Optional[Dict[str, Dict[str, int]]] = None
        self._extend(counter, min_count, max_vocab_size, non_padded_namespaces, pretrained_files, only_include_pretrained_words, tokens_to_add, min_pretrained_embeddings)

    def save_to_files(self, directory: str) -> None:
        """Persist the vocabulary: one token file per namespace plus the
        list of non-padded namespace patterns."""
        os.makedirs(directory, exist_ok=True)
        if os.listdir(directory):
            logging.warning('vocabulary serialization directory %s is not empty', directory)
        with codecs.open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'w', 'utf-8') as namespace_file:
            for namespace_str in self._non_padded_namespaces:
                print(namespace_str, file=namespace_file)
        for namespace, mapping in self._index_to_token.items():
            with codecs.open(os.path.join(directory, (namespace + '.txt')), 'w', 'utf-8') as token_file:
                num_tokens = len(mapping)
                # Padded namespaces do not serialize the padding token at
                # index 0; set_from_file() re-inserts it.
                start_index = 1 if (mapping[0] == self._padding_token) else 0
                for i in range(start_index, num_tokens):
                    # Escape embedded newlines so one line == one token;
                    # inverse of the replace in set_from_file().
                    print(mapping[i].replace('\n', '@@NEWLINE@@'), file=token_file)

    @classmethod
    def from_files(cls, directory: str) -> 'Vocabulary':
        """Load a vocabulary previously written by save_to_files()."""
        logger.info('Loading token dictionary from %s.', directory)
        with codecs.open(os.path.join(directory, NAMESPACE_PADDING_FILE), 'r', 'utf-8') as namespace_file:
            non_padded_namespaces = [namespace_str.strip() for namespace_str in namespace_file]
        vocab = cls(non_padded_namespaces=non_padded_namespaces)
        for namespace_filename in os.listdir(directory):
            if namespace_filename == NAMESPACE_PADDING_FILE:
                continue
            namespace = namespace_filename.replace('.txt', '')
            if any((namespace_match(pattern, namespace) for pattern in non_padded_namespaces)):
                is_padded = False
            else:
                is_padded = True
            filename = os.path.join(directory, namespace_filename)
            vocab.set_from_file(filename, is_padded, namespace=namespace)
        return vocab

    def set_from_file(self, filename: str, is_padded: bool = True, oov_token: str = DEFAULT_OOV_TOKEN, namespace: str = 'tokens'):
        """Overwrite one namespace's mappings from a token-per-line file."""
        if is_padded:
            self._token_to_index[namespace] = {self._padding_token: 0}
            self._index_to_token[namespace] = {0: self._padding_token}
        else:
            self._token_to_index[namespace] = {}
            self._index_to_token[namespace] = {}
        with codecs.open(filename, 'r', 'utf-8') as input_file:
            lines = input_file.read().split('\n')
            # Drop trailing empty line from the final newline, if any.
            if lines and lines[-1] == '':
                lines = lines[:-1]
            for i, line in enumerate(lines):
                index = (i + 1) if is_padded else i
                # BUGFIX: decompiled source read line.replace('', '\n'),
                # which inserts '\n' between every character; restored the
                # @@NEWLINE@@ unescape matching save_to_files().
                token = line.replace('@@NEWLINE@@', '\n')
                if token == oov_token:
                    token = self._oov_token
                self._token_to_index[namespace][token] = index
                self._index_to_token[namespace][index] = token
        if is_padded:
            assert self._oov_token in self._token_to_index[namespace], 'OOV token not found!'

    @classmethod
    def from_instances(cls, instances: Iterable['adi.Instance'], min_count: Dict[str, int] = None, max_vocab_size: Union[int, Dict[str, int]] = None, non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES, pretrained_files: Optional[Dict[str, str]] = None, only_include_pretrained_words: bool = False, tokens_to_add: Dict[str, List[str]] = None, min_pretrained_embeddings: Dict[str, int] = None) -> 'Vocabulary':
        """Build a vocabulary by counting tokens over dataset instances."""
        logger.info('Fitting token dictionary from dataset.')
        namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)
        return cls(counter=namespace_token_counts, min_count=min_count, max_vocab_size=max_vocab_size, non_padded_namespaces=non_padded_namespaces, pretrained_files=pretrained_files, only_include_pretrained_words=only_include_pretrained_words, tokens_to_add=tokens_to_add, min_pretrained_embeddings=min_pretrained_embeddings)

    @classmethod
    def from_params(cls, params: Params, instances: Iterable['adi.Instance'] = None):
        """Construct a vocabulary from a Params object: load from a
        directory, build from instances, or load-then-extend."""
        vocab_type = params.pop('type', None)
        if vocab_type is not None:
            return cls.by_name(vocab_type).from_params(params=params, instances=instances)
        extend = params.pop('extend', False)
        vocabulary_directory = params.pop('directory_path', None)
        if (not vocabulary_directory) and (not instances):
            raise ConfigurationError('You must provide either a Params object containing a vocab_directory key or a Dataset to build a vocabulary from.')
        if extend and (not instances):
            raise ConfigurationError("'extend' is true but there are not instances passed to extend.")
        if extend and (not vocabulary_directory):
            raise ConfigurationError("'extend' is true but there is not 'directory_path' to extend from.")
        if vocabulary_directory and instances:
            if extend:
                logger.info('Loading Vocab from files and extending it with dataset.')
            else:
                logger.info('Loading Vocab from files instead of dataset.')
        if vocabulary_directory:
            vocab = Vocabulary.from_files(vocabulary_directory)
            if not extend:
                params.assert_empty('Vocabulary - from files')
                return vocab
        if extend:
            vocab.extend_from_instances(params, instances=instances)
            return vocab
        min_count = params.pop('min_count', None)
        max_vocab_size = pop_max_vocab_size(params)
        non_padded_namespaces = params.pop('non_padded_namespaces', DEFAULT_NON_PADDED_NAMESPACES)
        pretrained_files = params.pop('pretrained_files', {})
        min_pretrained_embeddings = params.pop('min_pretrained_embeddings', None)
        only_include_pretrained_words = params.pop_bool('only_include_pretrained_words', False)
        tokens_to_add = params.pop('tokens_to_add', None)
        params.assert_empty('Vocabulary - from dataset')
        return Vocabulary.from_instances(instances=instances, min_count=min_count, max_vocab_size=max_vocab_size, non_padded_namespaces=non_padded_namespaces, pretrained_files=pretrained_files, only_include_pretrained_words=only_include_pretrained_words, tokens_to_add=tokens_to_add, min_pretrained_embeddings=min_pretrained_embeddings)

    def _extend(self, counter: Dict[str, Dict[str, int]] = None, min_count: Dict[str, int] = None, max_vocab_size: Union[int, Dict[str, int]] = None, non_padded_namespaces: Iterable[str] = DEFAULT_NON_PADDED_NAMESPACES, pretrained_files: Optional[Dict[str, str]] = None, only_include_pretrained_words: bool = False, tokens_to_add: Dict[str, List[str]] = None, min_pretrained_embeddings: Dict[str, int] = None) -> None:
        """Add tokens from ``counter``/``tokens_to_add``, honoring per-
        namespace min-count, max-size and pretrained-file filters."""
        if not isinstance(max_vocab_size, dict):
            int_max_vocab_size = max_vocab_size
            max_vocab_size = defaultdict(lambda: int_max_vocab_size)
        min_count = min_count or {}
        pretrained_files = pretrained_files or {}
        min_pretrained_embeddings = min_pretrained_embeddings or {}
        non_padded_namespaces = set(non_padded_namespaces)
        counter = counter or {}
        tokens_to_add = tokens_to_add or {}
        self._retained_counter = counter
        # A namespace present in both the current vocab and the extension
        # must agree on padded/non-padded status.
        current_namespaces = {*self._token_to_index}
        extension_namespaces = {*counter, *tokens_to_add}
        for namespace in current_namespaces & extension_namespaces:
            original_padded = not any((namespace_match(pattern, namespace) for pattern in self._non_padded_namespaces))
            extension_padded = not any((namespace_match(pattern, namespace) for pattern in non_padded_namespaces))
            if original_padded != extension_padded:
                raise ConfigurationError((('Common namespace {} has conflicting '.format(namespace) + 'setting of padded = True/False. ') + 'Hence extension cannot be done.'))
        self._token_to_index.add_non_padded_namespaces(non_padded_namespaces)
        self._index_to_token.add_non_padded_namespaces(non_padded_namespaces)
        self._non_padded_namespaces.update(non_padded_namespaces)
        for namespace in counter:
            if namespace in pretrained_files:
                pretrained_list = _read_pretrained_tokens(pretrained_files[namespace])
                min_embeddings = min_pretrained_embeddings.get(namespace, 0)
                if min_embeddings > 0:
                    # Force the first min_embeddings pretrained tokens in.
                    tokens_old = tokens_to_add.get(namespace, [])
                    tokens_new = pretrained_list[:min_embeddings]
                    tokens_to_add[namespace] = tokens_old + tokens_new
                pretrained_set = set(pretrained_list)
            else:
                pretrained_set = None
            token_counts = list(counter[namespace].items())
            token_counts.sort(key=lambda x: x[1], reverse=True)
            try:
                max_vocab = max_vocab_size[namespace]
            except KeyError:
                max_vocab = None
            if max_vocab:
                token_counts = token_counts[:max_vocab]
            for token, count in token_counts:
                if pretrained_set is not None:
                    if only_include_pretrained_words:
                        if (token in pretrained_set) and (count >= min_count.get(namespace, 1)):
                            self.add_token_to_namespace(token, namespace)
                    elif (token in pretrained_set) or (count >= min_count.get(namespace, 1)):
                        self.add_token_to_namespace(token, namespace)
                elif count >= min_count.get(namespace, 1):
                    self.add_token_to_namespace(token, namespace)
        for namespace, tokens in tokens_to_add.items():
            for token in tokens:
                self.add_token_to_namespace(token, namespace)

    def extend_from_instances(self, params: Params, instances: Iterable['adi.Instance'] = ()) -> None:
        """Count tokens over ``instances`` and extend the vocabulary."""
        min_count = params.pop('min_count', None)
        max_vocab_size = pop_max_vocab_size(params)
        non_padded_namespaces = params.pop('non_padded_namespaces', DEFAULT_NON_PADDED_NAMESPACES)
        pretrained_files = params.pop('pretrained_files', {})
        min_pretrained_embeddings = params.pop('min_pretrained_embeddings', None)
        only_include_pretrained_words = params.pop_bool('only_include_pretrained_words', False)
        tokens_to_add = params.pop('tokens_to_add', None)
        params.assert_empty('Vocabulary - from dataset')
        logger.info('Fitting token dictionary from dataset.')
        namespace_token_counts: Dict[str, Dict[str, int]] = defaultdict(lambda: defaultdict(int))
        for instance in Tqdm.tqdm(instances):
            instance.count_vocab_items(namespace_token_counts)
        self._extend(counter=namespace_token_counts, min_count=min_count, max_vocab_size=max_vocab_size, non_padded_namespaces=non_padded_namespaces, pretrained_files=pretrained_files, only_include_pretrained_words=only_include_pretrained_words, tokens_to_add=tokens_to_add, min_pretrained_embeddings=min_pretrained_embeddings)

    def is_padded(self, namespace: str) -> bool:
        """True iff the namespace reserves index 0 for the padding token."""
        return self._index_to_token[namespace][0] == self._padding_token

    def add_token_to_namespace(self, token: str, namespace: str = 'tokens') -> int:
        """Add ``token`` to ``namespace`` if absent; return its index."""
        if not isinstance(token, str):
            raise ValueError(('Vocabulary tokens must be strings, or saving and loading will break. Got %s (with type %s)' % (repr(token), type(token))))
        if token not in self._token_to_index[namespace]:
            index = len(self._token_to_index[namespace])
            self._token_to_index[namespace][token] = index
            self._index_to_token[namespace][index] = token
            return index
        else:
            return self._token_to_index[namespace][token]

    def get_index_to_token_vocabulary(self, namespace: str = 'tokens') -> Dict[int, str]:
        return self._index_to_token[namespace]

    def get_token_to_index_vocabulary(self, namespace: str = 'tokens') -> Dict[str, int]:
        return self._token_to_index[namespace]

    def get_token_index(self, token: str, namespace: str = 'tokens') -> int:
        """Index of ``token``; falls back to the OOV token's index."""
        if token in self._token_to_index[namespace]:
            return self._token_to_index[namespace][token]
        else:
            try:
                return self._token_to_index[namespace][self._oov_token]
            except KeyError:
                logger.error('Namespace: %s', namespace)
                logger.error('Token: %s', token)
                raise

    def get_token_from_index(self, index: int, namespace: str = 'tokens') -> str:
        return self._index_to_token[namespace][index]

    def get_tokens_from_list(self, t, namespace):
        """Map a list of indices back to their tokens."""
        return [self.get_token_from_index(i, namespace) for i in t]

    def get_vocab_size(self, namespace: str = 'tokens') -> int:
        return len(self._token_to_index[namespace])

    def __eq__(self, other):
        if isinstance(self, other.__class__):
            return self.__dict__ == other.__dict__
        return False

    def __str__(self) -> str:
        base_string = 'Vocabulary with namespaces:\n'
        non_padded_namespaces = f' Non Padded Namespaces: {self._non_padded_namespaces}\n'
        namespaces = [f' Namespace: {name}, Size: {self.get_vocab_size(name)}\n' for name in self._index_to_token]
        return ' '.join([base_string, non_padded_namespaces] + namespaces)

    def print_statistics(self) -> None:
        """Print per-namespace frequency/length statistics gathered from the
        counter retained at construction time (if any)."""
        if self._retained_counter:
            logger.info("Printed vocabulary statistics are only for the part of the vocabulary generated from instances. If vocabulary is constructed by extending saved vocabulary with dataset instances, the directly loaded portion won't be considered here.")
            print('\n\n----Vocabulary Statistics----\n')
            for namespace in self._retained_counter:
                tokens_with_counts = list(self._retained_counter[namespace].items())
                tokens_with_counts.sort(key=lambda x: x[1], reverse=True)
                print(f"\nTop 10 most frequent tokens in namespace '{namespace}':")
                for token, freq in tokens_with_counts[:10]:
                    print(f' Token: {token} Frequency: {freq}')
                tokens_with_counts.sort(key=lambda x: len(x[0]), reverse=True)
                print(f"\nTop 10 longest tokens in namespace '{namespace}':")
                for token, freq in tokens_with_counts[:10]:
                    print(f' Token: {token} length: {len(token)} Frequency: {freq}')
                print(f"\nTop 10 shortest tokens in namespace '{namespace}':")
                for token, freq in reversed(tokens_with_counts[-10:]):
                    print(f' Token: {token} length: {len(token)} Frequency: {freq}')
        else:
            logger.info('Vocabulary statistics cannot be printed since dataset instances were not used for its construction.')
def compute_histogram_entropy(histograms: torch.Tensor) -> torch.Tensor:
    """Shannon entropy (in nats) of each row of a batch of histograms.

    Each row is normalized into a probability distribution; a small epsilon
    inside the log keeps zero-count bins finite (contributing 0 * log(eps)).
    """
    assert histograms.ndim == 2, f'Wrong shape: {histograms.shape}'
    row_totals = histograms.sum(dim=1, keepdim=True)
    probs = histograms / row_totals
    log_probs = torch.log(probs + 1e-12)
    return -(probs * log_probs).sum(dim=1)
class WaitPrint(threading.Thread):
    """Background thread that waits ``t`` seconds, then prints ``message``
    (without a trailing newline) unless stopped first."""

    def __init__(self, t, message):
        super().__init__()
        self.t = t
        self.message = message
        self.running = True

    def stop(self):
        """Cancel the pending print; takes effect within ~0.1 s."""
        self.running = False

    def run(self):
        # Sleep in 0.1 s slices so stop() is honored promptly.
        ticks_left = int(self.t // 0.1)
        while ticks_left > 0:
            time.sleep(0.1)
            ticks_left -= 1
            if not self.running:
                return
        print(self.message, end='')
def box_iou_rotated(bboxes1, bboxes2, mode='iou', aligned=False):
    """Compute rotated-box IoU ('iou') or intersection-over-foreground
    ('iof') via the compiled extension.

    aligned=True pairs boxes elementwise and returns a 1-D tensor of length
    ``rows``; otherwise every pair is compared and a (rows, cols) matrix is
    returned.
    """
    assert mode in ['iou', 'iof']
    mode_flag = {'iou': 0, 'iof': 1}[mode]
    rows, cols = bboxes1.size(0), bboxes2.size(0)
    # The extension writes into a flat pre-allocated output buffer.
    ious = bboxes1.new_zeros(rows if aligned else rows * cols)
    ext_module.box_iou_rotated(bboxes1.contiguous(), bboxes2.contiguous(), ious, mode_flag=mode_flag, aligned=aligned)
    return ious if aligned else ious.view(rows, cols)
def static_loaders(paths, batch_size: int, seed: int=None, areas: list=None, layers: list=None, tier: str=None, neuron_ids: list=None, neuron_n: int=None, exclude_neuron_n=0, neuron_base_seed=None, image_ids=None, image_n=None, image_base_seed=None, cuda: bool=True, normalize: bool=True, include_behavior: bool=False, add_behavior_as_channels: bool=True, exclude: str=None, select_input_channel: int=None, file_tree: bool=True, image_condition=None, inputs_mean=None, inputs_std=None, scale: float=None, include_eye_position: bool=None, add_eye_pos_as_channels: bool=None, include_trial_info_keys: list=None, overwrite_data_path: bool=True, include_px_position=None, image_reshape_list=None, trial_idx_selection=None):
    """Build per-tier dataloaders for several static-image datasets.

    Calls static_loader() once per entry in ``paths`` and regroups the
    results as ``dls[tier][data_key] -> loader``. The per-dataset selection
    arguments (neuron_ids, image_ids, trial_idx_selection) are expected to be
    lists parallel to ``paths``; missing entries are filled with None.
    """
    if (seed is not None):
        set_random_seed(seed)
    dls = OrderedDict({})
    keys = ([tier] if tier else ['train', 'validation', 'test', 'final_test'])
    for key in keys:
        dls[key] = OrderedDict({})
    # Wrap None into a one-element list so zip_longest yields (possibly
    # None) per-dataset entries.
    neuron_ids = ([neuron_ids] if (neuron_ids is None) else neuron_ids)
    image_ids = ([image_ids] if (image_ids is None) else image_ids)
    trial_idx_selection = ([trial_idx_selection] if (trial_idx_selection is None) else trial_idx_selection)
    basepath = '/data/mouse/toliaslab/static/'
    # NOTE(review): the loop variable shadows the trial_idx_selection list;
    # harmless because zip_longest() captured the list before the first
    # rebinding, but worth renaming for clarity.
    for (path, neuron_id, image_id, trial_idx_selection) in zip_longest(paths, neuron_ids, image_ids, trial_idx_selection, fillvalue=None):
        # Resolve relative dataset names under the canonical data root when
        # it exists on this machine.
        if (overwrite_data_path and os.path.exists(basepath)):
            path = os.path.join(basepath, path)
        # static_loader(get_key=True) returns (data_key, {tier: loader}).
        out = static_loader(path, batch_size, areas=areas, layers=layers, cuda=cuda, tier=tier, get_key=True, neuron_ids=neuron_id, neuron_n=neuron_n, exclude_neuron_n=exclude_neuron_n, neuron_base_seed=neuron_base_seed, image_ids=image_id, image_n=image_n, image_base_seed=image_base_seed, normalize=normalize, include_behavior=include_behavior, add_behavior_as_channels=add_behavior_as_channels, exclude=exclude, select_input_channel=select_input_channel, file_tree=file_tree, image_condition=image_condition, inputs_mean=inputs_mean, inputs_std=inputs_std, scale=scale, include_eye_position=include_eye_position, add_eye_pos_as_channels=add_eye_pos_as_channels, include_trial_info_keys=include_trial_info_keys, include_px_position=include_px_position, image_reshape_list=image_reshape_list, trial_idx_selection=trial_idx_selection)
        for k in dls:
            dls[k][out[0]] = out[1][k]
    return dls
class TestGLPKExactBackend(GenericBackendTests):
    """Run the generic MILP backend test suite against the 'GLPK/exact'
    solver backend."""

    def backend(self) -> GenericBackend:
        # presumably a fixture/hook defined by GenericBackendTests whose
        # decorator may have been stripped in this dump — TODO confirm
        return MixedIntegerLinearProgram(solver='GLPK/exact').get_backend()
def read_point_ply(filename):
    """Load vertex positions and unit normals from a PLY file.

    Returns (v, n): two (num_points, 3) arrays. If the file has no normal
    properties (nx/ny/nz), a warning is printed and all-ones placeholder
    normals are used; normals are always renormalized to unit length (with a
    small epsilon to avoid division by zero).
    """
    pd = PlyData.read(filename)['vertex']
    v = np.array(np.stack([pd[i] for i in ['x', 'y', 'z']], axis=-1))
    try:
        n = np.array(np.stack([pd[i] for i in ['nx', 'ny', 'nz']], axis=-1))
    except (KeyError, ValueError):
        # Narrowed from a bare `except:`; missing properties surface as
        # KeyError/ValueError from plyfile/numpy indexing.
        # BUGFIX: the message was a placeholder-free f-string printing the
        # literal "(unknown)"; restored the filename interpolation.
        print(f'warning: cannot find normals in file {filename}')
        n = np.ones_like(v)
    n = n / (np.linalg.norm(n, axis=1).reshape([-1, 1]) + 1e-06)
    return (v, n)
class BertTokenizationTest(CommonTestCases.CommonTokenizerTester):
    """Tests for BertTokenizer and its Basic/Wordpiece sub-tokenizers.

    NOTE(review): the decompiled source contained a bare `.slow` statement
    (a syntax error) before test_sequence_builders — restored as the `@slow`
    decorator — and had the CJK characters stripped from the literals in
    test_chinese; restored via \\u escapes to match the upstream test.
    """

    tokenizer_class = BertTokenizer

    def setUp(self):
        super(BertTokenizationTest, self).setUp()
        # Write a minimal wordpiece vocab so from_pretrained() can load it.
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([(x + '\n') for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self):
        input_text = u'UNwanted,running'
        output_text = u'unwanted, running'
        return (input_text, output_text)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize(u'UNwanted,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        # CJK characters are split into individual tokens.
        self.assertListEqual(tokenizer.tokenize(u'ah\u535a\u63a8zz'), [u'ah', u'\u535a', u'\u63a8', u'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(tokenizer.tokenize(u' \tHeLLo!how \n Are yoU? '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize(u'Hello'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(tokenizer.tokenize(u' \tHeLLo!how \n Are yoU? '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for (i, token) in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(u' '))
        self.assertTrue(_is_whitespace(u'\t'))
        self.assertTrue(_is_whitespace(u'\r'))
        self.assertTrue(_is_whitespace(u'\n'))
        self.assertTrue(_is_whitespace(u'\xa0'))
        self.assertFalse(_is_whitespace(u'A'))
        self.assertFalse(_is_whitespace(u'-'))

    def test_is_control(self):
        self.assertTrue(_is_control(u'\x05'))
        self.assertFalse(_is_control(u'A'))
        self.assertFalse(_is_control(u' '))
        self.assertFalse(_is_control(u'\t'))
        self.assertFalse(_is_control(u'\r'))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation(u'-'))
        self.assertTrue(_is_punctuation(u'$'))
        self.assertTrue(_is_punctuation(u'`'))
        self.assertTrue(_is_punctuation(u'.'))
        self.assertFalse(_is_punctuation(u'A'))
        self.assertFalse(_is_punctuation(u' '))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('bert-base-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert (encoded_sentence == (([101] + text) + [102]))
        assert (encoded_pair == (((([101] + text) + [102]) + text_2) + [102]))
def cal_true_positive_char(pred, gt):
    """Count ground-truth characters exactly matched in the prediction.

    Uses difflib's SequenceMatcher: for every 'equal' opcode, the length of
    the matched span in ``gt`` (indices [s2, e2)) is accumulated.

    Args:
        pred: predicted text string.
        gt: ground-truth text string.

    Returns:
        Number of true-positive characters (int).
    """
    matcher = SequenceMatcher(None, pred, gt)
    true_positive_char_num = 0
    # Removed the original dead `else: pass` branch.
    for (opt, _, _, s2, e2) in matcher.get_opcodes():
        if opt == 'equal':
            true_positive_char_num += (e2 - s2)
    return true_positive_char_num
def normal_init(module, mean=0, std=1, bias=0):
    """Initialize ``module.weight`` from N(mean, std²) and set a constant bias.

    Args:
        module: a layer exposing a ``weight`` tensor (and optionally ``bias``).
        mean: mean of the normal distribution for the weight.
        std: standard deviation of the normal distribution for the weight.
        bias: constant value assigned to the bias term, when present.
    """
    nn.init.normal_(module.weight, mean, std)
    # Bug fix: layers built with bias=False still *have* a ``bias`` attribute
    # (set to None), so hasattr alone made nn.init.constant_ crash on None.
    if hasattr(module, 'bias') and (module.bias is not None):
        nn.init.constant_(module.bias, bias)
def test_obj_func_returns_scalar():
    """minimize must reject an objective that returns a non-scalar value."""
    expected_msg = 'The user-provided objective function must return a scalar value.'
    # The identity function returns the whole input array, not a scalar.
    with assert_raises(ValueError, match=expected_msg):
        optimize.minimize((lambda x: x), np.array([1, 1]))
def _kmeans_single_elkan(X, sample_weight, centers_init, max_iter=300, verbose=False, tol=0.0001, n_threads=1):
    """Single k-means run using Elkan's triangle-inequality acceleration.

    Maintains one upper bound per sample and one lower bound per
    (sample, cluster) pair so most exact distance computations can be
    skipped. Returns ``(labels, inertia, centers, n_iter)`` where
    ``n_iter`` is the number of iterations actually executed.
    """
    n_samples = X.shape[0]
    n_clusters = centers_init.shape[0]
    # Double-buffered center arrays: elkan_iter writes into centers_new,
    # and the two buffers are swapped each iteration.
    centers = centers_init
    centers_new = np.zeros_like(centers)
    weight_in_clusters = np.zeros(n_clusters, dtype=X.dtype)
    labels = np.full(n_samples, (- 1), dtype=np.int32)
    labels_old = labels.copy()
    # Half of the pairwise center distances; used for the Elkan pruning test.
    center_half_distances = (euclidean_distances(centers) / 2)
    # For each center, the (half-)distance to its nearest other center
    # (row [1] after a partial sort along axis 0).
    distance_next_center = np.partition(np.asarray(center_half_distances), kth=1, axis=0)[1]
    upper_bounds = np.zeros(n_samples, dtype=X.dtype)
    lower_bounds = np.zeros((n_samples, n_clusters), dtype=X.dtype)
    center_shift = np.zeros(n_clusters, dtype=X.dtype)
    # Pick sparse or dense kernels once, up front.
    if sp.issparse(X):
        init_bounds = init_bounds_sparse
        elkan_iter = elkan_iter_chunked_sparse
        _inertia = _inertia_sparse
    else:
        init_bounds = init_bounds_dense
        elkan_iter = elkan_iter_chunked_dense
        _inertia = _inertia_dense
    init_bounds(X, centers, center_half_distances, labels, upper_bounds, lower_bounds, n_threads=n_threads)
    strict_convergence = False
    for i in range(max_iter):
        elkan_iter(X, sample_weight, centers, centers_new, weight_in_clusters, center_half_distances, distance_next_center, upper_bounds, lower_bounds, labels, center_shift, n_threads)
        # Recompute the pruning tables from the freshly updated centers.
        center_half_distances = (euclidean_distances(centers_new) / 2)
        distance_next_center = np.partition(np.asarray(center_half_distances), kth=1, axis=0)[1]
        if verbose:
            # Note: inertia here is computed against the pre-swap centers.
            inertia = _inertia(X, sample_weight, centers, labels, n_threads)
            print(f'Iteration {i}, inertia {inertia}')
        (centers, centers_new) = (centers_new, centers)
        if np.array_equal(labels, labels_old):
            # No label changed: strict convergence.
            if verbose:
                print(f'Converged at iteration {i}: strict convergence.')
            strict_convergence = True
            break
        else:
            # Otherwise stop once total squared center movement is below tol.
            center_shift_tot = (center_shift ** 2).sum()
            if (center_shift_tot <= tol):
                if verbose:
                    print(f'Converged at iteration {i}: center shift {center_shift_tot} within tolerance {tol}.')
                break
        labels_old[:] = labels
    if (not strict_convergence):
        # One final assignment pass (update_centers=False) so that labels
        # are consistent with the final centers.
        elkan_iter(X, sample_weight, centers, centers, weight_in_clusters, center_half_distances, distance_next_center, upper_bounds, lower_bounds, labels, center_shift, n_threads, update_centers=False)
    inertia = _inertia(X, sample_weight, centers, labels, n_threads)
    return (labels, inertia, centers, (i + 1))
def setup_args_gpu(args):
    """Configure device / distributed settings on ``args`` in place.

    Single-process mode (local_rank == -1 or CUDA disabled) picks plain
    cuda/cpu; otherwise the process joins an NCCL process group bound to
    its local rank.
    """
    single_process = (args.local_rank == (- 1)) or args.no_cuda
    if single_process:
        use_cuda = torch.cuda.is_available() and (not args.no_cuda)
        device = torch.device('cuda' if use_cuda else 'cpu')
        args.n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    world_size_env = os.environ.get('WORLD_SIZE')
    args.distributed_world_size = int(world_size_env) if world_size_env else 1
    logger.info('Initialized host %s as d.rank %d on device=%s, n_gpu=%d, world size=%d', socket.gethostname(), args.local_rank, device, args.n_gpu, args.distributed_world_size)
    logger.info('16-bits training: %s ', args.fp16)
def find_images_and_targets(folder, types=IMG_EXTENSIONS, class_to_idx=None, leaf_name_only=True, sort=True):
    """Walk ``folder`` and collect (image path, class index) pairs.

    The class label of an image is derived from its directory relative to
    ``folder`` (just the leaf directory name when ``leaf_name_only``).
    When ``class_to_idx`` is not supplied, it is built from the labels
    found, sorted by ``natural_key``.
    """
    labels = []
    filenames = []
    for root, _, files in os.walk(folder, topdown=False):
        rel_path = os.path.relpath(root, folder) if root != folder else ''
        if leaf_name_only:
            label = os.path.basename(rel_path)
        else:
            label = rel_path.replace(os.path.sep, '_')
        for fname in files:
            ext = os.path.splitext(fname)[1]
            if ext.lower() in types:
                filenames.append(os.path.join(root, fname))
                labels.append(label)
    if class_to_idx is None:
        # Stable, human-ordered mapping from label to integer index.
        sorted_labels = sorted(set(labels), key=natural_key)
        class_to_idx = {c: idx for idx, c in enumerate(sorted_labels)}
    images_and_targets = zip(filenames, [class_to_idx[l] for l in labels])
    if sort:
        images_and_targets = sorted(images_and_targets, key=lambda k: natural_key(k[0]))
    return (images_and_targets, class_to_idx)
class ContinuousMLPQFunction(QFunction):
    """Q(s, a) function implemented as an MLP that merges obs and action.

    Observations enter the network first; the action is concatenated in at
    ``action_merge_layer`` (handled by ``MLPMergeModel``). Built on TF1
    compat graph mode: placeholders and a compiled callable are created in
    ``_initialize``.
    """

    def __init__(self, env_spec, name='ContinuousMLPQFunction', hidden_sizes=(32, 32), action_merge_layer=(- 2), hidden_nonlinearity=tf.nn.relu, hidden_w_init=tf.glorot_uniform_initializer(), hidden_b_init=tf.zeros_initializer(), output_nonlinearity=None, output_w_init=tf.glorot_uniform_initializer(), output_b_init=tf.zeros_initializer(), input_include_goal=False, layer_normalization=False):
        """Store hyperparameters, build the merge model, and compile it."""
        super().__init__(name)
        self._env_spec = env_spec
        self._hidden_sizes = hidden_sizes
        self._action_merge_layer = action_merge_layer
        self._hidden_nonlinearity = hidden_nonlinearity
        self._hidden_w_init = hidden_w_init
        self._hidden_b_init = hidden_b_init
        self._output_nonlinearity = output_nonlinearity
        self._output_w_init = output_w_init
        self._output_b_init = output_b_init
        self._input_include_goal = input_include_goal
        self._layer_normalization = layer_normalization
        # Goal-conditioned (HER-style) setups flatten obs+goal together.
        if self._input_include_goal:
            self._obs_dim = env_spec.observation_space.flat_dim_with_keys(['observation', 'desired_goal'])
        else:
            self._obs_dim = env_spec.observation_space.flat_dim
        self._action_dim = env_spec.action_space.flat_dim
        self.model = MLPMergeModel(output_dim=1, hidden_sizes=hidden_sizes, concat_layer=self._action_merge_layer, hidden_nonlinearity=hidden_nonlinearity, hidden_w_init=hidden_w_init, hidden_b_init=hidden_b_init, output_nonlinearity=output_nonlinearity, output_w_init=output_w_init, output_b_init=output_b_init, layer_normalization=layer_normalization)
        self._initialize()

    def _initialize(self):
        """Build the TF graph (placeholders, variable scope, callable)."""
        obs_ph = tf.compat.v1.placeholder(tf.float32, (None, self._obs_dim), name='obs')
        action_ph = tf.compat.v1.placeholder(tf.float32, (None, self._action_dim), name='act')
        with tf.compat.v1.variable_scope(self.name) as vs:
            self._variable_scope = vs
            self.model.build(obs_ph, action_ph)
        # Pre-compiled session callable for fast repeated Q evaluation.
        self._f_qval = tf.compat.v1.get_default_session().make_callable(self.model.networks['default'].outputs, feed_list=[obs_ph, action_ph])

    def get_qval(self, observation, action):
        """Return Q values for a batch of observations and actions."""
        return self._f_qval(observation, action)

    def inputs(self):
        """Return the input tensors of the default network."""
        return self.model.networks['default'].inputs

    def get_qval_sym(self, state_input, action_input, name):
        """Build a symbolic Q output (shares weights via the variable scope)."""
        with tf.compat.v1.variable_scope(self._variable_scope):
            return self.model.build(state_input, action_input, name=name)

    def clone(self, name):
        """Return a new Q-function with identical hyperparameters (fresh weights)."""
        return self.__class__(name=name, env_spec=self._env_spec, hidden_sizes=self._hidden_sizes, action_merge_layer=self._action_merge_layer, hidden_nonlinearity=self._hidden_nonlinearity, hidden_w_init=self._hidden_w_init, hidden_b_init=self._hidden_b_init, output_nonlinearity=self._output_nonlinearity, output_w_init=self._output_w_init, output_b_init=self._output_b_init, layer_normalization=self._layer_normalization, input_include_goal=self._input_include_goal)

    def __getstate__(self):
        """Drop the unpicklable compiled session callable before pickling."""
        new_dict = self.__dict__.copy()
        del new_dict['_f_qval']
        return new_dict

    def __setstate__(self, state):
        """Restore state and rebuild the TF graph / callable."""
        self.__dict__.update(state)
        self._initialize()
def get_list_of_files(path_or_repo: Union[(str, os.PathLike)], revision: Optional[str]=None, use_auth_token: Optional[Union[(bool, str)]]=None, local_files_only: bool=False) -> List[str]:
    """List files of a local directory, or of a repo on the model Hub.

    For a local directory the tree is walked recursively. In offline mode
    an empty list is returned. Otherwise the Hub is queried, optionally
    authenticated via ``use_auth_token``.
    """
    path_or_repo = str(path_or_repo)
    # Local directory: recursively collect every file path.
    if os.path.isdir(path_or_repo):
        collected = []
        for dirpath, _, file_names in os.walk(path_or_repo):
            collected.extend(os.path.join(dirpath, f) for f in file_names)
        return collected
    # Remote repo, but offline: nothing we can list.
    if is_offline_mode() or local_files_only:
        return []
    # Resolve the auth token: explicit string > stored HF token > none.
    if use_auth_token is True:
        token = HfFolder.get_token()
    elif isinstance(use_auth_token, str):
        token = use_auth_token
    else:
        token = None
    try:
        return list_repo_files(path_or_repo, revision=revision, token=token)
    except HTTPError as e:
        raise ValueError(f'{path_or_repo} is not a local path or a model identifier on the model Hub. Did you make a typo?') from e
def format_message(message, status_message):
    """Prefix *message* with a locale-formatted timestamp.

    Status messages are additionally wrapped in angle brackets.
    """
    timestamp = datetime.now().strftime(u'%x %X')
    if status_message:
        body = u'<{}>'.format(message)
    else:
        body = message
    return u'[{}] {}'.format(timestamp, body)
class RuleSuperRSK(RuleRSK):
    """RSK-style insertion rule for superbiwords with primed/unprimed entries.

    Extends ``RuleRSK`` with mixed insertion: a letter is row-inserted
    while its parity (primed vs. unprimed) agrees with the current entry,
    and column-inserted otherwise. ``epsilon`` encodes the parity of the
    inserted letter (1 = primed, 0 = unprimed) and selects the bisection
    side used by ``insertion``/``reverse_insertion``.
    """
    def to_pairs(self, obj1=None, obj2=None, check=True):
        """Normalize input into an iterator of (top, bottom) PrimedEntry pairs.

        Accepts either a single object exposing ``_rsk_iter`` or two
        parallel arrays. When only one array is given, the top row is
        synthesized as 1/2, 1, 3/2, 2, ... (half-integer steps encode the
        primed/unprimed alternation). When ``check`` is set, raises
        ValueError on length mismatch or on a repeated mixed-parity pair
        (not a valid restricted superbiword).
        """
        from sage.combinat.shifted_primed_tableau import PrimedEntry
        itr = None
        if (obj2 is None):
            try:
                itr = obj1._rsk_iter()
            except AttributeError:
                # Single-array input: synthesize the default top row.
                (obj2, obj1) = (obj1, [])
                a = (ZZ.one() / ZZ(2))
                for i in range(len(obj2)):
                    obj1.append(a)
                    a = (a + (ZZ.one() / ZZ(2)))
        elif check:
            if (len(obj1) != len(obj2)):
                raise ValueError('the two arrays must be the same length')
            # A mixed-parity pair (primed/unprimed disagree) may occur at
            # most once in a restricted superbiword.
            mixed_parity = []
            for (t, b) in zip(obj1, obj2):
                if (PrimedEntry(t).is_primed() != PrimedEntry(b).is_primed()):
                    if ((t, b) in mixed_parity):
                        raise ValueError('invalid restricted superbiword')
                    else:
                        mixed_parity.append((t, b))
        if itr:
            # Materialize the iterator into two parallel lists.
            (obj1, obj2) = ([], [])
            for (i, j) in itr:
                obj1.append(i)
                obj2.append(j)
        # Coerce every entry to PrimedEntry before pairing.
        for i in range(len(obj1)):
            obj1[i] = PrimedEntry(obj1[i])
            obj2[i] = PrimedEntry(obj2[i])
        return zip(obj1, obj2)
    def _get_col(self, t, col_index):
        """Return column ``col_index`` of tableau-like list-of-rows ``t``.

        Only rows long enough to reach the column contribute; rows are
        assumed left-justified so the scan stops at the first short row.
        """
        num_rows_long_enough = 0
        for row in t:
            if (len(row) > col_index):
                num_rows_long_enough += 1
            else:
                break
        col = [t[row_index][col_index] for row_index in range(num_rows_long_enough)]
        return col
    def _set_col(self, t, col_index, col):
        """Write ``col`` back into column ``col_index`` of ``t`` in place.

        Extends ``t`` with new rows / row cells as needed so every value
        in ``col`` has a slot.
        """
        for (row_index, val) in enumerate(col):
            if (row_index == len(t)):
                t.append([])
            if (col_index == len(t[row_index])):
                t[row_index].append(None)
            t[row_index][col_index] = val
    def forward_rule(self, obj1, obj2, check_standard=False, check=True):
        """Insert the biword into (P, Q) via mixed row/column insertion.

        While the parities of the recording letter ``i`` and inserted
        letter ``j`` agree, standard row insertion is used; otherwise the
        bumping proceeds down a column. Returns the formatted [P, Q].
        """
        itr = self.to_pairs(obj1, obj2, check=check)
        p = []
        q = []
        for (i, j) in itr:
            row_index = (- 1)
            col_index = (- 1)
            epsilon = (1 if i.is_primed() else 0)
            while True:
                if (i.is_primed() == j.is_primed()):
                    # Same parity: row insertion into the next row down.
                    row_index += 1
                    if (row_index == len(p)):
                        # Fell off the bottom: start a new row.
                        p.append([j])
                        q.append([i])
                        break
                    else:
                        (j1, col_index) = self.insertion(j, p[row_index], epsilon=epsilon)
                        if (j1 is None):
                            # Nothing bumped: append at the end of the row.
                            p[row_index].append(j)
                            q[row_index].append(i)
                            break
                        else:
                            j = j1
                else:
                    # Mixed parity: column insertion into the next column.
                    col_index += 1
                    if ((not p) or (col_index == len(p[0]))):
                        # Fell off the right edge: start a new column.
                        self._set_col(p, col_index, [j])
                        self._set_col(q, col_index, [i])
                        break
                    else:
                        c = self._get_col(p, col_index)
                        (j1, row_index) = self.insertion(j, c, epsilon=epsilon)
                        if (j1 is None):
                            c.append(j)
                            self._set_col(p, col_index, c)
                            if (col_index == 0):
                                q.append([])
                            q[row_index].append(i)
                            break
                        else:
                            j = j1
                            self._set_col(p, col_index, c)
        return self._forward_format_output(p, q, check_standard=check_standard)
    def insertion(self, j, r, epsilon=0):
        """Bump ``j`` into row/column ``r``; return (bumped value, position).

        Returns (None, len(r)) when ``j`` can simply be appended. The
        bisection side depends on parity: unprimed uses bisect_right
        (weak bumping), primed uses bisect_left (strict bumping).
        """
        bisect = (bisect_right if (epsilon == 0) else bisect_left)
        if ((r[(- 1)] < j) or ((r[(- 1)] == j) and (epsilon == 0))):
            return (None, len(r))
        y_pos = bisect(r, j)
        (j, r[y_pos]) = (r[y_pos], j)
        return (j, y_pos)
    def _forward_format_output(self, p, q, check_standard):
        """Wrap raw lists (p, q) in (semi)standard super tableau classes."""
        from sage.combinat.tableau import StandardTableau
        from sage.combinat.super_tableau import SemistandardSuperTableau, StandardSuperTableau
        if (not p):
            return [StandardTableau([]), StandardTableau([])]
        if check_standard:
            # Prefer the standard class; fall back to semistandard.
            try:
                P = StandardSuperTableau(p)
            except ValueError:
                P = SemistandardSuperTableau(p)
            try:
                Q = StandardSuperTableau(q)
            except ValueError:
                Q = SemistandardSuperTableau(q)
            return [P, Q]
        return [SemistandardSuperTableau(p), SemistandardSuperTableau(q)]
    def backward_rule(self, p, q, output='array'):
        """Invert the forward rule: recover the biword from (p, q).

        Entries of ``q`` are processed in decreasing value order; for each,
        the corresponding cell of ``p`` is reverse-bumped out (rows for
        matching parity, columns for mixed parity).
        """
        p_copy = [list(row) for row in p]
        upper_row = []
        lower_row = []
        # d maps value -> {column: [rows]} describing where each recording
        # letter occurs in q.
        d = {}
        for (row, Li) in enumerate(q):
            for (col, val) in enumerate(Li):
                if ((val in d) and (col in d[val])):
                    d[val][col].append(row)
                elif (val not in d):
                    d[val] = {col: [row]}
                else:
                    d[val][col] = [row]
        for (value, iter_dict) in sorted(d.items(), reverse=True, key=(lambda x: x[0])):
            epsilon = (1 if value.is_primed() else 0)
            if (epsilon == 1):
                # Primed letters were column-inserted: re-key the mapping
                # as row -> [columns] so iteration order matches.
                iter_copy = dict(iter_dict)
                iter_dict = {}
                for (k, v) in iter_copy.items():
                    for vi in v:
                        if (vi in iter_dict):
                            iter_dict[vi].append(k)
                        else:
                            iter_dict[vi] = [k]
            for key in sorted(iter_dict, reverse=True):
                for rows in iter_dict[key]:
                    (row_index, col_index) = ((rows, key) if (epsilon == 0) else (key, rows))
                    # Pop the last cell of the starting row, then reverse
                    # the bumping path back to the top-left.
                    x = p_copy[row_index].pop()
                    while True:
                        if (value.is_primed() == x.is_primed()):
                            row_index -= 1
                            if (row_index < 0):
                                break
                            (x, col_index) = self.reverse_insertion(x, p_copy[row_index], epsilon=epsilon)
                        else:
                            col_index -= 1
                            if (col_index < 0):
                                break
                            c = self._get_col(p_copy, col_index)
                            (x, row_index) = self.reverse_insertion(x, c, epsilon=epsilon)
                            self._set_col(p_copy, col_index, c)
                    upper_row.append(value)
                    lower_row.append(x)
        return self._backward_format_output(lower_row, upper_row, output, q.is_standard())
    def reverse_insertion(self, x, row, epsilon=0):
        """Reverse-bump ``x`` out of ``row``; return (bumped value, position)."""
        bisect = (bisect_left if (epsilon == 0) else bisect_right)
        y_pos = (bisect(row, x) - 1)
        (x, row[y_pos]) = (row[y_pos], x)
        return (x, y_pos)
    def _backward_format_output(self, lower_row, upper_row, output, q_is_standard):
        """Format the recovered biword as a two-row array or a Word."""
        if (output == 'array'):
            return [list(reversed(upper_row)), list(reversed(lower_row))]
        if (output == 'word'):
            if q_is_standard:
                from sage.combinat.words.word import Word
                return Word(reversed(lower_row))
            else:
                raise TypeError(('q must be standard to have a %s as valid output' % output))
        raise ValueError('invalid output option')
class CUBDataset(Dataset):
    """CUB-200-2011 dataset reader.

    Training mode yields ``(image, label)``; evaluation mode additionally
    yields a ground-truth bounding box string (rescaled to the crop size)
    and the image file name.
    """

    def __init__(self, root, cfg, is_train):
        self.root = root
        self.cfg = cfg
        self.is_train = is_train
        self.resize_size = cfg.DATA.RESIZE_SIZE
        self.crop_size = cfg.DATA.CROP_SIZE
        # All metadata files share a "<id> <payload...>" line format; the id
        # column is stripped by remove_1st_column. _read_lines closes each
        # file (the original open(...).readlines() calls leaked handles).
        self.image_list = self.remove_1st_column(self._read_lines(os.path.join(root, 'images.txt')))
        self.label_list = self.remove_1st_column(self._read_lines(os.path.join(root, 'image_class_labels.txt')))
        self.split_list = self.remove_1st_column(self._read_lines(os.path.join(root, 'train_test_split.txt')))
        self.bbox_list = self.remove_1st_column(self._read_lines(os.path.join(root, 'bounding_boxes.txt')))
        (self.train_transform, self.onecrop_transform, self.tencrops_transform) = get_transforms(cfg)
        if cfg.TEST.TEN_CROPS:
            self.test_transform = self.tencrops_transform
        else:
            self.test_transform = self.onecrop_transform
        # In train_test_split.txt, '1' marks training images, '0' test images.
        if is_train:
            self.index_list = self.get_index(self.split_list, '1')
        else:
            self.index_list = self.get_index(self.split_list, '0')

    @staticmethod
    def _read_lines(path):
        """Read all lines of *path*, closing the file handle afterwards."""
        with open(path, 'r') as f:
            return f.readlines()

    def get_index(self, list, value):
        """Return the indices of entries in *list* equal to *value*.

        NOTE(review): the parameter shadows the ``list`` builtin; the name
        is kept for backward compatibility with keyword callers.
        """
        index = []
        for i in range(len(list)):
            if (list[i] == value):
                index.append(i)
        return index

    def remove_1st_column(self, input_list):
        """Strip the leading id column from each "<id> <payload...>" line.

        Two-column lines yield a single string; longer lines yield the list
        of remaining columns (e.g. the four bounding-box coordinates).
        """
        output_list = []
        for i in range(len(input_list)):
            # [:-1] drops the trailing newline.
            if (len(input_list[i][:(- 1)].split(' ')) == 2):
                output_list.append(input_list[i][:(- 1)].split(' ')[1])
            else:
                output_list.append(input_list[i][:(- 1)].split(' ')[1:])
        return output_list

    def __getitem__(self, idx):
        name = self.image_list[self.index_list[idx]]
        image_path = os.path.join(self.root, 'images', name)
        image = Image.open(image_path).convert('RGB')
        image_size = list(image.size)
        # Labels in the metadata are 1-based; convert to 0-based.
        label = (int(self.label_list[self.index_list[idx]]) - 1)
        if self.is_train:
            image = self.train_transform(image)
            return (image, label)
        else:
            image = self.test_transform(image)
            bbox = self.bbox_list[self.index_list[idx]]
            bbox = [int(float(value)) for value in bbox]
            [x, y, bbox_width, bbox_height] = bbox
            # NOTE(review): resize_size is deliberately(?) set to the crop
            # size here, not self.resize_size — confirm against the eval code.
            resize_size = self.crop_size
            crop_size = self.crop_size
            shift_size = 0
            [image_width, image_height] = image_size
            # Rescale the bbox from original image coords to crop coords,
            # clamping to the crop boundaries.
            left_bottom_x = int(max((((x / image_width) * resize_size) - shift_size), 0))
            left_bottom_y = int(max((((y / image_height) * resize_size) - shift_size), 0))
            right_top_x = int(min(((((x + bbox_width) / image_width) * resize_size) - shift_size), (crop_size - 1)))
            right_top_y = int(min(((((y + bbox_height) / image_height) * resize_size) - shift_size), (crop_size - 1)))
            gt_bbox = np.array([left_bottom_x, left_bottom_y, right_top_x, right_top_y]).reshape((- 1))
            gt_bbox = ' '.join(list(map(str, gt_bbox)))
            return (image, label, gt_bbox, name)

    def __len__(self):
        return len(self.index_list)
def test_graphsage_save_load(tmpdir):
    """Round-trip a small two-layer GraphSAGE model through save/load."""
    model = GraphSAGE(layer_sizes=[4, 4], n_samples=[2, 2], input_dim=2, multiplicity=1)
    test_utils.model_save_load(tmpdir, model)
class CleanEvaluation():
    """Evaluate classifier probabilities on clean (non-adversarial) data.

    The first ``test_N`` examples are the test split; the remaining
    ``validation_N`` (a ``validation`` fraction of N) are used to pick
    confidence thresholds at a desired true-positive rate.
    """

    def __init__(self, probabilities, labels, validation=0.1):
        """Split probabilities/labels and precompute predictions and errors.

        Args:
            probabilities: (N, C) array of per-class probabilities; each
                row must sum to 1.
            labels: (N,) integer labels; max(labels) + 1 must equal C.
            validation: fraction of examples reserved for threshold tuning.
        """
        assert (validation >= 0)
        labels = numpy.squeeze(labels)
        assert (len(labels.shape) == 1)
        assert (len(probabilities.shape) == 2)
        assert (probabilities.shape[0] == labels.shape[0])
        assert (probabilities.shape[1] == (numpy.max(labels) + 1))
        marginals = numpy.sum(probabilities, axis=1)
        assert numpy.allclose(marginals, numpy.ones(marginals.shape))
        self.N = labels.shape[0]
        self.test_N = math.ceil((self.N * (1 - validation)))
        if (validation <= 0):
            assert (self.test_N == self.N)
        self.validation_N = (self.N - self.test_N)
        if (validation <= 0):
            assert (self.validation_N == 0)
        self.test_probabilities = probabilities[:self.test_N]
        self.test_labels = labels[:self.test_N]
        self.test_predictions = numpy.argmax(self.test_probabilities, axis=1)
        self.test_errors = (self.test_predictions != self.test_labels)
        # Confidence = probability assigned to the predicted class.
        self.test_confidences = self.test_probabilities[(numpy.arange(self.test_predictions.shape[0]), self.test_predictions)]
        # Validation attributes default to None when no validation split is
        # used. (Fixed: the original assigned validation_confidences twice
        # and never pre-initialized validation_labels.)
        self.validation_probabilities = None
        self.validation_labels = None
        self.validation_predictions = None
        self.validation_errors = None
        self.validation_confidences = None
        if (validation > 0):
            self.validation_probabilities = probabilities[self.test_N:]
            self.validation_labels = labels[self.test_N:]
            self.validation_predictions = numpy.argmax(self.validation_probabilities, axis=1)
            self.validation_errors = (self.validation_predictions != self.validation_labels)
            self.validation_confidences = self.validation_probabilities[(numpy.arange(self.validation_predictions.shape[0]), self.validation_predictions)]
        # Lazily computed by confidence_at_tpr.
        self.sorted_correct_validation_confidences = None

    def confidence_at_tpr(self, tpr):
        """Return the confidence threshold achieving *tpr* on validation data."""
        assert (self.validation_confidences is not None)
        assert (tpr > 0)
        if (self.sorted_correct_validation_confidences is None):
            # Confidences of correctly classified validation examples, ascending.
            correct_validation_confidences = self.validation_confidences[numpy.logical_not(self.validation_errors)]
            self.sorted_correct_validation_confidences = numpy.sort(numpy.copy(correct_validation_confidences))
        # Keep the top tpr fraction; the cutoff index drops the bottom (1 - tpr).
        cutoff = math.floor((self.sorted_correct_validation_confidences.shape[0] * round((1 - tpr), 2)))
        assert (cutoff >= 0)
        assert (cutoff < self.sorted_correct_validation_confidences.shape[0])
        return self.sorted_correct_validation_confidences[cutoff]

    def tpr_at_confidence(self, threshold):
        """Fraction of correct test predictions with confidence >= threshold."""
        return (numpy.sum((self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold)) / float(numpy.sum(numpy.logical_not(self.test_errors))))

    def validation_tpr_at_confidence(self, threshold):
        """Fraction of correct validation predictions with confidence >= threshold."""
        validation_confidences = self.validation_confidences[numpy.logical_not(self.validation_errors)]
        return (numpy.sum((validation_confidences >= threshold)) / float(validation_confidences.shape[0]))

    def fpr_at_confidence(self, threshold):
        """Fraction of wrong test predictions with confidence >= threshold."""
        return (numpy.sum((self.test_confidences[self.test_errors] >= threshold)) / float(numpy.sum(self.test_errors)))

    def test_error(self):
        """Plain test error rate (no confidence thresholding)."""
        return (numpy.sum(self.test_errors.astype(int)) / float(self.test_N))

    def test_error_at_confidence(self, threshold):
        """Test error restricted to predictions with confidence >= threshold."""
        nominator = numpy.sum(numpy.logical_and(self.test_errors, (self.test_confidences >= threshold)))
        denominator = numpy.sum((self.test_confidences >= threshold))
        if (denominator > 0):
            return (nominator / float(denominator))
        else:
            # No prediction passes the threshold: report zero error.
            return 0

    def test_error_curve(self):
        """Return (test error, threshold) arrays over all confidence cutoffs."""
        scores = self.test_confidences
        sort = numpy.argsort(scores, axis=0)
        sorted_scores = scores[sort]
        test_errors = numpy.zeros(scores.shape[0])
        thresholds = numpy.zeros(scores.shape[0])
        for i in range(sort.shape[0]):
            thresholds[i] = sorted_scores[i]
            test_errors[i] = (numpy.sum(self.test_errors[(self.test_confidences >= thresholds[i])]) / float(numpy.sum((self.test_confidences >= thresholds[i]))))
        return (test_errors, thresholds)

    def receiver_operating_characteristic_labels_scores(self):
        """ROC inputs: binary correctness labels and prediction confidences."""
        return (numpy.logical_not(self.test_errors).astype(int), self.test_confidences)

    def receiver_operating_characteristic_auc(self):
        """ROC AUC of confidence as a correctness detector (1 if degenerate)."""
        (labels, scores) = self.receiver_operating_characteristic_labels_scores()
        if (numpy.unique(labels).shape[0] == 1):
            # Only one class present; AUC is undefined, report 1 by convention.
            return 1
        else:
            return sklearn.metrics.roc_auc_score(labels, scores)

    def receiver_operating_characteristic_curve(self):
        """Full ROC curve (fpr, tpr, thresholds) via sklearn."""
        (labels, scores) = self.receiver_operating_characteristic_labels_scores()
        return sklearn.metrics.roc_curve(labels, scores)

    # Convenience wrappers at fixed operating points.
    def confidence_at_95tpr(self):
        return self.confidence_at_tpr(0.95)

    def confidence_at_98tpr(self):
        return self.confidence_at_tpr(0.98)

    def confidence_at_99tpr(self):
        return self.confidence_at_tpr(0.99)

    def confidence_at_995tpr(self):
        return self.confidence_at_tpr(0.995)

    def tpr_at_95tpr(self):
        return self.tpr_at_confidence(self.confidence_at_tpr(0.95))

    def tpr_at_98tpr(self):
        return self.tpr_at_confidence(self.confidence_at_tpr(0.98))

    def tpr_at_99tpr(self):
        return self.tpr_at_confidence(self.confidence_at_tpr(0.99))

    def tpr_at_995tpr(self):
        return self.tpr_at_confidence(self.confidence_at_tpr(0.995))

    def validation_tpr_at_95tpr(self):
        return self.validation_tpr_at_confidence(self.confidence_at_tpr(0.95))

    def validation_tpr_at_98tpr(self):
        return self.validation_tpr_at_confidence(self.confidence_at_tpr(0.98))

    def validation_tpr_at_99tpr(self):
        return self.validation_tpr_at_confidence(self.confidence_at_tpr(0.99))

    def validation_tpr_at_995tpr(self):
        return self.validation_tpr_at_confidence(self.confidence_at_tpr(0.995))

    def fpr_at_95tpr(self):
        return self.fpr_at_confidence(self.confidence_at_tpr(0.95))

    def fpr_at_98tpr(self):
        return self.fpr_at_confidence(self.confidence_at_tpr(0.98))

    def fpr_at_99tpr(self):
        return self.fpr_at_confidence(self.confidence_at_tpr(0.99))

    def fpr_at_995tpr(self):
        return self.fpr_at_confidence(self.confidence_at_tpr(0.995))

    def test_error_at_95tpr(self):
        return self.test_error_at_confidence(self.confidence_at_tpr(0.95))

    def test_error_at_98tpr(self):
        return self.test_error_at_confidence(self.confidence_at_tpr(0.98))

    def test_error_at_99tpr(self):
        return self.test_error_at_confidence(self.confidence_at_tpr(0.99))

    def test_error_at_995tpr(self):
        return self.test_error_at_confidence(self.confidence_at_tpr(0.995))
def test_multiple_rhs():
    """solve_toeplitz must agree with dense solve for several RHS shapes."""
    rng = np.random.RandomState(1234)
    c = rng.randn(4)
    r = rng.randn(4)
    # Real and complex right-hand sides, with 1-D, 2-D and 3-D shapes.
    for offset in (0, 1j):
        for yshape in ((4,), (4, 3), (4, 3, 2)):
            y = rng.randn(*yshape) + offset
            actual = solve_toeplitz((c, r), b=y)
            desired = solve(toeplitz(c, r=r), y)
            assert_equal(actual.shape, yshape)
            assert_equal(desired.shape, yshape)
            assert_allclose(actual, desired)
def test_read_file_not_found(agent: Agent):
    """Reading a missing file must yield an error string naming the file."""
    missing = 'does_not_exist.txt'
    result = file_ops.read_file(missing, agent=agent)
    assert 'Error:' in result
    assert missing in result
    assert 'no such file' in result
def expected_num_cache_files(num_kernels: int=0) -> int:
    """Cache files produced for *num_kernels* kernels: one each plus an index.

    Zero kernels produce no cache files at all.
    """
    return 0 if num_kernels == 0 else num_kernels + 1
def test_option_integer():
    """from_json with a nullable-integer array schema yields option int64."""
    schema = {'type': 'array', 'items': {'type': ['null', 'integer']}}
    payload = ' [ 1 ,2,null,4, 5]'
    # Single JSON document.
    result = ak.operations.from_json(payload, schema=schema)
    assert result.to_list() == [1, 2, None, 4, 5]
    assert str(result.type) == '5 * ?int64'
    # Two concatenated documents, parsed in line-delimited mode.
    result = ak.operations.from_json(payload * 2, schema=schema, line_delimited=True)
    assert result.to_list() == [1, 2, None, 4, 5] * 2
    assert str(result.type) == '10 * ?int64'
    # Empty array keeps the option type.
    result = ak.operations.from_json(' [ ]', schema=schema)
    assert result.to_list() == []
    assert str(result.type) == '0 * ?int64'
# Fix: the original line read `_cache(maxsize=16384)` with no `@` — a bare
# no-op call; it is clearly a memoization decorator whose `@` was lost.
@_cache(maxsize=16384)
def symstr(sym, arrayexprs: Optional[Set[str]]=None, cpp_mode=False) -> str:
    """Render a symbolic expression as a (possibly C++) string.

    SymExpr wrappers are unwrapped recursively. Compound expressions are
    parenthesized; plain symbols, numbers and constants are not. If the
    sympy fix-up passes fail, the expression is printed as-is and
    parenthesized defensively.
    """
    if isinstance(sym, SymExpr):
        return symstr(sym.expr, arrayexprs, cpp_mode=cpp_mode)
    try:
        # Normalization passes before printing (numeric literals,
        # integer-division and division forms).
        sym = sympy_numeric_fix(sym)
        sym = sympy_intdiv_fix(sym)
        sym = sympy_divide_fix(sym)
        sstr = DaceSympyPrinter(arrayexprs, cpp_mode).doprint(sym)
        if (isinstance(sym, symbol) or isinstance(sym, sympy.Symbol) or isinstance(sym, sympy.Number) or dtypes.isconstant(sym)):
            # Atomic expressions need no parentheses.
            return sstr
        else:
            return (('(' + sstr) + ')')
    except (AttributeError, TypeError, ValueError):
        # Fix-ups failed (e.g. non-sympy input): print directly, parenthesized.
        sstr = DaceSympyPrinter(arrayexprs, cpp_mode).doprint(sym)
        return (('(' + sstr) + ')')
def get_world_size():
    """Return the distributed world size, or 1 outside distributed mode."""
    if torch.distributed.is_initialized():
        return torch.distributed.get_world_size()
    return 1
def train_segmentor(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None):
    """Train a segmentation model according to ``cfg``.

    Builds dataloaders, the optimizer and a runner; optionally wraps the
    model for apex fp16 and/or (distributed) data parallelism, registers
    the standard training hooks plus an evaluation hook, then resumes or
    loads a checkpoint before launching the training workflow.
    """
    logger = get_root_logger(cfg.log_level)
    # Accept a single dataset or a list of datasets.
    dataset = (dataset if isinstance(dataset, (list, tuple)) else [dataset])
    data_loaders = [build_dataloader(ds, cfg.data.samples_per_gpu, cfg.data.workers_per_gpu, len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, drop_last=True) for ds in dataset]
    optimizer = build_optimizer(model, cfg.optimizer)
    # Optional apex O1 mixed-precision initialization, gated on the
    # DistOptimizerHook config.
    if (cfg.optimizer_config.get('type', None) and (cfg.optimizer_config['type'] == 'DistOptimizerHook')):
        if cfg.optimizer_config.get('use_fp16', False):
            (model, optimizer) = apex.amp.initialize(model.cuda(), optimizer, opt_level='O1')
            for m in model.modules():
                if hasattr(m, 'fp16_enabled'):
                    m.fp16_enabled = True
    # Wrap for multi-process (DDP) or single-process (DP) execution.
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        model = MMDistributedDataParallel(model.cuda(), device_ids=[torch.cuda.current_device()], broadcast_buffers=False, find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
    # Legacy configs without a `runner` section default to IterBasedRunner.
    if (cfg.get('runner') is None):
        cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
        warnings.warn('config is now expected to have a `runner` section, please set `runner` in your config.', UserWarning)
    if cfg.get('fast_eval', False):
        (data_loaders, model) = fast_eval_wrapper(data_loaders, model, cfg, distributed)
    runner = build_runner(cfg.runner, default_args=dict(model=model, batch_processor=None, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta))
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None))
    # Reuse the caller's timestamp so checkpoints and logs line up.
    runner.timestamp = timestamp
    if validate:
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(val_dataset, samples_per_gpu=1, workers_per_gpu=cfg.data.workers_per_gpu, dist=distributed, shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        # Epoch-based runners evaluate by epoch; iter-based ones by iteration.
        eval_cfg['by_epoch'] = ('IterBasedRunner' not in cfg.runner['type'])
        eval_hook = (DistEvalHook if distributed else EvalHook)
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
    # `resume_from` restores optimizer/iteration state; `load_from` only weights.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
class Res2Layer(Sequential):
    """One stage of Res2Net blocks packaged as a ``Sequential``.

    The first block may carry a stride and an avg-pool + 1x1-conv
    downsample shortcut; the remaining ``num_blocks - 1`` blocks keep
    stride 1 and the stage's output width.

    NOTE(review): the ``avg_down`` argument is accepted but never read —
    the avg-pool shortcut is always used when a downsample is needed.
    """

    def __init__(self, block, inplanes, planes, num_blocks, stride=1, avg_down=True, conv_cfg=None, norm_cfg=dict(type='BN'), scales=4, base_width=26, **kwargs):
        self.block = block
        downsample = None
        if (stride != 1) or (inplanes != planes * block.expansion):
            # Shortcut: average pooling followed by a 1x1 projection + norm.
            downsample = nn.Sequential(
                nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False),
                build_conv_layer(conv_cfg, inplanes, planes * block.expansion, kernel_size=1, stride=1, bias=False),
                build_norm_layer(norm_cfg, planes * block.expansion)[1])
        stage = [block(inplanes=inplanes, planes=planes, stride=stride, downsample=downsample, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, stage_type='stage', **kwargs)]
        inplanes = planes * block.expansion
        for _ in range(1, num_blocks):
            stage.append(block(inplanes=inplanes, planes=planes, stride=1, conv_cfg=conv_cfg, norm_cfg=norm_cfg, scales=scales, base_width=base_width, **kwargs))
        super(Res2Layer, self).__init__(*stage)
class InstrumentationFinder(MetaPathFinder):
    """Meta-path finder that instruments the module under test on import.

    Delegates spec resolution to the original path finder, then swaps the
    spec's loader for an ``InstrumentationLoader`` so the module's bytecode
    is transformed for coverage tracing when it is loaded.
    """

    _logger = logging.getLogger(__name__)

    def __init__(self, original_pathfinder, module_to_instrument: str, tracer: ExecutionTracer, coverage_metrics: set[config.CoverageMetric], dynamic_constant_provider: (DynamicConstantProvider | None)=None) -> None:
        """Store the delegate finder and the instrumentation configuration."""
        self._module_to_instrument = module_to_instrument
        self._original_pathfinder = original_pathfinder
        self._tracer = tracer
        self._coverage_metrics = coverage_metrics
        self._dynamic_constant_provider = dynamic_constant_provider

    def update_instrumentation_metrics(self, tracer: ExecutionTracer, coverage_metrics: set[config.CoverageMetric], dynamic_constant_provider: (DynamicConstantProvider | None)) -> None:
        """Replace tracer/metrics/provider used for future instrumentation."""
        self._tracer = tracer
        self._coverage_metrics = coverage_metrics
        self._dynamic_constant_provider = dynamic_constant_provider

    def _should_instrument(self, module_name: str):
        """Only the single configured module is instrumented."""
        return (module_name == self._module_to_instrument)

    def find_spec(self, fullname: str, path=None, target=None):
        """Return an instrumenting spec for the target module, else None.

        Returning None lets the regular import machinery handle every
        other module (and the target module too if its loader is not a
        FileLoader, after logging an error).
        """
        if self._should_instrument(fullname):
            spec: ModuleSpec = self._original_pathfinder.find_spec(fullname, path, target)
            if (spec is not None):
                if isinstance(spec.loader, FileLoader):
                    # Swap in a loader that instruments the bytecode.
                    spec.loader = InstrumentationLoader(spec.loader.name, spec.loader.path, self._tracer, build_transformer(self._tracer, self._coverage_metrics, self._dynamic_constant_provider))
                    return spec
                self._logger.error('Loader for module under test is not a FileLoader, can not instrument.')
        return None
def test_reduceat():
    """np.add.reduceat on a record-array field must match manual segment sums."""
    db = np.dtype([('name', 'S11'), ('time', np.int64), ('value', np.float32)])
    a = np.empty([100], dtype=db)
    a['name'] = 'Simple'
    a['time'] = 10
    a['value'] = 100
    indx = [0, 7, 15, 25]
    # Reference: reduce each segment [indx[i], indx[i+1]) by hand, then the tail.
    h2 = []
    val1 = indx[0]
    for val2 in indx[1:]:
        h2.append(np.add.reduce(a['value'][val1:val2]))
        val1 = val2
    h2.append(np.add.reduce(a['value'][val1:]))
    h2 = np.array(h2)
    h1 = np.add.reduceat(a['value'], indx)
    assert_array_almost_equal(h1, h2)
    # Repeat with a tiny ufunc buffer to exercise the buffered code path.
    # Fixes: (1) np.setbufsize/np.UFUNC_BUFSIZE_DEFAULT were removed in
    # NumPy 2.0, so guard the whole section; (2) the original never restored
    # the buffer size if reduceat raised — use getbufsize + try/finally.
    if hasattr(np, 'setbufsize') and hasattr(np, 'getbufsize'):
        old_bufsize = np.getbufsize()
        np.setbufsize(32)
        try:
            h1 = np.add.reduceat(a['value'], indx)
        finally:
            np.setbufsize(old_bufsize)
        assert_array_almost_equal(h1, h2)
class GeneralizedMatrixFactorizationModel(keras.Model):
    """Generalized Matrix Factorization (GMF) recommender.

    Scores a (user, item) pair as the element-wise product of their
    embeddings projected through an edge-weight vector ``h``.
    """

    def __init__(self, num_users, num_items, embed_mf_size, is_edge_weight_train, learning_rate=0.01, name='GeneralizedMatrixFactorizationModel', **kwargs):
        """Create embeddings, output head, loss and optimizer.

        Args:
            num_users: Number of distinct users (embedding rows).
            num_items: Number of distinct items (embedding rows).
            embed_mf_size: Dimensionality of the MF embeddings.
            is_edge_weight_train: If True, train ``h`` with a sigmoid output
                and binary cross-entropy; otherwise keep a fixed head with a
                linear activation and MSE loss.
            learning_rate: Adam learning rate.
            name: Keras model name.
        """
        super().__init__(name=name, **kwargs)
        tf.random.set_seed(42)  # fixed seed so embedding init is reproducible
        self.num_users = num_users
        self.num_items = num_items
        self.embed_mf_size = embed_mf_size
        self.is_edge_weight_train = is_edge_weight_train
        self.initializer = tf.initializers.GlorotUniform()
        self.user_mf_embedding = keras.layers.Embedding(input_dim=self.num_users, output_dim=self.embed_mf_size, embeddings_initializer=self.initializer, name='U_GMF', dtype=tf.float32)
        self.item_mf_embedding = keras.layers.Embedding(input_dim=self.num_items, output_dim=self.embed_mf_size, embeddings_initializer=self.initializer, name='I_GMF', dtype=tf.float32)
        # Calling each embedding once forces eager weight creation (build).
        self.user_mf_embedding(0)
        self.item_mf_embedding(0)
        if self.is_edge_weight_train:
            self.activation = keras.activations.sigmoid
            self.edge_weight = tf.Variable(self.initializer([self.embed_mf_size, 1]), name='h')
            self.loss = keras.losses.BinaryCrossentropy()
        else:
            self.activation = keras.activations.linear
            # NOTE(review): scalar initial_value with a non-scalar `shape`
            # looks inconsistent — confirm TF accepts/broadcasts this as
            # intended.
            self.edge_weight = tf.Variable(initial_value=1, shape=[self.embed_mf_size, 1], name='h')
            self.loss = keras.losses.MeanSquaredError()
        self.optimizer = tf.optimizers.Adam(learning_rate)

    def call(self, inputs, training=None, mask=None):
        """Score (user, item) index pairs; returns squeezed scores."""
        (user, item) = inputs
        user_mf_e = self.user_mf_embedding(user)
        item_mf_e = self.item_mf_embedding(item)
        mf_output = (user_mf_e * item_mf_e)  # element-wise interaction
        output = self.activation(tf.matmul(mf_output, self.edge_weight))
        return tf.squeeze(output)

    def train_step(self, batch):
        """Run one optimization step on a (user, item, label) batch; returns the loss."""
        (user, pos, label) = batch
        with tf.GradientTape() as tape:
            output = self(inputs=(user, pos), training=True)
            loss = self.loss(label, output)
        grads = tape.gradient(loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return loss

    def get_recs(self, inputs, training=False, **kwargs):
        """Score candidate pairs at inference time (same forward pass as call)."""
        output = self(inputs, training=training)
        return tf.squeeze(output)

    def get_top_k(self, preds, train_mask, k=100):
        """Return top-k predictions per row, masking out entries where train_mask is False with -inf."""
        return tf.nn.top_k(tf.where(train_mask, preds, (- np.inf)), k=k, sorted=True)
class DummyOffPolicyAlgo(RLAlgorithm):
    """No-op stand-in for an off-policy RL algorithm, for use in tests.

    Every hook required by the RLAlgorithm interface is implemented as a
    docstring-only no-op.
    """

    def init_opt(self):
        """No-op: nothing to initialize for the dummy algorithm."""

    def train(self, runner):
        """No-op: ignore the runner and perform no training."""

    def train_once(self, itr, paths):
        """No-op: ignore the iteration index and sampled paths."""

    def optimize_policy(self, samples_data):
        """No-op: ignore the samples and leave the policy unchanged."""
def test_seterr():
    """seterr()/geterr() must round-trip for every error category/action pair."""
    entry_err = sc.geterr()
    try:
        for category, error_code in _sf_error_code_map.items():
            for action in _sf_error_actions:
                before = sc.geterr()
                returned_old = sc.seterr(**{category: action})
                # seterr() hands back exactly the state geterr() reported.
                assert_(before == returned_old)
                after = sc.geterr()
                assert_(after[category] == action)
                # Other than the category just changed, nothing else moved.
                before.pop(category)
                after.pop(category)
                assert_(before == after)
                _check_action(_sf_error_test_function, (error_code,), action)
    finally:
        # Restore whatever state we entered with.
        sc.seterr(**entry_err)
class VGGTransformerEncoderTest(TestFairseqEncoderBase):
    """Exercises VGGTransformerEncoder under several context/sampling configs."""

    def setUp(self):
        super().setUp()
        # Dummy batch: T=50 frames, D=80 features, B=5 sequences.
        self.setUpInput(get_dummy_input(T=50, D=80, B=5))

    def test_forward(self):
        """Run the base forward-pass checks for five encoder configurations.

        transformer_context is a (left, right) pair; -1 presumably means
        unlimited on that side — TODO confirm against the encoder docs.
        """
        print('1. test standard vggtransformer')
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80))
        super().test_forward()
        print('2. test vggtransformer with limited right context')
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80, transformer_context=((- 1), 5)))
        super().test_forward()
        print('3. test vggtransformer with limited left context')
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80, transformer_context=(5, (- 1))))
        super().test_forward()
        print('4. test vggtransformer with limited right context and sampling')
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80, transformer_context=((- 1), 12), transformer_sampling=(2, 2)))
        super().test_forward()
        print('5. test vggtransformer with windowed context and sampling')
        # NOTE(review): unlike cases 1-4, no super().test_forward() follows
        # this setup — possibly lost when this chunk was extracted; confirm.
        self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80, transformer_context=(12, 12), transformer_sampling=(2, 2)))
# NOTE(review): the line below looks like a truncated decorator (probably
# "@pytest.mark.experimental" or a similar project marker) mangled during
# extraction — confirm against the original file.
.experimental
def test_check_df_errors(data_preparator, long_log_with_features, mapping):
    """check_df must reject an empty dataframe and a mapping column absent from the dataframe."""
    # The user_idx > 10 filter presumably leaves no rows (matches the
    # 'DataFrame is empty' expectation) — verify against the fixture.
    with pytest.raises(ValueError, match='DataFrame is empty'):
        data_preparator.check_df(dataframe=long_log_with_features.filter((sf.col('user_idx') > 10)), columns_mapping=mapping)
    # Mapping still references 'relevance', but the column is dropped.
    with pytest.raises(ValueError, match='Column `relevance` stated in mapping is absent in dataframe'):
        col_map = mapping
        data_preparator.check_df(dataframe=long_log_with_features.drop('relevance'), columns_mapping=col_map)
class Timer(object):
    """Named, accumulating stopwatch with a process-wide registry.

    Fixes: ``running``/``paused``/``total_duration`` were plain methods but
    are used as attributes throughout this class (``timer.paused``,
    ``sum(timer.total_duration ...)``), which made ``paused`` always truthy
    and the duration aggregations raise TypeError — the ``@property`` /
    ``@staticmethod`` decorators were evidently lost; they are restored here.
    ``__enter__`` now also returns self so ``with Timer(...) as t`` works.
    """

    # Global registry of all timers, keyed by name.
    _TIMERS = dict()

    def __init__(self, name):
        self._name = name
        self.__tic_time = None        # start timestamp of the running interval, or None
        self.__total_duration = 0.0   # accumulated seconds over completed intervals

    def __enter__(self):
        self.tic()
        return self  # enable "with Timer(...) as t"

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.toc()

    def tic(self):
        """Start timing; errors if the timer is already running."""
        assert (self.__tic_time is None), "tic() has already been called for timer '{}'".format(self._name)
        self.__tic_time = current_time()

    def toc(self):
        """Stop timing and add the elapsed interval to the total."""
        assert (self.__tic_time is not None), "tic() has not been called for timer '{}'".format(self._name)
        self.__total_duration += (current_time() - self.__tic_time)
        self.__tic_time = None

    @property
    def running(self):
        """True while between tic() and toc()."""
        return self.__tic_time is not None

    @property
    def paused(self):
        """True when the timer is not currently running."""
        return not self.running

    @property
    def total_duration(self):
        """Accumulated duration (seconds) over all completed intervals."""
        return self.__total_duration

    @staticmethod
    def create(name):
        """Create and register a new timer; the name must be unused."""
        assert (name not in Timer._TIMERS), "Timer with name '{}' already exists".format(name)
        timer = Timer(name)
        Timer._TIMERS[name] = timer
        return timer

    @staticmethod
    def get(name):
        """Return the registered timer, creating it on first use."""
        if name not in Timer._TIMERS:
            return Timer.create(name)
        return Timer._TIMERS[name]

    @staticmethod
    def get_duration(name):
        """Return the accumulated duration of an existing timer."""
        timer = Timer._TIMERS.get(name, None)
        assert (timer is not None), "No timer named '{}' exists".format(name)
        return timer.total_duration

    @staticmethod
    def get_durations_sum():
        """Sum of accumulated durations across all registered timers."""
        return sum(timer.total_duration for timer in Timer._TIMERS.values())

    @staticmethod
    def print_durations():
        """Print each timer's duration and the overall total."""
        durations_sum = 0.0
        for (name, timer) in Timer._TIMERS.items():
            print(' - {}: {:03f} sec'.format(name, timer.total_duration))
            durations_sum += timer.total_duration
        print(' - TOTAL: {:03f} sec'.format(durations_sum))

    @staticmethod
    def log_duration(*timer_names):
        """Decorator: run the wrapped call inside the named timers (starting any that are paused)."""
        def wrap(f):
            def wrap2(*args, **kwargs):
                timers_to_pause = []
                for name in timer_names:
                    timer = Timer.get(name)
                    if timer.paused:
                        timers_to_pause.append(timer)
                        timer.tic()
                output = f(*args, **kwargs)
                # Only stop the timers this call actually started.
                for timer in timers_to_pause:
                    timer.toc()
                return output
            return wrap2
        return wrap

    @staticmethod
    def exclude_duration(*timer_names):
        """Decorator: pause the named running timers for the duration of the wrapped call."""
        def wrap(f):
            def wrap2(*args, **kwargs):
                timers_to_resume = []
                for name in timer_names:
                    if name in Timer._TIMERS:
                        timer = Timer.get(name)
                        if timer.running:
                            timer.toc()
                            timers_to_resume.append(timer)
                output = f(*args, **kwargs)
                # Restart only the timers this call paused.
                for timer in timers_to_resume:
                    timer.tic()
                return output
            return wrap2
        return wrap
def test_ipw_learner_create_train_data_for_opl():
    """_create_train_data_for_opl passes context through as X, weights by reward/pscore, and uses actions as labels."""
    ctx = np.array([[1.0, 1.0]])
    learner = IPWLearner(n_actions=2)
    X, sample_weight, y = learner._create_train_data_for_opl(
        context=ctx,
        action=np.array([0]),
        reward=np.array([1.0]),
        pscore=np.array([0.5]),
    )
    assert np.allclose(X, ctx)
    # reward / pscore = 1.0 / 0.5 = 2.0
    assert np.allclose(sample_weight, np.array([2.0]))
    assert np.allclose(y, np.array([0]))
def main():
    """Generate tool specifications from toolkit thoughts via the global generator.

    Relies on module-level objects (`args`, `generator`, `runner`, helpers)
    configured elsewhere in this file.
    """
    tool_thoughts = DataLoader.from_args(args, item_name='toolkit thought')
    format_example = read_file(args.format_example_file)
    # Output path: "<input stem>_spec.jsonl" next to the input file.
    output_file = f'{osp.splitext(tool_thoughts._input_path)[0]}_spec.jsonl'
    if (generator._stop_at in ['preprocess', 'prompt']):
        # Debug mode: run a single example and halt at the requested stage.
        result = generator(dict(example_tools=[format_example], toolkit=tool_thoughts[0]))
        print_intermediate_result_and_stop(result, generator._stop_at)
    def transform_thought(thought):
        # Returns (None, result) on success and (thought, None) on failure so
        # the runner can separate failed items from completed ones.
        inputs = dict(example_tools=[format_example], toolkit=thought)
        try:
            return (None, generator(inputs))
        except Exception as e:
            print(f'Error encountered: {e}')
            return (thought, None)
    (_, remaining_dataset, _) = runner.run(transform_thought, output_file, tool_thoughts)
    print(f'{len(remaining_dataset)} toolkits failed to be generated.')
def _calculate_asv_score(model, file_list, gt_root, trgspk, threshold):
    """Run speaker-verification accept/reject per converted file.

    Returns (per-file results dict, accept rate as a percentage).
    """
    results = {}
    for cvt_wav_path in tqdm(file_list):
        basename = get_basename(cvt_wav_path)
        # Ground-truth file for the same utterance number and target speaker.
        gt_wav_path = os.path.join(gt_root, trgspk, get_number(basename) + '.wav')
        results[basename] = calculate_accept(cvt_wav_path, gt_wav_path, model, threshold)
    accept_rate = 100.0 * float(np.mean(np.array(list(results.values()))))
    return (results, accept_rate)
def test_detect_action_size_from_env() -> None:
    """detect_action_size_from_env handles gym and gymnasium envs, discrete and continuous."""
    cases = [('CartPole-v1', 2), ('Pendulum-v1', 1)]
    for make in (gym.make, gymnasium.make):
        for env_id, expected in cases:
            assert detect_action_size_from_env(make(env_id)) == expected
def conv1x1(in_channels, out_channels, stride=1):
    """Build a pointwise (1x1) convolution with bias and no padding."""
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=True,
    )
def parameter_table(model):
    """Build a PrettyTable of each trainable parameter tensor's element count, plus a TOTAL row."""
    table = PrettyTable(['Modules', 'Parameters'])
    total = 0
    for name, parameter in model.named_parameters():
        # Frozen parameters are excluded from the report.
        if parameter.requires_grad:
            count = parameter.numel()
            table.add_row([name, count])
            total += count
    table.add_row(['TOTAL', total])
    return table
def determine_repayment(rng, group, score):
    """Sample a repayment outcome for a borrower.

    The group-specific repayment probability is selected via the exponent
    trick (group in {0, 1}); the outcome is positive when
    logit(rate) + logit(u) > 0 for u ~ Uniform(0, 1).
    """
    p_group0 = LOAN_REPAY_PROBS[0](score)
    p_group1 = LOAN_REPAY_PROBS[1](score)
    repayment_rate = (p_group0 ** (1 - group)) * (p_group1 ** group)
    uniform = rng.uniform()
    logit_rate = np.log(repayment_rate / (1.0 - repayment_rate))
    logit_u = np.log(uniform / (1.0 - uniform))
    return (logit_rate + logit_u) > 0.0
def centroid(vectors: List[np.array]) -> np.array:
    """Return the element-wise mean (centroid) of the given vectors."""
    return np.mean(np.stack(vectors), axis=0)
def test_tie_breaking_sample_order_invariance():
    """With max_features=1, the tie between equally frequent terms must not depend on sample order."""
    vectorizer = CountVectorizer(max_features=1)
    docs = ['hello', 'world']
    forward = vectorizer.fit(docs).vocabulary_
    backward = vectorizer.fit(list(reversed(docs))).vocabulary_
    assert forward == backward
class Ibgp(Layer, Graphable):
    """Layer that sets up full-mesh iBGP peering among the routers of each AS.

    Individual ASes can be excluded with maskAsn(). Depends on Ospf so
    loopback addresses are reachable for the iBGP sessions.
    """

    # ASNs excluded from iBGP setup.
    __masked: Set[int]

    def __init__(self):
        super().__init__()
        self.__masked = set()
        # iBGP rides on the IGP; Ospf must be rendered first.
        self.addDependency('Ospf', False, False)

    def __dfs(self, start: Node, visited: List[Node], netname: str='self'):
        """Collect into `visited` every router reachable from `start` over Local networks."""
        if (start in visited):
            return
        self._log('found node: as{}/{} via {}'.format(start.getAsn(), start.getName(), netname))
        visited.append(start)
        for iface in start.getInterfaces():
            net = iface.getNet()
            # Only traverse intra-AS (local) networks.
            if (net.getType() != NetworkType.Local):
                continue
            neighs: List[Node] = net.getAssociations()
            for neigh in neighs:
                # Only routers participate in iBGP.
                if (neigh.getRole() != NodeRole.Router):
                    continue
                self.__dfs(neigh, visited, net.getName())

    def getName(self) -> str:
        """Return the layer name."""
        return 'Ibgp'

    def maskAsn(self, asn: int) -> Ibgp:
        """Exclude an AS from iBGP setup; returns self for chaining."""
        self.__masked.add(asn)
        return self

    def getMaskedAsns(self) -> Set[int]:
        """Return the set of masked ASNs."""
        return self.__masked

    def render(self, emulator: Emulator):
        """Configure iBGP sessions between the loopbacks of every reachable router pair, per AS."""
        reg = emulator.getRegistry()
        base: Base = reg.get('seedemu', 'layer', 'Base')
        for asn in base.getAsns():
            if (asn in self.__masked):
                continue
            self._log('setting up IBGP peering for as{}...'.format(asn))
            routers: List[Node] = ScopedRegistry(str(asn), reg).getByType('rnode')
            for local in routers:
                self._log('setting up IBGP peering on as{}/{}...'.format(asn, local.getName()))
                remotes = []
                # Peers = all routers reachable from `local` over Local nets.
                self.__dfs(local, remotes)
                n = 1
                for remote in remotes:
                    if (local == remote):
                        continue
                    laddr = local.getLoopbackAddress()
                    raddr = remote.getLoopbackAddress()
                    # NOTE(review): table setup is repeated once per peer —
                    # presumably idempotent on the node; confirm.
                    local.addTable('t_bgp')
                    local.addTablePipe('t_bgp')
                    local.addTablePipe('t_direct', 't_bgp')
                    # One 'ibgpN' protocol instance per peer.
                    local.addProtocol('bgp', 'ibgp{}'.format(n), IbgpFileTemplates['ibgp_peer'].format(localAddress=laddr, peerAddress=raddr, asn=asn))
                    n += 1
                    self._log('adding peering: {} <-> {} (ibgp, as{})'.format(laddr, raddr, asn))

    def _doCreateGraphs(self, emulator: Emulator):
        """Build an 'iBGP sessions' graph per AS: layer-2 edges dotted, session edges solid."""
        base: Base = emulator.getRegistry().get('seedemu', 'layer', 'Base')
        for asn in base.getAsns():
            if (asn in self.__masked):
                continue
            asobj = base.getAutonomousSystem(asn)
            asobj.createGraphs(emulator)
            l2graph = asobj.getGraph('AS{}: Layer 2 Connections'.format(asn))
            ibgpgraph = self._addGraph('AS{}: iBGP sessions'.format(asn), False)
            ibgpgraph.copy(l2graph)
            for edge in ibgpgraph.edges:
                edge.style = 'dotted'
            # Full mesh: one solid edge per unordered router pair.
            rtrs = ScopedRegistry(str(asn), emulator.getRegistry()).getByType('rnode').copy()
            while (len(rtrs) > 0):
                a = rtrs.pop()
                for b in rtrs:
                    ibgpgraph.addEdge('Router: {}'.format(a.getName()), 'Router: {}'.format(b.getName()), style='solid')

    def print(self, indent: int) -> str:
        """Render a human-readable description of the layer at the given indent."""
        out = (' ' * indent)
        out += 'IbgpLayer:\n'
        indent += 4
        out += (' ' * indent)
        out += 'Masked ASes:\n'
        indent += 4
        for asn in self.__masked:
            out += (' ' * indent)
            out += '{}\n'.format(asn)
        return out
class Dstc8DataProcessor(object):
    """Converts DSTC8/SGD dialogue JSON files into model InputExamples.

    Utterances are tokenized with a BERT FullTokenizer; per USER turn and per
    service frame an example is built carrying intent, categorical-slot and
    non-categorical (span) slot supervision.
    """

    def __init__(self, dstc8_data_dir, dataset_config, vocab_file, do_lower_case, max_seq_length=DEFAULT_MAX_SEQ_LENGTH, log_data_warnings=False):
        """Store paths/config and build the wordpiece tokenizer.

        Args:
            dstc8_data_dir: Root directory containing per-split dialogue files.
            dataset_config: Config exposing `file_ranges` per dataset split.
            vocab_file: BERT vocabulary file for the FullTokenizer.
            do_lower_case: Whether tokenization lower-cases input.
            max_seq_length: Maximum model sequence length for examples.
            log_data_warnings: Whether examples should log data warnings.
        """
        self.dstc8_data_dir = dstc8_data_dir
        self._log_data_warnings = log_data_warnings
        self._dataset_config = dataset_config
        self._tokenizer = tokenization.FullTokenizer(vocab_file=vocab_file, do_lower_case=do_lower_case)
        self._max_seq_length = max_seq_length

    # NOTE(review): reads like an accessor that was probably decorated with
    # @property originally (decorator may have been lost during extraction);
    # as written it must be called as a method.
    def dataset_config(self):
        return self._dataset_config

    def get_dialog_examples(self, dataset):
        """Load every dialogue file for `dataset` (e.g. 'train') and build examples."""
        dialog_paths = [os.path.join(self.dstc8_data_dir, dataset, 'dialogues_{:03d}.json'.format(i)) for i in self._dataset_config.file_ranges[dataset]]
        dialogs = load_dialogues(dialog_paths)
        schema_path = os.path.join(self.dstc8_data_dir, dataset, 'schema.json')
        schemas = schema.Schema(schema_path)
        examples = []
        for (dialog_idx, dialog) in enumerate(dialogs):
            tf.logging.log_every_n(tf.logging.INFO, 'Processed %d dialogs.', 1000, dialog_idx)
            examples.extend(self._create_examples_from_dialog(dialog, schemas, dataset))
        return examples

    def _create_examples_from_dialog(self, dialog, schemas, dataset):
        """Create examples for every USER turn of one dialogue.

        History (utterance/frame pairs) accumulates across USER turns: on each
        user turn after the first, the preceding system turn and the previous
        user turn are appended.
        """
        dialog_id = dialog['dialogue_id']
        examples = []
        for (turn_idx, turn) in enumerate(dialog['turns']):
            if (turn['speaker'] == 'USER'):
                user_utterance = turn['utterance']
                user_frames = {f['service']: f for f in turn['frames']}
                if (turn_idx > 0):
                    # Assumes strict USER/SYSTEM alternation: idx-2 is the
                    # previous user turn, idx-1 the system turn after it.
                    # NOTE(review): history_utterance/history_frames are only
                    # initialized in the else-branch; a dialogue whose first
                    # USER turn has turn_idx > 0 would raise NameError —
                    # confirm the data guarantees user-first dialogues.
                    last_user_turn = dialog['turns'][(turn_idx - 2)]
                    last_user_utterance = last_user_turn['utterance']
                    last_user_frames = {f['service']: f for f in last_user_turn['frames']}
                    last_system_turn = dialog['turns'][(turn_idx - 1)]
                    last_system_utterance = last_system_turn['utterance']
                    last_system_frames = {f['service']: f for f in last_system_turn['frames']}
                    history_utterance.append([last_system_utterance, last_user_utterance])
                    history_frames.append([last_system_frames, last_user_frames])
                else:
                    history_utterance = []
                    history_frames = []
                turn_id = '{}-{}-{:02d}'.format(dataset, dialog_id, turn_idx)
                # `turn_state` (per-service slot values) is unused here.
                (turn_examples, turn_state) = self._create_examples_from_turn(turn_id, history_utterance, history_frames, user_utterance, user_frames, schemas)
                examples.extend(turn_examples)
        return examples

    def _get_state_update(self, current_state, prev_state):
        """Return slots newly set/changed this turn.

        A slot is dropped from the update when the previous turn's first
        value for it is still among the current values (it carried over).
        """
        state_update = dict(current_state)
        for (slot, values) in current_state.items():
            if ((slot in prev_state) and (prev_state[slot][0] in values)):
                state_update.pop(slot)
        return state_update

    def _create_examples_from_turn(self, turn_id, history_utterance, history_frames, current_user_utterance, current_user_frames, schemas):
        """Build one example per service frame of the current user turn.

        Returns:
            (examples, states): the built examples and the per-service
            slot_values of this turn.
        """
        # Tokenize the history, keeping char<->subword alignments so slot
        # spans (character offsets) can be mapped onto subword indices.
        history_system_tokens = []
        history_user_tokens = []
        history_system_inv_alignments = []
        history_user_inv_alignments = []
        history_system_alignments = []
        history_user_alignments = []
        history_system_utterance = []
        history_user_utterance = []
        for [system_utterance, user_utterance] in history_utterance:
            (system_tokens, system_alignments, system_inv_alignments) = self._tokenize(system_utterance)
            (user_tokens, user_alignments, user_inv_alignments) = self._tokenize(user_utterance)
            history_system_tokens.append(system_tokens)
            history_user_tokens.append(user_tokens)
            history_system_inv_alignments.append(system_inv_alignments)
            history_user_inv_alignments.append(user_inv_alignments)
            history_system_alignments.append(system_alignments)
            history_user_alignments.append(user_alignments)
            history_system_utterance.append(system_utterance)
            history_user_utterance.append(user_utterance)
        history_system_frames = []
        history_user_frames = []
        for [system_frames, user_frames] in history_frames:
            history_system_frames.append(system_frames)
            history_user_frames.append(user_frames)
        (current_user_tokens, current_user_alignments, current_user_inv_alignments) = self._tokenize(current_user_utterance)
        states = {}
        # One shared base example holds the utterance features; per-service
        # copies add schema-specific supervision on top.
        base_example = InputExample(dataset_config=self._dataset_config, max_seq_length=self._max_seq_length, is_real_example=True, tokenizer=self._tokenizer, log_data_warnings=self._log_data_warnings)
        base_example.example_id = turn_id
        base_example.add_utterance_features(current_user_tokens, current_user_inv_alignments, history_user_tokens, history_user_inv_alignments, history_system_tokens, history_system_inv_alignments, history_system_utterance, history_user_utterance)
        examples = []
        for (service, user_frame) in current_user_frames.items():
            try:
                example = base_example.make_copy_with_utterance_features()
                example.example_id = '{}-{}'.format(turn_id, service)
                example.service_schema = schemas.get_service_schema(service)
                state = user_frame['state']['slot_values']
                states[service] = state
                example.add_intents(user_frame)
                example.add_categorical_slots(state)
                # Span supervision for the current utterance. The bias of 2
                # presumably accounts for leading special tokens — TODO confirm.
                user_span_boundaries = self._find_subword_indices(state, current_user_utterance, user_frame['slots'], current_user_alignments, current_user_tokens, 2)
                history_span_boundaries = {}
                bias = (2 + len(current_user_tokens))
                for (turn_system_utterance, turn_user_utterance, turn_system_frames, turn_user_frames, turn_system_alignments, turn_user_alignments, turn_system_tokens, turn_user_tokens) in zip(history_system_utterance, history_user_utterance, history_system_frames, history_user_frames, history_system_alignments, history_user_alignments, history_system_tokens, history_user_tokens):
                    # Bias tracks the subword offset of each history segment
                    # in the flattened model input; the +2 / +1 increments
                    # presumably skip separator tokens — TODO confirm.
                    bias += 2
                    turn_user_frame = turn_user_frames.get(service, None)
                    if (turn_user_frame is not None):
                        his_user_span_boundaries = self._find_subword_indices(state, turn_user_utterance, turn_user_frame['slots'], turn_user_alignments, turn_user_tokens, bias)
                    else:
                        his_user_span_boundaries = {}
                    bias += (len(turn_user_tokens) + 1)
                    turn_system_frame = turn_system_frames.get(service, None)
                    if (turn_system_frame is not None):
                        his_system_span_boundaries = self._find_subword_indices(state, turn_system_utterance, turn_system_frame['slots'], turn_system_alignments, turn_system_tokens, bias)
                    else:
                        his_system_span_boundaries = {}
                    bias += len(turn_system_tokens)
                    # First-seen entry wins (keys are slot names, despite the
                    # "value" variable names — see _find_subword_indices).
                    for (turn_user_value, turn_user_span_boundaries) in his_user_span_boundaries.items():
                        if (turn_user_value in history_span_boundaries):
                            continue
                        else:
                            history_span_boundaries[turn_user_value] = turn_user_span_boundaries
                    for (turn_system_value, turn_system_span_boundaries) in his_system_span_boundaries.items():
                        if (turn_system_value in history_span_boundaries):
                            continue
                        else:
                            history_span_boundaries[turn_system_value] = turn_system_span_boundaries
                example.add_noncategorical_slots(state, user_span_boundaries, history_span_boundaries)
                # Append the END token to the decoder target sequence.
                position_bias = 1
                example.output[(position_bias + example.dec_output_len)] = END_ID
                example.dec_output_len = (example.dec_output_len + 1)
                examples.append(example)
            except Exception as e:
                # Best-effort: report the failure and skip this service frame.
                traceback.print_exc()
                print(current_user_utterance)
        return (examples, states)

    def _find_subword_indices(self, slot_values, utterance, char_slot_spans, alignments, subwords, bias):
        """Map character-level slot spans onto biased subword index ranges.

        Returns a dict: slot name -> (start_subword + bias, end_subword + bias)
        for the first listed value of each slot that has a span here.
        """
        span_boundaries = {}
        for (slot, values) in slot_values.items():
            value_char_spans = {}
            for slot_span in char_slot_spans:
                if ((slot_span['slot'] == slot) and ('start' in slot_span)):
                    value = utterance[slot_span['start']:slot_span['exclusive_end']]
                    start_tok_idx = alignments[slot_span['start']]
                    end_tok_idx = alignments[(slot_span['exclusive_end'] - 1)]
                    if (0 <= start_tok_idx < len(subwords)):
                        # Clamp so truncated tokenizations stay in range.
                        end_tok_idx = min(end_tok_idx, (len(subwords) - 1))
                        value_char_spans[value] = ((start_tok_idx + bias), (end_tok_idx + bias))
            for v in values:
                if (v in value_char_spans):
                    span_boundaries[slot] = value_char_spans[v]
                    break
        return span_boundaries

    def _tokenize(self, utterance):
        """Wordpiece-tokenize `utterance`.

        Returns:
            (bert_tokens, alignments, inverse_alignments) where `alignments`
            maps a token's first/last char index -> subword index, and
            `inverse_alignments` gives (start_char, end_char) per subword.
        """
        utterance = tokenization.convert_to_unicode(utterance)
        # Coarse split (keeping whitespace tokens) before wordpiece.
        tokens = _naive_tokenize(utterance)
        alignments = {}
        char_index = 0
        bert_tokens = []
        bert_tokens_start_chars = []
        bert_tokens_end_chars = []
        for token in tokens:
            if token.strip():
                subwords = self._tokenizer.tokenize(token)
                alignments[char_index] = len(bert_tokens)
                bert_tokens_start_chars.extend(([char_index] * len(subwords)))
                bert_tokens.extend(subwords)
                inclusive_char_end = ((char_index + len(token)) - 1)
                alignments[inclusive_char_end] = (len(bert_tokens) - 1)
                bert_tokens_end_chars.extend(([inclusive_char_end] * len(subwords)))
            # Advance over every token, including pure-whitespace ones.
            char_index += len(token)
        inverse_alignments = list(zip(bert_tokens_start_chars, bert_tokens_end_chars))
        return (bert_tokens, alignments, inverse_alignments)

    def get_num_dialog_examples(self, dataset):
        """Count examples for `dataset`: one per service frame per USER turn."""
        example_count = 0
        dialog_paths = [os.path.join(self.dstc8_data_dir, dataset, 'dialogues_{:03d}.json'.format(i)) for i in self._dataset_config.file_ranges[dataset]]
        dst_set = load_dialogues(dialog_paths)
        for dialog in dst_set:
            for turn in dialog['turns']:
                if (turn['speaker'] == 'USER'):
                    example_count += len(turn['frames'])
        return example_count
# NOTE(review): reads module-level `pixels` and `res`; the Taichi-typed
# signature suggests a "@ti.kernel" decorator was lost in extraction — confirm.
def paint(t: ti.f32, tex: ti.types.texture(num_dimensions=2), n: ti.i32):
    """Fill `pixels` from a texture warped by a time-varying sine/cosine offset.

    The right half of the image (uv.x > 0.5) uses filtered sampling
    (sample_lod at level 0); the left half uses integer texel fetch scaled by
    `n`. The texture's red channel is written to all three output channels.
    """
    for (i, j) in pixels:
        # Normalized coordinates in [0, 1).
        uv = ti.Vector([(i / res[0]), (j / res[1])])
        # Animated distortion of the sample position.
        warp_uv = (uv + (ti.Vector([ti.cos((t + (uv.x * 5.0))), ti.sin((t + (uv.y * 5.0)))]) * 0.1))
        c = ti.math.vec4(0.0)
        if (uv.x > 0.5):
            c = tex.sample_lod(warp_uv, 0.0)
        else:
            c = tex.fetch(ti.cast((warp_uv * n), ti.i32), 0)
        pixels[(i, j)] = [c.r, c.r, c.r]
# NOTE(review): the line below looks like a truncated decorator (probably
# "@optplan.register_node_type(...)" or similar) mangled during extraction —
# confirm against the original file.
_node_type()
class Sum(optplan.Function):
    """Optplan node representing the sum of a list of functions.

    `+` is overloaded: adding another Sum concatenates operand lists, adding a
    plain function appends it, and adding a numeric constant wraps it via
    make_constant.
    """
    # Polymorphic schema type discriminator.
    type = schema_utils.polymorphic_model_type('function.sum')
    # Operand functions; flattened when two Sums are added.
    # NOTE(review): mutable default `[]` — presumably the schematics ListType
    # copies it per instance; confirm there is no cross-instance sharing.
    functions = types.ListType(optplan.ReferenceType(optplan.Function), default=[])

    def __add__(self, obj):
        """Return a new Sum with `obj` folded into the operand list."""
        if isinstance(obj, Sum):
            return Sum(functions=(self.functions + obj.functions))
        if isinstance(obj, optplan.Function):
            return Sum(functions=(self.functions + [obj]))
        if isinstance(obj, (numbers.Number, optplan.ComplexNumber)):
            return Sum(functions=(self.functions + [make_constant(obj)]))
        raise TypeError('Attempting to add a node with type {} to type `Sum`.'.format(type(obj)))
class DeepGuidedFilter(nn.Module):
    """Deep guided filter: a learned low-resolution network refined by a fast guided filter.

    The low-resolution branch produces guidance output which FastGuidedFilter
    transfers onto the high-resolution input.
    """

    def __init__(self, radius=1, eps=1e-08):
        super(DeepGuidedFilter, self).__init__()
        self.lr = build_lr_net()                 # low-resolution CNN branch
        self.gf = FastGuidedFilter(radius, eps)  # guided-filter transfer/upsampler

    def forward(self, x_lr, x_hr):
        """Apply lr(x_lr) as guidance, transfer to x_hr, clamp output to [0, 1]."""
        return self.gf(x_lr, self.lr(x_lr), x_hr).clamp(0, 1)

    def init_lr(self, path):
        """Load pretrained weights for the low-res branch (non-strict, so partial checkpoints load)."""
        self.lr.load_state_dict(torch.load(path), strict=False)
def get_discriminator_optimizer():
    """Build the discriminator network and its optimizer from global FLAGS.

    Returns:
        (D, optim): the wrapped discriminator module and its optimizer.
    """
    # NOTE(review): the discriminator class is keyed by FLAGS.g_model_name
    # (generator flag) rather than a d_model_name — confirm intentional.
    module = discriminator_dict[FLAGS.g_model_name.lower()]
    # (height/width, channels, number of labels) for the chosen dataset.
    (hw, c, nlabel) = hw_dict[FLAGS.dataset.lower()]
    D = module(z_dim=FLAGS.d_z_dim, n_label=nlabel, im_size=hw, im_chan=c, embed_size=FLAGS.d_embed_size, nfilter=FLAGS.d_nfilter, nfilter_max=FLAGS.d_nfilter_max, actvn=actvn_dict[FLAGS.d_actvn]())
    D = discriminator_wrapper(D)
    optim = get_optimizer(D.parameters(), FLAGS.d_optim, FLAGS.d_lr, FLAGS.d_beta1, FLAGS.d_beta2)
    return (D, optim)
def test_test_case_to_ast_once(simple_test_case):
    """Visiting the same test case twice must not duplicate statements in the generated AST."""
    visitor = tc_to_ast.TestCaseToAstVisitor(ns.NamingScope('module'), set())
    simple_test_case.accept(visitor)
    # Second visit: the visitor is expected to keep only a single copy.
    simple_test_case.accept(visitor)
    assert (ast.unparse(ast.fix_missing_locations(Module(body=visitor.test_case_ast, type_ignores=[]))) == 'int_0 = 5\nsome_type_0 = module_0.SomeType(int_0)\nassert some_type_0 == 3')
# NOTE(review): the line below looks like a truncated decorator (probably
# "@pytest.mark.parametrize('csr_container', CSR_CONTAINERS)") mangled during
# extraction — confirm against the original file.
.parametrize('csr_container', CSR_CONTAINERS)
def test_dbscan_input_not_modified_precomputed_sparse_nodiag(csr_container):
    """dbscan on a precomputed sparse matrix without explicit diagonal entries must not modify its input."""
    X = np.random.RandomState(0).rand(10, 10)
    # Zero diagonal: no stored diagonal entries once converted to sparse.
    np.fill_diagonal(X, 0)
    X = csr_container(X)
    assert all(((row != col) for (row, col) in zip(*X.nonzero())))
    X_copy = X.copy()
    dbscan(X, metric='precomputed')
    # Input unchanged: same sparsity structure and identical values.
    assert (X.nnz == X_copy.nnz)
    assert_array_equal(X.toarray(), X_copy.toarray())
def __starts_with(anaphor_cleaned_tokens, antecedent_cleaned_tokens):
    """Return True if the two token sequences agree position by position.

    Comparison stops at the end of the shorter sequence (zip semantics), so a
    sequence that matches a prefix of the other yields True.
    """
    return all(
        ana == ante
        for ana, ante in zip(anaphor_cleaned_tokens, antecedent_cleaned_tokens)
    )
# NOTE(review): the bare line below looks like a truncated decorator (possibly
# "@is_pipeline_test") mangled during extraction — confirm.
_pipeline_test
class CustomPipelineTest(unittest.TestCase):
    """Tests registration, warning behavior, and dynamic loading of custom pipelines."""

    def test_warning_logs(self):
        """Re-registering an existing task alias must log an 'already registered' warning."""
        transformers_logging.set_verbosity_debug()
        logger_ = transformers_logging.get_logger('transformers.pipelines.base')
        alias = 'text-classification'
        (_, original_task, _) = PIPELINE_REGISTRY.check_task(alias)
        try:
            with CaptureLogger(logger_) as cm:
                PIPELINE_REGISTRY.register_pipeline(alias, PairClassificationPipeline)
            self.assertIn(f'{alias} is already registered', cm.out)
        finally:
            # Restore the original task definition regardless of outcome.
            PIPELINE_REGISTRY.supported_tasks[alias] = original_task

    def test_register_pipeline(self):
        """register_pipeline must expose the new task with the expected definition dict."""
        PIPELINE_REGISTRY.register_pipeline('custom-text-classification', pipeline_class=PairClassificationPipeline, pt_model=(AutoModelForSequenceClassification if is_torch_available() else None), tf_model=(TFAutoModelForSequenceClassification if is_tf_available() else None), default={'pt': 'hf-internal-testing/tiny-random-distilbert'}, type='text')
        assert ('custom-text-classification' in PIPELINE_REGISTRY.get_supported_tasks())
        (_, task_def, _) = PIPELINE_REGISTRY.check_task('custom-text-classification')
        # Framework entries are tuples when the framework is installed.
        self.assertEqual(task_def['pt'], ((AutoModelForSequenceClassification,) if is_torch_available() else ()))
        self.assertEqual(task_def['tf'], ((TFAutoModelForSequenceClassification,) if is_tf_available() else ()))
        self.assertEqual(task_def['type'], 'text')
        self.assertEqual(task_def['impl'], PairClassificationPipeline)
        self.assertEqual(task_def['default'], {'model': {'pt': 'hf-internal-testing/tiny-random-distilbert'}})
        # Clean up the registry so other tests are unaffected.
        del PIPELINE_REGISTRY.supported_tasks['custom-text-classification']

    # NOTE(review): the bare "_torch_or_tf" / "_torch" lines below look like
    # truncated "@require_torch_or_tf" / "@require_torch" decorators — confirm.
    _torch_or_tf
    def test_dynamic_pipeline(self):
        """A custom pipeline saved with a model must be loadable only with trust_remote_code=True."""
        PIPELINE_REGISTRY.register_pipeline('pair-classification', pipeline_class=PairClassificationPipeline, pt_model=(AutoModelForSequenceClassification if is_torch_available() else None), tf_model=(TFAutoModelForSequenceClassification if is_tf_available() else None))
        classifier = pipeline('pair-classification', model='hf-internal-testing/tiny-random-bert')
        # Unregister so reloading must go through the saved custom code path.
        del PIPELINE_REGISTRY.supported_tasks['pair-classification']
        with tempfile.TemporaryDirectory() as tmp_dir:
            classifier.save_pretrained(tmp_dir)
            # The custom task definition is persisted in the model config.
            self.assertDictEqual(classifier.model.config.custom_pipelines, {'pair-classification': {'impl': 'custom_pipeline.PairClassificationPipeline', 'pt': (('AutoModelForSequenceClassification',) if is_torch_available() else ()), 'tf': (('TFAutoModelForSequenceClassification',) if is_tf_available() else ())}})
            # Loading remote code without opting in must fail.
            with self.assertRaises(ValueError):
                _ = pipeline(model=tmp_dir)
            new_classifier = pipeline(model=tmp_dir, trust_remote_code=True)
            # Explicit task + trust_remote_code=False falls back to the stock pipeline.
            old_classifier = pipeline('text-classification', model=tmp_dir, trust_remote_code=False)
        self.assertEqual(new_classifier.__class__.__name__, 'PairClassificationPipeline')
        self.assertEqual(new_classifier.task, 'pair-classification')
        results = new_classifier('I hate you', second_text='I love you')
        self.assertDictEqual(nested_simplify(results), {'label': 'LABEL_0', 'score': 0.505, 'logits': [(- 0.003), (- 0.024)]})
        self.assertEqual(old_classifier.__class__.__name__, 'TextClassificationPipeline')
        self.assertEqual(old_classifier.task, 'text-classification')
        results = old_classifier('I hate you', text_pair='I love you')
        self.assertListEqual(nested_simplify(results), [{'label': 'LABEL_0', 'score': 0.505}])

    _torch_or_tf
    def test_cached_pipeline_has_minimum_calls_to_head(self):
        """A second pipeline construction from cache must only issue a single HEAD request."""
        _ = pipeline('text-classification', model='hf-internal-testing/tiny-random-bert')
        with RequestCounter() as counter:
            _ = pipeline('text-classification', model='hf-internal-testing/tiny-random-bert')
        self.assertEqual(counter.get_request_count, 0)
        self.assertEqual(counter.head_request_count, 1)
        self.assertEqual(counter.other_request_count, 0)

    _torch
    def test_chunk_pipeline_batching_single_file(self):
        """Chunked ASR with a large batch size must run the model forward exactly once for one file."""
        pipe = pipeline(model='hf-internal-testing/tiny-random-Wav2Vec2ForCTC')
        ds = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation').sort('id')
        audio = ds[40]['audio']['array']
        pipe = pipeline(model='hf-internal-testing/tiny-random-Wav2Vec2ForCTC')
        self.COUNT = 0
        forward = pipe.model.forward
        def new_forward(*args, **kwargs):
            # Count forward passes to verify batching collapses all chunks.
            self.COUNT += 1
            return forward(*args, **kwargs)
        pipe.model.forward = new_forward
        for out in pipe(audio, return_timestamps='char', chunk_length_s=3, stride_length_s=[1, 1], batch_size=1024):
            pass
        self.assertEqual(self.COUNT, 1)
def changeBipartiteEgoTwoStar(mode, G, A, i):
    """Bipartite ego two-star change statistic: the two-star statistic when node i is in `mode`, else 0."""
    if G.bipartite_node_mode(i) != mode:
        return 0
    return changeStatisticsALAAM.changeTwoStar(G, A, i)
def get_dataset(split_name='train', **kwargs):
    """Build a PoseTextDataset over the dicta_sign data for the given split."""
    sources = [get_single_dataset(name='dicta_sign', **kwargs)]
    combined = list(chain.from_iterable(source.data for source in sources))
    return PoseTextDataset(TextPoseDataset(combined), split=split_name)
def bold_extreme_values(data, data_max=(- 1), col_name=None):
    """Format a (value, error) pair for a LaTeX results table.

    Fixes: removed a stray debug ``print(col_name)`` and a dead
    ``if (False and ...)`` branch (the error was always formatted with
    ``{:.1f}``).

    Args:
        data: Tuple of (value, error).
        data_max: Column maximum; the value is bolded when it equals this.
        col_name: Column identifier (e.g. a tuple of header levels) used to
            pick per-metric formatting.

    Returns:
        A LaTeX-ready string, or '---' when both value and error are zero.
    """
    (data, err) = data
    if (data == err == 0.0):
        return '---'
    bold = (data == data_max)
    # QD scores are reported in units of 10^4 (NaN is preserved).
    if ('QD score' in col_name):
        if not np.isnan(data):
            data = int((data / 10000))
        if not np.isnan(err):
            err = (err / 10000)
    if any(((c in col_name) for c in ['archive size', 'QD score'])):
        data = '{:,.0f}'.format(data)
    elif ('diversity' in col_name[1]):
        data = '{:.2f}'.format(data)
    else:
        data = '{:.1f}'.format(data)
    if ('maintained' in col_name[1]):
        data = '{} \\%'.format(data)
    err = '{:.1f}'.format(err)
    if bold:
        return (('\\textbf{' + str(data)) + '} ') + str(err)
    return f'{data} {err}'
def mockingjay_100hr(refresh=False, *args, **kwargs):
    """Alias preset: Mockingjay log-Mel base (AdamW, batch 32, 200k steps) trained on 100 hours.

    Delegates directly to mockingjay_logMelBase_T_AdamW_b32_200k_100hr,
    forwarding `refresh` as a keyword argument.
    """
    target = mockingjay_logMelBase_T_AdamW_b32_200k_100hr
    return target(*args, refresh=refresh, **kwargs)
def test_validation(skip_remote, dataset):
    """dataset.validate() must report no missing files and no invalid checksums."""
    if dataset is None:
        pytest.skip()
    missing_files, invalid_checksums = dataset.validate(verbose=True)
    # Every non-'version' index key should map to an empty problem dict.
    expected = {key: {} for key in dataset._index.keys() if key != 'version'}
    assert missing_files == expected
    assert invalid_checksums == expected
class ATAE_LSTM(nn.Module):
    """Attention-based LSTM with Aspect Embedding (ATAE-LSTM) for aspect-level sentiment classification."""

    def __init__(self, embedding_matrix, opt):
        super(ATAE_LSTM, self).__init__()
        self.opt = opt
        # Pretrained word embeddings (from_pretrained freezes them by default).
        self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
        self.squeeze_embedding = SqueezeEmbedding()
        # LSTM consumes [aspect ; word] concatenation, hence embed_dim * 2.
        self.lstm = DynamicLSTM((opt.embed_dim * 2), opt.hidden_dim, num_layers=1, batch_first=True)
        self.attention = NoQueryAttention((opt.hidden_dim + opt.embed_dim), score_function='bi_linear')
        self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)

    def forward(self, inputs):
        """inputs = (text_indices, aspect_indices); returns polarity logits."""
        (text_indices, aspect_indices) = (inputs[0], inputs[1])
        # Non-zero entries are real tokens (index 0 is padding).
        x_len = torch.sum((text_indices != 0), dim=(- 1))
        x_len_max = torch.max(x_len)
        aspect_len = torch.sum((aspect_indices != 0), dim=(- 1)).float()
        x = self.embed(text_indices)
        x = self.squeeze_embedding(x, x_len)
        aspect = self.embed(aspect_indices)
        # Mean-pool the aspect tokens, then broadcast across the sequence.
        aspect_pool = torch.div(torch.sum(aspect, dim=1), aspect_len.unsqueeze(1))
        aspect = aspect_pool.unsqueeze(1).expand((- 1), x_len_max, (- 1))
        x = torch.cat((aspect, x), dim=(- 1))
        (h, (_, _)) = self.lstm(x, x_len)
        # Attention over [hidden ; aspect] yields pooling weights for h.
        ha = torch.cat((h, aspect), dim=(- 1))
        (_, score) = self.attention(ha)
        output = torch.squeeze(torch.bmm(score, h), dim=1)
        out = self.dense(output)
        return out
class PReLU_MobileNet(nn.Module):
    """MobileNet variant for CIFAR-sized inputs with a PReLU stem activation.

    cfg entries are either an int (out_planes, stride 1) or a
    (out_planes, stride) tuple.
    """
    cfg = [64, (128, 2), 128, (256, 2), 256, (512, 2), 512, 512, 512, 512, 512, (1024, 2), 1024]

    def __init__(self, num_classes=10):
        super(PReLU_MobileNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(32)
        self.layers = self._make_layers(in_planes=32)
        self.linear = nn.Linear(1024, num_classes)
        self.prelu = nn.PReLU()

    def _make_layers(self, in_planes):
        """Build the Block stack described by cfg (presumably MobileNet depthwise-separable blocks — confirm in Block's definition)."""
        layers = []
        for x in self.cfg:
            out_planes = (x if isinstance(x, int) else x[0])
            stride = (1 if isinstance(x, int) else x[1])
            layers.append(Block(in_planes, out_planes, stride))
            in_planes = out_planes
        return nn.Sequential(*layers)

    def forward(self, x):
        """Stem conv -> block stack -> 2x2 average pool -> flatten -> classifier."""
        out = self.prelu(self.bn1(self.conv1(x)))
        out = self.layers(out)
        out = F.avg_pool2d(out, 2)
        out = out.view(out.size(0), (- 1))
        out = self.linear(out)
        return out
def main(command_line=0):
    """Entry point for the Cython compiler driver.

    When `command_line` is truthy, options and sources are parsed from
    sys.argv; otherwise defaults are used and every argv entry is treated as
    a source file. Exits with status 1 on any compilation failure.
    """
    args = sys.argv[1:]
    any_failures = 0
    if command_line:
        from .CmdLine import parse_command_line
        options, sources = parse_command_line(args)
    else:
        options = CompilationOptions(default_options)
        sources = args
    if options.show_version:
        sys.stderr.write('Cython version %s\n' % version)
    if options.working_path != '':
        os.chdir(options.working_path)
    try:
        if compile(sources, options).num_errors > 0:
            any_failures = 1
    except (EnvironmentError, PyrexError) as e:
        sys.stderr.write('%s\n' % e)
        any_failures = 1
    if any_failures:
        sys.exit(1)
def test_ricci_community_all_possible_clusterings():
    """Regression test: Ricci-flow community detection on Zachary's karate club.

    Runs 40 Ricci-flow iterations, then checks both the cut values and the
    node->community assignment of every clustering the detector proposes
    against frozen expected answers.
    """
    G = nx.karate_club_graph()
    # Strip the edge attributes so the flow starts from a clean, unweighted graph.
    for (n1, n2, d) in G.edges(data=True):
        d.clear()
    orc = OllivierRicci(G, exp_power=1, alpha=0.5)
    orc.compute_ricci_flow(iterations=40)
    # Each entry is a (cut_value, clustering_dict) pair.
    cc = orc.ricci_community_all_possible_clusterings()
    cuts = [x[0] for x in cc]
    clusterings = [x[1] for x in cc]
    # Frozen expected answers: seven clusterings, the first six identical.
    cuts_ans = [1., 1., 1., 1., 1., 1., 1.]
    clusterings_ans = [{0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 23: 2, 24: 2, 25: 2, 26: 2, 27: 2, 28: 2, 29: 2, 30: 2, 31: 2}, {0: 0, 1: 0, 2: 0, 3: 0, 7: 0, 9: 0, 11: 0, 12: 0, 13: 0, 17: 0, 19: 0, 21: 0, 4: 1, 5: 1, 6: 1, 10: 1, 16: 1, 32: 2, 33: 2, 8: 2, 14: 2, 15: 2, 18: 2, 20: 2, 22: 2, 30: 2, 23: 3, 24: 3, 25: 3, 26: 3, 27: 3, 28: 3, 29: 3, 31: 3}]
    npt.assert_array_almost_equal(cuts, cuts_ans)
    assert (clusterings == clusterings_ans)
def rnn_helper(inp, length, cell_type=None, direction='forward', name=None, *args, **kwargs):
    """Run an RNN over ``inp`` in the requested direction(s) under scope ``name``.

    cell_type selects the cell factory ('lstm' is the only supported value);
    direction is 'forward', 'backward', or 'bidirectional'.  Bidirectional
    output is the concatenation of both passes along axis 2.  When the cell
    factory returns a tuple, only its first element (the outputs) is kept.
    """
    assert cell_type is not None
    # Dispatch table of supported cell factories; unknown types stay None
    # and trip the assert below, as before.
    rnn_func = {'lstm': lstm_layer}.get(cell_type)
    assert rnn_func is not None
    assert direction in ['forward', 'backward', 'bidirectional']
    with tf.variable_scope(name):
        if direction != 'backward':
            forward = rnn_func(*args, inp=inp, length=length, backward=False, name='forward', **kwargs)
            if isinstance(forward, tuple):
                forward = forward[0]
        if direction != 'forward':
            backward = rnn_func(*args, inp=inp, length=length, backward=True, name='backward', **kwargs)
            if isinstance(backward, tuple):
                backward = backward[0]
        if direction == 'forward':
            return forward
        if direction == 'backward':
            return backward
        # bidirectional: concatenate along the feature axis.
        return tf.concat(2, [forward, backward])
def register_Ns3AodvRoutingTableEntry_methods(root_module, cls):
    """Register the ns3::aodv::RoutingTableEntry API on its PyBindGen wrapper.

    Generated-style binding code: each call mirrors a C++ constructor, method
    or attribute signature exactly, so the strings must stay literal.
    """
    ## Copy constructor, then the full constructor with one default per argument.
    cls.add_constructor([param('ns3::aodv::RoutingTableEntry const &', 'arg0')])
    cls.add_constructor([param('ns3::Ptr< ns3::NetDevice >', 'dev', default_value='0'), param('ns3::Ipv4Address', 'dst', default_value='ns3::Ipv4Address()'), param('bool', 'vSeqNo', default_value='false'), param('uint32_t', 'seqNo', default_value='0'), param('ns3::Ipv4InterfaceAddress', 'iface', default_value='ns3::Ipv4InterfaceAddress()'), param('uint16_t', 'hops', default_value='0'), param('ns3::Ipv4Address', 'nextHop', default_value='ns3::Ipv4Address()'), param('ns3::Time', 'lifetime', default_value='ns3::Simulator::Now()')])
    ## Precursor-list management.
    cls.add_method('DeleteAllPrecursors', 'void', [])
    cls.add_method('DeletePrecursor', 'bool', [param('ns3::Ipv4Address', 'id')])
    ## Const accessors.
    cls.add_method('GetBlacklistTimeout', 'ns3::Time', [], is_const=True)
    cls.add_method('GetDestination', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetFlag', 'ns3::aodv::RouteFlags', [], is_const=True)
    cls.add_method('GetHop', 'uint16_t', [], is_const=True)
    cls.add_method('GetInterface', 'ns3::Ipv4InterfaceAddress', [], is_const=True)
    cls.add_method('GetLifeTime', 'ns3::Time', [], is_const=True)
    cls.add_method('GetNextHop', 'ns3::Ipv4Address', [], is_const=True)
    cls.add_method('GetOutputDevice', 'ns3::Ptr< ns3::NetDevice >', [], is_const=True)
    cls.add_method('GetPrecursors', 'void', [param('std::vector< ns3::Ipv4Address > &', 'prec')], is_const=True)
    cls.add_method('GetRoute', 'ns3::Ptr< ns3::Ipv4Route >', [], is_const=True)
    cls.add_method('GetRreqCnt', 'uint8_t', [], is_const=True)
    cls.add_method('GetSeqNo', 'uint32_t', [], is_const=True)
    cls.add_method('GetValidSeqNo', 'bool', [], is_const=True)
    ## State transitions and queries.
    cls.add_method('IncrementRreqCnt', 'void', [])
    cls.add_method('InsertPrecursor', 'bool', [param('ns3::Ipv4Address', 'id')])
    cls.add_method('Invalidate', 'void', [param('ns3::Time', 'badLinkLifetime')])
    cls.add_method('IsPrecursorListEmpty', 'bool', [], is_const=True)
    cls.add_method('IsUnidirectional', 'bool', [], is_const=True)
    cls.add_method('LookupPrecursor', 'bool', [param('ns3::Ipv4Address', 'id')])
    cls.add_method('Print', 'void', [param('ns3::Ptr< ns3::OutputStreamWrapper >', 'stream')], is_const=True)
    ## Mutators.
    cls.add_method('SetBlacklistTimeout', 'void', [param('ns3::Time', 't')])
    cls.add_method('SetFlag', 'void', [param('ns3::aodv::RouteFlags', 'flag')])
    cls.add_method('SetHop', 'void', [param('uint16_t', 'hop')])
    cls.add_method('SetInterface', 'void', [param('ns3::Ipv4InterfaceAddress', 'iface')])
    cls.add_method('SetLifeTime', 'void', [param('ns3::Time', 'lt')])
    cls.add_method('SetNextHop', 'void', [param('ns3::Ipv4Address', 'nextHop')])
    cls.add_method('SetOutputDevice', 'void', [param('ns3::Ptr< ns3::NetDevice >', 'dev')])
    cls.add_method('SetRoute', 'void', [param('ns3::Ptr< ns3::Ipv4Route >', 'r')])
    cls.add_method('SetRreqCnt', 'void', [param('uint8_t', 'n')])
    cls.add_method('SetSeqNo', 'void', [param('uint32_t', 'sn')])
    cls.add_method('SetUnidirectional', 'void', [param('bool', 'u')])
    cls.add_method('SetValidSeqNo', 'void', [param('bool', 's')])
    ## Public instance attribute exposed directly.
    cls.add_instance_attribute('m_ackTimer', 'ns3::Timer', is_const=False)
    return
def find_backward_implementation(forward_sdfg: SDFG, forward_state: SDFGState, node: nd.Node) -> typing.Optional[BackwardImplementation]:
    """Choose a BackwardImplementation for ``node``, or None if none applies.

    Collects every registered implementation whose node-type or ONNX-op filter
    matches ``node`` and whose ``backward_can_be_applied`` accepts it.  If the
    node requests a specific implementation (``backward_implementation``, else
    ``default_backward_implementation``), that one is preferred; otherwise the
    first applicable implementation is returned.

    :param forward_sdfg: the SDFG containing the forward pass.
    :param forward_state: the state containing ``node``.
    :param node: the forward-pass node to differentiate.
    :raises ValueError: if a registered implementation has no 'name' argument.
    """
    valid_impls = []
    for impl, args in BackwardImplementation.extensions().items():
        if 'name' not in args:
            raise ValueError(f'Expected name in arguments of implementation {impl}.')
        # An implementation matches either by Python node type or by ONNX op name.
        matches_node_type = 'node_type' in args and isinstance(node, args['node_type'])
        matches_onnx_op = (isinstance(node, ONNXOp) and 'op' in args
                           and node.schema.name == args['op'])
        if matches_node_type or matches_onnx_op:
            if impl.backward_can_be_applied(node, forward_state, forward_sdfg):
                valid_impls.append((args['name'], impl))
    # Requested implementation: the explicit setting wins over the node default.
    if isinstance(node, ONNXOp) and node.backward_implementation:
        implementation = node.backward_implementation
    elif isinstance(node, ONNXOp) and node.default_backward_implementation:
        implementation = node.default_backward_implementation
    else:
        implementation = None
    if implementation:
        filtered_impls = [i for name, i in valid_impls if name == implementation]
        if filtered_impls:
            return filtered_impls[0]
        # BUGFIX: report the implementation that was actually requested.  The
        # old message interpolated node.backward_implementation, which is
        # empty/None when the request came from default_backward_implementation.
        log.warning(f'Requested backward implementation {implementation} on {node} could not be applied. Falling back to default selection.')
    if valid_impls:
        return valid_impls[0][1]
    return None
def generate_ccp_dataset(args):
    """Generate the A/B train and val splits of the Clothing-Co-Parsing dataset.

    Expects ``args`` to carry ``data_root``, ``save_root``, ``cat1`` and
    ``cat2``.  As a side effect, derived paths, annotation id lists and the
    label list are stored back onto ``args`` before the per-split generators
    run.  Raises FileExistsError if ``save_root`` already exists, which guards
    against clobbering a previously generated dataset.
    """
    args.data_root = Path(args.data_root)
    args.img_root = args.data_root / 'photos'
    args.pix_ann_root = args.data_root / 'annotations' / 'pixel-level'
    args.img_ann_root = args.data_root / 'annotations' / 'image-level'
    args.pix_ann_ids = get_ann_ids(args.pix_ann_root)
    args.img_ann_ids = get_ann_ids(args.img_ann_root)
    # label_list.mat holds the category-name array; squeeze drops MATLAB's 2-D wrapping.
    args.label_list = sio.loadmat(str(args.data_root / 'label_list.mat'))['label_list'].squeeze()
    args.save_root = Path(args.save_root)
    # FIX: parents=True lets save_root be a nested path; exist_ok stays False
    # so an already-existing output directory still fails fast as before.
    args.save_root.mkdir(parents=True)
    generate_ccp_dataset_train(args, 'A', args.cat1)
    generate_ccp_dataset_train(args, 'B', args.cat2)
    generate_ccp_dataset_val(args, 'A', args.cat1)
    generate_ccp_dataset_val(args, 'B', args.cat2)
def test_update_user(testdir):
    """Generate and run a property-based test for PUT /user/{username}.

    Writes a petstore test module via the testdir fixture, then asserts the
    generated suite passes.
    """
    # NOTE(review): the '\(' prefixes in this generated source look mangled --
    # they sit where decorators (e.g. a parametrize/settings pair) would be
    # expected.  The literal is behavior-bearing, so it is left byte-identical;
    # confirm against the template that produced it.
    testdir.make_petstore_test('\(method="PUT", endpoint="/user/{username}$")\(max_examples=5, deadline=None)\ndef test_(request, case):\n request.config.HYPOTHESIS_CASES += 1\n assert_str(case.path_parameters["username"])\n assert isinstance(case.body, dict)\n assert_requests_call(case)\n')
    testdir.assert_petstore()
class ZeroLayer(MyModule):
    """Placeholder layer representing the absence of an operation.

    Used as a candidate in architecture configs; it is never meant to be
    executed, so forward raises unconditionally.
    """

    def __init__(self, stride):
        super(ZeroLayer, self).__init__()
        # Kept only so the layer's config round-trips through build_from_config.
        self.stride = stride

    def forward(self, x):
        # Intentionally unreachable: a ZeroLayer must be pruned before execution.
        raise ValueError

    def module_str(self):
        return 'Zero'

    def config(self):
        return {'name': ZeroLayer.__name__, 'stride': self.stride}

    @staticmethod
    def build_from_config(config):
        # FIX: was defined without @staticmethod, so calling it on an instance
        # would have passed the instance itself as `config`.
        return ZeroLayer(**config)
# FIX: the line above this class originally read
# `((device_cc() < 80), 'Device compute capability is insufficient for SM80 tests.')`
# -- a bare tuple expression with no effect.  It is restored as the
# @unittest.skipIf guard it was evidently meant to be; confirm against the
# upstream test suite.
@unittest.skipIf(device_cc() < 80, 'Device compute capability is insufficient for SM80 tests.')
class Conv2dWgradImplicitGemmF16nhwcF16nhwcF32nhwcTensorOpF32SM80(unittest.TestCase):
    """Device-level conv2d wgrad tests: f16 NHWC operands, f32 NHWC output,
    tensor-op f32 accumulation, targeting SM80.

    The five tests differ only in MMA instruction shape, operand alignment,
    threadblock tile, warp count and iterator algorithm, so construction is
    factored into _make_wgrad_operation.
    """

    @staticmethod
    def _make_wgrad_operation(instruction_shape, alignment_ab, threadblock_shape, warp_count, iterator_algorithm):
        """Build a strided-support wgrad Conv2dOperation for SM80.

        Only the parameters that vary between the tests are exposed; the
        element types, stage count, epilogue and swizzle are fixed.
        """
        math_inst = MathInstruction(instruction_shape=instruction_shape, element_a=cutlass.float16, element_b=cutlass.float16, element_accumulator=cutlass.float32, opcode_class=cutlass.OpClass.TensorOp, math_operation=MathOperation.multiply_add)
        A = TensorDescription(element=math_inst.element_a, layout=cutlass.TensorNHWC, alignment=alignment_ab)
        B = TensorDescription(element=math_inst.element_b, layout=cutlass.TensorNHWC, alignment=alignment_ab)
        C = TensorDescription(element=cutlass.float32, layout=cutlass.TensorNHWC, alignment=4)
        tile_description = TileDescription(threadblock_shape=threadblock_shape, stages=3, warp_count=warp_count, math_instruction=math_inst)
        epilogue_functor = LinearCombination(C.element, C.alignment, math_inst.element_accumulator, cutlass.float32)
        return Conv2dOperation(conv_kind=cutlass.conv.Operator.wgrad, iterator_algorithm=iterator_algorithm, arch=80, tile_description=tile_description, A=A, B=B, C=C, stride_support=StrideSupport.Strided, epilogue_functor=epilogue_functor, swizzling_functor=cutlass.IdentitySwizzle1)

    @staticmethod
    def _align4_problem_sizes():
        """Problem sizes whose channel count (12) only permits alignment 4."""
        return [cutlass.conv.Conv2dProblemSize(cutlass.Tensor4DCoord(1, 4, 4, 12), cutlass.Tensor4DCoord(8, 3, 3, 12), cutlass.Tensor4DCoord(0, 0, 0, 0), cutlass.MatrixCoord(3, 3), cutlass.MatrixCoord(1, 1), cutlass.conv.Mode.cross_correlation, 1, 1)]

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
        operation = self._make_wgrad_operation([16, 8, 8], 8, [128, 128, 16], [2, 2, 1], cutlass.conv.IteratorAlgorithm.analytic)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32(self):
        operation = self._make_wgrad_operation([16, 8, 8], 8, [128, 128, 16], [2, 2, 1], cutlass.conv.IteratorAlgorithm.optimized)
        self.assertTrue(test_all_conv2d(operation))

    def test_SM80_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_64x256_32x4_64x64x32(self):
        # Larger MMA shape (16x8x16) with a 64x256x32 threadblock tile.
        operation = self._make_wgrad_operation([16, 8, 16], 8, [64, 256, 32], [1, 4, 1], cutlass.conv.IteratorAlgorithm.optimized)
        self.assertTrue(test_all_conv2d(operation))

    def test_Device_Conv2d_Wgrad_Analytic_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
        operation = self._make_wgrad_operation([16, 8, 8], 4, [128, 128, 16], [2, 2, 1], cutlass.conv.IteratorAlgorithm.analytic)
        self.assertTrue(test_all_conv2d(operation, self._align4_problem_sizes()))

    def test_Device_Conv2d_Wgrad_Optimized_ImplicitGemm_f16nhwc_f16nhwc_f32nhwc_tensor_op_f32_align4(self):
        operation = self._make_wgrad_operation([16, 8, 8], 4, [128, 128, 16], [2, 2, 1], cutlass.conv.IteratorAlgorithm.optimized)
        self.assertTrue(test_all_conv2d(operation, self._align4_problem_sizes()))
class TransformerEncoderLayerImproved(Module):
    """Pre-norm transformer encoder layer with an optional second memory stream.

    Each sublayer normalizes its input first, then adds the sublayer output
    back residually.  When ``d_global2`` is given, a projection of a second
    memory tensor can be injected between the attention and feed-forward
    sublayers in forward().
    """

    def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation='relu', d_global2=None):
        super(TransformerEncoderLayerImproved, self).__init__()
        self.self_attn = MultiheadAttention(d_model, nhead, dropout=dropout)
        # Projection for the optional second memory stream.
        if d_global2 is not None:
            self.linear_global2 = Linear(d_global2, d_model)
        # Position-wise feed-forward sublayer.
        self.linear1 = Linear(d_model, dim_feedforward)
        self.dropout = Dropout(dropout)
        self.linear2 = Linear(dim_feedforward, d_model)
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.dropout1 = Dropout(dropout)
        self.dropout2_2 = Dropout(dropout)
        self.dropout2 = Dropout(dropout)
        self.activation = _get_activation_fn(activation)

    def __setstate__(self, state):
        # Older checkpoints may lack 'activation'; default it to ReLU.
        state.setdefault('activation', F.relu)
        super(TransformerEncoderLayerImproved, self).__setstate__(state)

    def forward(self, src, memory2=None, src_mask=None, src_key_padding_mask=None):
        # Self-attention sublayer: pre-norm, attend, residual add.
        normed = self.norm1(src)
        attn_out = self.self_attn(normed, normed, normed, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(attn_out)
        # Optional second memory, projected to d_model and added residually.
        if memory2 is not None:
            src = src + self.dropout2_2(self.linear_global2(memory2))
        # Feed-forward sublayer: pre-norm, FFN, residual add.
        normed = self.norm2(src)
        ffn_out = self.linear2(self.dropout(self.activation(self.linear1(normed))))
        return src + self.dropout2(ffn_out)
def test_java_options_default_empty():
    """java_options must default to an empty list when none are supplied."""
    cli_parser = _get_command_line_parser(['valid-detector'], [], [])
    parsed = cli_parser.parse_args(['run', 'ex1', 'valid-detector'])
    assert_equals([], parsed.java_options)
class PositionwiseFeedForward(nn.Module):
    """Two-layer position-wise feed-forward block: Linear -> ReLU -> Linear.

    Applied independently at every position; the hidden width ``d_ff``
    defaults to 128 and the output returns to ``d_model``.
    """

    def __init__(self, d_model: int, d_ff: Optional[int] = 128):
        super().__init__()
        # Attribute names are preserved so existing state_dicts still load.
        self._linear1 = nn.Linear(d_model, d_ff)
        self._linear2 = nn.Linear(d_ff, d_model)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = F.relu(self._linear1(x))
        return self._linear2(hidden)
def test_ListArray_nbytes():
    """nbytes of a ListArray equals the sum of its index and content buffer sizes."""
    starts = np.array([4, 100, 1])
    stops = np.array([7, 100, 3, 200])
    content = np.array([6.6, 4.4, 5.5, 7.7, 3.3, 2.2, 1.1, 8.8])
    layout = ak.contents.listarray.ListArray(
        ak.index.Index(starts),
        ak.index.Index(stops),
        ak.contents.numpyarray.NumpyArray(content),
    )
    expected = starts.nbytes + stops.nbytes + content.nbytes
    assert layout.nbytes == expected
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.