code stringlengths 101 5.91M |
|---|
class LastLevelP6P7(nn.Module):
    """Produce FPN levels P6 and P7 from C5 (or P5) via strided 3x3 convolutions."""

    def __init__(self, in_channels, out_channels):
        super(LastLevelP6P7, self).__init__()
        # Each conv halves the spatial resolution (kernel 3, stride 2, padding 1).
        self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1)
        self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1)
        for conv in (self.p6, self.p7):
            nn.init.kaiming_uniform_(conv.weight, a=1)
            nn.init.constant_(conv.bias, 0)
        # P5 can feed P6 directly only when the channel counts already match.
        self.use_P5 = in_channels == out_channels

    def forward(self, c5, p5):
        source = p5 if self.use_P5 else c5
        p6 = self.p6(source)
        p7 = self.p7(F.relu(p6))
        return [p6, p7]
class ResNetForImageClassification(metaclass=DummyObject):
    """Import-time placeholder that errors out unless the torch backend is installed."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        # Fails fast with an informative message when torch is unavailable.
        requires_backends(self, ['torch'])
def build_java_base_graphs():
    """Build a graph for every pickled Java-base definition and report the success count."""
    # Use a context manager so the pickle file handle is always closed
    # (the original `pickle.load(open(...))` leaked it).
    with open(JAVA_BASE, 'rb') as f:
        definitions = pickle.load(f)
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(JAVA_BASE_DIR, exist_ok=True)
    results = parallel_process(definitions, build_single_graph, args=(JAVA_BASE_DIR,))
    succeed = sum(1 for result in results if result)
    # NOTE(review): `type` here is the builtin, so the message prints
    # "<class 'type'>" — this looks like a lost variable name; preserved
    # for behavioral compatibility, confirm the intended label.
    print('%s built %d graphs totally' % (type, succeed))
class TestExpmActionInterval():
    """Tests of expm_multiply evaluated over a linspace interval of time points.

    Each test compares the interval results against the dense reference
    sp_expm(t * A) applied to the same target, at every sample time t.
    """

    def test_sparse_expm_multiply_interval(self):
        # Sparse A; both a matrix B and a vector v are used as targets.
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        n = 40
        k = 3
        endpoint = True
        # Different `num` values exercise different interval-partitioning paths.
        for num in (14, 13, 2):
            A = scipy.sparse.rand(n, n, density=0.05)
            B = np.random.randn(n, k)
            v = np.random.randn(n)
            for target in (B, v):
                X = expm_multiply(A, target, start=start, stop=stop, num=num, endpoint=endpoint)
                samples = np.linspace(start=start, stop=stop, num=num, endpoint=endpoint)
                with suppress_warnings() as sup:
                    # The dense reference path triggers sparse-efficiency warnings.
                    sup.filter(SparseEfficiencyWarning, 'splu converted its input to CSC format')
                    sup.filter(SparseEfficiencyWarning, 'spsolve is more efficient when sparse b is in the CSC matrix format')
                    for (solution, t) in zip(X, samples):
                        assert_allclose(solution, sp_expm((t * A)).dot(target))

    def test_expm_multiply_interval_vector(self):
        # Dense A with vector targets; also checks the traceA keyword variants.
        np.random.seed(1234)
        interval = {'start': 0.1, 'stop': 3.2, 'endpoint': True}
        for (num, n) in product([14, 13, 2], [1, 2, 5, 20, 40]):
            A = scipy.linalg.inv(np.random.randn(n, n))
            v = np.random.randn(n)
            samples = np.linspace(num=num, **interval)
            X = expm_multiply(A, v, num=num, **interval)
            for (solution, t) in zip(X, samples):
                assert_allclose(solution, sp_expm((t * A)).dot(v))
            # Operator form: trace estimated, given exactly, and given wrongly.
            # Even a wrong traceA must still yield correct results.
            Xguess = estimated(expm_multiply)(aslinearoperator(A), v, num=num, **interval)
            Xgiven = expm_multiply(aslinearoperator(A), v, num=num, **interval, traceA=np.trace(A))
            Xwrong = expm_multiply(aslinearoperator(A), v, num=num, **interval, traceA=(np.trace(A) * 5))
            for (sol_guess, sol_given, sol_wrong, t) in zip(Xguess, Xgiven, Xwrong, samples):
                correct = sp_expm((t * A)).dot(v)
                assert_allclose(sol_guess, correct)
                assert_allclose(sol_given, correct)
                assert_allclose(sol_wrong, correct)

    def test_expm_multiply_interval_matrix(self):
        # Dense A with matrix targets, both as ndarray and as LinearOperator.
        np.random.seed(1234)
        interval = {'start': 0.1, 'stop': 3.2, 'endpoint': True}
        for (num, n, k) in product([14, 13, 2], [1, 2, 5, 20, 40], [1, 2]):
            A = scipy.linalg.inv(np.random.randn(n, n))
            B = np.random.randn(n, k)
            samples = np.linspace(num=num, **interval)
            X = expm_multiply(A, B, num=num, **interval)
            for (solution, t) in zip(X, samples):
                assert_allclose(solution, sp_expm((t * A)).dot(B))
            X = estimated(expm_multiply)(aslinearoperator(A), B, num=num, **interval)
            for (solution, t) in zip(X, samples):
                assert_allclose(solution, sp_expm((t * A)).dot(B))

    def test_sparse_expm_multiply_interval_dtypes(self):
        """Mixed int/complex A and B dtypes must still produce the correct endpoint."""
        # int A, int B
        A = scipy.sparse.diags(np.arange(5), format='csr', dtype=int)
        B = np.ones(5, dtype=int)
        Aexpm = scipy.sparse.diags(np.exp(np.arange(5)), format='csr')
        assert_allclose(expm_multiply(A, B, 0, 1)[(- 1)], Aexpm.dot(B))
        # complex A, int B
        A = scipy.sparse.diags(((- 1j) * np.arange(5)), format='csr', dtype=complex)
        B = np.ones(5, dtype=int)
        Aexpm = scipy.sparse.diags(np.exp(((- 1j) * np.arange(5))), format='csr')
        assert_allclose(expm_multiply(A, B, 0, 1)[(- 1)], Aexpm.dot(B))
        # int A, complex B
        A = scipy.sparse.diags(np.arange(5), format='csr', dtype=int)
        B = np.full(5, 1j, dtype=complex)
        Aexpm = scipy.sparse.diags(np.exp(np.arange(5)), format='csr')
        assert_allclose(expm_multiply(A, B, 0, 1)[(- 1)], Aexpm.dot(B))

    def test_expm_multiply_interval_status_0(self):
        self._help_test_specific_expm_interval_status(0)

    def test_expm_multiply_interval_status_1(self):
        self._help_test_specific_expm_interval_status(1)

    def test_expm_multiply_interval_status_2(self):
        self._help_test_specific_expm_interval_status(2)

    def _help_test_specific_expm_interval_status(self, target_status):
        """Sample random problems until one hits `target_status`, then verify its result."""
        np.random.seed(1234)
        start = 0.1
        stop = 3.2
        num = 13  # immediately overwritten by the loop variable below
        endpoint = True
        n = 5
        k = 2
        nrepeats = 10
        nsuccesses = 0
        for num in ([14, 13, 2] * nrepeats):
            A = np.random.randn(n, n)
            B = np.random.randn(n, k)
            # First query only the status; re-run fully when it matches.
            status = _expm_multiply_interval(A, B, start=start, stop=stop, num=num, endpoint=endpoint, status_only=True)
            if (status == target_status):
                (X, status) = _expm_multiply_interval(A, B, start=start, stop=stop, num=num, endpoint=endpoint, status_only=False)
                assert_equal(X.shape, (num, n, k))
                samples = np.linspace(start=start, stop=stop, num=num, endpoint=endpoint)
                for (solution, t) in zip(X, samples):
                    assert_allclose(solution, sp_expm((t * A)).dot(B))
                nsuccesses += 1
        if (not nsuccesses):
            msg = (('failed to find a status-' + str(target_status)) + ' interval')
            raise Exception(msg)
class CovarGMM():
    """Curriculum task sampler that fits a GMM over (task, time, reward) tuples.

    Tasks are sampled at random during a bootstrap phase (and with probability
    `random_task_ratio` afterwards); otherwise a GMM component is chosen in
    proportion to its time-reward covariance and a task is drawn from it.
    """

    def __init__(self, mins, maxs, seed=None, params=None):
        # Avoid the shared mutable-default-dict pitfall; None means "no overrides".
        params = {} if params is None else params
        self.seed = seed
        # NOTE(review): the original used `if not seed`, which also re-seeded
        # for seed == 0; treating only None as "unset" makes 0 a valid seed.
        if self.seed is None:
            self.seed = np.random.randint(42, 424242)
        np.random.seed(self.seed)
        self.mins = np.array(mins)
        self.maxs = np.array(maxs)
        # Candidate component counts tried on every refit.
        self.potential_ks = params.get('potential_ks', np.arange(2, 11, 1))
        self.random_task_ratio = params.get('random_task_ratio', 0.2)
        self.random_task_generator = Box(self.mins, self.maxs, dtype=np.float32)
        self.fit_rate = params.get('fit_rate', 250)
        # Number of purely random bootstrap tasks before the first GMM fit.
        self.nb_random = self.fit_rate
        self.absolute_lp = params.get('absolute_lp', False)
        self.tasks = []
        self.tasks_times_reward_weights = []
        self.all_times = np.arange(0, 1, 1 / self.fit_rate)
        # Bookkeeping for dump().
        self.bk = {'weights': [], 'covariances': [], 'means': [], 'tasks_lps': [], 'episodes': []}

    def update(self, task, reward):
        """Record a (task, time, reward) sample; refit the GMM every `fit_rate` episodes."""
        current_time = self.all_times[len(self.tasks) % self.fit_rate]
        self.tasks.append(task)
        self.tasks_times_reward_weights.append(np.array(task.tolist() + [current_time] + [reward]))
        if len(self.tasks) >= self.nb_random:
            if (len(self.tasks) % self.fit_rate) == 0:
                # Fit candidate GMMs on the latest window and keep the best by AIC.
                cur_tasks_times_reward_weights = np.array(self.tasks_times_reward_weights[-self.fit_rate:])
                potential_gmms = [GMM(n_components=k, covariance_type='full') for k in self.potential_ks]
                potential_gmms = [g.fit(cur_tasks_times_reward_weights) for g in potential_gmms]
                aics = [m.aic(cur_tasks_times_reward_weights) for m in potential_gmms]
                self.gmm = potential_gmms[np.argmin(aics)]
                self.bk['weights'].append(self.gmm.weights_.copy())
                self.bk['covariances'].append(self.gmm.covariances_.copy())
                self.bk['means'].append(self.gmm.means_.copy())
                self.bk['tasks_lps'] = self.tasks_times_reward_weights
                self.bk['episodes'].append(len(self.tasks))

    def sample_task(self):
        """Sample a new task: random during bootstrap/exploration, else GMM-guided."""
        if (len(self.tasks) < self.nb_random) or (np.random.random() < self.random_task_ratio):
            new_task = self.random_task_generator.sample()
        else:
            # Score each component by its time-reward covariance entry.
            self.times_reward_weights_covars = []
            for (pos, covar, w) in zip(self.gmm.means_, self.gmm.covariances_, self.gmm.weights_):
                if self.absolute_lp:
                    self.times_reward_weights_covars.append(np.abs(covar[-2, -1]))
                else:
                    self.times_reward_weights_covars.append(max(0, covar[-2, -1]))
            idx = proportional_choice(self.times_reward_weights_covars, eps=0.0)
            # Drop the trailing (time, reward) dims to recover the task vector.
            new_task = np.random.multivariate_normal(self.gmm.means_[idx], self.gmm.covariances_[idx])[:-2]
            new_task = np.clip(new_task, self.mins, self.maxs).astype(np.float32)
        return new_task

    def dump(self, dump_dict):
        """Merge the bookkeeping dict into `dump_dict` and return it."""
        dump_dict.update(self.bk)
        return dump_dict
def register_Ns3MgtProbeResponseHeader_methods(root_module, cls):
    """Register constructors and methods of ns3::MgtProbeResponseHeader on `cls`.

    Auto-generated PyBindGen registration code: copy and default constructors,
    the virtual ns3::Header (de)serialization interface, and getters/setters
    for the probe-response information elements (capabilities, supported
    rates, HT/VHT/HE capabilities and operation, ERP/EDCA/DSSS parameter
    sets, SSID, timestamp, beacon interval).
    """
    cls.add_constructor([param('ns3::MgtProbeResponseHeader const &', 'arg0')])
    cls.add_constructor([])
    # Virtual ns3::Header interface overrides.
    cls.add_method('Deserialize', 'uint32_t', [param('ns3::Buffer::Iterator', 'start')], is_virtual=True)
    # Const getters for the header fields / information elements.
    cls.add_method('GetBeaconIntervalUs', 'uint64_t', [], is_const=True)
    cls.add_method('GetCapabilities', 'ns3::CapabilityInformation', [], is_const=True)
    cls.add_method('GetDsssParameterSet', 'ns3::DsssParameterSet', [], is_const=True)
    cls.add_method('GetEdcaParameterSet', 'ns3::EdcaParameterSet', [], is_const=True)
    cls.add_method('GetErpInformation', 'ns3::ErpInformation', [], is_const=True)
    cls.add_method('GetHeCapabilities', 'ns3::HeCapabilities', [], is_const=True)
    cls.add_method('GetHtCapabilities', 'ns3::HtCapabilities', [], is_const=True)
    cls.add_method('GetHtOperation', 'ns3::HtOperation', [], is_const=True)
    cls.add_method('GetInstanceTypeId', 'ns3::TypeId', [], is_const=True, is_virtual=True)
    cls.add_method('GetSerializedSize', 'uint32_t', [], is_const=True, is_virtual=True)
    cls.add_method('GetSsid', 'ns3::Ssid', [], is_const=True)
    cls.add_method('GetSupportedRates', 'ns3::SupportedRates', [], is_const=True)
    cls.add_method('GetTimestamp', 'uint64_t', [])
    cls.add_method('GetTypeId', 'ns3::TypeId', [], is_static=True)
    cls.add_method('GetVhtCapabilities', 'ns3::VhtCapabilities', [], is_const=True)
    cls.add_method('GetVhtOperation', 'ns3::VhtOperation', [], is_const=True)
    cls.add_method('Print', 'void', [param('std::ostream &', 'os')], is_const=True, is_virtual=True)
    cls.add_method('Serialize', 'void', [param('ns3::Buffer::Iterator', 'start')], is_const=True, is_virtual=True)
    # Setters mirroring the getters above.
    cls.add_method('SetBeaconIntervalUs', 'void', [param('uint64_t', 'us')])
    cls.add_method('SetCapabilities', 'void', [param('ns3::CapabilityInformation', 'capabilities')])
    cls.add_method('SetDsssParameterSet', 'void', [param('ns3::DsssParameterSet', 'dsssParameterSet')])
    cls.add_method('SetEdcaParameterSet', 'void', [param('ns3::EdcaParameterSet', 'edcaParameterSet')])
    cls.add_method('SetErpInformation', 'void', [param('ns3::ErpInformation', 'erpInformation')])
    cls.add_method('SetHeCapabilities', 'void', [param('ns3::HeCapabilities', 'hecapabilities')])
    cls.add_method('SetHtCapabilities', 'void', [param('ns3::HtCapabilities', 'htcapabilities')])
    cls.add_method('SetHtOperation', 'void', [param('ns3::HtOperation', 'htoperation')])
    cls.add_method('SetSsid', 'void', [param('ns3::Ssid', 'ssid')])
    cls.add_method('SetSupportedRates', 'void', [param('ns3::SupportedRates', 'rates')])
    cls.add_method('SetVhtCapabilities', 'void', [param('ns3::VhtCapabilities', 'vhtcapabilities')])
    cls.add_method('SetVhtOperation', 'void', [param('ns3::VhtOperation', 'vhtoperation')])
    return
class TuneReportHook(EvalHook):
    """EvalHook variant that checkpoints and reports eval metrics to Ray Tune."""

    def __init__(self, eval_period, eval_function):
        super().__init__(eval_period, eval_function)
        # Counts completed evaluations; used as the Tune checkpoint step.
        self.step = 0

    def _do_eval(self):
        results = self._func()
        if results:
            assert isinstance(results, dict), 'Eval function must return a dict. Got {} instead.'.format(results)
            # Validate that every (flattened) metric is convertible to float.
            flattened_results = flatten_results_dict(results)
            for (k, v) in flattened_results.items():
                try:
                    v = float(v)
                except Exception:
                    raise ValueError("[EvalHook] eval_function should return a nested dict of float. Got '{}: {}' instead.".format(k, v))
        torch.cuda.empty_cache()
        self.step += 1
        # Save a checkpoint into the directory Tune provides for this step.
        with tune.checkpoint_dir(step=self.step) as checkpoint_dir:
            additional_state = {'epoch': int(self.trainer.epoch)}
            self.trainer.checkpointer.save_dir = checkpoint_dir
            self.trainer.checkpointer.save(name='checkpoint', **additional_state)
        # NOTE(review): indentation was lost upstream; this layout assumes
        # reporting happens on every eval. `results['Rank-1']` would raise if
        # the eval function returned a falsy result — confirm against the
        # original hook implementation.
        metrics = dict(r1=results['Rank-1'], map=results['mAP'], score=((results['Rank-1'] + results['mAP']) / 2))
        tune.report(**metrics)
def ensure_same_backend(*layouts: Any, default_backend: (str | Backend)='cpu') -> list[Any]:
    """Convert every backend-aware layout to one shared backend.

    Objects lacking a `backend`/`to_backend` attribute pass through unchanged.
    """
    found: set[Backend] = set()
    for candidate in layouts:
        if hasattr(candidate, 'backend'):
            found.add(candidate.backend)
    # Prefer a backend common to the inputs; otherwise regularize the default.
    if len(found) >= 1:
        target = common_backend(found)
    else:
        target = regularize_backend(default_backend)
    converted = []
    for candidate in layouts:
        converted.append(candidate.to_backend(target) if hasattr(candidate, 'to_backend') else candidate)
    return converted
def get_config():
    """Return the default training hyper-parameter configuration."""
    settings = {
        'learning_rate': 0.01,
        'momentum': 0.9,
        'batch_size': 128,
        'num_epochs': 10,
        'rounds_to_train': 3,
    }
    return ml_collections.ConfigDict(settings)
class LanguageDecoder(nn.Module):
    """LSTM-cell language decoder mapping attended features to vocabulary logits."""

    def __init__(self, in_dim, out_dim, **kwargs):
        super().__init__()
        hidden = kwargs['hidden_dim']
        self.language_lstm = nn.LSTMCell(in_dim + hidden, hidden, bias=True)
        self.fc = weight_norm(nn.Linear(hidden, out_dim))
        self.dropout = nn.Dropout(p=kwargs['dropout'])
        self.init_weights(kwargs['fc_bias_init'])

    def init_weights(self, fc_bias_init):
        # Constant bias, small uniform weights for the output projection.
        self.fc.bias.data.fill_(fc_bias_init)
        self.fc.weight.data.uniform_(-0.1, 0.1)

    def forward(self, weighted_attn):
        # Hidden states are shared through a per-device registry entry.
        state = registry.get(f'{weighted_attn.device}_lstm_state')
        h_td, _ = state['td_hidden']
        h_lm, c_lm = state['lm_hidden']
        h_lm, c_lm = self.language_lstm(torch.cat([weighted_attn, h_td], dim=1), (h_lm, c_lm))
        predictions = self.fc(self.dropout(h_lm))
        state['lm_hidden'] = (h_lm, c_lm)
        return predictions
def writeJSONLine(data, path):
    """Serialize each item in `data` as JSON and write it on its own line to `path`."""
    with open(path, 'w') as f:
        f.writelines('%s\n' % json.dumps(item) for item in data)
    return None
# NOTE(review): the next line reads like a stripped decorator (likely
# `@test_utils.test(arch=get_host_arch_list())`) — confirm against the
# original file before running.
_utils.test(arch=get_host_arch_list())
def test_offset_must_throw_vector():
    """Vector fields must reject offsets whose rank mismatches (or lacks) a shape."""
    with pytest.raises(ti.TaichiCompilationError, match='The dimensionality of shape and offset must be the same'):
        a = ti.Vector.field(3, dtype=ti.f32, shape=3, offset=(3, 4))
    with pytest.raises(ti.TaichiCompilationError, match='shape cannot be None when offset is set'):
        b = ti.Vector.field(3, dtype=ti.f32, shape=None, offset=(3,))
def subprocess_fn(rank, args, temp_dir):
    """Per-process entry point: set up (optional) distributed state, then run metrics.

    Runs on every rank; only rank 0 prints summaries and reports results.
    `args` carries the generator snapshot, dataset kwargs, metric list,
    verbosity and the GPU count.
    """
    dnnlib.util.Logger(should_flush=True)
    # Initialize torch.distributed via a shared init file in temp_dir (multi-GPU only).
    if (args.num_gpus > 1):
        init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
        if (os.name == 'nt'):
            # Windows: gloo backend and file:/// URI form.
            init_method = ('file:///' + init_file.replace('\\', '/'))
            torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
        else:
            init_method = f'file://{init_file}'
            torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
    sync_device = (torch.device('cuda', rank) if (args.num_gpus > 1) else None)
    training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
    # Silence custom-op build output on non-primary or non-verbose ranks.
    if ((rank != 0) or (not args.verbose)):
        custom_ops.verbosity = 'none'
    device = torch.device('cuda', rank)
    torch.backends.cudnn.benchmark = True
    # TF32 disabled — presumably for reproducible metric values; confirm.
    torch.backends.cuda.matmul.allow_tf32 = False
    torch.backends.cudnn.allow_tf32 = False
    G = copy.deepcopy(args.G).eval().requires_grad_(False).to(device)
    # Rebuild the generator from its init args and copy parameters over, so
    # the metric run uses the current Generator class implementation.
    with torch.no_grad():
        from training.networks import Generator
        G2 = Generator(*G.init_args, **G.init_kwargs).to(device)
        misc.copy_params_and_buffers(G, G2, require_all=False)
        G = G2
    if ((rank == 0) and args.verbose):
        z = torch.empty([1, G.z_dim], device=device)
        c = torch.empty([1, G.c_dim], device=device)
        misc.print_module_summary(G, [z, c])
    # Compute and (on rank 0) report each requested metric.
    for metric in args.metrics:
        if ((rank == 0) and args.verbose):
            print(f'Calculating {metric}...')
        progress = metric_utils.ProgressMonitor(verbose=args.verbose)
        result_dict = metric_main.calc_metric(metric=metric, G=G, dataset_kwargs=args.dataset_kwargs, num_gpus=args.num_gpus, rank=rank, device=device, progress=progress)
        if (rank == 0):
            metric_main.report_metric(result_dict, run_dir=args.run_dir, snapshot_pkl=args.network_pkl)
        if ((rank == 0) and args.verbose):
            print()
    if ((rank == 0) and args.verbose):
        print('Exiting...')
def collect_files(patterns):
    """Expand (root, pattern) pairs into a list of (absolute, relative) file paths.

    Patterns ending in '.sbd' are treated as files listing contract paths;
    otherwise the pattern is globbed (relative to `root` when given). Only
    existing '.hex' and '.sol' files are kept.
    """
    files = []
    for (root, spec) in patterns:
        if spec.endswith('.sbd'):
            contracts = []
            for sbdfile in glob.glob(spec, recursive=True):
                contracts.extend(sb.io.read_lines(sbdfile))
        elif root:
            try:
                # glob's `root_dir` keyword requires Python >= 3.10.
                contracts = glob.glob(spec, root_dir=root, recursive=True)
            except TypeError:
                raise sb.errors.SmartBugsError(f'{root}:{spec}: colons in file patterns only supported for Python>=3.10')
        else:
            contracts = glob.glob(spec, recursive=True)
        for relfn in contracts:
            root_relfn = os.path.join(root, relfn) if root else relfn
            absfn = os.path.normpath(os.path.abspath(root_relfn))
            # endswith() with a tuple is clearer and safer than slicing the
            # last four characters.
            if os.path.isfile(absfn) and absfn.endswith(('.hex', '.sol')):
                files.append((absfn, relfn))
    return files
# NOTE(review): the next three lines read like stripped pytest decorators
# (a skip-on-32-bit marker plus two `@pytest.mark.parametrize` calls) —
# confirm against the original file before running.
_if_32bit
.parametrize('csr_container', CSR_CONTAINERS)
.parametrize('kernel', ['linear', 'poly', 'rbf'])
def test_svc_iris(csr_container, kernel):
    """Sparse and dense SVC fits on iris must agree on support vectors and predictions."""
    iris_data_sp = csr_container(iris.data)
    sp_clf = svm.SVC(kernel=kernel).fit(iris_data_sp, iris.target)
    clf = svm.SVC(kernel=kernel).fit(iris.data, iris.target)
    assert_allclose(clf.support_vectors_, sp_clf.support_vectors_.toarray())
    assert_allclose(clf.dual_coef_, sp_clf.dual_coef_.toarray())
    assert_allclose(clf.predict(iris.data), sp_clf.predict(iris_data_sp))
    # coef_ only exists for the linear kernel.
    if (kernel == 'linear'):
        assert_allclose(clf.coef_, sp_clf.coef_.toarray())
class SNGAN(nn.Module):
    """Hinge-loss GAN wrapper holding a generator and discriminator built via Hydra."""

    def __init__(self, gen_cfg: DictConfig, disc_cfg: DictConfig, *args, **kwargs):
        super().__init__()
        self.generator = hydra.utils.instantiate(gen_cfg)
        self.discriminator = hydra.utils.instantiate(disc_cfg)

    def gen_backward(self, batch_size):
        """Backprop the generator loss -E[D(G(z))]; return (loss, fake samples)."""
        fake_samples = self.generator.sample(batch_size)
        fake_logits = self.discriminator(fake_samples)
        loss = -torch.mean(fake_logits)
        loss.backward()
        return (loss, fake_samples)

    def disc_backward(self, real_samples):
        """Backprop the hinge discriminator loss and return it."""
        batch_size = real_samples.size(0)
        # The generator runs without grad: only the discriminator is updated here.
        with torch.no_grad():
            fake_samples = self.generator.sample(batch_size)
        real_logits = self.discriminator(real_samples)
        fake_logits = self.discriminator(fake_samples)
        real_term = F.relu(1 - real_logits).mean()
        fake_term = F.relu(1 + fake_logits).mean()
        loss = real_term + fake_term
        loss.backward()
        return loss
# NOTE(review): this line reads like a stripped decorator (likely an
# `@pytest.mark.integtest` marker) — confirm against the original file.
.integtest
def test_manual_pipeline(sampled_app_train_test, sampled_app_roles, binary_task):
    """End-to-end LightAutoML pipeline: selection, two tuned LGBM models, pickling."""
    (train, test) = sampled_app_train_test
    pd_dataset = PandasDataset(train, roles_parser(sampled_app_roles), task=binary_task)
    # Feature-importance-based pre-selection fitted on a single fold.
    selector_iterator = FoldsIterator(pd_dataset, 1)
    pipe = LGBSimpleFeatures()
    model0 = BoostLGBM(default_params={'learning_rate': 0.05, 'num_leaves': 64, 'seed': 0, 'num_threads': 5})
    mbie = ModelBasedImportanceEstimator()
    selector = ImportanceCutoffSelector(pipe, model0, mbie, cutoff=10)
    selector.fit(selector_iterator)
    # Main pipeline: two boosted models, each with its own Optuna tuner.
    pipe = LGBSimpleFeatures()
    params_tuner1 = OptunaTuner(n_trials=10, timeout=300)
    model1 = BoostLGBM(default_params={'learning_rate': 0.05, 'num_leaves': 128})
    params_tuner2 = OptunaTuner(n_trials=100, timeout=300)
    model2 = BoostLGBM(default_params={'learning_rate': 0.025, 'num_leaves': 64})
    total = MLPipeline([(model1, params_tuner1), (model2, params_tuner2)], pre_selection=selector, features_pipeline=pipe, post_selection=None)
    train_valid = FoldsIterator(pd_dataset)
    total.fit_predict(train_valid)
    total.predict(pd_dataset)
    # The fitted pipeline must survive a pickle round trip and still predict.
    with open('automl.pickle', 'wb') as f:
        pickle.dump(total, f)
    with open('automl.pickle', 'rb') as f:
        total = pickle.load(f)
    total.predict(pd_dataset)
    os.remove('automl.pickle')
def match_target_hypo(args, target_outfile, hypo_outfile):
    """Search rerank weight/lenpen settings; return the best (lenpen, w1, w2, w3, score)."""
    n_settings = len(args.weight1)
    if n_settings == 1:
        # Single setting: score it directly (writing hypotheses to file).
        single = score_target_hypo(args, args.weight1[0], args.weight2[0], args.weight3[0], args.lenpen[0], target_outfile, hypo_outfile, True, args.normalize)
        rerank_scores = [single]
    else:
        # Fan the candidate settings out over a worker pool.
        print('launching pool')
        jobs = [(args, args.weight1[i], args.weight2[i], args.weight3[i], args.lenpen[i], target_outfile, hypo_outfile, False, args.normalize) for i in range(n_settings)]
        with Pool(32) as p:
            rerank_scores = p.starmap(score_target_hypo, jobs)
    if len(rerank_scores) <= 1:
        return (args.lenpen[0], args.weight1[0], args.weight2[0], args.weight3[0], rerank_scores[0])
    best_index = np.argmax(rerank_scores)
    best_score = rerank_scores[best_index]
    print('best score', best_score)
    print('best lenpen', args.lenpen[best_index])
    print('best weight1', args.weight1[best_index])
    print('best weight2', args.weight2[best_index])
    print('best weight3', args.weight3[best_index])
    return (args.lenpen[best_index], args.weight1[best_index], args.weight2[best_index], args.weight3[best_index], best_score)
def sec_to_frame(seconds):
    """Convert a time in seconds to frame indices at the module's sample rate / hop size.

    Generalized to accept scalars as well as array-likes; always returns an
    integer numpy array (the previous version required a numpy array because
    it called `.astype` on the floor-division result).
    Relies on module-level `global_fs` (sample rate) and `hopSize`.
    """
    import numpy as np  # local import keeps the function self-contained
    samples = np.asarray(seconds) * global_fs
    frame_idx = (samples // hopSize).astype(int)
    return frame_idx
class Trainer(DefaultTrainer):
    """DefaultTrainer specialization for HOI detection (V-COCO / HICO-DET)."""

    def __init__(self, cfg):
        super().__init__(cfg)
        # Rebuild the checkpointer so checkpoints land in cfg.OUTPUT_DIR.
        self.checkpointer = DetectionCheckpointer(self.model, cfg.OUTPUT_DIR, optimizer=self.optimizer, scheduler=self.scheduler)

    # NOTE(review): the `cls` first parameters indicate these were
    # `@classmethod`s (matching detectron2's DefaultTrainer API); the
    # decorators were apparently lost upstream and are restored here.
    @classmethod
    def build_evaluator(cls, cfg, dataset_name, output_folder=None):
        """Return the evaluator matching the dataset's registered evaluator_type."""
        if output_folder is None:
            output_folder = os.path.join(cfg.OUTPUT_DIR, 'inference')
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
        if evaluator_type == 'vcoco':
            return VCOCOEvaluator(dataset_name, cfg, True, output_folder)
        elif evaluator_type == 'hico-det':
            return HICOEvaluator(dataset_name, cfg, True, output_folder)
        else:
            raise NotImplementedError('no Evaluator for the dataset {} with the type {}'.format(dataset_name, evaluator_type))

    @classmethod
    def test_with_TTA(cls, cfg, model):
        """Run evaluation with test-time augmentation; result keys gain a '_TTA' suffix."""
        logger = logging.getLogger('detectron2.trainer')
        logger.info('Running inference with test-time augmentation ...')
        model = GeneralizedRCNNWithTTA(cfg, model)
        evaluators = [cls.build_evaluator(cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, 'inference_TTA')) for name in cfg.DATASETS.TEST]
        res = cls.test(cfg, model, evaluators)
        res = OrderedDict({(k + '_TTA'): v for (k, v) in res.items()})
        return res

    @classmethod
    def build_train_loader(cls, cfg):
        return build_hoi_train_loader(cfg)

    @classmethod
    def build_test_loader(cls, cfg, dataset_name):
        return build_hoi_test_loader(cfg, dataset_name)
def evaluate_em(model, dataset, tokenizer, collator, opt):
    """Compute the exact-match score of generated answers over `dataset`.

    Returns the weighted-average exact match across distributed workers.
    """
    sampler = SequentialSampler(dataset)
    dataloader = DataLoader(dataset, sampler=sampler, batch_size=opt.per_gpu_batch_size, drop_last=False, num_workers=10, collate_fn=collator)
    model.eval()
    total = 0
    exactmatch = []
    # Unwrap DistributedDataParallel / DataParallel wrappers if present.
    model = (model.module if hasattr(model, 'module') else model)
    with torch.no_grad():
        for (i, batch) in enumerate(dataloader):
            (idx, _, _, context_ids, context_mask) = batch
            outputs = model.generate(input_ids=context_ids.cuda(), attention_mask=context_mask.cuda(), max_length=50)
            # Score each decoded answer against the example's gold answers.
            for (k, o) in enumerate(outputs):
                ans = tokenizer.decode(o, skip_special_tokens=True)
                gold = dataset.get_example(idx[k])['answers']
                score = src.evaluation.ems(ans, gold)
                total += 1
                exactmatch.append(score)
    # Average across workers, weighting by per-worker example count.
    (exactmatch, total) = src.util.weighted_average(np.mean(exactmatch), total, opt)
    return exactmatch
class TestQuantizeFx(QuantizationTestCase):
def _get_conv_linear_test_cases(self):
    """Build shared test cases for functional/module conv and linear quantization.

    Each tuple is (is_dynamic, ModuleClass, module_constructor_inputs,
    inputs, quantized_node, weight_prepack_node).
    """
    class Conv(torch.nn.Module):
        # Functional conv2d with an explicit weight Parameter.
        def __init__(self, weight):
            super().__init__()
            self.weight = torch.nn.Parameter(weight)
            self.stride = (1, 1)
            self.padding = (0, 0)
            self.dilation = (1, 1)
            self.groups = 1

        def forward(self, x):
            return F.conv2d(x, self.weight, None, self.stride, self.padding, self.dilation, self.groups)
    conv_input = torch.rand(1, 3, 224, 224)
    conv_weight = torch.rand(3, 3, 3, 3)

    class Linear(torch.nn.Module):
        # Functional linear with an explicit weight Parameter.
        def __init__(self, weight):
            super().__init__()
            self.weight = torch.nn.Parameter(weight)

        def forward(self, x):
            return F.linear(x, self.weight)
    linear_input = torch.rand(8, 5)
    linear_weight = torch.rand(10, 5)

    class LinearModule(torch.nn.Module):
        # Module form: quantizes to nnq/nnqd.Linear with no prepack node.
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(5, 10)

        def forward(self, x):
            return self.linear(x)
    linear_module_input = torch.rand(8, 5)
    tests = [(False, Conv, (conv_weight,), (conv_input,), ns.call_function(torch.ops.quantized.conv2d), ns.call_function(torch.ops.quantized.conv2d_prepack)), (True, Linear, (linear_weight,), (linear_input,), ns.call_function(torch.ops.quantized.linear_dynamic), ns.call_function(torch.ops.quantized.linear_prepack)), (False, Linear, (linear_weight,), (linear_input,), ns.call_function(torch.ops.quantized.linear), ns.call_function(torch.ops.quantized.linear_prepack)), (True, LinearModule, (), (linear_module_input,), ns.call_module(nnqd.Linear), None), (False, LinearModule, (), (linear_module_input,), ns.call_module(nnq.Linear), None)]
    return tests
'\n Unit tests for functionalities\n '
def test_functional_no_debug(self):
    """Functional conv/linear quantize correctly with prepack nodes folded (debug off)."""
    for case in self._get_conv_linear_test_cases():
        (is_dynamic, ModuleClass, module_constructor_inputs, inputs, quantized_node, weight_prepack_node) = case
        quant_type = QuantType.DYNAMIC if is_dynamic else QuantType.STATIC
        # Non-debug mode must fold the weight-prepack node away entirely.
        node_occurrence = {}
        if weight_prepack_node:
            node_occurrence[weight_prepack_node] = 0
        self.checkGraphModeFxOp(ModuleClass(*module_constructor_inputs), inputs, quant_type, expected_node=quantized_node, expected_node_occurrence=node_occurrence, debug=False)
def test_functional_debug(self):
    """Functional conv/linear quantize correctly with prepack nodes kept (debug on)."""
    for case in self._get_conv_linear_test_cases():
        (is_dynamic, ModuleClass, module_constructor_inputs, inputs, quantized_node, weight_prepack_node) = case
        quant_type = QuantType.DYNAMIC if is_dynamic else QuantType.STATIC
        # Debug mode must keep exactly one weight-prepack node in the graph.
        node_occurrence = {}
        if weight_prepack_node:
            node_occurrence[weight_prepack_node] = 1
        self.checkGraphModeFxOp(ModuleClass(*module_constructor_inputs), inputs, quant_type, expected_node=quantized_node, expected_node_occurrence=node_occurrence, debug=True)
def test_dynamic_quant_weight_observer(self):
    """Debug dynamic quantization must store weight qparams matching a fresh observer."""
    class M(torch.nn.Module):
        def __init__(self, weight):
            super().__init__()
            self.weight = torch.nn.Parameter(weight)

        def forward(self, x):
            return F.linear(x, self.weight)
    m = M(torch.rand(1, 1)).eval()
    original = symbolic_trace(m)
    qconfig = default_dynamic_qconfig
    qconfig_dict = {'': qconfig}
    quantized = quantize_dynamic_fx(original, qconfig_dict, debug=True)
    # _scale_0/_zero_point_0 are attached to the module by debug quantization.
    qparams = (quantized._scale_0, quantized._zero_point_0)
    # Recompute reference qparams by running the qconfig's weight observer.
    weight_obs = qconfig.weight()
    weight_obs(quantized.weight)
    ref_qparams = weight_obs.calculate_qparams()
    self.assertEqual(qparams, ref_qparams)
def test_dynamic_quant_fp16(self):
    """fp16 dynamic quantization of functional and module linear, debug and non-debug."""
    class Linear(torch.nn.Module):
        # Functional linear with an explicit weight Parameter.
        def __init__(self, weight):
            super().__init__()
            self.weight = torch.nn.Parameter(weight)

        def forward(self, x):
            return F.linear(x, self.weight)
    linear_input = torch.rand(8, 5)
    linear_weight = torch.rand(10, 5)

    class LinearModule(torch.nn.Module):
        # Module form of linear.
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(5, 10)

        def forward(self, x):
            return self.linear(x)
    linear_module_input = torch.rand(8, 5)
    # Tuples: (ModuleClass, ctor_inputs, inputs, quantized_node, weight_prepack_node).
    tests = [(Linear, (linear_weight,), (linear_input,), ns.call_function(torch.ops.quantized.linear_dynamic), ns.call_function(torch.ops.quantized.linear_prepack_fp16)), (LinearModule, (), (linear_module_input,), ns.call_module(nnqd.Linear), None)]
    for (ModuleClass, module_constructor_inputs, inputs, quantized_node, weight_prepack_node) in tests:
        for debug in [True, False]:
            node_occurrence = dict()
            if weight_prepack_node:
                # Debug keeps the prepack node; non-debug folds it away.
                if debug:
                    node_occurrence[weight_prepack_node] = 1
                else:
                    node_occurrence[weight_prepack_node] = 0
            m = ModuleClass(*module_constructor_inputs).eval()
            m = symbolic_trace(m)
            qconfig_dict = {'': float16_dynamic_qconfig}
            m = quantize_dynamic_fx(m, qconfig_dict, debug=debug)
            self.checkGraphModuleNodes(m, expected_node_occurrence=node_occurrence)
# NOTE(review): the next three lines read like stripped decorators (likely
# `@unittest.skipIf(not TEST_MULTIGPU, ...)`, `@unittest.skipIf(not TEST_CUDA, ...)`
# and a qengine-override marker) — confirm against the original file.
((not TEST_MULTIGPU), 'multi-GPU not supported')
((not TEST_CUDA), 'CUDA unavailable')
_qengines
def test_qat_prepare_device_affinity(self):
    """prepare_fx for QAT must keep all parameters and buffers on the model's device."""
    class Model(nn.Module):
        def __init__(self):
            super(Model, self).__init__()
            self.conv = nn.Conv2d(1, 1, 1)
            self.bn = nn.BatchNorm2d(1)
            self.relu = nn.ReLU()

        def forward(self, x):
            x = self.conv(x)
            x = self.bn(x)
            x = self.relu(x)
            return x
    model = Model()
    qengine = torch.backends.quantized.engine
    qconfig_dict = {'': torch.quantization.get_default_qat_qconfig(qengine)}
    device = torch.device('cuda:0')
    model.to(device)
    model = symbolic_trace(model)
    model = fuse_fx(model)
    model = prepare_fx(model, qconfig_dict)
    input = torch.randn(4, 1, 4, 4, device=device)
    model(input)
    # All parameters and buffers must share exactly one device: cuda:0.
    model_devices = ({p.device for p in model.parameters()} | {p.device for p in model.buffers()})
    self.assertEqual(len(model_devices), 1)
    model_device = next(iter(model_devices))
    self.assertEqual(model_device, device)
def test_inplace_option(self):
    """Quantizing with inplace=True must produce the same outputs as inplace=False."""
    class ConvModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(3, 3, 3)

        def forward(self, x):
            return self.conv(x)

    traced = symbolic_trace(ConvModel().eval())
    qconfig_dict = {'': default_qconfig}
    # Quantize out-of-place first, then quantize the same traced module in place.
    out_of_place = quantize_static_fx(traced, qconfig_dict, test_only_eval_fn, [self.img_data_2d], inplace=False)
    in_place = quantize_static_fx(traced, qconfig_dict, test_only_eval_fn, [self.img_data_2d], inplace=True)
    sample = self.img_data_2d[0][0]
    self.assertEqual(out_of_place(sample), in_place(sample))
def test_dict_output(self):
    """Models taking and returning dicts must survive prepare/convert and still run."""
    class DictModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)

        def forward(self, x):
            return {'output': self.conv(x['input'])}

    example = {'input': torch.randn(1, 1, 1, 1)}
    prepared = prepare_static_fx(symbolic_trace(DictModel()).eval(), {'': default_qconfig})
    prepared(example)  # calibration pass
    converted = convert_static_fx(prepared)
    converted(example)
def test_qconfig_none(self):
    """A `module_name` qconfig of None must leave that submodule unquantized."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.conv1 = nn.Conv2d(1, 1, 1)
            self.conv2 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv1(x)
            x = self.conv2(x)
            return x
    m = M().eval()
    m = symbolic_trace(m)
    # conv2 is explicitly opted out of quantization.
    qconfig_dict = {'': default_qconfig, 'module_name': [('conv2', None)]}
    m = prepare_static_fx(m, qconfig_dict)
    data = torch.randn(1, 1, 1, 1)
    m(data)
    m = convert_static_fx(m)
    m(data)
    # Expected graph: quantize -> quantized conv1 -> dequantize -> float conv2.
    node_list = [ns.call_function(torch.quantize_per_tensor), ns.call_module(nnq.Conv2d), ns.call_method('dequantize'), ns.call_module(nn.Conv2d)]
    self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_qconfig_module_type(self):
    """An 'object_type' qconfig entry quantizes every module of that type."""
    class TwoConvs(torch.nn.Module):
        def __init__(self):
            super(TwoConvs, self).__init__()
            self.conv1 = nn.Conv2d(1, 1, 1)
            self.conv2 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            return self.conv2(self.conv1(x))

    model = symbolic_trace(TwoConvs().eval())
    model = prepare_static_fx(model, {'object_type': [(torch.nn.Conv2d, default_qconfig)]})
    sample = torch.randn(1, 1, 1, 1)
    model(sample)  # calibration pass
    model = convert_static_fx(model)
    model(sample)
    # Both convs quantized: quantize -> qconv -> qconv -> dequantize.
    expected = [ns.call_function(torch.quantize_per_tensor), ns.call_module(nnq.Conv2d), ns.call_module(nnq.Conv2d), ns.call_method('dequantize')]
    self.checkGraphModuleNodes(model, expected_node_list=expected)
def test_qconfig_function(self):
    """An 'object_type' entry for operator.add quantizes the add call."""
    class AddModel(torch.nn.Module):
        def __init__(self):
            super(AddModel, self).__init__()

        def forward(self, x, y):
            return (x + y)

    model = symbolic_trace(AddModel().eval())
    model = prepare_static_fx(model, {'object_type': [(operator.add, default_qconfig)]})
    sample = torch.randn(1, 1, 1, 1)
    model(sample, sample)  # calibration pass
    model = convert_static_fx(model)
    model(sample, sample)
    expected = [ns.call_function(torch.quantize_per_tensor), ns.call_function(torch.ops.quantized.add), ns.call_method('dequantize')]
    self.checkGraphModuleNodes(model, expected_node_list=expected)
def test_qconfig_module_name_regex(self):
    """A 'module_name_regex' qconfig entry must match and quantize all fitting submodules."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.conv1 = nn.Conv2d(1, 1, 1)
            self.conv2 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.conv1(x)
            x = self.conv2(x)
            return x
    m = M().eval()
    m = symbolic_trace(m)
    # The regex 'conv*' matches both conv1 and conv2.
    qconfig_dict = {'module_name_regex': [('conv*', default_qconfig)]}
    m = prepare_static_fx(m, qconfig_dict)
    data = torch.randn(1, 1, 1, 1)
    m(data)
    m = convert_static_fx(m)
    m(data)
    # Both convs quantized: quantize -> qconv -> qconv -> dequantize.
    node_list = [ns.call_function(torch.quantize_per_tensor), ns.call_module(nnq.Conv2d), ns.call_module(nnq.Conv2d), ns.call_method('dequantize')]
    self.checkGraphModuleNodes(m, expected_node_list=node_list)
def test_qconfig_precedence(self):
    """qconfig matching precedence: module_name > module_name_regex > object_type > global."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.linear = nn.Linear(1, 1)
            self.conv = nn.Conv2d(1, 1, 1)
            self.module_conv1 = nn.Conv2d(1, 1, 1)
            self.module_conv2 = nn.Conv2d(1, 1, 1)

        def forward(self, x):
            x = self.linear(x)
            x = self.conv(x)
            x = self.module_conv1(x)
            x = self.module_conv2(x)
            return x
    m = M().eval()
    m = symbolic_trace(m)
    global_qconfig = default_qconfig
    object_type_qconfig = default_dynamic_qconfig
    module_name_regex_qconfig = float16_dynamic_qconfig
    module_name_qconfig = default_qat_qconfig
    qconfig_dict = {'': global_qconfig, 'object_type': [(nn.Conv2d, object_type_qconfig)], 'module_name_regex': [('module_conv*', module_name_regex_qconfig)], 'module_name': [('module_conv2', module_name_qconfig)]}
    m = prepare_static_fx(m, qconfig_dict)
    # linear: only the global entry matches.
    self.assertEqual(m.linear.qconfig, global_qconfig)
    # conv: object_type beats global.
    self.assertEqual(m.conv.qconfig, object_type_qconfig)
    # module_conv1: module_name_regex beats object_type.
    self.assertEqual(m.module_conv1.qconfig, module_name_regex_qconfig)
    # module_conv2: exact module_name beats the regex.
    self.assertEqual(m.module_conv2.qconfig, module_name_qconfig)
def test_remove_qconfig(self):
    """convert_static_fx must strip the temporary `qconfig` attribute from all modules."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.avg_pool = torch.nn.AvgPool2d(1)

        def forward(self, x):
            return self.avg_pool(x)

    model = symbolic_trace(M().eval())
    model = prepare_static_fx(model, {'': default_qconfig})
    example = torch.randn(1, 1, 1, 1)
    model(example)
    model = convert_static_fx(model)
    model(example)
    for name, module in model.named_modules():
        self.assertFalse(hasattr(module, 'qconfig'), 'qconfig is not removed for ' + name)
def test_qat_and_script(self):
    """A QAT-prepared FX model must survive TorchScript scripting, and the
    observer/fake-quant enable/disable hooks must work on the scripted module."""
    model = LinearModelWithSubmodule()
    qengine = torch.backends.quantized.engine
    qconfig_dict = {'': torch.quantization.get_default_qat_qconfig(qengine)}
    model = symbolic_trace(model)
    model = prepare_qat_fx(model, qconfig_dict)
    scripted = torch.jit.script(model)
    x = torch.randn(5, 5)
    scripted(x)
    # Expect exactly 4 FakeQuantize attribute accesses in the scripted graph.
    FileCheck().check_count('FakeQuantize = prim::GetAttr[name="', 4, exactly=True).run(scripted.graph)
    # Simulate a short training schedule: observers off at epoch 1,
    # fake-quant off at epoch 2.
    for epoch in range(3):
        if (epoch == 1):
            scripted.apply(torch.quantization.disable_observer)
        if (epoch == 2):
            scripted.apply(torch.quantization.disable_fake_quant)
    # After the loop both flags should read disabled (0) in the state dict.
    matches = ['.fake_quant_enabled', '.observer_enabled']
    for (key, v) in scripted.state_dict().items():
        if any(((x in key) for x in matches)):
            self.assertEqual(v, torch.tensor([0], dtype=torch.uint8))
    # Re-enabling must flip the flags back to 1.
    scripted.apply(torch.quantization.enable_fake_quant)
    scripted.apply(torch.quantization.enable_observer)
    for (key, v) in scripted.state_dict().items():
        if any(((x in key) for x in matches)):
            self.assertEqual(v, torch.tensor([1], dtype=torch.uint8))
def test_save_observer_state_dict(self):
    """Observer statistics saved from one prepared model can be reloaded into another."""
    orig = LinearModelWithSubmodule().eval()
    qconfig_dict = {'': torch.quantization.get_default_qconfig('fbgemm')}
    prepared = prepare_static_fx(symbolic_trace(orig), qconfig_dict)
    x = torch.randn(5, 5)
    prepared(x)  # calibrate so the observers record statistics
    quant = convert_static_fx(prepared)
    # Round-trip the observer state through an in-memory buffer.
    buffer = io.BytesIO()
    torch.save(torch.quantization.get_observer_state_dict(prepared), buffer)
    buffer.seek(0)
    prepared_2 = prepare_static_fx(symbolic_trace(orig), qconfig_dict)
    torch.quantization.load_observer_state_dict(prepared_2, torch.load(buffer))
    quant_2 = convert_static_fx(prepared_2)
    # Identical observer state must yield identical quantized outputs.
    self.assertEqual(quant(x), quant_2(x))
def test_custom_module_class(self):
    """End-to-end static quantization of a user-registered custom module class,
    compared against an equivalent plain two-conv reference model."""
    class CustomModule(torch.nn.Module):
        # The float module the user wants handled by custom observed/quantized classes.
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)
        def forward(self, x):
            return self.conv(x)
    class ObservedCustomModule(torch.nn.Module):
        # Stand-in produced by prepare: wraps the float conv and carries qconfig.
        def __init__(self, conv):
            super().__init__()
            self.conv = conv
        def forward(self, x):
            return self.conv(x)
        # NOTE(review): takes `cls` but has no @classmethod decorator here —
        # presumably lost in extraction; confirm against the original file.
        def from_float(cls, float_module):
            assert hasattr(float_module, 'qconfig')
            observed = cls(float_module.conv)
            observed.qconfig = float_module.qconfig
            return observed
    class QuantizedCustomModule(torch.nn.Module):
        # Final quantized replacement produced by convert.
        def __init__(self, conv):
            super().__init__()
            self.conv = conv
        def forward(self, x):
            return self.conv(x)
        # NOTE(review): takes `cls` but has no @classmethod decorator here —
        # presumably lost in extraction; confirm against the original file.
        def from_observed(cls, observed_module):
            assert hasattr(observed_module, 'qconfig')
            assert hasattr(observed_module, 'activation_post_process')
            observed_module.conv.activation_post_process = observed_module.activation_post_process
            quantized = cls(nnq.Conv2d.from_float(observed_module.conv))
            return quantized
    class DynamicallyQuantizedCustomModule(torch.nn.Module):
        # Dynamic-quantization variant (unused by the STATIC-only loop below).
        def __init__(self, conv):
            super().__init__()
            self.conv = conv
        def forward(self, x):
            return self.conv(x)
        # NOTE(review): also missing a @classmethod decorator — see above.
        def from_observed(cls, observed_module):
            assert hasattr(observed_module, 'qconfig')
            assert hasattr(observed_module, 'activation_post_process')
            quantized = cls(nnqd.Conv2d.from_float(observed_module.conv))
            return quantized
    class M(torch.nn.Module):
        # Model under test: a plain conv followed by the custom module.
        def __init__(self):
            super().__init__()
            self.conv = torch.nn.Conv2d(1, 1, 1)
            self.custom = CustomModule()
        def forward(self, x):
            x = self.conv(x)
            x = self.custom(x)
            return x
    class RefM(torch.nn.Module):
        # Reference: the same computation written as two ordinary convs.
        def __init__(self):
            super().__init__()
            self.conv1 = torch.nn.Conv2d(1, 1, 1)
            self.conv2 = torch.nn.Conv2d(1, 1, 1)
        def forward(self, x):
            x = self.conv1(x)
            x = self.conv2(x)
            return x
    data = torch.randn(1, 1, 1, 1)
    original_m = M()
    original_ref_m = RefM()
    # Copy weights so model and reference compute identical functions.
    original_ref_m.conv1.weight = torch.nn.Parameter(original_m.conv.weight.detach())
    original_ref_m.conv1.bias = torch.nn.Parameter(original_m.conv.bias.detach())
    original_ref_m.conv2.weight = torch.nn.Parameter(original_m.custom.conv.weight.detach())
    original_ref_m.conv2.bias = torch.nn.Parameter(original_m.custom.conv.bias.detach())
    from torch._fx.symbolic_trace import Tracer
    class CustomTracer(Tracer):
        # Keep CustomModule opaque during tracing so it is swapped as a unit.
        def is_leaf_module(self, m, module_qualified_name):
            return ((m.__module__.startswith('torch.nn') and (not isinstance(m, torch.nn.Sequential))) or isinstance(m, CustomModule))
    for quant_type in [QuantType.STATIC]:
        register_observed_custom_module_mapping(CustomModule, ObservedCustomModule)
        register_quantized_custom_module_mapping(CustomModule, QuantizedCustomModule)
        m = CustomTracer().trace(original_m).eval()
        qconfig_dict = {'': default_qconfig}
        m = prepare_static_fx(m, qconfig_dict)
        m(data)
        # Three observers: model input, conv output, custom-module output.
        count_check = {ns.call_module(torch.quantization.MinMaxObserver): 3}
        self.checkGraphModuleNodes(m, expected_node_occurrence=count_check)
        m = convert_static_fx(m)
        count_check = {ns.call_function(torch.quantize_per_tensor): 1, ns.call_module(nnq.Conv2d): 1, ns.call_method('dequantize'): 1}
        self.checkGraphModuleNodes(m, expected_node_occurrence=count_check)
        res = m(data)
        # Quantize the reference the ordinary way and compare numerics.
        ref_m = symbolic_trace(original_ref_m).eval()
        ref_m = prepare_fx(ref_m, qconfig_dict)
        ref_m(data)
        ref_m = convert_fx(ref_m)
        ref_res = ref_m(data)
        self.assertEqual(res, ref_res)
class FitSolverError(FitError):
    """Raised when the solver for the MLE equations fails to converge.

    The solver's message has its newlines stripped and is appended to a
    fixed prefix so the full reason survives in ``args`` / ``str(exc)``.
    """

    def __init__(self, mesg):
        emsg = 'Solver for the MLE equations failed to converge: '
        emsg += mesg.replace('\n', '')
        # Route through the base initializer instead of assigning self.args
        # directly; the resulting args tuple (emsg,) is identical, but base
        # __init__ side effects (if any) are no longer skipped.
        super().__init__(emsg)
# NOTE(review): the line below looks like a mangled decorator (probably
# `@<registry>.register('mobilenet_v3')`) whose target object was lost in
# extraction; confirm against the original file.
.register('mobilenet_v3')
def mobilenet_v3():
    """Build a MobileNetV3 backbone; optionally convert convs to same-padding
    variants when the config flag is set."""
    model = MobileNetV3()
    if cfg.BACKBONE.MV3.SAME_PAD:
        model = convert_conv2convsamepadding_model(model)
    return model
class SuzukiSporadicGroup(PermutationGroup_unique):
    """The sporadic Suzuki group Suz as a permutation group on 1782 points,
    constructed via GAP's AtlasRep package."""

    def __init__(self):
        # AtlasRep provides the AtlasGroup constructor used below.
        libgap.load_package('atlasrep')
        # NOTE(review): calls PermutationGroup_generic.__init__ even though the
        # base class is PermutationGroup_unique — presumably deliberate to
        # bypass the uniqueness machinery; confirm.
        PermutationGroup_generic.__init__(self, gap_group='AtlasGroup("Suz")')

    def _repr_(self):
        return 'Sporadic Suzuki group acting on 1782 points'
class BaseOptions():
    """Command-line option handling shared by train/test phases.

    Declares all common arguments, lets the selected model and dataset
    register their own options, and can persist/restore the parsed options
    under the experiment's checkpoint directory.
    """

    def __init__(self):
        # Set to True once initialize() has registered the arguments.
        self.initialized = False

    def initialize(self, parser):
        """Register every common argument on `parser`; returns the parser."""
        # Experiment bookkeeping.
        parser.add_argument('--name', type=str, default='label2coco', help='name of the experiment. It decides where to store samples and models')
        parser.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
        parser.add_argument('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
        parser.add_argument('--model', type=str, default='pix2pix', help='which model to use')
        parser.add_argument('--norm_G', type=str, default='spectralinstance', help='instance normalization or batch normalization')
        parser.add_argument('--norm_D', type=str, default='spectralinstance', help='instance normalization or batch normalization')
        parser.add_argument('--norm_E', type=str, default='spectralinstance', help='instance normalization or batch normalization')
        parser.add_argument('--phase', type=str, default='train', help='train, val, test, etc')
        # Input preprocessing / sizing.
        parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
        parser.add_argument('--preprocess_mode', type=str, default='scale_width_and_crop', help='scaling and cropping of images at load time.', choices=('resize_and_crop', 'crop', 'scale_width', 'scale_width_and_crop', 'scale_shortside', 'scale_shortside_and_crop', 'fixed', 'none'))
        parser.add_argument('--load_size', type=int, default=512, help='Scale images to this size. The final image will be cropped to --crop_size.')
        parser.add_argument('--crop_size', type=int, default=512, help='Crop to the width of crop_size (after initially scaling the images to load_size.)')
        parser.add_argument('--aspect_ratio', type=float, default=1.0, help='The ratio width/height. The final height of the load image will be crop_size/aspect_ratio')
        parser.add_argument('--label_nc', type=int, default=182, help='# of input label classes without unknown class. If you have unknown class as class label, specify --contain_dopntcare_label.')
        parser.add_argument('--contain_dontcare_label', action='store_true', help='if the label map contains dontcare label (dontcare=255)')
        parser.add_argument('--output_nc', type=int, default=3, help='# of output image channels')
        # Dataset loading.
        parser.add_argument('--dataroot', type=str, default='./datasets/cityscapes/')
        parser.add_argument('--dataset_mode', type=str, default='coco')
        parser.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
        parser.add_argument('--no_flip', action='store_true', help='if specified, do not flip the images for data argumentation')
        parser.add_argument('--nThreads', default=0, type=int, help='# threads for loading data')
        parser.add_argument('--max_dataset_size', type=int, default=sys.maxsize, help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
        parser.add_argument('--load_from_opt_file', action='store_true', help='load the options from checkpoints and use that as default')
        parser.add_argument('--cache_filelist_write', action='store_true', help='saves the current filelist into a text file, so that it loads faster')
        parser.add_argument('--cache_filelist_read', action='store_true', help='reads from the file list cache')
        parser.add_argument('--center_crop', action='store_true', help='if specified, crop to center the images')
        parser.add_argument('--no_one_hot', action='store_true', help='no one hot')
        parser.add_argument('--reverse_mapping', action='store_true', help='if specified, do not flip the images for data argumentation')
        # Network architecture (low-res / high-res streams).
        parser.add_argument('--learned_ds', action='store_true', help='model learns the downsampling')
        parser.add_argument('--learned_ds_factor', type=int, default=16, help='enables partial learned_ds (S2 in sec. 3.2)')
        parser.add_argument('--lr_width', type=int, default=64, help='low res stream strided conv number of channles')
        parser.add_argument('--lr_max_width', type=int, default=1024, help='low res stream conv number of channles')
        parser.add_argument('--lr_depth', type=int, default=7, help='low res stream number of conv layers')
        parser.add_argument('--hr_width', type=int, default=64, help='high res stream number of MLP channles')
        parser.add_argument('--hr_depth', type=int, default=5, help='high res stream number of MLP layers')
        parser.add_argument('--reflection_pad', action='store_true', help='if specified, use reflection padding at lr stream')
        parser.add_argument('--replicate_pad', action='store_true', help='if specified, use replicate padding at lr stream')
        parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate for adam')
        parser.add_argument('--netG', type=str, default='ASAPNets', help='selects model to use for netG')
        parser.add_argument('--init_type', type=str, default='xavier', help='network initialization [normal|xavier|kaiming|orthogonal]')
        parser.add_argument('--init_variance', type=float, default=0.02, help='variance of the initialization distribution')
        parser.add_argument('--hr_coor', choices=('cosine', None), default='cosine')
        parser.add_argument('--nef', type=int, default=16, help='# of encoder filters in the first conv layer')
        parser.add_argument('--use_vae', action='store_true', help='enable training with an image encoder.')
        parser.add_argument('--z_dim', type=int, default=256, help='dimension of the latent z vector')
        # Display / instance-map handling.
        parser.add_argument('--display_winsize', type=int, default=400, help='display window size')
        parser.add_argument('--no_instance_edge', action='store_true', help='if specified, do *not* add the edges of the instance map as input')
        parser.add_argument('--no_instance_dist', action='store_true', help='if specified, do *not* add distence transform of the instance map as input')
        parser.add_argument('--lr_instance', action='store_true', help='if specified, add instance map only to the lr-stream')
        self.initialized = True
        return parser

    def gather_options(self):
        """Parse arguments in stages: base options first, then let the chosen
        model and dataset register their own options, then final parse."""
        if (not self.initialized):
            parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
            parser = self.initialize(parser)
        # First partial parse just to learn which model/dataset was selected.
        (opt, unknown) = parser.parse_known_args()
        model_name = opt.model
        model_option_setter = models.get_option_setter(model_name)
        parser = model_option_setter(parser, self.isTrain)
        dataset_mode = opt.dataset_mode
        dataset_option_setter = data.get_option_setter(dataset_mode)
        parser = dataset_option_setter(parser, self.isTrain)
        (opt, unknown) = parser.parse_known_args()
        # Optionally replace defaults with a previously saved option file.
        if opt.load_from_opt_file:
            parser = self.update_options_from_file(parser, opt)
        opt = parser.parse_args()
        self.parser = parser
        return opt

    def print_options(self, opt):
        """Pretty-print all options, flagging values that differ from defaults."""
        message = ''
        message += ' Options \n'
        for (k, v) in sorted(vars(opt).items()):
            comment = ''
            default = self.parser.get_default(k)
            if (v != default):
                comment = ('\t[default: %s]' % str(default))
            message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
        message += ' End '
        print(message)

    def option_file_path(self, opt, makedir=False):
        """Return `<checkpoints_dir>/<name>/opt` (without extension)."""
        expr_dir = os.path.join(opt.checkpoints_dir, opt.name)
        if makedir:
            util.mkdirs(expr_dir)
        file_name = os.path.join(expr_dir, 'opt')
        return file_name

    def save_options(self, opt):
        """Persist options both as human-readable .txt and as pickled .pkl."""
        file_name = self.option_file_path(opt, makedir=True)
        with open((file_name + '.txt'), 'wt') as opt_file:
            for (k, v) in sorted(vars(opt).items()):
                comment = ''
                default = self.parser.get_default(k)
                if (v != default):
                    comment = ('\t[default: %s]' % str(default))
                opt_file.write('{:>25}: {:<30}{}\n'.format(str(k), str(v), comment))
        with open((file_name + '.pkl'), 'wb') as opt_file:
            pickle.dump(opt, opt_file)

    def update_options_from_file(self, parser, opt):
        """Use a previously saved option pickle to override parser defaults."""
        new_opt = self.load_options(opt)
        for (k, v) in sorted(vars(opt).items()):
            if (hasattr(new_opt, k) and (v != getattr(new_opt, k))):
                new_val = getattr(new_opt, k)
                parser.set_defaults(**{k: new_val})
        return parser

    def load_options(self, opt):
        """Unpickle the saved options for this experiment."""
        file_name = self.option_file_path(opt, makedir=False)
        new_opt = pickle.load(open((file_name + '.pkl'), 'rb'))
        return new_opt

    def parse(self, save=False):
        """Gather, post-process and (when training) persist the options."""
        opt = self.gather_options()
        opt.isTrain = self.isTrain
        self.print_options(opt)
        if opt.isTrain:
            self.save_options(opt)
        # One extra channel for the instance map unless BOTH instance inputs
        # are disabled. NOTE(review): uses bitwise & on two booleans — works
        # for bools, but `and` is the conventional operator.
        opt.semantic_nc = ((opt.label_nc + (1 if opt.contain_dontcare_label else 0)) + (0 if (opt.no_instance_edge & opt.no_instance_dist) else 1))
        # Translate the comma-separated gpu id string into a list of ints.
        str_ids = opt.gpu_ids.split(',')
        opt.gpu_ids = []
        for str_id in str_ids:
            id = int(str_id)
            if (id >= 0):
                opt.gpu_ids.append(id)
        if (len(opt.gpu_ids) > 0):
            torch.cuda.set_device(opt.gpu_ids[0])
        assert ((len(opt.gpu_ids) == 0) or ((opt.batchSize % len(opt.gpu_ids)) == 0)), ('Batch size %d is wrong. It must be a multiple of # GPUs %d.' % (opt.batchSize, len(opt.gpu_ids)))
        if opt.lr_instance:
            opt.no_instance = False
        if (opt.hr_coor == 'None'):
            opt.hr_coor = None
        # NOTE(review): this unconditionally overwrites no_instance, making the
        # lr_instance branch above dead — confirm this is intended.
        opt.no_instance = opt.no_instance_edge
        self.opt = opt
        return self.opt
def build_data(resource, directory='data'):
    """Download/unpack `resource` into `directory` unless already built.

    Returns the path of the built dataset directory (or file).
    """
    basename = os.path.basename(resource.filename)
    # .tar.gz carries two extensions; strip both. Otherwise strip one.
    if resource.filename.endswith('.tar.gz'):
        resource_dir = os.path.splitext(os.path.splitext(basename)[0])[0]
    else:
        resource_dir = os.path.splitext(basename)[0]
    file_path = os.path.join(directory, resource_dir)
    if check_built(file_path, resource.version):
        print('Already built at {}. version {}'.format(file_path, resource.version))
        return file_path
    # Not built yet: fetch, verify, optionally unpack, then mark as built.
    file_path = download_and_check_hash(resource.url, resource.filename, resource.expected_hash, resource.version, directory)
    if resource.zipped:
        built_location = unzip_file(file_path, directory)
        os.remove(file_path)
    else:
        built_location = file_path
    mark_built(built_location, resource.version)
    print('Successfully built dataset at {}'.format(built_location))
    return built_location
# NOTE(review): the line below appears to be a mangled decorator — most likely
# `@pytest.fixture(scope='module')` with the `@pytest.fixture` part lost in
# extraction; confirm against the original file.
(scope='module')
def config():
    # Module-scoped fixture: load a minimal TARDIS v1 configuration from YAML.
    return Configuration.from_yaml('tardis/io/tests/data/tardis_configv1_verysimple.yml')
def test_mean_logvar_length_dict():
    """The second and third outputs (per the test name, mean and log-variance)
    must have rows of equal width."""
    result = model1_dict.forward(x1_dict.float())
    first_row, second_row = result[1][0], result[2][0]
    assert len(first_row) == len(second_row)
class RegularSuperCrystals(Category_singleton):
    """Category of regular supercrystals."""

    def super_categories(self):
        # A regular supercrystal is in particular a finite supercrystal.
        return [SuperCrystals().Finite()]

    class ElementMethods():

        def epsilon(self, i):
            # Count how many times e_i applies before reaching None
            # (the length of the i-string above this element).
            string_length = 0
            x = self
            while True:
                x = x.e(i)
                if (x is None):
                    return string_length
                else:
                    string_length += 1

        def phi(self, i):
            # Count how many times f_i applies before reaching None
            # (the length of the i-string below this element).
            string_length = 0
            x = self
            while True:
                x = x.f(i)
                if (x is None):
                    return string_length
                else:
                    string_length += 1

    class TensorProducts(TensorProductsCategory):
        # NOTE(review): the bare `_method` below looks like a mangled decorator
        # (likely `@cached_method`); confirm against the original file.
        _method
        def extra_super_categories(self):
            # Tensor products of regular supercrystals stay in this category.
            return [self.base_category()]
class BiSeNetOutput(nn.Module):
    """Segmentation head: a 3x3 conv-BN-ReLU projection followed by a 1x1
    bias-free classifier conv. Returns both logits and the projected feature."""

    def __init__(self, in_chan, mid_chan, num_class):
        super(BiSeNetOutput, self).__init__()
        self.conv = ConvBNReLU(in_chan, mid_chan, ks=3, stride=1, padding=1)
        self.conv_out = nn.Conv2d(mid_chan, num_class, kernel_size=1, bias=False)

    def forward(self, x):
        hidden = self.conv(x)
        logits = self.conv_out(hidden)
        # Feature map is returned alongside the logits for auxiliary use.
        return (logits, hidden)
# NOTE(review): the line below appears to be a mangled decorator — most likely
# `@pytest.mark.parametrize('csr_container', CSR_CONTAINERS)` with the
# `@pytest.mark` prefix lost in extraction; confirm against the original file.
.parametrize('csr_container', CSR_CONTAINERS)
def test_scale_normalize(global_random_seed, csr_container):
    """_scale_normalize output must pass the scale test and preserve sparsity."""
    generator = np.random.RandomState(global_random_seed)
    X = generator.rand(100, 100)
    # Exercise both the dense array and its sparse CSR counterpart.
    for mat in (X, csr_container(X)):
        (scaled, _, _) = _scale_normalize(mat)
        _do_scale_test(scaled)
        if issparse(mat):
            # Sparse input must stay sparse after scaling.
            assert issparse(scaled)
def _set_checking_parameters(estimator):
    """Shrink/adjust estimator hyper-parameters so common checks run quickly."""
    params = estimator.get_params()
    name = estimator.__class__.__name__
    if 'n_estimators' in params:
        # Cap ensemble size for speed.
        estimator.set_params(n_estimators=min(5, estimator.n_estimators))
    if name == 'ClusterCentroids':
        # KMeans renamed 'full' to 'lloyd' in scikit-learn 1.1.
        algorithm = 'full' if sklearn_version < parse_version('1.1') else 'lloyd'
        estimator.set_params(voting='soft', estimator=KMeans(random_state=0, algorithm=algorithm, n_init=1))
    if name == 'KMeansSMOTE':
        estimator.set_params(kmeans_estimator=12)
    if name == 'BalancedRandomForestClassifier':
        estimator.set_params(replacement=True, sampling_strategy='all', bootstrap=False)
def window(df: pd.DataFrame, size: int, driving_series: List[str], target_series: List[str]):
    """Slice `df` into overlapping windows of length `size`.

    Returns (X_T, y_T): arrays of shape (num_windows, size, num_cols) built
    from the driving columns and the target columns respectively.
    """
    X = df[driving_series].values
    y = df[target_series].values
    n_windows = (len(X) - size) + 1
    X_T = [X[start:start + size] for start in range(n_windows)]
    y_T = [y[start:start + size] for start in range(n_windows)]
    return (np.array(X_T), np.array(y_T))
def set_target_os(platform: Optional[str]=None):
    """Set the module-level target OS; None resets to the host platform."""
    global _target_os
    supported = (None, 'linux', 'macosx', 'windows')
    # Reject anything outside the supported set before touching the global.
    if platform not in supported:
        raise OSError(f"Unsupported target OS: '{platform}' - py-solc-x supports 'linux', 'macosx', or 'windows'.")
    _target_os = platform
def expected_failure_on_sympy(func: T.Callable) -> T.Callable:
    """Mark `func` as an expected failure when the symbolic backend is sympy;
    otherwise return it unchanged."""
    on_sympy = symforce.get_symbolic_api() == 'sympy'
    return unittest.expectedFailure(func) if on_sympy else func
def convert_examples_to_features(examples, label_list, tokenizer, max_seq_length, max_entity_length, max_mention_length):
    """Convert NER examples into LUKE-style entity-span classification features.

    For each sentence (with left/right document context packed around it up to
    `max_seq_length`), every candidate token span up to `max_mention_length`
    subwords becomes one entity candidate, labeled from the example's BIO tags
    (0 for non-entities). Candidates are chunked into groups of at most
    `max_entity_length` per InputFeatures.
    """
    # Two positions reserved for [CLS]/[SEP].
    max_num_subwords = (max_seq_length - 2)
    label_map = {label: i for (i, label) in enumerate(label_list)}
    features = []
    def tokenize_word(text):
        # RoBERTa needs a leading space for non-initial, non-punctuation words.
        if (isinstance(tokenizer, RobertaTokenizer) and (text[0] != "'") and ((len(text) != 1) or (not is_punctuation(text)))):
            return tokenizer.tokenize(text, add_prefix_space=True)
        return tokenizer.tokenize(text)
    for (example_index, example) in enumerate(examples):
        tokens = [tokenize_word(w) for w in example.words]
        subwords = [w for li in tokens for w in li]
        # subword index -> word index, and word index -> first subword index.
        subword2token = list(itertools.chain(*[([i] * len(li)) for (i, li) in enumerate(tokens)]))
        token2subword = ([0] + list(itertools.accumulate((len(li) for li in tokens))))
        subword_start_positions = frozenset(token2subword)
        subword_sentence_boundaries = [sum((len(li) for li in tokens[:p])) for p in example.sentence_boundaries]
        # Decode the BIO tag sequence into {(subword_start, subword_end): label_id}.
        entity_labels = {}
        start = None
        cur_type = None
        for (n, label) in enumerate(example.labels):
            if ((label == 'O') or (n in example.sentence_boundaries)):
                if (start is not None):
                    entity_labels[(token2subword[start], token2subword[n])] = label_map[cur_type]
                    start = None
                    cur_type = None
            if label.startswith('B'):
                if (start is not None):
                    entity_labels[(token2subword[start], token2subword[n])] = label_map[cur_type]
                start = n
                cur_type = label[2:]
            elif label.startswith('I'):
                if (start is None):
                    start = n
                    cur_type = label[2:]
                elif (cur_type != label[2:]):
                    # Type change inside an I-run closes the previous entity.
                    entity_labels[(token2subword[start], token2subword[n])] = label_map[cur_type]
                    start = n
                    cur_type = label[2:]
        if (start is not None):
            entity_labels[(token2subword[start], len(subwords))] = label_map[cur_type]
        # One feature group per sentence, padded with document context.
        for n in range((len(subword_sentence_boundaries) - 1)):
            (doc_sent_start, doc_sent_end) = subword_sentence_boundaries[n:(n + 2)]
            left_length = doc_sent_start
            right_length = (len(subwords) - doc_sent_end)
            sentence_length = (doc_sent_end - doc_sent_start)
            half_context_length = int(((max_num_subwords - sentence_length) / 2))
            # Split the remaining budget between left and right context,
            # letting the longer side absorb any unused budget of the shorter.
            if (left_length < right_length):
                left_context_length = min(left_length, half_context_length)
                right_context_length = min(right_length, ((max_num_subwords - left_context_length) - sentence_length))
            else:
                right_context_length = min(right_length, half_context_length)
                left_context_length = min(left_length, ((max_num_subwords - right_context_length) - sentence_length))
            doc_offset = (doc_sent_start - left_context_length)
            target_tokens = subwords[doc_offset:(doc_sent_end + right_context_length)]
            word_ids = tokenizer.convert_tokens_to_ids((([tokenizer.cls_token] + target_tokens) + [tokenizer.sep_token]))
            word_attention_mask = ([1] * (len(target_tokens) + 2))
            word_segment_ids = ([0] * (len(target_tokens) + 2))
            entity_start_positions = []
            entity_end_positions = []
            entity_ids = []
            entity_attention_mask = []
            entity_segment_ids = []
            entity_position_ids = []
            original_entity_spans = []
            labels = []
            # Enumerate all word-aligned candidate spans inside the sentence.
            for entity_start in range(left_context_length, (left_context_length + sentence_length)):
                doc_entity_start = (entity_start + doc_offset)
                if (doc_entity_start not in subword_start_positions):
                    continue
                for entity_end in range((entity_start + 1), ((left_context_length + sentence_length) + 1)):
                    doc_entity_end = (entity_end + doc_offset)
                    if (doc_entity_end not in subword_start_positions):
                        continue
                    if ((entity_end - entity_start) > max_mention_length):
                        continue
                    # +1 throughout accounts for the [CLS] token at position 0.
                    entity_start_positions.append((entity_start + 1))
                    entity_end_positions.append(entity_end)
                    entity_ids.append(1)
                    entity_attention_mask.append(1)
                    entity_segment_ids.append(0)
                    position_ids = list(range((entity_start + 1), (entity_end + 1)))
                    position_ids += ([(- 1)] * ((max_mention_length - entity_end) + entity_start))
                    entity_position_ids.append(position_ids)
                    original_entity_spans.append((subword2token[doc_entity_start], (subword2token[(doc_entity_end - 1)] + 1)))
                    labels.append(entity_labels.get((doc_entity_start, doc_entity_end), 0))
                    # Consume the gold label so leftovers can be detected below.
                    entity_labels.pop((doc_entity_start, doc_entity_end), None)
            if (len(entity_ids) == 1):
                # Pad with a dummy candidate (label -1) so every feature has >= 2.
                entity_start_positions.append(0)
                entity_end_positions.append(0)
                entity_ids.append(0)
                entity_attention_mask.append(0)
                entity_segment_ids.append(0)
                entity_position_ids.append(([(- 1)] * max_mention_length))
                original_entity_spans.append(None)
                labels.append((- 1))
            # Chunk candidates into near-equal groups of <= max_entity_length.
            split_size = math.ceil((len(entity_ids) / max_entity_length))
            for i in range(split_size):
                entity_size = math.ceil((len(entity_ids) / split_size))
                start = (i * entity_size)
                end = (start + entity_size)
                features.append(InputFeatures(example_index=example_index, word_ids=word_ids, word_attention_mask=word_attention_mask, word_segment_ids=word_segment_ids, entity_start_positions=entity_start_positions[start:end], entity_end_positions=entity_end_positions[start:end], entity_ids=entity_ids[start:end], entity_position_ids=entity_position_ids[start:end], entity_segment_ids=entity_segment_ids[start:end], entity_attention_mask=entity_attention_mask[start:end], original_entity_spans=original_entity_spans[start:end], labels=labels[start:end]))
        # Every gold entity must have been matched by some candidate span.
        assert (not entity_labels)
    return features
class ClusterGCN(GraphSamplingBase):
    """Cluster-GCN: SAGE-style conv stack trained on graph partitions
    produced by ClusterData/ClusterLoader."""

    def __init__(self, args, data, train_idx, processed_dir):
        super(ClusterGCN, self).__init__(args, data, train_idx, processed_dir)
        # Optionally swap the conv implementation for an MLP-based variant.
        base_gnnconv = (SAGEConvMLP if (args.gnn_type == 'mlp') else SAGEConv)
        self.convs = torch.nn.ModuleList()
        self.convs.append(base_gnnconv(self.num_feats, self.dim_hidden))
        for _ in range((self.num_layers - 2)):
            self.convs.append(base_gnnconv(self.dim_hidden, self.dim_hidden))
        self.convs.append(base_gnnconv(self.dim_hidden, self.num_classes))
        # Number of partitions per mini-batch so that a batch covers roughly
        # args.batch_size nodes on average (at least one partition).
        sample_size = max(1, int((args.batch_size / (data.num_nodes / args.num_parts))))
        cluster_data = ClusterData(data, num_parts=args.num_parts, recursive=False, save_dir=self.save_dir)
        self.train_loader = ClusterLoader(cluster_data, batch_size=sample_size, shuffle=True, num_workers=0)
        self.saved_args = vars(args)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize every conv layer's weights."""
        for conv in self.convs:
            conv.reset_parameters()

    def forward(self, x, edge_index):
        """Standard conv stack: ReLU + dropout between layers, raw logits out."""
        for (i, conv) in enumerate(self.convs):
            x = conv(x, edge_index)
            if (i != (self.num_layers - 1)):
                x = F.relu(x)
                x = F.dropout(x, p=self.dropout, training=self.training)
        return x

    def train_net(self, input_dict):
        """One training epoch over the cluster loader.

        Returns (mean_loss_per_batch, accuracy_over_train_size).
        """
        device = input_dict['device']
        optimizer = input_dict['optimizer']
        loss_op = input_dict['loss_op']
        total_loss = total_correct = 0
        for batch in tqdm(self.train_loader):
            batch = batch.to(device)
            # Skip partitions that contain no training nodes.
            if (batch.train_mask.sum() == 0):
                continue
            optimizer.zero_grad()
            out = self(batch.x, batch.edge_index)
            if isinstance(loss_op, torch.nn.NLLLoss):
                # NLLLoss expects log-probabilities.
                out = F.log_softmax(out, dim=(- 1))
                loss = loss_op(out[batch.train_mask], batch.y[batch.train_mask])
            else:
                # Multi-label / BCE-style losses need float targets.
                loss = loss_op(out[batch.train_mask], batch.y[batch.train_mask].type_as(out))
            loss.backward()
            optimizer.step()
            total_loss += float(loss.item())
            if isinstance(loss_op, torch.nn.NLLLoss):
                total_correct += int(out.argmax(dim=(- 1)).eq(batch.y).sum())
            else:
                total_correct += int(out.eq(batch.y).sum())
        # For multi-label losses, accuracy is normalized per label as well.
        train_size = (self.train_size if isinstance(loss_op, torch.nn.NLLLoss) else (self.train_size * self.num_classes))
        return ((total_loss / len(self.train_loader)), (total_correct / train_size))
def get_input_fn(config_params, image_dir, batch_size=None, steps=None):
    """Build a generator-style input_fn that yields preprocessed image batches
    from `image_dir`, `steps` batches of `batch_size` images each."""
    if batch_size is None:
        raise ValueError('`batch_size` cannot be None')
    image_paths = glob(os.path.join(image_dir, '*'))
    preprocessing_pipeling = PreprocessingPipeline(config_params.input.input_shape, config_params.dataloader_params)
    _resize_fn = preprocessing_pipeling.normalize_and_resize_with_pad
    if steps is None:
        # Default to as many full batches as the directory allows.
        steps = len(image_paths) // batch_size
    logging.info('Using {} out of {} images'.format(steps * batch_size, len(image_paths)))
    dataset = tf.data.Dataset.from_tensor_slices((image_paths,))
    dataset = dataset.map(map_func=lambda image_path: _resize_fn(read_image(image_path)), num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.batch(batch_size, drop_remainder=True).take(steps).prefetch(tf.data.AUTOTUNE)

    def _input_fn():
        # Yield one-element tuples so downstream calibration loops can unpack.
        for sample in tqdm(dataset, total=steps):
            yield (sample['image'],)

    return _input_fn
class TestMEstimateEncoder(TestCase):
    """Reference values for the m-estimate target encoder at m=0 and m=1."""

    def test_reference_m0(self):
        train_x = ['A', 'A', 'B', 'B']
        train_y = [1, 1, 0, 1]
        test_x = ['A', 'B', 'C']
        encoder = encoders.MEstimateEncoder(m=0, handle_unknown='value', handle_missing='value')
        encoder.fit(train_x, train_y)
        scored = encoder.transform(test_x)
        # With m=0 the encoding is the plain per-category mean; the unseen
        # category 'C' falls back to the prior 3/4.
        expected = [[1], [0.5], [(3.0 / 4.0)]]
        self.assertEqual(scored.to_numpy().tolist(), expected)

    def test_reference_m1(self):
        train_x = ['A', 'A', 'B', 'B']
        train_y = [1, 1, 0, 1]
        test_x = ['A', 'B', 'C']
        encoder = encoders.MEstimateEncoder(m=1, handle_unknown='value', handle_missing='value')
        encoder.fit(train_x, train_y)
        scored = encoder.transform(test_x)
        # m=1 blends each category's sum with one pseudo-observation of the prior.
        expected = [[((2 + (3.0 / 4.0)) / (2 + 1))], [((1 + (3.0 / 4.0)) / (2 + 1))], [(3.0 / 4.0)]]
        self.assertEqual(scored.to_numpy().tolist(), expected)
def create_clustering_layout():
    """Assemble the clustering page layout.

    Left column (width 2): description card, control card, and a hidden div
    used as a clientside-callback output. Right column: a row with the
    Summary card (width 4) and the Attributes card (width 8), followed by
    the main display area.
    """
    return dbc.Row([dbc.Col([create_description_card(), create_control_card(), html.Div(['initial child'], id='clustering-output-clientside', style={'display': 'none'})], width=2), dbc.Col([dbc.Row([dbc.Col(dbc.Card(dbc.CardBody([html.H4('Summary'), html.Div(id='clustering-summary')])), width=4), dbc.Col(dbc.Card(dbc.CardBody([html.H4('Attributes'), html.Div(id='clustering-attribute-options')])), width=8)]), dbc.Row([dbc.Col([create_display_layout()])])])])
def test_cond_twice_shared_params():
    """Two rf.cond calls that reference the same true/false Linear modules
    must build and run correctly with the parameters shared across both."""
    time_dim = Dim(Tensor('time', [batch_dim], dtype='int32'))
    in_dim = Dim(7, name='in')
    out_dim = Dim(13, name='out')
    extern_data = TensorDict({'data': Tensor('data', [batch_dim, time_dim, in_dim], dtype='float32')})
    class _Net(rf.Module):
        def __init__(self):
            super().__init__()
            self.pre_linear = rf.Linear(in_dim, out_dim)
            self.linear_true = rf.Linear(out_dim, out_dim)
            self.linear_false = rf.Linear(out_dim, out_dim)
        def __call__(self, x: Tensor) -> Tensor:
            x = self.pre_linear(x)
            # First cond: even time length -> linear_true, odd -> linear_false.
            x = rf.cond(pred=((time_dim.get_dim_value_tensor() % 2) == 0), true_fn=(lambda : self.linear_true(x)), false_fn=(lambda : self.linear_false(x)))
            # Second cond with the inverted predicate reuses the same modules.
            x = rf.cond(pred=((time_dim.get_dim_value_tensor() % 2) == 1), true_fn=(lambda : self.linear_true(x)), false_fn=(lambda : self.linear_false(x)))
            return x
    def _forward_step(*, model: _Net, extern_data: TensorDict):
        out = model(extern_data['data'])
        out.mark_as_default_output(shape=(batch_dim, time_dim, out_dim))
    # Run with an odd and an even max time size to exercise both branches.
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 5})
    run_model(extern_data, (lambda *, epoch, step: _Net()), _forward_step, dyn_dim_max_sizes={time_dim: 6})
def hook_maxpool1d(m, x, y):
    """FLOPs hook for MaxPool1d: (kernel_size - 1) comparisons per output element."""
    comparisons_per_element = m.kernel_size - 1
    return int(comparisons_per_element * y.numel())
class Test():
    """Tiny example class: stores a value and exposes a multiply-by-five method."""

    def __init__(self, value: int) -> None:
        # Stored for API parity; test_method does not read it.
        self._value = value

    def test_method(self, x: int) -> int:
        """Return `x` scaled by the constant factor 5."""
        return x * 5
# NOTE(review): the two lines below appear to be mangled decorators — most
# likely `@pytest.mark.parametrize(...)` with the `@pytest.mark` prefix lost
# in extraction; confirm against the original file.
.parametrize('std', [False, True])
.parametrize('pro', [False, True])
def test_iff_variables(std, pro):
    """Background requires exactly one of std-logic / prolog variable styles."""
    if (std == pro):
        # Both flags equal (both True or both False) must be rejected.
        with pytest.raises(ValueError):
            _bk = Background(use_std_logic_variables=std, use_prolog_variables=pro)
    else:
        _bk = Background(use_std_logic_variables=std, use_prolog_variables=pro)
        assert (_bk.use_std_logic_variables == std)
        assert (_bk.use_prolog_variables == pro)
def _validate_gpu():
    """Return True iff CUDA is available; log an error otherwise."""
    import torch
    if torch.cuda.is_available():
        return True
    logger.error('Skyline did not detect a GPU on this machine. Skyline only profiles deep learning workloads on GPUs.')
    return False
class BitDownsampleConv(nn.Module):
    """1x1 weight-standardized conv used as a BiT residual downsample, with a
    group-norm applied only when the block is not pre-activation."""

    def __init__(self, config, in_channels, out_channels, stride=1, preact=True):
        super().__init__()
        self.conv = WeightStandardizedConv2d(in_channels, out_channels, 1, stride=stride, eps=1e-08, padding=config.global_padding)
        # Pre-activation blocks normalize before the conv elsewhere, so the
        # norm here collapses to the identity in that case.
        if preact:
            self.norm = nn.Identity()
        else:
            self.norm = BitGroupNormActivation(config, num_channels=out_channels, apply_activation=False)

    def forward(self, x):
        hidden = self.conv(x)
        return self.norm(hidden)
def isotropic_opening(image, radius, out=None, spacing=None):
    """Morphological opening with an isotropic footprint of `radius`:
    erosion followed by dilation."""
    shrunk = isotropic_erosion(image, radius, out=out, spacing=spacing)
    return isotropic_dilation(shrunk, radius, out=out, spacing=spacing)
class EDGE_ENHANCE_MORE(BuiltinFilter):
    """Strong edge-enhancement filter: 3x3 convolution, scale 1, offset 0."""
    name = 'Edge-enhance More'
    # (kernel size, scale, offset, kernel): center weight 9, all neighbors -1.
    filterargs = ((3, 3), 1, 0, ((- 1), (- 1), (- 1), (- 1), 9, (- 1), (- 1), (- 1), (- 1)))
class TransformerWav2VecEncoderLayer(nn.Module):
    """Post-LayerNorm Transformer encoder layer for wav2vec: a self-attention
    block then a two-layer feed-forward block, each with residual connection
    followed by LayerNorm.

    NOTE(review): the *_dim / num_attention_heads parameters are annotated
    `float` but are used as integer sizes — presumably a typo in the original;
    left unchanged here.
    """

    def __init__(self, embedding_dim: float=768, ffn_embedding_dim: float=3072, num_attention_heads: float=8, dropout: float=0.1, attention_dropout: float=0.1, activation_dropout: float=0.1, activation_fn: str='relu', add_bias_kv: bool=False, add_zero_attn: bool=False, export: bool=False) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.dropout = dropout
        self.activation_dropout = activation_dropout
        self.activation_fn = utils.get_activation_fn(activation_fn)
        self.self_attn = MultiheadAttention(self.embedding_dim, num_attention_heads, dropout=attention_dropout, add_bias_kv=add_bias_kv, add_zero_attn=add_zero_attn, self_attention=True)
        self.self_attn_layer_norm = LayerNorm(self.embedding_dim, export=export)
        self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
        self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
        self.final_layer_norm = LayerNorm(self.embedding_dim, export=export)

    def forward(self, x: torch.Tensor, self_attn_mask: torch.Tensor=None, self_attn_padding_mask: torch.Tensor=None):
        """Apply self-attention then the FFN; returns (output, attn).

        attn is whatever the attention module returns as its second value
        (need_weights=False, so typically no weights are computed).
        """
        # --- self-attention block (residual + post-LN) ---
        residual = x
        (x, attn) = self.self_attn(query=x, key=x, value=x, key_padding_mask=self_attn_padding_mask, need_weights=False, attn_mask=self_attn_mask)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = (residual + x)
        x = self.self_attn_layer_norm(x)
        # --- feed-forward block (residual + post-LN) ---
        residual = x
        x = self.activation_fn(self.fc1(x))
        x = F.dropout(x, p=self.activation_dropout, training=self.training)
        x = self.fc2(x)
        x = F.dropout(x, p=self.dropout, training=self.training)
        x = (residual + x)
        x = self.final_layer_norm(x)
        return (x, attn)
def test_int_primitive_statement_delta(default_test_case):
    """With max_delta=10 and a mocked gaussian of 0.5, delta() adds +5 to the value."""
    config.configuration.test_creation.max_delta = 10
    int_statement = stmt.IntPrimitiveStatement(default_test_case, 1)
    with mock.patch('pynguin.utils.randomness.next_gaussian') as gauss_mock:
        gauss_mock.return_value = 0.5
        int_statement.delta()
    assert int_statement.value == 6
def make_policy():
    """Build the deterministic actor network on the configured device."""
    activation = nn.ReLU(inplace=True)
    return DeterministicPolicy(
        state_shape=STATE_SHAPE,
        action_shape=ACTION_SHAPE,
        hidden_units=[256, 256],
        hidden_activation=activation,
        device=args.device,
    )
class Adam(Optimizer):
    """Adam optimizer (Kingma & Ba, 2014) with optional AMSGrad variant.

    Args:
        params: iterable of parameters (or dicts defining parameter groups).
        lr: learning rate.
        betas: coefficients for the running averages of the gradient and of
            its square.
        eps: term added to the denominator for numerical stability.
        weight_decay: L2 penalty coefficient.
        amsgrad: use the AMSGrad variant (Reddi et al., 2018), which keeps the
            running maximum of the second-moment estimate.
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Adam, self).__setstate__(state)
        # Old checkpoints may predate the amsgrad option.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss.

        Returns:
            The loss returned by ``closure``, or None.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                # Lazy state initialization on the first step for this parameter.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # Modernized: the positional-scalar overloads of add/addcmul_/addcdiv_
                # used by the original are deprecated and rejected by recent PyTorch;
                # keyword alpha=/value= is the supported spelling.
                if group['weight_decay'] != 0:
                    grad = grad.add(p.data, alpha=group['weight_decay'])
                # Decay the first and second moment running averages.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                if amsgrad:
                    # Use the running maximum of the 2nd moment for the denominator.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.addcdiv_(exp_avg, denom, value=-step_size)
        return loss
def make_and_save_predictions(model, input_path_str, output_path_str):
    """Run single-image inference over every file in a directory and save results.

    Args:
        model: model handed to ``single_image_inference``.
        input_path_str: directory holding the input images.
        output_path_str: directory the predictions are written to.
    """
    input_path = Path(input_path_str)
    output_path = Path(output_path_str)
    input_image_filenames = [input_path / filename for filename in os.listdir(input_path)]
    for image_filename in tqdm(input_image_filenames):
        input_image = imread(image_filename.as_posix())
        input_tensor = img2tensor(input_image)
        output_image = single_image_inference(model, input_tensor, output_path_str)
        # Bug fix: the original concatenated output_path_str + name directly,
        # producing a wrong path whenever the directory lacked a trailing slash.
        output_filepath = str(output_path / image_filename.name)
        cv2.imwrite(output_filepath, output_image)
class Mixed_5b(nn.Module):
    """Inception-style block: four parallel branches concatenated on channels."""

    def __init__(self):
        super(Mixed_5b, self).__init__()
        # 1x1 bottleneck
        self.branch0 = nn.Sequential(
            BasicConv3d(832, 256, kernel_size=1, stride=1),
        )
        # 1x1 reduce followed by a 3x3 separable conv
        self.branch1 = nn.Sequential(
            BasicConv3d(832, 160, kernel_size=1, stride=1),
            SepConv3d(160, 320, kernel_size=3, stride=1, padding=1),
        )
        self.branch2 = nn.Sequential(
            BasicConv3d(832, 32, kernel_size=1, stride=1),
            SepConv3d(32, 128, kernel_size=3, stride=1, padding=1),
        )
        # 3x3 max-pool followed by a 1x1 projection
        self.branch3 = nn.Sequential(
            nn.MaxPool3d(kernel_size=(3, 3, 3), stride=1, padding=1),
            BasicConv3d(832, 128, kernel_size=1, stride=1),
        )

    def forward(self, x):
        branch_outputs = [self.branch0(x), self.branch1(x), self.branch2(x), self.branch3(x)]
        return torch.cat(branch_outputs, 1)
def get_params(argv='1'):
    """Build the SELD (DCASE2023) parameter dictionary for the given preset.

    ``argv`` selects a preset ('1'..'7' or '999' for a quick test); unknown
    values terminate the process. Returns the fully-derived params dict.
    """
    print('SET: {}'.format(argv))
    # Base configuration; presets below override a few of these keys.
    params = dict(quick_test=True, finetune_mode=False, pretrained_model_weights='models/1_1_foa_dev_split6_model.h5', dataset_dir='/scratch/asignal/partha/DCASE2023/DCASE2023_SELD_dataset', feat_label_dir='/scratch/asignal/partha/DCASE2023/DCASE2023_SELD_dataset/seld_feat_label', model_dir='models/', dcase_output_dir='results/', mode='dev', dataset='foa', fs=24000, hop_len_s=0.02, label_hop_len_s=0.1, max_audio_len_s=60, nb_mel_bins=64, use_salsalite=False, fmin_doa_salsalite=50, fmax_doa_salsalite=2000, fmax_spectra_salsalite=9000, multi_accdoa=False, thresh_unify=15, label_sequence_length=50, batch_size=128, dropout_rate=0.05, nb_cnn2d_filt=64, f_pool_size=[4, 4, 2], self_attn=True, nb_heads=8, nb_self_attn_layers=2, nb_rnn_layers=2, rnn_size=128, nb_fnn_layers=1, fnn_size=128, nb_epochs=100, lr=0.001, average='macro', lad_doa_thresh=20)
    if (argv == '1'):
        print('USING DEFAULT PARAMETERS\n')
    elif (argv == '2'):
        print('FOA + ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'foa'
        params['multi_accdoa'] = False
    elif (argv == '3'):
        print('FOA + multi ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'foa'
        params['multi_accdoa'] = True
    elif (argv == '4'):
        print('MIC + GCC + ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'mic'
        params['use_salsalite'] = False
        params['multi_accdoa'] = False
    elif (argv == '5'):
        print('MIC + SALSA + ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'mic'
        params['use_salsalite'] = True
        params['multi_accdoa'] = False
    elif (argv == '6'):
        print('MIC + GCC + multi ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'mic'
        params['use_salsalite'] = False
        params['multi_accdoa'] = True
    elif (argv == '7'):
        print('MIC + SALSA + multi ACCDOA\n')
        params['quick_test'] = False
        params['dataset'] = 'mic'
        params['use_salsalite'] = True
        params['multi_accdoa'] = True
    elif (argv == '999'):
        print('QUICK TEST MODE\n')
        params['quick_test'] = True
    else:
        print('ERROR: unknown argument {}'.format(argv))
        exit()
    # Derived values: how many feature frames correspond to one label frame.
    feature_label_resolution = int((params['label_hop_len_s'] // params['hop_len_s']))
    params['feature_sequence_length'] = (params['label_sequence_length'] * feature_label_resolution)
    params['t_pool_size'] = [feature_label_resolution, 1, 1]
    params['patience'] = int(params['nb_epochs'])
    # Class count depends on the dataset edition encoded in the directory path.
    # NOTE(review): if none of these substrings match, 'unique_classes' stays
    # unset and downstream lookups will raise KeyError.
    if ('2020' in params['dataset_dir']):
        params['unique_classes'] = 14
    elif ('2021' in params['dataset_dir']):
        params['unique_classes'] = 12
    elif ('2022' in params['dataset_dir']):
        params['unique_classes'] = 13
    elif ('2023' in params['dataset_dir']):
        params['unique_classes'] = 13
    for (key, value) in params.items():
        print('\t{}: {}'.format(key, value))
    return params
def np_loader(np_path, l2norm=False):
    """Load a (possibly pickled) numpy file, unwrapping 0-d object arrays,
    and optionally L2-normalise the features: per key for dicts, per row
    for 2-D arrays."""
    with open(np_path, 'rb') as f:
        data = np.load(f, encoding='latin1', allow_pickle=True)
    # np.save wraps pickled objects (e.g. dicts) in a 0-d array; unwrap it.
    if isinstance(data, np.ndarray) and data.size == 1:
        data = data[()]
    if not l2norm:
        return data
    print('L2 normalizing features')
    if isinstance(data, dict):
        for key in data:
            feats = data[key]
            data[key] = feats / max(np.linalg.norm(feats), 1e-06)
    elif data.ndim == 2:
        row_norms = np.linalg.norm(data, axis=1)
        data = data / np.maximum(row_norms.reshape(-1, 1), 1e-06)
    else:
        raise ValueError('unexpected data format {}'.format(type(data)))
    return data
def batch_nested_sequences(seqs_subseqs, max_length=None, max_tokens=None, fixed_length=None, batch_first=True, pad_value=PAD, augment=False, device=None, dtype=torch.long):
    """Batch (sequence, sub-sequence) pairs into two padded tensors.

    Args:
        seqs_subseqs: iterable of (seq, sub_seqs) pairs, where ``sub_seqs`` is a
            per-position list of tensors nested under each sequence.
        max_length / max_tokens: length limits applied via ``_limit_lengths``
            (project helper — exact truncation policy defined there).
        fixed_length: if given, forces every batch to exactly this length
            (also capping ``max_length``).
        batch_first: layout of the output tensors (batch, time) vs (time, batch).
        pad_value: fill value for padded positions (defaults to module PAD).
        augment: accepted but unused in this implementation.
        device / dtype: forwarded to the allocated tensors.

    Returns:
        ((seq_tensor, lengths), (sub_seq_tensor, sub_lengths)).
    """
    (seqs, sub_seqs) = zip(*seqs_subseqs)
    # Axis indices for batch/time under each layout.
    (batch_dim, time_dim) = ((0, 1) if batch_first else (1, 0))
    if (fixed_length is not None):
        fixed_length = max_length = min(max_length, fixed_length)
    lengths = _limit_lengths(seqs, max_length, max_tokens)
    # Truncate the nested lists to the (possibly limited) outer lengths.
    sub_seqs = [s[:length] for (s, length) in zip(sub_seqs, lengths)]
    sub_lengths = [[sub.nelement() for sub in s] for s in sub_seqs]
    batch_length = (max(lengths) if (fixed_length is None) else fixed_length)
    # Longest inner tensor anywhere in the batch sets the 3rd dimension.
    batch_sub_length = max([max([s2.numel() for s2 in s1]) for s1 in sub_seqs])
    sub_tensor_size = ((len(seqs), batch_length, batch_sub_length) if batch_first else (batch_length, batch_sub_length, len(seqs)))
    sub_seq_tensor = torch.full(sub_tensor_size, pad_value, dtype=dtype, device=device)
    tensor_size = ((len(seqs), batch_length) if batch_first else (batch_length, len(seqs)))
    seq_tensor = torch.full(tensor_size, pad_value, dtype=dtype, device=device)
    # Copy each (truncated) sequence and its nested tensors into the padded buffers.
    for (i, seq) in enumerate(seqs):
        end_seq = lengths[i]
        seq_tensor.narrow(time_dim, 0, lengths[i]).select(batch_dim, i).copy_(seq[0:end_seq])
        for (j, sub_seq) in enumerate(sub_seqs[i]):
            end_sub_seq = sub_lengths[i][j]
            sub_seq_tensor.narrow((time_dim + 1), 0, end_sub_seq).select(time_dim, j).select(batch_dim, i).copy_(sub_seq[0:end_sub_seq])
    return ((seq_tensor, lengths), (sub_seq_tensor, sub_lengths))
class TestSharedExtension(object):
    """Checks get_shared_lib_extension against the current platform's suffix."""

    def test_get_shared_lib_extension(self):
        import sys
        ext = get_shared_lib_extension(is_python_ext=False)
        platform = sys.platform
        if platform.startswith(('linux', 'gnukfreebsd')):
            assert_equal(ext, '.so')
        elif platform.startswith('darwin'):
            assert_equal(ext, '.dylib')
        elif platform.startswith('win'):
            assert_equal(ext, '.dll')
        # The Python-extension suffix must at least be non-empty everywhere.
        assert_(get_shared_lib_extension(is_python_ext=True))
def get_world_size():
    """Number of processes in the default process group; 1 outside distributed runs."""
    if dist.is_available() and dist.is_initialized():
        return dist.get_world_size()
    return 1
def test_resampler_last_stage_passthrough():
    """A pipeline whose final stage is None must still fit_resample cleanly."""
    X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9], n_informative=3, n_redundant=1, flip_y=0, n_features=20, n_clusters_per_class=1, n_samples=50000, random_state=0)
    sampler = RandomUnderSampler(random_state=42)
    pipeline = make_pipeline(sampler, None)
    pipeline.fit_resample(X, y)
def get_default_plots_list():
    """Return the default list of plot specifications (one 5-element list each)."""
    return [
        ['AssA', 'DetA', 'HOTA', 'HOTA', 'geometric_mean'],
        ['AssPr', 'AssRe', 'HOTA', 'AssA', 'jaccard'],
        ['DetPr', 'DetRe', 'HOTA', 'DetA', 'jaccard'],
        ['HOTA(0)', 'LocA(0)', 'HOTA', 'HOTALocA(0)', 'multiplication'],
        ['HOTA', 'LocA', 'HOTA', None, None],
        ['HOTA', 'MOTA', 'HOTA', None, None],
        ['HOTA', 'IDF1', 'HOTA', None, None],
        ['IDF1', 'MOTA', 'HOTA', None, None],
    ]
def findmap(*args, **kwargs):
    """Search FindStat for maps, by identifier, by (domain, codomain), by
    values, or by distribution.

    Positional arguments may supply (in order) the domain, codomain and
    values; the same information may be passed by keyword. Exactly one of
    ``values`` / ``distribution`` may be given.

    Bug fixed: the distribution branch unpacked the 5-tuple returned by
    ``get_distribution`` into 4 names (a guaranteed ValueError) and did not
    thread ``codomain`` through, unlike the values branch.
    """
    if (len(args) > 3):
        raise TypeError(('findmap takes at most 3 positional arguments (%s given)' % len(args)))
    bad_args = set(kwargs).difference(['values', 'distribution', 'domain', 'codomain', 'depth', 'max_values'])
    if bad_args:
        raise TypeError(("findmap got unexpected keyword arguments '%s'" % bad_args))
    max_values = kwargs.get('max_values', FINDSTAT_MAX_VALUES)
    depth = kwargs.get('depth', FINDSTAT_DEFAULT_DEPTH)
    values = kwargs.get('values', None)
    distribution = kwargs.get('distribution', None)
    domain = kwargs.get('domain', None)
    codomain = kwargs.get('codomain', None)
    try:
        max_values = int(max_values)
        assert (0 <= max_values <= FINDSTAT_MAX_VALUES)
    except (ValueError, AssertionError):
        raise ValueError(('the maximal number of values for a FindStat query must be a non-negative integer less than or equal to %i' % FINDSTAT_MAX_VALUES))
    check_collection = True

    def get_values(raw, domain=None, codomain=None):
        # Normalize callable or iterable input into (known_terms, data, ...).
        if callable(raw):
            known_terms = _data_from_function(raw, domain)
            if (codomain is None):
                codomain = FindStatCollection(known_terms[0][1][0])
            function = raw
        else:
            (known_terms, domain, codomain) = _data_from_iterable(raw, domain=domain, codomain=codomain, mapping=True, check=check_collection)
            function = None
        data = _data_from_data(known_terms, max_values)
        return (known_terms, data, domain, codomain, function)

    def get_distribution(raw, domain=None, codomain=None):
        # Same as get_values, but aggregates the data into a distribution.
        if callable(raw):
            known_terms = _data_from_function(raw, domain)
            function = raw
        else:
            (known_terms, domain, codomain) = _data_from_iterable(raw, domain=domain, codomain=codomain, mapping=True, check=check_collection)
            function = None
        data = _distribution_from_data(known_terms, domain, max_values)
        return (known_terms, data, domain, codomain, function)

    def is_collection(arg):
        try:
            FindStatCollection(arg)
            return True
        except ValueError:
            return False

    def check_domain(arg, domain):
        if (domain is not None):
            raise TypeError(('the domain was specified twice, as positional argument (%s) and as keyword domain=%s' % (arg, domain)))
        return arg

    def check_codomain(arg, codomain):
        if (codomain is not None):
            raise TypeError(('the codomain was specified twice, as positional argument (%s) and as keyword codomain=%s' % (arg, codomain)))
        return arg

    def check_values(arg, values):
        if (values is not None):
            raise TypeError(('values were specified twice, as positional argument (%s) and as keyword values=%s' % (arg, values)))
        return arg

    if ((values is not None) and (distribution is not None)):
        raise ValueError('not both of `values` and `distribution` may be given for a FindStat query')
    # Disambiguate positional arguments: a lone id/map/string is a direct lookup,
    # a collection string is the domain, anything else is the values.
    if (len(args) == 1):
        if ((values is None) and (distribution is None) and (domain is None) and (codomain is None) and (isinstance(args[0], (int, Integer, FindStatCombinatorialMap)) or (isinstance(args[0], str) and (not is_collection(args[0]))))):
            return FindStatMap(args[0])
        if (isinstance(args[0], str) and is_collection(args[0])):
            domain = check_domain(args[0], domain)
        else:
            values = check_values(args[0], values)
    elif (len(args) == 2):
        domain = check_domain(args[0], domain)
        if isinstance(args[1], (int, Integer, str)):
            codomain = check_codomain(args[1], codomain)
        else:
            values = check_values(args[1], values)
    elif (len(args) == 3):
        domain = check_domain(args[0], domain)
        codomain = check_codomain(args[1], codomain)
        values = check_values(args[2], values)
    if (domain is not None):
        domain = FindStatCollection(domain)
    if (codomain is not None):
        codomain = FindStatCollection(codomain)
    if ((values is None) and (distribution is None) and ((domain is not None) or (codomain is not None))):
        return FindStatMaps(domain=domain, codomain=codomain)
    if (values is not None):
        if isinstance(values, (int, Integer, str, FindStatCombinatorialMap)):
            if ((domain is not None) or (codomain is not None)):
                raise ValueError('domain and codomain must not be provided if a map identifier is given')
            return FindStatMapQuery(values_of=values, depth=depth)
        (known_terms, data, domain, codomain, function) = get_values(values, domain, codomain)
        return FindStatMapQuery(data=data, domain=domain, codomain=codomain, depth=depth, known_terms=known_terms, function=function)
    if (distribution is not None):
        if isinstance(distribution, (int, Integer, str, FindStatCombinatorialMap)):
            if ((domain is not None) or (codomain is not None)):
                raise ValueError('domain and codomain must not be provided if a map identifier is given')
            return FindStatMapQuery(distribution_of=distribution, depth=depth)
        # Fixed: unpack all five return values and pass codomain through.
        (known_terms, data, domain, codomain, function) = get_distribution(distribution, domain, codomain)
        return FindStatMapQuery(data=data, domain=domain, codomain=codomain, depth=depth, known_terms=known_terms, function=function)
    raise ValueError('the given arguments cannot be used for a FindStat search')
class Reader():
    """Abstract reader that converts raw input data into a dataset while
    tracking column roles, dropped features, and the features actually used.

    The four read-only accessors are properties: the rest of the class uses
    them as attributes (``set(self.used_features)``, ``self.roles.items()``),
    so plain methods here would raise at runtime.
    """

    def __init__(self, task: Task, *args: Any, **kwargs: Any):
        self.task = task
        self._roles = {}
        self._dropped_features = []
        self._used_array_attrs = {}
        self._used_features = []

    @property
    def roles(self) -> RolesDict:
        """Mapping from column name to its role object."""
        return self._roles

    @property
    def dropped_features(self) -> List[str]:
        """Names of features removed during fitting."""
        return self._dropped_features

    @property
    def used_features(self) -> List[str]:
        """Names of features kept for modelling."""
        return self._used_features

    @property
    def used_array_attrs(self) -> Dict[str, str]:
        """Mapping of array attributes in use."""
        return self._used_array_attrs

    def fit_read(self, train_data: Any, features_names: Optional[List[str]] = None, roles: UserRolesDefinition = None, **kwargs: Any):
        """Infer metadata from training data and read it. Abstract."""
        raise NotImplementedError

    def read(self, data: Any, features_names: Optional[List[str]], **kwargs: Any):
        """Read new data using the already-fitted metadata. Abstract."""
        raise NotImplementedError

    def upd_used_features(self, add: Optional[Sequence[str]] = None, remove: Optional[Sequence[str]] = None):
        """Add and/or remove names from the used-features set."""
        curr_feats = set(self.used_features)
        if add is not None:
            curr_feats = curr_feats.union(add)
        if remove is not None:
            curr_feats = curr_feats - set(remove)
        self._used_features = list(curr_feats)

    @classmethod
    def from_reader(cls, reader: 'Reader', **kwargs) -> 'Reader':
        """Create a reader of type ``cls`` copying the private state of ``reader``.

        Bug fixed: the original assigned into ``cls.__dict__`` — a read-only
        mappingproxy, so every copy attempt raised TypeError; the private
        attributes belong on the new instance.
        """
        new_reader = cls(reader.task, **kwargs)
        for attr in reader.__dict__:
            if attr[0] == '_':
                new_reader.__dict__[attr] = getattr(reader, attr)
        return new_reader

    def cols_by_type(self, col_type: str) -> List[str]:
        """Names of columns whose role name equals ``col_type``."""
        names = []
        for col, role in self.roles.items():
            if role.name == col_type:
                names.append(col)
        return names
def tn(y_true, y_pred):
    """Smoothed true-negative rate: TN / N, with +1 smoothing on both terms."""
    smooth = 1
    pred_pos = K.round(K.clip(y_pred, 0, 1))
    pred_neg = 1 - pred_pos
    true_pos = K.round(K.clip(y_true, 0, 1))
    true_neg = 1 - true_pos
    return (K.sum(true_neg * pred_neg) + smooth) / (K.sum(true_neg) + smooth)
def main(as_module=False):
    """Run the flask CLI; show ``python -m flask`` as prog name when invoked as a module."""
    prog_name = 'python -m flask' if as_module else None
    cli.main(args=sys.argv[1:], prog_name=prog_name)
def _validate_params_axes(params_axes, params):
    """Raise ValueError if any parameter lacks an axis-name entry."""
    axis_names = flax_partitioning.get_axis_names(params_axes)
    flat_params = set(traverse_util.flatten_dict(params, sep='/'))
    flat_axes = set(traverse_util.flatten_dict(axis_names, sep='/'))
    missing_params_axes = flat_params - flat_axes
    if missing_params_axes:
        raise ValueError(f'Missing axis names for parameters: {missing_params_axes}')
def load_model(ckpt_file: str, _config):
    """Restore a JNFExp model from a checkpoint and move it to the GPU."""
    init_params = JNFExp.get_init_params(_config)
    restored = JNFExp.load_from_checkpoint(ckpt_file, **init_params)
    restored.to('cuda')
    return restored
def get_mesh():
    """Cell-centered tensor mesh: 4x4x4 core cells of 10 m, padded by 2 cells
    per side expanding by a factor of 1.3."""
    cs = 10.0
    n_core = 4
    npad = 2
    # Same cell spec on every axis (core counts are all equal).
    h = [(cs, npad, -1.3), (cs, n_core), (cs, npad, 1.3)]
    return discretize.TensorMesh([h, h, h], 'CCC')
def save_masks(masks, index, categories, mask_name, outdir):
    """Write each mask to ``outdir`` as ``<mask_name>_<k>_mask_<category>.npy``,
    numbering from ``index + 1``."""
    masks_np = masks.cpu().detach().numpy()
    for offset, (mask, category) in enumerate(zip(masks_np, categories), start=index):
        filename = f'{mask_name}_{offset + 1}_mask_{category}.npy'
        np.save(os.path.join(outdir, filename), mask)
@pytest.fixture(scope='function')
def config_montecarlo_1e5_verysimple(example_configuration_dir):
    """Tardis Configuration loaded from the 'very simple' v1 example YAML."""
    # NOTE(review): the decorator was truncated to a bare "(scope='function')"
    # line (a SyntaxError); restored as @pytest.fixture — confirm upstream.
    return Configuration.from_yaml((example_configuration_dir / 'tardis_configv1_verysimple.yml'))
def oracle_score(confidence: ConfidenceFeatures):
    """Simulated oracle confidence: a draw in [0.5, 1) when the label is 1,
    in [0, 0.5) when it is 0."""
    label = ConfidenceEstimator.convert_to_labels([confidence])[0]
    high_draw = np.random.random() / 2 + 0.5
    low_draw = np.random.random() / 2
    return label * high_draw + (1 - label) * low_draw
def weighted_sparse_xentropy(y_true, y_pred, weights, from_logits=False):
    """Sparse categorical cross-entropy with a per-class weight applied per element.

    Args:
        y_true: integer class labels; flattened internally, original shape restored
            on the returned loss.
        y_pred: class scores with the class axis last.
        weights: per-class weight vector indexed by the true label.
        from_logits: forwarded to the backend cross-entropy.
    """
    # Remember both the dynamic and static shapes so the per-element loss can
    # be reshaped back to match y_true.
    tshp = tf.shape(y_true)
    tshp_stat = y_true.shape
    y_true = tf.reshape(y_true, shape=[(- 1)])
    y_pred = tf.reshape(y_pred, shape=[(- 1), y_pred.shape[(- 1)]])
    # Look up each element's weight by its true class index.
    weights = tf.gather(weights, tf.cast(y_true, tf.int32))
    xent = backend.sparse_categorical_crossentropy(y_true, y_pred, from_logits=from_logits, axis=(- 1))
    assert (weights.shape.rank == xent.shape.rank)
    w_xent = (weights * xent)
    w_xent = tf.reshape(w_xent, tshp)
    w_xent.set_shape(tshp_stat)
    return w_xent
def phase_net_file():
    """Write a minimal prototxt defining a Python 'PhaseLayer' net to a temp
    file and return its path (delete=False: the caller opens it by name)."""
    with tempfile.NamedTemporaryFile(mode='w+', delete=False) as f:
        f.write("name: 'pythonnet' force_backward: true\n layer { type: 'Python' name: 'layer' top: 'phase'\n python_param { module: 'test_python_layer' layer: 'PhaseLayer' } }\n ")
    return f.name
def write_vnnlib_spec(upper_bound: torch.Tensor, lower_bound: torch.Tensor, correct_label: int, path: str):
    """Emit a VNN-LIB robustness spec for an MNIST classifier to ``path``.

    Declares one input variable per element of ``upper_bound``/``lower_bound``
    (assumes both are shaped (1, n_inputs) — confirm with callers), 10 output
    variables, box constraints on the inputs, and a disjunction asserting some
    wrong class scores at least as high as ``correct_label`` (i.e. the spec is
    violated iff the network is robust).
    """
    # The classifier is assumed to have 10 outputs (MNIST digits).
    output_class = 10
    os.makedirs(os.path.dirname(path), exist_ok=True)
    with open(path, 'w') as f:
        f.write(f'''; Mnist property with label: {correct_label}.
''')
        f.write(f'''; Input variables:
''')
        f.write('\n')
        for i in range(upper_bound.shape[1]):
            f.write(f'''(declare-const X_{i} Real)
''')
        f.write('\n')
        f.write(f'''; Output variables:
''')
        f.write('\n')
        for i in range(output_class):
            f.write(f'''(declare-const Y_{i} Real)
''')
        f.write('\n')
        f.write(f'''; Input constraints:
''')
        # Box constraints: lower_bound <= X_i <= upper_bound, element-wise.
        for i in range(upper_bound.shape[1]):
            f.write(f'''(assert (<= X_{i} {upper_bound[0][i].item()}))
''')
            f.write(f'''(assert (>= X_{i} {lower_bound[0][i].item()}))
''')
        f.write('\n')
        f.write('\n')
        f.write(f'''; Output constraints:
''')
        # Disjunction over all wrong labels beating (or tying) the correct one.
        f.write('(assert (or\n')
        for i in range(output_class):
            if (i != correct_label):
                f.write(f''' (and (>= Y_{i} Y_{correct_label}))
''')
        f.write('))')
def check_acc(lag, k):
    """Validate that ``lag`` and ``k`` are both positive integers.

    Raises:
        ValueError: if either argument is not an int or is <= 0.
    """
    if not isinstance(lag, int) or lag <= 0:
        raise ValueError('Error, parameter lag must be an int type and larger than 0.')
    # Bug fix: the original re-tested ``lag <= 0`` here, so a non-positive
    # integer ``k`` slipped through validation. The original's
    # ``try/except ValueError: raise`` wrapper was a no-op and is removed.
    if not isinstance(k, int) or k <= 0:
        raise ValueError('Error, parameter k must be an int type and larger than 0.')
def _gen_random_bool_series(size: int, random_state: Union[(int, np.random.RandomState)]=0) -> pd.Series:
    """Series of ``size`` booleans drawn uniformly under the given random state."""
    rng = _resolve_random_state(random_state)
    values = rng.choice([True, False], size=size)
    return pd.Series(values)
def rnd_uniform(low, high):
    """Uniform sample from [low, high); returns ``low`` exactly when the bounds coincide."""
    if low != high:
        return np.random.uniform(low, high)
    return low
def test_get_reference_value_3(test_case_mock):
    """A nested FieldReference (var.foo.bar) resolves through the local namespace."""
    ctx = ExecutionContext(ModuleProvider())
    backing_value = MagicMock(foo=MagicMock(bar=5))
    var = vr.VariableReference(test_case_mock, int)
    inner = vr.FieldReference(var, gao.GenericField(MagicMock, 'foo', int))
    ref = vr.FieldReference(inner, gao.GenericField(MagicMock, 'bar', int))
    ctx._local_namespace = {ctx._variable_names.get_name(var): backing_value}
    assert ctx.get_reference_value(ref) == 5
def test_max_batch_size():
    """A smaller max_split_size should not be drastically slower than a large one."""
    coords = np.random.randint(low=0, high=1848, size=(40000, 2))

    t0 = time.time()
    ensure_spacing(coords, spacing=100, min_split_size=50, max_split_size=2000)
    elapsed_small = time.time() - t0

    t0 = time.time()
    ensure_spacing(coords, spacing=100, min_split_size=50, max_split_size=20000)
    elapsed_large = time.time() - t0

    # NOTE(review): timing-based assertion — can be flaky on loaded machines.
    assert elapsed_small < 1.33 * elapsed_large
class TrackMessages(Callback):
    """Callback that accumulates per-edge message records each iteration.

    Args:
        keys: edge-data fields to record; defaults to
            ('a', 'n_iter', 'direction').
    """

    def __init__(self, keys=('a', 'n_iter', 'direction')):
        # Bug fix: the default used to be a mutable list literal shared across
        # all instances. A tuple default copied into a fresh list per instance
        # is backward-compatible and safe.
        self.keys = list(keys)
        self.records = []

    def __call__(self, algo, i, max_iter):
        if i == 0:
            # Reset at the start of each run so repeated runs don't accumulate.
            self.records = []
        self.records += algo.get_edges_data(self.keys)

    def get_dataframe(self):
        """Return all collected records as a pandas DataFrame."""
        return pd.DataFrame(self.records)
def main():
    """Generate C++ lookup tables (shuffle masks + index table) for the SIMD
    UTF-8 decoder, printed to stdout.

    Improvements over the original: ``sorted(set)`` replaces the
    list-comprehension-copy-then-sort idiom, the repeatedly rebuilt
    concatenation of the three sorted lists is hoisted, and the index map is
    built with enumerate.
    """
    easycase12 = set()
    easycase123 = set()
    easycase1234 = set()
    # Collect the distinct code-point-size signatures for each "easy case".
    for x in range(1 << 12):
        sizes = compute_code_point_size(x)
        if easy_case12(sizes):
            easycase12.add(tuple(grab_easy_case12_code_point_size(sizes)))
        elif easy_case123(sizes):
            easycase123.add(tuple(grab_easy_case123_code_point_size(sizes)))
        elif easy_case1234(sizes):
            easycase1234.add(tuple(grab_easy_case1234_code_point_size(sizes)))
    easycase12sorted = sorted(easycase12)
    easycase123sorted = sorted(easycase123)
    easycase1234sorted = sorted(easycase1234)
    all_cases = easycase12sorted + easycase123sorted + easycase1234sorted
    print('#include <cstdint>')
    allshuf = (([buildshuf12_twobytes(z) for z in easycase12sorted] + [buildshuf123_threebytes(z) for z in easycase123sorted]) + [buildshuf1234_fourbytes(z) for z in easycase1234sorted])
    print(('const uint8_t shufutf8[' + str(len(all_cases)) + '][16] = '))
    print(cpp_arrayarray_initializer(allshuf), end=';\n')
    print(('/* number of two bytes : ' + str(len(easycase12sorted)) + ' */'))
    print(('/* number of two + three bytes : ' + str(len(easycase12sorted + easycase123sorted)) + ' */'))
    print(('/* number of two + three + four bytes : ' + str(len(all_cases)) + ' */'))
    # Map each signature to its row in the shuffle table.
    index = {t: c for c, t in enumerate(all_cases)}
    arrg = []
    for x in range(1 << 12):
        sizes = compute_code_point_size(x)
        if easy_case12(sizes):
            key = tuple(grab_easy_case12_code_point_size(sizes))
        elif easy_case123(sizes):
            key = tuple(grab_easy_case123_code_point_size(sizes))
        elif easy_case1234(sizes):
            key = tuple(grab_easy_case1234_code_point_size(sizes))
        else:
            key = None
        if key is None:
            # Sentinel row for patterns not handled by a fast path.
            arrg.append((209, 12))
        else:
            arrg.append((index[key], sum(key)))
    print(('const uint8_t utf8bigindex[' + str(len(arrg)) + '][2] = '))
    print(cpp_arrayarray_initializer(arrg), end=';\n')
@pytest.mark.parametrize('array', [
    ak.contents.NumpyArray(np.arange(4)),
    ak.contents.IndexedArray(ak.index.Index64(np.arange(4, dtype=np.int64)), ak.contents.NumpyArray(np.arange(4))),
    ak.contents.IndexedOptionArray(ak.index.Index64(np.arange(4, dtype=np.int64)), ak.contents.NumpyArray(np.arange(4))),
    ak.contents.ListOffsetArray(ak.index.Index64(np.arange(4, dtype=np.int64)), ak.contents.NumpyArray(np.arange(3))),
    ak.contents.ListArray(ak.index.Index64(np.arange(3, dtype=np.int64)), ak.index.Index64(np.arange(1, 4, dtype=np.int64)), ak.contents.NumpyArray(np.arange(3))),
    ak.contents.RegularArray(ak.contents.NumpyArray(np.arange(12)), size=3),
])
def test_highlevel_lowlevel(array):
    """ak.type on a low-level layout reports an ArrayType wrapping the form's type."""
    # NOTE(review): the decorator was truncated to a bare ".parametrize(...)"
    # line (a SyntaxError); restored as @pytest.mark.parametrize.
    layout = ak.to_layout(array)
    assert isinstance(ak.type(layout), ak.types.ArrayType)
    assert (layout.form.type == ak.type(layout).content)
class ConformerPositionwiseFeedForward(rf.Module):
    """Conformer position-wise feed-forward: linear -> activation -> dropout -> linear."""

    def __init__(self, out_dim: Dim, *, ff_dim: Dim, dropout: float, activation: Callable[([Tensor], Tensor)]):
        super().__init__()
        self.out_dim = out_dim
        self.dropout = dropout
        self.dropout_broadcast = rf.dropout_broadcast_default()
        self.activation = activation
        # Expand to the feed-forward dim, then project back to the model dim.
        self.linear_ff = rf.Linear(out_dim, ff_dim)
        self.linear_out = rf.Linear(ff_dim, out_dim)

    def __call__(self, inp: Tensor) -> Tensor:
        hidden = self.activation(self.linear_ff(inp))
        hidden = rf.dropout(hidden, self.dropout, axis=(self.dropout_broadcast and self.linear_ff.out_dim))
        return self.linear_out(hidden)
def main(args):
    """Person-search demo: embed a query crop, then rank detections in each
    gallery image by embedding similarity and visualize the results.
    """
    cfg = get_default_cfg()
    if args.cfg_file:
        cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    device = torch.device(cfg.DEVICE)
    print('Creating model')
    model = SeqNet(cfg)
    model.to(device)
    model.eval()
    resume_from_ckpt(args.ckpt, model)
    # Query: a single image with a fixed person box; the model returns its embedding.
    query_img = [F.to_tensor(Image.open('demo_imgs/query.jpg').convert('RGB')).to(device)]
    query_target = [{'boxes': torch.tensor([[0, 0, 466, 943]]).to(device)}]
    query_feat = model(query_img, query_target)[0]
    gallery_img_paths = sorted(glob('demo_imgs/gallery-*.jpg'))
    for gallery_img_path in gallery_img_paths:
        print(f'Processing {gallery_img_path}')
        gallery_img = [F.to_tensor(Image.open(gallery_img_path).convert('RGB')).to(device)]
        gallery_output = model(gallery_img)[0]
        detections = gallery_output['boxes']
        gallery_feats = gallery_output['embeddings']
        # Cosine-style score: dot product of gallery embeddings with the query embedding.
        similarities = gallery_feats.mm(query_feat.view((- 1), 1)).squeeze()
        visualize_result(gallery_img_path, detections.cpu().numpy(), similarities)
class IInt8EntropyCalibrator2(CalibratorBase, trt.IInt8EntropyCalibrator2):
    """INT8 calibrator combining the shared CalibratorBase logic with
    TensorRT's entropy-calibration-v2 interface."""

    def __init__(self, *args, **kwargs):
        # Initialize both bases explicitly (no cooperative super()): all
        # constructor arguments go to CalibratorBase, while the TensorRT
        # base is initialized without arguments.
        CalibratorBase.__init__(self, *args, **kwargs)
        trt.IInt8EntropyCalibrator2.__init__(self)
def is_FriCASElement(x):
    """Deprecated: return whether ``x`` is a FriCASElement.

    Use ``isinstance(x, sage.interfaces.abc.FriCASElement)`` instead; this
    shim only emits the deprecation warning and forwards to isinstance.
    """
    from sage.misc.superseded import deprecation
    deprecation(34804, 'the function is_FriCASElement is deprecated; use isinstance(x, sage.interfaces.abc.FriCASElement) instead')
    return isinstance(x, FriCASElement)
class Random(_random.Random):
VERSION = 3
def __init__(self, x=None):
self.seed(x)
self.gauss_next = None
def seed(self, a=None):
if (a is None):
try:
a = int(_hexlify(_urandom(2500)), 16)
except NotImplementedError:
import time
a = int((time.time() * 256))
super().seed(a)
self.gauss_next = None
def getstate(self):
return (self.VERSION, super().getstate(), self.gauss_next)
def setstate(self, state):
version = state[0]
if (version == 3):
(version, internalstate, self.gauss_next) = state
super().setstate(internalstate)
elif (version == 2):
(version, internalstate, self.gauss_next) = state
try:
internalstate = tuple(((int(x) % (2 ** 32)) for x in internalstate))
except ValueError as e:
raise TypeError(e)
super().setstate(internalstate)
else:
raise ValueError(('state with version %s passed to Random.setstate() of version %s' % (version, self.VERSION)))
def jumpahead(self, n):
s = (repr(n) + repr(self.getstate()))
n = int(_hashlib.new('sha512', s).hexdigest(), 16)
super().jumpahead(n)
def __getstate__(self):
return self.getstate()
def __setstate__(self, state):
self.setstate(state)
def __reduce__(self):
return (self.__class__, (), self.getstate())
def randrange(self, start, stop=None, step=1, _int=int, _maxwidth=(1 << BPF)):
istart = _int(start)
if (istart != start):
raise ValueError('non-integer arg 1 for randrange()')
if (stop is None):
if (istart > 0):
if (istart >= _maxwidth):
return self._randbelow(istart)
return _int((self.random() * istart))
raise ValueError('empty range for randrange()')
istop = _int(stop)
if (istop != stop):
raise ValueError('non-integer stop for randrange()')
width = (istop - istart)
if ((step == 1) and (width > 0)):
if (width >= _maxwidth):
return _int((istart + self._randbelow(width)))
return _int((istart + _int((self.random() * width))))
if (step == 1):
raise ValueError(('empty range for randrange() (%d,%d, %d)' % (istart, istop, width)))
istep = _int(step)
if (istep != step):
raise ValueError('non-integer step for randrange()')
if (istep > 0):
n = (((width + istep) - 1) // istep)
elif (istep < 0):
n = (((width + istep) + 1) // istep)
else:
raise ValueError('zero step for randrange()')
if (n <= 0):
raise ValueError('empty range for randrange()')
if (n >= _maxwidth):
return (istart + (istep * self._randbelow(n)))
return (istart + (istep * _int((self.random() * n))))
def randint(self, a, b):
return self.randrange(a, (b + 1))
def _randbelow(self, n, _log=_log, _int=int, _maxwidth=(1 << BPF), _Method=_MethodType, _BuiltinMethod=_BuiltinMethodType):
try:
getrandbits = self.getrandbits
except AttributeError:
pass
else:
if ((type(self.random) is _BuiltinMethod) or (type(getrandbits) is _Method)):
k = _int((1.00001 + _log((n - 1), 2.0)))
r = getrandbits(k)
while (r >= n):
r = getrandbits(k)
return r
if (n >= _maxwidth):
_warn('Underlying random() generator does not supply \nenough bits to choose from a population range this large')
return _int((self.random() * n))
def choice(self, seq):
return seq[int((self.random() * len(seq)))]
def shuffle(self, x, random=None):
if (random is None):
random = self.random
_int = int
for i in reversed(range(1, len(x))):
j = _int((random() * (i + 1)))
(x[i], x[j]) = (x[j], x[i])
def sample(self, population, k):
n = len(population)
if (not (0 <= k <= n)):
raise ValueError('sample larger than population')
random = self.random
_int = int
result = ([None] * k)
setsize = 21
if (k > 5):
setsize += (4 ** _ceil(_log((k * 3), 4)))
if ((n <= setsize) or hasattr(population, 'keys')):
pool = list(population)
for i in range(k):
j = _int((random() * (n - i)))
result[i] = pool[j]
pool[j] = pool[((n - i) - 1)]
else:
try:
selected = set()
selected_add = selected.add
for i in range(k):
j = _int((random() * n))
while (j in selected):
j = _int((random() * n))
selected_add(j)
result[i] = population[j]
except (TypeError, KeyError):
if isinstance(population, list):
raise
return self.sample(tuple(population), k)
return result
def uniform(self, a, b):
return (a + ((b - a) * self.random()))
def triangular(self, low=0.0, high=1.0, mode=None):
u = self.random()
try:
c = (0.5 if (mode is None) else ((mode - low) / (high - low)))
except ZeroDivisionError:
return low
if (u > c):
u = (1.0 - u)
c = (1.0 - c)
(low, high) = (high, low)
return (low + ((high - low) * ((u * c) ** 0.5)))
def normalvariate(self, mu, sigma):
random = self.random
while 1:
u1 = random()
u2 = (1.0 - random())
z = ((NV_MAGICCONST * (u1 - 0.5)) / u2)
zz = ((z * z) / 4.0)
if (zz <= (- _log(u2))):
break
return (mu + (z * sigma))
def lognormvariate(self, mu, sigma):
return _exp(self.normalvariate(mu, sigma))
def expovariate(self, lambd):
return ((- _log((1.0 - self.random()))) / lambd)
def vonmisesvariate(self, mu, kappa):
random = self.random
if (kappa <= 1e-06):
return (TWOPI * random())
s = (0.5 / kappa)
r = (s + _sqrt((1.0 + (s * s))))
while 1:
u1 = random()
z = _cos((_pi * u1))
d = (z / (r + z))
u2 = random()
if ((u2 < (1.0 - (d * d))) or (u2 <= ((1.0 - d) * _exp(d)))):
break
q = (1.0 / r)
f = ((q + z) / (1.0 + (q * z)))
u3 = random()
if (u3 > 0.5):
theta = ((mu + _acos(f)) % TWOPI)
else:
theta = ((mu - _acos(f)) % TWOPI)
return theta
def gammavariate(self, alpha, beta):
    """Gamma distribution (not the gamma function!) with shape alpha and
    scale beta, both required to be > 0.

    Three different algorithms are used depending on the value of alpha.

    :raises ValueError: if alpha <= 0 or beta <= 0.
    """
    if ((alpha <= 0.0) or (beta <= 0.0)):
        raise ValueError('gammavariate: alpha and beta must be > 0.0')
    random = self.random
    if (alpha > 1.0):
        # alpha > 1: rejection sampling; the squeeze test below accepts
        # most candidates without evaluating the exact log condition.
        ainv = _sqrt(((2.0 * alpha) - 1.0))
        bbb = (alpha - LOG4)
        ccc = (alpha + ainv)
        while 1:
            u1 = random()
            # Avoid extreme u1 values that would make the logit blow up.
            if (not (1e-07 < u1 < 0.9999999)):
                continue
            u2 = (1.0 - random())
            v = (_log((u1 / (1.0 - u1))) / ainv)
            x = (alpha * _exp(v))
            z = ((u1 * u1) * u2)
            r = ((bbb + (ccc * v)) - x)
            if ((((r + SG_MAGICCONST) - (4.5 * z)) >= 0.0) or (r >= _log(z))):
                return (x * beta)
    elif (alpha == 1.0):
        # alpha == 1 reduces to the exponential distribution; guard
        # against log(0) by rejecting tiny u.
        u = random()
        while (u <= 1e-07):
            u = random()
        return ((- _log(u)) * beta)
    else:
        # 0 < alpha < 1: rejection sampling from a power/exponential
        # mixture proposal.
        while 1:
            u = random()
            b = ((_e + alpha) / _e)
            p = (b * u)
            if (p <= 1.0):
                x = (p ** (1.0 / alpha))
            else:
                x = (- _log(((b - p) / alpha)))
            u1 = random()
            if (p > 1.0):
                if (u1 <= (x ** (alpha - 1.0))):
                    break
            elif (u1 <= _exp((- x))):
                break
        return (x * beta)
def gauss(self, mu, sigma):
    """Gaussian distribution with mean mu and standard deviation sigma.

    Uses the Box-Muller transform, which produces values in pairs: one is
    returned immediately, the companion is cached in self.gauss_next for
    the next call.  NOTE(review): the unguarded read-modify-write of
    self.gauss_next makes this not safe for concurrent callers sharing
    one generator instance.
    """
    random = self.random
    z = self.gauss_next
    self.gauss_next = None
    if (z is None):
        x2pi = (random() * TWOPI)
        # 1.0 - random() keeps the log argument strictly positive.
        g2rad = _sqrt(((- 2.0) * _log((1.0 - random()))))
        z = (_cos(x2pi) * g2rad)
        # Cache the companion value for the next call.
        self.gauss_next = (_sin(x2pi) * g2rad)
    return (mu + (z * sigma))
def betavariate(self, alpha, beta):
    """Beta distribution on (0, 1), with parameters alpha > 0 and beta > 0.

    Generated as X / (X + Y) where X ~ Gamma(alpha, 1) and
    Y ~ Gamma(beta, 1).
    """
    x = self.gammavariate(alpha, 1.0)
    if (x == 0):
        return 0.0
    return x / (x + self.gammavariate(beta, 1.0))
def paretovariate(self, alpha):
    """Pareto distribution with shape parameter alpha."""
    # 1.0 - random() avoids raising zero to a power below.
    tail = (1.0 - self.random())
    return (1.0 / pow(tail, (1.0 / alpha)))
def weibullvariate(self, alpha, beta):
u = (1.0 - self.random())
return (alpha * pow((- _log(u)), (1.0 / beta))) |
(config_path='cfgs', config_name='config')
def main(cfg):
    """Entry point: build the imitation-learning workspace and train it.

    When cfg.load_bc is set and the configured behavior-cloning weight
    file exists, the workspace is warm-started from that snapshot first.
    """
    # Imported lazily inside the entry point (the decorator fragment above
    # suggests a hydra main; config machinery runs before this import).
    from train_robot_ssl_hand import WorkspaceIL as W
    workspace = W(cfg)
    if cfg.load_bc:
        snapshot = Path(cfg.bc_weight)
        if snapshot.exists():
            print(f'resuming bc: {snapshot}')
            workspace.load_snapshot(snapshot)
    workspace.train_il()
def SuffixNet(name, net, prefix_len, outputs):
    """Clone the tail of `net` (every op after the first prefix_len ops)
    into a new net called `name`.

    External inputs of the clone are recomputed as all op inputs that are
    not produced within the suffix itself; `outputs` become its external
    outputs. Returns (new_net, output blob refs).
    """
    outputs = BlobReferenceList(outputs)
    for blob in outputs:
        assert net.BlobIsDefined(blob)
    suffix = net.Clone(name)
    proto = suffix.Proto()
    # Start from a clean proto, then copy over only the suffix ops.
    del proto.op[:]
    del proto.external_input[:]
    del proto.external_output[:]
    proto.op.extend(net.Proto().op[prefix_len:])
    undefined_inputs = [
        blob
        for op in proto.op
        for blob in op.input
        if (not suffix.BlobIsDefined(blob))
    ]
    proto.external_input.extend(undefined_inputs)
    output_names = [str(o) for o in outputs]
    proto.external_output.extend(output_names)
    return (suffix, [suffix.GetBlobRef(o) for o in output_names])
class NezhaForMaskedLM(metaclass=DummyObject):
    """Import-time placeholder: instantiating it raises a helpful error
    explaining that the torch backend is required."""

    _backends = ['torch']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['torch'])
def main():
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
(model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
(model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.warning((f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool((training_args.local_rank != (- 1)))}, 16-bits training: {training_args.fp16}'))
logger.info(f'Training/evaluation parameters {training_args}')
if ((data_args.source_prefix is None) and (model_args.model_name_or_path in ['t5-small', 't5-base', 't5-large', 't5-3b', 't5-11b'])):
logger.warning("You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with `--source_prefix 'summarize: ' `")
last_checkpoint = None
if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if ((last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
elif ((last_checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
set_seed(training_args.seed)
if (data_args.dataset_name is not None):
raw_datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)
else:
data_files = {}
if (data_args.train_file is not None):
data_files['train'] = data_args.train_file
extension = data_args.train_file.split('.')[(- 1)]
if (data_args.validation_file is not None):
data_files['validation'] = data_args.validation_file
extension = data_args.validation_file.split('.')[(- 1)]
if (data_args.test_file is not None):
data_files['test'] = data_args.test_file
extension = data_args.test_file.split('.')[(- 1)]
raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir)
config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
model = AutoModelForSeq2SeqLM.from_pretrained(model_args.model_name_or_path, from_tf=bool(('.ckpt' in model_args.model_name_or_path)), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
model.resize_token_embeddings(len(tokenizer))
if ((model.config.decoder_start_token_id is None) and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast))):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.lang)
if (model.config.decoder_start_token_id is None):
raise ValueError('Make sure that `config.decoder_start_token_id` is correctly defined')
if (hasattr(model.config, 'max_position_embeddings') and (model.config.max_position_embeddings < data_args.max_source_length)):
if (model_args.resize_position_embeddings is None):
logger.warning(f"Increasing the model's number of position embedding vectors from {model.config.max_position_embeddings} to {data_args.max_source_length}.")
model.resize_position_embeddings(data_args.max_source_length)
elif model_args.resize_position_embeddings:
model.resize_position_embeddings(data_args.max_source_length)
else:
raise ValueError(f"`--max_source_length` is set to {data_args.max_source_length}, but the model only has {model.config.max_position_embeddings} position encodings. Consider either reducing `--max_source_length` to {model.config.max_position_embeddings} or to automatically resize the model's position encodings by passing `--resize_position_embeddings`.")
prefix = (data_args.source_prefix if (data_args.source_prefix is not None) else '')
if training_args.do_train:
column_names = raw_datasets['train'].column_names
elif training_args.do_eval:
column_names = raw_datasets['validation'].column_names
elif training_args.do_predict:
column_names = raw_datasets['test'].column_names
else:
logger.info('There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.')
return
if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
assert (data_args.lang is not None), f'{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --lang argument'
tokenizer.src_lang = data_args.lang
tokenizer.tgt_lang = data_args.lang
forced_bos_token_id = (tokenizer.lang_code_to_id[data_args.forced_bos_token] if (data_args.forced_bos_token is not None) else None)
model.config.forced_bos_token_id = forced_bos_token_id
dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
if (data_args.text_column is None):
text_column = (dataset_columns[0] if (dataset_columns is not None) else column_names[0])
else:
text_column = data_args.text_column
if (text_column not in column_names):
raise ValueError(f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}")
if (data_args.summary_column is None):
summary_column = (dataset_columns[1] if (dataset_columns is not None) else column_names[1])
else:
summary_column = data_args.summary_column
if (summary_column not in column_names):
raise ValueError(f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}")
max_target_length = data_args.max_target_length
padding = ('max_length' if data_args.pad_to_max_length else False)
if ((training_args.label_smoothing_factor > 0) and (not hasattr(model, 'prepare_decoder_input_ids_from_labels'))):
logger.warning(f'label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory')
def preprocess_function(examples):
    """Tokenize one batch: model inputs from the text column, labels from
    the summary column.

    Rows where either field is None are dropped, since the tokenizer
    cannot handle None entries.
    """
    (inputs, targets) = ([], [])
    for i in range(len(examples[text_column])):
        if ((examples[text_column][i] is not None) and (examples[summary_column][i] is not None)):
            inputs.append(examples[text_column][i])
            targets.append(examples[summary_column][i])
    # BUGFIX: the original re-assigned inputs/targets back to the raw
    # columns here, discarding the None-filtering above (and letting None
    # entries crash the tokenizer); those two lines are removed.
    inputs = [(prefix + inp) for inp in inputs]
    model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
    # Tokenize the targets in target-tokenizer mode (matters for
    # multilingual tokenizers).
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
    # When padding to max length, replace pad token ids in the labels by
    # -100 so the loss ignores padding positions.
    if ((padding == 'max_length') and data_args.ignore_pad_token_for_loss):
        labels['input_ids'] = [[(l if (l != tokenizer.pad_token_id) else (- 100)) for l in label] for label in labels['input_ids']]
    model_inputs['labels'] = labels['input_ids']
    return model_inputs
if training_args.do_train:
if ('train' not in raw_datasets):
raise ValueError('--do_train requires a train dataset')
train_dataset = raw_datasets['train']
if (data_args.max_train_samples is not None):
train_dataset = train_dataset.select(range(data_args.max_train_samples))
with training_args.main_process_first(desc='train dataset map pre-processing'):
train_dataset = train_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on train dataset')
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
if ('validation' not in raw_datasets):
raise ValueError('--do_eval requires a validation dataset')
eval_dataset = raw_datasets['validation']
if (data_args.max_eval_samples is not None):
eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
with training_args.main_process_first(desc='validation dataset map pre-processing'):
eval_dataset = eval_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on validation dataset')
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
if ('test' not in raw_datasets):
raise ValueError('--do_predict requires a test dataset')
predict_dataset = raw_datasets['test']
if (data_args.max_predict_samples is not None):
predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
with training_args.main_process_first(desc='prediction dataset map pre-processing'):
predict_dataset = predict_dataset.map(preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=(not data_args.overwrite_cache), desc='Running tokenizer on prediction dataset')
label_pad_token_id = ((- 100) if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=(8 if training_args.fp16 else None))
metric = load_metric('rouge')
def postprocess_text(preds, labels):
    """Strip whitespace and put every sentence on its own line
    (rougeLSum expects newline-separated sentences)."""
    stripped_preds = [p.strip() for p in preds]
    stripped_labels = [lab.strip() for lab in labels]
    joined_preds = ['\n'.join(nltk.sent_tokenize(p)) for p in stripped_preds]
    joined_labels = ['\n'.join(nltk.sent_tokenize(lab)) for lab in stripped_labels]
    return (joined_preds, joined_labels)
def compute_metrics(eval_preds):
    """Compute ROUGE scores (plus mean generated length) for a batch of
    generated token ids against the reference labels."""
    (preds, labels) = eval_preds
    if isinstance(preds, tuple):
        # Some models return (generated_ids, ...) tuples; keep the ids.
        preds = preds[0]
    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
    if data_args.ignore_pad_token_for_loss:
        # -100 cannot be decoded; map it back to the pad token id first.
        labels = np.where((labels != (- 100)), labels, tokenizer.pad_token_id)
    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
    # rougeLSum expects newline-separated sentences.
    (decoded_preds, decoded_labels) = postprocess_text(decoded_preds, decoded_labels)
    result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
    # Keep the aggregated mid f-measure of each ROUGE variant, as a percentage.
    result = {key: (value.mid.fmeasure * 100) for (key, value) in result.items()}
    prediction_lens = [np.count_nonzero((pred != tokenizer.pad_token_id)) for pred in preds]
    result['gen_len'] = np.mean(prediction_lens)
    result = {k: round(v, 4) for (k, v) in result.items()}
    return result
trainer = Seq2SeqTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=(compute_metrics if training_args.predict_with_generate else None))
if training_args.do_train:
checkpoint = None
if (training_args.resume_from_checkpoint is not None):
checkpoint = training_args.resume_from_checkpoint
elif (last_checkpoint is not None):
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model()
metrics = train_result.metrics
max_train_samples = (data_args.max_train_samples if (data_args.max_train_samples is not None) else len(train_dataset))
metrics['train_samples'] = min(max_train_samples, len(train_dataset))
trainer.log_metrics('train', metrics)
trainer.save_metrics('train', metrics)
trainer.save_state()
results = {}
max_length = (training_args.generation_max_length if (training_args.generation_max_length is not None) else data_args.val_max_target_length)
num_beams = (data_args.num_beams if (data_args.num_beams is not None) else training_args.generation_num_beams)
if training_args.do_eval:
logger.info('*** Evaluate ***')
metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix='eval')
max_eval_samples = (data_args.max_eval_samples if (data_args.max_eval_samples is not None) else len(eval_dataset))
metrics['eval_samples'] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics('eval', metrics)
trainer.save_metrics('eval', metrics)
if training_args.do_predict:
logger.info('*** Predict ***')
predict_results = trainer.predict(predict_dataset, metric_key_prefix='predict', max_length=max_length, num_beams=num_beams)
metrics = predict_results.metrics
max_predict_samples = (data_args.max_predict_samples if (data_args.max_predict_samples is not None) else len(predict_dataset))
metrics['predict_samples'] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics('predict', metrics)
trainer.save_metrics('predict', metrics)
if trainer.is_world_process_zero():
if training_args.predict_with_generate:
predictions = tokenizer.batch_decode(predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True)
predictions = [pred.strip() for pred in predictions]
output_prediction_file = os.path.join(training_args.output_dir, 'generated_predictions.txt')
with open(output_prediction_file, 'w') as writer:
writer.write('\n'.join(predictions))
kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'summarization'}
if (data_args.dataset_name is not None):
kwargs['dataset_tags'] = data_args.dataset_name
if (data_args.dataset_config_name is not None):
kwargs['dataset_args'] = data_args.dataset_config_name
kwargs['dataset'] = f'{data_args.dataset_name} {data_args.dataset_config_name}'
else:
kwargs['dataset'] = data_args.dataset_name
if (data_args.lang is not None):
kwargs['language'] = data_args.lang
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results |
def build_dataloader(dataset, image_set, cfg):
    """Build a DataLoader for the given split; shuffling only for 'train'.

    Batch size comes from cfg['DATA'][<SPLIT>]['BATCH_SIZE'], worker count
    from cfg['DATA']['NUM_WORKER'].
    """
    split_cfg = cfg['DATA'][image_set.upper()]
    return DataLoader(
        dataset=dataset,
        batch_size=split_cfg['BATCH_SIZE'],
        shuffle=(image_set == 'train'),
        num_workers=cfg['DATA']['NUM_WORKER'],
    )
def infer_tasklet_connectors(sdfg: SDFG, state: SDFGState, node: Tasklet, inferred: TypeInferenceDict):
    """Infer the types of a tasklet's output connectors from its inputs.

    Results are written into `inferred` under (node, connector, False)
    keys; input connectors are keyed with True.

    :raises NotImplementedError: for tasklets not written in Python.
    :raises TypeError: if an input is untyped or an output stays uninferable.
    """
    if (node.code.language != dtypes.Language.Python):
        raise NotImplementedError('Tasklet inference for other languages than Python not supported')
    # All inputs must already carry a type before outputs can be derived.
    if any(((inferred[(node, conn, True)].type is None) for conn in node.in_connectors)):
        raise TypeError(('Cannot infer output connectors of tasklet "%s", not all input connectors have types' % str(node)))
    # Imported locally (presumably to avoid an import cycle at load time).
    from dace.codegen.tools.type_inference import infer_types
    # Symbols visible to the tasklet code: state symbols plus typed inputs.
    syms = state.symbols_defined_at(node)
    in_syms = {}
    for conn in node.in_connectors:
        if (inferred[(node, conn, True)].type is not None):
            # Prefer the already-inferred entry over the declared type.
            in_syms[conn] = inferred[(node, conn, True)]
        else:
            in_syms[conn] = node.in_connectors[conn]
    syms.update(in_syms)
    new_syms = infer_types(node.code.code, syms)
    # Fill in any output connector that is still untyped.
    for cname in node.out_connectors:
        if (inferred[(node, cname, False)].type is None):
            if (cname not in new_syms):
                raise TypeError(('Cannot infer type of tasklet %s output "%s", please specify manually.' % (node.label, cname)))
            inferred[(node, cname, False)] = new_syms[cname]
class TFBertForNextSentencePrediction():
    """Placeholder stub: constructing it reports that TensorFlow is
    required but not installed."""

    def __init__(self, *args, **kwargs):
        requires_tf(self)
class _operation_layer():
    """Wrapper around a LayerPQC that is repeated `num_layers` times.

    The change_* methods keep each operation's variable groups informed
    about how many variables they must supply when the qubit count or the
    repetition count changes.
    """

    def __init__(self, layer: 'LayerPQC', num_layers: int = 1, layer_number: int = 1) -> None:
        self.layer = layer                # the wrapped layer
        self.num_layers = num_layers      # how often the layer is repeated
        self.layer_number = layer_number  # position of this layer in the circuit

    @staticmethod
    def _vars_per_repetition(ent_strategy, num_qubits):
        """Variables one repetition consumes per variable group.

        No entangling strategy: one per qubit.  'NN' (nearest neighbour):
        one per adjacent pair.  Otherwise (all-to-all): one per unordered
        qubit pair, computed in closed form instead of sum(range(...)).
        """
        if ent_strategy is None:
            return num_qubits
        if ent_strategy == 'NN':
            return num_qubits - 1
        return num_qubits * (num_qubits - 1) // 2

    def change_qubits(self, value):
        """Resize the wrapped layer to `value` qubits, adjusting every
        operation and its variable-group bookkeeping.

        Note: for both the no-entangling and 'NN' cases the per-group delta
        is num_layers * (value - old); the original duplicated those
        branches, which are merged here via _vars_per_repetition.
        """
        old_num_qubits = self.layer.num_qubits
        for operation in self.layer.operation_list:
            groups = operation.variablegroup_tuple
            operation.num_qubits = value
            # `is not None` instead of `!= None` (identity comparison).
            if groups is not None:
                delta = self.num_layers * (
                    self._vars_per_repetition(operation.ent_strategy, value)
                    - self._vars_per_repetition(operation.ent_strategy, old_num_qubits)
                )
                for group in groups:
                    group.increase_used_number_of_variables(delta)
        self.layer._num_qubits = value

    def change_num_layers(self, value):
        """Change the repetition count to `value`, adjusting variable
        counts by the per-repetition cost times the difference."""
        diff = value - self.num_layers
        num_qubits = self.layer.num_qubits
        for operation in self.layer.operation_list:
            groups = operation.variablegroup_tuple
            if groups is not None:
                per_rep = self._vars_per_repetition(operation.ent_strategy, num_qubits)
                for group in groups:
                    group.increase_used_number_of_variables(diff * per_rep)
        self.num_layers = value
def conv2d_transpose_strided(x, W, b, output_shape=None, stride=2):
    """Transposed 2-D convolution (spatial upsampling by `stride`) with
    SAME padding, followed by a bias add.

    Args:
      x: NHWC input tensor.
      W: filter tensor; axis 2 is taken as the output-channel count.
      b: bias with one entry per output channel.
      output_shape: explicit NHWC output shape; derived from x when None.
      stride: spatial stride of the transposed convolution.
    """
    if (output_shape is None):
        output_shape = x.get_shape().as_list()
        # BUGFIX: the original hard-coded *= 2 here, which produced a shape
        # inconsistent with the convolution for any stride != 2. Scale the
        # spatial dims by the actual stride (identical for the default).
        output_shape[1] *= stride
        output_shape[2] *= stride
        output_shape[3] = W.get_shape().as_list()[2]
    conv = tf.nn.conv2d_transpose(x, W, output_shape, strides=[1, stride, stride, 1], padding='SAME')
    return tf.nn.bias_add(conv, b)
def test():
    """Concatenating a plain array with an option-typed array yields the
    expected ByteMaskedArray layout."""
    combined = ak.concatenate([ak.Array([1, 2, 3]), ak.Array([1, 2, None])])
    mask = ak.index.Index8(np.array([False, False, False, False, False, True]))
    payload = ak.contents.NumpyArray(np.array([1, 2, 3, 1, 2, 3], dtype=np.int64))
    expected = ak.contents.ByteMaskedArray(mask, payload, valid_when=False)
    assert ak.almost_equal(combined, expected)
def longest_common_prefix(strings):
    """Return the longest common prefix shared by all strings in `strings`.

    Relies on the observation that only the lexicographically smallest and
    largest strings need to be compared: any position where those two agree
    is shared by every string ordered between them.

    Cleanups vs. the original: the redundant empty-min guard is folded into
    the loop (iterating an empty string simply returns ''), the C-style
    index loop is replaced with enumerate, and the pointless `min_s[:]`
    copy of an immutable str is removed.
    """
    if not strings:
        return ''
    lo, hi = min(strings), max(strings)
    for i, ch in enumerate(lo):
        if hi[i] != ch:
            return lo[:i]
    return lo
class LmDataset(CachedDataset2):
def __init__(self, corpus_file, skip_empty_lines=True, orth_symbols_file=None, orth_symbols_map_file=None, orth_replace_map_file=None, word_based=False, word_end_symbol=None, seq_end_symbol='[END]', unknown_symbol='[UNKNOWN]', parse_orth_opts=None, phone_info=None, add_random_phone_seqs=0, auto_replace_unknown_symbol=False, log_auto_replace_unknown_symbols=10, log_skipped_seqs=10, error_on_invalid_seq=True, add_delayed_seq_data=False, delayed_seq_data_start_symbol='[START]', **kwargs):
    """Load the corpus and set up the orthography -> label-index mapping.

    :param corpus_file: filename (or list thereof, or a callable returning
        one) of the corpus, one sequence per line.
    :param skip_empty_lines: drop empty lines while reading the corpus.
    :param orth_symbols_file: text file listing one orth symbol per line.
    :param orth_symbols_map_file: symbol->index map; a .pkl file, a dict
        literal, or "<symbol> <index>" lines are all supported.
    :param orth_replace_map_file: JSON file mapping symbols to replacement
        symbol lists.
    :param word_based: units are words instead of characters.
    :param word_end_symbol: symbol marking word ends (char-based only).
    :param seq_end_symbol: symbol appended at sequence end (None: none).
    :param unknown_symbol: substitute used by auto_replace_unknown_symbol.
    :param parse_orth_opts: extra options for parse_orthography().
    :param phone_info: if given (dict), label sequences come from a
        PhoneSeqGenerator instead of an orth symbol map.
    :param add_random_phone_seqs: provide this many extra "random%i" targets.
    :param auto_replace_unknown_symbol: map unseen symbols to
        unknown_symbol instead of failing.
    :param log_auto_replace_unknown_symbols: log countdown (int) or bool.
    :param log_skipped_seqs: log countdown (int) or bool.
    :param error_on_invalid_seq: raise instead of skipping invalid seqs.
    :param add_delayed_seq_data: also provide "delayed" data (shifted by
        one, starting with delayed_seq_data_start_symbol).
    """
    super(LmDataset, self).__init__(**kwargs)
    # Filenames may be provided lazily as callables.
    if callable(corpus_file):
        corpus_file = corpus_file()
    if callable(orth_symbols_file):
        orth_symbols_file = orth_symbols_file()
    if callable(orth_symbols_map_file):
        orth_symbols_map_file = orth_symbols_map_file()
    if callable(orth_replace_map_file):
        orth_replace_map_file = orth_replace_map_file()
    print('LmDataset, loading file', corpus_file, file=log.v4)
    self.word_based = word_based
    self.word_end_symbol = word_end_symbol
    self.seq_end_symbol = seq_end_symbol
    self.unknown_symbol = unknown_symbol
    self.parse_orth_opts = (parse_orth_opts or {})
    self.parse_orth_opts.setdefault('word_based', self.word_based)
    # Decide which postfix symbols parse_orthography() appends per sequence.
    if (self.word_end_symbol and (not self.word_based)):
        self.parse_orth_opts.setdefault('postfix', ([self.word_end_symbol, self.seq_end_symbol] if (self.seq_end_symbol is not None) else [self.word_end_symbol]))
    else:
        self.parse_orth_opts.setdefault('postfix', ([self.seq_end_symbol] if (self.seq_end_symbol is not None) else []))
    # Exactly one label source is used: a symbols file, a symbol-map file,
    # or a PhoneSeqGenerator configured via phone_info.
    if orth_symbols_file:
        assert (not phone_info)
        assert (not orth_symbols_map_file)
        orth_symbols = open(orth_symbols_file).read().splitlines()
        self.orth_symbols_map = {sym: i for (i, sym) in enumerate(orth_symbols)}
        self.orth_symbols = orth_symbols
        self.labels['data'] = orth_symbols
        self.seq_gen = None
    # BUGFIX: this was `if` in the original, breaking the chain — with only
    # orth_symbols_file given, control would also reach the final `else`
    # below, whose `assert (not orth_symbols_file)` then fails.
    elif (orth_symbols_map_file and orth_symbols_map_file.endswith('.pkl')):
        # Pickled dict {symbol: index}.
        import pickle
        with open(orth_symbols_map_file, 'rb') as f:
            self.orth_symbols_map = pickle.load(f)
        self.orth_symbols = self.orth_symbols_map.keys()
        reverse_map = {i: sym for (sym, i) in sorted(self.orth_symbols_map.items())}
        self.labels['data'] = [sym for (i, sym) in sorted(reverse_map.items())]
        self.seq_gen = None
    elif orth_symbols_map_file:
        assert (not phone_info)
        with open(orth_symbols_map_file, 'r') as f:
            # Sniff the file format: dict literal vs "<symbol> <index>" lines.
            test_string = f.read(1024).replace(' ', '').replace('\n', '')
            match = re.search('^{["\'].+["\']:[0-9]+,', test_string)
            f.seek(0)
            if (match is not None):
                d = literal_eval(f.read())
                orth_symbols_imap_list = [(int(v), k) for (k, v) in d.items()]
                orth_symbols_imap_list.sort()
            else:
                orth_symbols_imap_list = [(int(b), a) for (a, b) in [line.split(None, 1) for line in f.read().splitlines()]]
                orth_symbols_imap_list.sort()
        assert (orth_symbols_imap_list[0][0] == 0)
        self.orth_symbols_map = {sym: i for (i, sym) in orth_symbols_imap_list}
        self.orth_symbols = [sym for (i, sym) in orth_symbols_imap_list]
        reverse_map = {i: sym for (i, sym) in orth_symbols_imap_list}
        self.labels['data'] = [sym for (i, sym) in sorted(reverse_map.items())]
        self.seq_gen = None
    else:
        assert (not orth_symbols_file)
        assert isinstance(phone_info, dict)
        self.seq_gen = PhoneSeqGenerator(**phone_info)
        self.orth_symbols = None
        self.labels['data'] = self.seq_gen.get_class_labels()
    if orth_replace_map_file:
        orth_replace_map = load_json(filename=orth_replace_map_file)
        assert isinstance(orth_replace_map, dict)
        self.orth_replace_map = {key: parse_orthography_into_symbols(v, word_based=self.word_based) for (key, v) in orth_replace_map.items()}
        if self.orth_replace_map:
            if (len(self.orth_replace_map) <= 5):
                print((' orth_replace_map: %r' % self.orth_replace_map), file=log.v5)
            else:
                print((' orth_replace_map: %i entries' % len(self.orth_replace_map)), file=log.v5)
    else:
        self.orth_replace_map = {}
    if (word_end_symbol and (not word_based)):
        self.orth_replace_map[' '] = [word_end_symbol]
    num_labels = len(self.labels['data'])
    use_uint_types = False
    if BackendEngine.is_tensorflow_selected():
        use_uint_types = True
    # Pick the smallest integer dtype that can hold all label indices.
    if (num_labels <= (2 ** 7)):
        self.dtype = 'int8'
    elif ((num_labels <= (2 ** 8)) and use_uint_types):
        self.dtype = 'uint8'
    elif (num_labels <= (2 ** 31)):
        self.dtype = 'int32'
    elif ((num_labels <= (2 ** 32)) and use_uint_types):
        self.dtype = 'uint32'
    elif (num_labels <= (2 ** 61)):
        self.dtype = 'int64'
    elif ((num_labels <= (2 ** 62)) and use_uint_types):
        self.dtype = 'uint64'
    else:
        raise Exception(('cannot handle so much labels: %i' % num_labels))
    self.num_outputs = {'data': [num_labels, 1]}
    self.num_inputs = num_labels
    self.seq_order = None
    self._tag_prefix = 'line-'
    self.auto_replace_unknown_symbol = auto_replace_unknown_symbol
    self.log_auto_replace_unknown_symbols = log_auto_replace_unknown_symbols
    self.log_skipped_seqs = log_skipped_seqs
    self.error_on_invalid_seq = error_on_invalid_seq
    self.add_random_phone_seqs = add_random_phone_seqs
    for i in range(add_random_phone_seqs):
        self.num_outputs[('random%i' % i)] = self.num_outputs['data']
    self.add_delayed_seq_data = add_delayed_seq_data
    self.delayed_seq_data_start_symbol = delayed_seq_data_start_symbol
    if add_delayed_seq_data:
        self.num_outputs['delayed'] = self.num_outputs['data']
        self.labels['delayed'] = self.labels['data']
    # Read the corpus (possibly several files) into memory.
    if isinstance(corpus_file, list):
        self.orths = []
        for file_name in corpus_file:
            self.orths += read_corpus(file_name, skip_empty_lines=skip_empty_lines)
    else:
        self.orths = read_corpus(corpus_file, skip_empty_lines=skip_empty_lines)
    self._estimated_num_seqs = (len(self.orths) // self.partition_epoch)
    print((' done, loaded %i sequences' % len(self.orths)), file=log.v4)
    # Per-epoch iteration state and counters.
    self.next_orth_idx = 0
    self.next_seq_idx = 0
    self.num_skipped = 0
    self.num_unknown = 0
def get_data_keys(self):
    """All data keys this dataset provides, in sorted order."""
    return sorted(self.num_outputs)
def get_target_list(self):
    """Here the main 'data' entry itself serves as the target."""
    return ['data']
def get_data_dtype(self, key):
    """The same integer dtype is used for every data key."""
    return self.dtype
def init_seq_order(self, epoch=None, seq_list=None, seq_order=None):
    """Set up the sequence order for the given epoch.

    An explicit seq_order (corpus indices) or seq_list (sequence tags)
    overrides the epoch-based ordering; otherwise the order comes from
    get_seq_order_for_epoch with sequence length as the sort key.
    """
    if (seq_list and (not self.error_on_invalid_seq)):
        # With an explicit seq_list, silently skipping seqs would desync
        # the requested order, so force errors instead.
        print('Setting error_on_invalid_seq to True since a seq_list is given. Please activate auto_replace_unknown_symbol if you want to prevent invalid sequences!', file=log.v4)
        self.error_on_invalid_seq = True
    super(LmDataset, self).init_seq_order(epoch=epoch, seq_list=seq_list, seq_order=seq_order)
    if (seq_order is not None):
        self.seq_order = seq_order
    elif (seq_list is not None):
        # Tags have the form "line-<corpus index>"; recover the indices.
        self.seq_order = [int(s[len(self._tag_prefix):]) for s in seq_list]
    else:
        self.seq_order = self.get_seq_order_for_epoch(epoch=epoch, num_seqs=len(self.orths), get_seq_len=(lambda i: len(self.orths[i])))
    # Reset the per-epoch iteration state and counters.
    self.next_orth_idx = 0
    self.next_seq_idx = 0
    self.num_skipped = 0
    self.num_unknown = 0
    if self.seq_gen:
        self.seq_gen.random_seed(self._get_random_seed_for_epoch(epoch))
    return True
def supports_seq_order_sorting(self) -> bool:
    """Sequence lengths are known up front, so sorting is supported."""
    return True
def get_total_num_seqs(self) -> int:
    """Total number of corpus lines loaded."""
    return len(self.orths)
def _reduce_log_skipped_seqs(self):
    """Count down the skipped-sequence log budget; announce once it is used up."""
    if isinstance(self.log_skipped_seqs, bool):
        # A bool means a fixed always/never policy, not a countdown.
        return
    assert isinstance(self.log_skipped_seqs, int)
    assert self.log_skipped_seqs >= 1
    self.log_skipped_seqs -= 1
    if self.log_skipped_seqs == 0:
        print('LmDataset: will stop logging about skipped sequences now', file=log.v4)
def _reduce_log_auto_replace_unknown_symbols(self):
    """Count down the unknown-symbol log budget; announce once it is used up."""
    if isinstance(self.log_auto_replace_unknown_symbols, bool):
        # A bool means a fixed always/never policy, not a countdown.
        return
    assert isinstance(self.log_auto_replace_unknown_symbols, int)
    assert self.log_auto_replace_unknown_symbols >= 1
    self.log_auto_replace_unknown_symbols -= 1
    if self.log_auto_replace_unknown_symbols == 0:
        print('LmDataset: will stop logging about auto-replace with unknown symbol now', file=log.v4)
def _collect_single_seq(self, seq_idx):
    """Produce the DatasetSeq for seq_idx, skipping over invalid corpus lines.

    Returns None once the epoch's seq_order is exhausted.
    """
    while True:
        # Reached the end of the epoch?
        if (self.next_orth_idx >= len(self.seq_order)):
            assert (self.next_seq_idx <= seq_idx), 'We expect that we iterate through all seqs.'
            if (self.num_skipped > 0):
                print(('LmDataset: reached end, skipped %i sequences' % self.num_skipped))
            return None
        assert (self.next_seq_idx == seq_idx), 'We expect that we iterate through all seqs.'
        true_idx = self.seq_order[self.next_orth_idx]
        orth = self.orths[true_idx]
        seq_tag = (self._tag_prefix + str(true_idx))
        self.next_orth_idx += 1
        # A lone sentence-end marker carries no content; try the next line.
        if (orth == '</s>'):
            continue
        if self.seq_gen:
            # Phone-based labels via the PhoneSeqGenerator.
            try:
                phones = self.seq_gen.generate_seq(orth)
            except KeyError as e:
                if self.log_skipped_seqs:
                    print(('LmDataset: skipping sequence %r because of missing lexicon entry: %s' % (orth, e)), file=log.v4)
                    self._reduce_log_skipped_seqs()
                if self.error_on_invalid_seq:
                    raise Exception(('LmDataset: invalid seq %r, missing lexicon entry %r' % (orth, e)))
                self.num_skipped += 1
                continue
            data = self.seq_gen.seq_to_class_idxs(phones, dtype=self.dtype)
        elif self.orth_symbols:
            # Orth-symbol-based labels.
            orth_syms = parse_orthography(orth, **self.parse_orth_opts)
            while True:
                # Apply the replacement map, then collapse runs of the
                # space symbol into a single occurrence.
                orth_syms = sum([self.orth_replace_map.get(s, [s]) for s in orth_syms], [])
                i = 0
                space_symbol = (self.word_end_symbol if (self.word_end_symbol and (not self.word_based)) else ' ')
                while (i < (len(orth_syms) - 1)):
                    if (orth_syms[i:(i + 2)] == [space_symbol, space_symbol]):
                        orth_syms[i:(i + 2)] = [space_symbol]
                    else:
                        i += 1
                if self.auto_replace_unknown_symbol:
                    # Dry-run the index mapping; on the first unknown
                    # symbol, extend the replacement map and retry the
                    # whole normalization from the top.
                    try:
                        list(map(self.orth_symbols_map.__getitem__, orth_syms))
                    except KeyError as e:
                        if (sys.version_info >= (3, 0)):
                            orth_sym = e.args[0]
                        else:
                            orth_sym = e.message
                        if self.log_auto_replace_unknown_symbols:
                            print(('LmDataset: unknown orth symbol %r, adding to orth_replace_map as %r' % (orth_sym, self.unknown_symbol)), file=log.v3)
                            self._reduce_log_auto_replace_unknown_symbols()
                        self.orth_replace_map[orth_sym] = ([self.unknown_symbol] if (self.unknown_symbol is not None) else [])
                        continue
                break
            self.num_unknown += orth_syms.count(self.unknown_symbol)
            if self.word_based:
                orth_debug_str = repr(orth_syms)
            else:
                orth_debug_str = repr(''.join(orth_syms))
            try:
                data = numpy.array(list(map(self.orth_symbols_map.__getitem__, orth_syms)), dtype=self.dtype)
            except KeyError as e:
                if self.log_skipped_seqs:
                    print(('LmDataset: skipping sequence %s because of missing orth symbol: %s' % (orth_debug_str, e)), file=log.v4)
                    self._reduce_log_skipped_seqs()
                if self.error_on_invalid_seq:
                    raise Exception(('LmDataset: invalid seq %s, missing orth symbol %s' % (orth_debug_str, e)))
                self.num_skipped += 1
                continue
        else:
            # __init__ guarantees one of seq_gen / orth_symbols is set.
            assert False
        targets = {}
        # Optional extra targets: random phone sequences of matching length.
        for i in range(self.add_random_phone_seqs):
            assert self.seq_gen
            phones = self.seq_gen.generate_garbage_seq(target_len=data.shape[0])
            targets[('random%i' % i)] = self.seq_gen.seq_to_class_idxs(phones, dtype=self.dtype)
        if self.add_delayed_seq_data:
            # "delayed" is data shifted right by one, prefixed with the
            # configured start symbol.
            targets['delayed'] = numpy.concatenate(([self.orth_symbols_map[self.delayed_seq_data_start_symbol]], data[:(- 1)])).astype(self.dtype)
            assert (targets['delayed'].shape == data.shape)
        self.next_seq_idx = (seq_idx + 1)
        return DatasetSeq(seq_idx=seq_idx, features=data, targets=targets, seq_tag=seq_tag)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.