code stringlengths 281 23.7M |
|---|
class DictionaryParsingTests(unittest.TestCase):
    """Tests for RADIUS dictionary parsing (pyrad ``Dictionary``).

    Uses the on-disk 'simple' dictionary fixture plus in-memory StringIO
    dictionaries to exercise ATTRIBUTE/VALUE/VENDOR/TLV parsing and the
    ParseError paths for malformed input.
    """

    # (attribute name, attribute code, attribute type) triples expected
    # to be loaded from the 'simple' dictionary fixture.
    simple_dict_values = [('Test-String', 1, 'string'), ('Test-Octets', 2, 'octets'), ('Test-Integer', 3, 'integer'), ('Test-Ip-Address', 4, 'ipaddr'), ('Test-Ipv6-Address', 5, 'ipv6addr'), ('Test-If-Id', 6, 'ifid'), ('Test-Date', 7, 'date'), ('Test-Abinary', 8, 'abinary'), ('Test-Tlv', 9, 'tlv'), ('Test-Tlv-Str', 1, 'string'), ('Test-Tlv-Int', 2, 'integer'), ('Test-Integer64', 10, 'integer64'), ('Test-Integer64-Hex', 10, 'integer64'), ('Test-Integer64-Oct', 10, 'integer64')]

    def setUp(self):
        # `home` is defined at module level (outside this view); the
        # fixtures live under <home>/data.
        self.path = os.path.join(home, 'data')
        self.dict = Dictionary(os.path.join(self.path, 'simple'))

    def testParseEmptyDictionary(self):
        dict = Dictionary(StringIO(''))
        self.assertEqual(len(dict), 0)

    def testParseMultipleDictionaries(self):
        # Dictionary() accepts extra dictionary sources as varargs.
        dict = Dictionary(StringIO(''))
        self.assertEqual(len(dict), 0)
        one = StringIO('ATTRIBUTE Test-First 1 string')
        two = StringIO('ATTRIBUTE Test-Second 2 string')
        dict = Dictionary(StringIO(''), one, two)
        self.assertEqual(len(dict), 2)

    def testParseSimpleDictionary(self):
        self.assertEqual(len(self.dict), len(self.simple_dict_values))
        for (attr, code, type) in self.simple_dict_values:
            attr = self.dict[attr]
            self.assertEqual(attr.code, code)
            self.assertEqual(attr.type, type)

    # --- ATTRIBUTE error paths: each asserts a ParseError whose message
    # --- mentions the offending token, and fails if nothing is raised.
    def testAttributeTooFewColumnsError(self):
        try:
            self.dict.ReadDictionary(StringIO('ATTRIBUTE Oops-Too-Few-Columns'))
        except ParseError as e:
            self.assertEqual(('attribute' in str(e)), True)
        else:
            self.fail()

    def testAttributeUnknownTypeError(self):
        try:
            self.dict.ReadDictionary(StringIO('ATTRIBUTE Test-Type 1 dummy'))
        except ParseError as e:
            self.assertEqual(('dummy' in str(e)), True)
        else:
            self.fail()

    def testAttributeUnknownVendorError(self):
        try:
            self.dict.ReadDictionary(StringIO('ATTRIBUTE Test-Type 1 Simplon'))
        except ParseError as e:
            self.assertEqual(('Simplon' in str(e)), True)
        else:
            self.fail()

    def testAttributeOptions(self):
        # Trailing comma-separated options set flags on the attribute.
        self.dict.ReadDictionary(StringIO('ATTRIBUTE Option-Type 1 string has_tag,encrypt=1'))
        self.assertEqual(self.dict['Option-Type'].has_tag, True)
        self.assertEqual(self.dict['Option-Type'].encrypt, 1)

    def testAttributeEncryptionError(self):
        # Only encrypt values 1-3 are valid; 4 must be rejected.
        try:
            self.dict.ReadDictionary(StringIO('ATTRIBUTE Test-Type 1 string encrypt=4'))
        except ParseError as e:
            self.assertEqual(('encrypt' in str(e)), True)
        else:
            self.fail()

    def testValueTooFewColumnsError(self):
        try:
            self.dict.ReadDictionary(StringIO('VALUE Oops-Too-Few-Columns'))
        except ParseError as e:
            self.assertEqual(('value' in str(e)), True)
        else:
            self.fail()

    def testValueForUnknownAttributeError(self):
        try:
            self.dict.ReadDictionary(StringIO('VALUE Test-Attribute Test-Text 1'))
        except ParseError as e:
            self.assertEqual(('unknown attribute' in str(e)), True)
        else:
            self.fail()

    # --- VALUE parsing for the various attribute types.  Stored values
    # --- are wire-encoded; DecodeAttr round-trips them for comparison.
    def testIntegerValueParsing(self):
        self.assertEqual(len(self.dict['Test-Integer'].values), 0)
        self.dict.ReadDictionary(StringIO('VALUE Test-Integer Value-Six 5'))
        self.assertEqual(len(self.dict['Test-Integer'].values), 1)
        self.assertEqual(DecodeAttr('integer', self.dict['Test-Integer'].values['Value-Six']), 5)

    def testInteger64ValueParsing(self):
        self.assertEqual(len(self.dict['Test-Integer64'].values), 0)
        self.dict.ReadDictionary(StringIO('VALUE Test-Integer64 Value-Six 5'))
        self.assertEqual(len(self.dict['Test-Integer64'].values), 1)
        self.assertEqual(DecodeAttr('integer64', self.dict['Test-Integer64'].values['Value-Six']), 5)

    def testStringValueParsing(self):
        self.assertEqual(len(self.dict['Test-String'].values), 0)
        self.dict.ReadDictionary(StringIO('VALUE Test-String Value-Custard custardpie'))
        self.assertEqual(len(self.dict['Test-String'].values), 1)
        self.assertEqual(DecodeAttr('string', self.dict['Test-String'].values['Value-Custard']), 'custardpie')

    def testOctetValueParsing(self):
        # Octet values may be given as decimal (65) or hex (0x42).
        self.assertEqual(len(self.dict['Test-Octets'].values), 0)
        self.dict.ReadDictionary(StringIO('ATTRIBUTE Test-Octets 1 octets\nVALUE Test-Octets Value-A 65\nVALUE Test-Octets Value-B 0x42\n'))
        self.assertEqual(len(self.dict['Test-Octets'].values), 2)
        self.assertEqual(DecodeAttr('octets', self.dict['Test-Octets'].values['Value-A']), b'A')
        self.assertEqual(DecodeAttr('octets', self.dict['Test-Octets'].values['Value-B']), b'B')

    def testTlvParsing(self):
        self.assertEqual(len(self.dict['Test-Tlv'].sub_attributes), 2)
        self.assertEqual(self.dict['Test-Tlv'].sub_attributes, {1: 'Test-Tlv-Str', 2: 'Test-Tlv-Int'})

    def testSubTlvParsing(self):
        # Sub-attributes of a TLV carry is_sub_attribute/parent links;
        # every other attribute must not.
        for (attr, _, _) in self.simple_dict_values:
            if attr.startswith('Test-Tlv-'):
                self.assertEqual(self.dict[attr].is_sub_attribute, True)
                self.assertEqual(self.dict[attr].parent, self.dict['Test-Tlv'])
            else:
                self.assertEqual(self.dict[attr].is_sub_attribute, False)
                self.assertEqual(self.dict[attr].parent, None)
        # Same invariant for vendor-specific TLVs in the 'full' fixture.
        full_dict = Dictionary(os.path.join(self.path, 'full'))
        self.assertEqual(full_dict['Simplon-Tlv-Str'].is_sub_attribute, True)
        self.assertEqual(full_dict['Simplon-Tlv-Str'].parent, full_dict['Simplon-Tlv'])
        self.assertEqual(full_dict['Simplon-Tlv-Int'].is_sub_attribute, True)
        self.assertEqual(full_dict['Simplon-Tlv-Int'].parent, full_dict['Simplon-Tlv'])

    def testVenderTooFewColumnsError(self):
        try:
            self.dict.ReadDictionary(StringIO('VENDOR Simplon'))
        except ParseError as e:
            self.assertEqual(('vendor' in str(e)), True)
        else:
            self.fail()

    def testVendorParsing(self):
        # Vendor-qualified attributes are indexed as (vendor, code).
        self.assertRaises(ParseError, self.dict.ReadDictionary, StringIO('ATTRIBUTE Test-Type 1 integer Simplon'))
        self.dict.ReadDictionary(StringIO('VENDOR Simplon 42'))
        self.assertEqual(self.dict.vendors['Simplon'], 42)
        self.dict.ReadDictionary(StringIO('ATTRIBUTE Test-Type 1 integer Simplon'))
        self.assertEqual(self.dict.attrindex['Test-Type'], (42, 1))

    def testVendorOptionError(self):
        self.assertRaises(ParseError, self.dict.ReadDictionary, StringIO('ATTRIBUTE Test-Type 1 integer Simplon'))
        try:
            self.dict.ReadDictionary(StringIO('VENDOR Simplon 42 badoption'))
        except ParseError as e:
            self.assertEqual(('option' in str(e)), True)
        else:
            self.fail()

    def testVendorFormatError(self):
        try:
            self.dict.ReadDictionary(StringIO('VENDOR Simplon 42 format=5,4'))
        except ParseError as e:
            self.assertEqual(('format' in str(e)), True)
        else:
            self.fail()

    def testVendorFormatSyntaxError(self):
        try:
            self.dict.ReadDictionary(StringIO('VENDOR Simplon 42 format=a,1'))
        except ParseError as e:
            self.assertEqual(('Syntax' in str(e)), True)
        else:
            self.fail()

    def testBeginVendorTooFewColumns(self):
        try:
            self.dict.ReadDictionary(StringIO('BEGIN-VENDOR'))
        except ParseError as e:
            self.assertEqual(('begin-vendor' in str(e)), True)
        else:
            self.fail()

    def testBeginVendorUnknownVendor(self):
        try:
            self.dict.ReadDictionary(StringIO('BEGIN-VENDOR Simplon'))
        except ParseError as e:
            self.assertEqual(('Simplon' in str(e)), True)
        else:
            self.fail()

    def testBeginVendorParsing(self):
        # Inside BEGIN-VENDOR, plain ATTRIBUTE lines get vendor-qualified.
        self.dict.ReadDictionary(StringIO('VENDOR Simplon 42\nBEGIN-VENDOR Simplon\nATTRIBUTE Test-Type 1 integer'))
        self.assertEqual(self.dict.attrindex['Test-Type'], (42, 1))

    def testEndVendorUnknownVendor(self):
        try:
            self.dict.ReadDictionary(StringIO('END-VENDOR'))
        except ParseError as e:
            self.assertEqual(('end-vendor' in str(e)), True)
        else:
            self.fail()

    def testEndVendorUnbalanced(self):
        # END-VENDOR must name the vendor opened by BEGIN-VENDOR.
        try:
            self.dict.ReadDictionary(StringIO('VENDOR Simplon 42\nBEGIN-VENDOR Simplon\nEND-VENDOR Oops\n'))
        except ParseError as e:
            self.assertEqual(('Oops' in str(e)), True)
        else:
            self.fail()

    def testEndVendorParsing(self):
        # After END-VENDOR, attributes are plain (unqualified) again.
        self.dict.ReadDictionary(StringIO('VENDOR Simplon 42\nBEGIN-VENDOR Simplon\nEND-VENDOR Simplon\nATTRIBUTE Test-Type 1 integer'))
        self.assertEqual(self.dict.attrindex['Test-Type'], 1)

    def testInclude(self):
        # $INCLUDE of a missing file surfaces as IOError naming the file.
        try:
            self.dict.ReadDictionary(StringIO('$INCLUDE this_file_does_not_exist\nVENDOR Simplon 42\nBEGIN-VENDOR Simplon\nEND-VENDOR Simplon\nATTRIBUTE Test-Type 1 integer'))
        except IOError as e:
            self.assertEqual(('this_file_does_not_exist' in str(e)), True)
        else:
            self.fail()

    def testDictFilePostParse(self):
        # After exhausting a DictFile, File()/Line() report sentinel values.
        f = DictFile(StringIO('VENDOR Simplon 42\n'))
        for _ in f:
            pass
        self.assertEqual(f.File(), '')
        self.assertEqual(f.Line(), (- 1))

    def testDictFileParseError(self):
        tmpdict = Dictionary()
        try:
            tmpdict.ReadDictionary(os.path.join(self.path, 'dictfiletest'))
        except ParseError as e:
            self.assertEqual(('dictfiletest' in str(e)), True)
        else:
            self.fail()
def do_train(cfg, model, resume=False):
    """Run a detectron2-style training loop for ``model`` under config ``cfg``.

    Builds optimizer/scheduler/checkpointer/data loader from the config,
    iterates until ``max_iter``, logging metrics through ``EventStorage``
    and the distributed ``comm`` helpers, with periodic evaluation and
    checkpointing.  When ``resume`` is True, continues from the iteration
    stored in the checkpoint.
    """
    model = check_if_freeze_model(model, cfg)
    model.train()
    if cfg.SOLVER.USE_CUSTOM_SOLVER:
        optimizer = build_custom_optimizer(cfg, model)
    else:
        # The stock optimizer path only supports plain SGD without
        # full-model clipping or a backbone LR multiplier.
        assert (cfg.SOLVER.OPTIMIZER == 'SGD')
        assert (cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE != 'full_model')
        assert (cfg.SOLVER.BACKBONE_MULTIPLIER == 1.0)
        optimizer = build_optimizer(cfg, model)
    scheduler = build_lr_scheduler(cfg, optimizer)
    checkpointer = DetectionCheckpointer(model, cfg.OUTPUT_DIR, optimizer=optimizer, scheduler=scheduler)
    # Resume from the checkpointed iteration (+1), or 0 for a fresh run.
    start_iter = (checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=resume).get('iteration', (- 1)) + 1)
    if (not resume):
        start_iter = 0
    # TRAIN_ITER < 0 means "use SOLVER.MAX_ITER"; otherwise it overrides.
    max_iter = (cfg.SOLVER.MAX_ITER if (cfg.SOLVER.TRAIN_ITER < 0) else cfg.SOLVER.TRAIN_ITER)
    periodic_checkpointer = PeriodicCheckpointer(checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD, max_iter=max_iter)
    # Metric writers exist only on the main process.
    writers = ([CommonMetricPrinter(max_iter), JSONWriter(os.path.join(cfg.OUTPUT_DIR, 'metrics.json')), TensorboardXWriter(cfg.OUTPUT_DIR)] if comm.is_main_process() else [])
    DatasetMapperClass = (GTRDatasetMapper if cfg.VIDEO_INPUT else CustomDatasetMapper)
    mapper = DatasetMapperClass(cfg, True, augmentations=build_custom_augmentation(cfg, True))
    if cfg.VIDEO_INPUT:
        data_loader = build_gtr_train_loader(cfg, mapper=mapper)
    else:
        data_loader = build_custom_train_loader(cfg, mapper=mapper)
    logger.info('Starting training from iteration {}'.format(start_iter))
    with EventStorage(start_iter) as storage:
        step_timer = Timer()
        data_timer = Timer()
        start_time = time.perf_counter()
        for (data, iteration) in zip(data_loader, range(start_iter, max_iter)):
            data_time = data_timer.seconds()
            storage.put_scalars(data_time=data_time)
            step_timer.reset()
            # Iterations are 1-based for logging/eval/checkpoint cadence.
            iteration = (iteration + 1)
            storage.step()
            loss_dict = model(data)
            # Total loss is the sum of every entry whose key contains 'loss'.
            losses = sum((loss for (k, loss) in loss_dict.items() if ('loss' in k)))
            assert torch.isfinite(losses).all(), loss_dict
            # Reduced losses are for logging only; backward uses local losses.
            loss_dict_reduced = {k: v.item() for (k, v) in comm.reduce_dict(loss_dict).items()}
            losses_reduced = sum((loss for (k, loss) in loss_dict_reduced.items() if ('loss' in k)))
            if comm.is_main_process():
                storage.put_scalars(total_loss=losses_reduced, **loss_dict_reduced)
            optimizer.zero_grad()
            losses.backward()
            optimizer.step()
            storage.put_scalar('lr', optimizer.param_groups[0]['lr'], smoothing_hint=False)
            step_time = step_timer.seconds()
            storage.put_scalars(time=step_time)
            data_timer.reset()
            scheduler.step()
            if ((cfg.TEST.EVAL_PERIOD > 0) and ((iteration % cfg.TEST.EVAL_PERIOD) == 0) and (iteration != max_iter)):
                do_test(cfg, model)
                comm.synchronize()
            # Skip writer output during the first few warm-up iterations.
            if (((iteration - start_iter) > 5) and (((iteration % 20) == 0) or (iteration == max_iter))):
                for writer in writers:
                    writer.write()
            periodic_checkpointer.step(iteration)
    total_time = (time.perf_counter() - start_time)
    logger.info('Total training time: {}'.format(str(datetime.timedelta(seconds=int(total_time)))))
def test_convoluted_quantities_units(*args, **kwargs):
    """Check spectral quantity units before/after slit convolution.

    With ``norm_by='area'`` the convolved radiance keeps its per-nm units;
    with ``norm_by='max'`` radiance loses the /nm factor and a normalized
    transmittance picks up 'nm' units.
    """
    from radis.test.utils import getTestFile
    s = load_spec(getTestFile('CO_Tgas1500K_mole_fraction0.5.spec'), binary=True)
    s.update(verbose=False)
    assert (s.units['radiance_noslit'] == 'mW/cm2/sr/nm')
    assert (s.units['transmittance_noslit'] == '')
    # Area-normalized slit: units are unchanged by convolution.
    s.apply_slit(0.5, norm_by='area', verbose=False)
    assert (s.units['radiance'] == 'mW/cm2/sr/nm')
    assert (s.units['transmittance'] == '')
    # Max-normalized slit: radiance integrates out the nm dimension.
    s.apply_slit(0.5, norm_by='max', verbose=False)
    assert is_homogeneous(s.units['radiance'], 'mW/cm2/sr')
    assert (s.units['transmittance'] == 'nm')
class TestLinkpred():
    """Tests for the ``linkpred.LinkPred`` driver.

    Builds in-memory Pajek networks via ``config_file`` and exercises
    initialization, exclusion handling, preprocessing, output setup and
    the prediction pipeline.  ``smokesignal`` state is reset between tests.
    """

    def teardown_method(self):
        smokesignal.clear_all()

    def config_file(self, training=False, test=False, **kwargs):
        """Return a config dict; the training/test flags attach Pajek data
        as in-memory file objects under 'training-file'/'test-file'."""
        config = {'predictors': ['Random'], 'label': 'testing'}
        for (var, name, fname, data) in ((training, 'training', 'foo.net', b'*Vertices 3\n1 A\n2 B\n3 C\n*Edges 1\n1 2 1\n'), (test, 'test', 'bar.net', b'*Vertices 3\n1 A\n2 B\n3 C\n*Edges 1\n3 2 1\n')):
            if var:
                fh = io.BytesIO()
                fh.name = fname
                fh.write(data)
                fh.seek(0)
                config[f'{name}-file'] = fh
        config.update(kwargs)
        return config

    def test_init(self):
        lp = linkpred.LinkPred(self.config_file())
        assert (lp.config['label'] == 'testing')
        assert (lp.training is None)
        lp = linkpred.LinkPred(self.config_file(training=True))
        assert isinstance(lp.training, nx.Graph)
        assert (len(lp.training.nodes()) == 3)
        assert (len(lp.training.edges()) == 1)
        assert (lp.test is None)

    def test_excluded(self):
        # exclude='' -> nothing; 'old' -> existing edges; 'new' -> the
        # complement pairs.  Unknown values raise LinkPredError lazily.
        for (value, expected) in zip(('', 'old', 'new'), (set(), {('A', 'B')}, {('B', 'C'), ('A', 'C')})):
            lp = linkpred.LinkPred(self.config_file(training=True, exclude=value))
            assert ({tuple(sorted(p)) for p in lp.excluded} == expected)
        with pytest.raises(linkpred.exceptions.LinkPredError):
            lp = linkpred.LinkPred(self.config_file(exclude='bla'))
            lp.excluded

    def test_preprocess_only_training(self):
        lp = linkpred.LinkPred(self.config_file(training=True))
        lp.preprocess()
        assert (set(lp.training.nodes()) == set('AB'))

    def test_preprocess_training_and_test(self):
        # With both networks, preprocessing keeps only shared nodes.
        lp = linkpred.LinkPred(self.config_file(training=True, test=True))
        lp.preprocess()
        assert (set(lp.training.nodes()) == {'B'})
        assert (set(lp.test.nodes()) == {'B'})

    def test_setup_output_evaluating_without_test(self):
        lp = linkpred.LinkPred(self.config_file(training=True))
        with pytest.raises(linkpred.exceptions.LinkPredError):
            lp.setup_output()

    def test_setup_output(self):
        for (name, klass) in (('recall-precision', RecallPrecisionPlotter), ('f-score', FScorePlotter), ('ROC', ROCPlotter), ('fmax', FMaxListener), ('cache-evaluations', CacheEvaluationListener)):
            config = self.config_file(training=True, test=True, output=[name])
            lp = linkpred.LinkPred(config)
            lp.setup_output()
            assert isinstance(lp.listeners[0], klass)
            smokesignal.clear_all()
            # Evaluator params derive from the fixture: 1 relevant edge,
            # universe of 2 candidate pairs.
            assert (len(lp.evaluator.params['relevant']) == 1)
            assert (lp.evaluator.params['universe'] == 2)
            assert isinstance(lp.evaluator.params['universe'], int)

    def test_predict_all(self):
        # Stub predictor that records its parameters and returns a marker.
        class Stub():
            def __init__(self, training, eligible, excluded):
                self.training = training
                self.eligible = eligible
                self.excluded = excluded

            def predict(self, **params):
                self.params = params
                return 'scoresheet'
        linkpred.predictors.A = Stub
        linkpred.predictors.B = Stub
        config = self.config_file(training=True)
        config['predictors'] = [{'name': 'A', 'parameters': {'X': 'x'}, 'displayname': 'prettyA'}, {'name': 'B', 'displayname': 'prettyB'}]
        lp = linkpred.LinkPred(config)
        results = list(lp.predict_all())
        assert (results == [('A', 'scoresheet'), ('B', 'scoresheet')])

    def test_process_predictions(self):
        # NOTE(review): the bare string expressions below look like stripped
        # decorators (presumably @smokesignal.on('<signal>') registering the
        # callbacks) -- as written they are no-ops and the callbacks are
        # never registered; confirm against upstream history.
        ('prediction_finished')
        def a(scoresheet, dataset, predictor):
            assert scoresheet.startswith('scoresheet')
            assert predictor.startswith('pred')
            assert (dataset == 'testing')
            a.called = True
        ('dataset_finished')
        def b(dataset):
            assert (dataset == 'testing')
            b.called = True
        ('run_finished')
        def c():
            c.called = True
        a.called = b.called = c.called = False
        lp = linkpred.LinkPred(self.config_file())
        lp.predictions = [('pred1', 'scoresheet1'), ('pred2', 'scoresheet2')]
        lp.process_predictions()
        assert a.called
        assert b.called
        assert c.called
        smokesignal.clear_all()
# NOTE(review): the decorator '@' markers were stripped from the source,
# leaving bare tuples (a syntax error: keyword args outside a call).  The
# option signatures (is_flag, multiple, nargs=-1) match click's API, and
# click must already be imported by this file for the original decorators
# to have worked -- confirm against upstream history.
@click.command()
@click.option('--config-name', '-cn', required=True, type=str)
@click.option('--config-dir', '-cd', default=None, type=str)
@click.option('--seeds', '-s', default='42,43,44', type=str)
@click.option('--monitor_key', '-k', multiple=True, default=['test/mean_score'])
@click.option('--ray_address', '-ra', default='auto')
@click.option('--num_cpus', '-nc', default=7, type=float)
@click.option('--num_gpus', '-ng', default=1, type=float)
@click.option('--max_retries', '-mr', default=0, type=int)
@click.option('--monitor_max_retires', default=3, type=int)
@click.option('--data_src', '-d', default='./data', type=str)
@click.option('--unbuffer_python', '-u', is_flag=True, default=False)
@click.option('--single_node', '-sn', is_flag=True, default=False, help='run all experiments on a single machine')
@click.argument('command_args', nargs=(- 1), type=str)
def main(config_name, config_dir, seeds, monitor_key, ray_address, num_cpus, num_gpus, max_retries, monitor_max_retires, data_src, unbuffer_python, single_node, command_args):
    """Launch one training run per seed on a Ray cluster, plus a metrics
    monitor task, all sharing one hydra config and one wandb group.

    Extra CLI arguments (``command_args``) are forwarded to hydra as
    overrides for both config composition and the spawned train.py runs.
    """
    seeds = [int(x) for x in seeds.split(',')]
    if (data_src is not None):
        data_src = os.path.abspath(os.path.expanduser(data_src))
    # Hydra wants the config path relative to the current working directory.
    if (config_dir is None):
        config_path_abs = pathlib.Path(__file__).parent.joinpath('diffusion_policy', 'config')
        config_path_rel = str(config_path_abs.relative_to(pathlib.Path.cwd()))
    else:
        config_path_rel = config_dir
    run_command_args = list()
    monitor_command_args = list()
    # Compose the config once to resolve the shared output directory.
    with hydra.initialize(version_base=None, config_path=config_path_rel):
        cfg = hydra.compose(config_name=config_name, overrides=command_args)
        OmegaConf.resolve(cfg)
    output_dir = pathlib.Path(cfg.multi_run.run_dir)
    # Fail loudly if the run dir already exists to avoid clobbering results.
    output_dir.mkdir(parents=True, exist_ok=False)
    config_path = output_dir.joinpath('config.yaml')
    print(output_dir)
    yaml.dump(OmegaConf.to_container(cfg, resolve=True), config_path.open('w'), default_flow_style=False)
    # All runs and the monitor share one wandb group id.
    wandb_group_id = wandb.util.generate_id()
    name_base = cfg.multi_run.wandb_name_base
    monitor_command_args = ['python', 'multirun_metrics.py', '--input', str(output_dir), '--use_wandb', '--project', 'diffusion_policy_metrics', '--group', wandb_group_id]
    for k in monitor_key:
        monitor_command_args.extend(['--key', k])
    # Build one train.py command line per seed.
    run_command_args = list()
    for (i, seed) in enumerate(seeds):
        test_start_seed = ((seed + 1) * 100000)
        this_output_dir = output_dir.joinpath(f'train_{i}')
        this_output_dir.mkdir()
        wandb_name = (name_base + f'_train_{i}')
        wandb_run_id = (wandb_group_id + f'_train_{i}')
        this_command_args = ['python', 'train.py', ('--config-name=' + config_name), ('--config-dir=' + config_path_rel)]
        this_command_args.extend(command_args)
        this_command_args.extend([f'training.seed={seed}', f'task.env_runner.test_start_seed={test_start_seed}', f'logging.name={wandb_name}', f'logging.id={wandb_run_id}', f'logging.group={wandb_group_id}', f'hydra.run.dir={this_output_dir}'])
        run_command_args.append(this_command_args)
    root_dir = os.path.dirname(__file__)
    runtime_env = {'working_dir': root_dir, 'excludes': ['.git'], 'pip': ['dm-control==1.0.9']}
    ray.init(address=ray_address, runtime_env=runtime_env)
    # Reserve resources: n training bundles (all on one node if requested)
    # plus one CPU for the monitor, in a single placement group.
    train_resources = dict()
    train_bundle = dict(train_resources)
    train_bundle['CPU'] = num_cpus
    train_bundle['GPU'] = num_gpus
    monitor_resources = dict()
    monitor_resources['CPU'] = 1
    monitor_bundle = dict(monitor_resources)
    bundle = collections.defaultdict((lambda : 0))
    n_train_bundles = 1
    if single_node:
        n_train_bundles = len(seeds)
    for _ in range(n_train_bundles):
        for (k, v) in train_bundle.items():
            bundle[k] += v
    for (k, v) in monitor_bundle.items():
        bundle[k] += v
    bundle = dict(bundle)
    print('Creating placement group with resources:')
    pprint(bundle)
    pg = placement_group([bundle])
    task_name_map = dict()
    task_refs = list()
    for (i, this_command_args) in enumerate(run_command_args):
        # In single-node mode every worker is pinned to the placement group;
        # otherwise only the last worker is (it shares the monitor's node).
        if (single_node or (i == (len(run_command_args) - 1))):
            print(f'Training worker {i} with placement group.')
            ray.get(pg.ready())
            print('Placement Group created!')
            worker_ray = ray.remote(worker_fn).options(num_cpus=num_cpus, num_gpus=num_gpus, max_retries=max_retries, resources=train_resources, retry_exceptions=True, scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg))
        else:
            print(f'Training worker {i} without placement group.')
            worker_ray = ray.remote(worker_fn).options(num_cpus=num_cpus, num_gpus=num_gpus, max_retries=max_retries, resources=train_resources, retry_exceptions=True)
        task_ref = worker_ray.remote(this_command_args, data_src, unbuffer_python)
        task_refs.append(task_ref)
        task_name_map[task_ref] = f'train_{i}'
    ray.get(pg.ready())
    monitor_worker_ray = ray.remote(worker_fn).options(num_cpus=1, num_gpus=0, max_retries=monitor_max_retires, retry_exceptions=True, scheduling_strategy=PlacementGroupSchedulingStrategy(placement_group=pg))
    monitor_ref = monitor_worker_ray.remote(monitor_command_args, data_src, unbuffer_python)
    task_name_map[monitor_ref] = 'metrics'
    try:
        # Wait for training tasks one at a time; on a (non-cancellation)
        # failure, cancel the remaining tasks.
        ready_refs = list()
        rest_refs = task_refs
        while (len(ready_refs) < len(task_refs)):
            (this_ready_refs, rest_refs) = ray.wait(rest_refs, num_returns=1, timeout=None, fetch_local=True)
            cancel_other_tasks = False
            for ref in this_ready_refs:
                task_name = task_name_map[ref]
                try:
                    result = ray.get(ref)
                    print(f'Task {task_name} finished with result: {result}')
                except KeyboardInterrupt as e:
                    raise KeyboardInterrupt
                except Exception as e:
                    print(f'Task {task_name} raised exception: {e}')
                    this_cancel_other_tasks = True
                    # A task that was itself cancelled should not cascade.
                    if isinstance(e, ray.exceptions.RayTaskError):
                        if isinstance(e.cause, ray.exceptions.TaskCancelledError):
                            this_cancel_other_tasks = False
                    cancel_other_tasks = (cancel_other_tasks or this_cancel_other_tasks)
                ready_refs.append(ref)
            if cancel_other_tasks:
                print('Exception! Cancelling all other tasks.')
                for _ref in rest_refs:
                    ray.cancel(_ref, force=False)
        print('Training tasks done.')
        ray.cancel(monitor_ref, force=False)
    except KeyboardInterrupt:
        print('KeyboardInterrupt received in the driver.')
        _ = [ray.cancel(x, force=False) for x in (task_refs + [monitor_ref])]
        print('KeyboardInterrupt sent to workers.')
    except Exception as e:
        _ = [ray.cancel(x, force=True) for x in (task_refs + [monitor_ref])]
        raise e
    # Drain final task states for reporting; force-kill on a second Ctrl-C.
    for ref in (task_refs + [monitor_ref]):
        task_name = task_name_map[ref]
        try:
            result = ray.get(ref)
            print(f'Task {task_name} finished with result: {result}')
        except KeyboardInterrupt as e:
            print('Force killing all workers')
            _ = [ray.cancel(x, force=True) for x in task_refs]
            ray.cancel(monitor_ref, force=True)
        except Exception as e:
            print(f'Task {task_name} raised exception: {e}')
def generate_latin_hypercube_points(num_points, domain_bounds):
    """Sample ``num_points`` points via Latin hypercube sampling.

    Each dimension is split into ``num_points`` equal strata; exactly one
    point lands in each stratum (at a uniformly random offset), with the
    stratum assignment shuffled independently per dimension.

    :param num_points: number of points to generate; 0 yields an empty array
    :param domain_bounds: iterable of interval objects exposing ``min`` and
        ``length`` attributes, one per dimension
    :return: float64 array of shape (num_points, len(domain_bounds))
    """
    if num_points == 0:
        return numpy.array([])
    points = numpy.zeros((num_points, len(domain_bounds)), dtype=numpy.float64)
    for i, interval in enumerate(domain_bounds):
        # Edge length of one stratum along this dimension.  Plain true
        # division replaces the legacy Python-2 ``old_div`` shim; under
        # Python 3 they are identical for these float operands.
        subcube_edge_length = interval.length / float(num_points)
        ordering = numpy.arange(num_points)
        numpy.random.shuffle(ordering)
        # One point per stratum: stratum base + uniform jitter inside it
        # (vectorized over all points instead of a per-point loop).
        points[:, i] = (
            interval.min
            + subcube_edge_length * ordering
            + numpy.random.uniform(0.0, subcube_edge_length, size=num_points)
        )
    return points
# NOTE(review): the '@' was missing from this decorator line in the source
# (it was a bare no-op call expression); restored -- confirm upstream.
@_jit(nogil=True)
def _ld_matrix_jit(x: ArrayLike, scores: ArrayLike, chunk_window_starts: ArrayLike, chunk_window_stops: ArrayLike, abs_chunk_start: int, chunk_max_window_start: int, index_dtype: DType, value_dtype: DType, threshold: float) -> List[Any]:
    """Compute pairwise LD (Rogers-Huff r**2) rows for one chunk of windows.

    Returns a list of (index, other, r2, cmp) tuples where ``index``/``other``
    are absolute variant indices, ``r2`` the pairwise statistic, and ``cmp``
    an int8 recording which variant has the larger score (+1/0/-1).  A NaN
    ``threshold`` keeps every pair; otherwise only finite r2 >= threshold.
    """
    rows = list()
    # NaN threshold means "keep every pair".
    no_threshold = np.isnan(threshold)
    for ti in range(len(chunk_window_starts)):
        window_start = chunk_window_starts[ti]
        window_stop = chunk_window_stops[ti]
        # Cap the first-index range so that pairs are not recomputed by the
        # next window in this chunk, nor by a following chunk.
        max_window_start = window_stop
        if ti < (len(chunk_window_starts) - 1):
            next_window_start = chunk_window_starts[ti + 1]
            max_window_start = min(max_window_start, next_window_start)
        max_window_start = min(max_window_start, chunk_max_window_start)
        for i1 in range(window_start, max_window_start):
            index = abs_chunk_start + i1
            for i2 in range(i1, window_stop):
                other = abs_chunk_start + i2
                if i1 == i2:
                    # A variant with itself has r**2 exactly 1.
                    res = 1.0
                else:
                    res = rogers_huff_r2_between(x[i1], x[i2])
                # cmp is only meaningful when scores were supplied
                # (scores may be empty, in which case it stays 0).
                cmp = np.int8(0)
                if scores.shape[0] > 0:
                    if scores[i1] > scores[i2]:
                        cmp = np.int8(1)
                    elif scores[i1] < scores[i2]:
                        cmp = np.int8(-1)
                if no_threshold or ((res >= threshold) and np.isfinite(res)):
                    rows.append((index_dtype(index), index_dtype(other), value_dtype(res), cmp))
    return rows
def lookup_secscan_notification_severities(repository_id):
    """Yield the 'priority' configured on each 'vulnerability_found'
    notification of the given repository.

    Generator: if the repository does not exist the early ``return None``
    simply produces an empty iterator.  Events without a configured
    vulnerability priority are skipped.
    """
    try:
        # Existence check only -- `repo` itself is not used below.
        repo = Repository.get(id=repository_id)
    except Repository.DoesNotExist:
        return None
    event_kind = ExternalNotificationEvent.get(name='vulnerability_found')
    for event in RepositoryNotification.select().where((RepositoryNotification.repository == repository_id), (RepositoryNotification.event == event_kind)):
        # event_config_json holds e.g. {"vulnerability": {"priority": ...}}.
        severity = json.loads(event.event_config_json).get('vulnerability', {}).get('priority')
        if severity:
            (yield severity)
class Load_From_URL_To_File_TestCase(Load_From_URL_Test):
    """Exercise ``load.load_to_file`` downloading a URL into a temp file."""

    def setUp(self):
        super(Load_From_URL_To_File_TestCase, self).setUp()
        # Create (and immediately close) a temp file as the download target.
        (handle, self._target_path) = tempfile.mkstemp(prefix='testfile', text=True)
        os.close(handle)

    def runTest(self):
        # Successful download into the target file.
        target_path = load.load_to_file(self._url, self._target_path)
        self.assertEqual(target_path, self._target_path)
        with open(self._target_path, 'r') as f:
            self.assertEqual(self._content, f.read())
        self.assertEqual(self._content, load.load_to_str(self._url))
        # NOTE(review): the next two calls were corrupted in the source
        # (syntax errors: `self._url_ self._target_path` and a broken string
        # literal).  Reconstructed as bad-URL failure cases -- confirm
        # against upstream history.
        with self.assertRaises(KickstartError):
            load.load_to_file(self._url + 'foo', self._target_path)
        with self.assertRaises(KickstartError):
            load.load_to_file('', self._target_path)
        # Unwritable target path.
        with self.assertRaises(KickstartError):
            load.load_to_file(self._url, '/no/exist')
        # Missing remote resource.
        with self.assertRaises(KickstartError):
            load.load_to_file((self._url + '.TEST'), '/tmp/foo')

    def tearDown(self):
        super(Load_From_URL_To_File_TestCase, self).tearDown()
        os.unlink(self._target_path)
def setup_handlers(web_app):
    """Register the GPU (when GPUs are present) and CPU resource endpoints
    on the Jupyter server web application."""
    base_url = web_app.settings['base_url']
    handlers = []
    if apps.gpu.ngpus > 0:
        # Endpoint suffix -> handler class, mounted under base_url/URL_PATH.
        gpu_routes = [
            ('gpu_utilization', apps.gpu.GPUUtilizationHandler),
            ('gpu_usage', apps.gpu.GPUUsageHandler),
            ('gpu_resource', apps.gpu.GPUResourceHandler),
            ('pci_stats', apps.gpu.PCIStatsHandler),
            ('nvlink_throughput', apps.gpu.NVLinkThroughputHandler),
        ]
        handlers.extend(
            (url_path_join(base_url, URL_PATH, endpoint), handler)
            for endpoint, handler in gpu_routes
        )
    # CPU endpoint is always available.
    handlers.append(
        (url_path_join(base_url, URL_PATH, 'cpu_resource'), apps.cpu.CPUResourceHandler)
    )
    web_app.add_handlers('.*$', handlers)
def main(data_dir, client, c, config):
    """Run TPCx-BB query 03 (last-viewed items before a purchase) via
    dask-sql context ``c``, returning the result frame."""
    benchmark(read_tables, config, c)
    # Narrow the item table to the two columns the query needs.
    query_1 = '\n SELECT i_item_sk,\n CAST(i_category_id AS TINYINT) AS i_category_id\n FROM item\n '
    item_df = c.sql(query_1)
    # Persist so the (small) item projection is computed once and reused.
    item_df = item_df.persist()
    wait(item_df)
    c.create_table('item_df', item_df, persist=False)
    # Clickstream joined to items, partitioned by user for the session logic.
    query_2 = '\n SELECT CAST(w.wcs_user_sk AS INTEGER) as wcs_user_sk,\n wcs_click_date_sk * 86400 + wcs_click_time_sk AS tstamp,\n CAST(w.wcs_item_sk AS INTEGER) as wcs_item_sk,\n CAST(COALESCE(w.wcs_sales_sk, 0) AS INTEGER) as wcs_sales_sk\n FROM web_clickstreams AS w\n INNER JOIN item_df AS i ON w.wcs_item_sk = i.i_item_sk\n WHERE w.wcs_user_sk IS NOT NULL\n AND w.wcs_item_sk IS NOT NULL\n DISTRIBUTE BY wcs_user_sk\n '
    merged_df = c.sql(query_2)
    # Restrict to the query's item-category filter (module-level constant).
    query_3 = f'''
SELECT i_item_sk, i_category_id
FROM item_df
WHERE i_category_id IN {q03_purchased_item_category_IN}
'''
    item_df_filtered = c.sql(query_3)
    # Per-partition pass that finds items viewed before each purchase.
    product_view_results = merged_df.map_partitions(apply_find_items_viewed, item_mappings=item_df_filtered)
    # Drop intermediates promptly to release cluster memory.
    c.drop_table('item_df')
    del item_df
    del merged_df
    del item_df_filtered
    c.create_table('product_result', product_view_results, persist=False)
    last_query = f'''
SELECT CAST({q03_purchased_item_IN} AS BIGINT) AS purchased_item,
i_item_sk AS lastviewed_item,
COUNT(i_item_sk) AS cnt
FROM product_result
GROUP BY i_item_sk
ORDER BY purchased_item, cnt desc, lastviewed_item
LIMIT {q03_limit}
'''
    result = c.sql(last_query)
    c.drop_table('product_result')
    del product_view_results
    return result
# NOTE(review): the two bare expressions below look like stripped decorators
# (a flask blueprint route registration and a permission check, perhaps
# @bp.route(...) / @login_required('SETTINGS')) -- as written they are
# no-ops; confirm against upstream history.
('/json/load_config', endpoint='load_config')
_required('SETTINGS')
def load_config():
    """Render the settings section named by ?category=&section= as HTML.

    ``category`` must be 'core' or 'plugin'; otherwise (or with a missing
    section) respond with HTTP 500.
    """
    category = flask.request.args.get('category')
    section = flask.request.args.get('section')
    if ((category not in ('core', 'plugin')) or (not section)):
        return (jsonify(False), 500)
    conf = None
    api = flask.current_app.config['PYLOAD_API']
    if (category == 'core'):
        conf = api.get_config_dict()
    elif (category == 'plugin'):
        conf = api.get_plugin_config_dict()
    for (key, option) in conf[section].items():
        # 'desc'/'outline' are metadata entries, not options.
        if (key in ('desc', 'outline')):
            continue
        # A ';'-separated type string encodes a choice list.
        if (';' in option['type']):
            option['list'] = option['type'].split(';')
    return render_template('settings_item.html', skey=section, section=conf[section])
# NOTE(review): this bare expression looks like a stripped hypothesis
# decorator (@given(simple_typed_classes(defaults=True))); as written it is
# a no-op and cl_and_vals would never be supplied -- confirm upstream.
(simple_typed_classes(defaults=True))
def test_omit_default_roundtrip(cl_and_vals):
    """Fields equal to their defaults are omitted when unstructuring with
    omit_if_default=True, and the omission round-trips through structure."""
    converter = Converter(omit_if_default=True)
    (cl, vals, kwargs) = cl_and_vals

    # NOTE(review): an attrs decorator (e.g. @define) appears to have been
    # stripped here too -- Factory defaults require attrs; confirm upstream.
    class C():
        a: int = 1
        b: cl = Factory((lambda : cl(*vals, **kwargs)))
    # All-default instance unstructures to an empty mapping.
    inst = C()
    unstructured = converter.unstructure(inst)
    assert (unstructured == {})
    assert (inst == converter.structure(unstructured, C))
    # Only the non-default field appears.
    inst = C(0)
    unstructured = converter.unstructure(inst)
    assert (unstructured == {'a': 0})
    assert (inst == converter.structure(unstructured, C))
def get_contents(filename):
    """Read a bz2-compressed file of JSON lines and return a list of
    (title, serialized sentences) pairs, skipping documents that
    preprocessing rejects (falsy result)."""
    results = []
    with bz2.open(filename, mode='rt') as handle:
        for raw_line in handle:
            record = preprocess_sentences(json.loads(raw_line))
            # Preprocessing may discard a document entirely.
            if record:
                results.append((record['title'], serialize_object(record['sentences'])))
    return results
def build_trainer(args, device_id, model, optim, tokenizer):
    """Assemble a Trainer wired with gradient accumulation, GPU rank
    information and a tensorboard-backed report manager; logs the model's
    parameter count when a model is given."""
    if device_id >= 0:
        gpu_rank = int(args.gpu_ranks[device_id])
        n_gpu = args.world_size
    else:
        # CPU-only: no GPUs and rank 0.
        gpu_rank = 0
        n_gpu = 0
    print('gpu_rank %d' % gpu_rank)
    writer = SummaryWriter(args.model_path, comment='Unmt')
    report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer)
    trainer = Trainer(args, model, optim, args.accum_count, n_gpu, gpu_rank, report_manager, tokenizer)
    if model:
        logger.info('* number of parameters: %d' % _tally_parameters(model))
    return trainer
class NMTModel(nn.Module):
    """Encoder-decoder sequence model with graph-structured inputs
    (adjacency, edge types, relations) and hierarchical attention."""

    def __init__(self, encoder, decoder):
        super(NMTModel, self).__init__()
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, src, seg, speaker, adj_coo, edge_types, rels, tgt, lengths, bptt=False):
        """Encode ``src`` and decode against ``tgt``; returns (dec_out, attns).

        The last target step is dropped (standard teacher forcing: the
        decoder predicts it rather than consuming it).  When ``bptt`` is
        False the decoder state is (re)initialized from the encoder.
        """
        tgt = tgt[:(- 1)]
        (enc_state, memory_bank, lengths, memory_bank_utterance, lengths_utterance, hier_matrix) = self.encoder(src, seg, speaker, adj_coo, edge_types, rels, lengths)
        if (bptt is False):
            self.decoder.init_state(src, memory_bank, enc_state)
        (dec_out, attns) = self.decoder(tgt=tgt, memory_bank=memory_bank, memory_lengths=lengths, memory_bank_utterance=memory_bank_utterance, memory_lengths_utterance=lengths_utterance, hier_matrix=hier_matrix)
        return (dec_out, attns)

    def update_dropout(self, dropout):
        # Propagate a dropout change to both submodules.
        self.encoder.update_dropout(dropout)
        self.decoder.update_dropout(dropout)
.script
def _update(input: torch.Tensor, target: torch.Tensor, sample_weight: Optional[torch.Tensor]) -> Tuple[(torch.Tensor, torch.Tensor)]:
squared_error = torch.square((target - input))
if (sample_weight is None):
sum_squared_error = squared_error.sum(dim=0)
sum_weight = torch.tensor(target.size(0), device=target.device)
else:
if (squared_error.ndim == 2):
sample_weight = sample_weight.unsqueeze((- 1))
sum_squared_error = (squared_error * sample_weight).sum(dim=0)
sum_weight = sample_weight.sum(dim=0).squeeze()
return (sum_squared_error, sum_weight) |
def test_get_function_call_str():
    """get_function_call_str renders args compactly; objects whose __str__
    raises fall back to something printable (here, __repr__'s 'test')."""

    class TestObject():
        def __str__(self):
            # Force the formatter off the __str__ path.
            raise NotImplementedError()

        def __repr__(self):
            return 'test'

    def test_function():
        pass
    function_str_kv = qcore.inspection.get_function_call_str(test_function, (1, 2, 3), {'k': 'v'})
    function_str_dummy = qcore.inspection.get_function_call_str(test_function, (TestObject(),), {})
    assert_eq('test_inspection.test_function(1,2,3,k=v)', function_str_kv)
    assert_eq('test_inspection.test_function(test)', function_str_dummy)
def test__torque_driven_ocp__minimize_segment_velocity():
    """Smoke test: the segment-velocity example OCP builds without error
    (no solve, just prepare_ocp on the triple pendulum model)."""
    from bioptim.examples.torque_driven_ocp import example_minimize_segment_velocity as ocp_module
    bioptim_folder = os.path.dirname(ocp_module.__file__)
    ocp_module.prepare_ocp(biorbd_model_path=(bioptim_folder + '/models/triple_pendulum.bioMod'), n_shooting=5, phase_dynamics=PhaseDynamics.SHARED_DURING_THE_PHASE, expand_dynamics=False)
class QUDevMonitorObserverMixin(MonitorObserverMixin):
    """Deprecated Qt observer mixin mapping udev actions to per-action
    signals (deviceAdded/Removed/Changed/Moved) plus a generic deviceEvent."""

    def _setup_notifier(self, monitor, notifier_class):
        MonitorObserverMixin._setup_notifier(self, monitor, notifier_class)
        # udev action string -> Qt signal to emit for that action.
        self._action_signal_map = {'add': self.deviceAdded, 'remove': self.deviceRemoved, 'change': self.deviceChanged, 'move': self.deviceMoved}
        import warnings
        warnings.warn('Will be removed in 1.0. Use pyudev.pyqt4.MonitorObserver instead.', DeprecationWarning)

    def _emit_event(self, device):
        # Always emit the generic event; additionally emit the
        # action-specific signal when the action is recognized.
        self.deviceEvent.emit(device.action, device)
        signal = self._action_signal_map.get(device.action)
        if (signal is not None):
            signal.emit(device)
def get_assign_value(node):
    """Return ``(name, constant_values)`` for a single-target assignment node.

    Supports both ``Assign`` nodes (``.targets``) and annotated/augmented
    assignments (``.target``). Returns None for multi-target assignments or
    targets that are neither plain names nor attributes.
    """
    try:
        targets = node.targets
    except AttributeError:
        targets = [node.target]
    if len(targets) != 1:
        return None
    target = targets[0]
    if isinstance(target, astroid.nodes.AssignName):
        name = target.name
    elif isinstance(target, astroid.nodes.AssignAttr):
        name = target.attrname
    else:
        return None
    return (name, _get_const_values(node.value))
@pytest.mark.parametrize('prefer_grpc', [False, True])
def test_record_upload(prefer_grpc):
    """Upload NUM_VECTORS records in parallel and sanity-check the collection.

    The ``rand_number`` payload field is uniform in [0, 1), so filtering on
    >= 0.5 should match roughly half the points; the loose 100..900 bounds
    only guard against an empty or unfiltered result.
    """
    records = (
        Record(id=idx, vector=np.random.rand(DIM).tolist(), payload=one_random_payload_please(idx))
        for idx in range(NUM_VECTORS)
    )
    client = QdrantClient(prefer_grpc=prefer_grpc, timeout=TIMEOUT)
    client.recreate_collection(
        collection_name=COLLECTION_NAME,
        vectors_config=VectorParams(size=DIM, distance=Distance.DOT),
        timeout=TIMEOUT,
    )
    client.upload_records(collection_name=COLLECTION_NAME, records=records, parallel=2)
    sleep(1)  # give the server a moment to finish indexing the batch
    collection_info = client.get_collection(collection_name=COLLECTION_NAME)
    assert collection_info.points_count == NUM_VECTORS
    result_count = client.count(
        COLLECTION_NAME,
        count_filter=Filter(must=[FieldCondition(key='rand_number', range=Range(gte=0.5))]),
    )
    assert result_count.count < 900
    assert result_count.count > 100
class BehavioralRTLIRTypeCheckVisitorL3(BehavioralRTLIRTypeCheckVisitorL2):
    """Level-3 behavioral RTLIR type checker: adds BitStruct support.

    Extends the L2 checker with struct<->vector assignment checking, struct
    field (attribute) access on signals, and struct instantiation.
    """

    def __init__(s, component, freevars, accessed, tmpvars, rtlir_getter):
        super().__init__(component, freevars, accessed, tmpvars, rtlir_getter)
        # At L3 the base of an attribute may be a component or any signal
        # (struct-typed signals gain field access).
        s.type_expect['Attribute'] = (('value', (rt.Component, rt.Signal), 'the base of an attribute must be one of: component, signal!'),)

    def get_enforce_visitor(s):
        # Width/type enforcement visitor matching this checking level.
        return BehavioralRTLIRTypeEnforcerL3

    def _visit_Assign_single_target(s, node, target, i):
        # Check one LHS target of an assignment; `i` is the target position
        # (used only in error messages). Struct-typed operands are handled
        # here; everything else defers to the L2 logic.
        try:
            rhs_type = node.value.Type.get_dtype()
            lhs_type = target.Type.get_dtype()
        except AttributeError:
            # A side without a data type falls through to the non-struct path.
            rhs_type = None
            lhs_type = None
        l_is_struct = isinstance(lhs_type, rdt.Struct)
        r_is_struct = isinstance(rhs_type, rdt.Struct)
        if (l_is_struct or r_is_struct):
            if (l_is_struct and r_is_struct):
                # struct <- struct: type names must match exactly.
                if (lhs_type.get_name() != rhs_type.get_name()):
                    raise PyMTLTypeError(s.blk, node.ast, f'LHS and RHS of assignment should have the same type (LHS target#{(i + 1)} of {lhs_type} vs {rhs_type})!')
            else:
                # Mixed struct/vector assignment: the non-struct side must be
                # a vector with a matching bitwidth.
                (struct_type, vector_type) = (lhs_type, rhs_type)
                if r_is_struct:
                    (struct_type, vector_type) = (rhs_type, lhs_type)
                if (not isinstance(vector_type, rdt.Vector)):
                    raise PyMTLTypeError(s.blk, node.ast, f'LHS and RHS of assignment should have agreeable types (LHS target#{(i + 1)} of {lhs_type} vs {rhs_type})!')
                is_rhs_reinterpretable = (not node.value._is_explicit)
                (struct_nbits, vector_nbits) = (struct_type.get_length(), vector_type.get_length())
                if ((not r_is_struct) and is_rhs_reinterpretable and (struct_nbits != vector_nbits)):
                    # RHS literal without an explicit width: coerce it to the
                    # struct's bitwidth before the final width comparison.
                    s.enforcer.enter(s.blk, rt.NetWire(rdt.Vector(struct_nbits)), node.value)
                if l_is_struct:
                    # Re-read the RHS width (may have been enforced above).
                    vector_nbits = node.value.Type.get_dtype().get_length()
                if (struct_nbits != vector_nbits):
                    if l_is_struct:
                        (lnbits, rnbits) = (struct_nbits, vector_nbits)
                    else:
                        (lnbits, rnbits) = (vector_nbits, struct_nbits)
                    raise PyMTLTypeError(s.blk, node.ast, f'LHS and RHS of assignment should have the same bitwidth (LHS target#{(i + 1)} of {lhs_type} ({lnbits} bits) vs {rhs_type} ({rnbits} bits))!')
        else:
            super()._visit_Assign_single_target(node, target, i)

    def visit_Attribute(s, node):
        # Attribute access on a signal is struct field selection: the field's
        # RTLIR type keeps the same signal kind (port/wire/const) as the base.
        if isinstance(node.value.Type, rt.Signal):
            dtype = node.value.Type.get_dtype()
            if (not isinstance(dtype, rdt.Struct)):
                raise PyMTLTypeError(s.blk, node.ast, 'attribute base should be a struct signal!')
            if (not dtype.has_property(node.attr)):
                raise PyMTLTypeError(s.blk, node.ast, f'{dtype.get_name()} does not have field {node.attr}!')
            dtype = dtype.get_property(node.attr)
            if isinstance(node.value.Type, rt.Port):
                rtype = rt.Port(node.value.Type.get_direction(), dtype)
            elif isinstance(node.value.Type, rt.Wire):
                rtype = rt.Wire(dtype)
            elif isinstance(node.value.Type, rt.Const):
                obj = node.value.Type.get_object()
                if (obj is None):
                    rtype = rt.Const(dtype)
                else:
                    try:
                        # Carry the concrete field value through when available.
                        rtype = rt.Const(dtype, getattr(obj, node.attr))
                    except AttributeError:
                        rtype = rt.Const(dtype)
            else:
                raise PyMTLTypeError(s.blk, node.ast, f'unrecognized signal type {node.value.Type}!')
            node.Type = rtype
            dtype = node.Type.get_dtype()
            # A constant vector field is explicit only if its width is.
            if (isinstance(node.Type, rt.Const) and isinstance(dtype, rdt.Vector)):
                node._is_explicit = dtype.is_explicit()
            else:
                node._is_explicit = True
        else:
            super().visit_Attribute(node)

    def visit_StructInst(s, node):
        # Type-check a struct instantiation: argument count and each argument
        # type must match the struct's field list.
        cls = node.struct
        dtype = rdt.get_rtlir_dtype(cls())
        all_properties = dtype.get_all_properties()
        if (len(all_properties) != len(node.values)):
            raise PyMTLTypeError(s.blk, node.ast, f'BitStruct {cls.__name__} has {len(all_properties)} fields but only {len(node.values)} arguments are given!')
        all_types = zip(node.values, list(all_properties.items()))
        for (idx, (value, (name, field))) in enumerate(all_types):
            s.visit(value)
            if (not isinstance(value.Type, rt.Signal)):
                raise PyMTLTypeError(s.blk, node.ast, f'argument #{(idx + 1)} has type {value.Type} but not a signal!')
            v_dtype = value.Type.get_dtype()
            is_field_reinterpretable = (not value._is_explicit)
            if (v_dtype != field):
                if is_field_reinterpretable:
                    # Width-flexible literal: coerce it to the field's width.
                    target_nbits = field.get_length()
                    s.enforcer.enter(s.blk, rt.NetWire(rdt.Vector(target_nbits)), value)
                else:
                    raise PyMTLTypeError(s.blk, node.ast, f'Expected argument#{(idx + 1)} ( field {name} ) to have type {field}, but got {v_dtype}.')
        node.Type = rt.Const(dtype)
        node._is_explicit = True
def mkl_spmv(A, x):
    """Sparse matrix-vector product ``y = A @ x`` via MKL's ``zcsrgemv``.

    :param A: complex128 CSR sparse matrix (scipy.sparse.csr_matrix layout).
    :param x: dense complex vector, either 1D of length n or a 2D (n, 1) column.
    :returns: dense result with the same layout convention as ``x``.
    :raises ValueError: if ``x`` is neither 1D nor a single-column 2D array.
    """
    (m, _) = A.shape
    # Expose the CSR buffers as raw pointers for the Fortran-style MKL call.
    data = A.data.ctypes.data_as(ndpointer(np.complex128, ndim=1, flags='C'))
    indptr = A.indptr.ctypes.data_as(POINTER(c_int))
    indices = A.indices.ctypes.data_as(POINTER(c_int))
    if x.ndim == 1:
        y = np.empty(m, dtype=np.complex128, order='C')
    elif x.ndim == 2 and x.shape[1] == 1:
        y = np.empty((m, 1), dtype=np.complex128, order='C')
    else:
        # ValueError is a subclass of the previously raised Exception, so
        # existing callers catching Exception still work.
        raise ValueError('Input vector must be 1D row or 2D column vector')
    # 'N' = no transpose; MKL writes the product into y in place.
    zcsrgemv(
        byref(c_char(b'N')),
        byref(c_int(m)),
        data,
        indptr,
        indices,
        x.ctypes.data_as(ndpointer(np.complex128, ndim=1, flags='C')),
        y.ctypes.data_as(ndpointer(np.complex128, ndim=1, flags='C')),
    )
    return y
def convert_examples_to_features(examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer) -> List[InputFeatures]:
    """Tokenize multiple-choice examples into model-ready InputFeatures.

    Each example produces one encoding per (context, ending) choice; the
    question is merged with each ending, either by replacing a '_' blank
    (cloze style) or by simple concatenation.
    """
    # Map label strings to integer ids in list order.
    label_map = {label: i for (i, label) in enumerate(label_list)}
    features = []
    for (ex_index, example) in tqdm.tqdm(enumerate(examples), desc='convert examples to features'):
        if ((ex_index % 10000) == 0):
            logger.info(('Writing example %d of %d' % (ex_index, len(examples))))
        choices_inputs = []
        for (ending_idx, (context, ending)) in enumerate(zip(example.contexts, example.endings)):
            text_a = context
            if (example.question.find('_') != (- 1)):
                # Cloze-style question: substitute the ending for the blank.
                text_b = example.question.replace('_', ending)
            else:
                text_b = ((example.question + ' ') + ending)
            inputs = tokenizer(text_a, text_b, add_special_tokens=True, max_length=max_length, padding='max_length', truncation=True, return_overflowing_tokens=True)
            if (('num_truncated_tokens' in inputs) and (inputs['num_truncated_tokens'] > 0)):
                logger.info('Attention! you are cropping tokens (swag task is ok). If you are training ARC and RACE and you are poping question + options,you need to try to use a bigger max seq length!')
            choices_inputs.append(inputs)
        label = label_map[example.label]
        # Gather the per-choice encodings into parallel lists; mask/type ids
        # may be absent depending on the tokenizer.
        input_ids = [x['input_ids'] for x in choices_inputs]
        attention_mask = ([x['attention_mask'] for x in choices_inputs] if ('attention_mask' in choices_inputs[0]) else None)
        token_type_ids = ([x['token_type_ids'] for x in choices_inputs] if ('token_type_ids' in choices_inputs[0]) else None)
        features.append(InputFeatures(example_id=example.example_id, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label))
    # Log a couple of samples for manual inspection.
    for f in features[:2]:
        logger.info('*** Example ***')
        logger.info(('feature: %s' % f))
    return features
def _format_perf_breakdown(perf: Perf) -> str:
    """Render total perf followed by a (fwd_compute,fwd_comms,bwd_compute,bwd_comms) breakdown."""
    components = (perf.fwd_compute, perf.fwd_comms, perf.bwd_compute, perf.bwd_comms)
    # Values >= 1 are shown as rounded integers; smaller values keep one
    # significant figure via the project helper.
    formatted = []
    for num in components:
        formatted.append(str(round(num)) if num >= 1 else round_to_one_sigfig(num))
    breakdown_string = ','.join(formatted)
    return f'{str(round(perf.total, 3))} ({breakdown_string})'
@admin.register(Grant)
class GrantAdmin(ExportMixin, admin.ModelAdmin):
    """Admin interface for grant applications.

    Supports reviewing/approving grants (auto-filling default amounts on
    approval), bulk reply/voucher e-mail actions, and a custom ``summary/``
    view with per-country, per-gender and financial aggregates.
    """

    change_list_template = 'admin/grants/grant/change_list.html'
    # NOTE(review): class-level cache of speaker user ids shared across
    # requests once populated (see get_queryset) -- first request wins;
    # confirm this cross-request sharing is intended.
    speaker_ids = []
    resource_class = GrantResource
    form = GrantAdminForm
    list_display = ('user_display_name', 'country', 'is_speaker', 'conference', 'status', 'approved_type', 'ticket_amount', 'travel_amount', 'accommodation_amount', 'total_amount', 'country_type', 'applicant_reply_sent_at', 'applicant_reply_deadline', 'voucher_code', 'voucher_email_sent_at')
    list_filter = ('conference', 'status', 'country_type', 'occupation', 'grant_type', 'interested_in_volunteering', ('travelling_from', CountryFilter))
    search_fields = ('email', 'full_name', 'travelling_from', 'been_to_other_events', 'why', 'notes')
    user_fk = 'user_id'
    actions = [send_reply_emails, send_grant_reminder_to_waiting_for_confirmation, send_reply_email_waiting_list_update, create_grant_vouchers_on_pretix, send_voucher_via_email, 'delete_selected']
    autocomplete_fields = ('user',)
    fieldsets = (
        ('Manage the Grant', {'fields': ('status', 'approved_type', 'country_type', 'ticket_amount', 'travel_amount', 'accommodation_amount', 'total_amount', 'applicant_message', 'applicant_reply_sent_at', 'applicant_reply_deadline', 'pretix_voucher_id', 'voucher_code', 'voucher_email_sent_at')}),
        ('About the Applicant', {'fields': ('name', 'full_name', 'conference', 'user', 'age_group', 'gender', 'occupation')}),
        ('The Grant', {'fields': ('grant_type', 'needs_funds_for_travel', 'need_visa', 'need_accommodation', 'travelling_from', 'why', 'python_usage', 'been_to_other_events', 'community_contribution', 'interested_in_volunteering', 'notes', 'website', 'twitter_handle', 'github_handle', 'linkedin_url', 'mastodon_handle')}),
    )

    @admin.display(description='User')
    def user_display_name(self, obj):
        """Display name of the linked user, or the application e-mail."""
        if obj.user_id:
            return obj.user.display_name
        return obj.email

    @admin.display(description='C')
    def country(self, obj):
        """Flag emoji of the country the applicant is travelling from."""
        if obj.travelling_from:
            country = countries.get(code=obj.travelling_from)
            if country:
                return country.emoji
        return ''

    @admin.display(description='S')
    def is_speaker(self, obj):
        """Marker shown when the applicant is also a speaker.

        NOTE(review): both branches currently return '' -- the speaker
        marker (likely an emoji) appears to have been lost in transit;
        confirm the intended glyph for the speaker case.
        """
        if obj.user_id in self.speaker_ids:
            return ''
        return ''

    def get_queryset(self, request):
        qs = super().get_queryset(request)
        if not self.speaker_ids:
            # Lazily cache speaker ids for the currently filtered conference.
            conference_id = request.GET.get('conference__id__exact')
            self.speaker_ids = ScheduleItem.objects.filter(conference__id=conference_id, submission__speaker_id__isnull=False).values_list('submission__speaker_id', flat=True)
        return qs

    def save_form(self, request, form, change):
        """On (re-)approval, fill grant amounts from the conference defaults."""
        if (form.cleaned_data['status'] == Grant.Status.approved) and (
            (form.cleaned_data['status'] != form.initial.get('status'))
            or (form.cleaned_data['country_type'] != form.initial.get('country_type'))
            or (form.cleaned_data['approved_type'] != form.initial.get('approved_type'))
        ):
            conference = form.cleaned_data['conference']
            form.instance.ticket_amount = conference.grants_default_ticket_amount
            if form.cleaned_data['approved_type'] not in (Grant.ApprovedType.ticket_only, Grant.ApprovedType.ticket_travel):
                form.instance.accommodation_amount = conference.grants_default_accommodation_amount
            # Travel allowance depends on where the applicant travels from.
            if form.cleaned_data['country_type'] == Grant.CountryType.italy:
                form.instance.travel_amount = conference.grants_default_travel_from_italy_amount
            elif form.cleaned_data['country_type'] == Grant.CountryType.europe:
                form.instance.travel_amount = conference.grants_default_travel_from_europe_amount
            elif form.cleaned_data['country_type'] == Grant.CountryType.extra_eu:
                form.instance.travel_amount = conference.grants_default_travel_from_extra_eu_amount
            form.instance.total_amount = form.instance.ticket_amount + form.instance.accommodation_amount + form.instance.travel_amount
        return_value = super().save_form(request, form, change)
        return return_value

    def get_urls(self):
        urls = super().get_urls()
        custom_urls = [path('summary/', self.admin_site.admin_view(self.summary_view), name='grants-summary')]
        return custom_urls + urls

    def summary_view(self, request):
        """Render aggregate statistics for grants matching the current filters."""
        statuses = Grant.Status.choices
        (filtered_grants, formatted_filters) = self._filter_and_format_grants(request)
        grants_by_country = filtered_grants.values('travelling_from', 'status').annotate(total=Count('id'))
        (country_stats, status_totals, totals_per_continent) = self._aggregate_data_by_country(grants_by_country, statuses)
        gender_stats = self._aggregate_data_by_gender(filtered_grants, statuses)
        (financial_summary, total_amount) = self._aggregate_financial_data_by_status(filtered_grants, statuses)
        # Order rows by continent name, then country code.
        sorted_country_stats = dict(sorted(country_stats.items(), key=(lambda x: (x[0][0], x[0][2]))))
        context = {
            'country_stats': sorted_country_stats,
            'statuses': statuses,
            'genders': {code: name for (code, name) in GENDERS},
            'financial_summary': financial_summary,
            'total_amount': total_amount,
            'total_grants': filtered_grants.count(),
            'status_totals': status_totals,
            'totals_per_continent': totals_per_continent,
            'gender_stats': gender_stats,
            'filters': formatted_filters,
            **self.admin_site.each_context(request),
        }
        return TemplateResponse(request, 'admin/grants/grant_summary.html', context)

    def _aggregate_data_by_country(self, grants_by_country, statuses):
        """Group status counts by (continent, country); also return totals
        per status and per continent."""
        summary = {}
        status_totals = {status[0]: 0 for status in statuses}
        totals_per_continent = {}
        for data in grants_by_country:
            country = countries.get(code=data['travelling_from'])
            continent = (country.continent.name if country else 'Unknown')
            country_name = (f'{country.name} {country.emoji}' if country else 'Unknown')
            country_code = (country.code if country else 'Unknown')
            key = (continent, country_name, country_code)
            if key not in summary:
                summary[key] = {status[0]: 0 for status in statuses}
            summary[key][data['status']] += data['total']
            status_totals[data['status']] += data['total']
            if continent not in totals_per_continent:
                totals_per_continent[continent] = {status[0]: 0 for status in statuses}
            totals_per_continent[continent][data['status']] += data['total']
        return (summary, status_totals, totals_per_continent)

    def _aggregate_data_by_gender(self, filtered_grants, statuses):
        """Status counts per gender; missing gender is keyed by ''."""
        gender_data = filtered_grants.values('user__gender', 'status').annotate(total=Count('id'))
        gender_summary = {gender: {status[0]: 0 for status in statuses} for (gender, _) in GENDERS}
        gender_summary[''] = {status[0]: 0 for status in statuses}
        for data in gender_data:
            gender = (data['user__gender'] if data['user__gender'] else '')
            status = data['status']
            total = data['total']
            gender_summary[gender][status] += total
        return gender_summary

    def _aggregate_financial_data_by_status(self, filtered_grants, statuses):
        """Sum of ``total_amount`` per status, plus the overall total."""
        financial_data = filtered_grants.values('status').annotate(total_amount_sum=Sum('total_amount'))
        financial_summary = {status[0]: 0 for status in statuses}
        overall_total = 0
        for data in financial_data:
            status = data['status']
            total_amount = (data['total_amount_sum'] or 0)
            financial_summary[status] += total_amount
            overall_total += total_amount
        return (financial_summary, overall_total)

    def _filter_and_format_grants(self, request):
        """Apply whitelisted GET filters; return (queryset, display-name filters)."""
        field_lookups = ['__exact', '__in', '__gt', '__lt', '__contains', '__startswith', '__endswith', '__range', '__isnull']
        filter_mapping = {'conference__id': 'Conference ID', 'status': 'Status', 'country_type': 'Country Type', 'occupation': 'Occupation', 'grant_type': 'Grant Type', 'travelling_from': 'Country'}
        allowed_filters = {(f + lookup) for f in filter_mapping.keys() for lookup in field_lookups}

        def map_filter_key(key):
            # Strip a known lookup suffix, then map to a human-readable label.
            base_key = next((key[:(- len(lookup))] for lookup in field_lookups if key.endswith(lookup)), key)
            return filter_mapping.get(base_key, base_key.capitalize())

        raw_filter_params = {k: v for (k, v) in request.GET.items() if (k in allowed_filters)}
        filter_params = {map_filter_key(k): v for (k, v) in raw_filter_params.items()}
        filtered_grants = Grant.objects.filter(**raw_filter_params)
        return (filtered_grants, filter_params)

    class Media():
        js = ['admin/js/jquery.init.js']
class PdfDocument(pdfium_i.AutoCloseable):
    """A PDF document wrapping a pdfium ``FPDF_DOCUMENT`` handle.

    ``input`` may be a file path (str/Path), a byte buffer handled by
    ``_open_pdf()``, or an existing raw ``FPDF_DOCUMENT`` handle.
    """

    def __init__(self, input, password=None, autoclose=False):
        if isinstance(input, str):
            input = Path(input)
        if isinstance(input, Path):
            input = input.expanduser().resolve()
            if not input.is_file():
                raise FileNotFoundError(input)
        self._input = input
        self._password = password
        self._autoclose = autoclose
        # Objects to keep alive / to close alongside the document handle.
        self._data_holder = []
        self._data_closer = []
        self.formenv = None
        if isinstance(self._input, pdfium_c.FPDF_DOCUMENT):
            # Caller supplied a raw handle; adopt it directly.
            self.raw = self._input
        else:
            (self.raw, to_hold, to_close) = _open_pdf(self._input, self._password, self._autoclose)
            self._data_holder += to_hold
            self._data_closer += to_close
        super().__init__(PdfDocument._close_impl, self._data_holder, self._data_closer)

    def __repr__(self):
        if isinstance(self._input, Path):
            input_r = repr(str(self._input))
        elif isinstance(self._input, bytes):
            input_r = f'<bytes object at {hex(id(self._input))}>'
        elif isinstance(self._input, pdfium_c.FPDF_DOCUMENT):
            input_r = f'<FPDF_DOCUMENT at {hex(id(self._input))}>'
        else:
            input_r = repr(self._input)
        return f'{super().__repr__()[:(- 1)]} from {input_r}>'

    # NOTE(review): decorator restored -- AutoCloseable hooks appear to
    # expect `parent` as an attribute-style property; confirm against the
    # base class.
    @property
    def parent(self):
        # A document is the root of its object tree.
        return None

    @staticmethod
    def _close_impl(raw, data_holder, data_closer):
        """Finalizer: close the pdfium handle, then release retained data."""
        pdfium_c.FPDF_CloseDocument(raw)
        for data in data_holder:
            id(data)  # no-op touch; the loop only keeps references alive until here
        for data in data_closer:
            data.close()
        data_holder.clear()
        data_closer.clear()

    def __len__(self):
        """Number of pages in the document."""
        return pdfium_c.FPDF_GetPageCount(self)

    def __iter__(self):
        """Iterate over the document's pages."""
        for i in range(len(self)):
            (yield self[i])

    def __getitem__(self, i):
        return self.get_page(i)

    def __delitem__(self, i):
        self.del_page(i)

    # Restored @classmethod: `new` is an alternate constructor taking `cls`.
    @classmethod
    def new(cls):
        """Create a new, empty document."""
        new_pdf = pdfium_c.FPDF_CreateNewDocument()
        return cls(new_pdf)

    def init_forms(self, config=None):
        """Initialize a form environment for this document, if it has forms.

        No-op when the document has no forms or an env already exists.
        :param config: optional custom FPDF_FORMFILLINFO; a default is built
            otherwise (with a JS platform when the binary has XFA support).
        """
        formtype = self.get_formtype()
        if (formtype == pdfium_c.FORMTYPE_NONE) or self.formenv:
            return
        if ('V8' in PDFIUM_INFO.flags) and (PDFIUM_INFO.origin != 'sourcebuild') and (PDFIUM_INFO.build <= 5677):
            raise RuntimeError('V8 enabled pdfium-binaries builds <= 5677 crash on init_forms().')
        if not config:
            if 'XFA' in PDFIUM_INFO.flags:
                js_platform = pdfium_c.IPDF_JSPLATFORM(version=3)
                config = pdfium_c.FPDF_FORMFILLINFO(version=2, xfa_disabled=False, m_pJsPlatform=ctypes.pointer(js_platform))
            else:
                config = pdfium_c.FPDF_FORMFILLINFO(version=2)
        raw = pdfium_c.FPDFDOC_InitFormFillEnvironment(self, config)
        if not raw:
            raise PdfiumError(f'Initializing form env failed for document {self}.')
        self.formenv = PdfFormEnv(raw, config, self)
        self._add_kid(self.formenv)
        if formtype in (pdfium_c.FORMTYPE_XFA_FULL, pdfium_c.FORMTYPE_XFA_FOREGROUND):
            if 'XFA' in PDFIUM_INFO.flags:
                ok = pdfium_c.FPDF_LoadXFA(self)
                if not ok:
                    err = pdfium_c.FPDF_GetLastError()
                    logger.warning(f'FPDF_LoadXFA() failed with {pdfium_i.XFAErrorToStr.get(err)}')
            else:
                logger.warning('init_forms() called on XFA pdf, but this pdfium binary was compiled without XFA support.\nRun `PDFIUM_PLATFORM=auto-v8 pip install -v pypdfium2 --no-binary pypdfium2` to get a build with XFA support.')

    def get_formtype(self):
        return pdfium_c.FPDF_GetFormType(self)

    def get_pagemode(self):
        return pdfium_c.FPDFDoc_GetPageMode(self)

    def is_tagged(self):
        return bool(pdfium_c.FPDFCatalog_IsTagged(self))

    def save(self, dest, version=None, flags=pdfium_c.FPDF_NO_INCREMENTAL):
        """Save the document to a file path or writable byte buffer.

        :param version: optional PDF version (e.g. 17 for 1.7).
        :raises ValueError: if ``dest`` is neither a path nor a buffer.
        """
        if isinstance(dest, (str, Path)):
            (buffer, need_close) = (open(dest, 'wb'), True)
        elif pdfium_i.is_buffer(dest, 'w'):
            (buffer, need_close) = (dest, False)
        else:
            raise ValueError(f"Cannot save to '{dest}'")
        try:
            saveargs = (self, pdfium_i.get_bufwriter(buffer), flags)
            ok = (pdfium_c.FPDF_SaveAsCopy(*saveargs) if (version is None) else pdfium_c.FPDF_SaveWithVersion(*saveargs, version))
            if not ok:
                raise PdfiumError('Failed to save document.')
        finally:
            if need_close:
                buffer.close()

    def get_identifier(self, type=pdfium_c.FILEIDTYPE_PERMANENT):
        """Return the file identifier of the given type as bytes."""
        n_bytes = pdfium_c.FPDF_GetFileIdentifier(self, type, None, 0)
        buffer = ctypes.create_string_buffer(n_bytes)
        pdfium_c.FPDF_GetFileIdentifier(self, type, buffer, n_bytes)
        # Strip the trailing two-byte terminator.
        return buffer.raw[:(n_bytes - 2)]

    def get_version(self):
        """Return the PDF version as int (e.g. 17 for 1.7), or None on failure."""
        version = ctypes.c_int()
        ok = pdfium_c.FPDF_GetFileVersion(self, version)
        if not ok:
            return None
        return version.value

    def get_metadata_value(self, key):
        """Return the metadata string for ``key`` (may be empty)."""
        enc_key = (key + '\x00').encode('utf-8')
        n_bytes = pdfium_c.FPDF_GetMetaText(self, enc_key, None, 0)
        buffer = ctypes.create_string_buffer(n_bytes)
        pdfium_c.FPDF_GetMetaText(self, enc_key, buffer, n_bytes)
        return buffer.raw[:(n_bytes - 2)].decode('utf-16-le')

    METADATA_KEYS = ('Title', 'Author', 'Subject', 'Keywords', 'Creator', 'Producer', 'CreationDate', 'ModDate')

    def get_metadata_dict(self, skip_empty=False):
        """Return all standard metadata entries as a dict."""
        metadata = {k: self.get_metadata_value(k) for k in self.METADATA_KEYS}
        if skip_empty:
            metadata = {k: v for (k, v) in metadata.items() if v}
        return metadata

    def count_attachments(self):
        return pdfium_c.FPDFDoc_GetAttachmentCount(self)

    def get_attachment(self, index):
        raw_attachment = pdfium_c.FPDFDoc_GetAttachment(self, index)
        if not raw_attachment:
            raise PdfiumError(f'Failed to get attachment at index {index}.')
        return PdfAttachment(raw_attachment, self)

    def new_attachment(self, name):
        enc_name = (name + '\x00').encode('utf-16-le')
        enc_name_ptr = ctypes.cast(enc_name, pdfium_c.FPDF_WIDESTRING)
        raw_attachment = pdfium_c.FPDFDoc_AddAttachment(self, enc_name_ptr)
        if not raw_attachment:
            raise PdfiumError(f"Failed to create new attachment '{name}'.")
        return PdfAttachment(raw_attachment, self)

    def del_attachment(self, index):
        ok = pdfium_c.FPDFDoc_DeleteAttachment(self, index)
        if not ok:
            raise PdfiumError(f'Failed to delete attachment at index {index}.')

    def get_page(self, index):
        """Load and return the page at ``index``, wiring up the form env if any."""
        raw_page = pdfium_c.FPDF_LoadPage(self, index)
        if not raw_page:
            raise PdfiumError('Failed to load page.')
        page = PdfPage(raw_page, self, self.formenv)
        if self.formenv:
            pdfium_c.FORM_OnAfterLoadPage(page, self.formenv)
            self.formenv._add_kid(page)
        else:
            self._add_kid(page)
        return page

    def new_page(self, width, height, index=None):
        """Insert a new blank page of the given size (appended by default)."""
        if index is None:
            index = len(self)
        raw_page = pdfium_c.FPDFPage_New(self, index, width, height)
        page = PdfPage(raw_page, self, None)
        self._add_kid(page)
        return page

    def del_page(self, index):
        pdfium_c.FPDFPage_Delete(self, index)

    def import_pages(self, pdf, pages=None, index=None):
        """Import pages from another document (appended by default).

        ``pages`` may be a pdfium page-range string or a list of indices.
        """
        if index is None:
            index = len(self)
        if isinstance(pages, str):
            ok = pdfium_c.FPDF_ImportPages(self, pdf, pages.encode('ascii'), index)
        else:
            page_count = 0
            c_pages = None
            if pages:
                page_count = len(pages)
                c_pages = (ctypes.c_int * page_count)(*pages)
            ok = pdfium_c.FPDF_ImportPagesByIndex(self, pdf, c_pages, page_count, index)
        if not ok:
            raise PdfiumError('Failed to import pages.')

    def get_page_size(self, index):
        """Return (width, height) of the page at ``index`` without loading it."""
        size = pdfium_c.FS_SIZEF()
        ok = pdfium_c.FPDF_GetPageSizeByIndexF(self, index, size)
        if not ok:
            raise PdfiumError('Failed to get page size by index.')
        return (size.width, size.height)

    def get_page_label(self, index):
        """Return the displayed page label of the page at ``index``."""
        n_bytes = pdfium_c.FPDF_GetPageLabel(self, index, None, 0)
        buffer = ctypes.create_string_buffer(n_bytes)
        pdfium_c.FPDF_GetPageLabel(self, index, buffer, n_bytes)
        return buffer.raw[:(n_bytes - 2)].decode('utf-16-le')

    def page_as_xobject(self, index, dest_pdf):
        """Capture the page at ``index`` as an XObject of ``dest_pdf``."""
        raw_xobject = pdfium_c.FPDF_NewXObjectFromPage(dest_pdf, self, index)
        if raw_xobject is None:
            raise PdfiumError(f'Failed to capture page at index {index} as FPDF_XOBJECT.')
        xobject = PdfXObject(raw=raw_xobject, pdf=dest_pdf)
        self._add_kid(xobject)
        return xobject

    def _get_bookmark(self, bookmark, level):
        """Assemble a PdfOutlineItem for one raw bookmark at nesting ``level``."""
        n_bytes = pdfium_c.FPDFBookmark_GetTitle(bookmark, None, 0)
        buffer = ctypes.create_string_buffer(n_bytes)
        pdfium_c.FPDFBookmark_GetTitle(bookmark, buffer, n_bytes)
        title = buffer.raw[:(n_bytes - 2)].decode('utf-16-le')
        count = pdfium_c.FPDFBookmark_GetCount(bookmark)
        # Negative count: closed; zero: leaf (state undefined); positive: open.
        is_closed = (True if (count < 0) else (None if (count == 0) else False))
        n_kids = abs(count)
        dest = pdfium_c.FPDFBookmark_GetDest(self, bookmark)
        page_index = pdfium_c.FPDFDest_GetDestPageIndex(self, dest)
        if page_index == (- 1):
            page_index = None
        n_params = ctypes.c_ulong()
        view_pos = (pdfium_c.FS_FLOAT * 4)()
        view_mode = pdfium_c.FPDFDest_GetView(dest, n_params, view_pos)
        view_pos = list(view_pos)[:n_params.value]
        return PdfOutlineItem(level=level, title=title, is_closed=is_closed, n_kids=n_kids, page_index=page_index, view_mode=view_mode, view_pos=view_pos)

    def get_toc(self, max_depth=15, parent=None, level=0, seen=None):
        """Yield the table of contents as PdfOutlineItem entries (depth-first).

        Detects circular references via the ``seen`` address set.
        """
        if seen is None:
            seen = set()
        bookmark = pdfium_c.FPDFBookmark_GetFirstChild(self, parent)
        while bookmark:
            address = ctypes.addressof(bookmark.contents)
            if address in seen:
                logger.warning('A circular bookmark reference was detected whilst parsing the table of contents.')
                break
            else:
                seen.add(address)
            (yield self._get_bookmark(bookmark, level))
            if level < (max_depth - 1):
                (yield from self.get_toc(max_depth=max_depth, parent=bookmark, level=(level + 1), seen=seen))
            bookmark = pdfium_c.FPDFBookmark_GetNextSibling(self, bookmark)

    def render(self, converter, renderer=PdfPage.render, page_indices=None, pass_info=False, n_processes=None, mk_formconfig=None, **kwargs):
        """Deprecated: render multiple pages; use page.render() in a loop instead."""
        warnings.warn('The document-level pdf.render() API is deprecated and uncored due to serious issues in the original concept. Use page.render() and a caller-side loop or process pool instead.', category=DeprecationWarning)
        if not page_indices:
            page_indices = [i for i in range(len(self))]
        for i in page_indices:
            bitmap = renderer(self[i], **kwargs)
            if pass_info:
                (yield (converter(bitmap), bitmap.get_info()))
            else:
                (yield converter(bitmap))
def rescaleData(data, scale, offset, dtype, clip):
    """Rescale and clip ``data`` into ``dtype`` via a cached numba kernel.

    Kernels are compiled once per (input-dtype, output-dtype) pair and
    memoized in the module-level ``rescale_functions`` dict.
    """
    data_out = np.empty_like(data, dtype=dtype)
    key = (data.dtype.name, data_out.dtype.name)
    if key not in rescale_functions:
        signature = f'{key[0]}[:],f8,f8,f8,f8,{key[1]}[:]'
        rescale_functions[key] = numba.guvectorize(
            [signature], '(n),(),(),(),()->(n)', nopython=True
        )(rescale_clip_source)
    func = rescale_functions[key]
    func(data, scale, offset, clip[0], clip[1], out=data_out)
    return data_out
class DiscriminatorBlock(chainer.Chain):
    """Pre-activation residual block for an (optionally spectral-normalized) discriminator."""

    def __init__(self, in_channels, out_channels, hidden_channels=None, ksize=3, pad=1, activation=F.relu, downsample=False, sn=True):
        super(DiscriminatorBlock, self).__init__()
        w_init = chainer.initializers.GlorotUniform(np.sqrt(2))
        w_init_sc = chainer.initializers.GlorotUniform()
        self.activation = activation
        self.downsample = downsample
        # A learned 1x1 shortcut is needed whenever the shape changes.
        self.learnable_sc = (in_channels != out_channels) or downsample
        if hidden_channels is None:
            hidden_channels = in_channels
        with self.init_scope():
            if sn:
                self.c1 = SNConvolution2D(in_channels, hidden_channels, ksize=ksize, pad=pad, initialW=w_init)
                self.c2 = SNConvolution2D(hidden_channels, out_channels, ksize=ksize, pad=pad, initialW=w_init)
                if self.learnable_sc:
                    self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=w_init_sc)
            else:
                self.c1 = L.Convolution2D(in_channels, hidden_channels, ksize=ksize, pad=pad, initialW=w_init)
                self.c2 = L.Convolution2D(hidden_channels, out_channels, ksize=ksize, pad=pad, initialW=w_init)
                if self.learnable_sc:
                    self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=w_init_sc)

    def residual(self, x):
        # activation -> conv -> activation -> conv (-> optional downsample)
        h = self.c1(self.activation(x))
        h = self.c2(self.activation(h))
        return _downsample(h) if self.downsample else h

    def shortcut(self, x):
        if not self.learnable_sc:
            return x
        h = self.c_sc(x)
        return _downsample(h) if self.downsample else h

    def __call__(self, x):
        return self.residual(x) + self.shortcut(x)
@cluster_commands.command(name='cluster-undeploy')
@click.option('--namespace', '-n', default='default', help='Kubernetes namespace [default]')
@click.option('--instance-name', default='reana', help='REANA instance name')
def cluster_undeploy(namespace, instance_name):
    """Uninstall the REANA Helm release and wipe its secrets and shared volume."""
    helm_releases = run_command(f'helm ls --short -n {namespace}', 'reana', return_output=True).splitlines()
    if instance_name in helm_releases:
        for cmd in [
            f'helm uninstall {instance_name} -n {namespace}',
            f"kubectl get secrets -n {namespace} -o custom-columns=':metadata.name' | grep {instance_name} | xargs kubectl delete secret -n {namespace}",
            "docker exec -i -t kind-control-plane sh -c '/bin/rm -rf /var/reana/*'",
        ]:
            run_command(cmd, 'reana')
    else:
        msg = 'No REANA cluster to undeploy.'
        display_message(msg, 'reana')
class CondenseLinear(nn.Module):
    """Linear layer applied to a learned subset of input features.

    Keeps only ``int(in_features * drop_rate)`` input columns, selected via
    the ``index`` buffer (zero-filled until learned/loaded from a
    checkpoint), then applies a dense linear layer to them.
    """

    def __init__(self, in_features, out_features, drop_rate=0.5):
        super(CondenseLinear, self).__init__()
        drop_in_features = int(in_features * drop_rate)
        self.linear = nn.Linear(in_features=drop_in_features, out_features=out_features)
        # Buffer, not a parameter: column indices to keep; persisted in state_dict.
        self.register_buffer('index', torch.LongTensor(drop_in_features))
        self.index.fill_(0)

    def forward(self, x):
        # torch.autograd.Variable has been a no-op since PyTorch 0.4, so the
        # index buffer is passed to index_select directly.
        x = torch.index_select(x, dim=1, index=self.index)
        return self.linear(x)
def get_context_with_bottleneck_to_question_model(rnn_dim: int, q2c: bool, res_rnn: bool, res_self_att: bool):
    """Build a SingleContextWithBottleneckToQuestionModel with standard embeddings.

    ``q2c`` toggles the question->context attention branch; ``res_rnn`` and
    ``res_self_att`` configure the residual post-mapper shared by both
    attention directions.
    """
    recurrent_layer = CudnnGru(rnn_dim, w_init=TruncatedNormal(stddev=0.05))
    res_model = get_res_fc_seq_fc(model_rnn_dim=rnn_dim, rnn=res_rnn, self_att=res_self_att)
    if q2c:
        question_to_context = AttentionWithPostMapper(BiAttention(TriLinear(bias=True), True), post_mapper=res_model)
    else:
        question_to_context = None
    context_to_question = AttentionWithPostMapper(BiAttention(TriLinear(bias=True), True), post_mapper=res_model)
    word_embed = FixedWordEmbedder(vec_name='glove.840B.300d', word_vec_init_scale=0, learn_unk=False, cpu=True)
    char_embed = CharWordEmbedder(
        LearnedCharEmbedder(word_size_th=14, char_th=50, char_dim=20, init_scale=0.05, force_cpu=True),
        MaxPool(Conv1d(100, 5, 0.8)),
        shared_parameters=True,
    )
    return SingleContextWithBottleneckToQuestionModel(
        encoder=QuestionsAndParagraphsEncoder(BinaryAnswerEncoder()),
        word_embed=word_embed,
        char_embed=char_embed,
        embed_mapper=SequenceMapperSeq(VariationalDropoutLayer(0.8), recurrent_layer, VariationalDropoutLayer(0.8)),
        question_to_context_attention=question_to_context,
        context_to_question_attention=context_to_question,
        sequence_encoder=CudnnGruEncoder(rnn_dim, w_init=TruncatedNormal(stddev=0.05)),
        rep_merge=ConcatLayer(),
        predictor=BinaryFixedPredictor(),
    )
def main(args):
    """Entry point: set up distributed training, logging and seeding, then
    load a hybrid checkpoint (HICO backbone + Deformable-DETR transformer)
    into the model.
    """
    utils.init_distributed_mode(args)
    os.makedirs(args.output_dir, exist_ok=True)
    os.environ['output_dir'] = args.output_dir
    logger = setup_logger(output=os.path.join(args.output_dir, 'info.txt'), distributed_rank=args.rank, color=False, name='DAB-DETR')
    logger.info('git:\n {}\n'.format(utils.get_sha()))
    logger.info('Command: ' + ' '.join(sys.argv))
    if args.rank == 0:
        # Persist the full config next to the logs for reproducibility.
        save_json_path = os.path.join(args.output_dir, 'config.json')
        with open(save_json_path, 'w') as f:
            json.dump(vars(args), f, indent=2)
        logger.info('Full config saved to {}'.format(save_json_path))
    logger.info('world size: {}'.format(args.world_size))
    logger.info('rank: {}'.format(args.rank))
    logger.info('local_rank: {}'.format(args.local_rank))
    logger.info('args: ' + str(args) + '\n')
    if args.frozen_weights is not None:
        assert args.masks, 'Frozen training is meant for segmentation only'
    print(args)
    device = torch.device(args.device)
    # Seed every RNG with a rank-dependent offset: distinct but reproducible
    # streams across processes.
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    (model, criterion, postprocessors) = build_model_main(args)
    print('print loaded model')
    model.to(device)
    Vcoco_param = {}
    model_without_ddp = model
    # NOTE(review): hard-coded absolute checkpoint paths -- these should come
    # from args/config; confirm before reusing outside the original machine.
    HICO_param = torch.load('/mnt/gluster/home/mashuailei/DAB-DETR-main/dahoi/logs/2KA_PLANBV1_MULTI_base/checkpoint0141.pth', map_location='cpu')['model']
    for (key, value) in HICO_param.items():
        if 'backbone' in key:
            Vcoco_param[key] = value
    DDETR_param = torch.load('/mnt/gluster/home/mashuailei/DAB-DETR-main/dahoi/param/r50_deformable_detr-checkpoint.pth', map_location='cpu')['model']
    for (key, value) in DDETR_param.items():
        if 'transformer' in key:
            Vcoco_param[key] = value
    # The level embedding always comes from the HICO checkpoint.
    Vcoco_param['transformer.level_embed'] = HICO_param['transformer.level_embed']
    (missing_keys, unexpected_keys) = model_without_ddp.load_state_dict(Vcoco_param, strict=False)
    if len(missing_keys) > 0:
        print('Missing Keys: {}'.format(missing_keys))
    if len(unexpected_keys) > 0:
        print('Unexpected Keys: {}'.format(unexpected_keys))
@add_handler('resource')
def qute_resource(url: QUrl) -> _HandlerRet:
    """Serve a bundled resource file for qute://resource/<path> URLs.

    :raises NotFoundError: if no resource exists at the given path.
    """
    path = url.path().lstrip('/')
    mimetype = utils.guess_mimetype(path, fallback=True)
    try:
        data = resources.read_file_binary(path)
    except FileNotFoundError as e:
        raise NotFoundError(str(e))
    return (mimetype, data)
def preprocess(image):
    """Convert a PIL image into a ``[-1, 1]`` float tensor in NCHW layout.

    Width and height are snapped down to the nearest multiple of 32 before
    resizing with Lanczos interpolation.
    """
    width, height = image.size
    width -= width % 32
    height -= height % 32
    resized = image.resize((width, height), resample=PIL_INTERPOLATION['lanczos'])
    pixels = np.array(resized).astype(np.float32) / 255.0
    pixels = pixels[None].transpose(0, 3, 1, 2)
    tensor = torch.from_numpy(pixels)
    return 2.0 * tensor - 1.0
class _ConstantCumulativeRiskMetric(object):
def __init__(self, field, value):
self._field = field
self._value = value
def end_of_bar(self, packet, *args):
packet['cumulative_risk_metrics'][self._field] = self._value
def end_of_session(self, packet, *args):
packet['cumulative_risk_metrics'][self._field] = self._value |
def test_cyclonedx_fix(monkeypatch, vuln_data, fix_data):
    """CycloneDX JSON output is parseable and emits exactly one warning."""
    import pip_audit._format.cyclonedx as cyclonedx
    fake_logger = pretend.stub(warning=pretend.call_recorder(lambda s: None))
    monkeypatch.setattr(cyclonedx, 'logger', fake_logger)
    fmt = CycloneDxFormat(inner_format=CycloneDxFormat.InnerFormat.Json)
    output = fmt.format(vuln_data, fix_data)
    assert json.loads(output) is not None
    assert len(fake_logger.warning.calls) == 1
def createCaseFromTemplate(output_path, source_path, backup_path=None):
    """Create a case folder at `output_path` from a template.

    `source_path` is either a path containing 'tutorials' (cloned via
    ``cloneExistingCase``, resolved against ``getFoamDir()`` when relative)
    or a ``.zip`` archive (extracted into `output_path`).

    Any pre-existing `output_path` is moved to `backup_path` when given,
    otherwise deleted.

    Raises:
        Exception: if the template is missing or of an unrecognised kind.
    """
    # Preserve or discard a pre-existing case folder before regenerating it.
    if backup_path and os.path.isdir(output_path):
        shutil.move(output_path, backup_path)
    if os.path.isdir(output_path):
        shutil.rmtree(output_path)
    os.makedirs(output_path)
    if 'tutorials' in source_path:
        if not os.path.isabs(source_path):
            source_path = os.path.join(getFoamDir(), source_path)
        if os.path.exists(source_path):
            cloneExistingCase(output_path, source_path)
        else:
            raise Exception('Error: tutorial case folder: {} not found'.format(source_path))
    elif source_path.endswith('.zip'):
        if os.path.isfile(source_path):
            import zipfile
            with zipfile.ZipFile(source_path, 'r') as z:
                z.extractall(output_path)
        else:
            raise Exception('Error: template case file {} not found'.format(source_path))
    else:
        raise Exception('Error: template {} is not a tutorials case path or zipped file'.format(source_path))
class Effect1590(BaseEffect):
    """Passive effect boosting missile explosion velocity per skill level."""

    type = 'passive'

    def handler(fit, container, context, projectionRange, **kwargs):
        skill_context = 'skill' in context
        # Skill-sourced bonuses scale with the trained level; others apply once.
        level = container.level if skill_context else 1
        # Skills, implants and boosters are exempt from stacking penalties.
        penalize = not (skill_context or 'implant' in context or 'booster' in context)
        bonus = container.getModifiedItemAttr('aoeVelocityBonus') * level
        fit.modules.filteredChargeBoost(
            lambda mod: mod.charge.requiresSkill('Missile Launcher Operation'),
            'aoeVelocity', bonus, stackingPenalties=penalize, **kwargs)
class AutoQuant:
    """Automated post-training quantization (PTQ) pipeline for a TF session.

    Applies batchnorm folding, cross-layer equalization and AdaRound in that
    order, keeping the best intermediate result, until the target accuracy
    (FP32 accuracy minus an allowed drop) is met.
    """

    def __init__(self, session: tf.compat.v1.Session, starting_op_names: List[str], output_op_names: List[str], dataset: tf.compat.v1.data.Dataset, eval_callback: Callable[[tf.compat.v1.Session], float], param_bw: int = 8, output_bw: int = 8, quant_scheme: QuantScheme = QuantScheme.post_training_tf_enhanced, rounding_mode: str = 'nearest', config_file: str = None, results_dir: str = './tmp', cache_id: str = None, strict_validation: bool = True) -> None:
        """Validate the inputs and prepare quantsim/AdaRound/eval machinery."""
        _validate_inputs(session, starting_op_names, output_op_names, dataset, eval_callback, results_dir, strict_validation, quant_scheme, param_bw, output_bw, rounding_mode)
        self.fp32_model = session
        self.starting_op_names = starting_op_names
        self.output_op_names = output_op_names
        self.dataset = dataset
        self.eval_callback = eval_callback
        self._fp32_accuracy = None
        # Activation and param quantizers start from the same scheme; a better
        # pair may be chosen later by _choose_default_quant_scheme().
        self._quantsim_params = dict(param_bw=param_bw, output_bw=output_bw, quant_scheme=_QuantSchemePair(quant_scheme, quant_scheme), rounding_mode=rounding_mode, config_file=config_file)
        self.results_dir = results_dir
        if cache_id:
            self.cache_dir = os.path.join(results_dir, '.auto_quant_cache', cache_id)
        else:
            self.cache_dir = None

        def forward_pass_callback(sess: tf.compat.v1.Session, _: Any = None) -> None:
            """Run the whole dataset through the graph (used to collect encodings)."""
            output_ops = [sess.graph.get_operation_by_name(op_name) for op_name in output_op_names]
            iterator = iterate_tf_dataset(self.dataset)
            for inputs in tqdm(iterator):
                feed_dict = create_input_feed_dict(sess.graph, starting_op_names, inputs)
                sess.run(output_ops, feed_dict=feed_dict)
        self.forward_pass_callback = forward_pass_callback
        self.eval_callback = eval_callback
        # Probe the dataset to size AdaRound: use at most ~2000 samples.
        # NOTE(review): an empty dataset leaves batch_size as None and the
        # division below fails — confirm upstream validation guarantees data.
        iterator = iterate_tf_dataset(self.dataset)
        batch_size = None
        data_count = 0
        for inputs in iterator:
            if not batch_size:
                batch_size = len(inputs)
            data_count += len(inputs)
            if data_count >= 2000:
                break
        num_samples = min(data_count, 2000)
        num_batches = math.floor(num_samples / batch_size)
        self.adaround_params = AdaroundParameters(self.dataset, num_batches)
        self.eval_manager = _EvalManager(quantsim_factory=self._create_quantsim_and_encodings, eval_func=self._evaluate_model_performance, starting_op_names=self.starting_op_names, output_op_names=self.output_op_names, results_dir=self.results_dir, strict_validation=strict_validation)
        self._quant_scheme_candidates = _QUANT_SCHEME_CANDIDATES

    def _evaluate_model_performance(self, sess: tf.compat.v1.Session) -> float:
        """Evaluate `sess` on the standard number of performance-eval samples."""
        return self.eval_callback(sess, NUM_SAMPLES_FOR_PERFORMANCE_EVALUATION)

    def run_inference(self) -> Tuple[QuantizationSimModel, float]:
        """Quantize with batchnorm folding only and return ``(sim, accuracy)``."""
        model = self.fp32_model
        with self.eval_manager.session('Batchnorm Folding', ptq=True) as sess:
            model, _ = self._apply_batchnorm_folding(model)
            sim = self._create_quantsim_and_encodings(model)
            if sess.ptq_result is None:
                sess.set_ptq_result(model=model, sim=sim, applied_techniques=['batchnorm_folding'])
        if sess.ptq_result is None:
            # Evaluation failed inside the session; measure directly instead.
            acc = self._evaluate_model_performance(sim.session)
        else:
            acc = sess.ptq_result.accuracy
        return (sim, acc)

    def optimize(self, allowed_accuracy_drop: float = 0.0) -> Tuple[tf.compat.v1.Session, float, str]:
        """Run the full PTQ pipeline; return ``(model, accuracy, encoding_path)``."""
        result = self._optimize_helper(self._optimize_main, allowed_accuracy_drop)
        return (result['model'], result['accuracy'], result['encoding_path'])

    def set_adaround_params(self, adaround_params: AdaroundParameters) -> None:
        """Override the AdaRound parameters inferred from the dataset."""
        self.adaround_params = adaround_params

    def _create_quantsim_and_encodings(self, model: tf.compat.v1.Session, rounding_mode: str = None, output_bw: int = None, output_quant_scheme: QuantScheme = None, output_percentile: float = None, param_bw: int = None, param_quant_scheme: QuantScheme = None, param_percentile: float = None, config_file: str = None, encoding_path: str = None) -> QuantizationSimModel:
        """Build a QuantizationSimModel and compute (or load) its encodings.

        Any argument left as None falls back to the value captured in
        ``self._quantsim_params``; `encoding_path`, when given, freezes the
        param encodings instead of recomputing them.
        """
        if output_bw is not None:
            assert output_bw <= 32
        if param_bw is not None:
            assert param_bw <= 32
        if (output_quant_scheme is None) or (param_quant_scheme is None):
            assert self._quantsim_params['quant_scheme'] is not None
        kwargs = dict(rounding_mode=(rounding_mode or self._quantsim_params['rounding_mode']), default_output_bw=(output_bw or self._quantsim_params['output_bw']), default_param_bw=(param_bw or self._quantsim_params['param_bw']), config_file=(config_file or self._quantsim_params['config_file']))
        prevent_param_modification = False
        with deepcopy_tf_session(model) as _model:
            sim = QuantizationSimModel(_model, self.starting_op_names, self.output_op_names, **kwargs)
        if encoding_path:
            # Frozen encodings must not be overwritten below.
            prevent_param_modification = True
        param_quantizers, activation_quantizers = (list(sim._param_quantizers.values()), list(sim._activation_quantizers.values()))
        default_quant_scheme = self._quantsim_params.get('quant_scheme')
        if default_quant_scheme is not None:
            output_quant_scheme = (output_quant_scheme or default_quant_scheme.output_quant_scheme)
            output_percentile = (output_percentile or default_quant_scheme.output_percentile)
            param_quant_scheme = (param_quant_scheme or default_quant_scheme.param_quant_scheme)
            param_percentile = (param_percentile or default_quant_scheme.param_percentile)
        for quantizer in activation_quantizers:
            quantizer.quant_scheme = output_quant_scheme
            if (quantizer.quant_scheme == QuantScheme.post_training_percentile) and (output_percentile is not None):
                quantizer.set_percentile_value(output_percentile)
        if not prevent_param_modification:
            for quantizer in param_quantizers:
                quantizer.quant_scheme = (param_quant_scheme or default_quant_scheme.param_quant_scheme)
                if (quantizer.quant_scheme == QuantScheme.post_training_percentile) and (param_percentile is not None):
                    quantizer.set_percentile_value(param_percentile)
        # 32-bit effectively means "no quantization" — disable those quantizers.
        if output_bw == 32:
            for quantizer in activation_quantizers:
                quantizer.enabled = False
        if param_bw == 32:
            for quantizer in param_quantizers:
                quantizer.enabled = False
        if encoding_path:
            sim.set_and_freeze_param_encodings(encoding_path)
        if any(quantizer.enabled for quantizer in (param_quantizers + activation_quantizers)):
            sim.compute_encodings(self.forward_pass_callback, None)
        return sim

    # NOTE(review): _apply_batchnorm_folding may also have carried a stripped
    # `@cache.mark(...)` decorator upstream — confirm against the original.
    def _apply_batchnorm_folding(self, sess: tf.compat.v1.Session) -> Tuple[tf.compat.v1.Session, List[Tuple[tf.Operation, tf.Operation]]]:
        """Fold batchnorms into preceding layers on a copy of the session."""
        with deepcopy_tf_session(sess) as sess:
            return fold_all_batch_norms(sess, self.starting_op_names, self.output_op_names)

    @cache.mark('cle', TfSessionSerializationProtocol())
    def _apply_cross_layer_equalization(self, sess: tf.compat.v1.Session) -> tf.compat.v1.Session:
        """Apply cross-layer equalization on a copy of the session (cached)."""
        with deepcopy_tf_session(sess) as sess:
            return equalize_model(sess, self.starting_op_names, self.output_op_names)

    def _apply_adaround(self, sess: tf.compat.v1.Session) -> Tuple[tf.compat.v1.Session, str]:
        """Apply AdaRound and return the new session plus its encodings path."""
        if self.adaround_params is None:
            raise RuntimeError
        # 4-bit weights need more optimization iterations to converge.
        if self._quantsim_params['param_bw'] == 4:
            self.adaround_params.num_iterations = 15000
        filename_prefix = 'adaround'
        adaround_encoding_path = os.path.join(self.results_dir, '{}.encodings'.format(filename_prefix))
        _apply_adaround_cached = cache.mark('adaround', TfSessionSerializationProtocol())(Adaround.apply_adaround)
        ada_sess = _apply_adaround_cached(sess, self.starting_op_names, self.output_op_names, self.adaround_params, path=self.results_dir, filename_prefix=filename_prefix, default_param_bw=self._quantsim_params['param_bw'], default_quant_scheme=self._quantsim_params.get('quant_scheme').param_quant_scheme, default_config_file=self._quantsim_params['config_file'])
        return (ada_sess, adaround_encoding_path)

    def _optimize_helper(self, optimize_fn: Callable, allowed_accuracy_drop: float) -> Tuple[tf.compat.v1.Session, float, str]:
        """Wrap `optimize_fn` with caching, logging and diagnostics export."""
        allowed_accuracy_drop = float(allowed_accuracy_drop)
        if allowed_accuracy_drop < 0:
            raise ValueError('`allowed_accuracy_drop` must be a positive value. Got {:.2f}'.format(allowed_accuracy_drop))
        self.eval_manager.clear()
        try:
            with cache.enable(self.cache_dir):
                _logger.info('Starting AutoQuant')
                self._fp32_accuracy = self._evaluate_model_performance(self.fp32_model)
                target_acc = self._fp32_accuracy - allowed_accuracy_drop
                _logger.info('Target eval score: %f', target_acc)
                _logger.info('FP32 eval score (W32A32): %f', self._fp32_accuracy)
                ret = optimize_fn(self.fp32_model, target_acc)
                acc = ret['accuracy']
                if acc is not None:
                    _logger.info('Best eval score: %f', acc)
                    best_res = self.eval_manager.get_best_ptq_result()
                    best_res.save_result_as('best_model')
                    if acc < target_acc:
                        _logger.info('AutoQuant is unable to match the target accuracy. Consider Quantization Aware Training.')
                return ret
        finally:
            self.eval_manager.export_diagnostics()

    def get_quant_scheme_candidates(self) -> Tuple[_QuantSchemePair, ...]:
        """Return the quant-scheme pairs considered during scheme selection."""
        return self._quant_scheme_candidates

    def set_quant_scheme_candidates(self, candidates: Tuple[_QuantSchemePair, ...]):
        """Replace the quant-scheme candidate pool (shallow-copied)."""
        self._quant_scheme_candidates = copy.copy(candidates)

    def _choose_default_quant_scheme(self):
        """Pick the best-scoring candidate quant-scheme pair by evaluation."""
        def eval_fn(pair: _QuantSchemePair):
            sim = self._create_quantsim_and_encodings(self.fp32_model, param_quant_scheme=pair.param_quant_scheme, param_percentile=pair.param_percentile, output_quant_scheme=pair.output_quant_scheme, output_percentile=pair.output_percentile)
            eval_score = self._evaluate_model_performance(sim.session)
            _logger.info('Evaluation finished: %s (eval score: %f)', pair, eval_score)
            return eval_score
        param_bw = self._quantsim_params['param_bw']
        output_bw = self._quantsim_params['output_bw']
        candidates = self.get_quant_scheme_candidates()
        # At >= 16 bits, tf-enhanced brings no benefit — restrict to plain tf.
        if param_bw >= 16:
            candidates = [candidate for candidate in candidates if candidate.param_quant_scheme == QuantScheme.post_training_tf]
        if output_bw >= 16:
            candidates = [candidate for candidate in candidates if candidate.output_quant_scheme == QuantScheme.post_training_tf]
        if len(candidates) == 1:
            return candidates[0]
        assert candidates
        return max(candidates, key=eval_fn)

    def _optimize_main(self, fp32_model: tf.compat.v1.Session, target_acc: float):
        """PTQ loop: batchnorm folding -> CLE -> AdaRound until target is met."""
        with self.eval_manager.session('QuantScheme Selection') as sess:
            self._quantsim_params['quant_scheme'] = self._choose_default_quant_scheme()
        with self.eval_manager.session('W32 Evaluation') as sess:
            w32_eval_score = sess.eval(sess=fp32_model, param_bw=32)
            _logger.info('Evaluation finished: W32A%d (eval score: %f)', self._quantsim_params['output_bw'], w32_eval_score)
            # If even 32-bit weights can't hit the target, PTQ cannot help.
            if w32_eval_score < target_acc:
                _logger.info('W32A%d eval score (%f) is lower than the target eval score (%f). This means it is unlikely that the target eval score can be met using PTQ techniques. Please consider finetuning the model using range learning.', self._quantsim_params['output_bw'], w32_eval_score, target_acc)
                return {'model': None, 'accuracy': None, 'encoding_path': None, 'applied_techniques': None}
            sess.result['target_satisfied'] = True
        with self.eval_manager.session('Batchnorm Folding', ptq=True) as sess:
            model, _ = self._apply_batchnorm_folding(fp32_model)
            if sess.ptq_result is None:
                sess.set_ptq_result(model=model, applied_techniques=['batchnorm_folding'])
        best_result = self.eval_manager.get_best_ptq_result()
        if best_result and (best_result.accuracy >= target_acc):
            sess.result['target_satisfied'] = True
            return best_result.as_dict()
        with self.eval_manager.session('Cross-Layer Equalization', ptq=True) as sess:
            model = self._apply_cross_layer_equalization(fp32_model)
            if sess.ptq_result is None:
                sess.set_ptq_result(model=model, applied_techniques=['cross_layer_equalization'])
        best_result = self.eval_manager.get_best_ptq_result()
        if best_result and (best_result.accuracy >= target_acc):
            sess.result['target_satisfied'] = True
            return best_result.as_dict()
        if best_result is None:
            model = fp32_model
            applied_techniques = []
        else:
            if 'cross_layer_equalization' not in best_result.applied_techniques:
                sess.result['effective'] = False
            model = best_result.load_model()
            applied_techniques = best_result.applied_techniques
        with self.eval_manager.session('AdaRound', ptq=True) as sess:
            model, encoding_path = self._apply_adaround(model)
            if sess.ptq_result is None:
                sess.set_ptq_result(model=model, encoding_path=encoding_path, applied_techniques=[*applied_techniques, 'adaround'])
        best_result = self.eval_manager.get_best_ptq_result()
        if best_result:
            if 'adaround' not in best_result.applied_techniques:
                sess.result['effective'] = False
            if best_result.accuracy >= target_acc:
                sess.result['target_satisfied'] = True
            return best_result.as_dict()
        raise RuntimeError('None of batchnorm folding, CLE, or Adaround has been finished successfully.')
@pytest.mark.parametrize('driver', [pytest.param('energy', id='energy'), pytest.param('gradient', id='gradient'), pytest.param('hessian', id='hessian')])
def test_full_run(driver, tmpdir, acetone):
    """End-to-end Gaussian run; parsed reference output must match compute()."""
    if not GaussianHarness.found():
        pytest.skip('Gaussian 09/16 not available test skipped.')
    with tmpdir.as_cwd():
        qc_spec = qcel.models.common_models.Model(method='wB97XD', basis='6-311++G(d,p)')
        qc_task = qcel.models.AtomicInput(molecule=acetone.to_qcschema(), driver=driver, model=qc_spec)
        g = GaussianHarness()
        result = g.compute(input_data=qc_task, config=qcng.config.TaskConfig(**{'memory': 2, 'ncores': 2, 'nnodes': 1, 'retries': 1}))
        # Parse the canned reference logs and compare against the live run.
        outfiles = {}
        with open(get_data('gaussian.log')) as log:
            outfiles['gaussian.log'] = log.read()
        with open(get_data('gaussian.fchk')) as fchk:
            outfiles['lig.fchk'] = fchk.read()
        ref_result = g.parse_output(outfiles=outfiles, input_model=qc_task)
        assert np.allclose(ref_result.return_result, result.return_result)
def _wil_update(input: Union[str, List[str]], target: Union[str, List[str]]) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Accumulate Word Information Lost statistics for prediction/reference pairs.

    Returns the error score (errors minus max totals), the target total and
    the input total, ready to be reduced into the final WIL metric.
    """
    inputs = [input] if isinstance(input, str) else input
    targets = [target] if isinstance(target, str) else target
    assert len(inputs) == len(targets), f'Arguments must contain the same number of strings, but got len(input)={len(inputs)} and len(target)={len(targets)}'
    errors, max_total, target_total, input_total = _get_errors_and_totals(inputs, targets)
    return (errors - max_total, target_total, input_total)
def test_direct_junction_offsets_suc_pre_2_right(direct_junction_right_lane_fixture):
    """Lane offsets are recorded correctly for a -3 -> -1 direct-junction link."""
    main_road, small_road, junction_creator = direct_junction_right_lane_fixture
    junction_id = junction_creator.id
    main_road.add_successor(xodr.ElementType.junction, junction_id)
    small_road.add_predecessor(xodr.ElementType.junction, junction_id)
    junction_creator.add_connection(main_road, small_road, -3, -1)
    assert main_road.succ_direct_junction == {small_road.id: 2}
    assert small_road.pred_direct_junction == {main_road.id: -2}
    first_link = junction_creator.junction.connections[0].links[0]
    assert first_link == (-3, -1)
class LOSArrow(pg.GraphicsWidget, pg.GraphicsWidgetAnchor):
    """Overlay widget drawing an arrow that points toward the satellite.

    The heading comes from the scene's median ``phi`` angle and the arrow's
    shape (tip angle, tail length) encodes the median incidence ``theta``.
    """

    def __init__(self, model):
        pg.GraphicsWidget.__init__(self)
        pg.GraphicsWidgetAnchor.__init__(self)
        self.model = model
        self.arrow = pg.ArrowItem(parent=self, angle=0.0, brush=(0, 0, 0, 180), pen=(255, 255, 255), pxMode=True)
        self.label = pg.LabelItem('Towards Sat.', justify='right', size='8pt', parent=self)
        self.label.anchor(itemPos=(1.0, (- 1.0)), parentPos=(1.0, 0.0))
        self.orientArrow()
        # Keep the arrow in sync with scene changes.
        self.model.sigSceneChanged.connect(self.orientArrow)
        self.setFlag(self.ItemIgnoresTransformations)

    # NOTE(review): the original had a stray no-op `()` expression statement
    # here — most likely a Qt slot decorator (e.g. `@QtCore.pyqtSlot()`)
    # mangled by extraction. Removed as dead code; confirm against upstream
    # and restore the decorator if so.
    def orientArrow(self):
        """Recompute the arrow's rotation and shape from the scene's angles."""
        phi = np.nanmedian(self.model.scene.phi)
        theta = np.nanmedian(self.model.scene.theta)
        angle = (180.0 - np.rad2deg(phi))
        # Normalize theta to [0, 1] over a quarter turn to scale the arrow shape.
        theta_f = (theta / (np.pi / 2))
        tipAngle = (30.0 + (theta_f * 20.0))
        tailLen = (15 + (theta_f * 15.0))
        self.arrow.setStyle(angle=0.0, tipAngle=tipAngle, tailLen=tailLen, tailWidth=6, headLen=25)
        self.arrow.setRotation(angle)
        rect_label = self.label.boundingRect()
        self.arrow.boundingRect()
        self.label.setPos(((- rect_label.width()) / 2.0), (rect_label.height() * 1.33))

    def setParentItem(self, parent):
        pg.GraphicsWidget.setParentItem(self, parent)

    def boundingRect(self):
        return QtCore.QRectF(0, 0, self.width(), self.height())
class DetectionBlock(nn.Module):
    """YOLO-style detection block: five convs alternating 1x1 and 3x3 kernels.

    Channel widths alternate between ``out_channels`` and twice that value;
    the final 1x1 conv brings the result back to ``out_channels``.
    """

    def __init__(self, in_channels, out_channels, conv_cfg=None, norm_cfg=dict(type='BN', requires_grad=True), act_cfg=dict(type='LeakyReLU', negative_slope=0.1)):
        super(DetectionBlock, self).__init__()
        hidden_channels = out_channels * 2
        shared_cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.conv1 = ConvModule(in_channels, out_channels, 1, **shared_cfg)
        self.conv2 = ConvModule(out_channels, hidden_channels, 3, padding=1, **shared_cfg)
        self.conv3 = ConvModule(hidden_channels, out_channels, 1, **shared_cfg)
        self.conv4 = ConvModule(out_channels, hidden_channels, 3, padding=1, **shared_cfg)
        self.conv5 = ConvModule(hidden_channels, out_channels, 1, **shared_cfg)

    def forward(self, x):
        out = x
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4, self.conv5):
            out = conv(out)
        return out
@pytest.mark.parametrize('ds_order, lifted, dist_op, dist_params, size, rtol', [(('x',), True, normal, (np.array((- 10.0), dtype=np.float64), np.array(1e-06, dtype=np.float64)), (), 1e-07), ((0, 1, 2), True, normal, (np.array(0).astype(config.floatX), np.array(1e-06).astype(config.floatX)), (2, 1, 2), 0.001)])
def test_Dimshuffle_lift_rename(ds_order, lifted, dist_op, dist_params, size, rtol):
    """Lifting a DimShuffle through an RV keeps the name, suffixed '_lifted'."""
    rng = shared(np.random.default_rng(1233532), borrow=False)
    new_out, *_ = apply_local_rewrite_to_rv(local_dimshuffle_rv_lift, (lambda rv: rv.dimshuffle(ds_order)), dist_op, dist_params, size, rng, name='test_name')
    assert new_out.name == 'test_name_lifted'
def test_cache_clear_all(tester: ApplicationTester, repository_one: str, repository_cache_dir: Path, cache: FileCache[T]) -> None:
    """`cache clear --all` wipes both the on-disk directory and cache entries."""
    status = tester.execute(f'cache clear {repository_one} --all', inputs='yes')
    assert status == 0
    assert tester.io.fetch_output() == ''
    cleared_dir = repository_cache_dir / repository_one
    removed_or_empty = (not cleared_dir.exists()) or (not any(cleared_dir.iterdir()))
    assert removed_or_empty
    for entry in ('cachy:0.1', 'cleo:0.2'):
        assert not cache.has(entry)
class PipeQueue1RTL(Component):
    # Single-entry pipeline queue: an enqueue can proceed in the same cycle as
    # a dequeue, so a full buffer still accepts data when the consumer is ready.
    def construct(s, Type):
        s.enq = RecvIfcRTL(Type)  # upstream receive (en/rdy/msg) interface
        s.deq = SendIfcRTL(Type)  # downstream send (en/rdy/msg) interface
        # Single storage register, written only when enq fires.
        s.buffer = m = RegEn(Type)
        m.en //= s.enq.en
        m.in_ //= s.enq.msg
        m.out //= s.deq.msg
        s.full = Reg(Bits1)  # occupancy flag for the one buffer slot
        # NOTE(review): these nested functions look like PyMTL3 update blocks
        # that lost their `@update` decorators (and possibly `@=` assignments)
        # during extraction — as written they are defined but never invoked.
        # Confirm against the original source before relying on this model.
        def up_pipeq_use_deq_rdy():
            # deq fires when full and the consumer is ready; enq is ready when
            # empty OR the slot is being drained this cycle (pipeline behavior).
            s.deq.en = (s.full.out & s.deq.rdy)
            s.enq.rdy = ((~ s.full.out) | s.deq.rdy)
        def up_pipeq_full():
            # Next-cycle occupancy: set on enqueue, held unless dequeued.
            s.full.in_ = (s.enq.en | (s.full.out & (~ s.deq.rdy)))
    def line_trace(s):
        # Delegate tracing to the storage register.
        return s.buffer.line_trace()
class TestDocumentWithoutRequest(TestDocumentBase):
    """Offline tests for ``telegram.Document``: slots, (de)serialization,
    equality semantics and bot-method shortcuts — no live requests made."""

    def test_slot_behaviour(self, document):
        for attr in document.__slots__:
            assert getattr(document, attr, 'err') != 'err', f"got extra slot '{attr}'"
        assert len(mro_slots(document)) == len(set(mro_slots(document))), 'duplicate slot'

    def test_creation(self, document):
        assert isinstance(document, Document)
        assert isinstance(document.file_id, str)
        assert isinstance(document.file_unique_id, str)
        assert document.file_id
        assert document.file_unique_id

    def test_expected_values(self, document):
        assert document.file_size == self.file_size
        assert document.mime_type == self.mime_type
        assert document.file_name == self.file_name
        assert document.thumbnail.file_size == self.thumb_file_size
        assert document.thumbnail.width == self.thumb_width
        assert document.thumbnail.height == self.thumb_height

    def test_de_json(self, bot, document):
        json_dict = {'file_id': self.document_file_id, 'file_unique_id': self.document_file_unique_id, 'thumbnail': document.thumbnail.to_dict(), 'file_name': self.file_name, 'mime_type': self.mime_type, 'file_size': self.file_size}
        test_document = Document.de_json(json_dict, bot)
        assert test_document.api_kwargs == {}
        assert test_document.file_id == self.document_file_id
        assert test_document.file_unique_id == self.document_file_unique_id
        assert test_document.thumbnail == document.thumbnail
        assert test_document.file_name == self.file_name
        assert test_document.mime_type == self.mime_type
        assert test_document.file_size == self.file_size

    def test_to_dict(self, document):
        document_dict = document.to_dict()
        assert isinstance(document_dict, dict)
        assert document_dict['file_id'] == document.file_id
        assert document_dict['file_unique_id'] == document.file_unique_id
        assert document_dict['file_name'] == document.file_name
        assert document_dict['mime_type'] == document.mime_type
        assert document_dict['file_size'] == document.file_size

    def test_equality(self, document):
        # Equality is based on file_unique_id and type, not file_id.
        a = Document(document.file_id, document.file_unique_id)
        b = Document('', document.file_unique_id)
        d = Document('', '')
        e = Voice(document.file_id, document.file_unique_id, 0)
        assert a == b
        assert hash(a) == hash(b)
        assert a is not b
        assert a != d
        assert hash(a) != hash(d)
        assert a != e
        assert hash(a) != hash(e)

    async def test_error_send_without_required_args(self, bot, chat_id):
        with pytest.raises(TypeError):
            await bot.send_document(chat_id=chat_id)

    @pytest.mark.parametrize('disable_content_type_detection', [True, False, None])
    async def test_send_with_document(self, monkeypatch, bot, chat_id, document, disable_content_type_detection):
        async def make_assertion(url, request_data: RequestData, *args, **kwargs):
            data = request_data.parameters
            type_detection = (data.get('disable_content_type_detection') == disable_content_type_detection)
            return ((data['document'] == document.file_id) and type_detection)
        monkeypatch.setattr(bot.request, 'post', make_assertion)
        message = await bot.send_document(document=document, chat_id=chat_id, disable_content_type_detection=disable_content_type_detection)
        assert message

    @pytest.mark.parametrize('local_mode', [True, False])
    async def test_send_document_local_files(self, monkeypatch, bot, chat_id, local_mode):
        try:
            bot._local_mode = local_mode
            # In local mode, files are passed as URIs; otherwise as InputFile.
            test_flag = False
            file = data_file('telegram.jpg')
            expected = file.as_uri()

            async def make_assertion(_, data, *args, **kwargs):
                nonlocal test_flag
                if local_mode:
                    test_flag = ((data.get('document') == expected) and (data.get('thumbnail') == expected))
                else:
                    test_flag = (isinstance(data.get('document'), InputFile) and isinstance(data.get('thumbnail'), InputFile))
            monkeypatch.setattr(bot, '_post', make_assertion)
            await bot.send_document(chat_id, file, thumbnail=file)
            assert test_flag
        finally:
            # Always restore the bot's default mode for subsequent tests.
            bot._local_mode = False

    async def test_get_file_instance_method(self, monkeypatch, document):
        async def make_assertion(*_, **kwargs):
            return kwargs['file_id'] == document.file_id
        assert check_shortcut_signature(Document.get_file, Bot.get_file, ['file_id'], [])
        assert await check_shortcut_call(document.get_file, document.get_bot(), 'get_file')
        assert await check_defaults_handling(document.get_file, document.get_bot())
        monkeypatch.setattr(document.get_bot(), 'get_file', make_assertion)
        assert await document.get_file()
class CoupledInputForgetGateLSTMCell(rnn_cell_impl.RNNCell):
    """LSTM cell with coupled input and forget gates (CIFG).

    The forget gate is tied to the input gate as ``1 - i_t``, saving one
    gate's worth of parameters relative to a standard LSTM. Peepholes from
    the cell state feed the input and output gates.
    """

    def __init__(self, num_units, use_peepholes=False, initializer=None, num_proj=None, proj_clip=None, num_unit_shards=1, num_proj_shards=1, forget_bias=1.0, state_is_tuple=True, activation=math_ops.tanh, reuse=None):
        super(CoupledInputForgetGateLSTMCell, self).__init__(_reuse=reuse)
        if not state_is_tuple:
            logging.warn('%s: Using a concatenated state is slower and will soon be deprecated. Use state_is_tuple=True.', self)
        self._num_units = num_units
        self._use_peepholes = use_peepholes
        self._initializer = initializer
        self._num_proj = num_proj
        self._proj_clip = proj_clip
        self._num_unit_shards = num_unit_shards
        self._num_proj_shards = num_proj_shards
        self._forget_bias = forget_bias
        self._state_is_tuple = state_is_tuple
        self._activation = activation
        self._reuse = reuse
        if num_proj:
            self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_proj) if state_is_tuple else (num_units + num_proj))
            self._output_size = num_proj
        else:
            self._state_size = (rnn_cell_impl.LSTMStateTuple(num_units, num_units) if state_is_tuple else (2 * num_units))
            self._output_size = num_units

    # NOTE(review): the RNNCell contract requires state_size/output_size to be
    # properties (dynamic_rnn reads `cell.state_size` without calling it); the
    # `@property` decorators appear to have been lost in extraction — restored.
    @property
    def state_size(self):
        return self._state_size

    @property
    def output_size(self):
        return self._output_size

    def call(self, inputs, state):
        """Run one step: returns ``(output, new_state)`` for a batch of inputs."""
        sigmoid = math_ops.sigmoid
        num_proj = (self._num_units if (self._num_proj is None) else self._num_proj)
        if self._state_is_tuple:
            (c_prev, m_prev) = state
        else:
            c_prev = array_ops.slice(state, [0, 0], [(- 1), self._num_units])
            m_prev = array_ops.slice(state, [0, self._num_units], [(- 1), num_proj])
        dtype = inputs.dtype
        input_size = inputs.get_shape().with_rank(2)[1]
        if (input_size.value is None):
            raise ValueError('Could not infer input size from inputs.get_shape()[-1]')
        # Gate weights: x -> gate, hidden -> gate, and peephole (cell -> gate).
        self.w_xi = tf.get_variable('_w_xi', [input_size.value, self._num_units])
        self.w_hi = tf.get_variable('_w_hi', [self._num_units, self._num_units])
        self.w_ci = tf.get_variable('_w_ci', [self._num_units, self._num_units])
        self.w_xo = tf.get_variable('_w_xo', [input_size.value, self._num_units])
        self.w_ho = tf.get_variable('_w_ho', [self._num_units, self._num_units])
        self.w_co = tf.get_variable('_w_co', [self._num_units, self._num_units])
        self.w_xc = tf.get_variable('_w_xc', [input_size.value, self._num_units])
        self.w_hc = tf.get_variable('_w_hc', [self._num_units, self._num_units])
        self.b_i = tf.get_variable('_b_i', [self._num_units], initializer=init_ops.zeros_initializer())
        self.b_c = tf.get_variable('_b_c', [self._num_units], initializer=init_ops.zeros_initializer())
        self.b_o = tf.get_variable('_b_o', [self._num_units], initializer=init_ops.zeros_initializer())
        # Input gate (with peephole from the previous cell state).
        i_t = sigmoid(
            math_ops.matmul(inputs, self.w_xi)
            + math_ops.matmul(m_prev, self.w_hi)
            + math_ops.matmul(c_prev, self.w_ci)
            + self.b_i)
        # Coupled update: the forget gate is (1 - i_t).
        c_t = ((1 - i_t) * c_prev) + (i_t * self._activation(
            math_ops.matmul(inputs, self.w_xc)
            + math_ops.matmul(m_prev, self.w_hc)
            + self.b_c))
        # Output gate peeps at the *new* cell state.
        o_t = sigmoid(
            math_ops.matmul(inputs, self.w_xo)
            + math_ops.matmul(m_prev, self.w_ho)
            + math_ops.matmul(c_t, self.w_co)
            + self.b_o)
        h_t = o_t * self._activation(c_t)
        new_state = (rnn_cell_impl.LSTMStateTuple(c_t, h_t) if self._state_is_tuple else array_ops.concat([c_t, h_t], 1))
        return (h_t, new_state)
def test_wind():
    """Wind: equality, XML round-trip, and per-version schema validation."""
    base = OSC.Wind(0, 1)
    identical = OSC.Wind(0, 1)
    different = OSC.Wind(1, 1)
    assert base == identical
    assert base != different
    prettyprint(base)
    rebuilt = OSC.Wind.parse(base.get_element())
    assert base == rebuilt
    # Wind is not valid in OSC v0, valid from v1 onward.
    expected_by_version = (ValidationResponse.OSC_VERSION, ValidationResponse.OK, ValidationResponse.OK)
    for version, expected in enumerate(expected_by_version):
        assert version_validation('Wind', base, version) == expected
def tracing_client_from_config(raw_config: config.RawConfig, log_if_unconfigured: bool=True) -> TracingClient:
    """Build a TracingClient from the application's raw configuration.

    Parses the ``tracing`` config section (service name required, everything
    else optional with sensible defaults) and hands it to ``make_client``.
    """
    spec = {'tracing': {'service_name': config.String, 'endpoint': config.Optional(config.Endpoint), 'queue_name': config.Optional(config.String), 'max_span_queue_size': config.Optional(config.Integer, default=50000), 'num_span_workers': config.Optional(config.Integer, default=5), 'span_batch_interval': config.Optional(config.Timespan, default=config.Timespan('500 milliseconds')), 'num_conns': config.Optional(config.Integer, default=100), 'sample_rate': config.Optional(config.Fallback(config.Percent, config.Float), default=0.1)}}
    tracing_cfg = config.parse_config(raw_config, spec).tracing
    return make_client(service_name=tracing_cfg.service_name, tracing_endpoint=tracing_cfg.endpoint, tracing_queue_name=tracing_cfg.queue_name, max_span_queue_size=tracing_cfg.max_span_queue_size, num_span_workers=tracing_cfg.num_span_workers, span_batch_interval=tracing_cfg.span_batch_interval.total_seconds(), num_conns=tracing_cfg.num_conns, sample_rate=tracing_cfg.sample_rate, log_if_unconfigured=log_if_unconfigured)
def add_tune_args(parser):
    """Register weight-tuning hyperparameter options on `parser`.

    Adds a 'Tune parameter parser.' argument group and returns the parser
    for chaining.
    """
    group = parser.add_argument_group('Tune parameter parser.')
    options = (
        ('--n-grid', dict(default=6, type=int, metavar='N', help='how many grid added to tune for each weight.')),
        ('--weight-lower-bound', dict(default=0.0, type=float, help='lower bound for each weight.')),
        ('--weight-upper-bound', dict(default=1.0, type=float, help='upper bound for each weight.')),
        ('--num-trails-ax-opt', dict(default=5, type=int, help='number of trials in AX optimization.')),
        ('--output-json-best-parameters', dict(default='best_parameters.json', type=str, help='name of output file for the best parameters.')),
        ('--output-json-best-value', dict(default='best_value.json', type=str, help='name of output file for the best value of the evaluation function.')),
    )
    for flag, kwargs in options:
        group.add_argument(flag, **kwargs)
    return parser
def test_report_disconnect(mock_emit_session_update, solo_two_world_session):
    """Reporting a disconnect flips the association to Disconnected and emits
    a session update."""
    logger = MagicMock()
    payload = {'user-id': 1234, 'worlds': [1]}
    # Put the association into an in-game state first.
    assoc = database.WorldUserAssociation.get_by_instances(world=1, user=1234)
    assoc.connection_state = GameConnectionStatus.InGame
    assoc.save()
    world_api.report_disconnect(MagicMock(), payload, logger)
    refreshed = database.WorldUserAssociation.get_by_instances(world=1, user=1234)
    assert refreshed.connection_state == GameConnectionStatus.Disconnected
    mock_emit_session_update.assert_called_once_with(database.MultiplayerSession.get_by_id(1))
def _parse_requirement_details(tokenizer: Tokenizer) -> Tuple[str, str, Optional[MarkerList]]:
    """Parse the part of a requirement after the name and extras.

    Grammar: ``URL_SPEC | SPECIFIER? (';' marker)?``. Returns
    ``(url, specifier, marker)``; exactly one of url/specifier is non-empty
    depending on whether this is a direct ('name @ url') reference.
    """
    specifier = ''
    url = ''
    marker = None
    if tokenizer.check('AT'):
        # Direct reference: "name @ <url> [; marker]".
        tokenizer.read()
        tokenizer.consume('WS')
        url_start = tokenizer.position
        # NOTE: the expected-message previously read 'URL after ' — the
        # trailing '@' had been lost; restored to describe the grammar.
        url = tokenizer.expect('URL', expected='URL after @').text
        if tokenizer.check('END', peek=True):
            return (url, specifier, marker)
        tokenizer.expect('WS', expected='whitespace after URL')
        if tokenizer.check('END', peek=True):
            return (url, specifier, marker)
        marker = _parse_requirement_marker(tokenizer, span_start=url_start, after='URL and whitespace')
    else:
        specifier_start = tokenizer.position
        specifier = _parse_specifier(tokenizer)
        tokenizer.consume('WS')
        if tokenizer.check('END', peek=True):
            return (url, specifier, marker)
        marker = _parse_requirement_marker(tokenizer, span_start=specifier_start, after=('version specifier' if specifier else 'name and no valid version specifier'))
    return (url, specifier, marker)
def direct(x_samp, y_samp, x_bl, y_bl, y_data, Rinv, baseline_as_mean=False, **kwargs):
    """Direct (pseudo-inverse) linear inversion.

    Estimates the sensitivity of the observations to the state from the
    sample perturbations, then projects the R-weighted innovation through it.

    BUG FIX: the matrix-multiplication operators had been stripped from this
    function (``delta_y np.linalg.pinv(delta_x)`` is a syntax error); the
    ``@`` operators are restored below.

    Returns:
        The state update ``(dY · pinv(dX)).T · Rinv · innovation``.
    """
    (nsamples, nx, ny, x_bl, y_bl, y_data, delta_x, delta_y, innovation) = _preproc(x_samp, y_samp, x_bl, y_bl, y_data, baseline_as_mean)
    # Linear sensitivity dY/dX via least squares with the Moore-Penrose
    # pseudo-inverse of the state perturbations.
    dy = delta_y @ np.linalg.pinv(delta_x)
    # Project the observation-space innovation back to state space, weighted
    # by the inverse observation-error covariance.
    return (dy.T @ Rinv) @ innovation
def _select_lstm_internal_ops_to_quantize(graph: tf.Graph, internal_ops: List[tf.Operation]) -> Tuple[(List[str], List[int], List[str])]:
    """Select which LSTM-internal ops get parameter and activation quantizers.

    Returns:
        A triple of (op names with params, matching input indices,
        activation op names).
    """
    # Parameter-carrying ops and the indices of their quantizable inputs.
    param_op_names, input_indices = _get_internal_ops_to_quantize_params_for(graph, internal_ops)
    # Activations are selected by op type from the recurrent inner cell.
    activation_op_names = _get_internal_ops_to_quantize_activations_for(internal_ops, lstm_recurrent_inner_op_types_to_quantize)
    return (param_op_names, input_indices, activation_op_names)
class MuteStream(Scaffold):
    async def mute_stream(self, chat_id: Union[(int, str)]):
        """Mute this client's outgoing stream in the group call of ``chat_id``.

        Raises:
            NoMTProtoClientSet: no MTProto client was attached.
            ClientNotStarted: the client has not been started yet.
            NotInGroupCallError: the client is not in that group call.
        """
        if self._app is None:
            raise NoMTProtoClientSet()
        if not self._is_running:
            raise ClientNotStarted()
        resolved_chat = await self._resolve_chat_id(chat_id)
        try:
            # The native binding call is synchronous; run it off-loop.
            return await ToAsync(self._binding.mute, resolved_chat)
        except ConnectionError:
            raise NotInGroupCallError()
class DwsConvBlock(nn.Module):
    """Depthwise-separable convolution block: a depthwise convolution
    followed by a 1x1 pointwise convolution, each with optional BN and
    activation supplied by the helper block factories."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, bias=False, use_bn=True, bn_eps=1e-05, activation=(lambda : nn.ReLU(inplace=True))):
        super(DwsConvBlock, self).__init__()
        # Depthwise stage: keeps the channel count, one filter per channel.
        self.dw_conv = dwconv_block(
            in_channels=in_channels,
            out_channels=in_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=activation)
        # Pointwise stage: mixes channels and projects to out_channels.
        self.pw_conv = conv1x1_block(
            in_channels=in_channels,
            out_channels=out_channels,
            bias=bias,
            use_bn=use_bn,
            bn_eps=bn_eps,
            activation=activation)

    def forward(self, x):
        return self.pw_conv(self.dw_conv(x))
class UniSpeechConfig(PretrainedConfig):
    """Configuration for a UniSpeech model.

    Groups the hyper-parameters of the convolutional feature extractor, the
    transformer encoder, the vector quantizer, SpecAugment-style masking, and
    the CTC / classification heads. Every constructor argument is stored as a
    same-named attribute.
    """
    model_type = 'unispeech'

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, feat_quantizer_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-05, feat_extract_norm='group', feat_extract_activation='gelu', conv_dim=(512, 512, 512, 512, 512, 512, 512), conv_stride=(5, 2, 2, 2, 2, 2, 2), conv_kernel=(10, 3, 3, 3, 3, 2, 2), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, do_stable_layer_norm=False, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, num_codevectors_per_group=320, num_codevector_groups=2, contrastive_logits_temperature=0.1, num_negatives=100, codevector_dim=256, proj_codevector_dim=256, diversity_loss_weight=0.1, ctc_loss_reduction='mean', ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, num_ctc_classes=80, pad_token_id=0, bos_token_id=1, eos_token_id=2, replace_prob=0.5, **kwargs):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # --- transformer encoder ---
        self.hidden_size = hidden_size
        # --- convolutional feature extractor ---
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        # Stored as lists so the config serializes to JSON cleanly.
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        # Derived: one extractor layer per conv_dim entry.
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        # --- dropout / regularization ---
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # --- heads ---
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        # The three conv lists describe the same stack, so their lengths must agree.
        if ((len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers)):
            raise ValueError(f'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.')
        # --- SpecAugment-style time/feature masking ---
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # --- vector quantizer / contrastive pretraining ---
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # --- CTC loss ---
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        self.replace_prob = replace_prob

    # NOTE(review): upstream this is typically a @property; the decorator
    # appears to have been lost in this copy — confirm before calling it as a
    # plain method.
    def inputs_to_logits_ratio(self):
        # Total temporal downsampling factor of the conv feature extractor
        # (product of all conv strides).
        return functools.reduce(operator.mul, self.conv_stride, 1)
# NOTE(review): this bare call looks like a stripped decorator application —
# likely "@_tests(...)" enumerating the Wycheproof vector files for the test
# function that follows. Confirm against the original source.
_tests('rsa_oaep_2048_sha1_mgf1sha1_test.json', 'rsa_oaep_2048_sha224_mgf1sha1_test.json', 'rsa_oaep_2048_sha224_mgf1sha224_test.json', 'rsa_oaep_2048_sha256_mgf1sha1_test.json', 'rsa_oaep_2048_sha256_mgf1sha256_test.json', 'rsa_oaep_2048_sha384_mgf1sha1_test.json', 'rsa_oaep_2048_sha384_mgf1sha384_test.json', 'rsa_oaep_2048_sha512_mgf1sha1_test.json', 'rsa_oaep_2048_sha512_mgf1sha512_test.json', 'rsa_oaep_3072_sha256_mgf1sha1_test.json', 'rsa_oaep_3072_sha256_mgf1sha256_test.json', 'rsa_oaep_3072_sha512_mgf1sha1_test.json', 'rsa_oaep_3072_sha512_mgf1sha512_test.json', 'rsa_oaep_4096_sha256_mgf1sha1_test.json', 'rsa_oaep_4096_sha256_mgf1sha256_test.json', 'rsa_oaep_4096_sha512_mgf1sha1_test.json', 'rsa_oaep_4096_sha512_mgf1sha512_test.json', 'rsa_oaep_misc_test.json')
def test_rsa_oaep_encryption(backend, wycheproof):
    """Exercise the Wycheproof RSA-OAEP decryption vectors against the backend."""
    digest = _DIGESTS[wycheproof.testgroup['sha']]
    mgf_digest = _DIGESTS[wycheproof.testgroup['mgfSha']]
    assert digest is not None
    assert mgf_digest is not None
    padding_algo = padding.OAEP(
        mgf=padding.MGF1(algorithm=mgf_digest),
        algorithm=digest,
        label=binascii.unhexlify(wycheproof.testcase['label']),
    )
    if not backend.rsa_encryption_supported(padding_algo):
        pytest.skip(f'Does not support OAEP using {mgf_digest.name} MGF1 or {digest.name} hash.')
    # Key loading is expensive; cache the parsed key across the test group.
    key = wycheproof.cache_value_to_group(
        'cached_key',
        (lambda : serialization.load_pem_private_key(
            wycheproof.testgroup['privateKeyPem'].encode('ascii'),
            password=None,
            unsafe_skip_rsa_key_validation=True,
        )),
    )
    assert isinstance(key, rsa.RSAPrivateKey)
    if backend._fips_enabled and key.key_size < backend._fips_rsa_min_key_size:
        pytest.skip('Invalid params for FIPS. <2048 bit keys are disallowed')
    ciphertext = binascii.unhexlify(wycheproof.testcase['ct'])
    if not (wycheproof.valid or wycheproof.acceptable):
        # Invalid vectors must be rejected during decryption.
        with pytest.raises(ValueError):
            key.decrypt(ciphertext, padding_algo)
    else:
        assert key.decrypt(ciphertext, padding_algo) == binascii.unhexlify(wycheproof.testcase['msg'])
def plot_reward_curves(yss: Iterable[Iterable[np.ndarray]], labels: Iterable[str], bs: int, title: str):
    """Plot per-method mean reward curves with a +/- 1 std-dev band.

    Args:
        yss: for each label, a collection of per-run reward arrays (one array
            per run, all the same length).
        labels: legend label for each entry of ``yss``.
        bs: batch size; dashed vertical lines mark each iteration boundary.
        title: text used as the y-axis label.

    Returns:
        The matplotlib Figure containing the plot.
    """
    (fig, ax) = plt.subplots(1, 1, sharey=True, figsize=(4, 4))
    fmt = '-'
    (y_min, y_max) = (0, 0)
    for (ys, label) in zip(yss, labels):
        Y = np.stack(ys)
        y_mean = Y.mean(axis=0)
        y_sd = Y.std(axis=0)
        y_max = max(y_max, y_mean.max())
        # BUG FIX: the lower bound must track the minimum of the curves; the
        # original used max()/max() here, which pinned the vlines' lower end
        # to the curve maxima instead of the minima.
        y_min = min(y_min, y_mean.min())
        x = np.arange(len(y_mean)) + 1
        ax.plot(x, y_mean, fmt, label=label, alpha=0.9)
        # Only draw the spread band when there are enough runs for a
        # meaningful standard deviation.
        if len(Y) >= 3:
            ax.fill_between(x, y_mean - y_sd, y_mean + y_sd, alpha=0.3)
    # Mark iteration boundaries (uses the last curve's length, as before).
    n_iters = len(x) // bs
    ax.vlines([bs * (i + 1) for i in range(n_iters)], y_min, y_max, color='r', ls='dashed', lw=0.5)
    formatter = ticker.FuncFormatter(abbreviate_k_or_M)
    ax.xaxis.set_major_formatter(formatter)
    style_axis(ax)
    ax.set_ylabel(title)
    ax.legend(loc='lower right')
    fig.tight_layout()
    return fig
class ImagenetSpecificationTest(tf.test.TestCase):
    """Validates the ImageNet class-hierarchy specification: graph structure,
    spanning-leaf bookkeeping, and split isolation."""

    def validate_num_span_images(self, span_leaves, num_span_images):
        # Each internal node's image count must equal the sum over the leaves
        # it spans.
        for (node, leaves) in span_leaves.items():
            self.assertEqual(num_span_images[node], sum([num_span_images[l] for l in leaves]))

    def validate_splits(self, splits):
        # Each split's subgraph must be closed under parent/child edges, and
        # the three splits' leaves must partition the 1000 ILSVRC classes.
        train_graph = splits[learning_spec.Split.TRAIN]
        valid_graph = splits[learning_spec.Split.VALID]
        test_graph = splits[learning_spec.Split.TEST]

        def ensure_isolated(nodes):
            # No edge may leave the split: every child and parent of a node
            # must itself be in the split.
            for n in nodes:
                for c in n.children:
                    self.assertIn(c, nodes)
                for p in n.parents:
                    self.assertIn(p, nodes)

        ensure_isolated(train_graph)
        ensure_isolated(valid_graph)
        ensure_isolated(test_graph)
        train_classes = imagenet_spec.get_leaves(train_graph)
        valid_classes = imagenet_spec.get_leaves(valid_graph)
        test_classes = imagenet_spec.get_leaves(test_graph)
        all_classes = ((train_classes + valid_classes) + test_classes)
        # Exactly 1000 distinct leaf classes, with no duplicates across splits.
        self.assertLen(set(all_classes), 1000)
        self.assertLen(set(all_classes), len(all_classes))

    def test_imagenet_specification(self):
        # End-to-end check of the full specification built from the real data.
        spec = imagenet_spec.create_imagenet_specification(learning_spec.Split, set())
        (splits, _, graph_nodes, synsets_2012, num_synset_2012_images, roots) = spec
        span_leaves = imagenet_spec.get_spanning_leaves(graph_nodes)
        num_span_images = imagenet_spec.get_num_spanning_images(span_leaves, num_synset_2012_images)
        validate_graph(graph_nodes, synsets_2012, self)
        validate_spanning_leaves(span_leaves, synsets_2012, self)
        self.validate_splits(splits)
        self.validate_num_span_images(span_leaves, num_span_images)
        # LCA / upward-path helpers on the full graph...
        test_lowest_common_ancestor(graph_nodes, self)
        test_get_upward_paths(graph_nodes, self)
        # ...and on the valid/test subgraphs rooted at their own roots.
        (valid_subgraph, test_subgraph) = (splits[learning_spec.Split.VALID], splits[learning_spec.Split.TEST])
        (valid_root, test_root) = (roots['valid'], roots['test'])
        test_lowest_common_ancestor(valid_subgraph, self, valid_root)
        test_get_upward_paths(valid_subgraph, self, valid_root)
        test_lowest_common_ancestor(test_subgraph, self, test_root)
        test_get_upward_paths(test_subgraph, self, test_root)

    def test_toy_graph_specification(self):
        # Same structural validation on a small synthetic graph.
        specification = create_toy_graph()
        (toy_graph_nodes, toy_span_leaves, toy_synsets_2012) = specification
        validate_graph(toy_graph_nodes, toy_synsets_2012, self)
        validate_spanning_leaves(toy_span_leaves, toy_synsets_2012, self)
def train_one_epoch():
    """Run one training epoch over TRAIN_DATALOADER.

    Relies on module-level globals: net, optimizer, bnm_scheduler, criterion,
    TRAIN_DATALOADER, DATASET_CONFIG, EPOCH_CNT, device, log_string.
    Logs running means of every 'loss'/'acc'/'ratio' end-point every
    20 batches, resetting the accumulators after each report.
    """
    stat_dict = {}  # running sums of logged scalars, reset every batch_interval
    adjust_learning_rate(optimizer, EPOCH_CNT)
    bnm_scheduler.step()
    net.train()
    for (batch_idx, batch_data_label) in enumerate(TRAIN_DATALOADER):
        # Move the whole batch dict to the training device.
        for key in batch_data_label:
            batch_data_label[key] = batch_data_label[key].to(device)
        optimizer.zero_grad()
        inputs = {'point_clouds': batch_data_label['point_clouds']}
        end_points = net(inputs)
        # Merge ground-truth labels into end_points for the criterion; the
        # assert guards against the network overwriting a label key.
        for key in batch_data_label:
            assert (key not in end_points)
            end_points[key] = batch_data_label[key]
        (loss, end_points) = criterion(end_points, DATASET_CONFIG)
        loss.backward()
        optimizer.step()
        # Accumulate every scalar whose name marks it as a logged statistic.
        for key in end_points:
            if (('loss' in key) or ('acc' in key) or ('ratio' in key)):
                if (key not in stat_dict):
                    stat_dict[key] = 0
                stat_dict[key] += end_points[key].item()
        batch_interval = 20
        if (((batch_idx + 1) % batch_interval) == 0):
            log_string((' ---- batch: %03d ----' % (batch_idx + 1)))
            # Report the window mean, then zero the accumulator for the next window.
            for key in sorted(stat_dict.keys()):
                log_string(('mean %s: %f' % (key, (stat_dict[key] / batch_interval))))
                stat_dict[key] = 0
def postprocess_dataset(dataset: List[DatasetEntry], remove_identical_pairs: bool=True, remove_duplicates: bool=True, add_sampled_pairs: bool=True, max_num_text_b_for_text_a_and_label: int=2, label_smoothing: float=0.2, seed: int=42, explanation=False) -> List[DatasetEntry]:
    """Clean, label-smooth, cap, and optionally augment a pair dataset.

    NOTE(review): this function mutates its input — ``dataset`` is shuffled
    in place and each surviving entry's ``label`` is overwritten with its
    smoothed value. Confirm callers do not rely on the originals.

    Returns:
        A new list of DatasetEntry objects (plus sampled negatives when
        ``add_sampled_pairs`` is set).
    """
    postprocessed_dataset = []
    # Counts (text_a, smoothed_label) pairs to cap repeats per anchor/label.
    num_text_b_for_text_a_and_label = defaultdict(int)
    rng = random.Random(seed)
    # Shuffle first so the per-anchor cap keeps a random subset.
    rng.shuffle(dataset)
    if remove_duplicates:
        # Relies on DatasetEntry being hashable; set() drops exact duplicates.
        dataset = list(set(dataset))
    for example in dataset:
        if (remove_identical_pairs and (example.text_a == example.text_b)):
            continue
        # Drop examples that still contain an unfilled mask token.
        if (('<mask>' in example.text_a) or ('<mask>' in example.text_b)):
            continue
        # Label smoothing: label*(1-ls) + ls/2, i.e. 0 -> ls/2 and 1 -> 1-ls/2,
        # pulling both ends symmetrically toward 0.5.
        example.label = ((float(example.label) * (1 - label_smoothing)) + ((label_smoothing / 3) * 1.5))
        if (max_num_text_b_for_text_a_and_label > 0):
            # Keep at most N distinct text_b per (text_a, smoothed label).
            if (num_text_b_for_text_a_and_label[(example.text_a, example.label)] >= max_num_text_b_for_text_a_and_label):
                continue
        postprocessed_dataset.append(DatasetEntry(text_a=example.text_a, text_b=example.text_b, label=example.label))
        num_text_b_for_text_a_and_label[(example.text_a, example.label)] += 1
    if add_sampled_pairs:
        # Add two random (almost surely unrelated) text_b's per anchor as
        # hard label-0 negatives.
        sampled_dataset = []
        for text_a in set((x.text_a for x in postprocessed_dataset)):
            for _ in range(2):
                text_b = rng.choice(postprocessed_dataset).text_b
                if explanation:
                    sampled_dataset.append(DatasetEntryWithExp(text_a=text_a, text_b=text_b, label=0, explanation='They are completely different in terms of the meaning and the words used.'))
                else:
                    sampled_dataset.append(DatasetEntry(text_a=text_a, text_b=text_b, label=0))
        postprocessed_dataset += sampled_dataset
    return postprocessed_dataset
class TransformerEmbedding(nn.Module):
    """Token + learned/sinusoidal positional embedding front-end for a
    fairseq-style transformer encoder.

    Produces time-major activations and a padding mask.
    """

    def __init__(self, args, embed_tokens):
        super().__init__()
        self.dropout = args.dropout
        embed_dim = embed_tokens.embedding_dim
        self.padding_idx = embed_tokens.padding_idx
        self.embed_tokens = embed_tokens
        # Scale embeddings by sqrt(d) per the Transformer convention.
        self.embed_scale = math.sqrt(embed_dim)
        # 1024 is the maximum supported source positions.
        self.embed_positions = fairseq_transformer.PositionalEmbedding(1024, embed_dim, self.padding_idx, learned=args.encoder_learned_pos)

    def forward(self, src_tokens, src_lengths):
        x = self.embed_tokens(src_tokens)
        src_tokens_tensor = pytorch_translate_utils.get_source_tokens_tensor(src_tokens)
        x = (self.embed_scale * x)
        positions = self.embed_positions(src_tokens_tensor)
        x += positions
        x = F.dropout(x, p=self.dropout, training=self.training)
        # Switch from batch-major (B, T, C) to time-major (T, B, C).
        x = x.transpose(0, 1)
        encoder_padding_mask = src_tokens_tensor.eq(self.padding_idx)
        # Downstream layers skip masking entirely when no token is padding.
        if (not encoder_padding_mask.any()):
            encoder_padding_mask = None
        return (x, encoder_padding_mask, positions)
def cibuildwheel_run(project_path, package_dir='.', env=None, add_env=None, output_dir=None, add_args=None):
    """Run cibuildwheel on ``project_path`` and return the produced wheel names.

    Wheels go to ``output_dir`` when given, otherwise to a temporary
    directory that is discarded after listing.
    """
    if env is None:
        env = os.environ.copy()
        # Don't pin a deployment target; let cibuildwheel pick its default.
        env.pop('MACOSX_DEPLOYMENT_TARGET', None)
    add_args = [] if add_args is None else add_args
    if add_env is not None:
        env.update(add_env)
    _update_pip_cache_dir(env)
    with TemporaryDirectory() as tmp_output_dir:
        destination = output_dir or tmp_output_dir
        command = [
            sys.executable, '-m', 'cibuildwheel',
            '--prerelease-pythons',
            '--output-dir', str(destination),
            str(package_dir),
            *add_args,
        ]
        subprocess.run(command, env=env, cwd=project_path, check=True)
        wheels = os.listdir(destination)
    return wheels
def calculate_remediations(vulns, db_full):
    """Build the remediation map for the given vulnerabilities.

    Returns an empty mapping when the full database is unavailable, since
    secure-version computation requires it.
    """
    remediations = defaultdict(dict)
    if not db_full:
        return remediations
    package_metadata = {}
    secure_vulns_by_user = set()
    # Both helpers fill their arguments in place.
    precompute_remediations(remediations, package_metadata, vulns, secure_vulns_by_user)
    compute_sec_ver(remediations, package_metadata, secure_vulns_by_user, db_full)
    return remediations
class AdBaseAdmin(RemoveDeleteMixin, admin.ModelAdmin):
    """Read-only Django admin base for ad impression/click-style records."""

    # Every field is read-only: these records are analytics data, not editable.
    readonly_fields = ('date', 'advertisement', 'publisher', 'page_url', 'keywords', 'country', 'browser_family', 'os_family', 'is_mobile', 'is_proxy', 'paid_eligible', 'user_agent', 'ip', 'div_id', 'ad_type_slug', 'client_id', 'modified', 'created')
    # List view shows everything except the last three (client_id, modified, created).
    list_display = readonly_fields[:(- 3)]
    list_select_related = ('advertisement', 'publisher')
    list_filter = ('is_mobile', 'is_proxy', 'paid_eligible', 'publisher', 'advertisement__flight__campaign__advertiser')
    # Estimated counts avoid slow COUNT(*) on very large tables.
    paginator = EstimatedCountPaginator
    search_fields = ('advertisement__name', 'url', 'ip', 'country', 'user_agent', 'client_id')
    show_full_result_count = False

    def page_url(self, instance):
        # Render the URL as a clickable link; escape() guards against HTML
        # injection before marking the string safe.
        if instance.url:
            return mark_safe('<a href="{url}">{url}</a>'.format(url=escape(instance.url)))
        return None

    def has_add_permission(self, request):
        # Records are created by the tracking pipeline, never via the admin.
        return False
class _TestClassA(torch.nn.Module):
    """Test fixture module that asserts it receives the expected arg values."""

    def __init__(self, arg1, arg2, arg3=3):
        super().__init__()
        self.arg1 = arg1
        self.arg2 = arg2
        self.arg3 = arg3
        # The fixture hard-codes the expected construction values.
        assert (arg1 == 1)
        assert (arg2 == 2)
        assert (arg3 == 3)

    # NOTE(review): the first parameter is `cls`, so this was almost certainly
    # a @classmethod in the original (the decorator appears to have been
    # stripped) — confirm before calling it on an instance.
    def from_config(cls, cfg):
        # Map config fields to constructor kwargs (arg3 keeps its default).
        args = {'arg1': cfg.ARG1, 'arg2': cfg.ARG2}
        return args
def get_xritdecompress_cmd():
    """Return the path of the external xRITDecompress executable.

    The path is taken from the XRIT_DECOMPRESS_PATH environment variable and
    validated to be an existing file.

    Raises:
        IOError: if the variable is unset, points at a missing path, or
            points at a directory.
    """
    cmd = os.environ.get('XRIT_DECOMPRESS_PATH', None)
    if not cmd:
        raise IOError('XRIT_DECOMPRESS_PATH is not defined (complete path to xRITDecompress)')
    question = 'Did you set the environment variable XRIT_DECOMPRESS_PATH correctly?'
    if os.path.isdir(cmd):
        raise IOError(((str(cmd) + ' is a directory!\n') + question))
    if not os.path.exists(cmd):
        raise IOError(((str(cmd) + ' does not exist!\n') + question))
    return cmd
class EncoderLayer(nn.Module):
    """Transformer-style encoder layer.

    Self-attention followed by a position-wise feed-forward block built from
    two kernel-size-1 convolutions; each sub-block uses a residual connection
    followed by LayerNorm (post-norm).
    """

    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation='relu'):
        super(EncoderLayer, self).__init__()
        # Default hidden width is the conventional 4x the model width.
        d_ff = d_ff or (4 * d_model)
        self.attention = attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = F.relu if activation == 'relu' else F.gelu

    def forward(self, x, attn_mask=None):
        # Attention sub-block: residual + post-norm.
        attn_out = self.attention(x, x, x, attn_mask=attn_mask)
        x = self.norm1(x + self.dropout(attn_out))
        # Feed-forward sub-block; Conv1d wants channels in dim 1, so swap
        # the feature axis in and back out.
        hidden = self.dropout(self.activation(self.conv1(x.transpose(-1, 1))))
        ff_out = self.dropout(self.conv2(hidden).transpose(-1, 1))
        return self.norm2(x + ff_out)
def calc_diversity(data_dir, num_samples=5):
    """Print the sample-diversity score for a directory of generated images.

    For every case directory the per-pixel standard deviation across its
    ``num_samples`` outputs is averaged; the final score is the mean over all
    cases (printed, not returned).
    """
    case_dirs = os.listdir(data_dir)
    case_dirs.sort()
    to_tensor = transforms.Compose([transforms.ToTensor()])
    total = len(case_dirs)
    std = 0
    for i in tqdm(range(total), total=total, smoothing=0.01):
        # NOTE(review): cases are addressed by index 0..total-1, assuming the
        # directories are named that way — confirm against the generator.
        samples = []
        for j in range(num_samples):
            image = Image.open(os.path.join(os.path.join(data_dir, str(i), f'output_{str(j)}.png')))
            image = image.convert('RGB')
            tensor = to_tensor(image)
            # Back to 0..255 so the std is expressed in pixel units.
            samples.append(tensor * 255.0)
        # Per-pixel mean over the samples.
        sample_mean = torch.zeros_like(samples[0])
        for sample in samples:
            sample_mean = sample_mean + sample
        sample_mean = sample_mean / num_samples
        # Per-pixel (population) variance over the samples.
        sample_var = torch.zeros_like(samples[0])
        for sample in samples:
            sample_var = sample_var + ((sample - sample_mean) ** 2)
        sample_var = sample_var / num_samples
        std = std + torch.mean(torch.sqrt(sample_var))
    std = std / total
    print(data_dir)
    print(f'diversity: {std}')
def make_dataset(pos_pairs, neg_pairs, neg_pairs_asin, pct_dev=0):
    """Assemble labelled (item_a, item_b, label) examples from pair lists.

    Positive pairs get label 1; both negative-pair sources get label -1.
    The first ``pct_dev`` fraction of each input list goes to the dev split,
    the remainder to the train split.

    Args:
        pos_pairs: list of positive (a, b) pairs.
        neg_pairs: list of negative (a, b) pairs.
        neg_pairs_asin: additional negative pairs (same treatment).
        pct_dev: fraction (0..1) of each list routed to the dev split.

    Returns:
        (ds_train, ds_dev) lists of (a, b, label) tuples.
    """
    ds_train = []
    ds_dev = []

    def _add_to_ds(ds_train, ds_dev, array, label, pct_dev):
        # Everything before the split point goes to dev, the rest to train.
        split = int(len(array) * pct_dev)
        for pair in array[:split]:
            ds_dev.append((*pair, label))
        for pair in array[split:]:
            ds_train.append((*pair, label))

    _add_to_ds(ds_train, ds_dev, pos_pairs, 1, pct_dev)
    _add_to_ds(ds_train, ds_dev, neg_pairs, -1, pct_dev)
    _add_to_ds(ds_train, ds_dev, neg_pairs_asin, -1, pct_dev)
    # (An unused running-total local from the original was removed.)
    return (ds_train, ds_dev)
class unknowns(plugin):
    """Plugin that hashes remotely-listed 'unknown' files and appends the
    results to a local NOSEND log."""

    def __init__(self, os, maxsize):
        plugin.__init__(self, os, maxsize, __name__)
        print(('loaded %s' % __name__))

    def preGet(self):
        # No pre-fetch work needed for this plugin.
        pass

    def postGet(self):
        # No post-fetch work needed for this plugin.
        pass

    def check(self, path):
        # Local imports keep the plugin importable without the ops framework.
        import ops.env
        import os, os.path
        # Input: list of remote paths to check; output: accumulated hash log.
        __in = os.path.join(ops.env.get('_LOGPATH'), 'tmp', 'unknowns.txt')
        __out = os.path.join(ops.env.get('_LOGPATH'), 'GetFiles', 'NOSEND', 'unknowns.txt')
        if (not os.path.exists(os.path.split(__out)[0])):
            os.makedirs(os.path.split(__out)[0])
        try:
            filesToCheck = open(__in)
        except:
            print(("Couldn't open file %s for input" % __in))
            return
        try:
            # Append binary mode: results accumulate across runs.
            saveFile = open(__out, 'a+b')
        except:
            print(("Couldn't open file %s for output" % __out))
            return
        # Each input line is a full remote path; split into directory + mask
        # and request a hashed directory listing for it.
        for file in filesToCheck.readlines():
            rPath = os.path.split(file.strip())[0]
            rMask = os.path.split(file.strip())[1]
            rDir = ops.files.dirs.get_dirlisting(path=rPath, mask=rMask, hash=True)
            for diritem in rDir.diritem:
                for fileitem in diritem.fileitem:
                    saveFile.write((fileitem.fullpath + '\n'))
                    # Record every hash type reported for the file.
                    for filehash in fileitem.filehash:
                        remoteHash = filehash.value
                        saveFile.write((((filehash.type + ': ') + filehash.value) + '\n'))
                    saveFile.write('\n\n')
        saveFile.close()
        print(('Hashes saved to ' + __out))
        filesToCheck.close()
def compute_faiss_kmeans(dim, num_partitions, kmeans_niters, shared_lists, return_value_queue=None):
    """Train FAISS k-means on the shared sample and return the centroids.

    When ``return_value_queue`` is given (multiprocessing usage), the
    centroid tensor is additionally pushed onto the queue.
    """
    use_gpu = torch.cuda.is_available()
    kmeans = faiss.Kmeans(dim, num_partitions, niter=kmeans_niters, gpu=use_gpu, verbose=True, seed=123)
    # The training sample is passed through shared memory; FAISS wants float32 numpy.
    training_sample = shared_lists[0][0].float().numpy()
    kmeans.train(training_sample)
    centroids = torch.from_numpy(kmeans.centroids)
    print_memory_stats(f'RANK:0*')
    if return_value_queue is not None:
        return_value_queue.put(centroids)
    return centroids
def test_no_install(local_client: QdrantClient=None, collection_name: str='demo_collection', docs: Dict[(str, List[Union[(str, int, Any)]])]=None):
    """Without fastembed installed, ``add`` must raise ImportError."""
    if local_client is None:
        local_client = QdrantClient(':memory:')
    if docs is None:
        docs = {
            'documents': ['Qdrant has Langchain integrations', 'Qdrant also has Llama Index integrations'],
            'metadatas': [{'source': 'Langchain-docs'}, {'source': 'LlamaIndex-docs'}],
            'ids': [42, 2],
        }
    if local_client._is_fastembed_installed:
        pytest.skip('FastEmbed is installed, skipping test')
    # pytest.skip raises, so reaching here means fastembed is absent.
    with pytest.raises(ImportError):
        local_client.add(collection_name, docs)
def test_project__empty():
    """A freshly constructed Project exposes empty/None defaults and a
    creation timestamp."""
    empty_project = Project()
    assert empty_project.admins == []
    assert isinstance(empty_project.created_at, datetime)
    assert empty_project.location is None
    assert empty_project.project_observation_rules == []
    assert empty_project.search_parameters == []
    assert empty_project.user is None
def senet154(num_classes=1000, pretrained='imagenet'):
    """Build SENet-154; load pretrained weights unless ``pretrained`` is None."""
    model = SENet(SEBottleneck, [3, 8, 36, 3], groups=64, reduction=16, dropout_p=0.2, num_classes=num_classes)
    if pretrained is not None:
        settings = pretrained_settings['senet154'][pretrained]
        initialize_pretrained_model(model, num_classes, settings)
    return model
def test_parse_basic_remote_manifest():
    # Parse a sample OCI manifest that contains a remote (foreign) layer and
    # verify digest bookkeeping distinguishes local from remote blobs.
    manifest = OCIManifest(Bytes.for_string_or_unicode(SAMPLE_REMOTE_MANIFEST))
    assert (not manifest.is_manifest_list)
    assert (manifest.digest == 'sha256:dd18ed87a00474aff683cee7160771e043f1f0eaddbc0678a984a5e')
    # blob_digests covers every referenced blob, remote layers included.
    assert (manifest.blob_digests == ['sha256:9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0', 'sha256:3c3a4604a545cdc127456d94e421cd355bca5b528f4a9c1905b15da2eb4a4c6b', 'sha256:ec4bc89419d1af06b5f7636b4ac3da7fad867736', 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7'])
    # local_blob_digests excludes the remote layer's digest.
    assert (manifest.local_blob_digests == ['sha256:9834876dcfb05cb167a5c24953eba58c4ac89b1adf57f28f2f9d09af107ee8f0', 'sha256:ec4bc89419d1af06b5f7636b4ac3da7fad867736', 'sha256:b5b2b2c507a0944348e0303114d8d93aaaa081732b86451d9bce1f432a537bc7'])
    assert (len(manifest.filesystem_layers) == 3)
    assert (str(manifest.leaf_filesystem_layer.digest) == 'sha256:ec4bc89419d1af06b5f7636b4ac3da7fad867736')
    assert manifest.has_remote_layer
    # Manifests with remote layers cannot be converted to a legacy image.
    assert (not manifest.has_legacy_image)
    assert (not manifest.get_legacy_image_ids(None))
class SegToImageTransforms(TransformsConfig):
    """Transforms for the segmentation-map-to-image task.

    Ground-truth/test images are resized, tensorized and normalized to
    [-1, 1]; source segmentation maps are resized and one-hot encoded with
    ``opts.label_nc`` classes instead of normalized.
    """

    def __init__(self, opts):
        super(SegToImageTransforms, self).__init__(opts)

    def get_transforms(self):
        transforms_dict = {'transform_gt_train': transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]), 'transform_source': transforms.Compose([transforms.Resize((256, 256)), augmentations.ToOneHot(self.opts.label_nc), transforms.ToTensor()]), 'transform_test': transforms.Compose([transforms.Resize((256, 256)), transforms.ToTensor(), transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]), 'transform_inference': transforms.Compose([transforms.Resize((256, 256)), augmentations.ToOneHot(self.opts.label_nc), transforms.ToTensor()])}
        return transforms_dict
def deprecated_alias(alias, func):
    """Return a wrapper for ``func`` that warns that ``alias`` is deprecated.

    Calling the returned function emits a DeprecationWarning naming the old
    ``alias`` and the replacement ``func`` before delegating to ``func``.

    FIX: the original contained a stray no-op statement ``(func)`` — almost
    certainly a mangled ``@functools.wraps(func)`` decorator — so the wrapper
    lost the wrapped function's metadata. functools.wraps is restored here.
    """
    import functools

    @functools.wraps(func)
    def new_func(*args, **kwargs):
        # Force the warning to be shown even when DeprecationWarnings are
        # filtered by default, then restore the default filter.
        warnings.simplefilter('always', DeprecationWarning)
        warnings.warn('Call to deprecated function alias {}, use {} instead.'.format(alias, func.__name__), category=DeprecationWarning, stacklevel=2)
        warnings.simplefilter('default', DeprecationWarning)
        return func(*args, **kwargs)
    return new_func
class BoolectorOptions(SolverOptions):
    """pySMT solver-options adapter for Boolector (BTOR)."""

    def __init__(self, **base_options):
        SolverOptions.__init__(self, **base_options)
        # Boolector exposes no seed-style determinism knob through this
        # adapter, so a requested random seed cannot be honored.
        if (self.random_seed is not None):
            raise PysmtValueError('BTOR Does not support Random Seed setting.')
        # Incremental solving is always enabled for this backend.
        self.incrementality = True
        # Whitelist of BTOR option enums that may be set by name via
        # _set_option; anything outside this list is rejected.
        self.internal_options = [pyboolector.BTOR_OPT_MODEL_GEN, pyboolector.BTOR_OPT_INCREMENTAL, pyboolector.BTOR_OPT_INCREMENTAL_SMT1, pyboolector.BTOR_OPT_INPUT_FORMAT, pyboolector.BTOR_OPT_OUTPUT_NUMBER_FORMAT, pyboolector.BTOR_OPT_OUTPUT_FORMAT, pyboolector.BTOR_OPT_ENGINE, pyboolector.BTOR_OPT_SAT_ENGINE, pyboolector.BTOR_OPT_AUTO_CLEANUP, pyboolector.BTOR_OPT_PRETTY_PRINT, pyboolector.BTOR_OPT_EXIT_CODES, pyboolector.BTOR_OPT_SEED, pyboolector.BTOR_OPT_VERBOSITY, pyboolector.BTOR_OPT_LOGLEVEL, pyboolector.BTOR_OPT_REWRITE_LEVEL, pyboolector.BTOR_OPT_SKELETON_PREPROC, pyboolector.BTOR_OPT_ACKERMANN, pyboolector.BTOR_OPT_BETA_REDUCE, pyboolector.BTOR_OPT_ELIMINATE_SLICES, pyboolector.BTOR_OPT_VAR_SUBST, pyboolector.BTOR_OPT_UCOPT, pyboolector.BTOR_OPT_MERGE_LAMBDAS, pyboolector.BTOR_OPT_EXTRACT_LAMBDAS, pyboolector.BTOR_OPT_NORMALIZE, pyboolector.BTOR_OPT_NORMALIZE_ADD, pyboolector.BTOR_OPT_FUN_PREPROP, pyboolector.BTOR_OPT_FUN_PRESLS, pyboolector.BTOR_OPT_FUN_DUAL_PROP, pyboolector.BTOR_OPT_FUN_DUAL_PROP_QSORT, pyboolector.BTOR_OPT_FUN_JUST, pyboolector.BTOR_OPT_FUN_JUST_HEURISTIC, pyboolector.BTOR_OPT_FUN_LAZY_SYNTHESIZE, pyboolector.BTOR_OPT_FUN_EAGER_LEMMAS, pyboolector.BTOR_OPT_FUN_STORE_LAMBDAS, pyboolector.BTOR_OPT_SLS_NFLIPS, pyboolector.BTOR_OPT_SLS_STRATEGY, pyboolector.BTOR_OPT_SLS_JUST, pyboolector.BTOR_OPT_SLS_MOVE_GW, pyboolector.BTOR_OPT_SLS_MOVE_RANGE, pyboolector.BTOR_OPT_SLS_MOVE_SEGMENT, pyboolector.BTOR_OPT_SLS_MOVE_RAND_WALK, pyboolector.BTOR_OPT_SLS_PROB_MOVE_RAND_WALK, pyboolector.BTOR_OPT_SLS_MOVE_RAND_ALL, pyboolector.BTOR_OPT_SLS_MOVE_RAND_RANGE, pyboolector.BTOR_OPT_SLS_MOVE_PROP, pyboolector.BTOR_OPT_SLS_MOVE_PROP_N_PROP, pyboolector.BTOR_OPT_SLS_MOVE_PROP_N_SLS, pyboolector.BTOR_OPT_SLS_MOVE_PROP_FORCE_RW, pyboolector.BTOR_OPT_SLS_MOVE_INC_MOVE_TEST, pyboolector.BTOR_OPT_SLS_USE_RESTARTS, pyboolector.BTOR_OPT_SLS_USE_BANDIT, pyboolector.BTOR_OPT_PROP_NPROPS, pyboolector.BTOR_OPT_PROP_USE_RESTARTS, pyboolector.BTOR_OPT_PROP_USE_BANDIT, 
        pyboolector.BTOR_OPT_PROP_PATH_SEL, pyboolector.BTOR_OPT_PROP_PROB_USE_INV_VALUE, pyboolector.BTOR_OPT_PROP_PROB_FLIP_COND, pyboolector.BTOR_OPT_PROP_PROB_FLIP_COND_CONST, pyboolector.BTOR_OPT_PROP_FLIP_COND_CONST_DELTA, pyboolector.BTOR_OPT_PROP_FLIP_COND_CONST_NPATHSEL, pyboolector.BTOR_OPT_PROP_PROB_SLICE_KEEP_DC, pyboolector.BTOR_OPT_PROP_PROB_CONC_FLIP, pyboolector.BTOR_OPT_PROP_PROB_SLICE_FLIP, pyboolector.BTOR_OPT_PROP_PROB_EQ_FLIP, pyboolector.BTOR_OPT_PROP_PROB_AND_FLIP, pyboolector.BTOR_OPT_PROP_NO_MOVE_ON_CONFLICT, pyboolector.BTOR_OPT_AIGPROP_USE_RESTARTS, pyboolector.BTOR_OPT_AIGPROP_USE_BANDIT, pyboolector.BTOR_OPT_QUANT_SYNTH, pyboolector.BTOR_OPT_QUANT_DUAL_SOLVER, pyboolector.BTOR_OPT_QUANT_SYNTH_LIMIT, pyboolector.BTOR_OPT_QUANT_SYNTH_QI, pyboolector.BTOR_OPT_QUANT_DER, pyboolector.BTOR_OPT_QUANT_CER, pyboolector.BTOR_OPT_QUANT_MINISCOPE, pyboolector.BTOR_OPT_SORT_EXP, pyboolector.BTOR_OPT_SORT_AIG, pyboolector.BTOR_OPT_SORT_AIGVEC, pyboolector.BTOR_OPT_AUTO_CLEANUP_INTERNAL, pyboolector.BTOR_OPT_SIMPLIFY_CONSTRAINTS, pyboolector.BTOR_OPT_CHK_FAILED_ASSUMPTIONS, pyboolector.BTOR_OPT_CHK_MODEL, pyboolector.BTOR_OPT_CHK_UNCONSTRAINED, pyboolector.BTOR_OPT_PARSE_INTERACTIVE, pyboolector.BTOR_OPT_SAT_ENGINE_LGL_FORK, pyboolector.BTOR_OPT_SAT_ENGINE_CADICAL_FREEZE, pyboolector.BTOR_OPT_SAT_ENGINE_N_THREADS, pyboolector.BTOR_OPT_SIMP_NORMAMLIZE_ADDERS, pyboolector.BTOR_OPT_DECLSORT_BV_WIDTH, pyboolector.BTOR_OPT_QUANT_SYNTH_ITE_COMPLETE, pyboolector.BTOR_OPT_QUANT_FIXSYNTH, pyboolector.BTOR_OPT_RW_ZERO_LOWER_SLICE, pyboolector.BTOR_OPT_NONDESTR_SUBST]

    def _set_option(self, btor, name, value):
        # Map long option names (as reported by Boolector) to option enums,
        # then delegate to Boolector; all failures surface as PysmtValueError.
        available_options = {pyboolector.BoolectorOpt(btor, io).lng: io for io in self.internal_options}
        try:
            btor.Set_opt(available_options[name], value)
        except TypeError:
            raise PysmtValueError(("Error setting the option '%s=%s'" % (name, value)))
        except pyboolector.BoolectorException:
            raise PysmtValueError(("Error setting the option '%s=%s'" % (name, value)))
        except KeyError:
            raise PysmtValueError(("Unable to set non-existing option '%s'. The accepted options options are: %s" % (name, ', '.join((pyboolector.BoolectorOpt(btor, io).lng for io in self.internal_options)))))

    def __call__(self, solver):
        # Apply the standard pySMT options, then any user-supplied ones.
        if self.generate_models:
            self._set_option(solver.btor, 'model-gen', 1)
        else:
            self._set_option(solver.btor, 'model-gen', 0)
        if self.incrementality:
            self._set_option(solver.btor, 'incremental', 1)
        for (k, v) in self.solver_options.items():
            self._set_option(solver.btor, str(k), v)
def do_chopper(params):
    """Worker for a parallel squirrel chopper test.

    Builds a persistent Squirrel over the job's data directory, checks
    uncut/windowed waveform retrieval, then verifies chopper_waveforms
    batching covers every sample exactly once. Returns the job index.
    """
    (ijob, datadir, nfiles, nsamples, tmin, (grouping, mult)) = params
    sq = squirrel.Squirrel(datadir, persistent='bla')
    sq.add(os.path.join(datadir, 'data'))
    # All traces, uncut: one per file, each with the expected sample count.
    ntr = 0
    for tr in sq.get_waveforms(uncut=True):
        ntr += 1
        assert (tr.data_len() == nsamples)
    assert (ntr == nfiles)
    # A fully-inside window yields all-ones data of the window length.
    trs = sq.get_waveforms(tmin=(tmin + 10), tmax=(tmin + 200))
    for tr in trs:
        assert num.all((tr.get_ydata() == num.ones(190)))
    # A window starting before the data is clipped to the available part.
    trs = sq.get_waveforms(tmin=(tmin - 100), tmax=(tmin + 100))
    for tr in trs:
        assert (len(tr.get_ydata()) == 100)
    codes = sq.get_codes(kind='waveform')
    s = 0
    tinc = 122.0
    (sq_tmin, sq_tmax) = sq.get_time_span('waveform')
    nbatches = 0
    # Chop the whole span into tinc-sized batches; summing every sample must
    # reproduce the total sample count (data is all ones), and the batch
    # count must match the expected windows times the grouping multiplier.
    for batch in sq.chopper_waveforms(tmin=None, tmax=(sq_tmax + 1.0), tinc=tinc, degap=False, codes=codes, grouping=grouping):
        for tr in batch.traces:
            s += num.sum(tr.ydata)
        nbatches += 1
    assert (nbatches == (int(math.ceil(((nsamples * nfiles) / tinc))) * mult))
    assert (int(round(s)) == (nfiles * nsamples))
    # Explicitly close the backing database before the worker exits.
    database = sq.get_database()
    del sq
    from pyrocko.squirrel import database as dbm
    dbm.close_database(database)
    return ijob
class CompletionItemKind():
    """Numeric completion-item kinds as defined by the Language Server
    Protocol specification (CompletionItemKind enumeration)."""
    Text = 1
    Method = 2
    Function = 3
    Constructor = 4
    Field = 5
    Variable = 6
    Class = 7
    Interface = 8
    Module = 9
    Property = 10
    Unit = 11
    Value = 12
    Enum = 13
    Keyword = 14
    Snippet = 15
    Color = 16
    File = 17
    Reference = 18
    Folder = 19
    EnumMember = 20
    Constant = 21
    Struct = 22
    Event = 23
    Operator = 24
    TypeParameter = 25
class Effect5306(BaseEffect):
    """Passive fitting effect: Caldari Destroyer hull bonus to rocket
    kinetic damage."""
    type = 'passive'

    # NOTE(review): handler takes no `self`, so it is presumably treated as a
    # static/unbound callback by the effect framework (a decorator may have
    # been stripped from this copy) — confirm against the original source.
    def handler(fit, ship, context, projectionRange, **kwargs):
        # Boost 'kineticDamage' of charges requiring the Rockets skill by the
        # hull's shipBonusCD1 attribute, scaled by Caldari Destroyer level.
        fit.modules.filteredChargeBoost((lambda mod: mod.charge.requiresSkill('Rockets')), 'kineticDamage', ship.getModifiedItemAttr('shipBonusCD1'), skill='Caldari Destroyer', **kwargs)
# NOTE(review): this bare call looks like a stripped decorator — likely
# "@_vcs_handler('git', 'pieces_from_vcs')" registering the function below.
# Confirm against the original (versioneer-style) source.
_vcs_handler('git', 'pieces_from_vcs')
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Extract version 'pieces' from git-describe output in ``root``.

    Returns a dict with keys: long/short (commit ids), closest-tag, distance,
    dirty, and error. Raises NotThisMethod when git metadata is unavailable.
    """
    if (not os.path.exists(os.path.join(root, '.git'))):
        if verbose:
            print(('no .git in %s' % root))
        raise NotThisMethod('no .git directory')
    GITS = ['git']
    if (sys.platform == 'win32'):
        # On Windows git may only be reachable via its .cmd/.exe shims.
        GITS = ['git.cmd', 'git.exe']
    describe_out = run_command(GITS, ['describe', '--tags', '--dirty', '--always', '--long'], cwd=root)
    if (describe_out is None):
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ['rev-parse', 'HEAD'], cwd=root)
    if (full_out is None):
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces['long'] = full_out
    pieces['short'] = full_out[:7]
    pieces['error'] = None
    # Parse describe output of the form TAG-DISTANCE-gHEX[-dirty],
    # or a bare HEX when no tag is reachable.
    git_describe = describe_out
    dirty = git_describe.endswith('-dirty')
    pieces['dirty'] = dirty
    if dirty:
        # Strip the "-dirty" suffix before further parsing.
        git_describe = git_describe[:git_describe.rindex('-dirty')]
    if ('-' in git_describe):
        # TAG-DISTANCE-gHEX form.
        mo = re.search('^(.+)-(\\d+)-g([0-9a-f]+)$', git_describe)
        if (not mo):
            pieces['error'] = ("unable to parse git-describe output: '%s'" % describe_out)
            return pieces
        full_tag = mo.group(1)
        if (not full_tag.startswith(tag_prefix)):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print((fmt % (full_tag, tag_prefix)))
            pieces['error'] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix))
            return pieces
        pieces['closest-tag'] = full_tag[len(tag_prefix):]
        pieces['distance'] = int(mo.group(2))
        pieces['short'] = mo.group(3)
    else:
        # Bare HEX: no tags reachable; distance is the total commit count.
        pieces['closest-tag'] = None
        count_out = run_command(GITS, ['rev-list', 'HEAD', '--count'], cwd=root)
        pieces['distance'] = int(count_out)
    return pieces
class ModelArguments():
    """Command-line arguments selecting the model, config and tokenizer.

    NOTE(review): the annotated `field(...)` declarations imply this class
    was decorated with `@dataclass` (stripped in this excerpt) and parsed
    with HfArgumentParser — confirm against the original script.
    """
    # Checkpoint to initialize weights from; unset means train from scratch.
    model_name_or_path: Optional[str] = field(default=None, metadata={'help': "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."})
    # Architecture name to instantiate when training from scratch.
    model_type: Optional[str] = field(default=None, metadata={'help': ('If training from scratch, pass a model type from the list: ' + ', '.join(MODEL_TYPES))})
    # Comma-separated config overrides; only valid for from-scratch training
    # (enforced in __post_init__ below).
    config_overrides: Optional[str] = field(default=None, metadata={'help': 'Override some existing default config settings when a model is trained from scratch. Example: n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    use_fast_tokenizer: bool = field(default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `huggingface-cli login` (necessary to use this script with private models).'})

    def __post_init__(self):
        # Config overrides rebuild the config from defaults, so they cannot
        # be combined with loading an existing config or checkpoint.
        if ((self.config_overrides is not None) and ((self.config_name is not None) or (self.model_name_or_path is not None))):
            raise ValueError("--config_overrides can't be used in combination with --config_name or --model_name_or_path")
# NOTE(review): the three bare lines below look like stripped decorators
# for the rewrite function that follows (in PyTensor these would be
# registration decorators plus `@node_rewriter([Elemwise])`) — confirm
# against upstream before executing this file.
_canonicalize
_specialize
_rewriter([Elemwise])
def local_useless_composite_outputs(fgraph, node):
    """Shrink an Elemwise Composite node to only its used outputs.

    Drops inner outputs that have no clients in `fgraph`, and inner inputs
    that only fed those dropped outputs.  Returns a mapping from each
    still-used old output to the corresponding output of a smaller
    replacement node, or None when the rewrite does not apply.
    """
    # Only applies to Elemwise nodes wrapping a scalar Composite.
    if ((not isinstance(node.op, Elemwise)) or (not isinstance(node.op.scalar_op, ps.Composite))):
        return
    comp = node.op.scalar_op
    # Indices of outer outputs that still have clients in the graph.
    used_outputs_idxs = [i for (i, o_extern) in enumerate(node.outputs) if fgraph.clients[o_extern]]
    used_inner_outputs = [comp.outputs[i] for i in used_outputs_idxs]
    # Rebuild the inner graph with only the used outputs so we can see
    # which inner inputs remain reachable.
    comp_fgraph = FunctionGraph(inputs=comp.inputs, outputs=used_inner_outputs, clone=False)
    used_inputs_idxs = [i for (i, i_intern) in enumerate(comp_fgraph.inputs) if comp_fgraph.clients[i_intern]]
    used_inner_inputs = [comp.inputs[i] for i in used_inputs_idxs]
    # Only rewrite when at least one input or output was actually dropped.
    if ((len(used_inner_inputs) < len(node.inputs)) or (len(used_inner_outputs) < len(node.outputs))):
        used_inputs = [node.inputs[i] for i in used_inputs_idxs]
        c = ps.Composite(inputs=used_inner_inputs, outputs=used_inner_outputs)
        e = Elemwise(scalar_op=c)(*used_inputs, return_list=True)
        # Map each surviving old output to its replacement.
        return dict(zip([node.outputs[i] for i in used_outputs_idxs], e))
def test_attribute():
    """The 'maxRange' attribute info exposes the expected values and exact
    Python types, both directly and through its linked unit."""
    info = Attribute.getInstance().getAttributeInfo('maxRange')

    def resolve(dotted):
        # Walk a dotted attribute path starting from `info`.
        target = info
        for part in dotted.split('.'):
            target = getattr(target, part)
        return target

    checks = (
        ('attributeID', 54, int),
        ('attributeName', 'maxRange', str),
        ('defaultValue', 0.0, float),
        ('description', 'Distance below which range does not affect the to-hit equation.', str),
        ('displayName', 'Optimal Range', str),
        ('highIsGood', True, bool),
        ('iconID', 1391, int),
        ('name', 'maxRange', str),
        ('published', True, bool),
        ('unitID', 1, int),
        ('unit.ID', 1, int),
        ('unit.displayName', 'm', str),
        ('unit.name', 'Length', str),
        ('unit.unitID', 1, int),
        ('unit.unitName', 'Length', str),
    )
    for path, expected, expected_type in checks:
        actual = resolve(path)
        assert actual == expected
        assert type(actual) is expected_type
def test_can_handle_nms_with_constant_maxnum():
    """After ONNX preprocessing, every NonMaxSuppression node exported with
    a constant max_num carries exactly 5 attributes."""

    class ModuleNMS(torch.nn.Module):
        def forward(self, boxes, scores):
            return nms(boxes, scores, iou_threshold=0.4, max_num=10)

    processed = preprocess_onnx(export_nms_module_to_onnx(ModuleNMS))
    nms_nodes = [n for n in processed.graph.node if 'NonMaxSuppression' in n.name]
    for node in nms_nodes:
        assert len(node.attribute) == 5, 'The NMS must have 5 attributes.'
def test_prepare_sdist(config: Config, config_cache_dir: Path, artifact_cache: ArtifactCache, fixture_dir: FixtureDirGetter, mock_file_downloads: None) -> None:
    """Preparing an sdist builds a wheel inside the cache directory derived
    from the archive's link."""
    env = EnvManager.get_system_env()
    pool = Factory.create_pool(config)
    chef = Chef(artifact_cache, env, pool)

    sdist = (fixture_dir('distributions') / 'demo-0.1.0.tar.gz').resolve()
    expected_dir = artifact_cache.get_cache_directory_for_link(Link(sdist.as_uri()))

    built = chef.prepare(sdist)

    assert built.parent == expected_dir
    assert built.name == 'demo-0.1.0-py3-none-any.whl'
class CoberturaReportSuite(Suite):
    """Tests for Cobertura report helpers: line-rate formatting and the XML
    serialization of nested CoberturaPackage objects."""

    # NOTE(review): the bare `.skipif(...)` lines below are not valid
    # statements; they appear to be stripped `@pytest.mark.skipif(...)`
    # decorators for the following methods — confirm against upstream.
    .skipif((lxml is None), reason='Cannot import lxml. Is it installed?')
    def test_get_line_rate(self) -> None:
        # 0/0 is reported as fully covered; otherwise covered/total to 4 dp.
        assert_equal('1.0', get_line_rate(0, 0))
        assert_equal('0.3333', get_line_rate(1, 3))

    .skipif((lxml is None), reason='Cannot import lxml. Is it installed?')
    def test_as_xml(self) -> None:
        import lxml.etree as etree
        # Parent package: 21/42 covered -> line-rate 0.5000.
        cobertura_package = CoberturaPackage('foobar')
        cobertura_package.covered_lines = 21
        cobertura_package.total_lines = 42
        # Fully covered child package carrying one <class/> element.
        child_package = CoberturaPackage('raz')
        child_package.covered_lines = 10
        child_package.total_lines = 10
        child_package.classes['class'] = etree.Element('class')
        cobertura_package.packages['raz'] = child_package
        expected_output = textwrap.dedent(' <package complexity="1.0" name="foobar" branch-rate="0" line-rate="0.5000">\n <classes/>\n <packages>\n <package complexity="1.0" name="raz" branch-rate="0" line-rate="1.0000">\n <classes>\n <class/>\n </classes>\n </package>\n </packages>\n </package>\n ').encode('ascii')
        assert_equal(expected_output, etree.tostring(cobertura_package.as_xml(), pretty_print=True))
class TouchExecutor(ActionExecutor):
    """Executes a 'touch' action: the target object must be found, close to
    the character, and not enclosed inside something closed."""

    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo, char_index, modify=True, in_place=False):
        """Yield the resulting state when the touch succeeds; yield nothing
        and record an error on `info` otherwise."""
        line = script[0]
        info.set_current_line(line)
        target = state.get_state_node(line.object())
        if target is None:
            info.object_found_error()
            return
        if not self.check_reachable(state, target, info, char_index):
            return
        # Touch applies no state changes; change_state([]) presumably
        # returns an updated copy unless in_place is set — TODO confirm.
        yield state.change_state([], in_place=in_place) if modify else state

    def check_reachable(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo, char_index):
        """Return True when `node` is close to the character and not inside
        a closed container; record an error on `info` otherwise."""
        if _is_character_close_to(state, node, char_index):
            if not _is_inside(state, node):
                return True
            info.error('{} is inside other closed thing', node)
        else:
            info.error('{} is not close to {}', _get_character_node(state, char_index), node)
        return False
class F23_TestCase(F18_TestCase):
    """timezone kickstart parsing for F23: --utc normalization to --isUtc
    and ntpservers list handling (empty entries, conflict with --nontp)."""

    def runTest(self):
        # ntp.cesnet.cz followed by four identical pool entries.
        servers = 'ntp.cesnet.cz,' + ','.join(['0.fedora.pool.ntp.org'] * 4)
        self.assert_parse(
            'timezone --utc Europe/Prague --ntpservers=' + servers,
            'timezone Europe/Prague --isUtc --ntpservers=' + servers + '\n')
        # Empty items in the server list are tolerated.
        self.assert_parse('timezone --utc Europe/Sofia --ntpservers=,0.fedora.pool.ntp.org,')
        # --nontp and --ntpservers are mutually exclusive.
        self.assert_parse_error('timezone Europe/Sofia --nontp --ntpservers=0.fedora.pool.ntp.org,1.fedora.pool.ntp.org')
class Annotation(NamedTuple):
    """One object annotation: a bounding box tied to an image and category,
    plus optional per-box flags (presumably OpenImages-style — confirm)."""
    # Required core fields.
    area: float
    image_id: str
    bbox: BoundingBox
    category_no: int
    category_id: str
    # Optional identity / provenance / scoring.
    id: Optional[int] = None
    source: Optional[str] = None
    confidence: Optional[float] = None
    # Optional per-box flags; None when the source dataset lacks them.
    is_group_of: Optional[bool] = None
    is_truncated: Optional[bool] = None
    is_occluded: Optional[bool] = None
    is_depiction: Optional[bool] = None
    is_inside: Optional[bool] = None
    # Optional segmentation payload (schema not visible here).
    segmentation: Optional[Dict] = None
class Color():
    """An RGBA color stored as four 32-bit floats in `self._val` (an F4).

    Construction accepts 1-4 numeric components, a tuple/sequence of the
    same, a hex string (#rgb, #rgba, #rrggbb, #rrggbbaa), a CSS
    rgb()/rgba() string, or a named color resolved via NAMED_COLORS.

    NOTE(review): accessors such as `rgba`, `rgb`, `r`, `g`, `b`, `a`,
    `gray`, `hex`, `hexa`, `css` appear here as plain methods, yet other
    code in this class uses them without calling them (`__iter__` reads
    `self.rgba`, `gray` unpacks `self.rgb`) — they were presumably
    `@property`, and the `from_*` constructors (which take `cls`)
    presumably `@classmethod`, before decorators were stripped.  Confirm
    against upstream before relying on call syntax.
    """

    # Only storage is the packed float4 value; no per-instance __dict__.
    __slots__ = ['_val']

    def __init__(self, *args):
        """Initialize from a single color-like argument or from separate
        numeric components."""
        if (len(args) == 1):
            color = args[0]
            if isinstance(color, (int, float)):
                # A single number is treated as a 1-tuple (grayscale).
                self._set_from_tuple(args)
            elif isinstance(color, str):
                self._set_from_str(color)
            else:
                self._set_from_tuple(color)
        else:
            self._set_from_tuple(args)

    def __repr__(self):
        # Format each channel with 4 decimals, trimming trailing zeros but
        # keeping at least 3 characters (e.g. '0.5', '1.0').
        f = (lambda v: f'{v:0.4f}'.rstrip('0').ljust(3, '0'))
        return f'Color({f(self.r)}, {f(self.g)}, {f(self.b)}, {f(self.a)})'

    def __array_interface__(self):
        """Numpy array-interface dict exposing the raw float32 data.

        NOTE(review): `__array_interface__` is normally an attribute or
        property, not a method — likely a stripped `@property`.
        """
        readonly = True
        ptr = ctypes.addressof(self._val)
        x = dict(version=3, shape=(4,), typestr='<f4', data=(ptr, readonly))
        return x

    def __len__(self):
        # Always four components (r, g, b, a).
        return 4

    def __getitem__(self, index):
        return self._val[index]

    def __iter__(self):
        return self.rgba.__iter__()

    def __eq__(self, other):
        # Coerce the other operand so tuples/strings compare naturally.
        if (not isinstance(other, Color)):
            other = Color(other)
        return all(((self._val[i] == other._val[i]) for i in range(4)))

    def __add__(self, other):
        # Component-wise RGB addition; alpha is taken from self.
        return Color((self.r + other.r), (self.g + other.g), (self.b + other.b), self.a)

    def __mul__(self, factor):
        # Scale RGB by a scalar; alpha is unchanged.
        if (not isinstance(factor, (float, int))):
            raise TypeError('Can only multiple a color with a scalar.')
        return Color((self.r * factor), (self.g * factor), (self.b * factor), self.a)

    def __truediv__(self, factor):
        # Division delegates to multiplication by the reciprocal.
        if (not isinstance(factor, (float, int))):
            raise TypeError('Can only multiple a color with a scalar.')
        return self.__mul__((1 / factor))

    def _set_from_rgba(self, r, g, b, a):
        # Alpha is clamped to [0, 1]; RGB is stored as-is (may be out of
        # range until clip() is applied).
        a = max(0.0, min(1.0, float(a)))
        self._val = F4(float(r), float(g), float(b), a)

    def _set_from_tuple(self, color):
        """Interpret 1-4 numbers: (v)->gray, (v,a)->gray+alpha,
        (r,g,b)->opaque, (r,g,b,a)->full RGBA."""
        color = tuple((float(c) for c in color))
        if (len(color) == 4):
            self._set_from_rgba(*color)
        elif (len(color) == 3):
            self._set_from_rgba(*color, 1)
        elif (len(color) == 2):
            self._set_from_rgba(color[0], color[0], color[0], color[1])
        elif (len(color) == 1):
            self._set_from_rgba(color[0], color[0], color[0], 1)
        else:
            raise ValueError(f'Cannot parse color tuple with {len(color)} values')

    def _set_from_str(self, color):
        """Parse hex (#rgb/#rgba/#rrggbb/#rrggbbaa), CSS rgb()/rgba(), or a
        named color (resolved via NAMED_COLORS and re-parsed)."""
        color = color.lower()
        if color.startswith('#'):
            if (len(color) == 7):
                # #rrggbb
                self._set_from_rgba((int(color[1:3], 16) / 255), (int(color[3:5], 16) / 255), (int(color[5:7], 16) / 255), 1)
            elif (len(color) == 4):
                # #rgb shorthand: each digit scaled by 15.
                self._set_from_rgba((int(color[1], 16) / 15), (int(color[2], 16) / 15), (int(color[3], 16) / 15), 1)
            elif (len(color) == 9):
                # #rrggbbaa
                self._set_from_rgba((int(color[1:3], 16) / 255), (int(color[3:5], 16) / 255), (int(color[5:7], 16) / 255), (int(color[7:9], 16) / 255))
            elif (len(color) == 5):
                # #rgba shorthand
                self._set_from_rgba((int(color[1], 16) / 15), (int(color[2], 16) / 15), (int(color[3], 16) / 15), (int(color[4], 16) / 15))
            else:
                raise ValueError(f'Expecting 4, 5, 7, or 9 chars in a hex number, got {len(color)}.')
        elif color.startswith(('rgb(', 'rgba(')):
            # Extract the comma-separated values between the parentheses.
            parts = color.split('(')[1].split(')')[0].split(',')
            parts = [_float_from_css_value(p, i) for (i, p) in enumerate(parts)]
            if (len(parts) == 3):
                self._set_from_rgba(parts[0], parts[1], parts[2], 1)
            elif (len(parts) == 4):
                self._set_from_rgba(parts[0], parts[1], parts[2], parts[3])
            else:
                raise ValueError(f"CSS color {color.split('(')[0]}(..) must have 3 or 4 elements, not {len(parts)} ")
        else:
            try:
                color_int = NAMED_COLORS[color.lower()]
            except KeyError:
                raise ValueError(f"Unknown color: '{color}'") from None
            else:
                # Named colors map to another parsable string (re-parsed).
                self._set_from_str(color_int)

    def rgba(self):
        """The (r, g, b, a) components as plain floats."""
        return (self._val[0], self._val[1], self._val[2], self._val[3])

    def rgb(self):
        """The (r, g, b) components as plain floats."""
        return (self._val[0], self._val[1], self._val[2])

    def r(self):
        """Red component."""
        return self._val[0]

    def g(self):
        """Green component."""
        return self._val[1]

    def b(self):
        """Blue component."""
        return self._val[2]

    def a(self):
        """Alpha (opacity) component."""
        return self._val[3]

    def gray(self):
        """Weighted grayscale value; the (0.2989, 0.587, 0.114) constants
        match BT.601-style luma weights."""
        (r, g, b) = self.rgb
        return (((0.2989 * r) + (0.587 * g)) + (0.114 * b))

    def hex(self):
        """'#rrggbb' string (alpha dropped; channels clipped and rounded)."""
        c = self.clip()
        r = int(((c.r * 255) + 0.5))
        b = int(((c.b * 255) + 0.5))
        g = int(((c.g * 255) + 0.5))
        i = (((r << 16) + (g << 8)) + b)
        return ('#' + hex(i)[2:].rjust(6, '0'))

    def hexa(self):
        """'#rrggbbaa' string (channels clipped and rounded)."""
        c = self.clip()
        r = int(((c.r * 255) + 0.5))
        b = int(((c.b * 255) + 0.5))
        g = int(((c.g * 255) + 0.5))
        a = int(((c.a * 255) + 0.5))
        i = ((((r << 24) + (g << 16)) + (b << 8)) + a)
        return ('#' + hex(i)[2:].rjust(8, '0'))

    def css(self):
        """CSS 'rgb(..)' string, or 'rgba(..)' when alpha is not 1."""
        (r, g, b, a) = self.rgba
        if (a == 1):
            return f'rgb({int(((255 * r) + 0.5))},{int(((255 * g) + 0.5))},{int(((255 * b) + 0.5))})'
        else:
            return f'rgba({int(((255 * r) + 0.5))},{int(((255 * g) + 0.5))},{int(((255 * b) + 0.5))},{a:0.3f})'

    def clip(self):
        """A copy with every component clamped to [0, 1]."""
        return Color((max(0.0, min(1.0, x)) for x in self.rgba))

    def from_physical(cls, r, g, b, a=1):
        """Build from linear ('physical') RGB by applying the sRGB transfer
        via _physical2srgb."""
        return Color(_physical2srgb(r), _physical2srgb(g), _physical2srgb(b), a)

    def to_physical(self):
        """Linear ('physical') RGB triple via _srgb2physical."""
        return (_srgb2physical(self.r), _srgb2physical(self.g), _srgb2physical(self.b))

    def from_hsv(cls, hue, saturation, value):
        """Build from HSV via colorsys."""
        return Color(colorsys.hsv_to_rgb(hue, saturation, value))

    def to_hsv(self):
        """(hue, saturation, value) via colorsys."""
        return colorsys.rgb_to_hsv(*self.rgb)

    def from_hsl(cls, hue, saturation, lightness):
        """Build from HSL; note colorsys takes HLS argument order."""
        return Color(colorsys.hls_to_rgb(hue, lightness, saturation))

    def to_hsl(self):
        """(hue, saturation, lightness), reordered from colorsys's HLS."""
        (hue, lightness, saturation) = colorsys.rgb_to_hls(*self.rgb)
        return (hue, saturation, lightness)
class HubertCriterionConfig(FairseqDataclass):
    """Loss-weighting and logging configuration for the HuBERT criterion.

    NOTE(review): the `field(...)` declarations imply a stripped
    `@dataclass` decorator on this class — confirm against fairseq.
    """
    # Scale for the predictive loss computed on masked frames.
    pred_masked_weight: float = field(default=1.0, metadata={'help': 'weight for predictive loss for masked frames'})
    # Scale for the predictive loss on unmasked frames (disabled by default).
    pred_nomask_weight: float = field(default=0.0, metadata={'help': 'weight for predictive loss for unmasked frames'})
    # Weights for additional loss terms beyond the first; None disables.
    loss_weights: Optional[List[float]] = field(default=None, metadata={'help': 'weights for additional loss terms (not first one)'})
    # Output keys to include in training logs.
    log_keys: List[str] = field(default_factory=(lambda : []), metadata={'help': 'output keys to log'})
def U2NETP(input=(None, None, 3), out_ch=1):
    """Build the small (U2NETP) U^2-Net variant.

    Returns a Keras model that maps a 0-255-range image to a stack of
    seven sigmoid maps: the fused output d0 followed by the six side
    outputs d1..d6 (d1 at full resolution, deeper maps upsampled to it).
    """
    inp = Input(input)
    x = Lambda((lambda t: (t / 255)))(inp)

    # Encoder: RSU blocks with 2x2 max-pooling between stages.
    x1 = RSU7(x, 16, 64)
    x = MaxPool2D(2, 2)(x1)
    x2 = RSU6(x, 16, 64)
    x = MaxPool2D(2, 2)(x2)
    x3 = RSU5(x, 16, 64)
    x = MaxPool2D(2, 2)(x3)
    x4 = RSU4(x, 16, 64)
    x = MaxPool2D(2, 2)(x4)
    x5 = RSU4(x, 16, 64)
    x = MaxPool2D(2, 2)(x5)
    x6 = RSU4F(x, 16, 64)

    # Decoder: upsample the deeper feature, concat with the encoder skip,
    # then refine with the matching RSU block.
    x5 = RSU4F(tf.concat([_upsample_like(x6, x5), x5], axis=(-1)), 16, 64)
    x4 = RSU4(tf.concat([_upsample_like(x5, x4), x4], axis=(-1)), 16, 64)
    x3 = RSU5(tf.concat([_upsample_like(x4, x3), x3], axis=(-1)), 16, 64)
    x2 = RSU6(tf.concat([_upsample_like(x3, x2), x2], axis=(-1)), 16, 64)
    x1 = RSU7(tf.concat([_upsample_like(x2, x1), x1], axis=(-1)), 16, 64)

    output_activation = 'sigmoid'

    def side_map(feature):
        # 3x3 conv side head, zero-padded to preserve spatial size.
        return Conv2D(out_ch, 3)(ZeroPadding2D((1, 1))(feature))

    # d1 stays at full resolution; deeper side maps are upsampled to match
    # it before the activation.
    d1 = Activation(output_activation)(side_map(x1))
    side_outputs = [d1]
    for feature in (x2, x3, x4, x5, x6):
        upsampled = _upsample_like(side_map(feature), d1)
        side_outputs.append(Activation(output_activation)(upsampled))

    # Fuse all side outputs with a 1x1 conv into the final map d0.
    fused = Conv2D(out_ch, 1)(tf.concat(side_outputs, axis=(-1)))
    d0 = Activation(output_activation)(fused)

    return Model(inp, tf.stack([d0] + side_outputs))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.