code stringlengths 281 23.7M |
|---|
class CosFaceLoss(LargeMarginSoftmaxLoss):
    """CosFace / additive-margin softmax loss.

    Instead of an angular margin, a constant margin is subtracted from the
    cosine similarity of the target classes, and the logits are scaled by a
    fixed factor.
    """

    def __init__(self, *args, margin=0.35, scale=64, **kwargs):
        # Defaults follow the CosFace paper (m = 0.35, s = 64).
        super().__init__(*args, margin=margin, scale=scale, **kwargs)

    def init_margin(self):
        # The additive margin needs no precomputation.
        pass

    def cast_types(self, dtype, device):
        # Keep the class-weight matrix on the requested device/dtype.
        self.W.data = c_f.to_device(self.W.data, device=device, dtype=dtype)

    def modify_cosine_of_target_classes(self, cosine_of_target_classes):
        # Optionally record angle statistics; no gradients needed for stats.
        if self.collect_stats:
            with torch.no_grad():
                self.get_angles(cosine_of_target_classes)
        return cosine_of_target_classes - self.margin

    def scale_logits(self, logits, *_):
        # Extra positional arguments (embeddings, labels, ...) are ignored.
        return logits * self.scale
def test_using_last_explicit_seed(ourtester):
    """`--randomly-seed=last` must reuse the seed from the previous run."""
    ourtester.makepyfile(test_one='\n def test_a():\n pass\n ')
    # First run: pin the seed explicitly.
    result = ourtester.runpytest('--randomly-seed=33')
    result.assert_outcomes(passed=1, failed=0)
    result.stdout.fnmatch_lines(['Using --randomly-seed=33'])
    # Second run: "last" should resolve to the stored seed 33.
    result = ourtester.runpytest('--randomly-seed=last')
    result.assert_outcomes(passed=1, failed=0)
    result.stdout.fnmatch_lines(['Using --randomly-seed=33'])
def model(pretrained=False, **kwargs):
    """Build the VGG-based DA Net, optionally seeded with vgg16 weights.

    Pretrained weights whose keys do not exist in this architecture are
    dropped; parameters new to DA Net keep their fresh initialization.
    """
    net = VGG(make_layers(cfg['O'], dilation=dilation['D1']), **kwargs)
    if not pretrained:
        return net
    model_dict = net.state_dict()
    pretrained_dict = model_zoo.load_url(model_urls['vgg16'])
    print('load pretrained model from {}'.format(model_urls['vgg16']))
    # Report keys that only exist on one side of the transfer.
    for k in pretrained_dict:
        if k not in model_dict:
            print('Key {} is removed from vgg16'.format(k))
    for k in model_dict:
        if k not in pretrained_dict:
            print('Key {} is new added for DA Net'.format(k))
    # Keep only the weights this model can accept, then merge and load.
    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
    model_dict.update(pretrained_dict)
    net.load_state_dict(model_dict)
    return net
class CheckpointHandler():
    """Manages state refreshes and checkpoint uploads for collaborative
    ALBERT pre-training.

    Two cadences are tracked independently:
      * ``save_checkpoint_step_interval`` optimizer steps between state
        refreshes from peers;
      * ``upload_interval`` seconds between pushes of model + optimizer
        state to the git checkpoint repository.
    """

    def __init__(self, coordinator_args: CoordinatorArguments, collab_optimizer_args: CollaborativeOptimizerArguments, averager_args: AveragerArguments, dht: hivemind.DHT):
        self.save_checkpoint_step_interval = coordinator_args.save_checkpoint_step_interval
        self.repo_path = coordinator_args.repo_path
        self.upload_interval = coordinator_args.upload_interval
        self.previous_step = -1
        config = AlbertConfig.from_pretrained(coordinator_args.model_config_path)
        self.model = AlbertForPreTraining(config)
        tokenizer = AlbertBengaliTokenizerFast.from_pretrained('tokenizer/data')
        self.model.resize_token_embeddings(len(tokenizer))
        # Standard convention: no weight decay on biases and LayerNorm weights.
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {'params': [p for (n, p) in self.model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': 0.01},
            {'params': [p for (n, p) in self.model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0},
        ]
        opt = Lamb(optimizer_grouped_parameters, lr=0.00176, weight_decay=0.01, clamp_value=10000.0, debias=True)
        # Leave some lead below the target batch size (headroom for stragglers).
        adjusted_target_batch_size = collab_optimizer_args.target_batch_size - collab_optimizer_args.batch_size_lead
        self.collaborative_optimizer = hivemind.CollaborativeOptimizer(opt=opt, dht=dht, prefix=experiment_prefix, compression_type=hivemind.utils.CompressionType.Value(collab_optimizer_args.compression), throughput=collab_optimizer_args.bandwidth, target_batch_size=adjusted_target_batch_size, client_mode=collab_optimizer_args.client_mode, verbose=True, start=True, **asdict(averager_args))
        self.previous_timestamp = time.time()

    def is_time_to_save_state(self, cur_step):
        """Return True when at least ``save_checkpoint_step_interval`` steps
        passed since the last save; always False when saving is disabled."""
        if self.save_checkpoint_step_interval is None:
            return False
        return (cur_step - self.previous_step) >= self.save_checkpoint_step_interval

    def save_state(self, cur_step):
        """Refresh local optimizer/model state from peers and record the step."""
        self.collaborative_optimizer.load_state_from_peers()
        self.previous_step = cur_step

    def is_time_to_upload(self):
        """Return True when uploading is enabled and the interval has elapsed."""
        if self.repo_path is None:
            return False
        return (time.time() - self.previous_timestamp) >= self.upload_interval

    def upload_checkpoint(self, current_loss):
        """Save model + optimizer state to the repo and push it via git."""
        self.model.save_pretrained(self.repo_path)
        torch.save(self.collaborative_optimizer.opt.state_dict(), f'{self.repo_path}/optimizer_state.pt')
        self.previous_timestamp = time.time()
        try:
            subprocess.run('git add --all', shell=True, check=True, cwd=self.repo_path)
            current_step = self.collaborative_optimizer.collaboration_state.optimizer_step
            subprocess.run(f"git commit -m 'Step {current_step}, loss {current_loss:.3f}'", shell=True, check=True, cwd=self.repo_path)
            subprocess.run('git push', shell=True, check=True, cwd=self.repo_path)
        except subprocess.CalledProcessError as e:
            # FIX: logging uses lazy %-style interpolation. The original call
            # passed e.output as a stray positional argument with no %s in the
            # message, so it was never rendered (logging raises an internal
            # "not all arguments converted" formatting error instead).
            # NOTE(review): e.output is None here since stdout is not captured
            # by these subprocess.run calls — TODO confirm whether capturing
            # output is desired.
            logger.warning('Error while uploading model: %s', e.output)
def test_async_event_hook():
    # Exercises AsyncEventHook: constructor-registered handlers, subscribe(),
    # synchronous trigger(), async trigger(), and safe_trigger() isolating a
    # failing handler from the others.
    calls = []
    # NOTE(review): the bare "()" here and below look like decorator residue
    # stripped from the original source (presumably @asynq()) — TODO confirm
    # against the upstream file.
    ()
    def handler1(*args):
        # handler1 requires at least one argument, so safe_trigger() with no
        # args makes it raise AssertionError (exercised further down).
        assert_gt(len(args), 0)
        calls.append(('handler1%s' % str(args)))
    def handler2(*args):
        calls.append(('handler2%s' % str(args)))
    hook = AsyncEventHook([handler1])
    hook.subscribe(handler2)
    # Synchronous trigger reaches both handlers, order unspecified.
    hook.trigger(1, 2, 'a')
    assert_unordered_list_eq(["handler1(1, 2, 'a')", "handler2(1, 2, 'a')"], calls)
    calls = []
    ()
    def async_trigger():
        (yield hook.trigger.asynq(2, 3))
    async_trigger()
    assert_unordered_list_eq(['handler1(2, 3)', 'handler2(2, 3)'], calls)
    calls = []
    # safe_trigger: handler1 raises, but handler2 must still run — in either
    # registration order.
    hook2 = AsyncEventHook([handler1, handler2])
    with AssertRaises(AssertionError):
        hook2.safe_trigger()
    assert_eq(['handler2()'], calls)
    calls = []
    hook3 = AsyncEventHook([handler2, handler1])
    with AssertRaises(AssertionError):
        hook3.safe_trigger()
    assert_eq(['handler2()'], calls)
def main(args):
    """Apply registered schema fixes and foreign-key additions to databases."""
    print(args)
    # Fix one explicitly requested database, or every known one.
    if args.database:
        databases_to_fix = [args.database]
    else:
        databases_to_fix = set(list(implemented_database_fixes.keys()) + list(databases_add_foreign_keys.keys()))
    for db in databases_to_fix:
        known = (db in implemented_database_fixes) or (db in databases_add_foreign_keys)
        if not known:
            raise NotImplementedError(f'Do not know how to fix database {db}')
        db_path = get_path_and_make_backup(db, args.spider_path, database_path=args.database_path)
        if db in implemented_database_fixes:
            subset, fixer = implemented_database_fixes[db]
            fixer(db_path)
        if db in databases_add_foreign_keys:
            subset, foreign_key_list = databases_add_foreign_keys[db]
            # Missing entry in the delete-map means "delete nothing" (None).
            keys_to_delete = databases_delete_foreign_keys.get(db)
            fix_database_add_foreign_keys(db_path, foreign_key_list, keys_to_delete=keys_to_delete)
def test_separate_processes():
    """The standalone script prints the expected output, and a second run
    completes quickly (does not block on state from the first run)."""
    cmd = ('python', 'tests/standalone_script.py')

    def run_process():
        return subprocess.run(cmd, capture_output=True, text=True)

    first = run_process()
    assert first.stdout.strip() == 'two 2'
    start = time()
    second = run_process()
    elapsed = time() - start
    assert second.stdout.strip() == 'two 2'
    # The second process must finish well within the timeout window.
    assert elapsed < 3
_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for TF XGLM (base model and causal-LM head)."""

    # Classes under test are only populated when TensorFlow is installed.
    all_model_classes = ((TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ())
    all_generative_model_classes = ((TFXGLMForCausalLM,) if is_tf_available() else ())
    pipeline_model_mapping = ({'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {})
    # Feature switches consumed by the shared tester mixin.
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        # Shared model/config testers used by the common test suite.
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model_common_attributes(self):
        # Input embeddings must be Keras layers; only generative classes are
        # expected to expose output embeddings, and no class exposes a bias.
        (config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if (model_class in self.all_generative_model_classes):
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert (name is None)
            else:
                x = model.get_output_embeddings()
                assert (x is None)
                name = model.get_bias()
                assert (name is None)
    def test_batch_generation(self):
        # Left-padded batched generation must match per-sentence generation.
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        tokenizer.padding_side = 'left'
        sentences = ['Hello, my dog is a little', 'Today, I']
        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        outputs = model.generate(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])
        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)
        # Count padding tokens in the batch so the unbatched run for the
        # shorter sentence can be capped to the same effective length.
        num_paddings = (inputs_non_padded.shape[(- 1)] - tf.math.reduce_sum(tf.cast(inputs['attention_mask'][(- 1)], dtype=tf.int64)).numpy())
        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=(model.config.max_length - num_paddings))
        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
        expected_output_sentence = ['Hello, my dog is a little bit of a shy one, but he is very friendly', 'Today, I am going to share with you a few of my favorite things']
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    # NOTE(review): the bare parenthesized expression below looks like a
    # stripped decorator (presumably @unittest.skip(reason=...)) — TODO
    # confirm against the upstream file; as written it is not valid syntax.
    (reason='Currently, model embeddings are going to undergo a major refactor.')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
()
('-n', '--nodeid', default=None)
('-a', '--allof', default=None)
('-o', '--offset', default='')
_options
_options
def submit(nodeid, allof, offset, metadir, accept_metadir, controller, ctrlopt, modelsetup, modelopt, backend, local, verbosity):
    """Submit one node (by id) or every submittable node of a stage."""
    handle_common_options(verbosity)
    ys = handle_connection_options(metadir, accept_metadir, controller, ctrlopt, modelsetup, modelopt, backend, local)
    controller = ys.controller
    # With no selection at all, just list what could be submitted.
    if not (allof or nodeid):
        click_print_submittable_nodes(controller)
        return
    if nodeid:
        submittable = controller.submittable_nodes()
        nodes_to_submit = [nodeid] if nodeid in submittable else []
    if allof:
        # "allof" has the form <offset>/<stage-name>.
        offset, name = allof.rsplit('/', 1)
        rule = controller.adageobj.view().getRule(name=name, offset=offset)
        if not rule:
            click.secho('stage not found!', fg='red')
            return
        all_submittable = controller.submittable_nodes()
        _, s2r, _ = utils.rule_steps_indices(controller.adageobj)
        # Keep only the submittable nodes belonging to the requested stage.
        nodes_to_submit = [node for node in all_submittable if s2r[node] == rule.identifier]
    if not nodes_to_submit:
        click.secho('No nodes to submit (perhaps already submitted?)', fg='yellow')
        return
    controller.submit_nodes(nodes_to_submit)
    click.secho('submitted: {}'.format(nodes_to_submit), fg='green')
_fixtures(ConfigWithFiles)
def test_incorrect_settings(config_with_files):
    """Configuring with a setting name the spec does not declare must raise
    ConfigurationException."""
    fixture = config_with_files
    # Write a config file containing an unknown setting name.
    fixture.new_config_file(filename=ConfigWithSetting.filename, contents='some_key.some_wrong_name = 3')
    fixture.set_config_spec(easter_egg, 'reahl.component_dev.test_config:ConfigWithSetting')
    config = StoredConfiguration(fixture.config_dir.name)
    with expected(ConfigurationException):
        config.configure()
class InceptionV1Test(tf.test.TestCase):
    """Shape/structure tests for slim's InceptionV1 (TF1 graph mode)."""

    def testBuildClassificationNetwork(self):
        """Full network: logits shape and presence of Predictions endpoint."""
        batch_size = 5
        (height, width) = (224, 224)
        num_classes = 1000
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (logits, end_points) = inception.inception_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
        self.assertTrue(('Predictions' in end_points))
        self.assertListEqual(end_points['Predictions'].get_shape().as_list(), [batch_size, num_classes])
    def testBuildBaseNetwork(self):
        """Base network: final tensor name/shape and full endpoint list."""
        batch_size = 5
        (height, width) = (224, 224)
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (mixed_6c, end_points) = inception.inception_v1_base(inputs)
        self.assertTrue(mixed_6c.op.name.startswith('InceptionV1/Mixed_5c'))
        self.assertListEqual(mixed_6c.get_shape().as_list(), [batch_size, 7, 7, 1024])
        expected_endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
        self.assertItemsEqual(end_points.keys(), expected_endpoints)
    def testBuildOnlyUptoFinalEndpoint(self):
        """Building up to endpoint i yields exactly endpoints[0..i]."""
        batch_size = 5
        (height, width) = (224, 224)
        endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c', 'MaxPool_4a_3x3', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_4f', 'MaxPool_5a_2x2', 'Mixed_5b', 'Mixed_5c']
        for (index, endpoint) in enumerate(endpoints):
            # Fresh graph per endpoint so variable names do not collide.
            with tf.Graph().as_default():
                inputs = tf.random_uniform((batch_size, height, width, 3))
                (out_tensor, end_points) = inception.inception_v1_base(inputs, final_endpoint=endpoint)
                self.assertTrue(out_tensor.op.name.startswith(('InceptionV1/' + endpoint)))
                self.assertItemsEqual(endpoints[:(index + 1)], end_points)
    def testBuildAndCheckAllEndPointsUptoMixed5c(self):
        """Every endpoint up to Mixed_5c has the documented output shape."""
        batch_size = 5
        (height, width) = (224, 224)
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (_, end_points) = inception.inception_v1_base(inputs, final_endpoint='Mixed_5c')
        endpoints_shapes = {'Conv2d_1a_7x7': [5, 112, 112, 64], 'MaxPool_2a_3x3': [5, 56, 56, 64], 'Conv2d_2b_1x1': [5, 56, 56, 64], 'Conv2d_2c_3x3': [5, 56, 56, 192], 'MaxPool_3a_3x3': [5, 28, 28, 192], 'Mixed_3b': [5, 28, 28, 256], 'Mixed_3c': [5, 28, 28, 480], 'MaxPool_4a_3x3': [5, 14, 14, 480], 'Mixed_4b': [5, 14, 14, 512], 'Mixed_4c': [5, 14, 14, 512], 'Mixed_4d': [5, 14, 14, 512], 'Mixed_4e': [5, 14, 14, 528], 'Mixed_4f': [5, 14, 14, 832], 'MaxPool_5a_2x2': [5, 7, 7, 832], 'Mixed_5b': [5, 7, 7, 832], 'Mixed_5c': [5, 7, 7, 1024]}
        self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
        for endpoint_name in endpoints_shapes:
            expected_shape = endpoints_shapes[endpoint_name]
            self.assertTrue((endpoint_name in end_points))
            self.assertListEqual(end_points[endpoint_name].get_shape().as_list(), expected_shape)
    def testModelHasExpectedNumberOfParameters(self):
        """Base network parameter count must stay fixed at 5,607,184."""
        batch_size = 5
        (height, width) = (224, 224)
        inputs = tf.random_uniform((batch_size, height, width, 3))
        with slim.arg_scope(inception.inception_v1_arg_scope()):
            inception.inception_v1_base(inputs)
        (total_params, _) = slim.model_analyzer.analyze_vars(slim.get_model_variables())
        self.assertAlmostEqual(5607184, total_params)
    def testHalfSizeImages(self):
        """112x112 inputs halve the spatial dims of the final feature map."""
        batch_size = 5
        (height, width) = (112, 112)
        inputs = tf.random_uniform((batch_size, height, width, 3))
        (mixed_5c, _) = inception.inception_v1_base(inputs)
        self.assertTrue(mixed_5c.op.name.startswith('InceptionV1/Mixed_5c'))
        self.assertListEqual(mixed_5c.get_shape().as_list(), [batch_size, 4, 4, 1024])
    def testUnknownImageShape(self):
        """Network builds with unknown H/W; runtime shape resolves correctly."""
        tf.reset_default_graph()
        batch_size = 2
        (height, width) = (224, 224)
        num_classes = 1000
        input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
        with self.test_session() as sess:
            # Height/width left as None in the placeholder.
            inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
            (logits, end_points) = inception.inception_v1(inputs, num_classes)
            self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
            self.assertListEqual(logits.get_shape().as_list(), [batch_size, num_classes])
            pre_pool = end_points['Mixed_5c']
            feed_dict = {inputs: input_np}
            tf.global_variables_initializer().run()
            pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
            self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
    def testUnknowBatchSize(self):
        """Static batch dim stays None; runtime batch resolves to the fed size."""
        batch_size = 1
        (height, width) = (224, 224)
        num_classes = 1000
        inputs = tf.placeholder(tf.float32, (None, height, width, 3))
        (logits, _) = inception.inception_v1(inputs, num_classes)
        self.assertTrue(logits.op.name.startswith('InceptionV1/Logits'))
        self.assertListEqual(logits.get_shape().as_list(), [None, num_classes])
        images = tf.random_uniform((batch_size, height, width, 3))
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(logits, {inputs: images.eval()})
            self.assertEquals(output.shape, (batch_size, num_classes))
    def testEvaluation(self):
        """Inference mode (is_training=False) produces per-example predictions."""
        batch_size = 2
        (height, width) = (224, 224)
        num_classes = 1000
        eval_inputs = tf.random_uniform((batch_size, height, width, 3))
        (logits, _) = inception.inception_v1(eval_inputs, num_classes, is_training=False)
        predictions = tf.argmax(logits, 1)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEquals(output.shape, (batch_size,))
    def testTrainEvalWithReuse(self):
        """An eval tower can reuse the train tower's variables (reuse=True)."""
        train_batch_size = 5
        eval_batch_size = 2
        (height, width) = (224, 224)
        num_classes = 1000
        train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
        inception.inception_v1(train_inputs, num_classes)
        eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
        (logits, _) = inception.inception_v1(eval_inputs, num_classes, reuse=True)
        predictions = tf.argmax(logits, 1)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            output = sess.run(predictions)
            self.assertEquals(output.shape, (eval_batch_size,))
    def testLogitsNotSqueezed(self):
        """spatial_squeeze=False keeps the [1, 1, 1, num_classes] logits shape."""
        num_classes = 25
        images = tf.random_uniform([1, 224, 224, 3])
        (logits, _) = inception.inception_v1(images, num_classes=num_classes, spatial_squeeze=False)
        with self.test_session() as sess:
            tf.global_variables_initializer().run()
            logits_out = sess.run(logits)
            self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
.integration
def test_arms_import(long_project):
    """Importing a third arm (and an event for it) makes it appear in the
    arm export alongside the existing two."""
    response = long_project.import_arms([{'arm_num': 3, 'name': 'Drug C'}])
    assert response == 1
    # Also import an event attached to the new arm.
    long_project.import_events([{'event_name': 'new_event', 'arm_num': '3'}])
    exported = long_project.export_arms()
    assert len(exported) == 3
    assert [arm['arm_num'] for arm in exported] == [1, 2, 3]
    assert [arm['name'] for arm in exported] == ['Drug A', 'Drug B', 'Drug C']
def main():
    """Read raw lines from stdin and emit symbolized lines to stdout."""
    args = get_args()
    # --dir may be given multiple times, each yielding a sub-list; flatten.
    args.dirs = [d for group in (args.dir or []) for d in group]
    symbolizer = Symbolizer(sys.stdout, args.dirs, args.strip_path)
    for line in sys.stdin:
        symbolizer.write(line)
    symbolizer.flush()
class SilentListener(AbstractListener):
    """Listener whose setters are all no-ops (discards every update)."""

    def _set_volume(self, volume):
        """Ignore volume changes."""

    def _set_position(self, position):
        """Ignore position changes."""

    def _set_forward_orientation(self, orientation):
        """Ignore forward-orientation changes."""

    def _set_up_orientation(self, orientation):
        """Ignore up-orientation changes."""

    def _set_orientation(self):
        """Ignore combined orientation updates."""
def test_parse_tree():
    """Follow the known-good path of a lisp problem until it reports solved."""
    problem = '(q-transform/hint (quote (lambda (cdr (cdr (var ()))))) (quote ((() y . 1) (#f y () . #t) (#f b () b . y) (x #f (#f . #f) . #t) (a #f y x s . a))))'
    print('Starting problem:', problem)
    with Interaction(lisp.parse(problem)) as env:
        signal = None
        step = 0
        while signal != 'solved':
            # parse_tree is evaluated on each state along the path.
            choices = parse_tree(env.state)
            signal = env.follow_path(env.good_path)
            step += 1
            print('Step', step, 'Signal:', signal)
    print('Completed.')
def tsym_block(qubits: List[cirq.Qid], params: List[Number]) -> List[cirq.Operation]:
    """Parameterized Y rotations on both qubits followed by the operations of
    the tsym permutation circuit remapped onto those qubits."""
    placement = {cirq.GridQubit(0, 0): qubits[0], cirq.GridQubit(0, 1): qubits[1]}
    permuted = _load_circuit('tsym_permute.json').transform_qubits(placement)
    rotations = [cirq.Y(qubits[0]) ** params[0], cirq.Y(qubits[1]) ** params[1]]
    return rotations + list(permuted.all_operations())
class CLinker(Linker):
    def __init__(self, schedule=None):
        """Create a CLinker not yet bound to any graph.

        schedule: optional scheduler, forwarded to the base Linker.
        """
        self.fgraph = None
        super().__init__(scheduler=schedule)
def accept(self, fgraph: 'FunctionGraph', no_recycling=None, profile=None) -> 'CLinker':
if (no_recycling is None):
no_recycling = []
if ((self.fgraph is not None) and (self.fgraph is not fgraph)):
return type(self)(self.schedule).accept(fgraph, no_recycling, profile)
self.fgraph = fgraph
self.fetch_variables()
self.no_recycling = no_recycling
return self
    def fetch_variables(self):
        """Derive from ``self.fgraph`` everything code generation needs:
        inputs/outputs, node order, params constants, orphans, inlinable
        consts and temporaries."""
        fgraph = self.fgraph
        self.inputs = fgraph.inputs
        self.outputs = fgraph.outputs
        # Node execution order as decided by the scheduler.
        self.node_order = self.schedule(fgraph)
        # Unused inputs (no clients) first, then everything on a path from
        # inputs to outputs.
        self.variables = [var for var in self.inputs if (not len(fgraph.clients[var]))]
        self.variables += list(vars_between(self.inputs, self.outputs))
        # Map each op's params object to the Constant carrying it, sharing a
        # single Constant between nodes whose params compare equal.
        self.node_params = dict()
        for node in self.node_order:
            if (not isinstance(node.op, CLinkerOp)):
                continue
            try:
                params = node.op.get_params(node)
            except MethodNotDefined:
                params = NoParams
            if (params is not NoParams):
                if (params in self.node_params):
                    var = self.node_params[params]
                    assert (var.type == node.params_type)
                    fgraph.clients[var].append((node, 'params'))
                else:
                    var = Constant(node.params_type, params)
                    fgraph.clients[var] = [(node, 'params')]
                    self.node_params[params] = var
                    self.variables.append(var)
        # Orphans: atomic variables used by the graph but not passed as inputs.
        self.orphans = [r for r in self.variables if (isinstance(r, AtomicVariable) and (r not in self.inputs))]
        # Constants with a C literal representation are inlined directly into
        # the generated code instead of being passed as orphan storage.
        self.consts = []
        for variable in self.orphans:
            if (isinstance(variable, Constant) and isinstance(variable.type, CLinkerType) and variable.type.c_literal(variable.data)):
                self.consts.append(variable)
                # NOTE(review): removing from self.orphans while iterating it
                # skips the element that follows each removed const —
                # presumably tolerated upstream; verify before relying on it.
                self.orphans.remove(variable)
        self.temps = list(set(self.variables).difference(self.inputs).difference(self.outputs).difference(self.orphans))
    def code_gen(self):
        """Generate (and cache) the C struct source for the whole graph.

        Builds per-variable declare/extract/cleanup code blocks, then
        per-node behavior blocks, assigns each an integer task id, and
        finally assembles everything with struct_gen. The result and the
        bookkeeping lists are memoized on self.
        """
        # Memoized: generate only once per linker.
        if getattr(self, 'struct_code', False):
            return self.struct_code
        no_recycling = self.no_recycling
        c_support_code_apply = []
        c_init_code_apply = []
        # Maps each variable to the C identifier (or literal) used for it.
        symbol = {}
        init_tasks = []
        tasks = []
        init_blocks = []
        blocks = []
        failure_var = '__failure'
        # Task ids: each variable consumes two ids (init, get), each node
        # consumes two ids (code, init) — mirrored by get_init_tasks().
        id = 1
        for variable in self.variables:
            if (not isinstance(variable.type, CLinkerType)):
                raise NotImplementedError(f'Type of {variable} cannot produce C code')
            sub = dict(failure_var=failure_var)
            # Inlinable constants become a parenthesized C literal; they get
            # no storage, no tasks and no code blocks.
            if (variable in self.consts):
                symbol[variable] = (('(' + variable.type.c_literal(variable.data)) + ')')
                continue
            elif (variable in self.inputs):
                # Inputs: extracted from storage each call, never synced back.
                policy = [[get_nothing, get_nothing, get_nothing], [get_c_declare, get_c_extract, get_c_cleanup]]
            elif (variable in self.orphans):
                if (not isinstance(variable, AtomicVariable)):
                    raise TypeError(f'All orphans to CLinker must be Constant instances. Got {variable}')
                # Orphans: extracted once at struct init time.
                policy = [[get_c_declare, get_c_extract, get_c_cleanup], [get_nothing, get_nothing, get_nothing]]
            elif (variable in self.temps):
                # Temporaries: simple/no-recycle ones live per-call, others
                # persist in the struct across calls.
                if (variable.type.c_is_simple() or (variable in no_recycling)):
                    policy = [[get_nothing, get_nothing, get_nothing], [get_c_declare, get_c_init, get_c_cleanup]]
                else:
                    policy = [[get_c_declare, get_c_init, get_c_cleanup], [get_nothing, get_nothing, get_nothing]]
            elif (variable in self.outputs):
                # Outputs: always synced back to their storage after the call.
                if (variable.type.c_is_simple() or (variable in no_recycling)):
                    policy = [[get_nothing, get_nothing, get_nothing], [get_c_declare, get_c_init, (get_c_sync, get_c_cleanup)]]
                else:
                    policy = [[get_nothing, get_nothing, get_nothing], [get_c_declare, get_c_extract_out, (get_c_sync, get_c_cleanup)]]
            else:
                raise Exception("this shouldn't be possible, please report this exception")
            (builder, block) = struct_variable_codeblocks(self.fgraph, variable, policy, id, symbol, sub)
            init_tasks.append((variable, 'init', id))
            init_blocks.append(builder)
            tasks.append((variable, 'get', (id + 1)))
            blocks.append(block)
            id += 2
        for (node_num, node) in enumerate(self.node_order):
            op = node.op
            if (not isinstance(op, CLinkerOp)):
                raise NotImplementedError(f'{op} cannot produce C code')
            sub = dict(failure_var=failure_var)
            try:
                params = op.get_params(node)
            except MethodNotDefined:
                params = NoParams
            if (params is not NoParams):
                # Symbol of the shared Constant that carries the op's params.
                params_var = symbol[self.node_params[params]]
            # The hash placeholder is substituted once the module hash is known.
            name = f'node_<<<<HASH_PLACEHOLDER>>>>_{node_num}'
            isyms = [symbol[r] for r in node.inputs]
            osyms = [symbol[r] for r in node.outputs]
            sub['id'] = id
            sub['fail'] = failure_code(sub)
            if (params is not NoParams):
                sub['params'] = params_var
            sub_struct = dict()
            sub_struct['id'] = (id + 1)
            sub_struct['fail'] = failure_code_init(sub)
            if (params is not NoParams):
                sub_struct['params'] = params_var
            c_support_code_apply.append(op.c_support_code_apply(node, name))
            c_init_code_apply.append(op.c_init_code_apply(node, name))
            struct_init = op.c_init_code_struct(node, name, sub_struct)
            struct_support = op.c_support_code_struct(node, name)
            struct_cleanup = op.c_cleanup_code_struct(node, name)
            behavior = op.c_code(node, name, isyms, osyms, sub)
            assert isinstance(behavior, str), f"{node.op} didn't return a string for c_code"
            # Tag the generated code with the op class for readability.
            behavior = ((('// Op class ' + node.op.__class__.__name__) + '\n') + behavior)
            cleanup = op.c_code_cleanup(node, name, isyms, osyms, sub)
            _logger.info(f'compiling un-versioned Apply {node}')
            blocks.append(CodeBlock('', behavior, cleanup, sub))
            tasks.append((node, 'code', id))
            id += 1
            init_blocks.append(CodeBlock(struct_support, struct_init, struct_cleanup, {'id': id}))
            init_tasks.append((node, 'init', id))
            id += 1
        # Constructor arguments of the struct: one storage cell per distinct
        # input/output/orphan variable.
        args = []
        args += [f'storage_{symbol[variable]}' for variable in uniq(((self.inputs + self.outputs) + self.orphans))]
        struct_name = f"__struct_compiled_op_{'<<<<HASH_PLACEHOLDER>>>>'}"
        struct_code = struct_gen(args, init_blocks, blocks, dict(failure_var=failure_var, name=struct_name))
        self.struct_code = struct_code
        self.struct_name = struct_name
        self.args = args
        self.r2symbol = symbol
        self.init_blocks = init_blocks
        self.init_tasks = init_tasks
        self.blocks = blocks
        self.tasks = tasks
        all_info = ((self.inputs + self.outputs) + self.orphans)
        self.c_support_code_apply = c_support_code_apply
        self.c_init_code_apply = c_init_code_apply
        # Sanity check: the task lists built here must match the cheaper
        # recomputation done by get_init_tasks(); dump both on mismatch.
        if ((self.init_tasks, self.tasks) != self.get_init_tasks()):
            print('init_tasks\n', self.init_tasks, file=sys.stderr)
            print(self.get_init_tasks()[0], file=sys.stderr)
            print('tasks\n', self.tasks, file=sys.stderr)
            print(self.get_init_tasks()[1], file=sys.stderr)
        assert ((self.init_tasks, self.tasks) == self.get_init_tasks())
        # Indices of duplicated entries among inputs/outputs/orphans.
        self.dupidx = [i for (i, x) in enumerate(all_info) if ((all_info.count(x) > 1) and (all_info.index(x) != i))]
        return self.struct_code
def support_code(self):
ret = []
if config.cmodule__debug:
ret.append('\n #ifndef DEBUG\n #define DEBUG\n #endif\n ')
for x in ([y.type for y in self.variables] + [y.op for y in self.node_order]):
support_code = x.c_support_code()
if isinstance(support_code, list):
ret.extend(support_code)
else:
ret.append(support_code)
return ret
def compile_args(self):
ret = ['-O3']
ret += ['-fno-math-errno', '-Wno-unused-label', '-Wno-unused-variable', '-Wno-write-strings']
c_compiler = self.c_compiler()
for x in ([y.type for y in self.variables] + [y.op for y in self.node_order]):
if isinstance(x, CLinkerObject):
ret += x.c_compile_args(c_compiler=c_compiler)
ret = uniq(ret)
ret += c_compiler.compile_args()
for x in ([y.type for y in self.variables] + [y.op for y in self.node_order]):
if isinstance(x, CLinkerObject):
no_comp = x.c_no_compile_args(c_compiler=c_compiler)
for i in no_comp:
try:
ret.remove(i)
except ValueError:
pass
return ret
def headers(self):
ret = []
c_compiler = self.c_compiler()
for x in ([y.type for y in self.variables] + [y.op for y in self.node_order]):
if isinstance(x, CLinkerObject):
ret += x.c_headers(c_compiler=c_compiler)
return uniq(ret)
def init_code(self):
ret = []
for x in ([y.type for y in self.variables] + [y.op for y in self.node_order]):
if isinstance(x, CLinkerObject):
ret += x.c_init_code()
return uniq(ret)
def c_compiler(self):
c_compiler = None
for x in ([y.type for y in self.variables] + [y.op for y in self.node_order]):
if hasattr(x, 'c_compiler'):
x_compiler = x.c_compiler()
else:
continue
if (c_compiler is None):
c_compiler = x_compiler
elif (x_compiler and (x_compiler != c_compiler)):
raise Exception('Nodes have requested specific different compilers', (c_compiler, x_compiler))
if (c_compiler is None):
return GCC_compiler
else:
return c_compiler
def header_dirs(self):
ret = []
c_compiler = self.c_compiler()
for x in ([y.type for y in self.variables] + [y.op for y in self.node_order]):
if isinstance(x, CLinkerObject):
ret += x.c_header_dirs(c_compiler=c_compiler)
return [r for r in uniq(ret) if r]
def libraries(self):
ret = []
c_compiler = self.c_compiler()
for x in ([y.type for y in self.variables] + [y.op for y in self.node_order]):
if isinstance(x, CLinkerObject):
ret += x.c_libraries(c_compiler=c_compiler)
return uniq(ret)
def lib_dirs(self):
ret = []
c_compiler = self.c_compiler()
for x in ([y.type for y in self.variables] + [y.op for y in self.node_order]):
if isinstance(x, CLinkerObject):
ret += x.c_lib_dirs(c_compiler=c_compiler)
return [r for r in uniq(ret) if r]
    def __compile__(self, input_storage=None, output_storage=None, storage_map=None, cache: Optional['ModuleCache']=None):
        """Compile the graph into a C thunk plus input/output Containers.

        Returns a 5-tuple: (thunk, module, input containers, output
        containers, error_storage).
        """
        # Slot the C thunk fills with (exc type, value, traceback) on failure.
        error_storage = [None, None, None]
        if (input_storage is None):
            input_storage = tuple(([None] for variable in self.inputs))
        if (output_storage is None):
            map = {}
            output_storage = []
            # An output that is also an input shares that input's storage cell.
            for (i, variable) in enumerate(self.inputs):
                map[variable] = input_storage[i]
            for variable in self.outputs:
                if (variable not in map):
                    map[variable] = [None]
                output_storage.append(map[variable])
        input_storage = tuple(input_storage)
        output_storage = tuple(output_storage)
        (thunk, module) = self.cthunk_factory(error_storage, input_storage, output_storage, storage_map, cache)
        # Output containers are read-only: users must not rebind them.
        return (thunk, module, [Container(input, storage) for (input, storage) in zip(self.fgraph.inputs, input_storage)], [Container(output, storage, readonly=True) for (output, storage) in zip(self.fgraph.outputs, output_storage)], error_storage)
def get_init_tasks(self):
init_tasks = []
tasks = []
id = 1
for v in self.variables:
if (v in self.consts):
continue
init_tasks.append((v, 'init', id))
tasks.append((v, 'get', (id + 1)))
id += 2
for node in self.node_order:
tasks.append((node, 'code', id))
init_tasks.append((node, 'init', (id + 1)))
id += 2
return (init_tasks, tasks)
def make_thunk(self, input_storage=None, output_storage=None, storage_map=None, cache: Optional['ModuleCache']=None, **kwargs):
(init_tasks, tasks) = self.get_init_tasks()
(cthunk, module, in_storage, out_storage, error_storage) = self.__compile__(input_storage, output_storage, storage_map, cache)
res = _CThunk(cthunk, init_tasks, tasks, error_storage, module)
res.nodes = self.node_order
return (res, in_storage, out_storage)
def cmodule_key(self):
return self.cmodule_key_(self.fgraph, self.no_recycling, compile_args=self.compile_args(), libraries=self.libraries(), header_dirs=self.header_dirs(), c_compiler=self.c_compiler())
def cmodule_key_variables(self, inputs, outputs, no_recycling, compile_args=None, libraries=None, header_dirs=None, insert_config_hash=True, c_compiler=None):
class FakeFunctionGraph():
def __init__(self, inputs, outputs):
self.inputs = inputs
self.outputs = outputs
self.clients = defaultdict(list)
def toposort(self):
return io_toposort(self.inputs, self.outputs)
fgraph = FakeFunctionGraph(inputs, outputs)
return self.cmodule_key_(fgraph, no_recycling, compile_args, libraries, header_dirs, insert_config_hash, c_compiler)
    def cmodule_key_(self, fgraph, no_recycling, compile_args=None, libraries=None, header_dirs=None, insert_config_hash=True, c_compiler=None):
        """Build the cache key (version, signature) for a compiled module.

        The signature captures everything that affects the generated C code:
        compile flags, libraries, header dirs, the numpy C-ABI version, the
        compiler version, the config hash, and a structural encoding of the
        graph (each op with the positional wiring of its inputs/outputs and
        the no_recycling flags). Returns None when a constant's signature is
        unhashable, and ((), sig) when any contributor lacks a cache version.
        """
        if (compile_args is None):
            compile_args = []
        if (libraries is None):
            libraries = []
        if (header_dirs is None):
            header_dirs = []
        order = self.schedule(fgraph)
        # Variables produced by nodes processed so far.
        fgraph_computed_set = set()
        # Graph inputs are identified structurally as (-1, position).
        fgraph_inputs_dict = {i: ((- 1), pos) for (pos, i) in enumerate(fgraph.inputs)}
        constant_ids = dict()
        op_pos = {}
        sig = ['CLinker.cmodule_key']
        # Sort flag/library/dir lists so ordering differences don't change
        # the key. (These `is not None` checks are always true after the
        # defaulting above.)
        if (compile_args is not None):
            args = sorted(compile_args)
            args = tuple(args)
            sig.append(args)
        if (libraries is not None):
            args = sorted(libraries)
            args = tuple(args)
            sig.append(args)
        if (header_dirs is not None):
            args = sorted(header_dirs)
            args = tuple(args)
            sig.append(args)
        # The private accessor moved between numpy modules in 1.16.
        if (np.lib.NumpyVersion(np.__version__) < '1.16.0a'):
            ndarray_c_version = np.core.multiarray._get_ndarray_c_version()
        else:
            ndarray_c_version = np.core._multiarray_umath._get_ndarray_c_version()
        sig.append(f'NPY_ABI_VERSION=0x{ndarray_c_version:X}')
        if c_compiler:
            sig.append(('c_compiler_str=' + c_compiler.version_str()))
        if insert_config_hash:
            sig.append(('md5:' + config.get_config_hash()))
        else:
            sig.append('md5: <omitted>')
        # Flag raised (via closure) when a constant signature is unhashable.
        error_on_play = [False]
        def in_sig(i, topological_pos, i_idx):
            # Structural id of input `i` of the node at `topological_pos`:
            # constants by (shared) signature, graph inputs by position,
            # intermediate results by (producer position, output index).
            if isinstance(i, AtomicVariable):
                if (id(i) not in constant_ids):
                    isig = (i.signature(), topological_pos, i_idx)
                    if hasattr(isig[0], 'pytensor_hash'):
                        isig = (isig[0].pytensor_hash(), topological_pos, i_idx)
                    try:
                        hash(isig)
                    except Exception:
                        error_on_play[0] = True
                        return None
                    constant_ids[id(i)] = isig
                else:
                    isig = constant_ids[id(i)]
            elif (i in fgraph_inputs_dict):
                isig = fgraph_inputs_dict[i]
            else:
                if (i.owner is None):
                    assert all((all(((out is not None) for out in o.outputs)) for o in order))
                    assert all(((input.owner is None) for input in fgraph.inputs))
                    raise Exception(f'Owner of {i} (clients {fgraph.clients.get(i)}) is None')
                if (i in fgraph.outputs):
                    # Also being a graph output is part of the identity.
                    isig = (op_pos[i.owner], i.owner.outputs.index(i), fgraph.outputs.index(i))
                else:
                    isig = (op_pos[i.owner], i.owner.outputs.index(i))
            return (isig, (i in no_recycling))
        version = []
        for (node_pos, node) in enumerate(order):
            # Per-node cache versions and op props feed the version tuple.
            if hasattr(node.op, 'c_code_cache_version_apply'):
                version.append(node.op.c_code_cache_version_apply(node))
            props = getattr(node.op, '__props__', None)
            if props:
                version.append(props)
            for i in node.inputs:
                if isinstance(i.type, CLinkerObject):
                    version.append(i.type.c_code_cache_version())
            for o in node.outputs:
                if isinstance(o.type, CLinkerObject):
                    version.append(o.type.c_code_cache_version())
            sig.append((node.op, tuple(((i.type, in_sig(i, node_pos, ipos)) for (ipos, i) in enumerate(node.inputs))), (1, tuple(((o in no_recycling) for o in node.outputs)))))
            if error_on_play[0]:
                # Some constant could not be hashed: no usable key.
                return None
            op_pos[node] = node_pos
            fgraph_computed_set.update(node.outputs)
        # Unused inputs still contribute to the signature.
        for (ipos, var) in [(i, var) for (i, var) in enumerate(fgraph.inputs) if (not len(fgraph.clients[var]))]:
            sig.append((var.type, in_sig(var, (- 1), ipos)))
        sig = tuple(sig)
        version = tuple(version)
        # Any missing version invalidates versioned caching entirely.
        for v in version:
            if (not v):
                return ((), sig)
        return (version, sig)
def get_src_code(self):
    """Return the generated C source code of this linker's dynamic module."""
    return self.get_dynamic_module().code()
def compile_cmodule(self, location=None):
    """Compile this linker's generated C module and return the loaded module.

    Parameters
    ----------
    location : str, optional
        Directory to compile into; a fresh workdir under the compiledir
        is created when omitted.
    """
    if location is None:
        location = dlimport_workdir(config.compiledir)
    dynamic_module = self.get_dynamic_module()
    compiler = self.c_compiler()
    # Compilation is serialized across processes sharing the compiledir.
    with lock_ctx():
        try:
            _logger.debug(f'LOCATION {location}')
            compiled = compiler.compile_str(
                module_name=dynamic_module.code_hash,
                src_code=dynamic_module.code(),
                location=location,
                include_dirs=self.header_dirs(),
                lib_dirs=self.lib_dirs(),
                libs=self.libraries(),
                preargs=self.compile_args(),
            )
        except Exception as e:
            # Attach the graph to the exception to ease debugging of
            # code-generation failures.
            e.args += (str(self.fgraph),)
            raise
    return compiled
def get_dynamic_module(self):
    """Return (building and caching on first use) the DynamicModule that
    bundles all generated C code for this graph.

    The module contains: per-op support code, the thunk-state struct, an
    executor/destructor pair for the PyCapsule, the ``instantiate`` entry
    point, headers, and init-time code blocks.  The order in which pieces
    are added determines the emitted source, so it must not change.
    """
    if (not hasattr(self, '_mod')):
        self.code_gen()
        mod = DynamicModule()
        # instantiate(...) receives the error storage plus one PyObject*
        # per argument (inputs + outputs + orphans).
        code = self.instantiate_code((1 + len(self.args)))
        instantiate = ExtFunction('instantiate', code, method=METH_VARARGS)
        # Executor trampoline and capsule destructor for the struct.
        static = '\n static int {struct_name}_executor({struct_name} *self) {{\n return self->run();\n }}\n\n static void {struct_name}_destructor(PyObject *capsule) {{\n {struct_name} *self = ({struct_name} *)PyCapsule_GetContext(capsule);\n delete self;\n }}\n '.format(struct_name=self.struct_name)
        # Support code first, then the struct and its helpers, then the
        # entry point — later code may reference earlier definitions.
        for support_code in (self.support_code() + self.c_support_code_apply):
            mod.add_support_code(support_code)
        mod.add_support_code(self.struct_code)
        mod.add_support_code(static)
        mod.add_function(instantiate)
        for header in self.headers():
            mod.add_include(header)
        for init_code_block in (self.init_code() + self.c_init_code_apply):
            mod.add_init_code(init_code_block)
        self._mod = mod
    return self._mod
def cthunk_factory(self, error_storage, in_storage, out_storage, storage_map=None, cache: Optional['ModuleCache']=None):
    """Instantiate the compiled C thunk for this graph.

    Compiles the C module (or fetches it from the module cache when a
    usable cache key exists), then calls its ``instantiate`` entry point
    with the error storage and one storage cell per input, output, and
    orphan.  Returns ``(cthunk, module)``.
    """
    try:
        key = self.cmodule_key()
    except KeyError:
        key = None
    if (key is None):
        # No usable cache key (e.g. unhashable constants): compile uncached.
        module = self.compile_cmodule()
    else:
        # Give ops a chance to specialize for the C backend before the
        # cached module is looked up / compiled.
        for node in self.node_order:
            node.op.prepare_node(node, storage_map, None, 'c')
        if (cache is None):
            cache = get_module_cache()
        module = cache.module_from_key(key=key, lnk=self)
    vars = ((self.inputs + self.outputs) + self.orphans)
    # A variable appearing more than once (e.g. both input and output)
    # must be passed to instantiate() only once: dupidx collects the
    # indices of every duplicate occurrence after the first.
    dupidx = [i for (i, x) in enumerate(vars) if ((vars.count(x) > 1) and (vars.index(x) != i))]
    # Output storage indices are offset by len(in_storage) within `vars`.
    out_storage = [x for (i, x) in enumerate(out_storage) if ((i + len(in_storage)) not in dupidx)]
    in_storage = [x for (i, x) in enumerate(in_storage) if (i not in dupidx)]
    if (storage_map is None):
        # Constants carry their data; other orphans get empty cells.
        orphd = [([orphan.data] if isinstance(orphan, Constant) else []) for orphan in self.orphans]
    else:
        orphd = [storage_map[orphan] for orphan in self.orphans]
    ret = module.instantiate(error_storage, *((in_storage + out_storage) + orphd))
    return (ret, module)
def instantiate_code(self, n_args):
    """Emit C source for the module's ``instantiate`` entry point.

    The generated function checks the argument tuple's arity, allocates
    and initializes the thunk struct, and returns it wrapped in a
    PyCapsule whose context pointer owns the struct (released by the
    capsule destructor).  ``n_args`` is the expected tuple size.
    """
    code = StringIO()
    struct_name = self.struct_name
    print('static PyObject * instantiate(PyObject * self, PyObject *argtuple) {', file=code)
    print(' assert(PyTuple_Check(argtuple));', file=code)
    # Arity check with a helpful error message.
    print((' if (%(n_args)i != PyTuple_Size(argtuple)){ ' % locals()), file=code)
    print((' PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected %(n_args)i, got %%i", (int)PyTuple_Size(argtuple));' % locals()), file=code)
    print(' return NULL;', file=code)
    print(' }', file=code)
    # Allocate the struct and feed it the argument tuple's items.
    print(f' {struct_name}* struct_ptr = new {struct_name}();', file=code)
    print(' if (struct_ptr->init(', ','.join((f'PyTuple_GET_ITEM(argtuple, {n})' for n in range(n_args))), ') != 0) {', file=code)
    print(' delete struct_ptr;', file=code)
    print(' return NULL;', file=code)
    print(' }', file=code)
    # Wrap the struct in a capsule; on any capsule failure, free it.
    print(f''' PyObject* thunk = PyCapsule_New((void*)(&{struct_name}_executor), NULL, {struct_name}_destructor);
 if (thunk != NULL && PyCapsule_SetContext(thunk, struct_ptr) != 0) {{
 PyErr_Clear();
 Py_DECREF(thunk);
 thunk = NULL;
 }}
 ''', file=code)
    print(' return thunk; }', file=code)
    return code.getvalue()
class Effect6669(BaseEffect):
    """Passive effect: applies a hull-HP bonus to drone and fighter HP
    pools and a CPU-output drawback to the ship."""

    type = 'passive'

    def handler(fit, src, context, projectionRange, **kwargs):
        # Drones receive the hull HP bonus on all three HP layers.
        for boosted_attr in ('armorHP', 'hp', 'shieldCapacity'):
            fit.drones.filteredItemBoost((lambda mod: mod.item.requiresSkill('Drones')), boosted_attr, src.getModifiedItemAttr('hullHpBonus'), **kwargs)
        # Fighters only receive the shield-capacity boost.
        fit.fighters.filteredItemBoost((lambda mod: mod.item.requiresSkill('Fighters')), 'shieldCapacity', src.getModifiedItemAttr('hullHpBonus'), **kwargs)
        # NOTE(review): 'drawback' is presumably a negative percentage that
        # reduces CPU output — confirm against the item's attribute data.
        fit.ship.boostItemAttr('cpuOutput', src.getModifiedItemAttr('drawback'), **kwargs)
class DeleteEdges(StateChanger):
    """State changer removing every (from_node, relation, to_node) edge
    produced by the configured enumerators, optionally together with the
    reversed edge."""

    def __init__(self, from_node: NodeEnumerator, relations, to_node: NodeEnumerator, delete_reverse=False):
        self.from_node = from_node
        self.relations = relations
        self.to_node = to_node
        # When True, (to_node, relation, from_node) is deleted as well.
        self.delete_reverse = delete_reverse

    def apply_changes(self, state: EnvironmentState, **kwargs):
        timer = TimeMeasurement.start('DeleteEdges')
        for source in self.from_node.enumerate(state):
            for relation in self.relations:
                for target in self.to_node.enumerate(state):
                    state.delete_edge(source, relation, target)
                    if self.delete_reverse:
                        state.delete_edge(target, relation, source)
        TimeMeasurement.stop(timer)
def test__calc_stats(detect_clearsky_helper_data):
    """Check rolling mean/max/slope statistics against hand-computed values."""
    (x, samples_per_window, sample_interval, H) = detect_clearsky_helper_data
    # Hand-computed rolling statistics for the helper fixture (window of 3);
    # the first two positions have no full window and are NaN.
    rolling_mean = pd.Series(np.array([np.nan, np.nan, 5, 14, 29, 50, 77]) / 3.0)
    rolling_max = pd.Series(np.array([np.nan, np.nan, 4, 9, 16, 25, 36]))
    diff_std = np.array([np.nan, np.nan] + [np.sqrt(2)] * 5)
    expected = {
        'mean': rolling_mean.shift(-1),
        'max': rolling_max.shift(-1),
        # Normalized slope std: per-window diff std over the window mean.
        'slope_nstd': (diff_std / rolling_mean).shift(-1),
        'slope': x.diff().shift(-1),
    }
    (res_mean, res_max, res_slope_nstd, res_slope) = clearsky._calc_stats(x, samples_per_window, sample_interval, H)
    assert_series_equal(res_mean, expected['mean'])
    assert_series_equal(res_max, expected['max'])
    assert_series_equal(res_slope_nstd, expected['slope_nstd'])
    assert_series_equal(res_slope, expected['slope'])
class TestClientTransactions(KazooTestCase):
    """Integration tests for kazoo's multi-op transaction API against a
    live ZooKeeper server (transactions require ZK >= 3.4)."""

    def setUp(self):
        KazooTestCase.setUp(self)
        # Skip on servers older than 3.4; prefer the CI-provided version,
        # otherwise query the live server.
        skip = False
        if (CI_ZK_VERSION and (CI_ZK_VERSION < (3, 4))):
            skip = True
        elif (CI_ZK_VERSION and (CI_ZK_VERSION >= (3, 4))):
            skip = False
        else:
            ver = self.client.server_version()
            if (ver[1] < 4):
                skip = True
        if skip:
            pytest.skip('Must use Zookeeper 3.4 or above')

    def test_basic_create(self):
        # Plain, ephemeral, and sequence creates in one transaction.
        t = self.client.transaction()
        t.create('/freddy')
        t.create('/fred', ephemeral=True)
        t.create('/smith', sequence=True)
        results = t.commit()
        assert (len(results) == 3)
        assert (results[0] == '/freddy')
        # Sequence nodes get a numeric suffix appended by the server.
        assert (results[2].startswith('/smith0') is True)

    def test_bad_creates(self):
        # Each argument tuple is invalid and must raise before commit.
        args_list = [(True,), ('/smith', 0), ('/smith', b'', 'bleh'), ('/smith', b'', None, 'fred'), ('/smith', b'', None, True, 'fred')]
        for args in args_list:
            with pytest.raises(TypeError):
                t = self.client.transaction()
                t.create(*args)

    def test_default_acl(self):
        # Creates inside a transaction honor the client's default_acl.
        from kazoo.security import make_digest_acl
        username = uuid.uuid4().hex
        password = uuid.uuid4().hex
        digest_auth = ('%s:%s' % (username, password))
        acl = make_digest_acl(username, password, all=True)
        self.client.add_auth('digest', digest_auth)
        self.client.default_acl = (acl,)
        t = self.client.transaction()
        t.create('/freddy')
        results = t.commit()
        assert (results[0] == '/freddy')

    def test_basic_delete(self):
        self.client.create('/fred')
        t = self.client.transaction()
        t.delete('/fred')
        results = t.commit()
        assert (results[0] is True)

    def test_bad_deletes(self):
        args_list = [(True,), ('/smith', 'woops')]
        for args in args_list:
            with pytest.raises(TypeError):
                t = self.client.transaction()
                t.delete(*args)

    def test_set(self):
        self.client.create('/fred', b'01')
        t = self.client.transaction()
        t.set_data('/fred', b'oops')
        t.commit()
        res = self.client.get('/fred')
        assert (res[0] == b'oops')

    def test_bad_sets(self):
        args_list = [(42, 52), ('/smith', False), ('/smith', b'', 'oops')]
        for args in args_list:
            with pytest.raises(TypeError):
                t = self.client.transaction()
                t.set_data(*args)

    def test_check(self):
        # A version check op passes when the node's version matches.
        self.client.create('/fred')
        version = self.client.get('/fred')[1].version
        t = self.client.transaction()
        t.check('/fred', version)
        t.create('/blah')
        results = t.commit()
        assert (results[0] is True)
        assert (results[1] == '/blah')

    def test_bad_checks(self):
        args_list = [(42, 52), ('/smith', 'oops')]
        for args in args_list:
            with pytest.raises(TypeError):
                t = self.client.transaction()
                t.check(*args)

    def test_bad_transaction(self):
        # A failing op rolls back the whole transaction; each result slot
        # carries the corresponding per-op error.
        from kazoo.exceptions import RolledBackError, NoNodeError
        t = self.client.transaction()
        t.create('/fred')
        t.delete('/smith')
        results = t.commit()
        assert (results[0].__class__ == RolledBackError)
        assert (results[1].__class__ == NoNodeError)

    def test_bad_commit(self):
        # Committing an already-committed transaction is an error.
        t = self.client.transaction()
        t.committed = True
        with pytest.raises(ValueError):
            t.commit()

    def test_bad_context(self):
        # Errors raised while building ops propagate out of the context.
        with pytest.raises(TypeError):
            with self.client.transaction() as t:
                t.check(4232)

    def test_context(self):
        # The context manager commits automatically on clean exit.
        with self.client.transaction() as t:
            t.create('/smith', b'32')
        assert (self.client.get('/smith')[0] == b'32')
def list_deltas(namespace: str, table_name: str, partition_values: Optional[List[Any]]=None, table_version: Optional[str]=None, first_stream_position: Optional[int]=None, last_stream_position: Optional[int]=None, ascending_order: Optional[bool]=None, include_manifest: bool=False, *args, **kwargs) -> ListResult[Delta]:
    """List a table partition's deltas, filtered to the half-open stream
    position range ``(first_stream_position, last_stream_position]``.

    Parameters
    ----------
    namespace, table_name, partition_values, table_version
        Identify the stream/partition to read deltas from.
    first_stream_position : int, optional
        Exclusive lower bound; ``None`` means unbounded.
    last_stream_position : int, optional
        Inclusive upper bound; ``None`` means unbounded.
    ascending_order : bool, optional
        Sort order by stream position.  ``None`` currently behaves like
        ``False`` (descending), matching historical behavior.
    include_manifest : bool
        When False, each returned delta's manifest is stripped.

    Returns
    -------
    ListResult[Delta]
        The matching deltas; empty when the stream does not exist.
    """
    stream = get_stream(namespace, table_name, table_version, *args, **kwargs)
    if stream is None:
        return ListResult.of([], None, None)
    partition = get_partition(stream.locator, partition_values, *args, **kwargs)
    all_deltas = list_partition_deltas(partition, *args, first_stream_position=first_stream_position, last_stream_position=last_stream_position, ascending_order=ascending_order, include_manifest=include_manifest, **kwargs).all_items()
    result = []
    for delta in all_deltas:
        # Explicit `is None` checks: the previous truthiness tests treated a
        # legitimate stream position of 0 as "no bound".
        after_first = (first_stream_position is None) or (first_stream_position < delta.stream_position)
        at_or_before_last = (last_stream_position is None) or (delta.stream_position <= last_stream_position)
        if after_first and at_or_before_last:
            result.append(delta)
            if not include_manifest:
                delta.manifest = None
    result.sort(reverse=(not ascending_order), key=(lambda d: d.stream_position))
    return ListResult.of(result, None, None)
@ti.data_oriented  # restored: the decorator was garbled to a stray "_oriented" token
class DenseTSDF(BaseMap):
    """Submap-based dense truncated signed distance field (TSDF) volume.

    Voxels live in sparse Taichi SNode trees (a submap axis over dense
    blocks).  Point clouds or depth images are fused into the currently
    active submap; individual submaps can later be fused into a single
    global map (``is_global_map=True``).

    NOTE(review): several methods below iterate Taichi fields or take
    ``ti.template()`` / ``ti.types.ndarray()`` parameters and were almost
    certainly decorated with ``@ti.kernel`` / ``@ti.func`` in the original
    source; those decorators appear to have been stripped during
    extraction — confirm against upstream before running.
    """

    def __init__(self, map_scale=None, voxel_scale=0.05, texture_enabled=False, max_disp_particles=(1024 * 1024), num_voxel_per_blk_axis=16, max_ray_length=10, min_ray_length=0.3, internal_voxels=10, max_submap_num=1024, is_global_map=False, disp_ceiling=1.8, disp_floor=(-0.3), recast_step=2, color_same_proj=True):
        """Create the map.

        Parameters
        ----------
        map_scale : [float, float], optional
            [xy extent, z extent] of the map in meters (default [10, 10]).
        voxel_scale : float
            Edge length of one voxel in meters.
        texture_enabled : bool
            Carry per-voxel RGB color when True.
        max_disp_particles : int
            Capacity of the export/visualization buffers.
        num_voxel_per_blk_axis : int
            Voxels per dense block along each axis.
        max_ray_length, min_ray_length : float
            Accepted depth measurement range in meters.
        internal_voxels : int
            Extra voxels integrated behind each surface hit.
        max_submap_num : int
            Maximum number of submaps (collapsed to 1 for global maps).
        is_global_map : bool
            Single fused global map vs. multi-submap local map.
        disp_ceiling, disp_floor : float
            Z clipping range for exported surface voxels.
        recast_step : int
            Pixel subsampling stride when recasting depth images.
        color_same_proj : bool
            Reuse the depth projection for color lookup when True.
        """
        if map_scale is None:
            # Replaces the original mutable-list default argument.
            map_scale = [10, 10]
        super().__init__(voxel_scale)
        self.map_size_xy = map_scale[0]
        self.map_size_z = map_scale[1]
        self.num_voxel_per_blk_axis = num_voxel_per_blk_axis
        self.voxel_scale = voxel_scale
        # Round voxel counts up to a whole number of blocks per axis...
        self.N = math.ceil(map_scale[0] / voxel_scale / num_voxel_per_blk_axis) * num_voxel_per_blk_axis
        self.Nz = math.ceil(map_scale[1] / voxel_scale / num_voxel_per_blk_axis) * num_voxel_per_blk_axis
        self.block_num_xy = math.ceil(map_scale[0] / voxel_scale / num_voxel_per_blk_axis)
        self.block_num_z = math.ceil(map_scale[1] / voxel_scale / num_voxel_per_blk_axis)
        # ...then re-derive the actual map extent from the rounded counts.
        self.map_size_xy = voxel_scale * self.N
        self.map_size_z = voxel_scale * self.Nz
        self.max_disp_particles = max_disp_particles
        self.enable_texture = texture_enabled
        self.max_ray_length = max_ray_length
        self.min_ray_length = min_ray_length
        # Voxels with |TSDF| below this threshold count as surface.
        self.tsdf_surface_thres = self.voxel_scale * 1.8
        self.internal_voxels = internal_voxels
        self.max_submap_num = max_submap_num
        self.is_global_map = is_global_map
        self.disp_ceiling = disp_ceiling
        self.disp_floor = disp_floor
        self.recast_step = recast_step
        self.color_same_proj = color_same_proj
        self.initialize_fields()
        print(f'TSDF map initialized blocks {self.block_num_xy}x{self.block_num_xy}x{self.block_num_z}')

    def initialize_fields(self):
        """Allocate export buffers, the per-frame point accumulation grid
        (PCL), the TSDF SNode trees, and submap bookkeeping."""
        self.num_export_particles = ti.field(dtype=ti.i32, shape=())
        self.num_TSDF_particles = ti.field(dtype=ti.i32, shape=())
        self.num_export_ESDF_particles = ti.field(dtype=ti.i32, shape=())
        self.export_x = ti.Vector.field(3, dtype=ti.f32, shape=self.max_disp_particles)
        self.export_color = ti.Vector.field(3, dtype=ti.f32, shape=self.max_disp_particles)
        self.export_TSDF = ti.field(dtype=ti.f32, shape=self.max_disp_particles)
        self.export_TSDF_xyz = ti.Vector.field(3, dtype=ti.f32, shape=self.max_disp_particles)
        # Center offset in voxel indices.
        self.NC_ = ti.Vector([(self.N // 2), (self.N // 2), (self.Nz // 2)], ti.i32)
        # Per-voxel accumulators for incoming points of the current frame.
        self.new_pcl_count = ti.field(dtype=ti.i32)
        self.new_pcl_sum_pos = ti.Vector.field(3, dtype=ti.f16)
        self.new_pcl_z = ti.field(dtype=ti.f16)
        # Size the accumulation grid to cover the max ray length.
        grp_block_num = max(int((3.2 * self.max_ray_length) / self.num_voxel_per_blk_axis / self.voxel_scale), 1)
        (self.PCL, self.PCLroot) = self.data_structures_grouped(grp_block_num, grp_block_num, self.num_voxel_per_blk_axis)
        offset = [((-self.num_voxel_per_blk_axis) * grp_block_num) // 2, ((-self.num_voxel_per_blk_axis) * grp_block_num) // 2, ((-self.num_voxel_per_blk_axis) * grp_block_num) // 2]
        self.PCL.place(self.new_pcl_count, self.new_pcl_sum_pos, self.new_pcl_z, offset=offset)
        self.slice_z = ti.field(dtype=ti.f16, shape=())
        self.initialize_sdf_fields()
        if self.enable_texture:
            self.new_pcl_sum_color = ti.Vector.field(3, dtype=ti.f16)
            self.PCL.place(self.new_pcl_sum_color, offset=offset)
        self.init_fields()
        self.initialize_submap_fields(self.max_submap_num)

    def initialize_sdf_fields(self):
        """Allocate the (submap, i, j, k) TSDF/weight/occupancy/color fields."""
        block_num_xy = self.block_num_xy
        block_num_z = self.block_num_z
        num_voxel_per_blk_axis = self.num_voxel_per_blk_axis
        submap_num = self.max_submap_num
        if self.is_global_map:
            # A global map holds exactly one fused submap.
            submap_num = 1
        # Center the voxel index range around the origin on each spatial axis.
        offset = [0, (-self.N) // 2, (-self.N) // 2, (-self.Nz) // 2]
        self.TSDF = ti.field(dtype=ti.f16)
        self.W_TSDF = ti.field(dtype=ti.f16)
        self.TSDF_observed = ti.field(dtype=ti.i8)
        self.occupy = ti.field(dtype=ti.i8)
        if self.enable_texture:
            self.color = ti.Vector.field(3, dtype=ti.f16)
        else:
            self.color = None
        (self.B, self.Broot) = self.data_structures(submap_num, block_num_xy, block_num_z, num_voxel_per_blk_axis)
        self.B.place(self.W_TSDF, self.TSDF, self.TSDF_observed, self.occupy, offset=offset)
        if self.enable_texture:
            self.B.place(self.color, offset=offset)
        # Bytes per voxel: f16 TSDF + f16 weight + i8 observed + i8 occupy
        # (+ 3 x f16 color when textured).
        self.mem_per_voxel = 2 + 2 + 1 + 1
        if self.enable_texture:
            self.mem_per_voxel += 6

    def data_structures(self, submap_num, block_num_xy, block_num_z, num_voxel_per_blk_axis):
        """Build the sparse (submap, block, voxel) SNode tree; returns (B, Broot)."""
        if num_voxel_per_blk_axis < 1:
            # Fixed: the original printed a message and exit(0) — a success
            # exit code on an error path; raise instead.
            raise ValueError('num_voxel_per_blk_axis must be >= 1')
        if self.is_global_map:
            Broot = ti.root.pointer(ti.ijkl, (1, block_num_xy, block_num_xy, block_num_z))
            B = Broot.dense(ti.ijkl, (1, num_voxel_per_blk_axis, num_voxel_per_blk_axis, num_voxel_per_blk_axis))
        else:
            # Extra pointer level over the submap axis for local maps.
            Broot = ti.root.pointer(ti.i, submap_num).pointer(ti.ijkl, (1, block_num_xy, block_num_xy, block_num_z))
            B = Broot.dense(ti.ijkl, (1, num_voxel_per_blk_axis, num_voxel_per_blk_axis, num_voxel_per_blk_axis))
        return (B, Broot)

    def data_structures_grouped(self, block_num_xy, block_num_z, num_voxel_per_blk_axis):
        """Build the 3D sparse tree for the per-frame accumulation grid."""
        if num_voxel_per_blk_axis > 1:
            Broot = ti.root.pointer(ti.ijk, (block_num_xy, block_num_xy, block_num_z))
            B = Broot.dense(ti.ijk, (num_voxel_per_blk_axis, num_voxel_per_blk_axis, num_voxel_per_blk_axis))
        else:
            B = ti.root.dense(ti.ijk, (block_num_xy, block_num_xy, block_num_z))
            Broot = B
        return (B, Broot)

    def init_fields(self):
        """Reset the export buffers to gray points far outside the map.

        NOTE(review): iterates a Taichi field — likely an ``@ti.kernel``.
        """
        for i in range(self.max_disp_particles):
            self.export_color[i] = ti.Vector([0.5, 0.5, 0.5], ti.f32)
            self.export_x[i] = ti.Vector([(-100000), (-100000), (-100000)], ti.f32)
            self.export_TSDF_xyz[i] = ti.Vector([(-100000), (-100000), (-100000)], ti.f32)

    def init_sphere(self):
        """Debug helper: write a small colored sphere SDF at the map center.

        NOTE(review): the float ranges only work inside an ``@ti.kernel``.
        """
        voxels = 30
        radius = self.voxel_scale * 3
        for i in range(((self.N / 2) - (voxels / 2)), ((self.N / 2) + (voxels / 2))):
            for j in range(((self.N / 2) - (voxels / 2)), ((self.N / 2) + (voxels / 2))):
                for k in range(((self.Nz / 2) - (voxels / 2)), ((self.Nz / 2) + (voxels / 2))):
                    p = self.ijk_to_xyz([i, j, k])
                    self.TSDF[(i, j, k)] = (p.norm() - radius)
                    self.TSDF_observed[(i, j, k)] = 1
                    self.color[(i, j, k)] = self.colormap[int(((((p[2] - 0.5) / radius) * 0.5) * 1024))]

    def is_unobserved(self, sijk):
        # A voxel is unobserved until its first TSDF update.
        return (self.TSDF_observed[sijk] == 0)

    def is_occupy(self, sijk):
        # Occupied when the TSDF value is below the surface threshold.
        occ2 = (self.TSDF[sijk] < self.tsdf_surface_thres)
        return occ2

    def recast_pcl_to_map(self, R, T, xyz_array, rgb_array):
        """Fuse one point cloud (sensor frame) given pose (R, T)."""
        self.PCLroot.deactivate_all()
        self.set_pose(R, T)
        self.recast_pcl_to_map_kernel(xyz_array, rgb_array)

    def recast_depth_to_map(self, R, T, depthmap, texture):
        """Fuse one depth image (and optional texture) given pose (R, T)."""
        self.PCLroot.deactivate_all()
        self.set_pose(R, T)
        self.recast_depth_to_map_kernel(depthmap, texture)

    def recast_pcl_to_map_kernel(self, xyz_array: ti.types.ndarray(), rgb_array: ti.types.ndarray()):
        """Rotate each point into the map frame, accumulate it, then fuse.

        NOTE(review): ndarray parameters imply this was an ``@ti.kernel``.
        """
        n = xyz_array.shape[0]
        for index in range(n):
            pt = ti.Vector([xyz_array[(index, 0)], xyz_array[(index, 1)], xyz_array[(index, 2)]], ti.f32)
            # Fixed: the matrix-multiply operator was lost in extraction
            # ("self.input_R[None] pt"); restore the rotation.
            pt = self.input_R[None] @ pt
            pt_length = pt.norm()
            if pt_length < ti.static(self.max_ray_length):
                if ti.static(self.enable_texture):
                    rgb = ti.Vector([rgb_array[(index, 0)], rgb_array[(index, 1)], rgb_array[(index, 2)]], ti.f16)
                    self.process_point(pt, pt.norm(), rgb)
                else:
                    self.process_point(pt, pt.norm())
        self.process_new_pcl()

    def recast_depth_to_map_kernel(self, depthmap: ti.types.ndarray(), texture: ti.types.ndarray()):
        """Unproject a (subsampled) depth image, accumulate, then fuse.

        NOTE(review): ndarray parameters imply this was an ``@ti.kernel``.
        """
        h = depthmap.shape[0]
        w = depthmap.shape[1]
        for jj in range(0, (h / ti.static(self.recast_step))):
            j = jj * ti.static(self.recast_step)
            for ii in range(0, (w / ti.static(self.recast_step))):
                i = ii * ti.static(self.recast_step)
                if depthmap[(j, i)] == 0:
                    continue
                # Depth values are millimeters; reject out-of-range readings.
                if (depthmap[(j, i)] > ti.static(self.max_ray_length * 1000)) or (depthmap[(j, i)] < ti.static(self.min_ray_length * 1000)):
                    continue
                dep = ti.cast(depthmap[(j, i)], ti.f32) / 1000.0
                pt = self.unproject_point_dep(i, j, dep)
                # Fixed: restore the lost matrix-multiply operator.
                pt_map = self.input_R[None] @ pt
                if ti.static(self.enable_texture):
                    if ti.static(self.color_same_proj):
                        color = [texture[(j, i, 0)], texture[(j, i, 1)], texture[(j, i, 2)]]
                        self.process_point(pt_map, dep, color)
                    else:
                        # Color camera has its own projection: remap indices.
                        (color_j, color_i) = self.color_ind_from_depth_pt(i, j, texture.shape[1], texture.shape[0])
                        color = [texture[(color_j, color_i, 0)], texture[(color_j, color_i, 1)], texture[(color_j, color_i, 2)]]
                        self.process_point(pt_map, dep, color)
                else:
                    self.process_point(pt_map, dep)
        self.process_new_pcl()

    def w_x_p(self, d, z):
        """TSDF fusion weight for signed distance d at measured depth z.

        Full 1/z^2 weight in front of the surface, linearly decaying
        within the truncation band, zero beyond it.
        """
        epi = ti.static(self.voxel_scale)
        theta = ti.static(self.voxel_scale * 4)
        ret = 0.0
        if d > ti.static(-epi):
            ret = 1.0 / (z * z)
        elif d > ti.static(-theta):
            ret = (d + theta) / ((z * z) * (theta - epi))
        return ret

    def process_point(self, pt, z, rgb=None):
        """Accumulate one map-frame point into its voxel cell (``@ti.func``)."""
        pti = self.xyz_to_ijk(pt)
        self.new_pcl_count[pti] += 1
        self.new_pcl_sum_pos[pti] += pt
        self.new_pcl_z[pti] += z
        if ti.static(self.enable_texture):
            self.new_pcl_sum_color[pti] += rgb

    def process_new_pcl(self):
        """Fuse the frame's accumulated points into the active submap by
        raycasting from the sensor through each averaged point."""
        submap_id = self.active_submap_id[None]
        for (i, j, k) in self.new_pcl_count:
            if self.new_pcl_count[(i, j, k)] == 0:
                continue
            c = ti.cast(self.new_pcl_count[(i, j, k)], ti.f16)
            # Averaged sensor-frame position and depth for this cell.
            pos_s2p = self.new_pcl_sum_pos[(i, j, k)] / c
            len_pos_s2p = pos_s2p.norm()
            d_s2p = pos_s2p / len_pos_s2p
            pos_p = pos_s2p + self.input_T[None]
            z = self.new_pcl_z[(i, j, k)] / c
            self.occupy[self.sxyz_to_ijk(submap_id, pos_p)] = 1
            # March from the sensor to slightly behind the hit point.
            ray_cast_voxels = ti.min(((len_pos_s2p / self.voxel_scale) + ti.static(self.internal_voxels)), (self.max_ray_length / self.voxel_scale))
            j_f = 0.0
            for _j in range(ray_cast_voxels):
                j_f += 1.0
                x_ = ((d_s2p * j_f) * self.voxel_scale) + self.input_T[None]
                xi = self.sxyz_to_ijk(submap_id, x_)
                v2p = pos_p - x_
                d_x_p = v2p.norm()
                # Signed distance: positive in front of the surface.
                d_x_p_s = d_x_p * sign(v2p.dot(pos_s2p))
                w_x_p = self.w_x_p(d_x_p, z)
                # Weighted running average of the TSDF, weight capped at Wmax.
                self.TSDF[xi] = (((self.TSDF[xi] * self.W_TSDF[xi]) + (w_x_p * d_x_p_s)) / (self.W_TSDF[xi] + w_x_p))
                self.TSDF_observed[xi] = 1
                self.W_TSDF[xi] = ti.min((self.W_TSDF[xi] + w_x_p), Wmax)
                if ti.static(self.enable_texture):
                    self.color[xi] = ((self.new_pcl_sum_color[(i, j, k)] / c) / 255.0)
            self.new_pcl_count[(i, j, k)] = 0

    def fuse_with_interploation(self, ijk, tsdf, w_tsdf, occ, color):
        """Blend one weighted TSDF sample into voxel `ijk` (``@ti.func``)."""
        w_new = w_tsdf + self.W_TSDF[ijk]
        self.TSDF[ijk] = (((self.W_TSDF[ijk] * self.TSDF[ijk]) + (w_tsdf * tsdf)) / w_new)
        if ti.static(self.enable_texture):
            self.color[ijk] = (((self.W_TSDF[ijk] * self.color[ijk]) + (w_tsdf * color)) / w_new)
        self.W_TSDF[ijk] = w_new
        self.TSDF_observed[ijk] = 1
        self.occupy[ijk] = self.occupy[ijk] + occ

    def fuse_submaps_kernel(self, num_submaps: ti.i32, TSDF: ti.template(), W_TSDF: ti.template(), TSDF_observed: ti.template(), OCC: ti.template(), COLOR: ti.template(), submaps_base_R_np: ti.types.ndarray(), submaps_base_T_np: ti.types.ndarray()):
        """Fuse every observed voxel of all submaps into this global map,
        distributing each sample over neighboring cells by trilinear weight."""
        # Load each submap's base pose into the Taichi-side fields.
        for s in range(num_submaps):
            for i in range(3):
                self.submaps_base_T[s][i] = submaps_base_T_np[(s, i)]
                for j in range(3):
                    self.submaps_base_R[s][(i, j)] = submaps_base_R_np[(s, i, j)]
        for (s, i, j, k) in TSDF:
            if TSDF_observed[(s, i, j, k)] > 0:
                xyz = self.submap_i_j_k_to_xyz(s, i, j, k)
                ijk = xyz / self.voxel_scale_
                ijk_ = ti.Vector([0, ijk[0], ijk[1], ijk[2]], ti.f32)
                ijk_low = ti.floor(ijk_, ti.i32)
                # NOTE(review): the (0,0,0) corner is skipped below, so the
                # trilinear weights do not sum to one — confirm whether the
                # base cell is handled elsewhere upstream.
                for di in ti.static(range(2)):
                    for dj in ti.static(range(2)):
                        for dk in ti.static(range(2)):
                            if ((di + dj) + dk) != 0:
                                coord = ijk_low + ti.Vector([0, di, dj, dk])
                                coord_f32 = ti.cast(coord, ti.f32)
                                weight = (((1 - ti.abs((coord_f32[1] - ijk[0]))) * (1 - ti.abs((coord_f32[2] - ijk[1])))) * (1 - ti.abs((coord_f32[3] - ijk[2]))))
                                if ti.static(self.enable_texture):
                                    self.fuse_with_interploation(coord, TSDF[(s, i, j, k)], (W_TSDF[(s, i, j, k)] * weight), OCC[(s, i, j, k)], COLOR[(s, i, j, k)])
                                else:
                                    self.fuse_with_interploation(coord, TSDF[(s, i, j, k)], (W_TSDF[(s, i, j, k)] * weight), OCC[(s, i, j, k)], 0)

    def reset(self):
        """Deactivate the whole voxel tree, clearing all submaps."""
        self.B.parent().deactivate_all()

    def fuse_submaps(self, submaps):
        """Clear this map and fuse all of `submaps`' content into it."""
        self.reset()
        t = time.time()
        self.fuse_submaps_kernel(submaps.active_submap_id[None], submaps.TSDF, submaps.W_TSDF, submaps.TSDF_observed, submaps.occupy, submaps.color, self.submaps_base_R_np, self.submaps_base_T_np)
        print(f'[DenseTSDF] Fuse submaps {((time.time() - t) * 1000):.1f}ms, active local: {submaps.active_submap_id[None]} remote: {submaps.remote_submap_num[None]}')

    def cvt_occupy_to_voxels(self):
        # Occupancy display is derived from the TSDF surface voxels.
        self.cvt_TSDF_surface_to_voxels()

    def cvt_TSDF_surface_to_voxels(self):
        """Export the active submap's surface voxels to the display buffers."""
        self.cvt_TSDF_surface_to_voxels_kernel(self.num_TSDF_particles, self.export_TSDF_xyz, self.export_color, self.max_disp_particles, False)

    def cvt_TSDF_surface_to_voxels_to(self, num_TSDF_particles, max_disp_particles, export_TSDF_xyz, export_color):
        """Append the surface voxels to externally supplied buffers."""
        self.cvt_TSDF_surface_to_voxels_kernel(num_TSDF_particles, export_TSDF_xyz, export_color, max_disp_particles, True)

    def clear_last_output(num, export_TSDF_xyz, export_color):
        """Reset previously exported voxels to the off-screen placeholder.

        NOTE(review): defined without ``self`` — probably a static
        ``@ti.kernel`` helper in the original source; confirm.
        """
        for i in range(num[None]):
            export_color[i] = ti.Vector([0.5, 0.5, 0.5], ti.f32)
            export_TSDF_xyz[i] = ti.Vector([(-100000), (-100000), (-100000)], ti.f32)
        num[None] = 0

    def cvt_TSDF_surface_to_voxels_kernel(self, num_TSDF_particles: ti.template(), export_TSDF_xyz: ti.template(), export_color: ti.template(), max_disp_particles: ti.template(), add_to_cur: ti.template()):
        """Collect surface voxels (|TSDF| < threshold) of the active submap
        into the export buffers, clipped to the display Z range."""
        if not add_to_cur:
            num_TSDF_particles[None] = 0
        (disp_floor, disp_ceiling) = ti.static(self.disp_floor, self.disp_ceiling)
        for (s, i, j, k) in self.TSDF:
            if s == self.active_submap_id[None]:
                if self.TSDF_observed[(s, i, j, k)] == 1:
                    if ti.abs(self.TSDF[(s, i, j, k)]) < self.tsdf_surface_thres:
                        xyz = ti.Vector([0.0, 0.0, 0.0], ti.f32)
                        if ti.static(self.is_global_map):
                            xyz = self.i_j_k_to_xyz(i, j, k)
                        else:
                            xyz = self.submap_i_j_k_to_xyz(s, i, j, k)
                        if (xyz[2] > disp_ceiling) or (xyz[2] < disp_floor):
                            continue
                        index = ti.atomic_add(num_TSDF_particles[None], 1)
                        if num_TSDF_particles[None] < max_disp_particles:
                            if ti.static(self.enable_texture):
                                export_color[index] = self.color[(s, i, j, k)]
                                export_TSDF_xyz[index] = xyz
                            else:
                                # No texture: color by height instead.
                                export_color[index] = self.color_from_colomap(xyz[2], disp_floor, disp_ceiling)
                                export_TSDF_xyz[index] = xyz

    def cvt_TSDF_to_voxels_slice_kernel(self, dz: ti.template(), clear_last: ti.template()):
        """Export voxels in a horizontal slice of thickness 2*dz around the
        stored slice_z height, colored by TSDF value."""
        z = self.slice_z[None]
        _index = int(z / self.voxel_scale)
        if clear_last:
            self.num_TSDF_particles[None] = 0
        for (s, i, j, k) in self.TSDF:
            if s == self.active_submap_id[None]:
                if self.TSDF_observed[(s, i, j, k)] > 0:
                    if (_index - dz) < k < (_index + dz):
                        index = ti.atomic_add(self.num_TSDF_particles[None], 1)
                        if self.num_TSDF_particles[None] < self.max_disp_particles:
                            self.export_TSDF[index] = self.TSDF[(s, i, j, k)]
                            if ti.static(self.is_global_map):
                                self.export_TSDF_xyz[index] = self.i_j_k_to_xyz(i, j, k)
                            else:
                                self.export_TSDF_xyz[index] = self.submap_i_j_k_to_xyz(s, i, j, k)
                            self.export_color[index] = self.color_from_colomap(self.TSDF[(s, i, j, k)], (-0.5), 0.5)

    def cvt_TSDF_to_voxels_slice(self, z, dz=0.5, clear_last=True):
        """Export a horizontal TSDF slice at height z (meters)."""
        self.slice_z[None] = z
        self.cvt_TSDF_to_voxels_slice_kernel(dz, clear_last)

    def get_voxels_occupy(self):
        """Return (positions, colors) of occupancy voxels.

        NOTE(review): ``get_occupy_to_voxels`` is not defined in this class
        — presumably provided by BaseMap; confirm (``cvt_occupy_to_voxels``
        exists here and may be the intended call).
        """
        self.get_occupy_to_voxels()
        return (self.export_x.to_numpy(), self.export_color.to_numpy())

    def get_voxels_TSDF_surface(self):
        """Return (xyz, tsdf, color-or-None) arrays for the surface voxels."""
        self.cvt_TSDF_surface_to_voxels()
        if self.enable_texture:
            return (self.export_TSDF_xyz.to_numpy(), self.export_TSDF.to_numpy(), self.export_color.to_numpy())
        else:
            return (self.export_TSDF_xyz.to_numpy(), self.export_TSDF.to_numpy(), None)

    def get_voxels_TSDF_slice(self, z):
        """Return (xyz, tsdf) arrays for a horizontal slice at height z."""
        self.cvt_TSDF_to_voxels_slice(z)
        # Fixed: the original returned self.export_ESDF_xyz, which is never
        # defined anywhere in this class; the slice kernel fills
        # export_TSDF_xyz.
        return (self.export_TSDF_xyz.to_numpy(), self.export_TSDF.to_numpy())

    def finalization_current_submap(self):
        # Nothing to finalize for the dense TSDF representation.
        pass

    def count_active_func(self):
        """Count observed voxels in the active submap (``@ti.kernel`` body)."""
        count = 0
        for (s, i, j, k) in self.TSDF:
            if s == self.active_submap_id[None]:
                if self.TSDF_observed[(s, i, j, k)] > 0:
                    ti.atomic_add(count, 1)
        return count

    def count_active(self) -> ti.i32:
        """Return the number of observed voxels in the active submap."""
        return self.count_active_func()

    def to_numpy(self, data_indices: ti.types.ndarray(element_dim=1), data_tsdf: ti.types.ndarray(), data_wtsdf: ti.types.ndarray(), data_occ: ti.types.ndarray(), data_color: ti.types.ndarray()):
        """Dump the active submap's observed voxels into preallocated arrays.

        Order is nondeterministic (parallel atomic counter); pair entries
        by position across the output arrays.
        """
        count = 0
        for (s, i, j, k) in self.TSDF:
            if s == self.active_submap_id[None]:
                if self.TSDF_observed[(s, i, j, k)] > 0:
                    _count = ti.atomic_add(count, 1)
                    data_indices[_count] = [i, j, k]
                    data_tsdf[_count] = self.TSDF[(s, i, j, k)]
                    data_wtsdf[_count] = self.W_TSDF[(s, i, j, k)]
                    data_occ[_count] = self.occupy[(s, i, j, k)]
                    if ti.static(self.enable_texture):
                        data_color[(_count, 0)] = self.color[(s, i, j, k)][0]
                        data_color[(_count, 1)] = self.color[(s, i, j, k)][1]
                        data_color[(_count, 2)] = self.color[(s, i, j, k)][2]

    def load_numpy(self, submap_id: ti.i32, data_indices: ti.types.ndarray(element_dim=1), data_tsdf: ti.types.ndarray(), data_wtsdf: ti.types.ndarray(), data_occ: ti.types.ndarray(), data_color: ti.types.ndarray()):
        """Load voxel arrays (as produced by to_numpy) into submap `submap_id`."""
        for i in range(data_tsdf.shape[0]):
            ind = data_indices[i]
            sijk = (submap_id, ind[0], ind[1], ind[2])
            self.TSDF[sijk] = data_tsdf[i]
            self.W_TSDF[sijk] = data_wtsdf[i]
            self.occupy[sijk] = data_occ[i]
            if ti.static(self.enable_texture):
                self.color[sijk][0] = data_color[(i, 0)]
                self.color[sijk][1] = data_color[(i, 1)]
                self.color[sijk][2] = data_color[(i, 2)]
            self.TSDF_observed[sijk] = 1

    def export_submap(self):
        """Serialize the active submap's voxels and metadata into a dict."""
        s = time.time()
        num = self.count_active()
        indices = np.zeros((num, 3), np.int16)
        tsdf = np.zeros(num, np.float16)
        w_tsdf = np.zeros(num, np.float16)
        occupy = np.zeros(num, np.int8)
        if self.enable_texture:
            color = np.zeros((num, 3), np.float16)
        else:
            color = np.array([])
        self.to_numpy(indices, tsdf, w_tsdf, occupy, color)
        obj = {'indices': indices, 'TSDF': tsdf, 'W_TSDF': w_tsdf, 'color': color, 'occupy': occupy, 'map_scale': [self.map_size_xy, self.map_size_z], 'voxel_scale': self.voxel_scale, 'texture_enabled': self.enable_texture, 'num_voxel_per_blk_axis': self.num_voxel_per_blk_axis}
        print(f'Export submap {self.active_submap_id[None]} to numpy, voxels {(num / 1024):.1f}k, time: {(1000 * (time.time() - s)):.1f}ms')
        return obj

    def saveMap(self, filename):
        """Save the active submap to `filename` via numpy pickle."""
        obj = self.export_submap()
        np.save(filename, obj)

    @staticmethod
    def loadMap(filename):
        """Load a map saved by saveMap() into a new global DenseTSDF.

        Added @staticmethod: the method takes no ``self`` and is used as a
        factory (``DenseTSDF.loadMap(path)``).
        """
        obj = np.load(filename, allow_pickle=True).item()
        TSDF = obj['TSDF']
        W_TSDF = obj['W_TSDF']
        color = obj['color']
        indices = obj['indices']
        occupy = obj['occupy']
        mapping = DenseTSDF(map_scale=obj['map_scale'], voxel_scale=obj['voxel_scale'], texture_enabled=obj['texture_enabled'], num_voxel_per_blk_axis=obj['num_voxel_per_blk_axis'], is_global_map=True)
        mapping.load_numpy(0, indices, TSDF, W_TSDF, occupy, color)
        # Fixed: the log message previously printed a scrubbed "(unknown)"
        # placeholder instead of the source path.
        print(f'[SubmapMapping] Loaded {TSDF.shape[0]} voxels from {filename}')
        return mapping

    def input_remote_submap(self, submap):
        """Install a submap received from a remote agent.

        Remote submaps are stored from the top of the submap index range
        downward so they never collide with locally created submaps.
        Returns the assigned submap index.
        """
        self.remote_submap_num[None] = self.remote_submap_num[None] + 1
        idx = self.max_submap_num - self.remote_submap_num[None]
        tsdf = submap['TSDF']
        w_tsdf = submap['W_TSDF']
        indices = submap['indices']
        occupy = submap['occupy']
        (R, T) = submap['pose']
        if self.enable_texture:
            color = submap['color']
        else:
            color = np.array([])
        self.load_numpy(idx, indices, tsdf, w_tsdf, occupy, color)
        self.set_base_pose_submap(idx, R, T)
        return idx
class CheckDummiesTester(unittest.TestCase):
    """Exercises the repo-check utilities: comment/docstring stripping,
    temporary git checkouts, and module dependency discovery."""

    def test_clean_code(self):
        # Triple-quoted docstrings and long strings are removed entirely.
        double_quoted = '"""\nDocstring\n"""\ncode\n"""Long string"""\ncode\n'
        self.assertEqual(clean_code(double_quoted), 'code\ncode')
        single_quoted = "'''\nDocstring\n'''\ncode\n'''Long string'''\ncode\n'''"
        self.assertEqual(clean_code(single_quoted), 'code\ncode')
        # Full-line comments disappear; an inline comment leaves the space
        # preceding it behind.
        self.assertEqual(clean_code('code\n# Comment\ncode'), 'code\ncode')
        self.assertEqual(clean_code('code # inline comment\ncode'), 'code \ncode')

    def test_checkout_commit(self):
        repo = Repo(git_repo_path)
        # Before the context we are not on the test commit...
        self.assertNotEqual(repo.head.commit.hexsha, GIT_TEST_SHA)
        with checkout_commit(repo, GIT_TEST_SHA):
            # ...inside it we are...
            self.assertEqual(repo.head.commit.hexsha, GIT_TEST_SHA)
        # ...and the original checkout is restored on exit.
        self.assertNotEqual(repo.head.commit.hexsha, GIT_TEST_SHA)

    def test_get_module_dependencies(self):
        bert_module = os.path.join(transformers_path, 'models', 'bert', 'modeling_bert.py')
        relative_deps = ['activations.py', 'modeling_outputs.py', 'modeling_utils.py', 'pytorch_utils.py', 'models/bert/configuration_bert.py']
        expected_deps = {os.path.join(transformers_path, rel) for rel in relative_deps}
        repo = Repo(git_repo_path)
        # Pin the repo to a known commit so the dependency set is stable.
        with checkout_commit(repo, GIT_TEST_SHA):
            found = get_module_dependencies(bert_module)
        found = {os.path.expanduser(path) for path in found}
        self.assertEqual(found, expected_deps)
@pytest.mark.parametrize('found_file', ['build/pdf.js', 'build/pdf.mjs'])
def test_get_pdfjs_js_path(found_file: str, monkeypatch: pytest.MonkeyPatch):
    """get_pdfjs_js_path returns whichever bundled pdf.js file exists.

    The decorator had been garbled to a bare ``.parametrize(...)`` line;
    restored to ``@pytest.mark.parametrize``.
    """

    def fake_pdfjs_res(requested):
        # Succeed only for the parametrized filename; report every other
        # candidate as missing.
        if requested.endswith(found_file):
            return
        raise pdfjs.PDFJSNotFound(requested)

    monkeypatch.setattr(pdfjs, 'get_pdfjs_res', fake_pdfjs_res)
    assert pdfjs.get_pdfjs_js_path() == found_file
class UniformSuperpostionIJFirstQuantization(Bloq):
    """Uniform superposition over two registers i, j indexing eta particles.

    Attributes:
        eta: number of electrons; each register holds ceil(log2(eta)) qubits.
        num_bits_rot_aa: bits of precision for the amplitude-amplification rotation.
        adjoint: whether this instance represents the adjoint bloq.

    NOTE(review): the bare attribute annotations suggest this was an
    attrs/dataclass-style Bloq whose class decorator was stripped in
    extraction — confirm against the upstream definition.
    """
    eta: int
    num_bits_rot_aa: int
    adjoint: int = False

    # Fix: the original line here read "_property" — an extraction-mangled
    # decorator (likely "@cached_property"). Without it, `signature` is a
    # plain method and the Bloq protocol (which accesses it as an attribute)
    # breaks. A plain @property preserves the interface.
    @property
    def signature(self) -> Signature:
        # Each register needs enough qubits to index eta values.
        n_eta = (self.eta - 1).bit_length()
        return Signature.build(i=n_eta, j=n_eta)

    def build_call_graph(self, ssa: 'SympySymbolAllocator') -> Set['BloqCountT']:
        # Toffoli count 7*n_eta + 4*b_r - 18; presumably from the
        # first-quantization resource-estimate paper — TODO confirm reference.
        n_eta = (self.eta - 1).bit_length()
        return {(Toffoli(), (((7 * n_eta) + (4 * self.num_bits_rot_aa)) - 18))}
class VGG(nn.Module):
    """VGG-style classifier: a conv feature extractor, a fixed 7x7 adaptive
    average pool, and a three-layer fully-connected head.

    Args:
        features: module mapping images to a (N, 512, H, W) feature map.
        num_classes: size of the final logits layer.
        init_weights: apply the standard VGG initialization when True.
    """

    def __init__(self, features, num_classes=10, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        feature_map = self.features(x)
        pooled = self.avgpool(feature_map)
        flattened = torch.flatten(pooled, 1)
        return self.classifier(flattened)

    def _initialize_weights(self):
        # He init for convs, unit scale/zero shift for BN, small Gaussian for
        # linear layers — the standard torchvision VGG recipe.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
class DummySparseLabelOp(SparseLabelOp):
    """Minimal concrete SparseLabelOp used purely as a test double: every
    abstract hook is stubbed out with a no-op or identity implementation.

    NOTE(review): several methods take ``cls`` and in the upstream base class
    are declared with @classmethod (and ``register_length`` with @property);
    those decorators appear to have been stripped in extraction — confirm.
    """

    def register_length(self) -> (int | None):
        # No register constraint for the dummy op.
        return None

    def _new_instance(self, data: Mapping[(str, complex)], *, other: (SparseLabelOp | None)=None) -> SparseLabelOp:
        # Rebuild without copying; `other` is ignored by the dummy.
        return self.__class__(data, copy=False)

    def _validate_keys(self, keys: Collection[str]) -> None:
        # Dummy accepts any keys.
        pass

    def _validate_polynomial_tensor_key(cls, keys: Collection[str]) -> None:
        pass

    def from_polynomial_tensor(cls, tensor: PolynomialTensor) -> SparseLabelOp:
        pass

    def terms(self) -> Iterator[tuple[(list[tuple[(str, int)]], complex)]]:
        pass

    def from_terms(cls, terms: Sequence[tuple[(list[tuple[(str, int)]], _TCoeff)]]) -> SparseLabelOp:
        pass

    def _permute_term(self, term: list[tuple[(str, int)]], permutation: Sequence[int]) -> list[tuple[(str, int)]]:
        pass

    # The algebraic operations below are identities: the dummy op is closed
    # under every operation and returns itself unchanged.
    def transpose(self) -> SparseLabelOp:
        return self

    def compose(self, other, qargs=None, front=False) -> SparseLabelOp:
        return self

    def tensor(self, other) -> SparseLabelOp:
        return self

    def expand(self, other) -> SparseLabelOp:
        return self

    def simplify(self, atol: (float | None)=None) -> SparseLabelOp:
        return self
def _conv2d_wrapper(x, w, stride=1, padding=0, groups=1, transpose=False, flip_weight=True):
    """Dispatch to gradfix conv2d/conv_transpose2d, optionally flipping the kernel.

    flip_weight=False asks for true convolution semantics, which requires
    spatially flipping `w` before calling the (cross-correlation) conv op.
    """
    _out_channels, _in_channels_per_group, kh, kw = _get_weight_shape(w)
    # Flipping a 1x1 kernel is a no-op, so it is skipped for speed.
    if not flip_weight and (kw > 1 or kh > 1):
        w = w.flip([2, 3])
    if transpose:
        return conv2d_gradfix.conv_transpose2d(x, w, stride=stride, padding=padding, groups=groups)
    return conv2d_gradfix.conv2d(x, w, stride=stride, padding=padding, groups=groups)
class TestExtensions():
    """x509 Extensions container behavior: lookup, iteration, indexing, errors.

    All fixtures are PEM certificates loaded via the module-level ``_load_cert``.
    """

    def test_no_extensions(self, backend):
        # A cert without extensions yields an empty, iterable container and
        # a typed ExtensionNotFound carrying the requested OID.
        cert = _load_cert(os.path.join('x509', 'verisign_md2_root.pem'), x509.load_pem_x509_certificate)
        ext = cert.extensions
        assert (len(ext) == 0)
        assert (list(ext) == [])
        with pytest.raises(x509.ExtensionNotFound) as exc:
            ext.get_extension_for_oid(ExtensionOID.BASIC_CONSTRAINTS)
        assert (exc.value.oid == ExtensionOID.BASIC_CONSTRAINTS)

    def test_one_extension(self, backend):
        cert = _load_cert(os.path.join('x509', 'custom', 'basic_constraints_not_critical.pem'), x509.load_pem_x509_certificate)
        ext = cert.extensions.get_extension_for_class(x509.BasicConstraints)
        assert (ext is not None)
        assert (ext.value.ca is False)

    def test_duplicate_extension(self, backend):
        # Two BasicConstraints in one cert must raise DuplicateExtension
        # as soon as the extensions property is accessed.
        cert = _load_cert(os.path.join('x509', 'custom', 'two_basic_constraints.pem'), x509.load_pem_x509_certificate)
        with pytest.raises(x509.DuplicateExtension) as exc:
            cert.extensions
        assert (exc.value.oid == ExtensionOID.BASIC_CONSTRAINTS)

    def test_unsupported_critical_extension(self, backend):
        # Unknown OIDs surface as UnrecognizedExtension with raw DER bytes,
        # even when marked critical.
        cert = _load_cert(os.path.join('x509', 'custom', 'unsupported_extension_critical.pem'), x509.load_pem_x509_certificate)
        ext = cert.extensions.get_extension_for_oid(x509.ObjectIdentifier('1.2.3.4'))
        assert isinstance(ext.value, x509.UnrecognizedExtension)
        assert (ext.value.value == b'value')

    def test_unsupported_extension(self, backend):
        cert = _load_cert(os.path.join('x509', 'custom', 'unsupported_extension_2.pem'), x509.load_pem_x509_certificate)
        extensions = cert.extensions
        assert (len(extensions) == 2)
        assert (extensions[0].critical is False)
        assert (extensions[0].oid == x509.ObjectIdentifier('1.3.6.1.4.1.41482.2'))
        assert (extensions[0].value == x509.UnrecognizedExtension(x509.ObjectIdentifier('1.3.6.1.4.1.41482.2'), b'1.3.6.1.4.1.41482.1.2'))
        assert (extensions[1].critical is False)
        assert (extensions[1].oid == x509.ObjectIdentifier('1.3.6.1.4.1.45724.2.1.1'))
        assert (extensions[1].value == x509.UnrecognizedExtension(x509.ObjectIdentifier('1.3.6.1.4.1.45724.2.1.1'), b'\x03\x02\x040'))

    def test_no_extensions_get_for_class(self, backend):
        cert = _load_cert(os.path.join('x509', 'cryptography.io.pem'), x509.load_pem_x509_certificate)
        exts = cert.extensions
        with pytest.raises(x509.ExtensionNotFound) as exc:
            exts.get_extension_for_class(x509.IssuerAlternativeName)
        assert (exc.value.oid == ExtensionOID.ISSUER_ALTERNATIVE_NAME)

    def test_unrecognized_extension_for_class(self):
        # get_extension_for_class refuses UnrecognizedExtension (no single OID).
        exts = x509.Extensions([])
        with pytest.raises(TypeError):
            exts.get_extension_for_class(x509.UnrecognizedExtension)

    def test_indexing(self, backend):
        # The container supports negative indices and slicing like a sequence.
        cert = _load_cert(os.path.join('x509', 'cryptography.io.pem'), x509.load_pem_x509_certificate)
        exts = cert.extensions
        assert (exts[(- 1)] == exts[7])
        assert (exts[2:6:2] == [exts[2], exts[4]])

    def test_one_extension_get_for_class(self, backend):
        cert = _load_cert(os.path.join('x509', 'custom', 'basic_constraints_not_critical.pem'), x509.load_pem_x509_certificate)
        ext = cert.extensions.get_extension_for_class(x509.BasicConstraints)
        assert (ext is not None)

    def test_repr(self, backend):
        cert = _load_cert(os.path.join('x509', 'custom', 'basic_constraints_not_critical.pem'), x509.load_pem_x509_certificate)
        assert (repr(cert.extensions) == '<Extensions([<Extension(oid=<ObjectIdentifier(oid=2.5.29.19, name=basicConstraints)>, critical=False, value=<BasicConstraints(ca=False, path_length=None)>)>])>')
def batch_by_size(indices, num_tokens_fn, max_tokens=None, max_sentences=None, required_batch_size_multiple=1, distributed=False):
    """Group ordered sample indices into batches.

    A batch is closed when adding one more sample would exceed `max_tokens`
    (counted as batch_size * longest_sample) or `max_sentences`. Closed
    batches are trimmed to a multiple of `required_batch_size_multiple`
    when possible; the remainder seeds the next batch.

    Args:
        indices: iterable of sample indices (a generator is materialized).
        num_tokens_fn: callable mapping an index to that sample's token count.
        max_tokens: token budget per batch (None = unlimited).
        max_sentences: sample budget per batch (None = unlimited).
        required_batch_size_multiple: preferred batch-size divisor.
        distributed: unused; kept for interface compatibility.

    Returns:
        list of lists of indices.
    """
    max_tokens = sys.maxsize if max_tokens is None else max_tokens
    max_sentences = sys.maxsize if max_sentences is None else max_sentences
    mult = required_batch_size_multiple
    if isinstance(indices, types.GeneratorType):
        indices = np.fromiter(indices, dtype=np.int64, count=-1)
    longest = 0        # longest sample length in the current batch
    lengths = []       # per-sample lengths of the current batch
    current = []
    batches = []
    for idx in indices:
        token_count = num_tokens_fn(idx)
        lengths.append(token_count)
        longest = max(longest, token_count)
        assert longest <= max_tokens, 'sentence at index {} of size {} exceeds max_tokens limit of {}!'.format(idx, longest, max_tokens)
        # Cost if this sample were added: every slot pays for the longest.
        projected = (len(current) + 1) * longest
        if _is_batch_full(current, projected, max_tokens, max_sentences):
            # Prefer cutting at a multiple of `mult`; otherwise keep the
            # remainder-sized head.
            cut = max(mult * (len(current) // mult), len(current) % mult)
            batches.append(current[:cut])
            current = current[cut:]
            lengths = lengths[cut:]
            longest = max(lengths) if lengths else 0
        current.append(idx)
    if current:
        batches.append(current)
    return batches
class ScientificInput(Input, QtWidgets.QDoubleSpinBox):
    """Double spin box that accepts scientific notation (e.g. 1.5e-3) and
    optionally displays a unit suffix taken from the bound parameter.
    """

    def __init__(self, parameter, parent=None, **kwargs):
        super().__init__(parameter=parameter, parent=parent, **kwargs)
        # Step buttons are only shown when the parameter defines a step size.
        if parameter.step:
            self.setButtonSymbols(QtWidgets.QAbstractSpinBox.ButtonSymbols.UpDownArrows)
            self.setSingleStep(parameter.step)
            self.setEnabled(True)
        else:
            self.setButtonSymbols(QtWidgets.QAbstractSpinBox.ButtonSymbols.NoButtons)

    def set_parameter(self, parameter):
        # Rebuild the validator and spin-box range from the parameter's
        # minimum/maximum/decimals before delegating to the base class.
        self._parameter = parameter
        self.validator = QtGui.QDoubleValidator(parameter.minimum, parameter.maximum, parameter.decimals, self)
        self.setDecimals(parameter.decimals)
        self.setMinimum(parameter.minimum)
        self.setMaximum(parameter.maximum)
        self.validator.setNotation(QtGui.QDoubleValidator.Notation.ScientificNotation)
        super().set_parameter(parameter)

    def validate(self, text, pos):
        # Strip the trailing " <units>" suffix before numeric validation,
        # then re-append it to the normalized text.
        if self._parameter.units:
            text = text[:(- (len(self._parameter.units) + 1))]
            result = self.validator.validate(text, pos)
            return (result[0], (result[1] + (' %s' % self._parameter.units)), result[2])
        else:
            return self.validator.validate(text, pos)

    def fixCase(self, text):
        # NOTE(review): `toLower()` is a QString-era API; Python str has
        # `.lower()` — verify this path is reachable under current bindings.
        self.lineEdit().setText(text.toLower())

    def valueFromText(self, text):
        # Parse the displayed text back to a float, dropping the unit suffix;
        # fall back to the parameter default on parse failure.
        try:
            if self._parameter.units:
                return float(str(text)[:(- (len(self._parameter.units) + 1))])
            else:
                return float(str(text))
        except ValueError:
            return self._parameter.default

    def textFromValue(self, value):
        # Compact scientific rendering: drop "e+" plus sign and leading
        # exponent zeros (1e+05 -> 1e5).
        string = f'{value:g}'.replace('e+', 'e')
        string = re.sub('e(-?)0*(\\d+)', 'e\\1\\2', string)
        return string

    def stepEnabled(self):
        # Both arrows active only when the parameter defines a step.
        if self.parameter.step:
            return (QtWidgets.QAbstractSpinBox.StepEnabledFlag.StepUpEnabled | QtWidgets.QAbstractSpinBox.StepEnabledFlag.StepDownEnabled)
        else:
            return QtWidgets.QAbstractSpinBox.StepEnabledFlag.StepNone
class LOGSSIM(torch.nn.Module):
    """Log-SSIM loss module. Caches the Gaussian window and rebuilds it only
    when the channel count or tensor type of the input changes.
    """

    def __init__(self, window_size=11, size_average=True):
        super(LOGSSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        _, channel, _, _ = img1.size()
        cache_valid = (channel == self.channel) and (self.window.data.type() == img1.data.type())
        if cache_valid:
            window = self.window
        else:
            # Rebuild the window to match the input's channels/device/dtype,
            # then cache it for subsequent calls.
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        return _logssim(img1, img2, window, self.window_size, channel, self.size_average)
def evaluate(best_sum_loss, best_final_loss, best_flag, lr):
    """Benchmark the learned LSTM optimizer against SGD/RMSprop/Adam on the
    module-level objective ``f``, plot the loss curves, and persist the LSTM
    optimizer checkpoint whenever it beats the best recorded final loss.

    Relies on module-level globals: ``f``, ``LSTM_optimizer``, ``SGD``,
    ``RMS``, ``Learner``.

    Returns:
        (best_sum_loss, best_final_loss, best_flag) — updated bests and a
        flag set True when a new best was saved.
    """
    print('\n > evalute the model')
    STEPS = 100
    x = np.arange(STEPS)
    # Learner receives the optimizer *name* for Adam (resolved internally);
    # presumably SGD/RMS are pre-built objects — TODO confirm Learner's API.
    Adam = 'Adam'
    # retain_graph_flag is needed because the learned optimizer backprops
    # through its own update steps.
    LSTM_learner = Learner(f, LSTM_optimizer, STEPS, eval_flag=True, reset_theta=True, retain_graph_flag=True)
    SGD_Learner = Learner(f, SGD, STEPS, eval_flag=True, reset_theta=True)
    RMS_Learner = Learner(f, RMS, STEPS, eval_flag=True, reset_theta=True)
    Adam_Learner = Learner(f, Adam, STEPS, eval_flag=True, reset_theta=True)
    (sgd_losses, sgd_sum_loss) = SGD_Learner()
    (rms_losses, rms_sum_loss) = RMS_Learner()
    (adam_losses, adam_sum_loss) = Adam_Learner()
    (lstm_losses, lstm_sum_loss) = LSTM_learner()
    (p1,) = plt.plot(x, sgd_losses, label='SGD')
    (p2,) = plt.plot(x, rms_losses, label='RMS')
    (p3,) = plt.plot(x, adam_losses, label='Adam')
    (p4,) = plt.plot(x, lstm_losses, label='LSTM')
    plt.yscale('log')
    plt.legend(handles=[p1, p2, p3, p4])
    plt.title('Losses')
    plt.pause(1.5)
    print('sum_loss:sgd={},rms={},adam={},lstm={}'.format(sgd_sum_loss, rms_sum_loss, adam_sum_loss, lstm_sum_loss))
    plt.close()
    # Always snapshot the current optimizer, regardless of performance.
    torch.save(LSTM_optimizer.state_dict(), 'current_LSTM_optimizer_ckpt.pth')
    try:
        best = torch.load('best_loss.txt')
    except IOError:
        # First run: no recorded best yet — keep the bests passed in by the
        # caller and only compute the current losses.
        print('can not find best_loss.txt')
        now_sum_loss = lstm_sum_loss.cpu()
        now_final_loss = lstm_losses[(- 1)].cpu()
        pass
    else:
        best_sum_loss = best[0].cpu()
        best_final_loss = best[1].cpu()
        now_sum_loss = lstm_sum_loss.cpu()
        now_final_loss = lstm_losses[(- 1)].cpu()
        print(' ==> History: sum loss = [{:.1f}] \t| final loss = [{:.2f}]'.format(best_sum_loss, best_final_loss))
    print(' ==> Current: sum loss = [{:.1f}] \t| final loss = [{:.2f}]'.format(now_sum_loss, now_final_loss))
    # New best is judged on the final-step loss only.
    if (now_final_loss < best_final_loss):
        best_final_loss = now_final_loss
        best_sum_loss = now_sum_loss
        print('\n\n===> update new best of final LOSS[{}]: = {}, best_sum_loss ={}'.format(STEPS, best_final_loss, best_sum_loss))
        torch.save(LSTM_optimizer.state_dict(), 'best_LSTM_optimizer.pth')
        torch.save([best_sum_loss, best_final_loss, lr], 'best_loss.txt')
        best_flag = True
    return (best_sum_loss, best_final_loss, best_flag)
def cifar10_testdata(batch_size):
    """Return a DataLoader over the CIFAR-10 test split, normalized with the
    standard per-channel CIFAR-10 statistics.

    Args:
        batch_size: number of images per batch.
    """
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))
    test_transform = transforms.Compose([transforms.ToTensor(), normalize])
    dataset = torchvision.datasets.CIFAR10(root='../cifar10', train=False, download=True, transform=test_transform)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False, num_workers=4)
class TestGetTableCommentFromExplain(unittest.TestCase):
    """Tests for SparkAI._get_tables_from_explain and _get_table_comment,
    exercised against real temporary Spark tables.
    """

    # Fix: setUpClass/tearDownClass must be classmethods — unittest invokes
    # them as cls.setUpClass(), which without the decorator raises a
    # missing-argument TypeError before any test runs. The decorators were
    # likely stripped during extraction.
    @classmethod
    def setUpClass(cls):
        cls.spark = SparkSession.builder.getOrCreate()
        cls.llm_mock = MagicMock(spec=BaseLanguageModel)
        cls.spark_ai = SparkAI(llm=cls.llm_mock, spark_session=cls.spark)

    @classmethod
    def tearDownClass(cls):
        cls.spark.stop()

    def create_and_read_table(self, table_name, data, comment=''):
        """Create a two-column table (optionally commented) and return a
        DataFrame selecting from it."""
        self.spark.createDataFrame(data, ['col1', 'col2']).write.saveAsTable(table_name)
        if (comment != ''):
            self.spark.sql(f"ALTER TABLE {table_name} SET TBLPROPERTIES ('comment' = '{comment}')")
        return self.spark.sql(f'SELECT * FROM {table_name}')

    def test_single_table(self):
        table_name = 'spark_catalog.default.test_table1'
        comment = 'comment1'
        try:
            df = self.create_and_read_table(table_name, [(1, 'foo'), (2, 'bar')], comment)
            tables = SparkAI._get_tables_from_explain(df)
            self.assertEqual(tables, [table_name])
            self.assertEqual(self.spark_ai._get_table_comment(df), 'which represents comment1')
        finally:
            self.spark.sql(f'DROP TABLE IF EXISTS {table_name}')

    def test_multiple_tables(self):
        # Comments are only surfaced for single-table plans; joins yield ''.
        table_names = ['spark_catalog.default.test_table1', 'spark_catalog.default.test_table2']
        try:
            dfs = [self.create_and_read_table(name, [(1, 'foo'), (2, 'bar')]) for name in table_names]
            df = dfs[0].join(dfs[1], 'col1')
            tables = SparkAI._get_tables_from_explain(df)
            self.assertEqual(tables, table_names)
            self.assertEqual(self.spark_ai._get_table_comment(df), '')
        finally:
            for name in table_names:
                self.spark.sql(f'DROP TABLE IF EXISTS {name}')

    def test_no_table(self):
        # An in-memory DataFrame has no backing table at all.
        df = self.spark.createDataFrame([(1, 'foo'), (2, 'bar')], ['col1', 'col2'])
        tables = SparkAI._get_tables_from_explain(df)
        self.assertEqual(tables, [])
        self.assertEqual(self.spark_ai._get_table_comment(df), '')
def cleanup():
    """Tear down the gdb session: join worker threads, optionally close the
    debugger views and restore the saved window layout, and release the debug
    log file handle. Mutates the module-level session globals.
    """
    global __debug_file_handle
    global gdb_process
    global gdb_threads
    # Give each reader thread a bounded time to exit.
    for t in gdb_threads:
        t.join(get_setting('gdb_timeout', 20))
    gdb_threads = []
    gdb_process = None
    if get_setting('close_views', True):
        for view in gdb_views:
            view.close()
    if get_setting('push_pop_layout', True):
        # Restore the layout and focused view captured at session start.
        gdb_bkp_window.set_layout(gdb_bkp_layout)
        gdb_bkp_window.focus_view(gdb_bkp_view)
    if (__debug_file_handle is not None):
        # stdout may be used as the debug sink; never close it.
        if (__debug_file_handle != sys.stdout):
            __debug_file_handle.close()
        __debug_file_handle = None
class _PSPHead(nn.Module):
    """PSPNet segmentation head: pyramid pooling followed by a 3x3 conv
    projection and a 1x1 classifier.

    Args:
        in_channels: backbone feature channels (512 or 2048 supported).
        nclass: number of output classes.
        norm_layer: normalization layer class.
        norm_kwargs: extra kwargs for `norm_layer` (None means none).
    """

    def __init__(self, in_channels, nclass, norm_layer=nn.BatchNorm2d, norm_kwargs=None, **kwargs):
        super(_PSPHead, self).__init__()
        self.psp = _PyramidPooling(in_channels, norm_layer=norm_layer, norm_kwargs=norm_kwargs)
        # Pyramid pooling doubles the channel count; project down to a width
        # matching the backbone size.
        if (in_channels == 512):
            out_channels = 128
        elif (in_channels == 2048):
            out_channels = 512
        else:
            # Fix: the original used a bare `raise` outside an except block,
            # which produces RuntimeError('No active exception to re-raise')
            # instead of a meaningful error.
            raise ValueError('unsupported in_channels {}: expected 512 or 2048'.format(in_channels))
        self.block = nn.Sequential(nn.Conv2d((in_channels * 2), out_channels, 3, padding=1, bias=False), norm_layer(out_channels, **({} if (norm_kwargs is None) else norm_kwargs)), nn.ReLU(True), nn.Dropout(0.1))
        self.classifier = nn.Conv2d(out_channels, nclass, 1)

    def forward(self, x):
        """Return (logits, pre-classifier feature map)."""
        x = self.psp(x)
        x = self.block(x)
        feature = x
        x = self.classifier(x)
        return (x, feature)
class Params(object):
    """Training hyper-parameter bag with sensible defaults.

    Keyword arguments override the defaults, but only when their value is
    not None — so unset CLI arguments can be passed through safely.
    """

    def __init__(self, **kwargs):
        # Defaults.
        self.shift_scale = 6.0
        self.min_shift = 0.5
        self.shift_distribution = ShiftDistribution.UNIFORM
        self.deformator_lr = 0.0001
        self.shift_predictor_lr = 0.0001
        self.n_steps = int(100000.0)
        self.batch_size = 32
        self.directions_count = None
        self.max_latent_dim = None
        self.label_weight = 1.0
        self.shift_weight = 0.25
        self.steps_per_log = 10
        self.steps_per_save = 10000
        self.steps_per_img_log = 1000
        self.steps_per_backup = 1000
        self.truncation = None
        # Apply non-None overrides on top of the defaults.
        for name, value in kwargs.items():
            if value is not None:
                setattr(self, name, value)
class GroupLDAPGroupLink(RESTObject):
    """A single LDAP group link, identified by its provider plus either a
    CN or an LDAP filter (mutually exclusive)."""

    _repr_attr = 'provider'

    def _get_link_attrs(self) -> Dict[(str, str)]:
        """Return the query attributes that uniquely identify this link."""
        attrs = {'provider': self.provider}
        if self.cn:
            attrs['cn'] = self.cn
        else:
            attrs['filter'] = self.filter
        return attrs

    def delete(self, **kwargs: Any) -> None:
        """Delete this LDAP group link on the server.

        Extra kwargs are forwarded to the manager's delete call.
        """
        if TYPE_CHECKING:
            assert isinstance(self.manager, DeleteMixin)
        self.manager.delete(self.encoded_id, query_data=self._get_link_attrs(), **kwargs)
class ModulatedDeformConv2d(nn.Module):
    """Modulated deformable 2-D convolution (DCNv2) layer.

    `forward` expects the sampling offsets and modulation mask to be computed
    by a separate branch and passed in alongside the input.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, deformable_groups=1, im2col_step=64, bias=True):
        super(ModulatedDeformConv2d, self).__init__()
        if in_channels % groups != 0:
            raise ValueError('in_channels {} must be divisible by groups {}'.format(in_channels, groups))
        if out_channels % groups != 0:
            raise ValueError('out_channels {} must be divisible by groups {}'.format(out_channels, groups))
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        self.deformable_groups = deformable_groups
        self.im2col_step = im2col_step
        self.use_bias = bias
        self.weight = nn.Parameter(torch.Tensor(out_channels, in_channels // groups, *self.kernel_size))
        # A bias parameter is always allocated so the extension-op call
        # signature stays fixed; it is merely frozen when bias=False.
        self.bias = nn.Parameter(torch.Tensor(out_channels))
        self.reset_parameters()
        if not self.use_bias:
            self.bias.requires_grad = False

    def reset_parameters(self):
        # Standard nn.Conv2d-style initialization.
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    def forward(self, input, offset, mask):
        kh, kw = self.kernel_size
        # offset carries (dy, dx) per sampling point per deformable group;
        # mask carries one modulation scalar per sampling point.
        assert 2 * self.deformable_groups * kh * kw == offset.shape[1]
        assert self.deformable_groups * kh * kw == mask.shape[1]
        return ModulatedDeformConv2dFunction.apply(input, offset, mask, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups, self.deformable_groups, self.im2col_step)
class Messages(DeleteMessages, EditMessageCaption, EditMessageReplyMarkup, EditMessageMedia, EditMessageText, ForwardMessages, GetMediaGroup, GetMessages, SendAudio, SendChatAction, SendContact, SendDocument, SendAnimation, SendLocation, SendMediaGroup, SendMessage, SendPhoto, SendSticker, SendVenue, SendVideo, SendVideoNote, SendVoice, SendPoll, VotePoll, StopPoll, RetractVote, DownloadMedia, GetChatHistory, SendCachedMedia, GetChatHistoryCount, ReadChatHistory, EditInlineText, EditInlineCaption, EditInlineMedia, EditInlineReplyMarkup, SendDice, SearchMessages, SearchGlobal, CopyMessage, CopyMediaGroup, SearchMessagesCount, SearchGlobalCount, GetDiscussionMessage, SendReaction, GetDiscussionReplies, GetDiscussionRepliesCount, StreamMedia, GetCustomEmojiStickers):
    """Aggregator mixin: bundles every message-related client method (send,
    edit, forward, search, polls, reactions, media download/stream) into a
    single base class via multiple inheritance. Carries no logic of its own.
    """
    pass
# NOTE(review): the line below lost its decorator prefix in extraction; it
# likely read "@click.command(cls=ColoredCommand)" — confirm against upstream.
(cls=ColoredCommand)
def markers(**raw_config: Any) -> NoReturn:
    """CLI command: print all registered markers as a table, then exit with
    the session's exit code."""
    raw_config['command'] = 'markers'
    pm = storage.get()
    try:
        config = pm.hook.pytask_configure(pm=pm, raw_config=raw_config)
        session = Session.from_config(config)
    # NOTE(review): Exception already subsumes ConfigurationError, so the
    # tuple is redundant — everything lands in this handler.
    except (ConfigurationError, Exception):
        console.print_exception()
        session = Session(exit_code=ExitCode.CONFIGURATION_FAILED)
    else:
        table = Table('Marker', 'Description', leading=1)
        for (name, description) in config['markers'].items():
            table.add_row(f'pytask.mark.{name}', description)
        console.print(table)
        session.hook.pytask_unconfigure(session=session)
    sys.exit(session.exit_code)
def replace_vars(a, table):
    """Recursively substitute variables in AST node `a` using the
    name -> replacement mapping `table`.

    Nodes are namedtuple-like: fields are reachable both by index and via
    `_fields`/`_replace`. Relies on module-level helpers `is_ast`,
    `is_literal` and the node constructors `name_e`, `call_e`, `attribute_e`.
    Non-AST values and literals are returned unchanged.
    """
    if (is_ast(a) and (not is_literal(a))):
        # A bare name: look it up; unknown names pass through untouched.
        if (type(a) == name_e):
            if (a.id in table):
                return table[a.id]
            else:
                return a
        # Calls: substitute in every argument but not in the callee itself.
        if (type(a) == call_e):
            return call_e(a.func, *[replace_vars(x, table) for x in a[1:]])
        # Attribute access: substitute only in the value, keep the attr name.
        if (type(a) == attribute_e):
            return attribute_e(replace_vars(a.value, table), a.attribute)
        # Generic node: rebuild each AST field via _replace; mutate list
        # fields in place element by element.
        for (i, x) in enumerate(a):
            if is_ast(x):
                a = a._replace(**{a._fields[i]: replace_vars(x, table)})
            elif (type(x) == list):
                for (j, y) in enumerate(x):
                    x[j] = replace_vars(y, table)
    return a
def test_add_permissions(tmpfolder):
    """add_permissions should OR extra mode bits on top of the umask-limited
    default file mode. Uses project helpers temp_umask/uniqpath/create.
    """
    _ = tmpfolder
    # umask 219 == 0o333: newly created files get mode 0o444 (292, r--r--r--).
    with temp_umask(219):
        path = uniqpath()
        create(path, 'contents', {})
        assert (stat.S_IMODE(path.stat().st_mode) == 292)
        path = uniqpath()
        if (os.name == 'posix'):
            # 0o444 | S_IXOTH -> 0o445 (293).
            add_permissions(stat.S_IXOTH)(path, 'contents', {})
            assert (stat.S_IMODE(path.stat().st_mode) == 293)
        else:
            # Windows ignores the umask subtleties; expect 0o666 (438).
            add_permissions(stat.S_IWRITE)(path, 'contents', {})
            assert (stat.S_IMODE(path.stat().st_mode) == 438)
class DonutImageProcessingTester(unittest.TestCase):
    """Configuration holder that produces kwargs dicts for Donut
    image-processor tests.
    """

    # NOTE: the mutable list defaults are kept to preserve the original
    # signature exactly; instances never mutate them.
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_thumbnail=True, do_align_axis=False, do_pad=True, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        # Default target size when the caller does not specify one.
        self.size = {'height': 18, 'width': 20} if size is None else size
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs dict expected by the Donut image processor."""
        return {
            'do_resize': self.do_resize,
            'size': self.size,
            'do_thumbnail': self.do_thumbnail,
            'do_align_long_axis': self.do_align_axis,
            'do_pad': self.do_pad,
            'do_normalize': self.do_normalize,
            'image_mean': self.image_mean,
            'image_std': self.image_std,
        }
# NOTE(review): the line below lost its decorator prefix in extraction;
# given the function converts native RNG state into a Python object, it
# likely read "@box(RandomStateNumbaType)" — confirm against upstream.
(RandomStateNumbaType)
def box_random_state(typ, val, c):
    """Box numba's internal Mersenne-Twister state into a
    numpy.random.RandomState Python object for return to the interpreter."""
    # Read (position, key list) from numba's internal np.random state.
    (pos, state_list) = _helperlib.rnd_get_state(_helperlib.rnd_get_np_state_ptr())
    rng = RandomState()
    rng.set_state(('MT19937', state_list, pos))
    # serialize/unserialize round-trip materializes the PyObject* inside the
    # lowering context.
    class_obj = c.pyapi.unserialize(c.pyapi.serialize_object(rng))
    return class_obj
# NOTE(review): the line below lost its decorator prefix in extraction; it
# likely read "@unittest.skipIf(...)" — confirm against upstream.
((torch.cuda.device_count() < 2), 'test requires 2 GPUs')
class TestBMUF(unittest.TestCase):
    """End-to-end tests for BMUF (block-wise model update filtering):
    spawns one training process per (simulated) GPU and checks that model
    parameters stay synchronized across workers.
    """

    def bmuf_process(self, cfg, args, iterations):
        """Run `single_gpu_training` in one spawned process per rank and
        collect each rank's result in a shared dict keyed by rank."""
        processes = []
        results = Manager().dict()
        ctx = torch.multiprocessing.get_context('spawn')
        for rank in range(args.distributed_world_size):
            p = ctx.Process(target=single_gpu_training, args=(cfg, args, rank, iterations, results))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()
        return results

    def test_bmuf_sync(self):
        # One global sync iteration: both ranks must end with equal params.
        (cfg, args) = setup_args()
        iterations = 1
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync(self):
        # Still inside warmup at the end: warmup sync alone must keep parity.
        (cfg, args) = setup_args()
        args.warmup_iterations = 20
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        iterations = 20
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync_bmuf_sync(self):
        # Past warmup plus one BMUF sync interval.
        (cfg, args) = setup_args()
        args.warmup_iterations = 20
        args.global_sync_iter = 5
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        cfg.bmuf.global_sync_iter = args.global_sync_iter
        iterations = 25
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 2)
        self.assertAlmostEqual(results[0], results[1])

    def test_single_gpu_bmuf(self):
        # Degenerate world size of 1 must still run to completion.
        (cfg, args) = setup_args()
        args.distributed_world_size = 1
        args.warmup_iterations = 5
        cfg.distributed_training.distributed_world_size = args.distributed_world_size
        cfg.bmuf.distributed_world_size = args.distributed_world_size
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        iterations = 20
        results = self.bmuf_process(cfg, args, iterations)
        assert (len(results) == 1)

    def assertAlmostEqual(self, t1, t2):
        # Tensor-aware override of unittest's scalar assertAlmostEqual.
        self.assertEqual(t1.size(), t2.size(), 'size mismatch')
        self.assertLess((t1 - t2).abs().max(), 0.0001)
class BalancedRemoteExpert(nn.Module):
    """A torch module that forwards each call to a dynamically chosen remote
    expert, using an ExpertBalancer over the DHT for load balancing.

    Args:
        dht: hivemind DHT used for expert discovery.
        uid_prefix: common prefix of the expert uids to balance over.
        grid_size: expert grid shape; only 1xN grids are supported.
        forward_timeout / backward_timeout: per-call RPC timeouts (seconds).
        update_period: how often the balancer refreshes its expert list.
        backward_task_size_multiplier: relative cost weight of backward passes.
    """

    def __init__(self, *, dht: DHT, uid_prefix: str, grid_size: Tuple[(int, ...)], forward_timeout: Optional[float]=None, backward_timeout: Optional[float]=None, update_period: float=30.0, backward_task_size_multiplier: float=2.5, **kwargs):
        super().__init__()
        if uid_prefix.endswith('.0.'):
            # Fix: the original interpolated self.uid_prefix here, but that
            # attribute is only assigned below — reading it raised
            # AttributeError. Use the local argument instead.
            logger.warning(f'BalancedRemoteExperts will look for experts under prefix {uid_prefix}0.')
        assert ((len(grid_size) == 2) and (grid_size[0] == 1)), 'only 1xN grids are supported'
        (self.dht, self.uid_prefix, self.grid_size) = (dht, uid_prefix, grid_size)
        (self.forward_timeout, self.backward_timeout) = (forward_timeout, backward_timeout)
        self.backward_task_size_multiplier = backward_task_size_multiplier
        self.expert_balancer = ExpertBalancer(dht, key=f'{self.uid_prefix}0.', update_period=update_period, **kwargs)
        self._expert_info = None

    def forward(self, *args: torch.Tensor, **kwargs: torch.Tensor):
        """Validate inputs against the expert schema and dispatch the call
        through the balancer-backed autograd function."""
        assert (len(kwargs) == len(self.info['keyword_names'])), f"Keyword args should be {self.info['keyword_names']}"
        kwargs = {key: kwargs[key] for key in self.info['keyword_names']}
        if (self._expert_info is None):
            raise NotImplementedError()
        forward_inputs = (args, kwargs)
        if (not nested_compare(forward_inputs, self.info['forward_schema'])):
            raise TypeError(f'Inputs do not match expert input schema. Did you pass the right number of parameters?')
        flat_inputs = list(nested_flatten(forward_inputs))
        # Task size is the batch dimension of the first flattened input.
        forward_task_size = flat_inputs[0].shape[0]
        flat_outputs = _BalancedRemoteModuleCall.apply(DUMMY, self.expert_balancer, self.info, self.forward_timeout, self.backward_timeout, forward_task_size, (forward_task_size * self.backward_task_size_multiplier), *flat_inputs)
        return nested_pack(flat_outputs, structure=self.info['outputs_schema'])

    # Fix: restore the @property decorator (likely stripped in extraction).
    # forward() subscripts self.info['...'], which only works when info is a
    # property returning the metadata dict, not a bound method.
    @property
    def info(self):
        """Lazily fetch expert metadata from any live expert, retrying forever."""
        while (self._expert_info is None):
            try:
                with self.expert_balancer.use_another_expert(1) as chosen_expert:
                    self._expert_info = chosen_expert.info
            except BaseException as e:
                # NOTE(review): chosen_expert may be unbound here if
                # use_another_expert itself raised — behavior kept as-is.
                logger.error(f'Tried to get expert info from {chosen_expert} but caught {repr(e)}')
        return self._expert_info
def inference(input_x, input_x_field, zeroWeights, oneDimWeights, thirdWeight):
    """Build the FFM (field-aware factorization machine) prediction:
    bias + linear term + pairwise field-aware interactions.

    Relies on module-level globals `input_x_size` (feature count) and
    `vector_dimension` (latent vector length).
    """
    # Linear term w·x plus the global bias w0.
    secondValue = tf.reduce_sum(tf.multiply(oneDimWeights, input_x, name='secondValue'))
    firstTwoValue = tf.add(zeroWeights, secondValue, name='firstTwoValue')
    thirdValue = tf.Variable(0.0, dtype=tf.float32)
    input_shape = input_x_size
    # Iterate over all unordered feature pairs (i, j), i < j.
    for i in range(input_shape):
        featureIndex1 = i
        fieldIndex1 = int(input_x_field[i])
        for j in range((i + 1), input_shape):
            featureIndex2 = j
            fieldIndex2 = int(input_x_field[j])
            # v[i, field(j)]: latent vector of feature i w.r.t. j's field.
            vectorLeft = tf.convert_to_tensor([[featureIndex1, fieldIndex2, i] for i in range(vector_dimension)])
            weightLeft = tf.gather_nd(thirdWeight, vectorLeft)
            weightLeftAfterCut = tf.squeeze(weightLeft)
            # v[j, field(i)]: latent vector of feature j w.r.t. i's field.
            vectorRight = tf.convert_to_tensor([[featureIndex2, fieldIndex1, i] for i in range(vector_dimension)])
            weightRight = tf.gather_nd(thirdWeight, vectorRight)
            weightRightAfterCut = tf.squeeze(weightRight)
            # <v_i,fj , v_j,fi> * x_i * x_j
            tempValue = tf.reduce_sum(tf.multiply(weightLeftAfterCut, weightRightAfterCut))
            indices2 = [i]
            indices3 = [j]
            xi = tf.squeeze(tf.gather_nd(input_x, indices2))
            xj = tf.squeeze(tf.gather_nd(input_x, indices3))
            product = tf.reduce_sum(tf.multiply(xi, xj))
            secondItemVal = tf.multiply(tempValue, product)
            # NOTE(review): the tf.assign result is discarded; in TF1 graph
            # mode an assign op that is not fetched or in control deps may
            # never run — verify the interaction term actually accumulates.
            tf.assign(thirdValue, tf.add(thirdValue, secondItemVal))
    return tf.add(firstTwoValue, thirdValue)
class _GettextCompiler(_Compiler):
    """Compile CLDR plural-rule expressions into gettext plural-forms
    (C-expression) syntax.

    gettext plurals only know the integer operand `n`, so `i` maps to `n`
    and every fractional operand (v, w, f, t) compiles to zero.
    """

    compile_i = _Compiler.compile_n
    compile_v = compile_zero
    compile_w = compile_zero
    compile_f = compile_zero
    compile_t = compile_zero

    def compile_relation(self, method, expr, range_list):
        """Render `expr` ∈ range_list as an OR of equality/interval checks."""
        compiled_expr = self.compile(expr)
        clauses = []
        for lo, hi in range_list[1]:
            if lo == hi:
                # Single value: emit an equality test.
                clauses.append(f'({compiled_expr} == {self.compile(lo)})')
            else:
                bound_lo = self.compile(lo)
                bound_hi = self.compile(hi)
                clauses.append(f'({compiled_expr} >= {bound_lo} && {compiled_expr} <= {bound_hi})')
        return f"({' || '.join(clauses)})"
# NOTE(review): the line below lost its decorator prefix in extraction; it
# likely read "@action(inspect_parser)" or a similar subcommand-registration
# decorator — confirm against upstream.
(inspect_parser)
def do_inspect(args: argparse.Namespace) -> None:
    """Handle the `inspect` subcommand: forward all CLI options to the
    daemon via `request` and print/validate the response."""
    response = request(args.status_file, 'inspect', show=args.show, location=args.location, verbosity=args.verbose, limit=args.limit, include_span=args.include_span, include_kind=args.include_kind, include_object_attrs=args.include_object_attrs, union_attrs=args.union_attrs, force_reload=args.force_reload)
    check_output(response, verbose=False, junit_xml=None, perf_stats_file=None)
def export_opml(user):
    """Serialize a user's feed subscriptions as an OPML 1.0 document.

    Args:
        user: object exposing `username` and a `feed_set` whose `.all()`
              yields feeds with `title`, `feed_url` and optional `site_url`.

    Returns:
        bytes: UTF-8 encoded OPML document.
    """
    root = Element('opml', {'version': '1.0'})
    head = SubElement(root, 'head')
    title = SubElement(head, 'title')
    title.text = '{0} subscriptions'.format(user.username)
    body = SubElement(root, 'body')
    for feed in user.feed_set.all():
        attrs = {'type': 'rss', 'text': feed.title, 'title': feed.title, 'xmlUrl': feed.feed_url}
        outline = SubElement(body, 'outline', attrs)
        # htmlUrl is optional — only written when the feed has a site link.
        if feed.site_url:
            outline.set('htmlUrl', feed.site_url)
    out = BytesIO()
    ElementTree(root).write(out, encoding='UTF-8')
    return out.getvalue()
class F19Handler(BaseHandler):
    """Kickstart syntax handler for Fedora 19.

    Pure lookup tables: `commandMap` binds each kickstart command name to the
    newest command class valid for F19 (older FC3/F1x classes are reused
    where the syntax did not change); `dataMap` does the same for the data
    objects those commands produce.
    """
    version = F19
    commandMap = {'auth': commands.authconfig.FC3_Authconfig, 'authconfig': commands.authconfig.FC3_Authconfig, 'autopart': commands.autopart.F18_AutoPart, 'autostep': commands.autostep.FC3_AutoStep, 'bootloader': commands.bootloader.F19_Bootloader, 'btrfs': commands.btrfs.F17_BTRFS, 'cdrom': commands.cdrom.FC3_Cdrom, 'clearpart': commands.clearpart.F17_ClearPart, 'cmdline': commands.displaymode.FC3_DisplayMode, 'device': commands.device.F8_Device, 'deviceprobe': commands.deviceprobe.FC3_DeviceProbe, 'dmraid': commands.dmraid.FC6_DmRaid, 'driverdisk': commands.driverdisk.F14_DriverDisk, 'fcoe': commands.fcoe.F13_Fcoe, 'firewall': commands.firewall.F14_Firewall, 'firstboot': commands.firstboot.FC3_Firstboot, 'graphical': commands.displaymode.FC3_DisplayMode, 'group': commands.group.F12_Group, 'halt': commands.reboot.F18_Reboot, 'harddrive': commands.harddrive.FC3_HardDrive, 'ignoredisk': commands.ignoredisk.F14_IgnoreDisk, 'install': commands.upgrade.F11_Upgrade, 'iscsi': commands.iscsi.F17_Iscsi, 'iscsiname': commands.iscsiname.FC6_IscsiName, 'keyboard': commands.keyboard.F18_Keyboard, 'lang': commands.lang.F19_Lang, 'liveimg': commands.liveimg.F19_Liveimg, 'logging': commands.logging.FC6_Logging, 'logvol': commands.logvol.F18_LogVol, 'mediacheck': commands.mediacheck.FC4_MediaCheck, 'method': commands.method.F19_Method, 'multipath': commands.multipath.FC6_MultiPath, 'network': commands.network.F19_Network, 'nfs': commands.nfs.FC6_NFS, 'part': commands.partition.F18_Partition, 'partition': commands.partition.F18_Partition, 'poweroff': commands.reboot.F18_Reboot, 'raid': commands.raid.F19_Raid, 'realm': commands.realm.F19_Realm, 'reboot': commands.reboot.F18_Reboot, 'repo': commands.repo.F15_Repo, 'rescue': commands.rescue.F10_Rescue, 'rootpw': commands.rootpw.F18_RootPw, 'selinux': commands.selinux.FC3_SELinux, 'services': commands.services.FC6_Services, 'shutdown': commands.reboot.F18_Reboot, 'skipx': commands.skipx.FC3_SkipX, 'sshpw': commands.sshpw.F13_SshPw, 
'text': commands.displaymode.FC3_DisplayMode, 'timezone': commands.timezone.F18_Timezone, 'updates': commands.updates.F7_Updates, 'upgrade': commands.upgrade.F11_Upgrade, 'url': commands.url.F18_Url, 'user': commands.user.F19_User, 'vnc': commands.vnc.F9_Vnc, 'volgroup': commands.volgroup.F16_VolGroup, 'xconfig': commands.xconfig.F14_XConfig, 'zerombr': commands.zerombr.F9_ZeroMbr, 'zfcp': commands.zfcp.F14_ZFCP}
    dataMap = {'BTRFSData': commands.btrfs.F17_BTRFSData, 'DriverDiskData': commands.driverdisk.F14_DriverDiskData, 'DeviceData': commands.device.F8_DeviceData, 'DmRaidData': commands.dmraid.FC6_DmRaidData, 'FcoeData': commands.fcoe.F13_FcoeData, 'GroupData': commands.group.F12_GroupData, 'IscsiData': commands.iscsi.F17_IscsiData, 'LogVolData': commands.logvol.F20_LogVolData, 'MultiPathData': commands.multipath.FC6_MultiPathData, 'NetworkData': commands.network.F19_NetworkData, 'PartData': commands.partition.F18_PartData, 'RaidData': commands.raid.F18_RaidData, 'RepoData': commands.repo.F15_RepoData, 'SshPwData': commands.sshpw.F13_SshPwData, 'UserData': commands.user.F19_UserData, 'VolGroupData': commands.volgroup.F16_VolGroupData, 'ZFCPData': commands.zfcp.F14_ZFCPData}
class VisibleLengthSetting(Object):
    """Key/value setting (string key, float value) with custom guts
    serialization: it round-trips as a plain (key, value) pair.
    """

    class __T(TBase):
        def regularize_extra(self, val):
            # Accept a raw [key, value] list and coerce it into the Object form.
            if isinstance(val, list):
                return self._cls(key=val[0], value=val[1])
            return val

        def to_save(self, val):
            # Serialize compactly as a (key, value) tuple.
            return (val.key, val.value)

        def to_save_xml(self, val):
            # XML serialization is deliberately unsupported for this type.
            raise NotImplementedError()

    key = String.T()
    value = Float.T()
# Fix: restore the "@" so the class is actually registered under
# 'FPNPredictorNeighbor'; as a bare call the registration decorator was
# created and discarded (the "@" was likely lost in extraction).
@_BOX_PREDICTOR.register('FPNPredictorNeighbor')
class FPNPredictorNeighbor(nn.Module):
    """Box predictor with two parallel heads: one over the non-local branch
    features and one over the MLP (fc) branch features. Each head emits
    class scores and per-class box regression deltas.
    """

    def __init__(self, cfg):
        super(FPNPredictorNeighbor, self).__init__()
        num_classes = cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES
        representation_size = cfg.MODEL.ROI_BOX_HEAD.NONLOCAL_OUT_CHANNELS
        self.cls_score = nn.Linear(representation_size, num_classes)
        # Class-agnostic regression predicts only fg/bg boxes (2 classes).
        num_bbox_reg_classes = (2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else num_classes)
        self.bbox_pred = nn.Linear(representation_size, (num_bbox_reg_classes * 4))
        nn.init.normal_(self.cls_score.weight, std=0.01)
        nn.init.normal_(self.bbox_pred.weight, std=0.001)
        for l in [self.cls_score, self.bbox_pred]:
            nn.init.constant_(l.bias, 0)
        # Second head over the fc-branch representation.
        representation_size_fc = cfg.MODEL.ROI_BOX_HEAD.MLP_HEAD_DIM
        self.cls_score_fc = nn.Linear(representation_size_fc, num_classes)
        self.bbox_pred_fc = nn.Linear(representation_size_fc, (num_bbox_reg_classes * 4))
        nn.init.normal_(self.cls_score_fc.weight, std=0.01)
        nn.init.normal_(self.bbox_pred_fc.weight, std=0.001)
        for l in [self.cls_score_fc, self.bbox_pred_fc]:
            nn.init.constant_(l.bias, 0)

    def forward(self, x):
        """x is a 3-tuple: (cls features, reg features, fc features).
        Returns (scores, bbox_deltas, scores_fc, bbox_deltas_fc)."""
        scores = self.cls_score(x[0])
        bbox_deltas = self.bbox_pred(x[1])
        x_fc_cls = x[2]
        x_fc_reg = x[2]
        scores_fc = self.cls_score_fc(x_fc_cls)
        bbox_deltas_fc = self.bbox_pred_fc(x_fc_reg)
        return (scores, bbox_deltas, scores_fc, bbox_deltas_fc)
class ShardedQuantFeatureProcessedEmbeddingBagCollection(ShardedQuantEmbeddingBagCollection):
    # Sharded, quantized EBC variant that runs a FeatureProcessorsCollection
    # over the per-rank KJT shards before the embedding lookups.
    def __init__(self, module: EmbeddingBagCollectionInterface, table_name_to_parameter_sharding: Dict[(str, ParameterSharding)], env: ShardingEnv, fused_params: Optional[Dict[(str, Any)]]=None, device: Optional[torch.device]=None, feature_processor: Optional[FeatureProcessorsCollection]=None) -> None:
        super().__init__(module, table_name_to_parameter_sharding, env, fused_params, device)
        # A feature processor is mandatory for this subclass.
        assert (feature_processor is not None)
        # NOTE(review): defaults to 'cuda' when no device is given -- confirm
        # that is the intended fallback for CPU-only deployments.
        device_type: str = (self._device.type if (self._device is not None) else 'cuda')
        # One (possibly device-copied) processor per rank; on 'meta' the
        # original processor is shared since there is nothing to copy.
        self.feature_processors_per_rank: nn.ModuleList = torch.nn.ModuleList()
        for i in range(env.world_size):
            self.feature_processors_per_rank.append((feature_processor if (device_type == 'meta') else copy_to_device(feature_processor, feature_processor.device, torch.device(f'{device_type}:{i}'))))
    def apply_feature_processor(self, kjt_list: KJTList) -> KJTList:
        # Apply rank i's processor to rank i's KJT shard, preserving order.
        l: List[KeyedJaggedTensor] = []
        for i in range(len(self.feature_processors_per_rank)):
            l.append(self.feature_processors_per_rank[i](kjt_list[i]))
        return KJTList(l)
    def compute(self, ctx: NullShardedModuleContext, dist_input: ListOfKJTList) -> List[List[torch.Tensor]]:
        # Process features, then run each sharding's lookup on its input.
        return [lookup.forward(self.apply_feature_processor(dist_input[i])) for (i, lookup) in enumerate(self._lookups)]
class PNGImageEncoder(ImageEncoder):
    """Encode an image as PNG via the bundled pure-Python writer."""

    def get_file_extensions(self):
        return ['.png']

    def encode(self, image, filename, file):
        """Write *image* to the open binary *file* object as PNG."""
        image = image.get_image_data()
        has_alpha = 'A' in image.format
        greyscale = len(image.format) < 3
        # Normalize to one of the four pixel layouts PNG supports.
        if greyscale:
            image.format = 'LA' if has_alpha else 'L'
        else:
            image.format = 'RGBA' if has_alpha else 'RGB'
        # Negative pitch requests rows top-to-bottom, as the writer expects.
        image.pitch = -(image.width * len(image.format))
        writer = pypng.Writer(image.width, image.height, greyscale=greyscale, alpha=has_alpha)
        pixels = array.array('B')
        pixels.frombytes(image.get_data(image.format, image.pitch))
        writer.write_array(file, pixels)
class TritonGrammar(object):
    """Synthesis grammar: typed bit-vector variables plus the operator set
    allowed as non-terminals."""

    def __init__(self, vars: List[Tuple[(Char, BitSize)]], ops: List[BvOp]):
        self.ops = ops
        self.vars_dict = {v[0]: v[1] for v in vars}
        self.vars = list(self.vars_dict)
        # All variables are assumed to share the bit-size of the first one.
        self.size = self.vars_dict[self.vars[0]]

    def non_terminal_operators(self) -> List[Operator]:
        """Resolve the configured ops to their Operator descriptors."""
        return [OPERATORS[op] for op in self.ops]

    def gen_test_inputs(self, n: int) -> List[Input]:
        """Draw *n* random valuations, one random bitstring per variable."""
        def one_valuation():
            return {name: random.getrandbits(self.vars_dict[name]) for name in self.vars}
        return [one_valuation() for _ in range(n)]

    def str_to_expr(self, s: str, *args) -> TritonAst:
        """Parse a normalized expression string in the context of args[0]."""
        context_ast = args[0]
        return context_ast.normalized_str_to_ast(s)

    def to_dict(self) -> Dict:
        """Serialize the grammar to plain data (inverse of from_dict)."""
        return dict(vars=list(self.vars_dict.items()), operators=[op.name for op in self.ops])

    def from_dict(g_dict: Dict) -> 'TritonGrammar':
        """Rebuild a grammar from the dict produced by to_dict."""
        return TritonGrammar(g_dict['vars'], [BvOp[name] for name in g_dict['operators']])
class NIN(nn.Module):
    """Network-in-Network feature extractor (conv stacks with 1x1 convs).

    Args:
        pooling: inter-stage spatial pooling, either 'max' or 'avg'.

    Raises:
        ValueError: if `pooling` is neither 'max' nor 'avg' (previously this
            fell through to an UnboundLocalError on `pool2d`).
    """

    def __init__(self, pooling):
        super(NIN, self).__init__()
        if (pooling == 'max'):
            pool2d = nn.MaxPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True)
        elif (pooling == 'avg'):
            pool2d = nn.AvgPool2d((3, 3), (2, 2), (0, 0), ceil_mode=True)
        else:
            raise ValueError("pooling must be 'max' or 'avg', got %r" % (pooling,))
        # The same pooling module instance is reused between stages.
        self.features = nn.Sequential(
            # Stage 1: 11x11 conv followed by two 1x1 "network-in-network" convs.
            nn.Conv2d(3, 96, (11, 11), (4, 4)), nn.ReLU(inplace=True),
            nn.Conv2d(96, 96, (1, 1)), nn.ReLU(inplace=True),
            nn.Conv2d(96, 96, (1, 1)), nn.ReLU(inplace=True),
            pool2d,
            # Stage 2: 5x5 conv (padded) plus 1x1 convs.
            nn.Conv2d(96, 256, (5, 5), (1, 1), (2, 2)), nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, (1, 1)), nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, (1, 1)), nn.ReLU(inplace=True),
            pool2d,
            # Stage 3: 3x3 conv (padded) plus 1x1 convs.
            nn.Conv2d(256, 384, (3, 3), (1, 1), (1, 1)), nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, (1, 1)), nn.ReLU(inplace=True),
            nn.Conv2d(384, 384, (1, 1)), nn.ReLU(inplace=True),
            pool2d,
            # Classifier head: dropout, conv stack, global average pooling.
            nn.Dropout(0.5),
            nn.Conv2d(384, 1024, (3, 3), (1, 1), (1, 1)), nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1024, (1, 1)), nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1000, (1, 1)), nn.ReLU(inplace=True),
            nn.AvgPool2d((6, 6), (1, 1), (0, 0), ceil_mode=True),
            nn.Softmax(),
        )
def _bigint_from_bytes(bytes):
sizeof_int = 4
padding = (sizeof_int - (len(bytes) % sizeof_int))
bytes += (b'\x00' * padding)
int_count = int((len(bytes) / sizeof_int))
unpacked = struct.unpack('{}I'.format(int_count), bytes)
accum = 0
for (i, val) in enumerate(unpacked):
accum += ((2 ** ((sizeof_int * 8) * i)) * val)
return accum |
def stage_partition_from_file_paths(namespace: str, file_paths: List[str], *args, **kwargs) -> Partition:
    """Create the namespace/table/stream hierarchy for *file_paths* and
    return a freshly staged (not yet committed) partition."""
    ds.create_namespace(namespace, {}, **kwargs)
    # Derive a filesystem-safe table name from the joined input paths.
    table_name = '-'.join(file_paths).replace('/', '_')
    ds.create_table_version(namespace, table_name, '1', **kwargs)
    stream = ds.get_stream(namespace, table_name, '1', **kwargs)
    return ds.stage_partition(stream, [], **kwargs)
# NOTE(review): the line below looks like a stripped decorator argument list
# (e.g. `@persist_to_disk(persist=...)`). `eval` on an environment variable is
# unsafe if PERSISTENT can be influenced externally -- confirm and prefer an
# explicit parse such as `os.getenv('PERSISTENT') == 'True'`.
(persist=eval(os.getenv('PERSISTENT')))
def rank_matrices(matrix):
    # Assigns an integer rank to matrix cells: walks the descending-sorted
    # values laid over the matrix shape and, for each upper-triangle position
    # (i <= j), finds the first cell in `matrix` holding that value and writes
    # the next rank there.
    # NOTE(review): `rank` is incremented before the first assignment, so
    # ranks start at 2, and a value appearing multiple times re-ranks the same
    # first-matching cell each pass -- confirm both are intentional.
    sorted_matrix = np.sort(matrix.flatten())[::(- 1)].reshape(matrix.shape)
    rank = 1
    ranked_matrix = np.zeros(matrix.shape)
    # Progress bar sized to roughly the upper triangle (n^2 / 2 entries).
    pbar = tqdm.tqdm(total=int(((matrix.shape[0] ** 2) / 2)))
    for i in range(matrix.shape[0]):
        for j in range(matrix.shape[0]):
            temp = sorted_matrix[i][j]
            flag = 0
            if (i > j):
                # Lower triangle is skipped entirely.
                continue
            else:
                pbar.update(1)
            # Linear scan for the first cell equal to the target value.
            for k in range(matrix.shape[0]):
                for p in range(matrix.shape[0]):
                    if (matrix[k][p] == temp):
                        rank += 1
                        ranked_matrix[k][p] = rank
                        flag = 1
                        break
                if (flag == 1):
                    break
    return ranked_matrix
def view_area_command(sub_parsers):
    """Register the `view-area` sub-command on *sub_parsers*."""
    area_parser: ArgumentParser = sub_parsers.add_parser(
        'view-area',
        help='View information about an area.',
        formatter_class=argparse.MetavarTypeHelpFormatter,
    )
    area_parser.add_argument('--simplify', action='store_true', help='Simplify the RequirementSets')
    area_parser.add_argument('region', type=str, help='The name of the region that contains the area.')
    area_parser.add_argument('area', type=str, help='The name of the area.')
    # Dispatch handler invoked by the top-level parser.
    area_parser.set_defaults(func=view_area_command_logic)
class FileListModel(QAbstractListModel):
    """List model exposing a directory's entries lazily, revealing them to
    views in batches of 100 via Qt's canFetchMore/fetchMore protocol."""

    numberPopulated = pyqtSignal(int)

    def __init__(self, parent=None):
        super(FileListModel, self).__init__(parent)
        self.fileCount = 0  # entries currently exposed to attached views
        self.fileList = []  # full directory listing

    def rowCount(self, parent=QModelIndex()):
        return self.fileCount

    def data(self, index, role=Qt.DisplayRole):
        if not index.isValid():
            return None
        row = index.row()
        if not (0 <= row < len(self.fileList)):
            return None
        if role == Qt.DisplayRole:
            return self.fileList[row]
        if role == Qt.BackgroundRole:
            # Alternate background every 100 rows to visualize fetch batches.
            if (row // 100) % 2 == 0:
                return QApplication.palette().base()
            return QApplication.palette().alternateBase()
        return None

    def canFetchMore(self, index):
        return self.fileCount < len(self.fileList)

    def fetchMore(self, index):
        remaining = len(self.fileList) - self.fileCount
        batch = min(100, remaining)
        self.beginInsertRows(QModelIndex(), self.fileCount, self.fileCount + batch)
        self.fileCount += batch
        self.endInsertRows()
        self.numberPopulated.emit(batch)

    def setDirPath(self, path):
        """Point the model at *path*, resetting the exposed-row counter."""
        directory = QDir(path)
        self.beginResetModel()
        self.fileList = directory.entryList()
        self.fileCount = 0
        self.endResetModel()
class resnet8x4_resnet8x4(nn.Module):
    """Two-branch ensemble of resnet8x4_aux networks sharing one input."""

    def __init__(self, num_classes):
        super(resnet8x4_resnet8x4, self).__init__()
        self.net1 = resnet8x4_aux(num_classes=num_classes)
        self.net2 = resnet8x4_aux(num_classes=num_classes)

    def forward(self, x, grad=True):
        """Run both branches; returns ([logit1, logit2], [ss1, ss2])."""
        logits, ss_logits = [], []
        for branch in (self.net1, self.net2):
            logit, ss = branch(x, grad=grad)
            logits.append(logit)
            ss_logits.append(ss)
        return (logits, ss_logits)
def properties(validator, properties, instance, schema):
    """JSON-Schema `properties` keyword: for each declared property that is
    present on the instance, descend into its subschema and yield any
    validation errors produced."""
    if not validator.is_type(instance, 'object'):
        # Non-objects are ignored by this keyword.
        return
    for prop, subschema in properties.items():
        if prop in instance:
            yield from validator.descend(instance[prop], subschema, path=prop, schema_path=prop)
def merge_encodings(default_encoding: Dict[(str, Dict[(str, Any)])], overrides: Dict[(str, Dict[(str, Any)])]) -> Dict[(str, Dict[(str, Any)])]:
    """Overlay per-variable override dicts onto the defaults.

    Variables present in both inputs get their inner keys merged with the
    override winning; variables present on only one side are carried through
    unchanged (without copying the inner dict).
    """
    merged = {
        var: ({**spec, **overrides[var]} if var in overrides else spec)
        for var, spec in default_encoding.items()
    }
    # Add override-only variables.
    for var, spec in overrides.items():
        merged.setdefault(var, spec)
    return merged
class SentWebAppMessage(TelegramObject):
    """Information about an inline message sent by a Web App on behalf of a
    user. Objects of this class are compared for equality solely on
    `inline_message_id`.
    """
    __slots__ = ('inline_message_id',)
    def __init__(self, inline_message_id: Optional[str]=None, *, api_kwargs: Optional[JSONDict]=None):
        super().__init__(api_kwargs=api_kwargs)
        # Identifier of the sent inline message; only present when the
        # message was sent with an attached inline keyboard.
        self.inline_message_id: Optional[str] = inline_message_id
        # Tuple used by the base class for __eq__/__hash__.
        self._id_attrs = (self.inline_message_id,)
        # Must be last: makes the instance immutable from here on.
        self._freeze()
def train():
    """CLI entry point: parse args, build vocabularies and batches, then
    train a bidirectional language model, checkpointing the best model.
    """
    # ---- argument parsing -------------------------------------------------
    cmd = argparse.ArgumentParser(sys.argv[0], conflict_handler='resolve')
    cmd.add_argument('--seed', default=1, type=int, help='The random seed.')
    cmd.add_argument('--gpu', default=(- 1), type=int, help='Use id of gpu, -1 if cpu.')
    cmd.add_argument('--train_path', required=True, help='The path to the training file.')
    cmd.add_argument('--valid_path', help='The path to the development file.')
    cmd.add_argument('--test_path', help='The path to the testing file.')
    cmd.add_argument('--config_path', required=True, help='the path to the config file.')
    cmd.add_argument('--word_embedding', help='The path to word vectors.')
    cmd.add_argument('--optimizer', default='sgd', choices=['sgd', 'adam', 'adagrad'], help='the type of optimizer: valid options=[sgd, adam, adagrad]')
    cmd.add_argument('--lr', type=float, default=0.01, help='the learning rate.')
    cmd.add_argument('--lr_decay', type=float, default=0, help='the learning rate decay.')
    cmd.add_argument('--model', required=True, help='path to save model')
    cmd.add_argument('--batch_size', '--batch', type=int, default=32, help='the batch size.')
    cmd.add_argument('--max_epoch', type=int, default=100, help='the maximum number of iteration.')
    cmd.add_argument('--clip_grad', type=float, default=5, help='the tense of clipped grad.')
    cmd.add_argument('--max_sent_len', type=int, default=20, help='maximum sentence length.')
    cmd.add_argument('--min_count', type=int, default=5, help='minimum word count.')
    cmd.add_argument('--max_vocab_size', type=int, default=150000, help='maximum vocabulary size.')
    cmd.add_argument('--save_classify_layer', default=False, action='store_true', help='whether to save the classify layer')
    cmd.add_argument('--valid_size', type=int, default=0, help="size of validation dataset when there's no valid.")
    cmd.add_argument('--eval_steps', required=False, type=int, help='report every xx batches.')
    # NOTE: sys.argv[2:] -- this sub-command expects argv[1] to be consumed
    # by an outer dispatcher.
    opt = cmd.parse_args(sys.argv[2:])
    with open(opt.config_path, 'r') as fin:
        config = json.load(fin)
    print(opt)
    print(config)
    # ---- seeding and device selection ------------------------------------
    torch.manual_seed(opt.seed)
    random.seed(opt.seed)
    if (opt.gpu >= 0):
        torch.cuda.set_device(opt.gpu)
        if (opt.seed > 0):
            torch.cuda.manual_seed(opt.seed)
    use_cuda = ((opt.gpu >= 0) and torch.cuda.is_available())
    # ---- corpus loading (reader depends on token embedder type) ----------
    token_embedder_name = config['token_embedder']['name'].lower()
    token_embedder_max_chars = config['token_embedder'].get('max_characters_per_token', None)
    if (token_embedder_name == 'cnn'):
        train_data = read_corpus(opt.train_path, token_embedder_max_chars, opt.max_sent_len)
    elif (token_embedder_name == 'lstm'):
        train_data = read_corpus(opt.train_path, opt.max_sent_len)
    else:
        raise ValueError('Unknown token embedder name: {}'.format(token_embedder_name))
    logging.info('training instance: {}, training tokens: {}.'.format(len(train_data), sum([(len(s) - 1) for s in train_data])))
    # Validation set: explicit file, or a slice carved off the training data.
    if (opt.valid_path is not None):
        if (token_embedder_name == 'cnn'):
            valid_data = read_corpus(opt.valid_path, token_embedder_max_chars, opt.max_sent_len)
        elif (token_embedder_name == 'lstm'):
            valid_data = read_corpus(opt.valid_path, opt.max_sent_len)
        else:
            raise ValueError('Unknown token embedder name: {}'.format(token_embedder_name))
        logging.info('valid instance: {}, valid tokens: {}.'.format(len(valid_data), sum([(len(s) - 1) for s in valid_data])))
    elif (opt.valid_size > 0):
        (train_data, valid_data) = divide(train_data, opt.valid_size)
        logging.info('training instance: {}, training tokens after division: {}.'.format(len(train_data), sum([(len(s) - 1) for s in train_data])))
        logging.info('valid instance: {}, valid tokens: {}.'.format(len(valid_data), sum([(len(s) - 1) for s in valid_data])))
    else:
        valid_data = None
    if (opt.test_path is not None):
        if (token_embedder_name == 'cnn'):
            test_data = read_corpus(opt.test_path, token_embedder_max_chars, opt.max_sent_len)
        elif (token_embedder_name == 'lstm'):
            test_data = read_corpus(opt.test_path, opt.max_sent_len)
        else:
            raise ValueError('Unknown token embedder name: {}'.format(token_embedder_name))
        logging.info('testing instance: {}, testing tokens: {}.'.format(len(test_data), sum([(len(s) - 1) for s in test_data])))
    else:
        test_data = None
    # ---- word vocabulary: pretrained embeddings + truncated corpus vocab --
    if (opt.word_embedding is not None):
        embs = load_embedding(opt.word_embedding)
        word_lexicon = {word: i for (i, word) in enumerate(embs[0])}
    else:
        embs = None
        word_lexicon = {}
    vocab = get_truncated_vocab(train_data, opt.min_count)
    # Special tokens are reserved before corpus words.
    for special_word in ['<oov>', '<bos>', '<eos>', '<pad>']:
        if (special_word not in word_lexicon):
            word_lexicon[special_word] = len(word_lexicon)
    for (word, _) in vocab:
        if (word not in word_lexicon):
            word_lexicon[word] = len(word_lexicon)
    if (config['token_embedder']['word_dim'] > 0):
        word_emb_layer = EmbeddingLayer(config['token_embedder']['word_dim'], word_lexicon, fix_emb=False, embs=embs)
        logging.info('Word embedding size: {0}'.format(len(word_emb_layer.word2id)))
    else:
        word_emb_layer = None
        logging.info('Vocabulary size: {0}'.format(len(word_lexicon)))
    # ---- character vocabulary (only for char-aware embedders) ------------
    if (config['token_embedder']['char_dim'] > 0):
        char_lexicon = {}
        for sentence in train_data:
            for word in sentence:
                for ch in word:
                    if (ch not in char_lexicon):
                        char_lexicon[ch] = len(char_lexicon)
        for special_char in ['<bos>', '<eos>', '<oov>', '<pad>', '<bow>', '<eow>']:
            if (special_char not in char_lexicon):
                char_lexicon[special_char] = len(char_lexicon)
        char_emb_layer = EmbeddingLayer(config['token_embedder']['char_dim'], char_lexicon, fix_emb=False)
        logging.info('Char embedding size: {0}'.format(len(char_emb_layer.word2id)))
    else:
        char_lexicon = None
        char_emb_layer = None
    # ---- batching ---------------------------------------------------------
    train = create_batches(train_data, opt.batch_size, word_lexicon, char_lexicon, config, use_cuda=use_cuda)
    if (opt.eval_steps is None):
        # Default: evaluate once per full pass over the training batches.
        opt.eval_steps = len(train[0])
    logging.info('Evaluate every {0} batches.'.format(opt.eval_steps))
    if (valid_data is not None):
        valid = create_batches(valid_data, opt.batch_size, word_lexicon, char_lexicon, config, sort=False, shuffle=False, use_cuda=use_cuda)
    else:
        valid = None
    if (test_data is not None):
        test = create_batches(test_data, opt.batch_size, word_lexicon, char_lexicon, config, sort=False, shuffle=False, use_cuda=use_cuda)
    else:
        test = None
    # ---- model and optimizer ---------------------------------------------
    # Language modeling: the output classes are the word vocabulary itself.
    label_to_ix = word_lexicon
    logging.info('vocab size: {0}'.format(len(label_to_ix)))
    nclasses = len(label_to_ix)
    model = Model(config, word_emb_layer, char_emb_layer, nclasses, use_cuda)
    logging.info(str(model))
    if use_cuda:
        model = model.cuda()
    need_grad = (lambda x: x.requires_grad)
    if (opt.optimizer.lower() == 'adam'):
        optimizer = optim.Adam(filter(need_grad, model.parameters()), lr=opt.lr)
    elif (opt.optimizer.lower() == 'sgd'):
        optimizer = optim.SGD(filter(need_grad, model.parameters()), lr=opt.lr)
    elif (opt.optimizer.lower() == 'adagrad'):
        optimizer = optim.Adagrad(filter(need_grad, model.parameters()), lr=opt.lr)
    else:
        raise ValueError('Unknown optimizer {}'.format(opt.optimizer.lower()))
    # ---- persist run metadata (dictionaries + options) --------------------
    try:
        os.makedirs(opt.model)
    except OSError as exception:
        if (exception.errno != errno.EEXIST):
            raise
    if (config['token_embedder']['char_dim'] > 0):
        with codecs.open(os.path.join(opt.model, 'char.dic'), 'w', encoding='utf-8') as fpo:
            for (ch, i) in char_emb_layer.word2id.items():
                print('{0}\t{1}'.format(ch, i), file=fpo)
    with codecs.open(os.path.join(opt.model, 'word.dic'), 'w', encoding='utf-8') as fpo:
        for (w, i) in word_lexicon.items():
            print('{0}\t{1}'.format(w, i), file=fpo)
    json.dump(vars(opt), codecs.open(os.path.join(opt.model, 'config_rnn.json'), 'w', encoding='utf-8'))
    # ---- training loop ----------------------------------------------------
    best_train = .0
    best_valid = .0
    test_result = .0
    for epoch in range(opt.max_epoch):
        (best_train, best_valid, test_result) = train_model(epoch, opt, model, optimizer, train, valid, test, best_train, best_valid, test_result)
        if (opt.lr_decay > 0):
            optimizer.param_groups[0]['lr'] *= opt.lr_decay
    if (valid_data is None):
        logging.info('best train ppl: {:.6f}.'.format(best_train))
    elif (test_data is None):
        logging.info('best train ppl: {:.6f}, best valid ppl: {:.6f}.'.format(best_train, best_valid))
    else:
        logging.info('best train ppl: {:.6f}, best valid ppl: {:.6f}, test ppl: {:.6f}.'.format(best_train, best_valid, test_result))
# NOTE(review): the line below appears to be a stripped
# `@pytest.mark.parametrize` decorator -- confirm against the original file.
.parametrize('name,path,extras,constraint,expected', [('my-package', SAMPLE_PROJECT, None, None, f'my-package (*) {SAMPLE_PROJECT.as_uri()}'), ('my-package', SAMPLE_PROJECT, ['db'], '1.2', f'my-package[db] (1.2) {SAMPLE_PROJECT.as_uri()}')])
def test_directory_dependency_string_representation(name: str, path: Path, extras: (list[str] | None), constraint: (str | None), expected: str) -> None:
    # A DirectoryDependency should render as "<name>[extras] (<constraint>) <uri>".
    dependency = DirectoryDependency(name=name, path=path, extras=extras)
    if constraint:
        dependency.constraint = constraint
    assert (str(dependency) == expected)
def cpu_count():
    """Best-effort count of available CPUs across platforms.

    Returns:
        A positive integer CPU count.

    Raises:
        NotImplementedError: when no platform mechanism yields a count >= 1.
    """
    num = 0
    if sys.platform == 'win32':
        # Windows publishes the count via an environment variable.
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            num = 0
    elif 'bsd' in sys.platform or sys.platform == 'darwin':
        # BSD family: ask sysctl (which lives under /usr/sbin on macOS).
        comm = '/sbin/sysctl -n hw.ncpu'
        if sys.platform == 'darwin':
            comm = '/usr' + comm
        try:
            with os.popen(comm) as p:
                num = int(p.read())
        except ValueError:
            num = 0
    else:
        # POSIX: processors currently online.
        try:
            num = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            num = 0
    if num < 1:
        raise NotImplementedError('cannot determine number of cpus')
    return num
class ReLuBlurBlock(nn.Module):
    """Clamped ReLU (clamp to [0, temp]) followed by a blur layer.

    Args:
        in_filters: number of channels fed to the blur layer.
        temp: upper clamp bound ("temperature") of the activation.
        sfilter: blur kernel spec, forwarded to layers.blur.
        pad_mode: padding mode, forwarded to layers.blur.
    """

    def __init__(self, in_filters, temp=6.0, sfilter=(1, 1), pad_mode='constant', **kwargs):
        super(ReLuBlurBlock, self).__init__()
        self.temp = temp
        self.blur = layers.blur(in_filters, sfilter=sfilter, pad_mode=pad_mode)

    def forward(self, x):
        # ReLU6-style activation with a configurable ceiling, then blur.
        x = torch.clamp(x, 0.0, self.temp)
        x = self.blur(x)
        return x

    def extra_repr(self):
        # Bug fix: previously referenced non-existent `self.thr`, raising
        # AttributeError whenever the module was printed; the clamp bound is
        # stored as `self.temp`.
        return ('thr=%.3e' % self.temp)
class FindEntryServerTestCase(unittest.TestCase):
    """Exercises the server's entry CRUD commands (get/update/remove/list/
    copy) against a fresh server with one seeded entry per test."""
    def setUp(self):
        # Fresh server plus one known entry in pocket '0' for each test.
        self.server = server.Server()
        self.pocket = '0'
        self.entry_id = self.server.run('add', name='Hiking boots', value=(- 111.11), pocket=self.pocket)['id']
    def test_get_pocket(self):
        # Existing, new, and default (None) pocket lookups all resolve.
        pocket = self.server._get_pocket(self.pocket)
        self.assertEqual(pocket.name, self.pocket)
        another_pocket = 'foo'
        pocket = self.server._get_pocket(another_pocket)
        self.assertEqual(pocket.name, another_pocket)
        pocket = self.server._get_pocket(None)
        self.assertEqual(pocket.name, DEFAULT_POCKET_NAME)
    def test_get_nonexisting_entry(self):
        # Errors are returned in the response dict, not raised.
        response = self.server.run('get', pocket=self.pocket, eid=(- 1))
        error = response['error']
        self.assertIsInstance(error, exceptions.PocketEntryNotFound)
        self.assertEqual(str(error), 'Entry not found.')
    def test_invalid_update(self):
        # Non-numeric 'value' fails validation with a descriptive message.
        response = self.server.run('update', pocket=self.pocket, value='one', eid=1)
        error = response['error']
        self.assertIsInstance(error, exceptions.PocketValidationFailure)
        self.assertEqual(str(error), 'Invalid input data:\nvalue: Not a valid number.')
    def test_query_and_reset_response(self):
        # 'list' returns nested dicts keyed by table name.
        category = entries.CategoryEntry.DEFAULT_NAME
        response = self.server.run('list', pocket=self.pocket, filters={'category': category})
        self.assertGreater(len(response), 0)
        self.assertIsInstance(response, dict)
        self.assertIsInstance(response['elements'], dict)
        self.assertIsInstance(response['elements'][DEFAULT_TABLE], dict)
        self.assertIsInstance(response['elements'][RECURRENT_TABLE], dict)
    def test_response_is_none(self):
        # get -> remove -> list: after removal the listing is empty.
        response = self.server.run('get', pocket=self.pocket, eid=self.entry_id)
        self.assertIn('element', response)
        self.assertEqual(response['element'].doc_id, self.entry_id)
        response = self.server.run('remove', pocket=self.pocket, eid=self.entry_id)
        self.assertEqual(response['id'], self.entry_id)
        response = self.server.run('list', pocket=self.pocket, filters={'name': 'Hiking boots', 'category': entries.CategoryEntry.DEFAULT_NAME})
        self.assertDictEqual(response['elements'][DEFAULT_TABLE], {})
        self.assertDictEqual(response['elements'][RECURRENT_TABLE], {})
    def test_update(self):
        # Categories are normalized to lower case on update.
        new_category = 'Outdoorsy shit'
        self.server.run('update', eid=self.entry_id, pocket=self.pocket, category=new_category)
        element = self.server.run('get', eid=self.entry_id, pocket=self.pocket)['element']
        self.assertEqual(element['category'], new_category.lower())
    def test_copy(self):
        # Copying clones the entry into the destination pocket verbatim.
        destination_pocket = '1'
        response = self.server.run('copy', source_pocket=self.pocket, destination_pocket=destination_pocket, eid=self.entry_id)
        copied_entry_id = response['id']
        self.assertEqual(copied_entry_id, 1)
        source_entry = self.server.run('get', pocket=self.pocket, eid=self.entry_id)['element']
        destination_entry = self.server.run('get', pocket=destination_pocket, eid=copied_entry_id)['element']
        self.assertDictEqual(source_entry, destination_entry)
    def test_unsuccessful_copy(self):
        # Copying a nonexistent entry raises from the internal helper.
        self.assertRaises(exceptions.PocketEntryNotFound, self.server._copy_entry, source_pocket=self.pocket, destination_pocket='1', eid=42)
_sample_fn.register(ptr.RandIntRV)
_sample_fn.register(ptr.IntegersRV)
_sample_fn.register(ptr.UniformRV)
def jax_sample_fn_uniform(op):
    """Build a JAX sampler for uniform-style RVs (uniform/randint/integers).

    IntegersRV has no identically-named jax.random function, so it is mapped
    to `jax.random.randint`; the other ops dispatch by their own name.
    """
    name = 'randint' if isinstance(op, ptr.IntegersRV) else op.name
    jax_op = getattr(jax.random, name)

    def sample_fn(rng, size, dtype, *parameters):
        # Split the carried PRNG key: first half is carried forward,
        # second half is consumed by this draw.
        carry_key, draw_key = jax.random.split(rng['jax_state'], 2)
        minval, maxval = parameters
        sample = jax_op(draw_key, shape=size, dtype=dtype, minval=minval, maxval=maxval)
        rng['jax_state'] = carry_key
        return (rng, sample)

    return sample_fn
def get_tfds(train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int]=None):
    """Load CSV splits, tokenize one or two text columns, and expose them as
    tf.data.Datasets of (features, label) pairs.

    Returns:
        (train_ds, val_ds, test_ds, label2id); each dataset is None when its
        file was not provided.
    """
    # Collect only the splits whose file paths were actually given.
    files = {}
    if (train_file is not None):
        files[datasets.Split.TRAIN] = [train_file]
    if (eval_file is not None):
        files[datasets.Split.VALIDATION] = [eval_file]
    if (test_file is not None):
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('csv', data_files=files)
    # The label column is removed from the feature columns by position;
    # remaining columns are the text input(s).
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    # NOTE(review): label ids come from set() ordering, so the mapping is not
    # deterministic across runs -- confirm downstream code tolerates this.
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for (i, label) in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    # Single text column -> single-sequence encoding; two columns -> pairs.
    if (len(features_name) == 1):
        for k in files.keys():
            transformed_ds[k] = ds[k].map((lambda example: tokenizer.batch_encode_plus(example[features_name[0]], truncation=True, max_length=max_seq_length, padding='max_length')), batched=True)
    elif (len(features_name) == 2):
        for k in files.keys():
            transformed_ds[k] = ds[k].map((lambda example: tokenizer.batch_encode_plus((example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding='max_length')), batched=True)
    # Generators yielding ({model_input: values}, label_id) per example.
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for (k, v) in ex.items() if (k in input_names)}
            label = label2id[ex[label_name]]
            (yield (d, label))
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for (k, v) in ex.items() if (k in input_names)}
            label = label2id[ex[label_name]]
            (yield (d, label))
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for (k, v) in ex.items() if (k in input_names)}
            label = label2id[ex[label_name]]
            (yield (d, label))
    # Wrap each generator in a tf.data.Dataset and attach the known
    # cardinality so Keras can report progress correctly.
    train_ds = (tf.data.Dataset.from_generator(gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([]))) if (datasets.Split.TRAIN in transformed_ds) else None)
    if (train_ds is not None):
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))
    val_ds = (tf.data.Dataset.from_generator(gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([]))) if (datasets.Split.VALIDATION in transformed_ds) else None)
    if (val_ds is not None):
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))
    test_ds = (tf.data.Dataset.from_generator(gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([]))) if (datasets.Split.TEST in transformed_ds) else None)
    if (test_ds is not None):
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))
    return (train_ds, val_ds, test_ds, label2id)
class BaseOneDSpectrum(LowerDimensionalObject, MaskableArrayMixinClass, SpectralAxisMixinClass):
def __new__(cls, value, unit=None, dtype=None, copy=True, wcs=None, meta=None, mask=None, header=None, spectral_unit=None, fill_value=np.nan, wcs_tolerance=0.0):
if (np.asarray(value).ndim != 1):
raise ValueError('value should be a 1-d array')
if ((wcs is not None) and (wcs.wcs.naxis != 1)):
raise ValueError('wcs should have one dimension')
self = u.Quantity.__new__(cls, value, unit=unit, dtype=dtype, copy=copy).view(cls)
self._wcs = wcs
self._meta = ({} if (meta is None) else meta)
self._wcs_tolerance = wcs_tolerance
self._initial_set_mask(mask)
self._fill_value = fill_value
if (header is not None):
self._header = header
else:
self._header = Header()
self._spectral_unit = spectral_unit
if (spectral_unit is None):
if ('CUNIT1' in self._header):
self._spectral_unit = u.Unit(self._header['CUNIT1'])
elif (self._wcs is not None):
self._spectral_unit = u.Unit(self._wcs.wcs.cunit[0])
return self
def __repr__(self):
prefixstr = (('<' + self.__class__.__name__) + ' ')
arrstr = np.array2string(self.filled_data[:].value, separator=',', prefix=prefixstr)
return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr)
def from_hdu(hdu, ext=0):
if isinstance(hdu, HDUList):
hdul = hdu
hdu = hdul[ext]
else:
hdul = HDUList([hdu])
if (not (len(hdu.data.shape) == 1)):
raise ValueError('HDU must contain one-dimensional data.')
meta = {}
mywcs = wcs.WCS(hdu.header)
if ('BUNIT' in hdu.header):
unit = cube_utils.convert_bunit(hdu.header['BUNIT'])
meta['BUNIT'] = hdu.header['BUNIT']
else:
unit = None
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=FITSWarning)
beam = cube_utils.try_load_beams(hdul)
try:
beams = beam
_ = len(beams)
except TypeError:
beams = None
if (beams is not None):
self = VaryingResolutionOneDSpectrum(hdu.data, unit=unit, wcs=mywcs, meta=meta, header=hdu.header, beams=beams)
else:
beam = cube_utils.try_load_beam(hdu.header)
self = OneDSpectrum(hdu.data, unit=unit, wcs=mywcs, meta=meta, header=hdu.header, beam=beam)
return self
def header(self):
header = super(BaseOneDSpectrum, self).header
if (('CUNIT1' in header) and (self._spectral_unit != u.Unit(header['CUNIT1']))):
spectral_scale = spectral_axis.wcs_unit_scale(self._spectral_unit)
header['CDELT1'] *= spectral_scale
header['CRVAL1'] *= spectral_scale
header['CUNIT1'] = self.spectral_axis.unit.to_string(format='FITS')
return header
def spectral_axis(self):
if (self._wcs is None):
spec_axis = (np.arange(self.size) * u.one)
else:
spec_axis = (self.wcs.wcs_pix2world(np.arange(self.size), 0)[0] * u.Unit(self.wcs.wcs.cunit[0]))
if (self._spectral_unit is not None):
spec_axis = spec_axis.to(self._spectral_unit)
return spec_axis
def quicklook(self, filename=None, drawstyle='steps-mid', **kwargs):
from matplotlib import pyplot
ax = pyplot.gca()
ax.plot(self.spectral_axis, self.filled_data[:].value, drawstyle=drawstyle, **kwargs)
ax.set_xlabel(self.spectral_axis.unit.to_string(format='latex'))
ax.set_ylabel(self.unit)
if (filename is not None):
pyplot.gcf().savefig(filename)
def with_spectral_unit(self, unit, velocity_convention=None, rest_value=None):
(newwcs, newmeta) = self._new_spectral_wcs(unit, velocity_convention=velocity_convention, rest_value=rest_value)
newheader = self._nowcs_header.copy()
newheader.update(newwcs.to_header())
wcs_cunit = u.Unit(newheader['CUNIT1'])
newheader['CUNIT1'] = unit.to_string(format='FITS')
newheader['CDELT1'] *= wcs_cunit.to(unit)
if (self._mask is not None):
newmask = self._mask.with_spectral_unit(unit, velocity_convention=velocity_convention, rest_value=rest_value)
newmask._wcs = newwcs
else:
newmask = None
return self._new_spectrum_with(wcs=newwcs, spectral_unit=unit, mask=newmask, meta=newmeta, header=newheader)
def __getitem__(self, key, **kwargs):
try:
kwargs['beams'] = self.beams[key]
except (AttributeError, TypeError):
pass
new_qty = super(BaseOneDSpectrum, self).__getitem__(key)
if isinstance(key, slice):
new = self.__class__(value=new_qty.value, unit=new_qty.unit, copy=False, wcs=wcs_utils.slice_wcs(self._wcs, key, shape=self.shape), meta=self._meta, mask=(self._mask[key] if (self._mask is not nomask) else nomask), header=self._header, wcs_tolerance=self._wcs_tolerance, fill_value=self.fill_value, **kwargs)
return new
else:
if (self._mask is not nomask):
bad = self._mask.exclude()[key]
if isinstance(bad, da.Array):
bad = bad.compute()
new_qty[bad] = np.nan
return new_qty
def __getattribute__(self, attrname):
if (attrname in ('min', 'max', 'std', 'mean', 'sum', 'cumsum', 'nansum', 'ptp', 'var')):
return getattr(self.quantity, attrname)
else:
return super(BaseOneDSpectrum, self).__getattribute__(attrname)
def spectral_interpolate(self, spectral_grid, suppress_smooth_warning=False, fill_value=None):
assert (spectral_grid.ndim == 1)
inaxis = self.spectral_axis.to(spectral_grid.unit)
indiff = np.mean(np.diff(inaxis))
outdiff = np.mean(np.diff(spectral_grid))
if (outdiff < 0):
spectral_grid = spectral_grid[::(- 1)]
outdiff = np.mean(np.diff(spectral_grid))
outslice = slice(None, None, (- 1))
else:
outslice = slice(None, None, 1)
specslice = (slice(None) if (indiff >= 0) else slice(None, None, (- 1)))
inaxis = inaxis[specslice]
indiff = np.mean(np.diff(inaxis))
if ((indiff < 0) or (outdiff < 0)):
raise ValueError('impossible.')
assert np.all((np.diff(spectral_grid) > 0))
assert np.all((np.diff(inaxis) > 0))
np.testing.assert_allclose(np.diff(spectral_grid), outdiff, err_msg='Output grid must be linear')
if ((outdiff > (2 * indiff)) and (not suppress_smooth_warning)):
warnings.warn('Input grid has too small a spacing. The data should be smoothed prior to resampling.', SmoothingWarning)
newspec = np.empty([spectral_grid.size], dtype=self.dtype)
newmask = np.empty([spectral_grid.size], dtype='bool')
newspec[outslice] = np.interp(spectral_grid.value, inaxis.value, self.filled_data[specslice].value, left=fill_value, right=fill_value)
mask = self.mask.include()
if all(mask):
newmask = np.ones([spectral_grid.size], dtype='bool')
else:
interped = (np.interp(spectral_grid.value, inaxis.value, mask[specslice]) > 0)
newmask[outslice] = interped
newwcs = self.wcs.deepcopy()
newwcs.wcs.crpix[0] = 1
newwcs.wcs.crval[0] = (spectral_grid[0].value if (outslice.step > 0) else spectral_grid[(- 1)].value)
newwcs.wcs.cunit[0] = spectral_grid.unit.to_string(format='FITS')
newwcs.wcs.cdelt[0] = (outdiff.value if (outslice.step > 0) else (- outdiff.value))
newwcs.wcs.set()
newheader = self._nowcs_header.copy()
newheader.update(newwcs.to_header())
wcs_cunit = u.Unit(newheader['CUNIT1'])
newheader['CUNIT1'] = spectral_grid.unit.to_string(format='FITS')
newheader['CDELT1'] *= wcs_cunit.to(spectral_grid.unit)
newbmask = BooleanArrayMask(newmask, wcs=newwcs)
return self._new_spectrum_with(data=newspec, wcs=newwcs, mask=newbmask, header=newheader, spectral_unit=spectral_grid.unit)
def spectral_smooth(self, kernel, convolve=convolution.convolve, **kwargs):
    """Smooth the spectrum along its spectral axis with *kernel*.

    The convolution function defaults to astropy's ``convolve`` and is
    run with ``normalize_kernel=True``; extra keyword arguments are
    forwarded to it. Returns a new spectrum with the smoothed data.
    """
    smoothed = convolve(self.value, kernel, normalize_kernel=True, **kwargs)
    return self._new_spectrum_with(data=smoothed)
def to(self, unit, equivalencies=[]):
    """Convert the spectrum to *unit*, honoring astropy *equivalencies*.

    NOTE(review): the mutable default ``equivalencies=[]`` is kept for
    interface compatibility; it is only forwarded, never mutated here.
    """
    parent = super(BaseOneDSpectrum, self)
    # freq=None lets the parent class determine any spectral reference.
    return parent.to(unit, equivalencies, freq=None)
def with_fill_value(self, fill_value):
    """Return a copy of this spectrum that uses *fill_value* for masked data."""
    replaced = self._new_spectrum_with(fill_value=fill_value)
    return replaced
def _new_thing_with(self):
    """Generic hook used by shared base-class machinery: for a spectrum the
    "new thing" factory is ``_new_spectrum_with``."""
    factory = self._new_spectrum_with
    return factory
def _new_spectrum_with(self, data=None, wcs=None, mask=None, meta=None, fill_value=None, spectral_unit=None, unit=None, header=None, wcs_tolerance=None, **kwargs):
    """Build a new spectrum of the same class, overriding selected pieces.

    Every argument left as ``None`` falls back to the corresponding
    attribute of ``self``. Raises ``astropy.units.UnitsError`` when an
    explicit or data-carried unit disagrees with the existing one.
    """
    # Fall back to the current data when none is supplied.
    data = (self._data if (data is None) else data)
    if ((unit is None) and hasattr(data, 'unit')):
        # Quantity-like data carries its own unit; it must agree with ours.
        if (data.unit != self.unit):
            raise u.UnitsError("New data unit '{0}' does not match unit '{1}'. You can override this by specifying the `unit` keyword.".format(data.unit, self.unit))
        unit = data.unit
    elif (unit is None):
        unit = self.unit
    elif (unit is not None):
        # An explicit unit was requested: normalize it, then check it
        # against any unit already attached to the data.
        if (not isinstance(unit, u.Unit)):
            unit = u.Unit(unit)
        if hasattr(data, 'unit'):
            if (u.Unit(unit) != data.unit):
                raise u.UnitsError("The specified new cube unit '{0}' does not match the input unit '{1}'.".format(unit, data.unit))
        else:
            # Attach the requested unit to plain array data without copying.
            data = u.Quantity(data, unit=unit, copy=False)
    wcs = (self._wcs if (wcs is None) else wcs)
    mask = (self._mask if (mask is None) else mask)
    if (meta is None):
        meta = {}
    # NOTE(review): this update runs even when the caller passed `meta`, so
    # entries from self._meta overwrite caller-supplied keys on collision —
    # confirm that is intentional.
    meta.update(self._meta)
    if (unit is not None):
        meta['BUNIT'] = unit.to_string(format='FITS')
    fill_value = (self._fill_value if (fill_value is None) else fill_value)
    spectral_unit = (self._spectral_unit if (spectral_unit is None) else u.Unit(spectral_unit))
    # NOTE(review): `header or self._header` also falls back when an *empty*
    # (falsy) header object is passed; same for wcs_tolerance == 0.
    spectrum = self.__class__(value=data, wcs=wcs, mask=mask, meta=meta, unit=unit, fill_value=fill_value, header=(header or self._header), wcs_tolerance=(wcs_tolerance or self._wcs_tolerance), **kwargs)
    spectrum._spectral_unit = spectral_unit
    return spectrum
def test_hover_move_event_not_in_handles(view, item):
    """Hovering inside the item body (away from any resize handle) must keep
    the plain arrow cursor."""
    view.scene.addItem(item)
    item.setSelected(True)
    hover_event = MagicMock()
    hover_event.pos.return_value = QtCore.QPointF(50, 50)
    # Make the unselected bounding rect huge so (50, 50) is far from handles.
    big_rect = QtCore.QRectF(0, 0, 1000, 800)
    with patch.object(item, 'bounding_rect_unselected', return_value=big_rect):
        item.hoverMoveEvent(hover_event)
        assert item.cursor() == Qt.CursorShape.ArrowCursor
def handle_options_and_args(_command, arguments, options):
    """Wrap *_command* with click argument/option decorators.

    Each entry of *arguments* is either a plain name or a single-item dict
    mapping the name to extra decorator data; *options* is likewise either a
    name->data dict or a plain sequence of names. Returns the decorated
    command.
    """
    for arg_spec in arguments:
        if isinstance(arg_spec, dict):
            # Single-key dict: the key is the argument name, the value its config.
            arg_name, arg_data = next(iter(arg_spec.items()))
            _command = click.argument(arg_name, **handle_option_and_arg_data(arg_data))(_command)
        else:
            _command = click.argument(arg_spec)(_command)
    if isinstance(options, dict):
        for opt_name, opt_data in options.items():
            _command = click.option(opt_name, **handle_option_and_arg_data(opt_data))(_command)
    else:
        for opt_name in options:
            _command = click.option(opt_name)(_command)
    return _command
class ObjectiveFcn():
    """Namespace of selectable objective functions, grouped by kind.

    Each nested enum value is a one-tuple wrapping the penalty
    implementation; the TRACK_* names are mostly aliases of the
    corresponding MINIMIZE_* penalties (tracking a reference instead of
    minimizing toward zero).
    """

    class Lagrange(FcnEnum):
        """Objectives integrated along the phase (running cost)."""
        CUSTOM = (PenaltyFunctionAbstract.Functions.custom,)
        MINIMIZE_ANGULAR_MOMENTUM = (PenaltyFunctionAbstract.Functions.minimize_angular_momentum,)
        MINIMIZE_COM_ACCELERATION = (PenaltyFunctionAbstract.Functions.minimize_com_acceleration,)
        MINIMIZE_COM_POSITION = (PenaltyFunctionAbstract.Functions.minimize_com_position,)
        MINIMIZE_COM_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_com_velocity,)
        MINIMIZE_CONTACT_FORCES = (PenaltyFunctionAbstract.Functions.minimize_contact_forces,)
        MINIMIZE_CONTROL = (PenaltyFunctionAbstract.Functions.minimize_controls,)
        MINIMIZE_FATIGUE = (PenaltyFunctionAbstract.Functions.minimize_fatigue,)
        MINIMIZE_LINEAR_MOMENTUM = (PenaltyFunctionAbstract.Functions.minimize_linear_momentum,)
        MINIMIZE_MARKERS = (PenaltyFunctionAbstract.Functions.minimize_markers,)
        MINIMIZE_MARKERS_ACCELERATION = (PenaltyFunctionAbstract.Functions.minimize_markers_acceleration,)
        MINIMIZE_MARKERS_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_markers_velocity,)
        MINIMIZE_POWER = (PenaltyFunctionAbstract.Functions.minimize_power,)
        MINIMIZE_QDDOT = (PenaltyFunctionAbstract.Functions.minimize_qddot,)
        MINIMIZE_SEGMENT_ROTATION = (PenaltyFunctionAbstract.Functions.minimize_segment_rotation,)
        MINIMIZE_SEGMENT_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_segment_velocity,)
        MINIMIZE_SOFT_CONTACT_FORCES = (PenaltyFunctionAbstract.Functions.minimize_soft_contact_forces,)
        MINIMIZE_STATE = (PenaltyFunctionAbstract.Functions.minimize_states,)
        # MINIMIZE_TIME is the only entry implemented on the Lagrange
        # objective class itself rather than on the shared abstract.
        MINIMIZE_TIME = (ObjectiveFunction.LagrangeFunction.Functions.minimize_time,)
        PROPORTIONAL_CONTROL = (PenaltyFunctionAbstract.Functions.proportional_controls,)
        PROPORTIONAL_STATE = (PenaltyFunctionAbstract.Functions.proportional_states,)
        MINIMIZE_ALGEBRAIC_STATES = (PenaltyFunctionAbstract.Functions.minimize_algebraic_states,)
        STOCHASTIC_MINIMIZE_EXPECTED_FEEDBACK_EFFORTS = (PenaltyFunctionAbstract.Functions.stochastic_minimize_expected_feedback_efforts,)
        SUPERIMPOSE_MARKERS = (PenaltyFunctionAbstract.Functions.superimpose_markers,)
        # Aliases: tracking variants reuse the minimize implementations.
        TRACK_CONTACT_FORCES = (PenaltyFunctionAbstract.Functions.minimize_contact_forces,)
        TRACK_CONTROL = (PenaltyFunctionAbstract.Functions.minimize_controls,)
        TRACK_MARKER_WITH_SEGMENT_AXIS = (PenaltyFunctionAbstract.Functions.track_marker_with_segment_axis,)
        TRACK_MARKERS = (PenaltyFunctionAbstract.Functions.minimize_markers,)
        TRACK_MARKERS_ACCELERATION = (PenaltyFunctionAbstract.Functions.minimize_markers_acceleration,)
        TRACK_MARKERS_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_markers_velocity,)
        TRACK_POWER = (PenaltyFunctionAbstract.Functions.minimize_power,)
        TRACK_SEGMENT_WITH_CUSTOM_RT = (PenaltyFunctionAbstract.Functions.track_segment_with_custom_rt,)
        TRACK_SOFT_CONTACT_FORCES = (PenaltyFunctionAbstract.Functions.minimize_soft_contact_forces,)
        TRACK_STATE = (PenaltyFunctionAbstract.Functions.minimize_states,)
        TRACK_VECTOR_ORIENTATIONS_FROM_MARKERS = (PenaltyFunctionAbstract.Functions.track_vector_orientations_from_markers,)

        # NOTE(review): defined without self/cls — presumably intended as a
        # static helper (possible @staticmethod lost in extraction); confirm.
        def get_type() -> Callable:
            """Return the implementation class handling Lagrange objectives."""
            return ObjectiveFunction.LagrangeFunction

    class Mayer(FcnEnum):
        """Objectives evaluated at a single node (terminal/point cost)."""
        STATE_CONTINUITY = (PenaltyFunctionAbstract.Functions.state_continuity,)
        CUSTOM = (PenaltyFunctionAbstract.Functions.custom,)
        MINIMIZE_ANGULAR_MOMENTUM = (PenaltyFunctionAbstract.Functions.minimize_angular_momentum,)
        MINIMIZE_COM_ACCELERATION = (PenaltyFunctionAbstract.Functions.minimize_com_acceleration,)
        MINIMIZE_COM_POSITION = (PenaltyFunctionAbstract.Functions.minimize_com_position,)
        MINIMIZE_COM_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_com_velocity,)
        MINIMIZE_CONTROL = (PenaltyFunctionAbstract.Functions.minimize_controls,)
        MINIMIZE_FATIGUE = (PenaltyFunctionAbstract.Functions.minimize_fatigue,)
        MINIMIZE_LINEAR_MOMENTUM = (PenaltyFunctionAbstract.Functions.minimize_linear_momentum,)
        MINIMIZE_MARKERS = (PenaltyFunctionAbstract.Functions.minimize_markers,)
        MINIMIZE_MARKERS_ACCELERATION = (PenaltyFunctionAbstract.Functions.minimize_markers_acceleration,)
        MINIMIZE_MARKERS_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_markers_velocity,)
        MINIMIZE_POWER = (PenaltyFunctionAbstract.Functions.minimize_power,)
        MINIMIZE_PREDICTED_COM_HEIGHT = (PenaltyFunctionAbstract.Functions.minimize_predicted_com_height,)
        MINIMIZE_QDDOT = (PenaltyFunctionAbstract.Functions.minimize_qddot,)
        MINIMIZE_SEGMENT_ROTATION = (PenaltyFunctionAbstract.Functions.minimize_segment_rotation,)
        MINIMIZE_SEGMENT_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_segment_velocity,)
        MINIMIZE_STATE = (PenaltyFunctionAbstract.Functions.minimize_states,)
        # MINIMIZE_TIME is implemented on the Mayer objective class itself.
        MINIMIZE_TIME = (ObjectiveFunction.MayerFunction.Functions.minimize_time,)
        PROPORTIONAL_STATE = (PenaltyFunctionAbstract.Functions.proportional_states,)
        MINIMIZE_ALGEBRAIC_STATE = (PenaltyFunctionAbstract.Functions.minimize_algebraic_states,)
        SUPERIMPOSE_MARKERS = (PenaltyFunctionAbstract.Functions.superimpose_markers,)
        SUPERIMPOSE_MARKERS_VELOCITY = (PenaltyFunctionAbstract.Functions.superimpose_markers_velocity,)
        # Aliases: tracking variants reuse the minimize implementations.
        TRACK_MARKER_WITH_SEGMENT_AXIS = (PenaltyFunctionAbstract.Functions.track_marker_with_segment_axis,)
        TRACK_MARKERS = (PenaltyFunctionAbstract.Functions.minimize_markers,)
        TRACK_MARKERS_ACCELERATION = (PenaltyFunctionAbstract.Functions.minimize_markers_acceleration,)
        TRACK_MARKERS_VELOCITY = (PenaltyFunctionAbstract.Functions.minimize_markers_velocity,)
        TRACK_POWER = (PenaltyFunctionAbstract.Functions.minimize_power,)
        TRACK_SEGMENT_WITH_CUSTOM_RT = (PenaltyFunctionAbstract.Functions.track_segment_with_custom_rt,)
        TRACK_STATE = (PenaltyFunctionAbstract.Functions.minimize_states,)
        TRACK_VECTOR_ORIENTATIONS_FROM_MARKERS = (PenaltyFunctionAbstract.Functions.track_vector_orientations_from_markers,)

        # NOTE(review): defined without self/cls — presumably intended as a
        # static helper (possible @staticmethod lost in extraction); confirm.
        def get_type() -> Callable:
            """Return the implementation class handling Mayer objectives."""
            return ObjectiveFunction.MayerFunction

    class Multinode(FcnEnum):
        """Objectives spanning several nodes at once; only custom supported."""
        CUSTOM = (PenaltyFunctionAbstract.Functions.custom,)

        # NOTE(review): defined without self/cls — see note on Lagrange.get_type.
        def get_type() -> Callable:
            """Return the implementation class handling multinode objectives."""
            return ObjectiveFunction.MultinodeFunction

    class Parameter(FcnEnum):
        """Objectives applied to optimization parameters."""
        MINIMIZE_PARAMETER = (PenaltyFunctionAbstract.Functions.minimize_parameter,)
        CUSTOM = (PenaltyFunctionAbstract.Functions.custom,)

        # NOTE(review): defined without self/cls — see note on Lagrange.get_type.
        def get_type() -> Callable:
            """Return the implementation class handling parameter objectives."""
            return ObjectiveFunction.ParameterFunction
# NOTE(review): the next two lines look like decorators whose '@pytest.mark.'
# / '@' prefixes were lost in extraction (likely '@pytest.mark.parametrize'
# and a spawn-capability marker) — confirm against the original file.
.parametrize(['debugging_module', 'debugging_set_trace'], [('pdb', 'set_trace()'), pytest.param('ipdb', 'set_trace()', marks=pytest.mark.xfail(reason='waiting on to allow proper testing')), pytest.param('pydevd', 'settrace(port=4678)', marks=pytest.mark.xfail(reason='in need of way to setup pydevd server'))])
_spawn
def test_suppresses_timeout_when_debugger_is_entered(testdir, debugging_module, debugging_set_trace):
    """Entering a debugger (pdb/ipdb/pydevd) must suspend the test timeout.

    Runs a test that drops into the given debugger, resumes it via the
    spawned pytest child, and asserts no timeout failure is reported.
    """
    p1 = testdir.makepyfile('\n import pytest, {debugging_module}\n\n .timeout(1)\n def test_foo():\n {debugging_module}.{debugging_set_trace}\n '.format(debugging_module=debugging_module, debugging_set_trace=debugging_set_trace))
    child = testdir.spawn_pytest(str(p1))
    child.expect('test_foo')
    # Give the child time to actually enter the debugger before continuing.
    time.sleep(0.2)
    child.send('c\n')
    child.sendeof()
    result = child.read().decode().lower()
    if child.isalive():
        child.terminate(force=True)
    # The 1-second timeout must not have fired while the debugger was active.
    assert ('timeout >1.0s' not in result)
    assert ('fail' not in result)
def imagenet_iterator(cfg, kv):
    """Build MXNet train/val record iterators for ImageNet classification.

    The augmentation strength is driven by ``cfg.dataset.aug_level``; level 1
    trains from the 256px records, higher levels from the 480px records with
    progressively stronger distortions. Returns ``(train_iter, val_iter)``
    sharded across ``kv`` workers.
    """
    aug = cfg.dataset.aug_level
    val_rec = os.path.join(cfg.dataset.data_dir, 'val_256_q95.rec')
    if aug == 1:
        train_rec = os.path.join(cfg.dataset.data_dir, 'train_256_q95.rec')
    else:
        train_rec = os.path.join(cfg.dataset.data_dir, 'train_480_q95.rec')
    train_iter = mx.io.ImageRecordIter(
        path_imgrec=train_rec,
        label_width=1,
        data_name='data',
        label_name='softmax_label',
        data_shape=(3, 224, 224),
        batch_size=cfg.batch_size,
        pad=0,
        fill_value=127,
        rand_crop=(aug > 0),
        max_random_scale=1.0,
        # Stronger scale/color/geometry jitter only above the given levels.
        min_random_scale=(1.0 if aug <= 1 else 0.533),
        max_aspect_ratio=(0 if aug <= 1 else 0.25),
        random_h=(0 if aug <= 1 else 36),
        random_s=(0 if aug <= 1 else 50),
        random_l=(0 if aug <= 1 else 50),
        max_rotate_angle=(0 if aug <= 2 else 10),
        max_shear_ratio=(0 if aug <= 2 else 0.1),
        rand_mirror=(aug > 0),
        shuffle=(aug >= 0),
        num_parts=kv.num_workers,
        part_index=kv.rank)
    val_iter = mx.io.ImageRecordIter(
        path_imgrec=val_rec,
        label_width=1,
        data_name='data',
        label_name='softmax_label',
        batch_size=cfg.batch_size,
        data_shape=(3, 224, 224),
        rand_crop=False,
        rand_mirror=False,
        num_parts=kv.num_workers,
        part_index=kv.rank)
    return (train_iter, val_iter)
def test_json_package_page() -> None:
    """JSON simple-page generation matches the golden output, both compact
    and pretty-printed."""
    api = SimpleAPI(Storage(), SimpleFormat.JSON, [], SimpleDigest.SHA256, False, None)
    package = Package('69')
    package._metadata = SIXTYNINE_METADATA
    compact = api.generate_json_simple_page(package)
    pretty = api.generate_json_simple_page(package, pretty=True)
    assert compact == EXPECTED_SIMPLE_SIXTYNINE_JSON_1_1
    assert pretty == EXPECTED_SIMPLE_SIXTYNINE_JSON_PRETTY_1_1
def test_config_file_errors_html(errors):
    """The HTML rendering of config errors matches the expected markup and
    includes the traceback block verbatim."""
    rendered = errors.to_html()
    expected = textwrap.dedent('\n Errors occurred while reading config.py:\n\n <ul>\n\n <li>\n <b>Error text 1</b>: Exception 1\n\n </li>\n\n <li>\n <b>Error text 2</b>: Exception 2\n\n <pre>\nFake traceback\n </pre>\n\n </li>\n\n </ul>\n ')
    assert textwrap.dedent(rendered) == expected
    assert '<pre>\nFake traceback\n' in rendered
def validate_(args, device_id, pt, step):
    """Load a checkpoint, run the validation set through it, and return the
    resulting ROUGE score.

    *pt* takes precedence over ``args.test_from`` as the checkpoint path when
    non-empty.
    """
    device = 'cpu' if args.visible_gpus == '-1' else 'cuda'
    test_from = pt if pt != '' else args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    # map_location keeps tensors on CPU regardless of where they were saved.
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    saved_opt = vars(checkpoint['opt'])
    # Restore only the whitelisted model flags from the checkpoint options.
    for flag in saved_opt:
        if flag in model_flags:
            setattr(args, flag, saved_opt[flag])
    print(args)
    tokenizer = BertTokenizer.from_pretrained(args.bert_dir)
    model = Summarizer(args, device, tokenizer.vocab, checkpoint)
    model.eval()
    valid_iter = data_loader.Dataloader(args, load_dataset(args, 'val', shuffle=False),
                                        args.test_batch_size, args.test_batch_ex_size,
                                        device, shuffle=False, is_test=True)
    predictor = build_predictor(args, tokenizer, model, logger)
    return predictor.validate(valid_iter, step)
# NOTE(review): this looks like a class decorator whose '@with' prefix was
# lost in extraction (likely '@with_default_category') — confirm upstream.
_default_category('Basic Completion')
class BasicCompletionCommandSet(CommandSet):
    """CommandSet demonstrating cmd2's basic completion helpers:
    flag-based, index-based, delimiter-based, and error-raising completers."""
    # Candidate strings fed to the various completer demonstrations.
    food_item_strs = ['Pizza', 'Ham', 'Ham Sandwich', 'Potato']
    sport_item_strs = ['Bat', 'Basket', 'Basketball', 'Football', 'Space Ball']
    file_strs = ['/home/user/file.db', '/home/user/file space.db', '/home/user/another.db', '/home/other user/maps.db', '/home/other user/tests.db']

    def do_flag_based(self, cmd: Cmd, statement: Statement):
        """Echo the arguments; completion is driven by the preceding flag."""
        self._cmd.poutput('Args: {}'.format(statement.args))

    def complete_flag_based(self, cmd: Cmd, text: str, line: str, begidx: int, endidx: int) -> List[str]:
        """Complete values based on which flag precedes the token."""
        flag_dict = {'-f': self.food_item_strs, '--food': self.food_item_strs, '-s': self.sport_item_strs, '--sport': self.sport_item_strs, '-p': cmd.path_complete, '--path': cmd.path_complete}
        return cmd.flag_based_complete(text, line, begidx, endidx, flag_dict=flag_dict)

    def do_index_based(self, cmd: Cmd, statement: Statement):
        """Echo the arguments; completion is driven by token position."""
        self._cmd.poutput('Args: {}'.format(statement.args))

    def complete_index_based(self, cmd: Cmd, text: str, line: str, begidx: int, endidx: int) -> List[str]:
        """Complete values based on the positional index of the token."""
        index_dict = {1: self.food_item_strs, 2: self.sport_item_strs, 3: cmd.path_complete}
        return cmd.index_based_complete(text, line, begidx, endidx, index_dict=index_dict)

    def do_delimiter_complete(self, cmd: Cmd, statement: Statement):
        """Echo the arguments; completion splits candidates on '/'."""
        self._cmd.poutput('Args: {}'.format(statement.args))

    def complete_delimiter_complete(self, cmd: Cmd, text: str, line: str, begidx: int, endidx: int) -> List[str]:
        """Complete path-like strings one '/'-delimited segment at a time."""
        return cmd.delimiter_complete(text, line, begidx, endidx, match_against=self.file_strs, delimiter='/')

    def do_raise_error(self, cmd: Cmd, statement: Statement):
        """Echo the arguments; its completer deliberately raises."""
        self._cmd.poutput('Args: {}'.format(statement.args))

    def complete_raise_error(self, cmd: Cmd, text: str, line: str, begidx: int, endidx: int) -> List[str]:
        """Demonstrate how cmd2 surfaces a CompletionError to the user."""
        raise CompletionError('This is how a CompletionError behaves')

    # NOTE(review): this looks like a method decorator whose '@' prefix was
    # lost in extraction (likely a category decorator for the next command).
    _category('Not Basic Completion')
    def do_custom_category(self, cmd: Cmd, statement: Statement):
        """Command placed in a non-default help category."""
        self._cmd.poutput('Demonstrates a command that bypasses the default category')
# NOTE(review): looks like a mangled '@pytest.mark.django_db' decorator —
# the '@pytest.mark' prefix was likely lost in extraction; confirm upstream.
.django_db
def test_send_speaker_voucher_email(speaker_voucher_factory):
    """Sending a speaker voucher email publishes exactly one
    SpeakerVoucherEmailSent message carrying the voucher id."""
    speaker_voucher = speaker_voucher_factory(voucher_code='ABC123', pretix_voucher_id=2)
    with patch('domain_events.publisher.publish_message') as mock_publish:
        send_speaker_voucher_email(speaker_voucher)
        # deduplication_id is generated internally, hence matched with ANY.
        mock_publish.assert_called_once_with('SpeakerVoucherEmailSent', body={'speaker_voucher_id': speaker_voucher.id}, deduplication_id=ANY)
class Stem(nn.Module):
    """Two-convolution downsampling stem.

    A stride-2 conv followed by a fused norm+activation, then a stride-1
    conv. Overall spatial stride is 2; ``out_chs`` may be a single int
    (duplicated for both convs) or a pair.
    """

    def __init__(self, in_chs: int, out_chs: int, kernel_size: int = 3,
                 act_layer: str = 'gelu', norm_layer: str = 'batchnorm2d',
                 norm_eps: float = 1e-05):
        super().__init__()
        # A scalar channel count is duplicated into (chs, chs).
        if not isinstance(out_chs, (list, tuple)):
            out_chs = to_2tuple(out_chs)
        norm_act_layer = partial(get_norm_act_layer(norm_layer, act_layer), eps=norm_eps)
        self.out_chs = out_chs[-1]
        self.stride = 2
        self.conv1 = create_conv2d(in_chs, out_chs[0], kernel_size, stride=2)
        self.norm1 = norm_act_layer(out_chs[0])
        self.conv2 = create_conv2d(out_chs[0], out_chs[1], kernel_size, stride=1)

    def init_weights(self, scheme=''):
        """Apply the shared conv initializer to every submodule."""
        named_apply(partial(_init_conv, scheme=scheme), self)

    def forward(self, x):
        """conv1 -> norm/act -> conv2."""
        return self.conv2(self.norm1(self.conv1(x)))
def sa_logp_target(target: float) -> GoalDirectedBenchmark:
    """Goal-directed benchmark: hit a logP *target* while biasing the score
    toward synthetically accessible molecules (SA-score modifier)."""
    spec = uniform_specification(1, 10, 100)
    logp_objective = logP_benchmark(target).objective
    biased_objective = ScoringFunctionSAWrapper(logp_objective, SAScoreModifier())
    return GoalDirectedBenchmark(name='SA_logP_target',
                                 objective=biased_objective,
                                 contribution_specification=spec)
def _tempfile(reader, suffix='', *, _os_remove=os.remove):
    """Generator helper: write the bytes produced by ``reader()`` to a
    temporary file, yield its path, then delete the file.

    NOTE(review): likely consumed via ``contextlib.contextmanager`` upstream
    (the decorator is not visible here) — confirm. ``_os_remove`` is bound as
    a default argument, presumably so cleanup still works during interpreter
    shutdown when module globals may already be cleared — confirm intent.
    """
    (fd, raw_path) = tempfile.mkstemp(suffix=suffix)
    try:
        try:
            # Write the entire payload before the path is handed out.
            os.write(fd, reader())
        finally:
            # Always release the descriptor, even if reader()/write failed.
            os.close(fd)
        # Drop our reference so the data source can be garbage-collected
        # while the caller works with the file.
        del reader
        (yield pathlib.Path(raw_path))
    finally:
        try:
            _os_remove(raw_path)
        except FileNotFoundError:
            # File already removed (e.g. by the consumer) — nothing to do.
            pass
# Size-letter suffixes used by the transformer/MLP families
# ('vit_ti' -> vit.tiny, 'pit_xs' -> pit.xsmall, ...).
_SIZE_LETTER_FN = {'ti': 'tiny', 'xs': 'xsmall', 's': 'small', 'b': 'base', 'l': 'large', 'h': 'huge'}

# Variant token in the model name -> constructor-name prefix in the module.
_VARIANT_FN = {'dnn': 'dnn', 'mcdo': 'mcdo', 'dnn_smoothing': 'dnn_smooth', 'mcdo_smoothing': 'mcdo_smooth'}


def _resolve_constructor(name):
    """Map a model *name* to its constructor.

    Returns ``(constructor, pass_stem)`` or ``None`` when the name is not a
    supported model. Accepts exactly the same names as the historical
    if/elif chain: bare family names alias the 'dnn' variant
    (e.g. 'resnet_18' == 'resnet_dnn_18', 'alexnet' == 'alexnet_dnn').
    """
    # Transformer/MLP families: '<family>_<size-letter>'; no stem argument.
    letter_families = {'vit': (vit, ('ti', 's', 'b', 'l', 'h')),
                       'pit': (pit, ('ti', 'xs', 's', 'b')),
                       'mixer': (mixer, ('ti', 's', 'b', 'l', 'h'))}
    for family, (module, sizes) in letter_families.items():
        prefix = family + '_'
        if name.startswith(prefix) and name[len(prefix):] in sizes:
            return getattr(module, _SIZE_LETTER_FN[name[len(prefix):]]), False

    # AlexNet: no depth suffix; takes the stem argument.
    if name == 'alexnet':
        return alexnet.dnn, True
    if name.startswith('alexnet_'):
        variant = name[len('alexnet_'):]
        if variant in _VARIANT_FN:
            return getattr(alexnet, _VARIANT_FN[variant]), True
        return None

    # Depth-suffixed CNN families: '<family>[_<variant>]_<depth>'.
    # Third tuple element says whether the constructor takes `stem`.
    sized_families = {'vgg': (vgg, ('11', '13', '16', '19'), False),
                      'prevgg': (prevgg, ('11', '13', '16', '19'), False),
                      'resnet': (resnet, ('18', '34', '50', '101', '152'), True),
                      'preresnet': (preresnet, ('18', '34', '50', '101', '152'), True),
                      'resnext': (resnext, ('50', '101'), True),
                      'wideresnet': (wideresnet, ('50', '101'), True),
                      'seresnet': (seresnet, ('18', '34', '50', '101', '152'), True),
                      'cbamresnet': (cbamresnet, ('18', '34', '50', '101', '152'), True)}
    for family, (module, depths, pass_stem) in sized_families.items():
        prefix = family + '_'
        if not name.startswith(prefix):
            continue
        rest = name[len(prefix):]
        if rest in depths:
            # Bare alias, e.g. 'resnet_18' -> resnet.dnn_18.
            return getattr(module, 'dnn_' + rest), pass_stem
        for variant, fn_prefix in _VARIANT_FN.items():
            for depth in depths:
                if rest == variant + '_' + depth:
                    return getattr(module, fn_prefix + '_' + depth), pass_stem
        return None
    return None


def get_model(name, num_classes=10, stem=False, verbose=True, **block_kwargs):
    """Instantiate a model by *name* and optionally print its statistics.

    Parameters: name — model identifier (see _resolve_constructor for the
    accepted grammar); num_classes — classifier output size; stem — whether
    to use the ImageNet-style stem (only forwarded to families that accept
    it); verbose — run `stats` on a dummy batch sized from `image_size` in
    block_kwargs, else 224px when stem is set, else 32px.
    Raises NotImplementedError for unknown names. Returns the model.
    """
    resolved = _resolve_constructor(name)
    if resolved is None:
        raise NotImplementedError
    ctor, pass_stem = resolved
    if pass_stem:
        model = ctor(num_classes=num_classes, stem=stem, name=name, **block_kwargs)
    else:
        model = ctor(num_classes=num_classes, name=name, **block_kwargs)
    if verbose and 'image_size' in block_kwargs:
        image_size = block_kwargs['image_size']
        stats(model, torch.randn([3, 3, image_size, image_size]))
    elif verbose and stem:
        stats(model, torch.randn([3, 3, 224, 224]))
    elif verbose:
        stats(model, torch.randn([3, 3, 32, 32]))
    return model
def weekly_check_color_results(val):
    """Pandas Styler callback: color a row/column green or red.

    Returns one ``background-color`` CSS string per cell: red (#EE6363) if
    any known failure status appears anywhere in ``val``, otherwise green
    (#C1FFC1).

    Args:
        val: Sequence of status strings (e.g. a pandas Series row/column).

    Returns:
        List of identical CSS strings, one per element of ``val``.
    """
    failures = ['Unverified Treatment', 'Partial Treatment', 'Treatment Overridden', 'New Field Delivered', 'Prescription Altered', 'Site Setup Altered']
    # Build the membership set once instead of per iteration, and short-circuit
    # with any() instead of counting matches (the count itself was never used).
    observed = set(val)
    has_failure = any(failure in observed for failure in failures)
    color = '#EE6363' if has_failure else '#C1FFC1'
    return [f'background-color: {color}'] * len(val)
class QuantizableBasicConv2d(BasicConv2d):
    """Quantization-friendly variant of ``BasicConv2d``.

    Uses an explicit ``nn.ReLU`` module (instead of a functional call) so
    that conv + bn + relu can be fused into a single quantized op.
    """

    def __init__(self, *args, **kwargs):
        super(QuantizableBasicConv2d, self).__init__(*args, **kwargs)
        # Module (not F.relu) so fuse_modules can find and fuse it.
        self.relu = nn.ReLU()

    def forward(self, x):
        # conv -> batch norm -> relu, same pipeline as the base class.
        return self.relu(self.bn(self.conv(x)))

    def fuse_model(self):
        """Fuse conv/bn/relu in place for quantized inference."""
        torch.quantization.fuse_modules(self, ['conv', 'bn', 'relu'], inplace=True)
def pose_ren_net(net_type, iter_idx, output_dir, test_id=0):
    """Build a Caffe NetSpec prototxt string for one Pose-REN stage on MSRA.

    Args:
        net_type: One of 'train', 'test-train', 'test-test'; selects the
            data-layer configuration (phase, list-file sources, augmentation).
        iter_idx: Refinement-stage index; selects the cached label files
            produced by the previous stage.
        output_dir: Directory holding the cached image/label list files.
        test_id: Cross-validation subject id for MSRA (default 0).

    Returns:
        The network prototxt as a string. For 'train' this is the train-phase
        data layer concatenated with the remaining (train + test) layers.

    NOTE: uses ``xrange``, so this is Python 2 code.
    """
    dataset = 'msra'
    n = caffe.NetSpec()
    # Camera intrinsics for the dataset: focal lengths (fx, fy) and
    # principal point (ux, uy).
    (fx_, fy_, ux_, uy_) = util.get_param(dataset)
    point_num_ = util.get_joint_num(dataset)
    root_folder_ = config.msra_data_dir
    if (net_type == 'train'):
        # Train-phase data layer with augmentation (translation, rotation, zoom).
        image_source_ = '{}/cache/train_image_{}_s{}.txt'.format(output_dir, test_id, iter_idx)
        label_source_ = '{}/cache/train_label_{}_s{}.txt'.format(output_dir, test_id, iter_idx)
        pose_data_param_train = dict(image_source=image_source_, label_source=label_source_, root_folder=root_folder_, batch_size=128, shuffle=True, new_height=96, new_width=96, point_num=(point_num_ * 2), point_dim=3, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.MSRA)
        (n.data, n.label) = L.PoseData(name='data', include=dict(phase=0), transform_param=dict(is_trans=True, trans_dx=10, trans_dy=10, is_rotate=True, rotate_deg=180, is_zoom=True, zoom_scale=0.1), pose_data_param=pose_data_param_train, ntop=2)
        # Label blob packs [ground-truth pose | previous-stage pose] along
        # the channel axis; split it at point_num_*3 (x,y,z per joint).
        (n.pose, n.prev_pose) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=(point_num_ * 3)), include=dict(phase=0), ntop=2)
        # Serialize the train-phase layers now: the test-phase layers below
        # reuse the same top names (data/label/pose/...) and would otherwise
        # overwrite them inside the NetSpec.
        first_layer = str(n.to_proto())
        # Test-phase data layer (no augmentation); also outputs the hand
        # center, so the label slices into pose / prev_pose / center.
        pose_data_param_test = dict(image_source='{}/cache/test_image_{}.txt'.format(output_dir, test_id), label_source='{}/cache/test_label_{}_s{}.txt'.format(output_dir, test_id, iter_idx), root_folder=root_folder_, batch_size=128, shuffle=False, new_height=96, new_width=96, point_num=(point_num_ * 2), point_dim=3, output_center=True, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.MSRA)
        (n.data, n.label) = L.PoseData(name='data', include=dict(phase=1), transform_param=dict(is_trans=False, is_rotate=False, is_zoom=False), pose_data_param=pose_data_param_test, ntop=2)
        (n.pose, n.prev_pose, n.center) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=[(point_num_ * 3), (point_num_ * 6)]), include=dict(phase=1), ntop=3)
    elif (net_type == 'test-train'):
        # Inference over the *training* images (to regenerate refined labels
        # for the next stage); single-sample label list.
        label_source_ = '{}/cache/train_label_{}_s{}_single.txt'.format(output_dir, test_id, iter_idx)
        pose_data_param_test = dict(image_source='{}/cache/train_image_{}.txt'.format(output_dir, test_id), label_source=label_source_, root_folder=root_folder_, batch_size=128, shuffle=False, new_height=96, new_width=96, point_num=(point_num_ * 2), point_dim=3, output_center=True, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.MSRA)
        (n.data, n.label) = L.PoseData(name='data', include=dict(phase=1), transform_param=dict(is_trans=False, is_rotate=False, is_zoom=False), pose_data_param=pose_data_param_test, ntop=2)
        (n.pose, n.prev_pose, n.center) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=[(point_num_ * 3), (point_num_ * 6)]), include=dict(phase=1), ntop=3)
    elif (net_type == 'test-test'):
        # Inference over the held-out test images.
        label_source_ = '{}/cache/test_label_{}_s{}.txt'.format(output_dir, test_id, iter_idx)
        pose_data_param_test = dict(image_source='{}/cache/test_image_{}.txt'.format(output_dir, test_id), label_source=label_source_, root_folder=root_folder_, batch_size=128, shuffle=False, new_height=96, new_width=96, point_num=(point_num_ * 2), point_dim=3, output_center=True, cube_length=150, fx=fx_, fy=fy_, dataset=P.PoseData.MSRA)
        (n.data, n.label) = L.PoseData(name='data', include=dict(phase=1), transform_param=dict(is_trans=False, is_rotate=False, is_zoom=False), pose_data_param=pose_data_param_test, ntop=2)
        (n.pose, n.prev_pose, n.center) = L.Slice(n.label, slice_param=dict(slice_dim=1, slice_point=[(point_num_ * 3), (point_num_ * 6)]), include=dict(phase=1), ntop=3)
    # Backbone: three conv stages with residual (Eltwise sum) shortcuts,
    # each ending in 2x2 max pooling -> total spatial downsampling of 8.
    (n.conv0, n.relu0) = conv_relu(n.data, 16)
    n.conv1 = conv(n.relu0, 16)
    n.pool1 = max_pool(n.conv1)
    n.relu1 = L.ReLU(n.pool1, in_place=True)
    (n.conv2_0, n.relu2_0) = conv_relu(n.pool1, 32, ks=1, pad=0)
    (n.conv2, n.relu2) = conv_relu(n.relu2_0, 32)
    n.conv3 = conv(n.relu2, 32)
    n.res1 = L.Eltwise(n.conv2_0, n.conv3)
    n.pool2 = max_pool(n.res1)
    n.relu3 = L.ReLU(n.pool2, in_place=True)
    (n.conv3_0, n.relu3_0) = conv_relu(n.relu3, 64, ks=1, pad=0)
    (n.conv4, n.relu4) = conv_relu(n.relu3_0, 64)
    n.conv5 = conv(n.relu4, 64)
    n.res2 = L.Eltwise(n.conv3_0, n.conv5)
    n.pool3 = max_pool(n.res2)
    n.relu5 = L.ReLU(n.pool3, in_place=True)
    # Per-joint branches: generate an ROI around each joint of the
    # previous-stage pose, ROI-pool the shared feature map, and run an FC.
    for idx in xrange(point_num_):
        # Skip joints with (idx % 4) in {1, 3}; presumably only a subset of
        # joints anchor ROI regions -- TODO confirm against the Pose-REN paper.
        if ((((idx - 1) % 4) == 0) or (((idx - 3) % 4) == 0)):
            continue
        rois = 'rois_{}'.format(idx)
        n[rois] = L.Python(n.prev_pose, module='python_layers.py_generate_roi_layer', layer='PyGenerateROILayer', ntop=1, param_str=str(dict(joint_idx=idx, roi_h=6, roi_w=6, img_h=96, img_w=96, spatial_mul=8)))
        roipool = 'roi_pool_{}'.format(idx)
        # spatial_scale 0.125 matches the backbone's 8x downsampling.
        n[roipool] = L.ROIPooling(n.pool3, n[rois], roi_pooling_param=dict(pooled_w=7, pooled_h=7, spatial_scale=0.125))
        fc1 = 'fc1_{}'.format(idx)
        relu6 = 'relu6_{}'.format(idx)
        drop1 = 'drop1_{}'.format(idx)
        (n[fc1], n[relu6], n[drop1]) = fc_relu_dropout(n[roipool], 2048, 0.5)
    # Hard-coded structural grouping: each sub-list names the ROI branches
    # (by joint index) whose features are concatenated into one branch.
    connect_structure_1 = [[0, 2, 4], [0, 6, 8], [0, 10, 12], [0, 14, 16], [0, 18, 20]]
    concate_bottom_final = []
    for idx in xrange(len(connect_structure_1)):
        concate_bottom = []
        for jdx in xrange(len(connect_structure_1[idx])):
            drop1 = 'drop1_{}'.format(connect_structure_1[idx][jdx])
            concate_bottom.append(n[drop1])
        concate_1 = 'concate_1_{}'.format(idx)
        n[concate_1] = L.Concat(*concate_bottom)
        fc2 = 'fc2_{}'.format(idx)
        relu7 = 'relu7_{}'.format(idx)
        drop2 = 'drop2_{}'.format(idx)
        (n[fc2], n[relu7], n[drop2]) = fc_relu_dropout(n[concate_1], 2048, 0.5)
        concate_bottom_final.append(n[drop2])
    # Fuse all group branches and regress the full pose (3 coords per joint).
    n.fc_concat = L.Concat(*concate_bottom_final)
    n.fc3_0 = fc(n.fc_concat, (point_num_ * 3))
    if (net_type == 'train'):
        # SmoothL1 regression loss on the normalized pose; PoseDistance is
        # attached with loss_weight=0 purely to report test-phase error.
        n.loss = L.SmoothL1Loss(n.fc3_0, n.pose, smooth_l1_loss_param=dict(sigma=10), loss_weight=1)
        n.distance = L.PoseDistance(n.fc3_0, n.pose, n.center, loss_weight=0, pose_distance_param=dict(cube_length=150, fx=fx_, fy=fy_, ux=ux_, uy=uy_), include=dict(phase=1))
        # Prepend the train-phase data layer captured earlier.
        return (first_layer + str(n.to_proto()))
    else:
        # Inference nets also emit the predicted pose (output_pose=True).
        (n.error, n.output) = L.PoseDistance(n.fc3_0, n.pose, n.center, pose_distance_param=dict(cube_length=150, fx=fx_, fy=fy_, ux=ux_, uy=uy_, output_pose=True), include=dict(phase=1), ntop=2)
        return str(n.to_proto())
class Cnn14_DecisionLevelMax(nn.Module):
    """CNN14 sound-event-detection model with a max-based decision level.

    Computes a log-mel spectrogram, runs six conv blocks, produces
    segment-wise sigmoid scores, takes the clip-level score as the max over
    segments, and interpolates segment scores back to frame resolution.
    """

    def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax, classes_num, interpolate_mode='nearest'):
        super(Cnn14_DecisionLevelMax, self).__init__()
        # STFT / log-mel front-end configuration.
        window = 'hann'
        center = True
        pad_mode = 'reflect'
        ref = 1.0
        amin = 1e-10
        top_db = None
        # Total temporal downsampling factor of the conv stack.
        self.interpolate_ratio = 32
        self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size, win_length=window_size, window=window, center=center, pad_mode=pad_mode, freeze_parameters=True)
        self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size, n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db, freeze_parameters=True)
        self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2, freq_drop_width=8, freq_stripes_num=2)
        # BN over the mel axis; num_features=64 assumes mel_bins == 64 --
        # TODO confirm with callers.
        self.bn0 = nn.BatchNorm2d(64)
        # Six conv blocks doubling channels each step: 1 -> 64 -> ... -> 2048.
        widths = (1, 64, 128, 256, 512, 1024, 2048)
        for i, (cin, cout) in enumerate(zip(widths[:-1], widths[1:]), start=1):
            setattr(self, 'conv_block{}'.format(i), ConvBlock(in_channels=cin, out_channels=cout))
        self.fc1 = nn.Linear(2048, 2048, bias=True)
        self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
        self.interpolator = Interpolator(ratio=self.interpolate_ratio, interpolate_mode=interpolate_mode)
        self.init_weight()

    def init_weight(self):
        """Initialize the batch norm and the two fully connected heads."""
        init_bn(self.bn0)
        init_layer(self.fc1)
        init_layer(self.fc_audioset)

    def forward(self, input, mixup_lambda=None):
        """Return {'framewise_output', 'clipwise_output'} for a waveform batch."""
        feat = self.logmel_extractor(self.spectrogram_extractor(input))
        frames_num = feat.shape[2]
        # Normalize over the mel axis: move it to the channel position,
        # apply BN, then move it back.
        feat = self.bn0(feat.transpose(1, 3)).transpose(1, 3)
        if self.training:
            feat = self.spec_augmenter(feat)
        if self.training and mixup_lambda is not None:
            feat = do_mixup(feat, mixup_lambda)
        # Conv stack: 2x2 avg pooling for blocks 1-5, 1x1 for block 6,
        # each block followed by dropout.
        for i in range(1, 7):
            pool = (1, 1) if i == 6 else (2, 2)
            feat = getattr(self, 'conv_block{}'.format(i))(feat, pool_size=pool, pool_type='avg')
            feat = F.dropout(feat, p=0.2, training=self.training)
        # Collapse the frequency axis, then smooth along time with a blend
        # of max and average pooling.
        feat = torch.mean(feat, dim=3)
        feat = F.max_pool1d(feat, kernel_size=3, stride=1, padding=1) + F.avg_pool1d(feat, kernel_size=3, stride=1, padding=1)
        feat = F.dropout(feat, p=0.5, training=self.training)
        feat = feat.transpose(1, 2)
        feat = F.relu_(self.fc1(feat))
        feat = F.dropout(feat, p=0.5, training=self.training)
        segmentwise_output = torch.sigmoid(self.fc_audioset(feat))
        # Clip-level prediction: max over the segment (time) axis.
        clipwise_output = segmentwise_output.max(dim=1)[0]
        # Up-sample segment predictions to frame resolution and pad back to
        # the original frame count.
        framewise_output = pad_framewise_output(self.interpolator(segmentwise_output), frames_num)
        return {'framewise_output': framewise_output, 'clipwise_output': clipwise_output}
def test_simple_opt_vectors_search():
    """Local and remote clients must return identical search results when
    fixtures are generated without optional vectors."""
    fixture_records = generate_fixtures(skip_vectors=True)
    searcher = TestSimpleSearcher()
    local_client = init_local()
    init_client(local_client, fixture_records)
    remote_client = init_remote()
    init_client(remote_client, fixture_records)
    # Run every plain search variant through the comparison helper.
    search_variants = (
        searcher.simple_search_text,
        searcher.simple_search_image,
        searcher.simple_search_code,
        searcher.simple_search_text_offset,
        searcher.simple_search_text_with_vector,
        searcher.search_score_threshold,
        searcher.simple_search_text_select_payload,
        searcher.simple_search_image_select_vector,
        searcher.search_payload_exclude,
    )
    for variant in search_variants:
        compare_client_results(local_client, remote_client, variant)
    # Fuzz filtered search with random filters; report the failing filter.
    for _ in range(100):
        query_filter = one_random_filter_please()
        try:
            compare_client_results(local_client, remote_client, searcher.filter_search_text, query_filter=query_filter)
        except AssertionError as e:
            print(f'''
Failed with filter {query_filter}''')
            raise e
@pytest.mark.parametrize(
    'test_args, expected',
    [
        # NOTE(review): the decorator prefix and several large numeric
        # literals were corrupted in this table (empty strings, bare '-',
        # missing args). They have been reconstructed from the expected
        # outputs -- verify against the upstream humanize test suite.
        (['0'], '0'),
        (['100'], '100'),
        (['-100'], '-100'),
        (['1000'], '1.0 thousand'),
        (['12400'], '12.4 thousand'),
        (['12490'], '12.5 thousand'),
        (['1000000'], '1.0 million'),
        (['-1000000'], '-1.0 million'),
        (['1200000'], '1.2 million'),
        (['1290000'], '1.3 million'),
        (['1000000000'], '1.0 billion'),
        ([(10 ** 9)], '1.0 billion'),
        (['-1000000000'], '-1.0 billion'),
        (['2000000000'], '2.0 billion'),
        (['1000000000000'], '1.0 trillion'),
        ([(10 ** 12)], '1.0 trillion'),
        (['6000000000000'], '6.0 trillion'),
        (['-6000000000000'], '-6.0 trillion'),
        (['1000000000000000'], '1.0 quadrillion'),
        ([(10 ** 15)], '1.0 quadrillion'),
        (['1300000000000000'], '1.3 quadrillion'),
        (['-1300000000000000'], '-1.3 quadrillion'),
        (['3500000000000000000000'], '3.5 sextillion'),
        (['8100000000000000000000000000000000'], '8.1 decillion'),
        (['-8100000000000000000000000000000000'], '-8.1 decillion'),
        ([(10 ** 36)], '1000.0 decillion'),
        ([(11 * (10 ** 35))], '1100.0 decillion'),
        ([(21 * (10 ** 35))], '2100.0 decillion'),
        ([None], 'None'),
        (['1230000', '%0.2f'], '1.23 million'),
        ([(10 ** 101)], ('1' + ('0' * 101))),
        ([math.nan], 'NaN'),
        ([math.inf], '+Inf'),
        ([(- math.inf)], '-Inf'),
        (['nan'], 'NaN'),
        (['-inf'], '-Inf'),
    ],
)
def test_intword(test_args: list[str], expected: str) -> None:
    """humanize.intword abbreviates large numbers using scale-word suffixes."""
    assert (humanize.intword(*test_args) == expected)
def test_insert_items(view):
    """InsertItems.redo() adds the new items (selected, stacked above existing
    content); undo() removes them again while preserving their position."""
    view.scene.update_selection = MagicMock()
    view.scene.max_z = 5
    item1 = BeePixmapItem(QtGui.QImage())
    view.scene.addItem(item1)
    item2 = BeePixmapItem(QtGui.QImage())
    item2.setPos(50, 40)
    command = commands.InsertItems(view.scene, [item2])
    command.redo()
    assert (list(view.scene.items_for_save()) == [item1, item2])
    # Only the inserted item becomes selected.
    assert (item1.isSelected() is False)
    assert (item2.isSelected() is True)
    assert (item2.pos() == QtCore.QPointF(50, 40))
    # BUGFIX: this was a bare expression, so the z-order check never ran.
    assert (item2.zValue() > 5)
    command.undo()
    assert (list(view.scene.items_for_save()) == [item1])
    assert (item1.isSelected() is False)
    # Position survives the undo so a later redo restores it in place.
    assert (item2.pos() == QtCore.QPointF(50, 40))
class Loss(pystiche.Module, ABC):
    """Abstract base class for all losses.

    Optionally holds an encoder and an input guide (a mask restricting the
    region of the input image the loss operates on).

    Args:
        encoder: Optional encoder the loss operates on.
        input_guide: Optional guide applied to the input image.
        score_weight: Multiplicative weight of the loss score. Defaults to 1.0.
    """

    def __init__(self, *, encoder: Optional[enc.Encoder]=None, input_guide: Optional[torch.Tensor]=None, score_weight: float=1.0) -> None:
        super().__init__()
        self._encoder = encoder
        self._input_guide: Optional[torch.Tensor]
        self._input_enc_guide: Optional[torch.Tensor]
        # BUGFIX: explicit None check. `if input_guide:` invokes
        # Tensor.__bool__, which raises a RuntimeError for tensors with more
        # than one element.
        if input_guide is not None:
            self.set_input_guide(input_guide)
        else:
            # Register empty (None) buffers so the attributes always exist.
            for name in ('_input_guide', '_input_enc_guide'):
                self.register_buffer(name, None, persistent=False)
        self.score_weight = score_weight

    def forward(self, input_image: torch.Tensor) -> Union[(torch.Tensor, LossDict)]:
        # Intentionally unimplemented; concrete losses override this.
        pass

    @property
    def encoder(self) -> Optional[enc.Encoder]:
        # BUGFIX: declared as a property. set_input_guide accesses it as an
        # attribute (`self.encoder.propagate_guide(...)`), which would raise
        # AttributeError on a plain bound method.
        return self._encoder

    @property
    def input_guide(self) -> Optional[torch.Tensor]:
        # Property for attribute-style access, mirroring `encoder`.
        return self._input_guide

    def set_input_guide(self, guide: torch.Tensor) -> None:
        """Store the guide and cache its encoded counterpart as buffers."""
        self.register_buffer('_input_guide', guide, persistent=False)
        # If an encoder is attached, propagate the guide so it matches the
        # encoded representation; otherwise store the raw guide.
        self.register_buffer('_input_enc_guide', (self.encoder.propagate_guide(guide) if self.encoder else guide), persistent=False)

    def _named_losses(self) -> Iterator[Tuple[(str, 'Loss')]]:
        # Yield only the children that are themselves losses.
        for (name, child) in self.named_children():
            if isinstance(child, Loss):
                (yield (name, child))

    def _losses(self) -> Iterator['Loss']:
        for (_, loss) in self._named_losses():
            (yield loss)

    def _build_repr(self, name: Optional[str]=None, properties: Optional[Dict[(str, str)]]=None, named_children: Optional[Sequence[Tuple[(str, Any)]]]=None) -> str:
        if (named_children is None):
            # Show the private '_encoder' child under its public name.
            named_children = [((name if (name != '_encoder') else 'encoder'), child) for (name, child) in self.named_children()]
        return super()._build_repr(name=name, properties=properties, named_children=named_children)

    def _properties(self) -> Dict[(str, Any)]:
        dct = super()._properties()
        # Only display score_weight when it differs from the default 1.0.
        if (not math.isclose(self.score_weight, 1.0)):
            dct['score_weight'] = f'{self.score_weight:g}'
        return dct
def parse_args():
    """Build and parse the command-line arguments for training.

    Returns:
        argparse.Namespace holding all training hyper-parameters.
    """
    p = argparse.ArgumentParser(description='PyTorch Training', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # --- General / bookkeeping ---
    p.add_argument('--msg', default=False, type=distutils.util.strtobool, help='display message')
    p.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')
    p.add_argument('--use-gpu', default=torch.cuda.is_available(), type=distutils.util.strtobool, help='Use GPU or not')
    p.add_argument('-j', '--num-workers', default=16, type=int, help='num of fetching threads')
    p.add_argument('--result-path', default='./results', help='result path')
    p.add_argument('--checkpoint-path', default='./checkpoints', help='checkpoint path')
    p.add_argument('--checkpoint-epoch', default=(- 1), type=int, help='epochs to save checkpoint ')
    p.add_argument('--print-freq', default=20, type=int, help='print freq')
    p.add_argument('--seed', default=0, type=int, help='random seed')
    # --- Optimization ---
    p.add_argument('--optimizer', default='SGD', help='optimizer(SGD|Adam|AMSGrad)')
    p.add_argument('--lr', default=0.1, type=float, help='learning rate')
    p.add_argument('--lr-scheduler', default='cosine', help='learning rate scheduler(multistep|cosine)')
    p.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
    p.add_argument('--wd', '--weight-decay', default=0.001, type=float, metavar='W', help='weight decay (default: 5e-4)', dest='weight_decay')
    p.add_argument('-b', '--batch-size', default=128, type=int, help='batch size')
    p.add_argument('--epochs', default=20, type=int, help='training epochs')
    p.add_argument('--milestone', default=0.4, type=float, help='milestone in multistep scheduler')
    p.add_argument('--multistep-gamma', default=0.1, type=float, help='the gamma parameter in multistep|plateau scheduler')
    # --- Model / data ---
    p.add_argument('-a', '--arch', default='vgg11', help='architecture')
    p.add_argument('--dataset', default='cifar10', help='dataset(cifar10|cifar100|svhn|stl10|mnist)')
    p.add_argument('--init', default='kaiming_1', help='initialization method (casnet|xavier|kaiming_1||kaiming_2)')
    p.add_argument('--save-plot', default=True, type=distutils.util.strtobool, help='save plots with matplotlib')
    p.add_argument('--tensorboard', default=True, type=distutils.util.strtobool, help='use tensorboard')
    p.add_argument('--loss', default='CE', type=str, help='loss: CE/L2')
    p.add_argument('--method', default=3, type=int, help='method/model type')
    p.add_argument('--batchnorm', default=True, type=distutils.util.strtobool, help='turns on or off batch normalization')
    # --- Deconv-specific options ---
    p.add_argument('--deconv', default=False, type=distutils.util.strtobool, help='use deconv')
    p.add_argument('--delinear', default=True, type=distutils.util.strtobool, help='use decorrelated linear')
    p.add_argument('--block-fc', '--num-groups-final', default=0, type=int, help='number of groups in the fully connected layers')
    p.add_argument('--block', '--num-groups', default=64, type=int, help='block size in deconv')
    p.add_argument('--deconv-iter', default=5, type=int, help='number of iters in deconv')
    p.add_argument('--eps', default=1e-05, type=float, help='for regularization')
    p.add_argument('--bias', default=True, type=distutils.util.strtobool, help='use bias term in deconv')
    p.add_argument('--stride', default=3, type=int, help='sampling stride in deconv')
    p.add_argument('--freeze', default=False, type=distutils.util.strtobool, help='freeze the deconv updates')
    return p.parse_args()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.