code stringlengths 281 23.7M |
|---|
class TestRecordBatchTables(unittest.TestCase):
    """Unit tests for RecordBatchTables batching and remainder bookkeeping."""

    def setUp(self) -> None:
        # Every test table has a sorted integer 'pk' column and a constant 'sk' column.
        self.column_names = ['pk', 'sk']

    def _make_table(self, start, stop, fill='test'):
        """Build a two-column table with pk = [start, stop) and a constant sk value."""
        pk = pa.array(list(range(start, stop)))
        sk = pa.array([fill] * (stop - start))
        return pa.Table.from_arrays([pk, sk], names=self.column_names)

    def _make_grouped_tables(self, num_tables, records_per_table, fill='foo'):
        """Build `num_tables` consecutive, non-overlapping pk-sorted tables."""
        return [self._make_table(i * records_per_table, (i + 1) * records_per_table, fill)
                for i in range(num_tables)]

    def test_single_table_with_batches_and_remainder(self):
        min_records_batch = 8
        bt = RecordBatchTables(min_records_batch)
        bt.append(self._make_table(0, 10))
        # 10 records with batch size 8 -> one batch of 8 plus a remainder of 2.
        self.assertTrue(bt.has_batches())
        self.assertEqual(bt.batched_record_count, 8)
        self.assertTrue(_is_gte_batch_size_and_divisible(bt, min_records_batch))
        self.assertTrue(bt.has_remaining())
        self.assertEqual(bt.remaining_record_count, 2)
        self.assertTrue(_is_sorted(bt, self.column_names[0]))

    def test_single_table_with_no_remainder(self):
        min_records_batch = 5
        bt = RecordBatchTables(min_records_batch)
        bt.append(self._make_table(0, min_records_batch))
        # Record count equals the batch size, so nothing is left over.
        self.assertFalse(bt.has_remaining())
        self.assertTrue(_is_sorted(bt, self.column_names[0]))

    def test_single_table_with_only_batches(self):
        min_records_batch = 10
        bt = RecordBatchTables(min_records_batch)
        bt.append(self._make_table(0, min_records_batch))
        self.assertTrue(bt.has_batches())
        self.assertTrue(_is_gte_batch_size_and_divisible(bt, min_records_batch))
        self.assertFalse(bt.has_remaining())
        self.assertEqual(bt.batched_record_count, 10)
        self.assertEqual(bt.remaining_record_count, 0)
        self.assertTrue(_is_sorted(bt, self.column_names[0]))

    def test_single_table_with_only_remainder(self):
        min_records_batch = 11
        bt = RecordBatchTables(min_records_batch)
        bt.append(self._make_table(0, 10))
        # Fewer records than the batch size: everything stays in the remainder.
        self.assertFalse(bt.has_batches())
        self.assertTrue(bt.has_remaining())
        self.assertEqual(bt.batched_record_count, 0)
        self.assertEqual(bt.remaining_record_count, 10)
        self.assertTrue(_is_sorted(bt, self.column_names[0]))

    def test_grouped_tables_with_only_remainder(self):
        min_records_batch = 600
        bt = RecordBatchTables(min_records_batch)
        for table in self._make_grouped_tables(5, 100):
            bt.append(table)
        # 5 * 100 = 500 records < 600: no batch can be formed yet.
        self.assertFalse(bt.has_batches())
        self.assertTrue(bt.has_remaining())
        self.assertEqual(bt.remaining_record_count, 500)
        self.assertLess(bt.remaining_record_count, min_records_batch)
        self.assertTrue(_is_sorted(bt, self.column_names[0]))

    def test_grouped_tables_with_batches_and_remainder(self):
        min_records_batch = 450
        bt = RecordBatchTables(min_records_batch)
        for table in self._make_grouped_tables(5, 100):
            bt.append(table)
        self.assertTrue(bt.has_batches())
        self.assertTrue(_is_gte_batch_size_and_divisible(bt, min_records_batch))
        self.assertTrue(bt.has_remaining())
        self.assertEqual(bt.batched_record_count, 450)
        self.assertEqual(bt.remaining_record_count, 50)
        self.assertTrue((bt.batched_record_count % min_records_batch) == 0)
        self.assertLess(bt.remaining_record_count, min_records_batch)
        self.assertTrue(_is_sorted(bt, self.column_names[0]))

    def test_grouped_tables_with_smaller_batch_size_than_table_records(self):
        min_records_batch = 5
        bt = RecordBatchTables(min_records_batch)
        for table in self._make_grouped_tables(3, 39):
            bt.append(table)
        # 117 total records -> 115 batched (divisible by 5) and 2 remaining.
        self.assertTrue(_is_sorted(bt, self.column_names[0]))
        self.assertTrue(bt.has_batches())
        self.assertTrue(_is_gte_batch_size_and_divisible(bt, min_records_batch))
        self.assertEqual(bt.batched_record_count, 115)
        self.assertTrue((bt.batched_record_count % min_records_batch) == 0)
        self.assertTrue(bt.has_remaining())
        self.assertEqual(bt.remaining_record_count, 2)
        self.assertLess(bt.remaining_record_count, min_records_batch)

    def test_batched_tables_factory_from_input_tables(self):
        min_records_batch = 5
        bt = RecordBatchTables.from_tables(self._make_grouped_tables(3, 39), min_records_batch)
        # Bug fix: the original used assertTrue(type(bt), RecordBatchTables), which
        # treats the second argument as a failure message and therefore always passes.
        self.assertIsInstance(bt, RecordBatchTables)
        self.assertTrue(bt.has_batches())
        self.assertTrue(_is_gte_batch_size_and_divisible(bt, min_records_batch))
        self.assertEqual(bt.batched_record_count, 115)
        self.assertTrue((bt.batched_record_count % min_records_batch) == 0)
        self.assertTrue(bt.has_remaining())
        self.assertEqual(bt.remaining_record_count, 2)
        self.assertLess(bt.remaining_record_count, min_records_batch)
        self.assertTrue(_is_sorted(bt, self.column_names[0]))

    def test_clear(self):
        min_records_batch = 8
        bt = RecordBatchTables(min_records_batch)
        bt.append(self._make_table(0, 10))
        self.assertTrue(bt.has_batches())
        self.assertTrue(_is_gte_batch_size_and_divisible(bt, min_records_batch))
        self.assertEqual(bt.batched_record_count, 8)
        # Clearing drops the batches; the remainder is not asserted on here.
        bt.clear_batches()
        self.assertFalse(bt.has_batches())
        self.assertEqual(bt.batched_record_count, 0)

    def test_append_after_clear(self):
        min_records_batch = 8
        bt = RecordBatchTables(min_records_batch)
        bt.append(self._make_table(0, 10))
        self.assertTrue(bt.has_batches())
        self.assertTrue(_is_gte_batch_size_and_divisible(bt, min_records_batch))
        self.assertEqual(bt.batched_record_count, 8)
        prev_remainder_records = bt.remaining_record_count
        self.assertEqual(bt.remaining_record_count, 2)
        bt.clear_batches()
        self.assertFalse(bt.has_batches())
        self.assertEqual(bt.batched_record_count, 0)
        # Appending 10 more records: 2 leftovers + 10 new -> 8 batched, 4 remaining.
        bt.append(self._make_table(10, 20))
        self.assertEqual(bt.batched_record_count, 8)
        self.assertEqual(bt.remaining_record_count, 4)
        self.assertNotEqual(prev_remainder_records, bt.remaining_record_count)
        self.assertTrue(_is_sorted(bt, self.column_names[0]))
        bt.clear_remaining()
        self.assertFalse(bt.has_remaining())
        self.assertEqual(bt.remaining_record_count, 0)

    def test_evict(self):
        min_records_batch = 8
        bt = RecordBatchTables(min_records_batch)
        bt.append(self._make_table(0, 10))
        self.assertTrue(bt.has_batches())
        self.assertTrue(_is_gte_batch_size_and_divisible(bt, min_records_batch))
        self.assertTrue(bt.has_remaining())
        self.assertEqual(bt.batched_record_count, 8)
        self.assertEqual(bt.remaining_record_count, 2)
        prev_batched_records = bt.batched_record_count
        # Eviction hands back the batched tables and leaves only the remainder.
        evicted_tables = bt.evict()
        self.assertFalse(bt.has_batches())
        self.assertTrue(bt.has_remaining())
        self.assertEqual(bt.batched_record_count, 0)
        self.assertEqual(bt.remaining_record_count, 2)
        self.assertEqual(sum(len(t) for t in evicted_tables), prev_batched_records)
.functions
def test_deconcatenate_column_string_no_sep(dataframe):
    """Deconcatenating a string column without a separator must raise ValueError."""
    with pytest.raises(ValueError):
        concatenated = dataframe.concatenate_columns(column_names=['a', 'decorated-elephant'], sep='-', new_column_name='index')
        concatenated.deconcatenate_column(column_name='index', new_column_names=['A', 'B'])
class Merge(nn.Module):
    """Merges two parent SpineNet features into one target feature map.

    Each parent is first resampled (channels/scale adjusted) to the target
    block's shape, then the two results are summed element-wise.
    """

    def __init__(self, block_spec, norm_cfg, alpha, filter_size_scale):
        super(Merge, self).__init__()
        width = int(FILTER_SIZE_MAP[block_spec.level] * filter_size_scale)
        if block_spec.block_fn == Bottleneck:
            # Bottleneck blocks expand their output channels by 4x.
            width *= 4
        self.block = block_spec.block_fn
        self.resample_ops = nn.ModuleList()
        # One resampling op per parent block referenced by input_offsets.
        for offset in block_spec.input_offsets:
            parent = BlockSpec(*SPINENET_BLOCK_SPECS[offset])
            parent_width = int(FILTER_SIZE_MAP[parent.level] * filter_size_scale)
            level_gap = 2 ** (parent.level - block_spec.level)
            self.resample_ops.append(
                Resample(parent_width, width, level_gap, parent.block_fn, norm_cfg, alpha))

    def forward(self, inputs):
        # Exactly one input per resampling op is expected (two parents).
        assert len(inputs) == len(self.resample_ops)
        feat_a = self.resample_ops[0](inputs[0])
        feat_b = self.resample_ops[1](inputs[1])
        return feat_a + feat_b
class NeuralBuilder(nn.Module):
    """Base class for builders that turn parsed instances into NN computation graphs.

    Only ``get_param_g`` has behavior; every other method is a no-op hook that
    subclasses are expected to override.
    """

    def __init__(self, gnp):
        super(NeuralBuilder, self).__init__()
        # Shared parameter object (presumably a global network-parameter store
        # supplied by the framework — confirm against the caller).
        self.gnp = gnp

    def get_param_g(self):
        # Accessor for the shared parameter object.
        return self.gnp

    def generate_batches(self, train_insts, batch_size):
        # Hook: partition training instances into batches. No-op in the base class.
        pass

    def build_nn_graph(self, instance):
        # Hook: build the computation graph for a single instance.
        pass

    def build_nn_graph_batch(self, batch_input_seqs):
        # Hook: batched variant of build_nn_graph.
        pass

    def get_nn_score(self, network, parent_k):
        # Hook: score a single network node.
        pass

    def get_nn_score_batch(self, network, parent_k):
        # Hook: batched variant of get_nn_score.
        pass

    def build_node2nn_output(self, network):
        # Hook: map structured-network nodes to NN outputs.
        pass

    def build_node2nn_output_batch(self, network):
        # Hook: batched variant of build_node2nn_output.
        pass
def main(input_file, enable_trace=False):
    """Run ./arm_fuzz (ARM QNX rootfs) under Qiling and hand control to AFL.

    input_file: path AFL uses to deliver the current testcase.
    enable_trace: when True, keep target console output and print a
    per-instruction disassembly trace instead of fuzzing silently.
    """
    ql = Qiling(['./arm_fuzz'], '../../rootfs/arm_qnx', console=enable_trace)
    # Redirect the emulated process's stdin so testcases can be injected.
    ql.os.stdin = pipe.SimpleInStream(sys.stdin.fileno())
    if not enable_trace:
        # Silence the target while fuzzing for speed.
        ql.os.stdout = pipe.NullOutStream(sys.stdout.fileno())
        ql.os.stderr = pipe.NullOutStream(sys.stderr.fileno())

    def place_input_callback(ql: Qiling, input: bytes, _: int):
        # Called by AFL before each run: feed the testcase to the target's stdin.
        ql.os.stdin.write(input)
        return True

    def start_afl(_ql: Qiling):
        # Start the AFL forkserver once execution reaches the hook address.
        ql_afl_fuzz(_ql, input_file=input_file, place_input_callback=place_input_callback, exits=[ql.os.exit_point])

    LIBC_BASE = int(ql.profile.get('OS32', 'interp_address'), 16)
    # Turn the libc routine at this fixed offset into a hard crash so AFL
    # records it as a fault.
    ql.hook_address(callback=(lambda x: os.abort()), address=(LIBC_BASE + 229744))
    # FIXME(review): the right-hand side of this assignment was lost in the
    # original source ("main_addr ="), which made the file a syntax error.
    # The program entry point is used as a stand-in hook location — confirm
    # the real address of the target's main().
    main_addr = ql.os.entry_point
    ql.hook_address(callback=start_afl, address=main_addr)
    if enable_trace:
        md = ql.arch.disassembler
        # Mutable cell so the nested callbacks can count executed instructions.
        count = [0]

        def spaced_hex(data):
            # b'\xde\xad' -> 'de ad'
            return b' '.join((hexlify(data)[i:(i + 2)] for i in range(0, len(hexlify(data)), 2))).decode('utf-8')

        def disasm(count, ql, address, size):
            # Disassemble the instruction at `address` into one printable row.
            buf = ql.mem.read(address, size)
            try:
                for i in md.disasm(buf, address):
                    return '{:08X}\t{:08X}: {:24s} {:10s} {:16s}'.format(count[0], i.address, spaced_hex(buf), i.mnemonic, i.op_str)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
                # are not swallowed; log the error and keep tracing.
                import traceback
                print(traceback.format_exc())

        def trace_cb(ql, address, size, count):
            rtn = '{:100s}'.format(disasm(count, ql, address, size))
            print(rtn)
            count[0] += 1
        ql.hook_code(trace_cb, count)
    ql.run()
    # Skip interpreter teardown: AFL fork-server children must exit quickly.
    os._exit(0)
class TestTransformerEqualizer(unittest.TestCase):
    """Exercises Transformer.equalizer: effect arguments, logging, and validation."""

    def test_default(self):
        """A valid call records the sox effect args, logs it, and builds output."""
        transformer = new_transformer()
        transformer.equalizer(500.0, 2, 3)
        self.assertEqual(['equalizer', '500.000000', '2.000000q', '3.000000'], transformer.effects)
        self.assertEqual(['equalizer'], transformer.effects_log)
        self.assertEqual(True, transformer.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, transformer)

    def test_frequency_invalid(self):
        """A negative center frequency must be rejected."""
        transformer = new_transformer()
        with self.assertRaises(ValueError):
            transformer.equalizer(-20, 2, 3)

    def test_width_q_invalid(self):
        """A zero Q width must be rejected."""
        transformer = new_transformer()
        with self.assertRaises(ValueError):
            transformer.equalizer(500.0, 0, 3)

    def test_gain_db_invalid(self):
        """A non-numeric gain must be rejected."""
        transformer = new_transformer()
        with self.assertRaises(ValueError):
            transformer.equalizer(500.0, 0.5, None)
def attempt_distribution(factor, num, denum, out_type):
    """Try to distribute entries of ``num``/``denum`` into the additive ``factor``.

    ``factor`` is split into positive and negative addition terms; each
    candidate from ``num`` (as a numerator) and ``denum`` (as a denominator)
    is then greedily pushed into those terms via ``distribute_greedy``.

    Returns a 4-tuple ``(changed, new_factor, num, denum)``. Note that
    ``num`` and ``denum`` are mutated in place: successfully distributed
    entries are removed from them.
    """
    (pos_terms, neg_terms) = local_add_canonizer.get_num_denum(factor)
    if ((len(pos_terms) == 1) and (not neg_terms)):
        # A single positive term is not an addition: nothing to distribute over.
        return (False, factor, num, denum)
    # Decompose every addition term into its own (numerator, denominator) pair.
    pos_pairs = list(map(local_mul_canonizer.get_num_denum, pos_terms))
    neg_pairs = list(map(local_mul_canonizer.get_num_denum, neg_terms))
    change = False
    # Iterate over copies so removing from num/denum inside the loop is safe.
    for n in list(num):
        (success, pos_pairs, neg_pairs) = distribute_greedy(pos_pairs, neg_pairs, [n], [], out_type)
        if success:
            change = True
            num.remove(n)
    for d in list(denum):
        (success, pos_pairs, neg_pairs) = distribute_greedy(pos_pairs, neg_pairs, [], [d], out_type)
        if success:
            change = True
            denum.remove(d)
    if (not change):
        return (change, factor, num, denum)
    else:
        # Rebuild the addition from the updated per-term (num, denum) pairs.
        return (change, local_add_canonizer.merge_num_denum(list(itertools.starmap(local_mul_canonizer.merge_num_denum, pos_pairs)), list(itertools.starmap(local_mul_canonizer.merge_num_denum, neg_pairs))), num, denum)
class CreateTargetAssignerTest(tf.test.TestCase):
    """Smoke tests for the targetassigner.create_target_assigner factory."""

    def test_create_target_assigner(self):
        """Every known (detector, stage) combination yields a working assigner;
        unknown combinations raise ValueError."""
        corners = [[0.0, 0.0, 1.0, 1.0]]
        groundtruth = box_list.BoxList(tf.constant(corners))
        # Multibox needs per-prior stddevs, so attach them as an extra field.
        priors = box_list.BoxList(tf.constant(corners))
        prior_stddevs = tf.constant([[1.0, 1.0, 1.0, 1.0]])
        priors.add_field('stddev', prior_stddevs)
        multibox_ta = targetassigner.create_target_assigner('Multibox', stage='proposal')
        multibox_ta.assign(priors, groundtruth)
        anchors = box_list.BoxList(tf.constant(corners))
        faster_rcnn_proposals_ta = targetassigner.create_target_assigner('FasterRCNN', stage='proposal')
        faster_rcnn_proposals_ta.assign(anchors, groundtruth)
        fast_rcnn_ta = targetassigner.create_target_assigner('FastRCNN')
        fast_rcnn_ta.assign(anchors, groundtruth)
        faster_rcnn_detection_ta = targetassigner.create_target_assigner('FasterRCNN', stage='detection')
        faster_rcnn_detection_ta.assign(anchors, groundtruth)
        # An unknown detector/stage combination must be rejected.
        with self.assertRaises(ValueError):
            targetassigner.create_target_assigner('InvalidDetector', stage='invalid_stage')
def main():
    """Fine-tune, evaluate, and/or predict on a GLUE task with a TensorFlow
    sequence-classification model (Hugging Face `run_glue` TF example flow).
    """
    # --- Argument parsing: a lone .json argv is treated as a config file. ---
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if ((len(sys.argv) == 2) and sys.argv[1].endswith('.json')):
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Reports example name/framework usage to Hugging Face telemetry.
    send_example_telemetry('run_glue', model_args, data_args, framework='tensorflow')
    if (not (training_args.do_train or training_args.do_eval or training_args.do_predict)):
        exit('Must specify at least one of --do_train, --do_eval or --do_predict!')
    # --- Detect a resumable checkpoint in the output directory. ---
    checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir)):
        checkpoint = get_last_checkpoint(training_args.output_dir)
        if ((checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0)):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif ((checkpoint is not None) and (training_args.resume_from_checkpoint is None)):
            logger.info(f'Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    # --- Logging: verbose only on the main process. ---
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel((logging.INFO if is_main_process(training_args.local_rank) else logging.WARN))
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
    set_seed(training_args.seed)
    # --- Load the GLUE dataset; STS-B is the only regression task. ---
    datasets = load_dataset('glue', data_args.task_name, cache_dir=model_args.cache_dir, use_auth_token=(True if model_args.use_auth_token else None))
    is_regression = (data_args.task_name == 'stsb')
    if (not is_regression):
        label_list = datasets['train'].features['label'].names
        num_labels = len(label_list)
    else:
        num_labels = 1
    # --- Optionally load a user-supplied CSV/JSON file for prediction. ---
    if (data_args.predict_file is not None):
        logger.info('Preparing user-supplied file for predictions...')
        data_files = {'data': data_args.predict_file}
        for key in data_files.keys():
            logger.info(f'Loading a local file for {key}: {data_files[key]}')
        if data_args.predict_file.endswith('.csv'):
            user_dataset = load_dataset('csv', data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            user_dataset = load_dataset('json', data_files=data_files, cache_dir=model_args.cache_dir)
        # The user file must contain every text column the task requires.
        needed_keys = task_to_keys[data_args.task_name]
        for key in needed_keys:
            assert (key in user_dataset['data'].features), f'Your supplied predict_file is missing the {key} key!'
        datasets['user_data'] = user_dataset['data']
    # --- Model config and tokenizer. ---
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    (sentence1_key, sentence2_key) = task_to_keys[data_args.task_name]
    # Pad to max length up front, or let the collator pad per batch.
    if data_args.pad_to_max_length:
        padding = 'max_length'
    else:
        padding = False
    # --- Reconcile the model's label mapping with the dataset's labels. ---
    label_to_id = None
    if ((config.label2id != PretrainedConfig(num_labels=num_labels).label2id) and (not is_regression)):
        label_name_to_id = {k.lower(): v for (k, v) in config.label2id.items()}
        if (sorted(label_name_to_id.keys()) == sorted(label_list)):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
            logger.warning("Your model seems to have been trained with labels, but they don't match the dataset: ", f'''model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}.
Ignoring the model labels as a result.''')
            label_to_id = {label: i for (i, label) in enumerate(label_list)}
    if (label_to_id is not None):
        config.label2id = label_to_id
        config.id2label = {id: label for (label, id) in config.label2id.items()}
    elif ((data_args.task_name is not None) and (not is_regression)):
        config.label2id = {l: i for (i, l) in enumerate(label_list)}
        config.id2label = {id: label for (label, id) in config.label2id.items()}
    # Cap the sequence length at what the tokenizer/model supports.
    if (data_args.max_seq_length > tokenizer.model_max_length):
        logger.warning(f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for themodel ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.')
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_function(examples):
        # Tokenize one or two text columns depending on the task.
        args = ((examples[sentence1_key],) if (sentence2_key is None) else (examples[sentence1_key], examples[sentence2_key]))
        result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
        return result
    datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=(not data_args.overwrite_cache))
    if data_args.pad_to_max_length:
        data_collator = DefaultDataCollator(return_tensors='np')
    else:
        data_collator = DataCollatorWithPadding(tokenizer, return_tensors='np')
    metric = evaluate.load('glue', data_args.task_name)

    def compute_metrics(preds, label_ids):
        # GLUE metric over model logits: argmax for classification, squeeze for regression.
        preds = preds['logits']
        preds = (np.squeeze(preds) if is_regression else np.argmax(preds, axis=1))
        result = metric.compute(predictions=preds, references=label_ids)
        if (len(result) > 1):
            result['combined_score'] = np.mean(list(result.values())).item()
        return result
    with training_args.strategy.scope():
        # --- Model init: fresh weights or the detected checkpoint. ---
        if (checkpoint is None):
            model_path = model_args.model_name_or_path
        else:
            model_path = checkpoint
        model = TFAutoModelForSequenceClassification.from_pretrained(model_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
        # Sharding is handled when building the datasets, so disable auto-sharding.
        dataset_options = tf.data.Options()
        dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        num_replicas = training_args.strategy.num_replicas_in_sync
        # --- Convert every split into a tf.data.Dataset. ---
        tf_data = {}
        max_samples = {'train': data_args.max_train_samples, 'validation': data_args.max_eval_samples, 'validation_matched': data_args.max_eval_samples, 'validation_mismatched': data_args.max_eval_samples, 'test': data_args.max_predict_samples, 'test_matched': data_args.max_predict_samples, 'test_mismatched': data_args.max_predict_samples, 'user_data': None}
        for key in datasets.keys():
            if ((key == 'train') or key.startswith('validation')):
                assert ('label' in datasets[key].features), f'Missing labels from {key} data!'
            if (key == 'train'):
                shuffle = True
                batch_size = (training_args.per_device_train_batch_size * num_replicas)
            else:
                shuffle = False
                batch_size = (training_args.per_device_eval_batch_size * num_replicas)
            samples_limit = max_samples[key]
            dataset = datasets[key]
            if (samples_limit is not None):
                dataset = dataset.select(range(samples_limit))
            data = model.prepare_tf_dataset(dataset, shuffle=shuffle, batch_size=batch_size, collate_fn=data_collator, tokenizer=tokenizer)
            data = data.with_options(dataset_options)
            tf_data[key] = data
        # --- Optimizer with warmup/decay (placeholder 'adam' when not training). ---
        if training_args.do_train:
            num_train_steps = (len(tf_data['train']) * training_args.num_train_epochs)
            if (training_args.warmup_steps > 0):
                num_warmup_steps = training_args.warmup_steps
            elif (training_args.warmup_ratio > 0):
                num_warmup_steps = int((num_train_steps * training_args.warmup_ratio))
            else:
                num_warmup_steps = 0
            (optimizer, schedule) = create_optimizer(init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm)
        else:
            optimizer = 'adam'
        if is_regression:
            metrics = []
        else:
            metrics = ['accuracy']
        model.compile(optimizer=optimizer, metrics=metrics, jit_compile=training_args.xla)
        # --- Optional push-to-hub callback. ---
        push_to_hub_model_id = training_args.push_to_hub_model_id
        model_name = model_args.model_name_or_path.split('/')[(- 1)]
        if (not push_to_hub_model_id):
            push_to_hub_model_id = f'{model_name}-finetuned-glue'
        model_card_kwargs = {'finetuned_from': model_args.model_name_or_path, 'tasks': 'text-classification'}
        model_card_kwargs['task_name'] = data_args.task_name
        if training_args.push_to_hub:
            callbacks = [PushToHubCallback(output_dir=training_args.output_dir, hub_model_id=push_to_hub_model_id, hub_token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs)]
        else:
            callbacks = []
        # --- Training. ---
        if training_args.do_train:
            # MNLI has two validation sets, so skip in-loop validation for it.
            if (training_args.do_eval and (not (data_args.task_name == 'mnli'))):
                validation_data = tf_data['validation']
            else:
                validation_data = None
            model.fit(tf_data['train'], validation_data=validation_data, epochs=int(training_args.num_train_epochs), callbacks=callbacks)
        # --- Evaluation. ---
        if training_args.do_eval:
            logger.info('*** Evaluate ***')
            if (data_args.task_name == 'mnli'):
                # MNLI is scored separately on matched and mismatched sets.
                tasks = ['mnli', 'mnli-mm']
                tf_datasets = [tf_data['validation_matched'], tf_data['validation_mismatched']]
                raw_datasets = [datasets['validation_matched'], datasets['validation_mismatched']]
            else:
                tasks = [data_args.task_name]
                tf_datasets = [tf_data['validation']]
                raw_datasets = [datasets['validation']]
            for (raw_dataset, tf_dataset, task) in zip(raw_datasets, tf_datasets, tasks):
                eval_predictions = model.predict(tf_dataset)
                eval_metrics = compute_metrics(eval_predictions, raw_dataset['label'])
                print(f'Evaluation metrics ({task}):')
                print(eval_metrics)
                if (training_args.output_dir is not None):
                    output_eval_file = os.path.join(training_args.output_dir, 'all_results.json')
                    with open(output_eval_file, 'w') as writer:
                        writer.write(json.dumps(eval_metrics))
        # --- Prediction on test splits and/or the user-supplied file. ---
        if (training_args.do_predict or data_args.predict_file):
            logger.info('*** Predict ***')
            tasks = []
            tf_datasets = []
            raw_datasets = []
            if training_args.do_predict:
                if (data_args.task_name == 'mnli'):
                    tasks.extend(['mnli', 'mnli-mm'])
                    tf_datasets.extend([tf_data['test_matched'], tf_data['test_mismatched']])
                    raw_datasets.extend([datasets['test_matched'], datasets['test_mismatched']])
                else:
                    tasks.append(data_args.task_name)
                    tf_datasets.append(tf_data['test'])
                    raw_datasets.append(datasets['test'])
            if data_args.predict_file:
                tasks.append('user_data')
                tf_datasets.append(tf_data['user_data'])
                raw_datasets.append(datasets['user_data'])
            for (raw_dataset, tf_dataset, task) in zip(raw_datasets, tf_datasets, tasks):
                test_predictions = model.predict(tf_dataset)
                # Metrics are only computable when the split carries labels.
                if ('label' in raw_dataset):
                    test_metrics = compute_metrics(test_predictions, raw_dataset['label'])
                    print(f'Test metrics ({task}):')
                    print(test_metrics)
                if is_regression:
                    predictions_to_write = np.squeeze(test_predictions['logits'])
                else:
                    predictions_to_write = np.argmax(test_predictions['logits'], axis=1)
                output_predict_file = os.path.join(training_args.output_dir, f'predict_results_{task}.txt')
                with open(output_predict_file, 'w') as writer:
                    logger.info(f'***** Writing prediction results for {task} *****')
                    writer.write('index\tprediction\n')
                    for (index, item) in enumerate(predictions_to_write):
                        if is_regression:
                            writer.write(f'''{index}\t{item:3.3f}
''')
                        else:
                            # Map class ids back to human-readable labels.
                            item = model.config.id2label[item]
                            writer.write(f'''{index}\t{item}
''')
        # --- Save the final model (the hub callback already saves when pushing). ---
        if ((training_args.output_dir is not None) and (not training_args.push_to_hub)):
            model.save_pretrained(training_args.output_dir)
@pytest.fixture(scope='function')
def pvwatts_dc_pvwatts_ac_faiman_temp_system():
    """Return a fresh PVSystem using PVWatts DC/AC parameters and the Faiman
    temperature model.

    NOTE(review): the original source had only "(scope='function')" here — the
    `@pytest.fixture` decorator name was evidently lost during extraction and
    has been restored; confirm against the original fixtures file.
    """
    module_parameters = {'pdc0': 220, 'gamma_pdc': (- 0.003)}
    # Faiman module-temperature model coefficients.
    temp_model_params = {'u0': 25.0, 'u1': 6.84}
    inverter_parameters = {'pdc0': 220, 'eta_inv_nom': 0.95}
    system = PVSystem(surface_tilt=32.2, surface_azimuth=180, module_parameters=module_parameters, temperature_model_parameters=temp_model_params, inverter_parameters=inverter_parameters)
    return system
class py_dep(dep):
    """A Python-package dependency: importability check plus apt/pip installation."""

    def __init__(self, name, pip_only=False):
        # pip_only: never install the apt package; force installation via pip.
        self.pip_only = pip_only
        super(py_dep, self).__init__(name)

    def test(self):
        """Return True when the package can be imported under its module name."""
        # Some packages import under a different name than their install name.
        remap = {'pil': 'PIL', 'gevent-websocket': 'geventwebsocket', 'flask-socketio': 'flask_socketio', 'flask-babel': 'flask_babel', 'python-socketio': 'socketio', 'opengl': 'OpenGL'}
        name = remap.get(self.name, self.name)
        try:
            __import__(name)
        except Exception as e:
            # Bug fix: the caught exception was captured but a literal ' ' was
            # printed in its place, hiding the actual failure reason.
            print('failed to import', self.name, ':', e)
            return False
        return True

    def install(self):
        """Install via apt (python3-<name>), falling back to pip on failure."""
        apt_name = ('python3-' + self.name)
        if self.pip_only:
            # Remove a possibly conflicting apt package, then force the pip
            # path below (a truthy ret means "apt did not satisfy this dep").
            os.system(('sudo apt remove ' + apt_name))
            ret = True
        else:
            # os.system returns a non-zero (truthy) status on failure.
            ret = os.system(('sudo apt install -y ' + apt_name))
        if ret:
            print('failed to install via apt, trying with pip', self.name)
            # pip project names sometimes differ from the short dependency name.
            if (self.name == 'pil'):
                name = 'pillow'
            elif (self.name == 'flask-socketio'):
                name = 'flask-socketio==5'
            else:
                name = self.name
            ret = os.system(('sudo python3 -m pip install ' + name))
            if ret:
                print('failed to install dependency', name)
                return False
        return True
def save_checkpoint(state, is_best, checkpoint='checkpoints/', filename='checkpoint.pth.tar', snapshot=None):
    """Serialize *state* into the checkpoint directory.

    Always writes ``filename``; additionally copies it to an epoch-stamped
    snapshot every ``snapshot`` epochs (reads ``state.epoch``), and to
    ``model_best.pth.tar`` when *is_best* is true.
    """
    if not os.path.exists(checkpoint):
        os.makedirs(checkpoint)
    target = os.path.join(checkpoint, filename)
    torch.save(state, target)
    # Periodic snapshot: only when a snapshot interval is configured.
    if snapshot and state.epoch % snapshot == 0:
        stamped = os.path.join(checkpoint, 'checkpoint_{}.pth.tar'.format(state.epoch))
        shutil.copyfile(target, stamped)
    if is_best:
        shutil.copyfile(target, os.path.join(checkpoint, 'model_best.pth.tar'))
def test_has_dict():
    """Round-trip a fully populated HasDict through as_json / from_json."""
    # NOTE(review): '-0000-1111-0000-' is not a valid 32-hex-digit UUID string
    # and would normally make uuid.UUID raise — the literal looks truncated by
    # data extraction; confirm against the original test file.
    value = HasDict(10, {uuid.UUID('-0000-1111-0000-'): 15}, [RandovaniaGame.BLANK], [None], {}, datetime.datetime(2019, 1, 3, 2, 50, tzinfo=datetime.UTC), N(2403, True), (60, RandovaniaGame.METROID_PRIME_ECHOES, 'foo'))
    # Expected serialized form: enums by value, datetime as ISO-8601, UUID keys as str.
    data = {'a': 10, 'b': {'-0000-1111-0000-': 15}, 'c': ['blank'], 'd': [None], 'e': {}, 'f': '2019-01-03T02:50:00+00:00', 'g': {'a': 2403, 'b': True}, 'h': [60, 'prime2', 'foo']}
    # Serialization must be symmetric in both directions.
    assert (HasDict.from_json(data) == value)
    assert (value.as_json == data)
# NOTE(review): the bare string below looks like the remnant of a behave step
# decorator, e.g. ``@when('I assign {value} to font.{sub_super}script')`` — the
# ``@when`` appears to have been lost in extraction; confirm against the
# original steps file before relying on this as a registered step.
('I assign {value} to font.{sub_super}script')
def when_I_assign_value_to_font_sub_super(context, value, sub_super):
    """Step: set font.subscript / font.superscript to None, True, or False."""
    font = context.font
    # Map the step's wording onto the real attribute name and Python value.
    name = {'sub': 'subscript', 'super': 'superscript'}[sub_super]
    new_value = {'None': None, 'True': True, 'False': False}[value]
    setattr(font, name, new_value)
class L1_plus_perceptualLoss(nn.Module):
    """Weighted sum of an L1 pixel loss and a VGG19-feature perceptual loss."""

    def __init__(self, lambda_L1, lambda_perceptual, perceptual_layers, gpu_ids, percep_is_l1):
        super(L1_plus_perceptualLoss, self).__init__()
        self.lambda_L1 = lambda_L1
        self.lambda_perceptual = lambda_perceptual
        self.gpu_ids = gpu_ids
        # percep_is_l1 == 1 -> L1 distance in feature space, otherwise MSE.
        self.percep_is_l1 = percep_is_l1
        # NOTE(review): VGG19 weights are loaded from a hard-coded absolute
        # path — consider making this configurable.
        vgg19 = models.vgg19(pretrained=False)
        vgg19.load_state_dict(torch.load('/home/haihuam/CASD-main/dataset/fashion/vgg19-dcbb9e9d.pth'))
        vgg = vgg19.features
        # Keep only the feature layers up to (and including) `perceptual_layers`.
        self.vgg_submodel = nn.Sequential()
        for (i, layer) in enumerate(list(vgg)):
            self.vgg_submodel.add_module(str(i), layer)
            if (i == perceptual_layers):
                break
        self.vgg_submodel = self.vgg_submodel.cuda()
        print(self.vgg_submodel)

    def forward(self, inputs, targets):
        """Return (total, l1, perceptual) losses.

        Assumes inputs/targets are images scaled to [-1, 1] (they are mapped
        through (x + 1) / 2 below) — TODO confirm against callers.
        """
        if ((self.lambda_L1 == 0) and (self.lambda_perceptual == 0)):
            # Both terms disabled: return zeros without running the VGG model.
            return (Variable(torch.zeros(1)).cuda(), Variable(torch.zeros(1)), Variable(torch.zeros(1)))
        loss_l1 = (F.l1_loss(inputs, targets) * self.lambda_L1)
        # ImageNet normalization constants, broadcast to NCHW via resize.
        mean = torch.FloatTensor(3)
        mean[0] = 0.485
        mean[1] = 0.456
        mean[2] = 0.406
        mean = Variable(mean)
        mean = mean.resize(1, 3, 1, 1).cuda()
        std = torch.FloatTensor(3)
        std[0] = 0.229
        std[1] = 0.224
        std[2] = 0.225
        std = Variable(std)
        std = std.resize(1, 3, 1, 1).cuda()
        # Map from [-1, 1] to [0, 1], then apply ImageNet normalization.
        fake_p2_norm = ((inputs + 1) / 2)
        fake_p2_norm = ((fake_p2_norm - mean) / std)
        input_p2_norm = ((targets + 1) / 2)
        input_p2_norm = ((input_p2_norm - mean) / std)
        fake_p2_norm = self.vgg_submodel(fake_p2_norm)
        input_p2_norm = self.vgg_submodel(input_p2_norm)
        # Target features act as constants: no gradient flows into them.
        input_p2_norm_no_grad = input_p2_norm.detach()
        if (self.percep_is_l1 == 1):
            loss_perceptual = (F.l1_loss(fake_p2_norm, input_p2_norm_no_grad) * self.lambda_perceptual)
        else:
            loss_perceptual = (F.mse_loss(fake_p2_norm, input_p2_norm_no_grad) * self.lambda_perceptual)
        loss = (loss_l1 + loss_perceptual)
        return (loss, loss_l1, loss_perceptual)
class MaskRCNNLossComputation(object):
    """Computes the mask-head loss for Mask R-CNN style models."""

    def __init__(self, proposal_matcher, discretization_size):
        # proposal_matcher: callable mapping an IoU matrix to matched gt indices.
        # discretization_size: side length of the square mask target grid.
        self.proposal_matcher = proposal_matcher
        self.discretization_size = discretization_size

    def match_targets_to_proposals(self, proposal, target):
        """Match each proposal to a ground-truth box and carry over label/mask fields."""
        match_quality_matrix = boxlist_iou(target, proposal)
        matched_idxs = self.proposal_matcher(match_quality_matrix)
        # Only 'label' and 'mask2d' are needed downstream; drop other fields.
        target = target.copy_with_fields(['label', 'mask2d'])
        # clamp(min=0): below-threshold matches index gt 0 here; they are
        # zeroed out later via the BELOW_LOW_THRESHOLD check.
        matched_targets = target[matched_idxs.clamp(min=0)]
        matched_targets.add_field('matched_idxs', matched_idxs)
        return matched_targets

    def prepare_targets(self, proposals, targets):
        """Build per-image label vectors and rasterized mask targets."""
        labels = []
        masks = []
        for (proposals_per_image, targets_per_image) in zip(proposals, targets):
            matched_targets = self.match_targets_to_proposals(proposals_per_image, targets_per_image)
            matched_idxs = matched_targets.get_field('matched_idxs')
            labels_per_image = matched_targets.get_field('label')
            labels_per_image = labels_per_image.to(dtype=torch.int64)
            # Proposals matched below the low IoU threshold become background (0).
            neg_inds = (matched_idxs == Matcher.BELOW_LOW_THRESHOLD)
            labels_per_image[neg_inds] = 0
            # Mask targets are only produced for positive (foreground) proposals.
            positive_inds = torch.nonzero((labels_per_image > 0), as_tuple=False).squeeze(1)
            segmentation_masks = matched_targets.get_field('mask2d')
            segmentation_masks = segmentation_masks[positive_inds]
            positive_proposals = proposals_per_image[positive_inds]
            masks_per_image = project_masks_on_boxes(segmentation_masks, positive_proposals, self.discretization_size)
            labels.append(labels_per_image)
            masks.append(masks_per_image)
        return (labels, masks)

    def __call__(self, proposals, mask_logits, targets):
        """Return the binary cross-entropy mask loss over positive proposals."""
        (labels, mask_targets) = self.prepare_targets(proposals, targets)
        labels = cat(labels, dim=0)
        mask_targets = cat(mask_targets, dim=0)
        positive_inds = torch.nonzero((labels > 0), as_tuple=False).squeeze(1)
        labels_pos = labels[positive_inds]
        # No positive samples: return a zero that keeps the graph connected.
        if (mask_targets.numel() == 0):
            return (mask_logits.sum() * 0)
        # Select, per positive proposal, the logit map of its matched class.
        mask_loss = F.binary_cross_entropy_with_logits(mask_logits[(positive_inds, labels_pos)], mask_targets)
        return mask_loss
class ViltConfig(PretrainedConfig):
    """Configuration for a ViLT (Vision-and-Language Transformer) model.

    Holds text-embedding sizes, transformer dimensions, and image/patch
    settings; all constructor arguments are plain hyperparameters copied
    onto same-named attributes.
    """
    model_type = 'vilt'

    def __init__(self, vocab_size=30522, type_vocab_size=2, modality_type_vocab_size=2, max_position_embeddings=40, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=384, patch_size=32, num_channels=3, qkv_bias=True, max_image_length=(- 1), tie_word_embeddings=False, num_images=(- 1), **kwargs):
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
        # Text/embedding vocabulary sizes.
        self.vocab_size = vocab_size
        self.type_vocab_size = type_vocab_size
        self.modality_type_vocab_size = modality_type_vocab_size
        self.max_position_embeddings = max_position_embeddings
        # Transformer encoder dimensions.
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Vision-side settings (patch embedding of the input image).
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        # -1 sentinel: no cap on image tokens / number of images.
        self.max_image_length = max_image_length
        self.num_images = num_images
def printalltokens(args):
    """Print every token accessible from the current thread.

    Expected *args* keys: 'pid', 'filter' (JSON string; '' = no filter),
    'printFull', 'printLinked', '_useThreadMethod', and optionally
    'currentpidonly' which overrides 'pid' with the current process id.
    """
    printT('All tokens which are accessible from current thread:')
    # dict.get collapses the original "key present and == True" check;
    # == True is kept deliberately to preserve the strict comparison.
    if args.get('currentpidonly') == True:  # noqa: E712
        args['pid'] = GetCurrentProcessId()
    imp = Impersonate()
    if args['filter'] == '':
        imp.printAllTokensAccessible(targetPID=args['pid'], printFull=args['printFull'], printLinked=args['printLinked'], _useThreadMethod=args['_useThreadMethod'])
    else:
        # Renamed local (was `filter`) to avoid shadowing the builtin.
        token_filter = json.loads(args['filter'])
        imp.printTokensAccessibleFilter(targetPID=args['pid'], filter=token_filter, printFull=args['printFull'], printLinked=args['printLinked'], _useThreadMethod=args['_useThreadMethod'])
def gen_train_facts(data_file_name, truth_dir):
    """Collect (head_name, tail_name, relation) fact triples from a training file.

    The result is cached as a ``.fact`` JSON file inside *truth_dir*; later
    calls load the cache instead of re-scanning the data file.

    :param data_file_name: path to a ``train_*.json`` DocRED-style file.
    :param truth_dir: directory where the ``.fact`` cache is stored.
    :return: set of (head entity name, tail entity name, relation) tuples.
    """
    fact_file_name = data_file_name[data_file_name.find('train_'):]
    fact_file_name = os.path.join(truth_dir, fact_file_name.replace('.json', '.fact'))

    # Fast path: cached fact file from a previous run.
    if os.path.exists(fact_file_name):
        # Use context managers so file handles are closed (the original
        # leaked every handle it opened).
        with open(fact_file_name) as fact_file:
            triples = json.load(fact_file)
        return {tuple(x) for x in triples}

    fact_in_train = set()
    with open(data_file_name) as data_file:
        ori_data = json.load(data_file)
    for data in ori_data:
        vertexSet = data['vertexSet']
        for label in data['labels']:
            rel = label['r']
            # Every (head mention, tail mention) pair yields a fact.
            for n1 in vertexSet[label['h']]:
                for n2 in vertexSet[label['t']]:
                    fact_in_train.add((n1['name'], n2['name'], rel))
    # Persist the cache for future calls.
    with open(fact_file_name, 'w') as fact_file:
        json.dump(list(fact_in_train), fact_file)
    return fact_in_train
def main():
    """Configure a submitit SLURM executor from CLI args and submit training."""
    args = parse_args()
    if args.job_dir == '':
        # '%j' is expanded by submitit to the SLURM job id.
        args.job_dir = get_shared_folder() / '%j'

    executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)

    extra = {}
    if args.use_volta32:
        extra['slurm_constraint'] = 'volta32gb'
    if args.comment:
        extra['slurm_comment'] = args.comment

    executor.update_parameters(
        mem_gb=40 * args.ngpus,
        gpus_per_node=args.ngpus,
        tasks_per_node=args.ngpus,  # one task per GPU
        cpus_per_task=10,
        nodes=args.nodes,
        timeout_min=args.timeout * 60,  # args.timeout appears to be hours — confirm
        slurm_partition=args.partition,
        slurm_signal_delay_s=120,
        **extra,
    )
    executor.update_parameters(name=args.job_name)

    args.dist_url = get_init_file().as_uri()
    args.output_dir = args.job_dir

    trainer = Trainer(args)
    job = executor.submit(trainer)
    print('Submitted job_id:', job.job_id)
class CRF(nn.Module):
    """Linear-chain CRF tagging head.

    Projects hidden states into tag space and scores/decodes tag sequences
    with an underlying CRF layer.
    """

    def __init__(self, args, hidden_size: int, device: torch.device):
        super(CRF, self).__init__()
        self.modelname = 'crf'
        self.hidden_size = hidden_size
        self._crf = crf(args.tagger_classes, batch_first=True).to(device)
        self._hidden2tag = Linear(self.hidden_size, args.tagger_classes)

    def forward(self, input: torch.Tensor, gt: torch.Tensor, mask: torch.Tensor = None) -> torch.Tensor:
        """Return the mean negative log-likelihood of *gt* under the CRF."""
        emissions = self._hidden2tag(input)
        log_likelihood = self._crf(emissions, gt, reduction='mean', mask=mask)
        return -log_likelihood

    def decode(self, input: torch.Tensor) -> np.ndarray:
        """Viterbi-decode the best tag sequence for each input."""
        emissions = self._hidden2tag(input)
        return np.array(self._crf.decode(emissions))
def calc_gradient_penalty(x, y_pred):
    """WGAN-GP style penalty: mean squared deviation of each sample's
    input-gradient L2 norm from 1.

    create_graph=True keeps the penalty differentiable so it can be part
    of the training loss.
    """
    (grad,) = torch.autograd.grad(
        outputs=y_pred,
        inputs=x,
        grad_outputs=torch.ones_like(y_pred),
        create_graph=True,
    )
    per_sample = grad.flatten(start_dim=1)
    norms = per_sample.norm(2, dim=1)
    return ((norms - 1) ** 2).mean()
def train(num_epochs, model, optimizer, train_loader, val_loader, fabric, accumulation_steps):
    """Run the fine-tuning loop under Lightning Fabric.

    Accumulates gradients over *accumulation_steps* batches and reports
    2-class multiclass train/validation accuracy per epoch.
    """
    for epoch in range(num_epochs):
        # Fresh accuracy metric each epoch, placed on the Fabric device.
        train_acc = torchmetrics.Accuracy(task='multiclass', num_classes=2).to(fabric.device)
        model.train()
        for (batch_idx, batch) in enumerate(train_loader):
            model.train()  # restores train mode after the eval() at the loop tail
            outputs = model(batch['input_ids'], attention_mask=batch['attention_mask'], labels=batch['label'])
            # Scale the loss so the accumulated gradient averages the window.
            outputs['loss'] /= accumulation_steps
            fabric.backward(outputs['loss'])
            # NOTE(review): also fires at batch_idx == 0, so the very first
            # optimizer step uses a single-batch gradient — confirm intended.
            if ((batch_idx % accumulation_steps) == 0):
                optimizer.step()
                optimizer.zero_grad()
            if (not (batch_idx % 300)):
                # Logged value is the accumulation-scaled loss.
                fabric.print(f"Epoch: {(epoch + 1):04d}/{num_epochs:04d} | Batch {batch_idx:04d}/{len(train_loader):04d} | Loss: {outputs['loss']:.4f}")
            # Track train accuracy on the just-seen batch in eval mode.
            model.eval()
            with torch.no_grad():
                predicted_labels = torch.argmax(outputs['logits'], 1)
                train_acc.update(predicted_labels, batch['label'])
        # End-of-epoch validation pass.
        model.eval()
        with torch.no_grad():
            val_acc = torchmetrics.Accuracy(task='multiclass', num_classes=2).to(fabric.device)
            for batch in val_loader:
                outputs = model(batch['input_ids'], attention_mask=batch['attention_mask'], labels=batch['label'])
                predicted_labels = torch.argmax(outputs['logits'], 1)
                val_acc.update(predicted_labels, batch['label'])
            fabric.print(f'Epoch: {(epoch + 1):04d}/{num_epochs:04d} | Train acc.: {(train_acc.compute() * 100):.2f}% | Val acc.: {(val_acc.compute() * 100):.2f}%')
        # Reset metric state for the next epoch.
        (train_acc.reset(), val_acc.reset())
class Session():
    """Base class for cloud-provider credential sessions.

    NOTE(review): these methods take no ``self`` and are invoked as
    class-level utilities (e.g. ``Session.cls_from_path(path)`` inside
    ``from_path``); the @staticmethod/@classmethod decorators appear to
    have been lost and are restored here.
    """

    @classmethod
    def hascreds(cls, config):
        """Return whether *config* carries credentials (subclass hook)."""
        return NotImplemented

    def get_credential_options(self):
        """Return a dict of GDAL credential options (subclass hook)."""
        return NotImplemented

    @staticmethod
    def from_foreign_session(session, cls=None):
        """Wrap a foreign (e.g. boto3) session; DummySession if no cls given."""
        if (not cls):
            return DummySession()
        else:
            return cls(session)

    @staticmethod
    def cls_from_path(path):
        """Pick the Session subclass matching a dataset path's scheme."""
        if (not path):
            return DummySession
        path = _parse_path(path)
        if (isinstance(path, _UnparsedPath) or path.is_local):
            return DummySession
        elif (((path.scheme == 's3') or ('amazonaws.com' in path.path)) and (not ('X-Amz-Signature' in path.path))):
            # Pre-signed URLs (X-Amz-Signature) need no credentials, so they
            # fall through to the final DummySession branch.
            if (boto3 is not None):
                return AWSSession
            else:
                log.info('boto3 not available, falling back to a DummySession.')
                return DummySession
        elif ((path.scheme == 'oss') or ('aliyuncs.com' in path.path)):
            return OSSSession
        elif path.path.startswith('/vsiswift/'):
            return SwiftSession
        elif (path.scheme == 'az'):
            return AzureSession
        else:
            return DummySession

    @staticmethod
    def from_path(path, *args, **kwargs):
        """Instantiate the appropriate Session subclass for *path*."""
        return Session.cls_from_path(path)(*args, **kwargs)

    @staticmethod
    def aws_or_dummy(*args, **kwargs):
        """Return an AWSSession when boto3 is importable, else a DummySession."""
        if (boto3 is not None):
            return AWSSession(*args, **kwargs)
        else:
            return DummySession(*args, **kwargs)

    @staticmethod
    def from_environ(*args, **kwargs):
        """Create a session from environment credentials, downgrading to a
        DummySession when they have expired."""
        try:
            session = Session.aws_or_dummy(*args, **kwargs)
            session.credentials  # force credential resolution; may raise
        except RuntimeError:
            log.warning('Credentials in environment have expired. Creating a DummySession.')
            session = DummySession(*args, **kwargs)
        return session
class GSConv(nn.Module):
    """GSConv block: half the output channels come from a standard conv and
    half from a cheap depthwise 5x5 conv, followed by a channel shuffle."""

    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):
        super().__init__()
        half = c2 // 2
        self.cv1 = Conv(c1, half, k, s, None, g, act)
        # Depthwise conv (groups == channels) producing the second half.
        self.cv2 = Conv(half, half, 5, 1, None, half, act)

    def forward(self, x):
        dense = self.cv1(x)
        combined = torch.cat((dense, self.cv2(dense)), 1)
        # Channel shuffle: interleave the dense and depthwise halves.
        b, n, h, w = combined.data.size()
        pairs = combined.reshape((b * n) // 2, 2, h * w)
        pairs = pairs.permute(1, 0, 2)
        pairs = pairs.reshape(2, -1, n // 2, h, w)
        return torch.cat((pairs[0], pairs[1]), 1)
class ArgumentGroup(object):
    """Thin wrapper that registers CLI arguments under a named argparse group."""

    def __init__(self, parser, title, des):
        self._group = parser.add_argument_group(title=title, description=des)

    def add_arg(self, name, type, default, help, **kwargs):
        """Register ``--name``; bool arguments are parsed via str2bool and
        the default value is appended to the help text."""
        arg_type = str2bool if type == bool else type
        self._group.add_argument(
            '--' + name,
            default=default,
            type=arg_type,
            help=help + ' Default: %(default)s.',
            **kwargs,
        )
# NOTE(review): the original line began with ".supported(" — a syntax
# error; this was almost certainly "@pytest.mark.supported" with the
# "@pytest.mark" prefix lost during extraction. Restored here (pytest is
# already used by this test module's marks).
@pytest.mark.supported(
    only_if=(lambda backend: backend.cipher_supported(algorithms._IDEAInternal((b'\x00' * 16)), modes.CFB((b'\x00' * 8)))),
    skip_message='Does not support IDEA CFB',
)
class TestIDEAModeCFB:
    """Generated NIST-vector encryption tests for IDEA in CFB mode."""
    test_cfb = generate_encrypt_test(
        load_nist_vectors,
        os.path.join('ciphers', 'IDEA'),
        ['idea-cfb.txt'],
        (lambda key, **kwargs: algorithms._IDEAInternal(binascii.unhexlify(key))),
        (lambda iv, **kwargs: modes.CFB(binascii.unhexlify(iv))),
    )
def read_image(img_path):
    """Open *img_path* as an RGB PIL image, retrying on transient IOErrors.

    Raises IOError immediately when the path does not exist.
    """
    if not osp.exists(img_path):
        raise IOError('{} does not exist'.format(img_path))
    while True:
        try:
            img = Image.open(img_path).convert('RGB')
        except IOError:
            # Transient read failure (e.g. NFS hiccup): report and retry.
            print("IOError incurred when reading '{}'. Will redo. Don't worry. Just chill.".format(img_path))
            continue
        return img
def highs_solve_qp(P: Union[(np.ndarray, spa.csc_matrix)], q: np.ndarray, G: Optional[Union[(np.ndarray, spa.csc_matrix)]]=None, h: Optional[np.ndarray]=None, A: Optional[Union[(np.ndarray, spa.csc_matrix)]]=None, b: Optional[np.ndarray]=None, lb: Optional[np.ndarray]=None, ub: Optional[np.ndarray]=None, initvals: Optional[np.ndarray]=None, verbose: bool=False, **kwargs) -> Optional[np.ndarray]:
    """Solve a quadratic program with the HiGHS solver.

    Minimizes ``0.5 x^T P x + q^T x`` subject to ``G x <= h``, ``A x == b``
    and ``lb <= x <= ub`` (each constraint block is optional).

    :param P: symmetric cost matrix (dense or CSC sparse).
    :param q: linear cost vector.
    :param G: inequality constraint matrix, paired with *h*.
    :param A: equality constraint matrix, paired with *b*.
    :param lb: lower box bounds on x.
    :param ub: upper box bounds on x.
    :param initvals: warm-start values forwarded to the solver backend.
    :param verbose: enable solver output.
    :param kwargs: extra options forwarded to ``highs_solve_problem``.
    :return: optimal primal solution, or ``None`` when none was found.
    """
    problem = Problem(P, q, G, h, A, b, lb, ub)
    solution = highs_solve_problem(problem, initvals, verbose, **kwargs)
    return (solution.x if solution.found else None)
def test_push_pull_emoji_unicode(pusher, puller, unicode_emoji_images, liveserver_session, app_reloader):
    """Round-trip a tag whose image metadata contains unicode emoji."""
    credentials = ('devtable', 'password')
    repo_args = (liveserver_session, 'devtable', 'newrepo', 'latest', unicode_emoji_images)
    pusher.push(*repo_args, credentials=credentials)
    puller.pull(*repo_args, credentials=credentials)
def main():
    """CLI entry point: run the benchmark test set, inspect a problem or the
    results interactively, plot a metric, or generate a report."""
    args = parse_command_line_arguments()
    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    test_set = load_test_set(os.path.abspath(args.test_set_path))
    results = Results(find_results_file(args), test_set)
    if (args.command == 'run'):
        run(test_set, results, only_problem=args.problem, only_settings=args.settings, only_solver=args.solver, rerun=args.rerun, include_timeouts=args.include_timeouts)
    if (args.command == 'check_problem'):
        problem = test_set.get_problem(args.problem)
        _ = problem  # referenced interactively from the IPython shell below
        logging.info(f'Check out `problem` for the {args.problem} problem')
    if (args.command == 'check_results'):
        logging.info('Check out `results` for the full results data')
        df = results.df
        _ = df  # referenced interactively from the IPython shell below
        logging.info('Check out `df` for results as a pandas DataFrame')
    if (args.command in ['check_problem', 'check_results']):
        # Drop into an interactive shell unless we already run inside one.
        try:
            import IPython
            if (not IPython.get_ipython()):
                IPython.embed()
        except ImportError:
            logging.error('IPython not found, run this script in interactive mode')
    if (args.command == 'plot'):
        plot_metric(args.metric, results.df, args.settings, test_set, solvers=args.solvers, linewidth=args.linewidth, savefig=args.savefig, title=args.title)
    if (args.command in ['report', 'run']):
        # Note: 'run' intentionally falls through to report generation.
        report(args, results)
class ImageLogger(Callback):
    """Lightning callback that periodically renders model sample images to
    disk and to the experiment logger.

    NOTE(review): the bare ``_zero_only`` statements in the original were
    decorators (rank-zero-only guards) whose '@' was lost during
    extraction; restored below.
    """

    def __init__(self, batch_frequency, max_images, clamp=True, increase_log_steps=True):
        super().__init__()
        self.batch_freq = batch_frequency
        self.max_images = max_images
        # Dispatch table: logger class -> remote image-logging method.
        self.logger_log_images = {pl.loggers.WandbLogger: self._wandb, pl.loggers.TestTubeLogger: self._testtube}
        # Also log at powers of two below batch_freq (denser early logging).
        self.log_steps = [(2 ** n) for n in range((int(np.log2(self.batch_freq)) + 1))]
        if (not increase_log_steps):
            self.log_steps = [self.batch_freq]
        self.clamp = clamp

    @_zero_only
    def _wandb(self, pl_module, images, batch_idx, split):
        # Deliberately disabled; the unreachable code below is kept as
        # reference for what W&B logging would look like.
        raise ValueError('No way wandb')
        grids = dict()
        for k in images:
            grid = torchvision.utils.make_grid(images[k])
            grids[f'{split}/{k}'] = wandb.Image(grid)
        pl_module.logger.experiment.log(grids)

    @_zero_only
    def _testtube(self, pl_module, images, batch_idx, split):
        """Log image grids to a TestTube experiment."""
        for k in images:
            grid = torchvision.utils.make_grid(images[k])
            grid = ((grid + 1.0) / 2.0)  # map [-1, 1] -> [0, 1]
            tag = f'{split}/{k}'
            pl_module.logger.experiment.add_image(tag, grid, global_step=pl_module.global_step)

    @_zero_only
    def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx):
        """Save each image grid as a PNG under save_dir/images/<split>/."""
        root = os.path.join(save_dir, 'images', split)
        for k in images:
            grid = torchvision.utils.make_grid(images[k], nrow=4)
            grid = ((grid + 1.0) / 2.0)  # map [-1, 1] -> [0, 1]
            grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1)
            grid = grid.numpy()
            grid = (grid * 255).astype(np.uint8)
            filename = '{}_gs-{:06}_e-{:06}_b-{:06}.png'.format(k, global_step, current_epoch, batch_idx)
            path = os.path.join(root, filename)
            os.makedirs(os.path.split(path)[0], exist_ok=True)
            Image.fromarray(grid).save(path)

    def log_img(self, pl_module, batch, batch_idx, split='train'):
        """Sample images from the module (when due) and route them to disk
        and to the experiment logger."""
        if (self.check_frequency(batch_idx) and hasattr(pl_module, 'log_images') and callable(pl_module.log_images) and (self.max_images > 0)):
            logger = type(pl_module.logger)
            is_train = pl_module.training
            if is_train:
                pl_module.eval()
            with torch.no_grad():
                images = pl_module.log_images(batch, split=split)
            for k in images:
                # Cap the number of logged images per key.
                N = min(images[k].shape[0], self.max_images)
                images[k] = images[k][:N]
                if isinstance(images[k], torch.Tensor):
                    images[k] = images[k].detach().cpu()
                    if self.clamp:
                        images[k] = torch.clamp(images[k], -1.0, 1.0)
            self.log_local(pl_module.logger.save_dir, split, images, pl_module.global_step, pl_module.current_epoch, batch_idx)
            logger_log_images = self.logger_log_images.get(logger, (lambda *args, **kwargs: None))
            logger_log_images(pl_module, images, pl_module.global_step, split)
            if is_train:
                pl_module.train()

    def check_frequency(self, batch_idx):
        """True when batch_idx is a logging step; consumes one early step."""
        if (((batch_idx % self.batch_freq) == 0) or (batch_idx in self.log_steps)):
            try:
                self.log_steps.pop(0)
            except IndexError:
                pass
            return True
        return False

    def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        self.log_img(pl_module, batch, batch_idx, split='train')

    def on_validation_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
        self.log_img(pl_module, batch, batch_idx, split='val')
# NOTE(review): the bare ``_datapipe('lines_to_paragraphs')`` call in the
# original was a decorator whose '@' was lost during extraction; restored.
@_datapipe('lines_to_paragraphs')
class ParagraphAggregatorIterDataPipe(IterDataPipe[Tuple[(str, str)]]):
    """Aggregates consecutive lines of the same file into paragraphs.

    A paragraph ends at an empty line or when the filename changes; the
    buffered lines are joined with *joiner*.
    """

    def __init__(self, source_datapipe: IterDataPipe[Tuple[(str, T_co)]], joiner: Callable=_default_line_join) -> None:
        self.source_datapipe: IterDataPipe[Tuple[(str, T_co)]] = source_datapipe
        _check_unpickable_fn(joiner)  # joiner must be picklable for MP loading
        self.joiner: Callable = joiner
        self.buffer: List = []

    def __iter__(self) -> Iterator[Tuple[(str, str)]]:
        prev_filename = None
        for (filename, line) in self.source_datapipe:
            if (prev_filename is None):
                prev_filename = filename
            if (line and (prev_filename == filename)):
                # Same paragraph: keep accumulating.
                self.buffer.append(line)
            else:
                # Paragraph boundary: flush, then restart the buffer.
                if self.buffer:
                    (yield (prev_filename, self.joiner(self.buffer)))
                if line:
                    self.buffer = [line]
                else:
                    self.buffer = []
                prev_filename = filename
        # Flush the trailing paragraph, if any.
        if self.buffer:
            (yield (prev_filename, self.joiner(self.buffer)))

    def reset(self) -> None:
        self.buffer = []

    def __getstate__(self):
        state = (self.source_datapipe, self.joiner)
        if (IterDataPipe.getstate_hook is not None):
            return IterDataPipe.getstate_hook(state)
        return state

    def __setstate__(self, state):
        (self.source_datapipe, self.joiner) = state
        self.buffer = []

    def __del__(self):
        self.buffer.clear()
def test_filter_by_type(graphql_client, user, conference_factory, submission_factory, mock_has_ticket):
    """Submissions query filtered by type id returns only matching items."""
    graphql_client.force_login(user)
    conference = conference_factory(submission_types=('talk', 'workshop'))
    submission = submission_factory(conference=conference, custom_submission_type='talk')
    # Decoy submission of a different type that must be filtered out.
    submission_factory(conference=conference, custom_submission_type='workshop')
    mock_has_ticket(conference)
    query = 'query Submissions($code: String!, $types: [String!]) {\n submissions(code: $code, types: $types) {\n items {\n id\n }\n }\n }'
    resp = graphql_client.query(query, variables={'code': conference.code, 'types': [str(submission.type.id)]})
    assert (not resp.get('errors'))
    # Only the 'talk' submission should come back.
    assert (resp['data']['submissions']['items'] == [{'id': submission.hashid}])
def test_inparchive(tmpdir, multiproc_backend):
    """Workflow init can take its input files from a zip archive (file:// URL)."""
    workdir = os.path.join(str(tmpdir), 'workdir')
    inputarchive = 'file://{}/tests/testspecs/dynamic_glob/inputs/three_files.zip'.format(os.path.abspath(os.curdir))
    with steering_ctx(('local:' + workdir), 'workflow_frominit.yml', {'inputfiles': '*.txt'}, 'tests/testspecs/dynamic_glob', multiproc_backend, dataopts=dict(inputarchive=inputarchive)) as ys:
        # Disable trackers to keep the test run quiet and fast.
        ys.adage_argument(default_trackers=False)
class ClientTests(unittest.TestCase):
    """Tests for metrics.Client batch construction."""

    def test_make_batch(self):
        """client.batch() returns a Batch bound to the byte-encoded namespace."""
        fake_transport = mock.Mock(spec=metrics.NullTransport)
        metrics_client = metrics.Client(fake_transport, 'namespace')
        created = metrics_client.batch()
        self.assertIsInstance(created, metrics.Batch)
        self.assertEqual(created.namespace, b'namespace')
def parse_arguments():
    """Build the CLI parser, attach dataset- and model-specific argument
    groups based on a first partial parse, then parse fully."""
    parser = ArgumentParser()
    parser = add_experimental_args(parser)
    parser.add_argument('--dataset', type=str, default='argoverse', help='Name of dataset to use')
    parser.add_argument('--model-name', type=str, default='WIMP', help='Name of model to load')

    # Peek at dataset/model first so the right add_*_specific_args hooks run.
    known, _ = parser.parse_known_args()

    if known.dataset != 'argoverse':
        raise NotImplementedError
    parser = ArgoverseDataModule.add_data_specific_args(parser)

    if known.model_name != 'WIMP':
        raise NotImplementedError
    parser = WIMP.add_model_specific_args(parser)

    return parser.parse_args()
class GithubBuildTrigger(BuildTriggerHandler):
    """BuildTriggerHandler implementation for GitHub repositories.

    Manages the webhook/deploy-key lifecycle, listing of namespaces,
    sources, branches and tags, Dockerfile discovery, and translating
    GitHub webhook payloads or manual runs into prepared builds.

    NOTE(review): the bare ``_catch_ssl_errors`` statements in the original
    were decorators that lost their '@' during extraction; restored below.
    @classmethod/@staticmethod are restored as well —
    ``_build_metadata_for_commit`` is called via the class with three
    positional arguments, which raises TypeError unless ``cls`` is bound
    by @classmethod.
    """

    def _get_client(self):
        """Build a PyGithub client using the user's OAuth token when
        available, falling back to the application's client id/secret."""
        return Github(base_url=github_trigger.api_endpoint(), login_or_token=(self.auth_token if self.auth_token else github_trigger.client_id()), password=(None if self.auth_token else github_trigger.client_secret()), timeout=5)

    @classmethod
    def service_name(cls):
        return 'github'

    def is_active(self):
        # A webhook id is only stored after successful activation.
        return ('hook_id' in self.config)

    def get_repository_url(self):
        source = self.config['build_source']
        return github_trigger.get_public_url(source)

    @staticmethod
    def _get_error_message(ghe, default_msg):
        """Pull the first error message out of a GithubException, if any."""
        if (ghe.data.get('errors') and ghe.data['errors'][0].get('message')):
            return ghe.data['errors'][0]['message']
        return default_msg

    @_catch_ssl_errors
    def activate(self, standard_webhook_url):
        """Install a deploy key and webhook on the source repository.

        :return: (updated config, {'private_key': ...}).
        :raises TriggerActivationException: on any GitHub failure.
        """
        config = self.config
        new_build_source = config['build_source']
        gh_client = self._get_client()
        try:
            gh_repo = gh_client.get_repo(new_build_source)
        except UnknownObjectException:
            msg = ('Unable to find GitHub repository for source: %s' % new_build_source)
            raise TriggerActivationException(msg)
        (public_key, private_key) = generate_ssh_keypair()
        config['credentials'] = [{'name': 'SSH Public Key', 'value': public_key.decode('ascii')}]
        try:
            deploy_key = gh_repo.create_key(('%s Builder' % app.config['REGISTRY_TITLE']), public_key.decode('ascii'))
            config['deploy_key_id'] = deploy_key.id
        except GithubException as ghe:
            default_msg = ('Unable to add deploy key to repository: %s' % new_build_source)
            msg = GithubBuildTrigger._get_error_message(ghe, default_msg)
            raise TriggerActivationException(msg)
        webhook_config = {'url': standard_webhook_url, 'content_type': 'json'}
        try:
            hook = gh_repo.create_hook('web', webhook_config)
            config['hook_id'] = hook.id
            config['master_branch'] = gh_repo.default_branch
        except GithubException as ghe:
            default_msg = ('Unable to create webhook on repository: %s' % new_build_source)
            msg = GithubBuildTrigger._get_error_message(ghe, default_msg)
            raise TriggerActivationException(msg)
        return (config, {'private_key': private_key.decode('ascii')})

    @_catch_ssl_errors
    def deactivate(self):
        """Remove the deploy key and webhook; return the cleaned config."""
        config = self.config
        gh_client = self._get_client()
        try:
            repo = gh_client.get_repo(config['build_source'])
        except UnknownObjectException:
            msg = ('Unable to find GitHub repository for source: %s' % config['build_source'])
            raise TriggerDeactivationException(msg)
        except GitHubBadCredentialsException:
            msg = 'Unable to access repository to disable trigger'
            raise TriggerDeactivationException(msg)
        try:
            if config['deploy_key_id']:
                deploy_key = repo.get_key(config['deploy_key_id'])
                deploy_key.delete()
        except KeyError:
            # No deploy key was ever recorded; nothing to remove.
            pass
        except GithubException as ghe:
            default_msg = ('Unable to remove deploy key: %s' % config['deploy_key_id'])
            msg = GithubBuildTrigger._get_error_message(ghe, default_msg)
            raise TriggerDeactivationException(msg)
        if ('hook_id' in config):
            try:
                hook = repo.get_hook(config['hook_id'])
                hook.delete()
            except GithubException as ghe:
                default_msg = ('Unable to remove hook: %s' % config['hook_id'])
                msg = GithubBuildTrigger._get_error_message(ghe, default_msg)
                raise TriggerDeactivationException(msg)
        config.pop('hook_id', None)
        self.config = config
        return config

    @_catch_ssl_errors
    def list_build_source_namespaces(self):
        """List the user's own namespace plus their organizations."""
        gh_client = self._get_client()
        usr = gh_client.get_user()
        namespaces = {}
        namespaces[usr.login] = {'personal': True, 'id': usr.login, 'title': (usr.name or usr.login), 'avatar_url': usr.avatar_url, 'url': usr.html_url, 'score': (usr.plan.private_repos if usr.plan else 0)}
        for org in usr.get_orgs():
            organization = (org.login if org.login else org.name)
            namespaces[organization] = {'personal': False, 'id': organization, 'title': organization, 'avatar_url': org.avatar_url, 'url': '', 'score': 0}
        return BuildTriggerHandler.build_namespaces_response(namespaces)

    @_catch_ssl_errors
    def list_build_sources_for_namespace(self, namespace):
        """List repositories visible in *namespace* (user's own or an org)."""
        def repo_view(repo):
            # Normalize a PyGithub Repository into the response dict shape.
            return {'name': repo.name, 'full_name': repo.full_name, 'description': (repo.description or ''), 'last_updated': (timegm(repo.pushed_at.utctimetuple()) if repo.pushed_at else 0), 'url': repo.html_url, 'has_admin_permissions': True, 'private': repo.private}
        gh_client = self._get_client()
        usr = gh_client.get_user()
        if (namespace == usr.login):
            repos = [repo_view(repo) for repo in usr.get_repos(type='owner', sort='updated')]
            return BuildTriggerHandler.build_sources_response(repos)
        try:
            org = gh_client.get_organization(namespace)
            if (org is None):
                return []
        except GithubException:
            return []
        repos = [repo_view(repo) for repo in org.get_repos(type='member')]
        return BuildTriggerHandler.build_sources_response(repos)

    @_catch_ssl_errors
    def list_build_subdirs(self):
        """List Dockerfile paths found in the first matching branch's tree."""
        config = self.config
        gh_client = self._get_client()
        source = config['build_source']
        try:
            repo = gh_client.get_repo(source)
            repo_branches = (self.list_field_values('branch_name') or [])
            branches = find_matching_branches(config, repo_branches)
            branches = (branches or [(repo.default_branch or 'master')])
            default_commit = repo.get_branch(branches[0]).commit
            commit_tree = repo.get_git_tree(default_commit.sha, recursive=True)
            return [elem.path for elem in commit_tree.tree if ((elem.type == 'blob') and self.filename_is_dockerfile(os.path.basename(elem.path)))]
        except GithubException as ghe:
            message = ghe.data.get('message', ('Unable to list contents of repository: %s' % source))
            if (message == 'Branch not found'):
                raise EmptyRepositoryException()
            raise RepositoryReadException(message)

    @_catch_ssl_errors
    def load_dockerfile_contents(self):
        """Fetch the configured Dockerfile's contents, or None when absent."""
        config = self.config
        gh_client = self._get_client()
        source = config['build_source']
        try:
            repo = gh_client.get_repo(source)
        except GithubException as ghe:
            message = ghe.data.get('message', ('Unable to list contents of repository: %s' % source))
            raise RepositoryReadException(message)
        path = self.get_dockerfile_path()
        if (not path):
            return None
        try:
            file_info = repo.get_contents(path)
        except (GithubException, TypeError) as ghe:
            logger.error(('got error from trying to find github file %s' % ghe))
            return None
        if (file_info is None):
            return None
        if isinstance(file_info, list):
            # The path resolved to a directory listing, not a file.
            return None
        content = file_info.content
        if (file_info.encoding == 'base64'):
            content = base64.b64decode(content)
        return content

    @_catch_ssl_errors
    def list_field_values(self, field_name, limit=None):
        """List values for the 'refs', 'tag_name' or 'branch_name' fields;
        returns None for unknown field names."""
        if (field_name == 'refs'):
            branches = self.list_field_values('branch_name')
            tags = self.list_field_values('tag_name')
            return ([{'kind': 'branch', 'name': b} for b in branches] + [{'kind': 'tag', 'name': tag} for tag in tags])
        config = self.config
        source = config.get('build_source')
        if (source is None):
            return []
        if (field_name == 'tag_name'):
            try:
                gh_client = self._get_client()
                repo = gh_client.get_repo(source)
                gh_tags = repo.get_tags()
                if limit:
                    gh_tags = repo.get_tags()[0:limit]
                return [tag.name for tag in gh_tags]
            except GitHubBadCredentialsException:
                return []
            except GithubException:
                logger.exception('Got GitHub Exception when trying to list tags for trigger %s', self.trigger.id)
                return []
        if (field_name == 'branch_name'):
            try:
                gh_client = self._get_client()
                repo = gh_client.get_repo(source)
                gh_branches = repo.get_branches()
                if limit:
                    gh_branches = repo.get_branches()[0:limit]
                branches = [branch.name for branch in gh_branches]
                # Ensure the default branch is present and listed first.
                if (not (repo.default_branch in branches)):
                    branches.insert(0, repo.default_branch)
                if (branches[0] != repo.default_branch):
                    branches.remove(repo.default_branch)
                    branches.insert(0, repo.default_branch)
                return branches
            except GitHubBadCredentialsException:
                return ['master']
            except GithubException:
                logger.exception('Got GitHub Exception when trying to list branches for trigger %s', self.trigger.id)
                return ['master']
        return None

    @classmethod
    def _build_metadata_for_commit(cls, commit_sha, ref, repo):
        """Assemble build metadata (commit/ref/author/committer info) for a
        commit, or None when the commit cannot be loaded."""
        try:
            commit = repo.get_commit(commit_sha)
        except GithubException:
            logger.exception('Could not load commit information from GitHub')
            return None
        commit_info = {'url': commit.html_url, 'message': commit.commit.message, 'date': commit.last_modified}
        if commit.author:
            commit_info['author'] = {'username': commit.author.login, 'avatar_url': commit.author.avatar_url, 'url': commit.author.html_url}
        if commit.committer:
            commit_info['committer'] = {'username': commit.committer.login, 'avatar_url': commit.committer.avatar_url, 'url': commit.committer.html_url}
        return {'commit': commit_sha, 'ref': ref, 'default_branch': repo.default_branch, 'git_url': repo.ssh_url, 'commit_info': commit_info}

    @_catch_ssl_errors
    def manual_start(self, run_parameters=None):
        """Start a build manually from a branch or tag name."""
        config = self.config
        source = config['build_source']
        try:
            gh_client = self._get_client()
            repo = gh_client.get_repo(source)
            default_branch = repo.default_branch
        except GithubException as ghe:
            msg = GithubBuildTrigger._get_error_message(ghe, 'Unable to start build trigger')
            raise TriggerStartException(msg)

        def get_branch_sha(branch_name):
            # Resolve a branch name to its head commit sha.
            try:
                branch = repo.get_branch(branch_name)
                return branch.commit.sha
            except GithubException:
                raise TriggerStartException('Could not find branch in repository')

        def get_tag_sha(tag_name):
            # Resolve a tag name to its commit sha.
            tags = {tag.name: tag for tag in repo.get_tags()}
            if (not (tag_name in tags)):
                raise TriggerStartException('Could not find tag in repository')
            return tags[tag_name].commit.sha

        (commit_sha, ref) = determine_build_ref(run_parameters, get_branch_sha, get_tag_sha, default_branch)
        metadata = GithubBuildTrigger._build_metadata_for_commit(commit_sha, ref, repo)
        return self.prepare_build(metadata, is_manual=True)

    @_catch_ssl_errors
    def lookup_user(self, username):
        """Resolve a GitHub username to profile URLs, or None on failure."""
        try:
            gh_client = self._get_client()
            user = gh_client.get_user(username)
            return {'html_url': user.html_url, 'avatar_url': user.avatar_url}
        except GithubException:
            return None

    @_catch_ssl_errors
    def handle_trigger_request(self, request):
        """Validate an incoming GitHub webhook request and prepare a build.

        :raises InvalidPayloadException: when required payload fields are missing.
        :raises SkipRequestException: for ping events or unreachable repos.
        """
        payload = request.get_json()
        if (payload is None):
            raise InvalidPayloadException('Missing payload')
        if ('zen' in payload):
            # GitHub's initial "ping" event carries a 'zen' field; skip it.
            raise SkipRequestException()
        if ('repository' not in payload):
            raise InvalidPayloadException("Missing 'repository' on request")
        if ('owner' not in payload['repository']):
            raise InvalidPayloadException("Missing 'owner' on repository")
        if ('name' not in payload['repository']['owner']):
            raise InvalidPayloadException("Missing owner 'name' on repository")
        if ('name' not in payload['repository']):
            raise InvalidPayloadException("Missing 'name' on repository")
        default_branch = None
        lookup_user = None
        try:
            repo_full_name = ('%s/%s' % (payload['repository']['owner']['name'], payload['repository']['name']))
            gh_client = self._get_client()
            repo = gh_client.get_repo(repo_full_name)
            default_branch = repo.default_branch
            lookup_user = self.lookup_user
        except GitHubBadCredentialsException:
            # Proceed without default-branch/user enrichment.
            logger.exception('Got GitHub Credentials Exception; Cannot lookup default branch')
        except GithubException:
            logger.exception('Got GitHub Exception when trying to start trigger %s', self.trigger.id)
            raise SkipRequestException()
        logger.debug('GitHub trigger payload %s', payload)
        metadata = get_transformed_webhook_payload(payload, default_branch=default_branch, lookup_user=lookup_user)
        prepared = self.prepare_build(metadata)
        raise_if_skipped_build(prepared, self.config)
        return prepared
class ID3v1Tags(TestCase):
    """Tests for ID3v1 tag parsing/writing against a known test MP3.

    NOTE(review): the latin-1 characters in ``test_nonascii``'s input
    literals were stripped during extraction (the assertions still expect
    ``\xe9``/``\xf3``/``\xfc``); they are restored here so the test can pass.
    """

    def setUp(self):
        self.filename = os.path.join(DATA_DIR, 'silence-44-s-v1.mp3')
        self.id3 = ID3(self.filename)

    def test_album(self):
        self.assertEquals('Quod Libet Test Data', self.id3['TALB'])

    def test_genre(self):
        self.assertEquals('Darkwave', self.id3['TCON'].genres[0])

    def test_title(self):
        self.assertEquals('Silence', str(self.id3['TIT2']))

    def test_artist(self):
        self.assertEquals(['piman'], self.id3['TPE1'])

    def test_track(self):
        self.assertEquals('2', self.id3['TRCK'])
        # TRCK supports unary + for its integer value.
        self.assertEquals(2, (+ self.id3['TRCK']))

    def test_year(self):
        self.assertEquals('2004', self.id3['TDRC'])

    def test_v1_not_v11(self):
        # With a track number present the tag is written as ID3v1.1.
        self.id3['TRCK'] = TRCK(encoding=0, text='32')
        tag = MakeID3v1(self.id3)
        self.failUnless(32, ParseID3v1(tag)['TRCK'])
        del self.id3['TRCK']
        tag = MakeID3v1(self.id3)
        # Make byte 125 nonzero so the tag parses as plain v1 (no track).
        tag = ((tag[:125] + b' ') + tag[(- 1):])
        self.failIf(('TRCK' in ParseID3v1(tag)))

    def test_nulls(self):
        # NUL bytes terminate each fixed-width v1 field.
        s = u'TAG%(title)30s%(artist)30s%(album)30s%(year)4s%(cmt)29s\x03\x01'
        s = (s % dict(artist=u'abcd\x00fg', title=u'hijklmn\x00p', album=u'qrst\x00v', cmt=u'wxyz', year=u'1224'))
        tags = ParseID3v1(s.encode('ascii'))
        self.assertEquals(b'abcd'.decode('latin1'), tags['TPE1'])
        self.assertEquals(b'hijklmn'.decode('latin1'), tags['TIT2'])
        self.assertEquals(b'qrst'.decode('latin1'), tags['TALB'])

    def test_nonascii(self):
        # Latin-1 bytes in v1 fields must round-trip unchanged.
        s = u'TAG%(title)30s%(artist)30s%(album)30s%(year)4s%(cmt)29s\x03\x01'
        s = (s % dict(artist=u'abcd\xe9fg', title=u'hijklmn\xf3p', album=u'qrst\xfcv', cmt=u'wxyz', year=u'1234'))
        tags = ParseID3v1(s.encode('latin-1'))
        self.assertEquals(b'abcd\xe9fg'.decode('latin1'), tags['TPE1'])
        self.assertEquals(b'hijklmn\xf3p'.decode('latin1'), tags['TIT2'])
        self.assertEquals(b'qrst\xfcv'.decode('latin1'), tags['TALB'])
        self.assertEquals('wxyz', tags['COMM'])
        self.assertEquals('3', tags['TRCK'])
        self.assertEquals('1234', tags['TDRC'])

    def test_roundtrip(self):
        frames = {}
        for key in ['TIT2', 'TALB', 'TPE1', 'TDRC']:
            frames[key] = self.id3[key]
        self.assertEquals(ParseID3v1(MakeID3v1(frames)), frames)

    def test_make_from_empty(self):
        empty = ((b'TAG' + (b'\x00' * 124)) + b'\xff')
        self.assertEquals(MakeID3v1({}), empty)
        self.assertEquals(MakeID3v1({'TCON': TCON()}), empty)
        self.assertEquals(MakeID3v1({'COMM': COMM(encoding=0, text='')}), empty)

    def test_make_v1_from_tyer(self):
        # TDRC and the legacy TYER frame must produce identical v1 years.
        self.assertEquals(MakeID3v1({'TDRC': TDRC(text='2010-10-10')}), MakeID3v1({'TYER': TYER(text='2010')}))
        self.assertEquals(ParseID3v1(MakeID3v1({'TDRC': TDRC(text='2010-10-10')})), ParseID3v1(MakeID3v1({'TYER': TYER(text='2010')})))

    def test_invalid(self):
        self.failUnless((ParseID3v1(b'') is None))

    def test_invalid_track(self):
        tag = {}
        tag['TRCK'] = TRCK(encoding=0, text='not a number')
        v1tag = MakeID3v1(tag)
        self.failIf(('TRCK' in ParseID3v1(v1tag)))

    def test_v1_genre(self):
        tag = {}
        tag['TCON'] = TCON(encoding=0, text='Pop')
        v1tag = MakeID3v1(tag)
        self.failUnlessEqual(ParseID3v1(v1tag)['TCON'].genres, ['Pop'])
def get_extensions():
    """Assemble the CppExtension list for the tensordict._tensordict module.

    DEBUG=1 in the environment switches to unoptimized, debug-friendly
    compile and link flags.
    """
    extension = CppExtension
    common_flags = ['-std=c++17', '-fdiagnostics-color=always']
    if os.getenv('DEBUG', '0') == '1':
        print('Compiling in debug mode')
        extra_compile_args = {'cxx': ['-O0', '-fno-inline', '-g'] + common_flags}
        extra_link_args = ['-O0', '-g']
    else:
        extra_compile_args = {'cxx': ['-O3'] + common_flags}
        extra_link_args = []

    this_dir = os.path.dirname(os.path.abspath(__file__))
    csrc_dir = os.path.join(this_dir, 'tensordict', 'csrc')
    # Dedupe via a set, then materialize for setuptools.
    sources = list({os.path.join(csrc_dir, p) for p in glob.glob(os.path.join(csrc_dir, '*.cpp'))})
    return [
        extension(
            'tensordict._tensordict',
            sources,
            include_dirs=[this_dir],
            extra_compile_args=extra_compile_args,
            extra_link_args=extra_link_args,
        )
    ]
class _RoIPooling(Module):
    """RoI pooling layer: pools each region of interest to a fixed
    (pooled_height x pooled_width) grid."""

    def __init__(self, pooled_height, pooled_width, spatial_scale):
        super(_RoIPooling, self).__init__()
        # Normalize constructor arguments to the types the op expects.
        self.pooled_width = int(pooled_width)
        self.pooled_height = int(pooled_height)
        self.spatial_scale = float(spatial_scale)

    def forward(self, features, rois):
        pool = RoIPoolFunction(self.pooled_height, self.pooled_width, self.spatial_scale)
        return pool(features, rois)
def command_dlow(command, args):
    """CLI entry point for the 'dlow' comparison command.

    Parses the command-line options (lowpass filtering only), runs the
    standard comparison check between two result directories over the
    station range [smin, smax], and returns ``(results, output_filename)``
    when an output file was requested.
    """
    def setup(parser):
        # Assemble the option set; the highpass options make no sense for
        # a lowpass-only command, so they are stripped back out.
        add_source_options(parser)
        add_double_options(parser)
        add_filter_options(parser)
        parser.remove_option('--highpass')
        parser.remove_option('--highpass_rel')
        parser.set_defaults(rel_lowpass_frequency=0.25)
    (parser, opts, args) = cl_parse(command, args, setup=setup)
    # An explicit absolute lowpass frequency overrides the relative default.
    if (opts['lowpass_frequency'] is not None):
        opts['rel_lowpass_frequency'] = None
    (dir1, dir2, smin, smax) = verify_arguements('dlow', 4, args)
    opts['rel_highpass_frequency'] = None
    # 'output' is popped so it is not forwarded to the check as an option.
    out_filename = opts.pop('output')
    gfts = gftest.runComparissonStandardCheck(dir1, dir2, smin, smax, **opts)
    # NOTE(review): when no --output option was given, the computed results
    # are discarded (implicit None return) — confirm this is intended.
    if (out_filename is not None):
        return (gfts, out_filename)
class GdbClick(sublime_plugin.TextCommand):
    """Dispatch a click in one of the GDB panel views to the matching widget."""

    def run(self, edit):
        # Ignore clicks entirely while no debug session is active.
        if not is_running():
            return
        row, col = self.view.rowcol(self.view.sel()[0].a)
        clicked_id = self.view.id()
        # Route the click to whichever open GDB view was clicked.
        if gdb_variables_view.is_open() and clicked_id == gdb_variables_view.get_view().id():
            gdb_variables_view.expand_collapse_variable(self.view, toggle=True)
        elif gdb_callstack_view.is_open() and clicked_id == gdb_callstack_view.get_view().id():
            gdb_callstack_view.select(row)
        elif gdb_threads_view.is_open() and clicked_id == gdb_threads_view.get_view().id():
            gdb_threads_view.select(row)
        update_cursor()

    def is_enabled(self):
        # The command is only enabled during an active debug session.
        return is_running()
@patch('mmocr.utils.ocr.init_detector')
@patch('mmocr.utils.ocr.build_detector')
@patch('mmocr.utils.ocr.Config.fromfile')
@patch('mmocr.utils.ocr.load_checkpoint')
@patch('mmocr.utils.ocr.model_inference')
def test_single_inference(mock_model_inference, mock_loading, mock_config, mock_build_detector, mock_init_detector):
    """single_inference returns all inputs and batches calls per batch_size.

    NOTE(review): the original decorator lines were garbled to bare tuples;
    restored as stacked `@patch(...)` decorators (bottom decorator maps to
    the first mock argument) — confirm against upstream.
    """
    def dummy_inference(model, arr, batch_mode):
        # Identity stand-in for model_inference: echoes its input batch.
        return arr
    mock_model_inference.side_effect = dummy_inference
    mmocr = MMOCR()
    data = list(range(20))
    model = 'dummy'
    # Non-batch mode: results still come back complete and in order.
    res = mmocr.single_inference(model, data, batch_mode=False)
    assert (data == res)
    mock_model_inference.reset_mock()
    # Batch mode without batch_size: the whole dataset in one call.
    res = mmocr.single_inference(model, data, batch_mode=True)
    assert (data == res)
    mock_model_inference.assert_called_once()
    mock_model_inference.reset_mock()
    # batch_size larger than the dataset still means a single call.
    res = mmocr.single_inference(model, data, batch_mode=True, batch_size=100)
    assert (data == res)
    mock_model_inference.assert_called_once()
    mock_model_inference.reset_mock()
    # A small batch_size splits the data across multiple calls.
    res = mmocr.single_inference(model, data, batch_mode=True, batch_size=3)
    assert (data == res)
def _recall_update(input: torch.Tensor, target: torch.Tensor, num_classes: Optional[int], average: Optional[str]) -> Tuple[(torch.Tensor, torch.Tensor, torch.Tensor)]:
    """Compute the raw counts needed for recall: true positives, label
    occurrences, and prediction occurrences.

    Args:
        input: predictions — class indices, or per-class scores of shape
            (num_samples, num_classes) which are argmax-reduced over dim=1.
        target: ground-truth class indices.
        num_classes: number of classes; required unless average == 'micro'.
        average: 'micro' pools counts over all samples; any other value
            produces per-class count vectors of length num_classes.

    Returns:
        Tuple of (num_tp, num_labels, num_predictions) tensors.
    """
    _recall_update_input_check(input, target, num_classes)
    # Collapse a score matrix to predicted class indices.
    if (input.ndim == 2):
        input = torch.argmax(input, dim=1)
    if (average == 'micro'):
        # Micro averaging: single scalar counts pooled over every sample.
        num_tp = (input == target).sum()
        num_labels = target.new_tensor(target.numel())
        num_predictions = num_labels
        return (num_tp, num_labels, num_predictions)
    assert isinstance(num_classes, int), f'`num_classes` must be an integer, but received {num_classes}.'
    # Per-class histograms built with scatter-add: how often each class
    # appears in the targets, in the predictions, and among correct hits.
    num_labels = target.new_zeros(num_classes).scatter_(0, target, 1, reduce='add')
    num_predictions = target.new_zeros(num_classes).scatter_(0, input, 1, reduce='add')
    num_tp = target.new_zeros(num_classes).scatter_(0, target[(input == target)], 1, reduce='add')
    return (num_tp, num_labels, num_predictions)
class ParallelReadConcat(IterDataPipe):
    """Concatenate the datapipes chosen by ``dp_selector``, draining each
    selected pipe completely before moving to the next."""

    def __init__(self, *datapipes: IterDataPipe, dp_selector: Callable[([Sequence[IterDataPipe]], Sequence[IterDataPipe])]=_default_dp_selector) -> None:
        super().__init__()
        self.datapipes: Tuple[(IterDataPipe, ...)] = datapipes
        self.dp_selector = dp_selector

    def __iter__(self) -> Iterator[Any]:
        # The selector decides which pipes are read, and in what order.
        for pipe in self.dp_selector(self.datapipes):
            yield from pipe
def test_show_dynamic(hatch, temp_dir):
    """A freshly scaffolded project reports its initial version 0.0.1."""
    project_name = 'My.App'
    # Scaffold a new project inside the temporary directory.
    with temp_dir.as_cwd():
        hatch('new', project_name)
    project_dir = temp_dir / 'my-app'
    with project_dir.as_cwd():
        result = hatch('version')
        assert result.exit_code == 0, result.output
        assert result.output == '0.0.1\n'
@patch('torch.__version__', torch_version)
@pytest.mark.parametrize('in_w,in_h,in_feature,out_feature', [(10, 10, 1, 1), (20, 20, 3, 3)])
def test_linear(in_w, in_h, in_feature, out_feature):
    """The Linear wrapper handles empty batches and matches nn.Linear otherwise.

    NOTE(review): the original decorator lines were garbled; restored as
    `@patch(...)` and `@pytest.mark.parametrize(...)` — confirm upstream.
    """
    x_empty = torch.randn(0, in_feature, requires_grad=True)
    torch.manual_seed(0)
    wrapper = Linear(in_feature, out_feature)
    wrapper_out = wrapper(x_empty)
    x_normal = torch.randn(3, in_feature)
    # Same seed so the reference layer gets identical initial weights.
    torch.manual_seed(0)
    ref = nn.Linear(in_feature, out_feature)
    ref_out = ref(x_normal)
    # Empty input: zero-sized batch dim, feature dims match the reference.
    assert (wrapper_out.shape[0] == 0)
    assert (wrapper_out.shape[1:] == ref_out.shape[1:])
    # Backward through the empty batch must still populate gradients.
    wrapper_out.sum().backward()
    assert (wrapper.weight.grad is not None)
    assert (wrapper.weight.grad.shape == wrapper.weight.shape)
    # Non-empty input must reproduce the reference output exactly.
    assert torch.equal(wrapper(x_normal), ref_out)
    # Eval mode with an empty batch must not raise.
    x_empty = torch.randn(0, in_feature)
    wrapper = Linear(in_feature, out_feature)
    wrapper.eval()
    wrapper(x_empty)
def main(argv):
    """Parse CLI arguments, instantiate the chosen codec, collect results,
    and print them as JSON."""
    parser, subparsers = setup_args()
    # Register one sub-command per codec, named after its lowercased class.
    for codec_cls in codecs:
        sub = subparsers.add_parser(codec_cls.__name__.lower(), help=f'{codec_cls.__name__}')
        setup_common_args(sub)
        codec_cls.setup_args(sub)
    args = parser.parse_args(argv)
    chosen_cls = next(c for c in codecs if c.__name__.lower() == args.codec)
    codec = chosen_cls(args)
    results = collect(codec, args.dataset, args.qps, args.metrics, args.num_jobs)
    output = {'name': codec.name, 'description': codec.description, 'results': results}
    print(json.dumps(output, indent=2))
@require_torch
@require_vision
class EfficientFormerModelIntegrationTest(unittest.TestCase):
    """Integration tests running the pretrained EfficientFormer-L1 checkpoint.

    NOTE(review): the original decorators were garbled ('_torch', '_vision',
    '_property'); restored as the conventional transformers test decorators
    `@require_torch`, `@require_vision` and `@cached_property` — confirm
    against upstream.
    """

    @cached_property
    def default_feature_extractor(self):
        # Lazily load the image processor; None when vision deps are missing.
        return (EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300') if is_vision_available() else None)

    def test_inference_image_classification_head(self):
        """The classification head produces the expected logits slice."""
        model = EfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300').to(torch_device)
        feature_extractor = self.default_feature_extractor
        image = prepare_img()
        inputs = feature_extractor(images=image, return_tensors='pt').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([(- 0.0555), 0.4825, (- 0.0852)]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0][:3], expected_slice, atol=0.0001))

    def test_inference_image_classification_head_with_teacher(self):
        """The distillation (teacher) head produces the expected logits slice."""
        model = EfficientFormerForImageClassificationWithTeacher.from_pretrained('snap-research/efficientformer-l1-300').to(torch_device)
        feature_extractor = self.default_feature_extractor
        image = prepare_img()
        inputs = feature_extractor(images=image, return_tensors='pt').to(torch_device)
        with torch.no_grad():
            outputs = model(**inputs)
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([(- 0.1312), 0.4353, (- 1.0499)]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0][:3], expected_slice, atol=0.0001))
def parse_handshake(handshake):
    """Feed raw handshake bytes to the Request parser and verify it both
    yields a request and consumes all of the input."""
    reader = StreamReader()
    reader.feed_data(handshake)
    parser = Request.parse(reader.read_line)
    # The generator must finish (StopIteration) on the first advance.
    finished = False
    try:
        next(parser)
    except StopIteration:
        finished = True
    assert finished, 'parser should return request'
    reader.feed_eof()
    assert reader.at_eof(), 'parser should consume all data'
@pytest.mark.parametrize('dm', [partial(qutip.thermal_dm, n=1.0), qutip.maximally_mixed_dm, partial(qutip.coherent_dm, alpha=0.5), partial(qutip.fock_dm, n=1), partial(qutip.spin_state, m=2, type='dm'), partial(qutip.spin_coherent, theta=1, phi=2, type='dm')], ids=['thermal_dm', 'maximally_mixed_dm', 'coherent_dm', 'fock_dm', 'spin_state', 'spin_coherent'])
def test_dm(dm):
    """Every density-matrix constructor must return a unit-trace state.

    NOTE(review): the original decorator line was garbled to a bare
    `.parametrize(...)`; restored as `@pytest.mark.parametrize` — confirm.
    """
    N = 5
    rho = dm(N)
    # All density matrices have trace 1 by definition.
    assert (rho.tr() == pytest.approx(1.0))
def test_js_quirks_match_files(webengine_tab):
    """Every quirks *.user.js file on disk is registered in code, and vice versa."""
    quirks_path = pathlib.Path(qutebrowser.__file__).parent / 'javascript' / 'quirks'
    suffix = '.user.js'
    # Quirk names derived from the files shipped on disk.
    on_disk = {p.name[:-len(suffix)] for p in quirks_path.glob(f'*{suffix}')}
    # Quirk names registered by the tab's script machinery.
    registered = {quirk.filename for quirk in webengine_tab._scripts._get_quirks()}
    assert registered == on_disk
class EmailTest(object):
    """Validation tests for inputs.email().

    NOTE(review): the original `@pytest.mark.parametrize` decorators were
    garbled to bare `.parametrize(...)` lines (a syntax error) and have been
    restored. Many of the parametrized value lists also contain empty or
    truncated strings — the original email fixtures appear to have been
    stripped; restore them from upstream before trusting these tests.
    """

    def assert_bad_email(self, validator, value, msg=None):
        # Helper: the validator must raise ValueError with the expected message.
        msg = (msg or '{0} is not a valid email')
        with pytest.raises(ValueError) as cm:
            validator(value)
        assert (str(cm.value) == msg.format(value))

    @pytest.mark.parametrize('value', ['', '', 'coucou+', '', '-with-hyphens.com', '.com', '.example.com', 'Loic.Accentue.fr'])
    def test_valid_value_default(self, value):
        validator = inputs.email()
        assert (validator(value) == value)

    @pytest.mark.parametrize('value', ['', '.0.0.1', '.1.2.3', '::1', '.8.9.10', ':db8:85a3::8a2e:370:7334', ('?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?' + '.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?.?')])
    def test_invalid_value_default(self, value):
        self.assert_bad_email(inputs.email(), value)

    @pytest.mark.parametrize('value', ['', ''])
    def test_valid_value_check(self, value):
        email = inputs.email(check=True)
        assert (email(value) == value)

    @pytest.mark.parametrize('value', ['-found.fr', '', '.0.0.1', '.1.2.3', '::1', '.8.9.10', ':db8:85a3::8a2e:370:7334'])
    def test_invalid_values_check(self, value):
        email = inputs.email(check=True)
        self.assert_bad_email(email, value)

    @pytest.mark.parametrize('value', ['', '', 'coucou+', '', '-with-hyphens.com', '.com', '.8.9.10', ':db8:85a3::8a2e:370:7334'])
    def test_valid_value_ip(self, value):
        email = inputs.email(ip=True)
        assert (email(value) == value)

    @pytest.mark.parametrize('value', ['', '.0.0.1', '.1.2.3', '::1'])
    def test_invalid_value_ip(self, value):
        email = inputs.email(ip=True)
        self.assert_bad_email(email, value)

    @pytest.mark.parametrize('value', ['', '', 'coucou+', '', '', '-with-hyphens.com', '.com', ''])
    def test_valid_value_local(self, value):
        email = inputs.email(local=True)
        assert (email(value) == value)

    @pytest.mark.parametrize('value', ['.0.0.1', '.1.2.3', '::1', '.8.9.10', ':db8:85a3::8a2e:370:7334'])
    def test_invalid_value_local(self, value):
        email = inputs.email(local=True)
        self.assert_bad_email(email, value)

    @pytest.mark.parametrize('value', ['', '', 'coucou+', '', '', '-with-hyphens.com', '.com', '.8.9.10', ':db8:85a3::8a2e:370:7334', '', '.0.0.1', '.1.2.3', '::1'])
    def test_valid_value_ip_and_local(self, value):
        email = inputs.email(ip=True, local=True)
        assert (email(value) == value)

    @pytest.mark.parametrize('value', ['', '', 'coucou+', ''])
    def test_valid_value_domains(self, value):
        email = inputs.email(domains=('gmail.com', 'cmoi.fr'))
        assert (email(value) == value)

    @pytest.mark.parametrize('value', ['-with-hyphens.com', '.com', '', '.0.0.1', '.1.2.3', '::1', '.8.9.10', ':db8:85a3::8a2e:370:7334'])
    def test_invalid_value_domains(self, value):
        email = inputs.email(domains=('gmail.com', 'cmoi.fr'))
        self.assert_bad_email(email, value, '{0} does not belong to the authorized domains')

    @pytest.mark.parametrize('value', ['', '', 'coucou+', ''])
    def test_valid_value_exclude(self, value):
        email = inputs.email(exclude=('somewhere.com', 'foo.bar'))
        assert (email(value) == value)

    @pytest.mark.parametrize('value', ['', ''])
    def test_invalid_value_exclude(self, value):
        email = inputs.email(exclude=('somewhere.com', 'foo.bar'))
        self.assert_bad_email(email, value, '{0} belongs to a forbidden domain')

    @pytest.mark.parametrize('value', ['', '', 'email.somewhere.com', '[invalid!email]', 'me.', 'me..'])
    def test_bad_email(self, value):
        email = inputs.email()
        self.assert_bad_email(email, value)

    def test_schema(self):
        assert (inputs.email().__schema__ == {'type': 'string', 'format': 'email'})
def _get_sequence(exons, genome, strand='+'):
seq = []
pos = []
for exon in exons:
seq.append(genome[exon[0]:exon[1]])
pos.extend(range(exon[0], exon[1]))
if (strand == '-'):
return (rev_complement(''.join(seq)), pos)
else:
return (''.join(seq), pos) |
class RegNetParams():
    """Hyper-parameter container for a RegNet model.

    Holds the width-schedule parameters (w_0 initial width, w_a slope,
    w_m multiplier — see "Designing Network Design Spaces"), the stem/block/
    activation choices, and SE / BatchNorm settings.
    """

    def __init__(self, depth: int, w_0: int, w_a: float, w_m: float, group_w: int, stem_type: StemType='SIMPLE_STEM_IN', stem_width: int=32, block_type: BlockType='RES_BOTTLENECK_BLOCK', activation_type: ActivationType='RELU', use_se: bool=True, se_ratio: float=0.25, bn_epsilon: float=1e-05, bn_momentum: bool=0.1):
        # Sanity constraints on the width-generation parameters; w_0 must be
        # a multiple of 8 so quantized widths stay consistent.
        assert ((w_a >= 0) and (w_0 > 0) and (w_m > 1) and ((w_0 % 8) == 0)), 'Invalid RegNet settings'
        self.depth = depth
        self.w_0 = w_0
        self.w_a = w_a
        self.w_m = w_m
        self.group_w = group_w
        # Enum members are passed by name (string) and resolved here.
        self.stem_type = StemType[stem_type]
        self.block_type = BlockType[block_type]
        self.activation_type = ActivationType[activation_type]
        self.stem_width = stem_width
        self.use_se = use_se
        # Squeeze-and-excitation ratio, or None when SE is disabled.
        self.se_ratio = (se_ratio if use_se else None)
        self.bn_epsilon = bn_epsilon
        self.bn_momentum = bn_momentum
        self.relu_in_place = RELU_IN_PLACE

    def get_expanded_params(self):
        """Compute the per-stage parameters from the linear width schedule.

        Returns an iterator of (stage width, stride, stage depth,
        bottleneck multiplier, group width) tuples, one per stage.
        """
        QUANT = 8
        STRIDE = 2
        BOT_MUL = 1.0
        # Continuous per-block widths: w_0 + w_a * block_index.
        widths_cont = ((np.arange(self.depth) * self.w_a) + self.w_0)
        block_capacity = np.round((np.log((widths_cont / self.w_0)) / np.log(self.w_m)))
        # Quantize each block width to a multiple of QUANT.
        block_widths = (np.round(np.divide((self.w_0 * np.power(self.w_m, block_capacity)), QUANT)) * QUANT)
        num_stages = len(np.unique(block_widths))
        block_widths = block_widths.astype(int).tolist()
        # Mark the block indices where the width changes — stage boundaries.
        split_helper = zip((block_widths + [0]), ([0] + block_widths), (block_widths + [0]), ([0] + block_widths))
        splits = [((w != wp) or (r != rp)) for (w, wp, r, rp) in split_helper]
        stage_widths = [w for (w, t) in zip(block_widths, splits[:(- 1)]) if t]
        stage_depths = np.diff([d for (d, t) in enumerate(splits) if t]).tolist()
        strides = ([STRIDE] * num_stages)
        bot_muls = ([BOT_MUL] * num_stages)
        group_widths = ([self.group_w] * num_stages)
        # Adjust widths/groups so each group width divides its stage width.
        (stage_widths, group_widths) = _adjust_widths_groups_compatibilty(stage_widths, bot_muls, group_widths)
        return zip(stage_widths, strides, stage_depths, bot_muls, group_widths)
@node_rewriter([BetaBinomialRV])
def beta_binomial_from_beta_binomial(fgraph, node):
    """Rewrite a BetaBinomial RV node as a Beta draw followed by a Binomial draw.

    NOTE(review): the original decorator line was garbled to
    `_rewriter([BetaBinomialRV])`; restored as `@node_rewriter` — confirm
    against upstream.
    """
    (rng, *other_inputs, n, a, b) = node.inputs
    (n, a, b) = broadcast_arrays(n, a, b)
    # First sample p ~ Beta(a, b), then y ~ Binomial(n, p),
    # threading the RNG state through both nodes.
    (next_rng, b) = beta.make_node(rng, *other_inputs, a, b).outputs
    (next_rng, b) = binomial.make_node(next_rng, *other_inputs, n, b).outputs
    return [next_rng, b]
@patch('pyinaturalist.auth._get_jwt', return_value=NOT_CACHED_RESPONSE)
def test_get_access_token__invalid_creds(mock_get_jwt, requests_mock):
    """A 401 from the OAuth token endpoint must surface as an HTTPError.

    NOTE(review): the original decorator line was garbled to a bare tuple;
    restored as `@patch(...)` — confirm against upstream.
    """
    requests_mock.post(f'{API_V0}/oauth/token', json=token_rejected_json, status_code=401)
    with pytest.raises(HTTPError):
        get_access_token('username', 'password', 'app_id', 'app_secret')
class Translations(NullTranslations, gettext.GNUTranslations):
    """An extended translation catalog: GNUTranslations plus domain support
    and merging of multiple catalogs."""

    DEFAULT_DOMAIN = 'messages'

    def __init__(self, fp: (gettext._TranslationsReader | None)=None, domain: (str | None)=None):
        """:param fp: file-like object to read the catalog from
        :param domain: message domain (defaults to DEFAULT_DOMAIN)"""
        super().__init__(fp=fp)
        self.domain = (domain or self.DEFAULT_DOMAIN)
    # Backward-compatible aliases for the Python 2 era API.
    ugettext = gettext.GNUTranslations.gettext
    ungettext = gettext.GNUTranslations.ngettext

    # Fixed: this alternate constructor takes `cls` and instantiates via
    # `cls(...)`, but the @classmethod decorator was missing, so calling
    # Translations.load(dirname, ...) misbound dirname to cls.
    @classmethod
    def load(cls, dirname: ((str | os.PathLike[str]) | None)=None, locales: (((Iterable[(str | Locale)] | str) | Locale) | None)=None, domain: (str | None)=None) -> NullTranslations:
        """Load translations for the given locales from *dirname*; returns a
        NullTranslations instance when no catalog file is found."""
        if (not domain):
            domain = cls.DEFAULT_DOMAIN
        filename = gettext.find(domain, dirname, _locales_to_names(locales))
        if (not filename):
            return NullTranslations()
        with open(filename, 'rb') as fp:
            return cls(fp=fp, domain=domain)

    def __repr__(self) -> str:
        version = self._info.get('project-id-version')
        return f'<{type(self).__name__}: "{version}">'

    def add(self, translations: Translations, merge: bool=True):
        """Add *translations* under its domain; merge into an existing
        catalog of the same domain when *merge* is set. Returns self."""
        domain = getattr(translations, 'domain', self.DEFAULT_DOMAIN)
        if (merge and (domain == self.domain)):
            return self.merge(translations)
        existing = self._domains.get(domain)
        if (merge and isinstance(existing, Translations)):
            existing.merge(translations)
        else:
            # Keep this catalog as a fallback for the added one.
            translations.add_fallback(self)
            self._domains[domain] = translations
        return self

    def merge(self, translations: Translations):
        """Merge *translations* message catalog into this one. Returns self."""
        if isinstance(translations, gettext.GNUTranslations):
            self._catalog.update(translations._catalog)
            if isinstance(translations, Translations):
                self.files.extend(translations.files)
        return self
class LoginActionTest(BaseActionTest):
    """End-to-end tests of the social-auth login action: session handling,
    redirects, partial pipelines, and user-state edge cases."""

    def test_login(self):
        self.do_login()

    def test_login_with_partial_pipeline(self):
        self.do_login_with_partial_pipeline()

    def test_fields_stored_in_session(self):
        """Configured request fields persist in the session across logout
        and are refreshed/cleared on the next login."""
        self.strategy.set_settings({'SOCIAL_AUTH_FIELDS_STORED_IN_SESSION': ['foo', 'bar']})
        self.strategy.set_request_data({'foo': '1', 'bar': '2'}, self.backend)
        self.do_login()
        self.assertEqual(self.strategy.session_get('foo'), '1')
        self.assertEqual(self.strategy.session_get('bar'), '2')
        # Values survive logout...
        self._logout(self.backend)
        self.assertEqual(self.strategy.session_get('foo'), '1')
        self.assertEqual(self.strategy.session_get('bar'), '2')
        # ...and a new login replaces 'foo' and drops the absent 'bar'.
        self.strategy.remove_from_request_data('bar')
        self.strategy.set_request_data({'foo': '3'}, self.backend)
        self.do_login()
        self.assertEqual(self.strategy.session_get('foo'), '3')
        self.assertEqual(self.strategy.session_get('bar'), None)

    def test_redirect_value(self):
        """The 'next' request parameter controls the post-login redirect."""
        self.strategy.set_request_data({'next': '/after-login'}, self.backend)
        redirect = self.do_login(after_complete_checks=False)
        self.assertEqual(redirect.url, '/after-login')

    def test_redirect_value_set_by_backend(self):
        """A backend may impose its own post-login redirect URL."""
        self.backend = BackendThatControlsRedirect(self.strategy)
        self.user = TestUserSocialAuth.create_user('test-user')
        redirect = self.do_login(after_complete_checks=False)
        self.assertEqual(redirect.url, '/after-login')

    def test_login_with_invalid_partial_pipeline(self):
        """Corrupting the partial's backend name before completion still
        lets the partial-pipeline login flow finish."""
        def before_complete():
            partial_token = self.strategy.session_get(PARTIAL_TOKEN_SESSION_NAME)
            partial = self.strategy.storage.partial.load(partial_token)
            partial.data['backend'] = 'foobar'
        self.do_login_with_partial_pipeline(before_complete)

    def test_new_user(self):
        """New users are redirected to the configured new-user URL."""
        self.strategy.set_settings({'SOCIAL_AUTH_NEW_USER_REDIRECT_URL': '/new-user'})
        redirect = self.do_login(after_complete_checks=False)
        self.assertEqual(redirect.url, '/new-user')

    def test_inactive_user(self):
        """Inactive users are redirected to the configured inactive URL."""
        self.strategy.set_settings({'SOCIAL_AUTH_INACTIVE_USER_URL': '/inactive'})
        User.set_active(False)
        redirect = self.do_login(after_complete_checks=False)
        self.assertEqual(redirect.url, '/inactive')

    def test_invalid_user(self):
        """A pipeline that removes the user ends at the login-error URL."""
        self.strategy.set_settings({'SOCIAL_AUTH_LOGIN_ERROR_URL': '/error', 'SOCIAL_AUTH_PIPELINE': ('social_core.pipeline.social_auth.social_details', 'social_core.pipeline.social_auth.social_uid', 'social_core.pipeline.social_auth.auth_allowed', 'social_core.pipeline.social_auth.social_user', 'social_core.pipeline.user.get_username', 'social_core.pipeline.user.create_user', 'social_core.pipeline.social_auth.associate_user', 'social_core.pipeline.social_auth.load_extra_data', 'social_core.pipeline.user.user_details', 'social_core.tests.pipeline.remove_user')})
        redirect = self.do_login(after_complete_checks=False)
        self.assertEqual(redirect.url, '/error')
@pytest.mark.parametrize('value', [np.nan, np.inf])
@pytest.mark.filterwarnings('ignore:Cannot cache compiled function "numba_funcified_fgraph"')
def test_solve_triangular_raises_on_nan_inf(value):
    """solve_triangular with check_finite=True must reject nan/inf in b.

    NOTE(review): decorators restored from garbled `.parametrize` /
    `.filterwarnings` lines — confirm against upstream.
    """
    A = pt.matrix('A')
    b = pt.matrix('b')
    X = pt.linalg.solve_triangular(A, b, check_finite=True)
    f = pytensor.function([A, b], X, mode='NUMBA')
    A_val = np.random.normal(size=(5, 5))
    # Fixed: the matrix-multiply operator was missing. Build a symmetric
    # positive-definite matrix so the Cholesky factor below exists.
    A_sym = A_val @ A_val.conj().T
    A_tri = np.linalg.cholesky(A_sym).astype(config.floatX)
    b = np.full((5, 1), value)
    with pytest.raises(ValueError, match=re.escape('Non-numeric values (nan or inf) returned ')):
        f(A_tri, b)
class TestTrainingExtensionsWeightSvdCostCalculator(unittest.TestCase):
    """Tests for the weight-SVD cost calculator (rank selection and
    compressed-cost estimation)."""

    def test_calculate_weight_svd_cost(self):
        """Rank chosen for a target compression ratio must reproduce that
        ratio (in MACs) within 3%."""
        conv = nn.Conv2d(32, 64, kernel_size=5, padding=(2, 2))
        layer = Layer(conv, 'conv', output_shape=[1, 64, 28, 28])
        self.assertEqual(32, cc.WeightSvdCostCalculator.calculate_max_rank(layer))
        comp_ratios_to_check = [0.8, 0.75, 0.5, 0.25, 0.125]
        original_cost = cc.CostCalculator.compute_layer_cost(layer)
        for comp_ratio in comp_ratios_to_check:
            # Rank → cost round trip should land close to the target ratio.
            rank = cc.WeightSvdCostCalculator.calculate_rank_given_comp_ratio(layer, comp_ratio, CostMetric.mac)
            print('Rank = {}, for compression_ratio={}'.format(rank, comp_ratio))
            compressed_cost = cc.WeightSvdCostCalculator.calculate_cost_given_rank(layer, rank)
            print('Compressed cost={}, compression_ratio={}'.format(compressed_cost, (compressed_cost.mac / original_cost.mac)))
            self.assertTrue(math.isclose((compressed_cost.mac / original_cost.mac), comp_ratio, abs_tol=0.03))
        for comp_ratio in comp_ratios_to_check:
            # The direct per-layer API should agree with the rank-based path.
            compressed_cost = cc.WeightSvdCostCalculator.calculate_per_layer_compressed_cost(layer, comp_ratio, CostMetric.mac)
            self.assertTrue(math.isclose((compressed_cost.mac / original_cost.mac), comp_ratio, abs_tol=0.03))

    def test_calculate_weight_svd_cost_all_layers(self):
        """Whole-model compressed cost at 0.5 ratio matches the known value."""
        model = mnist_torch_model.Net().to('cpu')
        print(model)
        input_shape = (1, 1, 28, 28)
        dummy_input = create_rand_tensors_given_shapes(input_shape, get_device(model))
        layer_db = LayerDatabase(model, dummy_input)
        # Compress all layers at ratio 0.5 (both branches are identical;
        # kept as-is to preserve behavior).
        layer_ratio_list = []
        for layer in layer_db:
            if isinstance(layer.module, nn.Conv2d):
                layer_ratio_list.append(LayerCompRatioPair(layer, Decimal('0.5')))
            else:
                layer_ratio_list.append(LayerCompRatioPair(layer, Decimal('0.5')))
        compressed_cost = cc.WeightSvdCostCalculator.calculate_compressed_cost(layer_db, layer_ratio_list, CostMetric.mac)
        # Golden value for this model at 0.5 compression across all layers.
        self.assertEqual(7031800, compressed_cost.mac)
def add_arguments(parser):
    """Register the language server's command-line options on *parser*."""
    parser.description = 'Python Language Server'
    # Transport selection: stdio (default), TCP, or WebSockets.
    parser.add_argument('--tcp', action='store_true', help='Use TCP server instead of stdio')
    parser.add_argument('--ws', action='store_true', help='Use Web Sockets server instead of stdio')
    parser.add_argument('--host', default='127.0.0.1', help='Bind to this address')
    parser.add_argument('--port', type=int, default=2087, help='Bind to this port')
    parser.add_argument(
        '--check-parent-process',
        action='store_true',
        help='Check whether parent process is still alive using os.kill(ppid, 0) and auto shut down language server process when parent process is not alive.Note that this may not work on a Windows machine.',
    )
    # Logging destination options are mutually exclusive.
    logging_group = parser.add_mutually_exclusive_group()
    logging_group.add_argument('--log-config', help='Path to a JSON file containing Python logging config.')
    logging_group.add_argument(
        '--log-file',
        help='Redirect logs to the given file instead of writing to stderr.Has no effect if used with --log-config.',
    )
    parser.add_argument('-v', '--verbose', action='count', default=0, help='Increase verbosity of log output, overrides log config file')
    parser.add_argument('-V', '--version', action='version', version=('%(prog)s v' + __version__))
def _makeTags(tagStr, xml, suppress_LT=Suppress('<'), suppress_GT=Suppress('>')):
    """Internal helper to construct opening and closing tag expressions.

    Given a tag name (or a pre-built expression), returns an
    ``(openTag, closeTag)`` pair of pyparsing expressions. XML mode is
    case-sensitive and only accepts double-quoted attribute values; HTML
    mode is caseless and more permissive.
    """
    if isinstance(tagStr, str_type):
        resname = tagStr
        # HTML tag names match caselessly; XML names are exact.
        tagStr = Keyword(tagStr, caseless=(not xml))
    else:
        resname = tagStr.name
    tagAttrName = Word(alphas, (alphanums + '_-:'))
    if xml:
        # XML: attributes must be double-quoted name="value" pairs.
        tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
        openTag = ((((suppress_LT + tagStr('tag')) + Dict(ZeroOrMore(Group(((tagAttrName + Suppress('=')) + tagAttrValue))))) + Optional('/', default=[False])('empty').setParseAction((lambda s, l, t: (t[0] == '/')))) + suppress_GT)
    else:
        # HTML: values may be quoted or bare; attribute names are lowercased,
        # and valueless attributes are allowed.
        tagAttrValue = (quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars='>'))
        openTag = ((((suppress_LT + tagStr('tag')) + Dict(ZeroOrMore(Group((tagAttrName.setParseAction((lambda t: t[0].lower())) + Optional((Suppress('=') + tagAttrValue))))))) + Optional('/', default=[False])('empty').setParseAction((lambda s, l, t: (t[0] == '/')))) + suppress_GT)
    closeTag = Combine(((_L('</') + tagStr) + '>'), adjacent=False)
    openTag.setName(('<%s>' % resname))
    # Expose the matched open tag under a 'start<TagName>' results name.
    openTag.addParseAction((lambda t: t.__setitem__(('start' + ''.join(resname.replace(':', ' ').title().split())), t.copy())))
    closeTag = closeTag(('end' + ''.join(resname.replace(':', ' ').title().split()))).setName(('</%s>' % resname))
    openTag.tag = resname
    closeTag.tag = resname
    # Convenience expression matching everything up to the closing tag.
    openTag.tag_body = SkipTo(closeTag())
    return (openTag, closeTag)
class Solution(object):
    def networkDelayTime(self, times, N, K):
        """Return the time for a signal from node K to reach every node, or -1.

        Runs Dijkstra's algorithm with an O(N^2) linear scan for the next
        closest unvisited node (fine for small N; use a heap for big graphs).

        :param times: iterable of (u, v, w) directed weighted edges
        :param N: number of nodes, labeled 1..N
        :param K: source node label
        :returns: max shortest-path distance, or -1 if a node is unreachable
        """
        graph = collections.defaultdict(list)
        for u, v, w in times:
            graph[u].append((v, w))
        # range() replaces the Python-2-only xrange() so this runs on Python 3.
        dist = {node: float('inf') for node in range(1, N + 1)}
        seen = [False] * (N + 1)
        dist[K] = 0
        while True:
            # Select the unvisited node with the smallest tentative distance.
            cand_node = -1
            cand_dist = float('inf')
            for i in range(1, N + 1):
                if not seen[i] and dist[i] < cand_dist:
                    cand_dist = dist[i]
                    cand_node = i
            if cand_node < 0:
                break
            seen[cand_node] = True
            # Relax all outgoing edges of the chosen node.
            for nei, d in graph[cand_node]:
                dist[nei] = min(dist[nei], dist[cand_node] + d)
        ans = max(dist.values())
        return ans if ans < float('inf') else -1
def resolve_dns_srv(host: str):
    """Resolve SRV records for *host*.

    Returns a list of {'host', 'port'} dicts ordered by ascending priority,
    then descending weight.
    """
    records = dns.resolver.query(host, 'SRV')
    ordered = sorted(records, key=lambda rec: (rec.priority, -rec.weight))
    return [{'host': str(rec.target), 'port': rec.port} for rec in ordered]
def test_private_is_deprecated() -> None:
    """Constructing a private class without _ispytest warns; with it, no warning."""
    class GuardedInit():
        def __init__(self, foo: int, *, _ispytest: bool=False) -> None:
            deprecated.check_ispytest(_ispytest)

    # Without the keyword, the deprecation warning is emitted.
    with pytest.warns(pytest.PytestDeprecationWarning, match='private pytest class or function'):
        GuardedInit(10)
    # Passing _ispytest=True suppresses the warning.
    GuardedInit(10, _ispytest=True)
class PyObjectToTextual():
    """Serialize rope PyObjects into nested 'textual' tuples.

    Dispatches on the object's concrete type name to a matching
    ``<TypeName>_to_textual`` method; unknown types map to ('unknown',).
    """

    def __init__(self, project):
        self.project = project

    def transform(self, pyobject):
        """Transform a *pyobject* to textual form."""
        if (pyobject is None):
            return ('none',)
        object_type = type(pyobject)
        try:
            # Dynamic dispatch by concrete type name, e.g. PyClass_to_textual.
            method = getattr(self, (object_type.__name__ + '_to_textual'))
            return method(pyobject)
        except AttributeError:
            return ('unknown',)

    def __call__(self, pyobject):
        return self.transform(pyobject)

    def PyObject_to_textual(self, pyobject):
        # A plain object whose type is a defined class becomes an 'instance'.
        if isinstance(pyobject.get_type(), rope.base.pyobjects.AbstractClass):
            result = self.transform(pyobject.get_type())
            if (result[0] == 'defined'):
                return ('instance', result)
            return result
        return ('unknown',)

    def PyFunction_to_textual(self, pyobject):
        return self._defined_to_textual(pyobject)

    def PyClass_to_textual(self, pyobject):
        return self._defined_to_textual(pyobject)

    def _defined_to_textual(self, pyobject):
        # Walk up the scope chain to build the dotted address within the module.
        address = []
        while (pyobject.parent is not None):
            address.insert(0, pyobject.get_name())
            pyobject = pyobject.parent
        return ('defined', self._get_pymodule_path(pyobject.get_module()), '.'.join(address))

    def PyModule_to_textual(self, pyobject):
        return ('defined', self._get_pymodule_path(pyobject))

    def PyPackage_to_textual(self, pyobject):
        return ('defined', self._get_pymodule_path(pyobject))

    def List_to_textual(self, pyobject):
        return ('builtin', 'list', self.transform(pyobject.holding))

    def Dict_to_textual(self, pyobject):
        return ('builtin', 'dict', self.transform(pyobject.keys), self.transform(pyobject.values))

    def Tuple_to_textual(self, pyobject):
        objects = [self.transform(holding) for holding in pyobject.get_holding_objects()]
        return tuple((['builtin', 'tuple'] + objects))

    def Set_to_textual(self, pyobject):
        return ('builtin', 'set', self.transform(pyobject.holding))

    def Iterator_to_textual(self, pyobject):
        return ('builtin', 'iter', self.transform(pyobject.holding))

    def Generator_to_textual(self, pyobject):
        return ('builtin', 'generator', self.transform(pyobject.holding))

    def Str_to_textual(self, pyobject):
        return ('builtin', 'str')

    def File_to_textual(self, pyobject):
        return ('builtin', 'file')

    def BuiltinFunction_to_textual(self, pyobject):
        return ('builtin', 'function', pyobject.get_name())

    def _get_pymodule_path(self, pymodule):
        return self.resource_to_path(pymodule.get_resource())

    def resource_to_path(self, resource):
        # Resources from this project are stored project-relative; others
        # keep their absolute real path.
        if (resource.project == self.project):
            return resource.path
        else:
            return resource.real_path
class TrainerMemoryTracker():
    """Tracks CPU (and, when available, GPU) memory usage per training stage.

    Reports begin/end/alloc/peaked numbers for the stages in ``stages``.
    CPU peak memory is sampled by a daemon thread, since psutil exposes no
    native peak-RSS counter; GPU numbers come from torch.cuda counters.
    Tracking is disabled entirely when ``skip_memory_metrics`` is set or
    psutil is unavailable.
    """
    # Maps caller function names (inspected from the stack) to stage names.
    stages = {'__init__': 'init', 'train': 'train', '_inner_training_loop': 'train', 'evaluate': 'eval', 'predict': 'test'}

    def __init__(self, skip_memory_metrics=False):
        self.skip_memory_metrics = skip_memory_metrics
        if (not is_psutil_available()):
            # psutil is a soft dependency; without it, tracking is disabled.
            self.skip_memory_metrics = True
        if self.skip_memory_metrics:
            return
        import psutil
        if is_torch_cuda_available():
            import torch
            self.torch = torch
            self.gpu = {}
        else:
            self.torch = None
        self.process = psutil.Process()
        self.cur_stage = None
        self.cpu = {}
        self.init_reported = False

    def derive_stage(self):
        """Derive the stage name from the caller two frames up the stack."""
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        if (caller in self.stages):
            return self.stages[caller]
        else:
            raise ValueError(f'was called from {caller}, but only expect to be called from one of {self.stages.keys()}')

    def cpu_mem_used(self):
        """Current resident set size of this process, in bytes."""
        return self.process.memory_info().rss

    def peak_monitor_func(self):
        # Busy-samples RSS on a background thread until stop() clears the
        # peak_monitoring flag; records the maximum seen.
        self.cpu_mem_used_peak = (- 1)
        while True:
            self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
            if (not self.peak_monitoring):
                break

    def start(self):
        """Begin tracking for the stage derived from the caller."""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        # Ignore nested calls from inside an already-tracked stage.
        if ((self.cur_stage is not None) and (self.cur_stage != stage)):
            return
        self.cur_stage = stage
        gc.collect()
        if (self.torch is not None):
            # Reset CUDA counters so the peak reflects only this stage.
            self.torch.cuda.reset_peak_memory_stats()
            self.torch.cuda.empty_cache()
        if (self.torch is not None):
            self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
        self.cpu_mem_used_at_start = self.cpu_mem_used()
        # Launch the CPU peak-sampling daemon thread.
        self.peak_monitoring = True
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()

    def stop(self, stage):
        """Stop tracking *stage* and record its begin/end/alloc/peaked deltas."""
        if ((self.cur_stage is not None) and (self.cur_stage != stage)):
            return
        # Signal the sampling thread to exit.
        self.peak_monitoring = False
        gc.collect()
        if (self.torch is not None):
            self.torch.cuda.empty_cache()
        if (self.torch is not None):
            self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
            self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
            self.gpu[self.cur_stage] = dict(begin=self.gpu_mem_used_at_start, end=self.gpu_mem_used_now, alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start), peaked=max(0, (self.gpu_mem_used_peak - self.gpu_mem_used_now)))
        self.cpu_mem_used_now = self.cpu_mem_used()
        self.cpu[self.cur_stage] = dict(begin=self.cpu_mem_used_at_start, end=self.cpu_mem_used_now, alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start), peaked=max(0, (self.cpu_mem_used_peak - self.cpu_mem_used_now)))
        # Mark stage as finished so the next start() can begin a new one.
        self.cur_stage = None

    def update_metrics(self, stage, metrics):
        """Write the recorded deltas for *stage* into the *metrics* dict."""
        if self.skip_memory_metrics:
            return
        if ((self.cur_stage is not None) and (self.cur_stage != stage)):
            return
        stages = [stage]
        # Report the init-stage numbers once, alongside the first stage.
        if (not self.init_reported):
            stages.insert(0, 'init')
            self.init_reported = True
        for stage in stages:
            for t in ['alloc', 'peaked']:
                if ((stage in self.cpu) and (t in self.cpu[stage])):
                    metrics[f'{stage}_mem_cpu_{t}_delta'] = self.cpu[stage][t]
                if ((self.torch is not None) and (stage in self.gpu) and (t in self.gpu[stage])):
                    metrics[f'{stage}_mem_gpu_{t}_delta'] = self.gpu[stage][t]
        if (stages[0] == 'init'):
            metrics['before_init_mem_cpu'] = self.cpu['init']['begin']
            if (self.torch is not None):
                metrics['before_init_mem_gpu'] = self.gpu['init']['begin']

    def stop_and_update_metrics(self, metrics=None):
        """Convenience: stop the current stage and optionally report metrics."""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        self.stop(stage)
        if (metrics is not None):
            self.update_metrics(stage, metrics)
@then('rendered_page_break.preceding_paragraph_fragment is the content before break')
def then_rendered_page_break_preceding_paragraph_fragment_is_the_content_before_break(context: Context):
    """The fragment before a rendered page break is a Paragraph carrying the
    pre-break text, alignment, and run style.

    NOTE(review): the original step decorator was garbled to a bare string
    tuple; restored as behave's `@then(...)` — confirm against upstream.
    """
    para_frag = context.rendered_page_break.preceding_paragraph_fragment
    actual_value = type(para_frag).__name__
    expected_value = 'Paragraph'
    assert (actual_value == expected_value), f"expected: '{expected_value}', got: '{actual_value}'"
    actual_value = para_frag.text
    expected_value = 'Page break here>>'
    assert (actual_value == expected_value), f"expected: '{expected_value}', got: '{actual_value}'"
    actual_value = para_frag.alignment
    expected_value = WD_PARAGRAPH_ALIGNMENT.CENTER
    assert (actual_value == expected_value), f"expected: '{expected_value}', got: '{actual_value}'"
    actual_value = para_frag.runs[0].style.name
    expected_value = 'Default Paragraph Font'
    assert (actual_value == expected_value), f"expected: '{expected_value}', got: '{actual_value}'"
def test_typeshed(args: TestConfig, tempdir: Path) -> TestSummary:
    """Run the stdlib and/or third-party stub checks selected by args.filter
    and merge their results into one summary."""
    print(f'*** Testing Python {args.version} on {args.platform}')
    stdlib_dir = Path('stdlib')
    stubs_dir = Path('stubs')
    summary = TestSummary()

    def _selected(target: Path) -> bool:
        # True when the filter names the directory itself or anything inside it.
        return target in args.filter or any(target in p.parents for p in args.filter)

    if _selected(stdlib_dir):
        mypy_result, files_checked = test_stdlib(args)
        summary.register_result(mypy_result, files_checked)
        print()
    if _selected(stubs_dir):
        summary.merge(test_third_party_stubs(args, tempdir))
        print()
    return summary
def test_path_warning(pipx_temp_env, capsys, monkeypatch, caplog):
    """The PATH warning appears only once the install location is missing from PATH."""
    warning = 'is not on your PATH environment variable'
    # First install: PATH is intact, so no warning should be logged.
    assert not run_pipx_cli(['install', 'pycowsay'])
    assert warning not in unwrap_log_text(caplog.text)
    # Empty out PATH and force a reinstall: now the warning must fire.
    monkeypatch.setenv('PATH', '')
    assert not run_pipx_cli(['install', 'pycowsay', '--force'])
    assert warning in unwrap_log_text(caplog.text)
def call_api(input_json: Dict[str, Any], api_key) -> Dict[str, Any]:
    """POST *input_json* to the API endpoint, authenticating via the X-API-Key header.

    Returns the decoded JSON body on HTTP 200; otherwise a dict carrying the
    status code and raw response text.
    """
    headers = {'X-API-Key': api_key, 'Content-Type': 'application/json'}
    # NOTE(review): the endpoint URL literal was lost in this copy — restore the real address.
    url = ''
    # Bug fix: the payload was accepted but never transmitted; send it as the JSON body.
    response = requests.post(url, headers=headers, json=input_json)
    if response.status_code == 200:
        return response.json()
    return {'status_code': response.status_code, 'text': response.text}
def valid_int(s, min_value=None, max_value=None):
    """Validate that *s* is a string holding an integer within [min_value, max_value].

    Returns (True, parsed_int) on success, otherwise (False, reason_message).
    """
    if s is None:
        return (False, 'cannot be None')
    if not isinstance(s, str):
        return (False, 'must be a string value')
    # Bug fix: int() used to raise ValueError on non-numeric strings instead of
    # reporting a validation failure like every other branch does.
    try:
        value = int(s)
    except ValueError:
        return (False, 'must be an integer string')
    if max_value is not None and value > max_value:
        return (False, '%d must not be greater than %d' % (value, max_value))
    if min_value is not None and value < min_value:
        return (False, '%d must not be less than %d' % (value, min_value))
    return (True, value)
def get_module_inp_acts(module: torch.nn.Module, model: torch.nn.Module, params: SeqMseParams, forward_fn: Callable, cached_dataset: CachedDataset) -> torch.Tensor:
    """Collect the input activations seen by *module* over params.num_batches batches.

    A forward hook captures the module's first positional input and then raises
    StopForwardException so the rest of the model is never executed.
    """
    inp_acts = []
    def hook_fn(_, inp, __):
        # Capture only the first positional input, then abort the forward pass.
        if isinstance(inp, tuple):
            inp_acts.append(inp[0])
        raise StopForwardException
    handle = module.register_forward_hook(hook_fn)
    iterator = iter(cached_dataset)
    for _ in range(params.num_batches):
        # Move the batch onto the model's device before running it.
        batch = change_tensor_device_placement(next(iterator), get_device(model))
        try:
            with in_eval_mode(model), torch.no_grad():
                forward_fn(model, batch)
        except StopForwardException:
            # Expected: the hook stops the forward pass once the input is captured.
            pass
    handle.remove()
    # Stack per-batch activations: (num_batches, *input_shape) — assumes uniform shapes per batch.
    inp_acts = torch.stack(inp_acts)
    return inp_acts
def init_state(model, state_in, state_out, dt):
    """Integrate particle state forward by one step of size *dt*, writing into *state_out*."""
    wp.launch(
        kernel=integrate_particles,
        dim=model.particle_count,
        inputs=[
            state_in.particle_q,
            state_in.particle_qd,
            state_in.particle_f,
            model.particle_inv_mass,
            model.gravity,
            dt,
        ],
        outputs=[state_out.particle_q, state_out.particle_qd],
        device=model.device,
    )
def factor(n):
    """Yield the prime factorization of *n* in nondecreasing order."""
    # math.isqrt is missing on very old interpreters; fall back to a float sqrt.
    isqrt = getattr(math, 'isqrt', (lambda x: int(math.sqrt(x))))
    for prime in sieve(isqrt(n) + 1):
        # Divide each prime out completely before moving to the next one.
        while n % prime == 0:
            yield prime
            n //= prime
        if n == 1:
            return
    # Whatever survives trial division up to sqrt(n) is itself prime.
    if n >= 2:
        yield n
_SAMPLERS.register_module()
class CustomGroupMultiSourceSampler(GroupMultiSourceSampler):
    """Multi-source sampler that groups samples by image orientation per source."""

    def _get_source_group_info(self) -> None:
        """Bucket every dataset index into group 0 (width < height) or 1, per source."""
        num_sources = len(self.num_per_source)
        self.group2size_per_source = [{0: 0, 1: 0} for _ in range(num_sources)]
        self.group2inds_per_source = [{0: [], 1: []} for _ in range(num_sources)]
        for source_idx, dataset in enumerate(self.dataset.datasets):
            sizes = self.group2size_per_source[source_idx]
            indices = self.group2inds_per_source[source_idx]
            for sample_idx in range(len(dataset)):
                info = dataset.get_data_info(sample_idx)
                group = 0 if info['width'] < info['height'] else 1
                sizes[group] += 1
                indices[group].append(sample_idx)
        # Aggregate group sizes across all sources, then derive the sampling ratio.
        self.group_sizes = np.zeros(2, dtype=np.int64)
        for sizes in self.group2size_per_source:
            for group, size in sizes.items():
                self.group_sizes[group] += size
        self.group_ratio = self.group_sizes / sum(self.group_sizes)
def try_accept_invite(code, user):
    """Confirm a team invite code for *user*, clear matching notifications, and log the acceptance."""
    team, inviter = model.team.confirm_team_invite(code, user)
    orgname = team.organization.username
    model.notification.delete_matching_notifications(user, 'org_team_invite', org=orgname)
    log_action('org_team_member_invite_accepted', orgname,
               {'member': user.username, 'team': team.name, 'inviter': inviter.username})
    return team
def test_wandb_hook():
    """Smoke-test WandbLoggerHook: run one train+val epoch against a mocked wandb
    module and verify init/log/join were invoked with the expected arguments."""
    # Inject the mock BEFORE the hook is constructed so the hook binds to it.
    sys.modules['wandb'] = MagicMock()
    runner = _build_demo_runner()
    hook = WandbLoggerHook()
    loader = DataLoader(torch.ones((5, 2)))
    runner.register_hook(hook)
    runner.run([loader, loader], [('train', 1), ('val', 1)])
    shutil.rmtree(runner.work_dir)
    hook.wandb.init.assert_called_with()
    # step=6 — presumably the iteration counter after 5 train iters; TODO confirm
    # against the runner's iteration accounting.
    hook.wandb.log.assert_called_with({'learning_rate': 0.02, 'momentum': 0.95}, step=6, commit=True)
    hook.wandb.join.assert_called_with()
_REGISTRY.register()
class VideoTestDUFDataset(VideoTestDataset):
    """Video test dataset for DUF: LQ frames may be synthesized on the fly by
    downsampling GT frames when opt['use_duf_downsampling'] is set.

    NOTE(review): the '_REGISTRY.register()' line above looks like a class
    decorator that lost its '@' in this copy — confirm against the original file.
    """
    def __getitem__(self, index):
        """Return one test sample: LQ frame stack, GT frame, and bookkeeping metadata."""
        folder = self.data_info['folder'][index]
        (idx, max_idx) = self.data_info['idx'][index].split('/')
        (idx, max_idx) = (int(idx), int(max_idx))
        border = self.data_info['border'][index]
        lq_path = self.data_info['lq_path'][index]
        # Neighboring frame indices around idx, padded at sequence boundaries.
        select_idx = generate_frame_indices(idx, max_idx, self.opt['num_frame'], padding=self.opt['padding'])
        if self.cache_data:
            if self.opt['use_duf_downsampling']:
                # Synthesize LQ frames from cached GT via DUF-style downsampling.
                imgs_lq = self.imgs_gt[folder].index_select(0, torch.LongTensor(select_idx))
                imgs_lq = duf_downsample(imgs_lq, kernel_size=13, scale=self.opt['scale'])
            else:
                imgs_lq = self.imgs_lq[folder].index_select(0, torch.LongTensor(select_idx))
            img_gt = self.imgs_gt[folder][idx]
        else:
            if self.opt['use_duf_downsampling']:
                # Read GT frames from disk (mod-cropped to the scale), then downsample to LQ.
                img_paths_lq = [self.imgs_gt[folder][i] for i in select_idx]
                imgs_lq = read_img_seq(img_paths_lq, require_mod_crop=True, scale=self.opt['scale'])
                imgs_lq = duf_downsample(imgs_lq, kernel_size=13, scale=self.opt['scale'])
            else:
                img_paths_lq = [self.imgs_lq[folder][i] for i in select_idx]
                imgs_lq = read_img_seq(img_paths_lq)
            img_gt = read_img_seq([self.imgs_gt[folder][idx]], require_mod_crop=True, scale=self.opt['scale'])
            # Drop the singleton frame dimension (in place).
            img_gt.squeeze_(0)
        return {'lq': imgs_lq, 'gt': img_gt, 'folder': folder, 'idx': self.data_info['idx'][index], 'border': border, 'lq_path': lq_path}
class GenericRemote(RemoteControl):
    """Remote control that steps through channels one at a time."""

    def __init__(self, tvFactory: TVFactory):
        super().__init__(tvFactory)

    def nextChannel(self) -> None:
        """Switch to the channel directly above the current one."""
        self.setChannel(self.getChannel() + 1)

    def prevChannel(self) -> None:
        """Switch to the channel directly below the current one."""
        self.setChannel(self.getChannel() - 1)
class Counter():
    """StatsD-style counter metric that serializes increments onto a transport."""

    def __init__(self, transport: Transport, name: bytes, tags: Optional[Dict[str, Any]] = None):
        self.transport = transport
        self.name = name
        self.tags = tags

    def increment(self, delta: float = 1.0, sample_rate: float = 1.0) -> None:
        """Record a positive change of *delta*."""
        self.send(delta, sample_rate)

    def decrement(self, delta: float = 1.0, sample_rate: float = 1.0) -> None:
        """Record a negative change of *delta* (delegates to increment with -delta)."""
        self.increment(delta=-delta, sample_rate=sample_rate)

    def send(self, delta: float, sample_rate: float) -> None:
        """Build the wire payload `<name><tags>:<delta>|c[|<rate>]` and hand it to the transport."""
        payload = self.name
        tag_bytes = _format_tags(self.tags)
        if tag_bytes:
            payload += tag_bytes
        payload += f':{delta:g}'.encode() + b'|c'
        # Sub-unity sample rates are appended as a trailing pipe-separated field.
        if sample_rate < 1.0:
            payload = b'|'.join([payload, f'{sample_rate:g}'.encode()])
        self.transport.send(payload)
def find_lib(elf: ELFFile, lib: str, ldpaths: list[str], root: str = '/') -> tuple[str | None, str | None]:
    """Search *ldpaths* for *lib*; return (resolved_target, search_path) for the
    first ABI-compatible match, or (None, None) when nothing matches."""
    for ldpath in ldpaths:
        candidate = os.path.join(ldpath, lib)
        target = readlink(candidate, root, prefixed=True)
        if not os.path.exists(target):
            continue
        with open(target, 'rb') as fh:
            # Only accept libraries compatible with the querying ELF's ABI.
            if compatible_elfs(elf, ELFFile(fh)):
                return (target, candidate)
    return (None, None)
def postprocess_text(preds, responses, metric_name):
    """Normalize prediction/reference strings into the shape the named metric expects.

    - 'rouge': one sentence per line (via nltk sentence tokenization)
    - 'sacrebleu': each reference wrapped in a single-element list
    - 'bleu': whitespace-tokenized predictions; references tokenized and wrapped
    - any other name: stripped strings, otherwise untouched

    Returns the (preds, responses) pair after normalization.
    """
    _preds = [pred.strip() for pred in preds]
    _responses = [response.strip() for response in responses]
    if metric_name == 'rouge':
        _preds = ['\n'.join(nltk.sent_tokenize(pred)) for pred in _preds]
        _responses = ['\n'.join(nltk.sent_tokenize(response)) for response in _responses]
    elif metric_name == 'sacrebleu':
        _responses = [[response] for response in _responses]
    elif metric_name == 'bleu':
        _preds = [pred.split(' ') for pred in _preds]
        _responses = [[response.split(' ')] for response in _responses]
    # Unknown metric names fall through with only the strip applied (dead
    # `else: pass` branch removed).
    return (_preds, _responses)
.supported(only_if=(lambda backend: backend.hash_supported(hashes.SHA512_224())), skip_message='Does not support SHA512/224')
class TestSHA512224():
    """Generated tests exercising SHA-512/224 against the CAVP vector files.

    NOTE(review): the '.supported(...)' line above looks like a class decorator
    whose prefix was lost in this copy — confirm against the original file.
    """
    # One generated attribute holding the parametrized hash tests.
    test_sha512_224 = generate_hash_test(load_hash_vectors, os.path.join('hashes', 'SHA2'), ['SHA512_224LongMsg.rsp', 'SHA512_224ShortMsg.rsp'], hashes.SHA512_224())
.usefixtures('hook_fixture')
def test_hook_calls_subscriber_async_in_existing_loop():
    """Firing a hook from inside a running event loop schedules the subscribed
    coroutine on that same loop.

    NOTE(review): the '.usefixtures' line above appears to be a pytest marker
    whose '@pytest.mark' prefix was lost in this copy — confirm against the
    original file.
    """
    async def t():
        val = 0
        async def co(new_val):
            nonlocal val
            val = new_val
        hook.subscribe.group_window_add(co(8))
        hook.fire('group_window_add')
        # Yield control once so the scheduled coroutine gets a chance to run.
        (await asyncio.sleep(0))
        assert (val == 8)
    asyncio.run(t())
class Plugins():
    """Registry and serial executor for Arcaflow plugin steps, keyed by schema ID."""
    # Maps a step's schema ID to its PluginStep.
    steps_by_id: Dict[(str, PluginStep)]
    def __init__(self, steps: List[PluginStep]):
        self.steps_by_id = dict()
        for step in steps:
            # Step IDs must be unique across the supplied list.
            if (step.schema.id in self.steps_by_id):
                raise Exception('Duplicate step ID: {}'.format(step.schema.id))
            self.steps_by_id[step.schema.id] = step
    def run(self, file: str, kubeconfig_path: str, kraken_config: str):
        """Execute the scenario *file* — a list of {'id', 'config'} entries — in order.

        Injects *kubeconfig_path* / *kraken_config* into a step's input when the
        step's schema declares matching properties. Raises Exception on malformed
        entries, unknown step IDs, or steps whose output ID marks an error.
        """
        data = serialization.load_from_file(abspath(file))
        if (not isinstance(data, list)):
            raise Exception('Invalid scenario configuration file: {} expected list, found {}'.format(file, type(data).__name__))
        i = 0
        for entry in data:
            # Validate the entry's shape before dispatching to a step.
            if (not isinstance(entry, dict)):
                raise Exception("Invalid scenario configuration file: {} expected a list of dict's, found {} on step {}".format(file, type(entry).__name__, i))
            if ('id' not in entry):
                raise Exception("Invalid scenario configuration file: {} missing 'id' field on step {}".format(file, i))
            if ('config' not in entry):
                raise Exception("Invalid scenario configuration file: {} missing 'config' field on step {}".format(file, i))
            if (entry['id'] not in self.steps_by_id):
                raise Exception('Invalid step {} in {} ID: {} expected one of: {}'.format(i, file, entry['id'], ', '.join(self.steps_by_id.keys())))
            step = self.steps_by_id[entry['id']]
            unserialized_input = step.schema.input.unserialize(entry['config'])
            # Hand well-known paths to steps whose input schema declares them.
            if ('kubeconfig_path' in step.schema.input.properties):
                unserialized_input.kubeconfig_path = kubeconfig_path
            if ('kraken_config' in step.schema.input.properties):
                unserialized_input.kraken_config = kraken_config
            (output_id, output_data) = step.schema(unserialized_input)
            logging.info((step.render_output(output_id, output_data) + '\n'))
            # A step signals failure through one of its designated error output IDs.
            if (output_id in step.error_output_ids):
                raise Exception('Step {} in {} ({}) failed'.format(i, file, step.schema.id))
            i = (i + 1)
    def json_schema(self):
        """Build a JSON schema (tab-indented string) describing valid scenario files,
        with one 'oneOf' alternative per registered step."""
        # NOTE(review): the '$id'/'$schema' URL literals on the next line were garbled
        # in this copy (unterminated strings) — restore them from the original source.
        result = {'$id': ' '$schema': ' 'title': 'Kraken Arcaflow scenarios', 'description': 'Serial execution of Arcaflow Python plugins. See for details.', 'type': 'array', 'minContains': 1, 'items': {'oneOf': []}}
        for step_id in self.steps_by_id.keys():
            step = self.steps_by_id[step_id]
            step_input = jsonschema.step_input(step.schema)
            # Strip per-step schema metadata; it is provided at the document level.
            del step_input['$id']
            del step_input['$schema']
            del step_input['title']
            del step_input['description']
            result['items']['oneOf'].append({'type': 'object', 'properties': {'id': {'type': 'string', 'const': step_id}, 'config': step_input}, 'required': ['id', 'config']})
        return json.dumps(result, indent='\t')
class Encoder(nn.Module):
    """Wraps a VQ encoder pipeline: encode, pre-quant conv, quantize, return code indices."""

    def __init__(self, encoder, quant_conv, quantize):
        super().__init__()
        self.encoder = encoder
        self.quant_conv = quant_conv
        self.quantize = quantize

    # Bug fix: this decorator was garbled to a bare '_grad()' call, which raises
    # NameError at class-creation time; restored as torch.no_grad() — the forward
    # pass is inference-only (it returns discrete code indices, nothing to train).
    @torch.no_grad()
    def forward(self, x):
        # Map inputs from [0, 1] to [-1, 1] before encoding.
        x = (2 * x) - 1
        h = self.encoder(x)
        h = self.quant_conv(h)
        # quantize returns (quantized, loss, [..., ..., indices]); only indices are kept.
        (quant, _, [_, _, indices]) = self.quantize(h)
        # Flatten the per-sample code indices: (batch, num_codes).
        return indices.view(x.shape[0], (- 1))
class YahooOAuth2(BaseOAuth2):
    """Yahoo OpenID-Connect/OAuth2 authentication backend.

    NOTE(review): several endpoint URL literals in this class were truncated in
    this copy (unterminated strings) — restore them from the original file.
    """
    name = 'yahoo-oauth2'
    # OIDC subject claim is used as the stable user ID.
    ID_KEY = 'sub'
    AUTHORIZATION_URL = '
    ACCESS_TOKEN_URL = '
    ACCESS_TOKEN_METHOD = 'POST'
    # (provider field, local field) pairs copied into extra_data on login.
    EXTRA_DATA = [('sub', 'id'), ('access_token', 'access_token'), ('expires_in', 'expires'), ('refresh_token', 'refresh_token'), ('token_type', 'token_type')]
    def get_user_names(self, first_name, last_name):
        """Return (fullname, first, last); all None when neither part is present."""
        if (first_name or last_name):
            return (' '.join((first_name, last_name)), first_name, last_name)
        return (None, None, None)
    def get_user_details(self, response):
        """Map the OIDC userinfo *response* onto the framework's user-detail dict."""
        (fullname, first_name, last_name) = self.get_user_names(first_name=response.get('given_name'), last_name=response.get('family_name'))
        email = response.get('email')
        return {'username': response.get('nickname'), 'email': email, 'fullname': fullname, 'first_name': first_name, 'last_name': last_name}
    def user_data(self, access_token, *args, **kwargs):
        """Fetch the userinfo document using the bearer *access_token*."""
        url = '
        return self.get_json(url, headers={'Authorization': f'Bearer {access_token}'}, method='GET')
    # NOTE(review): the bare '_' below looks like the remnant of a stripped
    # decorator — confirm against the original file.
    _
    def auth_complete(self, *args, **kwargs):
        """Exchange the authorization code for tokens using HTTP basic client auth."""
        self.process_error(self.data)
        response = self.request_access_token(self.ACCESS_TOKEN_URL, auth=HTTPBasicAuth(*self.get_key_and_secret()), data=self.auth_complete_params(self.validate_state()), headers=self.auth_headers(), method=self.ACCESS_TOKEN_METHOD)
        self.process_error(response)
        return self.do_auth(response['access_token'], *args, response=response, **kwargs)
    def refresh_token_params(self, token, *args, **kwargs):
        """Payload for the refresh-token grant ('oob' redirect per Yahoo's flow)."""
        return {'refresh_token': token, 'grant_type': 'refresh_token', 'redirect_uri': 'oob'}
    def refresh_token(self, token, *args, **kwargs):
        """Refresh the access token; Yahoo requires HTTP basic auth on this call too."""
        params = self.refresh_token_params(token, *args, **kwargs)
        url = (self.REFRESH_TOKEN_URL or self.ACCESS_TOKEN_URL)
        method = self.REFRESH_TOKEN_METHOD
        # GET carries the payload as query params, anything else as a request body.
        key = ('params' if (method == 'GET') else 'data')
        request_args = {'headers': self.auth_headers(), 'method': method, key: params}
        request = self.request(url, auth=HTTPBasicAuth(*self.get_key_and_secret()), **request_args)
        return self.process_refresh_token_response(request, *args, **kwargs)
    def auth_complete_params(self, state=None):
        """Payload for the authorization-code exchange."""
        return {'grant_type': 'authorization_code', 'code': self.data.get('code', ''), 'redirect_uri': self.get_redirect_uri(state)}
class ModuleType(Enum):
    """States a module (cell) can be in during layout."""
    AVAILABLE = 1
    RESERVED = 2
    BLOCKED = 3

    def to_char(module):
        """Serialize this state as a single character ('1'/'2'/'0')."""
        chars = {ModuleType.AVAILABLE: '1', ModuleType.RESERVED: '2', ModuleType.BLOCKED: '0'}
        return chars.get(module)

    def to_color(module):
        """Grayscale value used when rendering this state."""
        colors = {ModuleType.AVAILABLE: 255, ModuleType.RESERVED: 128, ModuleType.BLOCKED: 128}
        return colors.get(module)
def parse(code: str, module_name: str = '', path: str | None = None, apply_transforms: bool = True) -> nodes.Module:
    """Parse *code* (dedented first) into an astroid Module node."""
    builder = AstroidBuilder(manager=AstroidManager(), apply_transforms=apply_transforms)
    return builder.string_build(textwrap.dedent(code), modname=module_name, path=path)
class AdversarialAttacker():
    """Applies character/word-level adversarial perturbations to text via named attack methods."""
    def __init__(self):
        # The phonetic attacker loads its statistics from a folder shipped next to this module.
        self.phonetic_attacker = PhoneticAttacker(stats_folder=os.path.join(os.path.realpath(os.path.dirname(__file__)), 'phonetic_attacks/statistics'))
        self.confusable_attacker = UnicodeConfusable()
        # Every attack name accepted by do_one_attack / multiattack.
        self.methods = ['phonetic', 'full-swap', 'inner-swap', 'disemvowel', 'truncate', 'keyboard-typo', 'natural-typo', 'intrude', 'segmentation', 'confusable']
    def do_one_attack(self, sentence, method, severity):
        """Apply a single named attack at the given *severity* to *sentence*.

        Raises ValueError for unknown method names.
        """
        if (method not in self.methods):
            raise ValueError('Invalid method')
        elif (method == 'phonetic'):
            return self.phonetic_attacker(sentence, severity)
        elif (method == 'confusable'):
            return self.confusable_attacker(sentence, severity)
        elif (method == 'segmentation'):
            return manip_segmentations(sentence, severity)
        else:
            # All remaining methods share one generic perturbation implementation.
            return simple_perturb(sentence, method, severity)
    def multiattack(self, sentence, attacks_with_severity):
        """Apply a sequence of (attack, severity) pairs in order.

        'rand' picks a random method, re-rolling so 'intrude' can only ever be
        the final attack in the chain.
        """
        for (i, (attack, severity)) in enumerate(attacks_with_severity):
            if (attack == 'rand'):
                attack = random.choice(self.methods)
                # Re-roll: 'intrude' is only allowed as the last attack.
                while ((attack == 'intrude') and (i != (len(attacks_with_severity) - 1))):
                    attack = random.choice(self.methods)
            sentence = self.do_one_attack(sentence, attack, severity)
        return sentence
def for_each_class():
    """Yield each `class` entry registered in the kernel's class_kset (gdb helper)."""
    for kobj in kset_for_each_object(gdb.parse_and_eval('class_kset')):
        # Walk from the embedded kobject back to its enclosing kset, then to subsys_private.
        subsys = container_of(kobj, kset_type.get_type().pointer(), 'kobj')
        subsys_priv = container_of(subsys, subsys_private_type.get_type().pointer(), 'subsys')
        (yield subsys_priv['class'])
def note_detection_with_onset_offset_regress(frame_output, onset_output, onset_shift_output, offset_output, offset_shift_output, velocity_output, frame_threshold):
    """Segment framewise onset/offset/frame outputs into note events.

    A note opens at each onset frame and closes when the frame activation drops
    below *frame_threshold*, an offset is detected, a new onset arrives, the note
    exceeds 600 frames, or the sequence ends.

    Returns a list of [begin, end, onset_shift, offset_shift, velocity] entries
    sorted by begin frame.
    """
    output_tuples = []
    bgn = None              # frame index where the currently open note began
    frame_disappear = None  # first frame whose activation fell below the threshold
    offset_occur = None     # first frame where an offset was detected
    for i in range(onset_output.shape[0]):
        if onset_output[i] == 1:
            # Bug fix: `if bgn:` treated a note starting at frame 0 as "no open
            # note"; all state checks now compare against None explicitly.
            if bgn is not None:
                # A new onset closes the previous note one frame earlier.
                fin = max(i - 1, 0)
                output_tuples.append([bgn, fin, onset_shift_output[bgn], 0, velocity_output[bgn]])
                frame_disappear, offset_occur = None, None
            bgn = i
        if bgn is not None and i > bgn:
            if frame_output[i] <= frame_threshold and frame_disappear is None:
                frame_disappear = i
            if offset_output[i] == 1 and offset_occur is None:
                offset_occur = i
            if frame_disappear is not None:
                # Prefer the detected offset when it sits late within the note span.
                if offset_occur is not None and offset_occur - bgn > frame_disappear - offset_occur:
                    fin = offset_occur
                else:
                    fin = frame_disappear
                output_tuples.append([bgn, fin, onset_shift_output[bgn], offset_shift_output[fin], velocity_output[bgn]])
                bgn, frame_disappear, offset_occur = None, None, None
        if bgn is not None and (i - bgn >= 600 or i == onset_output.shape[0] - 1):
            # Force-close overly long notes and any note still open at sequence end.
            fin = i
            output_tuples.append([bgn, fin, onset_shift_output[bgn], offset_shift_output[fin], velocity_output[bgn]])
            bgn, frame_disappear, offset_occur = None, None, None
    output_tuples.sort(key=(lambda pair: pair[0]))
    return output_tuples
_fixtures(WebFixture, PartyAccountFixture, InputScenarios)
def test_persisting_input(web_fixture, party_account_fixture, input_scenarios):
    """UserInput round-trips form input values: save, read back, clear, save empty.

    NOTE(review): '_fixtures(...)' above and the bare '(Form)' below look like
    decorators that lost their prefixes in this copy — confirm against the
    original file.
    """
    (Form)
    class FormStub():
        view = web_fixture.view
        user_interface = EmptyStub(name='myui')
        channel_name = 'myform'
    fixture = input_scenarios
    form = FormStub()
    # Nothing stored yet for this form/input pair.
    previously_entered = UserInput.get_previously_entered_for_form(form, 'aninput', fixture.entered_input_type)
    assert (previously_entered is None)
    UserInput.save_input_value_for_form(form, 'aninput', fixture.entered_input, fixture.entered_input_type)
    previously_entered = UserInput.get_previously_entered_for_form(form, 'aninput', fixture.entered_input_type)
    assert (previously_entered == fixture.entered_input)
    # Clearing removes the stored value again.
    UserInput.clear_for_form(form)
    previously_entered = UserInput.get_previously_entered_for_form(form, 'aninput', fixture.entered_input_type)
    assert (previously_entered is None)
    # An empty value must also round-trip (distinct from "never entered").
    UserInput.save_input_value_for_form(form, 'aninput', fixture.empty_entered_input, fixture.entered_input_type)
    previously_entered = UserInput.get_previously_entered_for_form(form, 'aninput', fixture.entered_input_type)
    assert (previously_entered == fixture.empty_entered_input)
class HerokuTests(unittest.TestCase):
    """Tests for the heroku.Host adapter, which reads its settings from the environment.

    NOTE(review): the '.dict(os.environ, ...)' lines below look like
    '@unittest.mock.patch'-style decorators that lost their prefix in this
    copy — confirm against the original file.
    """
    def setUp(self):
        self.server = heroku.Host()
    def test_port(self):
        """port() reflects the PORT environment variable."""
        old_port = os.environ.get('PORT')
        def reset_port():
            # Restore the pre-test PORT (or remove it entirely if it was unset).
            if (old_port is None):
                del os.environ['PORT']
            else:
                os.environ['PORT'] = old_port
        port = random.randint(1, ((2 ** 16) - 1))
        os.environ['PORT'] = str(port)
        self.addCleanup(reset_port)
        self.assertEqual(self.server.port(), port)
    def test_contrib_auth_token(self):
        """contrib_auth_token() reads GH_AUTH_TOKEN."""
        auth_token = 'some_oauth_token'
        os.environ['GH_AUTH_TOKEN'] = auth_token
        self.assertEqual(self.server.contrib_auth_token(), auth_token)
    def test_contrib_secret(self):
        """contrib_secret() reads GH_SECRET."""
        secret = 'secret'
        os.environ['GH_SECRET'] = secret
        self.assertEqual(self.server.contrib_secret(), secret)
    def test_user_agent(self):
        """user_agent() is None until USER_AGENT is set."""
        user_agent = 'Testing-Agent'
        self.assertIsNone(self.server.user_agent())
        os.environ['USER_AGENT'] = user_agent
        self.assertEqual(self.server.user_agent(), user_agent)
    def test_log_exception(self):
        """log_exception() writes the exception type, message, and traceback to stderr."""
        exc_type = NotImplementedError
        exc_message = 'hello'
        # Raise and catch so the exception carries a real traceback.
        try:
            raise exc_type(exc_message)
        except Exception as caught:
            exc = caught
        stderr = io.StringIO()
        with contextlib.redirect_stderr(stderr):
            self.server.log_exception(exc)
        logged = stderr.getvalue()
        self.assertIn(exc_type.__name__, logged)
        self.assertIn(exc_message, logged)
        self.assertIn('Traceback', logged)
    def test_log(self):
        """log() writes the message plus a trailing newline to stderr."""
        message = 'something happened'
        stderr = io.StringIO()
        with contextlib.redirect_stderr(stderr):
            self.server.log(message)
        self.assertEqual(stderr.getvalue(), (message + '\n'))
    .dict(os.environ, {'CLA_TRUSTED_USERS': 'miss-islington,bedevere-bot,blurb-it[bot]'})
    def test_trusted_users(self):
        """trusted_users() splits CLA_TRUSTED_USERS on commas into a frozenset."""
        self.assertEqual(self.server.trusted_users(), frozenset(['miss-islington', 'bedevere-bot', 'blurb-it[bot]']))
    .dict(os.environ, {'CLA_TRUSTED_USERS': ''})
    def test_no_trusted_users(self):
        """An empty CLA_TRUSTED_USERS yields a frozenset containing the empty string."""
        self.assertEqual(self.server.trusted_users(), frozenset({''}))
def run(client: Client, args: Namespace, config: Config):
    """Execute one benchmark session: wait for the cluster, configure memory pools,
    gather and print results, and optionally persist tidy output."""
    wait_for_cluster(client, shutdown_on_failure=True)
    # The run is meaningless without at least one worker present.
    assert (len(client.scheduler_info()['workers']) > 0)
    setup_memory_pools(client, (args.type == 'gpu'), args.rmm_pool_size, args.disable_rmm_pool, args.enable_rmm_async, args.enable_rmm_managed, args.rmm_release_threshold, args.rmm_log_directory, args.enable_rmm_statistics, args.enable_rmm_track_allocations)
    (address_to_index, results, message_data) = gather_bench_results(client, args, config)
    p2p_bw = peer_to_peer_bandwidths(message_data, address_to_index)
    config.pretty_print_results(args, address_to_index, p2p_bw, results)
    if args.output_basename:
        # Tidy (long-form) results are only produced when an output file was requested.
        (df, p2p_bw) = config.create_tidy_results(args, p2p_bw, results)
        df['num_workers'] = len(address_to_index)
        save_benchmark_data(args.output_basename, address_to_index, df, p2p_bw)
.parametrize('username,password,email', site_managers)
def test_is_site_manager_returns_true_for_site_managers(db, client, username, password, email):
    """Every account listed in site_managers is recognized by is_site_manager().

    NOTE(review): the '.parametrize' line above appears to be a '@pytest.mark'
    marker whose prefix was lost in this copy — confirm against the original file.
    """
    client.login(username=username, password=password)
    user = get_user_model().objects.get(username=username, email=email)
    assert (is_site_manager(user) is True)
def test_AddValueToZero_simple_weights_gt0():
    """AddValueToZero targeting weights is a no-op when every weight is already > 0."""
    dm = skcriteria.mkdm(matrix=[[1, 2, 3], [4, 5, 6]], objectives=[min, max, min], weights=[1, 2, 3])
    # Expected output equals the input: value=0.5 is only added to zero entries,
    # and none of these weights is zero.
    expected = skcriteria.mkdm(matrix=[[1, 2, 3], [4, 5, 6]], objectives=[min, max, min], weights=[1, 2, 3])
    scaler = AddValueToZero(value=0.5, target='weights')
    result = scaler.transform(dm)
    assert result.equals(expected)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.