code stringlengths 281 23.7M |
|---|
def fidelity_circuit(network: Union[(Network_DQNN, Network_QAOA)], state_pair: List[np.ndarray], draw_circ: bool=False) -> QuantumCircuit:
    """Build the circuit that measures the fidelity between the two states in `state_pair`.

    The circuit holds the network's auxiliary qubits followed by two data
    registers of `network.num_qubits` each; the measurement method comes from
    `network.fid_meas_method`.  Optionally draws the circuit to a file.
    """
    data_qubits = 2 * network.num_qubits
    # Destructive SWAP needs one classical bit per data qubit; other methods need one.
    clbits = data_qubits if network.fid_meas_method == 'destructive_swap' else 1
    circ, q_reg, c_reg = init_quantum_circuit(data_qubits + network.auxillary_qubits, clbits)
    # Input states are prepared on the qubits after the auxiliary ones.
    circ = add_input_state_initialization(circ, q_reg[network.auxillary_qubits:], network.num_qubits, state_pair)
    circ = add_fidelity_measurement(circ, q_reg, c_reg, network.num_qubits, method=network.fid_meas_method)
    if draw_circ:
        sd.draw_circuit(circ, filename='fid_circuit')
    return circ
def cuttree(node, nettree, levl):
    """Detach `node` (and recursively its first-child chain) from the tree.

    Removes all links between `node` and its parents/children, recurses down
    the first child, prints "<level>:<position of first node at that level>",
    and finally drops `node` from `nettree[levl]`.
    """
    # Unlink: children forget this parent, parents forget this child.
    for child in node.snode:
        child.pnode.remove(node)
    for parent in node.pnode:
        parent.snode.remove(node)
    # Depth-first along the first child before cleaning up this level.
    if node.snode != []:
        cuttree(node.snode[0], nettree, levl + 1)
    print(str(levl) + ':' + str(nettree[levl][0].position))
    nettree[levl].remove(node)
def verify_logs(directory, filename, mtype, meid):
    """Return True if any line of <directory>/<filename> contains `mtype`
    (and also `meid`, when `meid` is not None).

    Fix: the original opened the file without ever closing it, leaking the
    handle on every call (including the early returns); a `with` block now
    guarantees closure.
    """
    file_path = directory + '/' + filename
    with open(file_path, 'r') as log_file:
        for line in log_file:
            # NOTE(review): find() > 0 deliberately ignores a match at column 0,
            # presumably because lines carry a timestamp/prefix — confirm.
            if meid is not None:
                if line.find(mtype) > 0 and line.find(meid) > 0:
                    return True
            elif line.find(mtype) > 0:
                return True
    return False
def register_to_config(init):
    """Decorator for the `__init__` of classes inheriting from `ConfigMixin`.

    Wraps `init` so that all public (non-underscore-prefixed) keyword
    arguments are captured and forwarded to the instance's
    `register_to_config`, making them part of the saved configuration.

    Fix: the dangling bare expression `(init)` in the original was the residue
    of a stripped `@functools.wraps(init)` decorator (as in upstream
    diffusers); restored so the wrapper keeps the wrapped init's metadata.
    """
    import functools  # local import: this block may be applied without touching file imports

    @functools.wraps(init)
    def inner_init(self, *args, **kwargs):
        # Private kwargs (leading underscore) are stripped before calling init.
        init_kwargs = {k: v for (k, v) in kwargs.items() if (not k.startswith('_'))}
        init(self, *args, **init_kwargs)
        if (not isinstance(self, ConfigMixin)):
            raise RuntimeError(f'`_for_config` was applied to {self.__class__.__name__} init method, but this class does not inherit from `ConfigMixin`.')
        ignore = getattr(self, 'ignore_for_config', [])
        new_kwargs = {}
        signature = inspect.signature(init)
        # Skip parameter 0 (`self`) and anything explicitly ignored.
        parameters = {name: p.default for (i, (name, p)) in enumerate(signature.parameters.items()) if ((i > 0) and (name not in ignore))}
        # Positional args map onto the leading parameter names.
        for (arg, name) in zip(args, parameters.keys()):
            new_kwargs[name] = arg
        # Fill the rest from kwargs, falling back to declared defaults.
        new_kwargs.update({k: init_kwargs.get(k, default) for (k, default) in parameters.items() if ((k not in ignore) and (k not in new_kwargs))})
        getattr(self, 'register_to_config')(**new_kwargs)
    return inner_init
class ObjectEntry(Entry):
    """Catalog entry for a stored object.

    Carries the object's storage location, the serializer name used to encode
    it, its type name, and whether it is replicated.  Registers itself with
    the base `Entry` under type 'object'.
    """

    location: str
    serializer: str
    obj_type: str
    replicated: bool

    def __init__(self, location: str, serializer: str, obj_type: str, replicated: bool) -> None:
        super().__init__(type='object')
        # Plain attribute copies; order is immaterial.
        self.replicated = replicated
        self.obj_type = obj_type
        self.serializer = serializer
        self.location = location
def dataloader_didemo_test(args, tokenizer, subset='test'):
    """Build the DiDeMo evaluation DataLoader.

    Returns a tuple of (DataLoader, dataset length).  No shuffling and no
    batch dropping, as appropriate for evaluation.
    """
    dataset = DiDeMo_DataLoader(
        subset=subset,
        data_path=args.data_path,
        features_path=args.features_path,
        max_words=args.max_words,
        feature_framerate=args.feature_framerate,
        tokenizer=tokenizer,
        max_frames=args.max_frames,
        frame_order=args.eval_frame_order,
        slice_framepos=args.slice_framepos,
    )
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size_val,
        num_workers=args.num_thread_reader,
        shuffle=False,
        drop_last=False,
    )
    return (loader, len(dataset))
def _add_install(subparsers: argparse._SubParsersAction, shared_parser: argparse.ArgumentParser) -> None:
    """Register the `install` subcommand and its arguments on `subparsers`.

    Fix: the `--python` help text was missing the space between sentences
    ("executable.Requires" -> "executable. Requires").
    """
    p = subparsers.add_parser('install', help='Install a package', formatter_class=LineWrapRawTextHelpFormatter, description=INSTALL_DESCRIPTION, parents=[shared_parser])
    p.add_argument('package_spec', help='package name(s) or pip installation spec(s)', nargs='*')
    add_include_dependencies(p)
    p.add_argument('--force', '-f', action='store_true', help='Modify existing virtual environment and files in PIPX_BIN_DIR and PIPX_MAN_DIR')
    p.add_argument('--suffix', default='', help='Optional suffix for virtual environment and executable names. NOTE: The suffix feature is experimental and subject to change.')
    p.add_argument('--python', help=f'Python to install with. Possible values can be the executable name (python3.11), the version to pass to py launcher (3.11), or the full path to the executable. Requires Python {MINIMUM_PYTHON_VERSION} or above.')
    p.add_argument('--preinstall', action='append', help='Optional packages to be installed into the Virtual Environment before installing the main package.')
    add_pip_venv_args(p)
class Seq2seqTrainerTester(TestCasePlus):
    """Integration test: fine-tune a tiny BERT2BERT summarizer with Seq2SeqTrainer."""

    # NOTE(review): the bare name below looks like the residue of a stripped
    # decorator (probably `@require_torch`) — confirm against the original file.
    _torch

    def test_finetune_bert2bert(self):
        """Run a short train/eval loop on a tiny CNN/DailyMail subset; passes if no exception."""
        # Tiny encoder-decoder so the test stays fast.
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained('prajjwal1/bert-tiny', 'prajjwal1/bert-tiny')
        tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
        # Expose the encoder's vocab size on the top-level config.
        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        # Use [SEP] as EOS and [CLS] as the decoder start token.
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128
        # Load 1% splits, then cut further to 32 train / 16 val examples.
        train_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='train[:1%]')
        val_dataset = datasets.load_dataset('cnn_dailymail', '3.0.0', split='validation[:1%]')
        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))
        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenize articles as inputs (512 tokens) and highlights as targets (128 tokens).
            inputs = tokenizer(batch['article'], padding='max_length', truncation=True, max_length=512)
            outputs = tokenizer(batch['highlights'], padding='max_length', truncation=True, max_length=128)
            batch['input_ids'] = inputs.input_ids
            batch['attention_mask'] = inputs.attention_mask
            batch['decoder_input_ids'] = outputs.input_ids
            batch['labels'] = outputs.input_ids.copy()
            # Replace padding in labels with -100 so the loss ignores it.
            batch['labels'] = [[((- 100) if (token == tokenizer.pad_token_id) else token) for token in labels] for labels in batch['labels']]
            batch['decoder_attention_mask'] = outputs.attention_mask
            assert all([(len(x) == 512) for x in inputs.input_ids])
            assert all([(len(x) == 128) for x in outputs.input_ids])
            return batch

        def _compute_metrics(pred):
            # Exact-match accuracy between decoded predictions and labels.
            labels_ids = pred.label_ids
            pred_ids = pred.predictions
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = (sum([int((pred_str[i] == label_str[i])) for i in range(len(pred_str))]) / len(pred_str))
            return {'accuracy': accuracy}

        train_dataset = train_dataset.map(_map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['article', 'highlights'])
        train_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'])
        val_dataset = val_dataset.map(_map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=['article', 'highlights'])
        val_dataset.set_format(type='torch', columns=['input_ids', 'attention_mask', 'decoder_input_ids', 'decoder_attention_mask', 'labels'])
        output_dir = self.get_auto_remove_tmp_dir()
        training_args = Seq2SeqTrainingArguments(output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, predict_with_generate=True, evaluation_strategy='steps', do_train=True, do_eval=True, warmup_steps=0, eval_steps=2, logging_steps=2)
        trainer = Seq2SeqTrainer(model=bert2bert, args=training_args, compute_metrics=_compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer)
        # Any wiring error raises; completion is the success criterion.
        trainer.train()
def console_entry() -> None:
    """CLI entry point: run `main()` and translate pipe/interrupt failures to exit code 2."""
    try:
        main()
        sys.stdout.flush()
        sys.stderr.flush()
    except BrokenPipeError:
        # Our stdout was piped to a process that exited early (e.g. `... | head`).
        # Point stdout's fd at devnull so the interpreter's shutdown-time flush
        # does not raise a second BrokenPipeError.
        devnull = os.open(os.devnull, os.O_WRONLY)
        os.dup2(devnull, sys.stdout.fileno())
        sys.exit(2)
    except KeyboardInterrupt:
        # Re-parse options just to learn whether a traceback was requested.
        (_, options) = process_options(args=sys.argv[1:])
        if options.show_traceback:
            sys.stdout.write(traceback.format_exc())
        formatter = FancyFormatter(sys.stdout, sys.stderr, False)
        msg = 'Interrupted\n'
        sys.stdout.write(formatter.style(msg, color='red', bold=True))
        sys.stdout.flush()
        sys.stderr.flush()
        sys.exit(2)
def same_domain(url1: QUrl, url2: QUrl) -> bool:
    """Return True if both URLs share scheme, port and registrable domain.

    When the hosts have no recognized top-level domain, falls back to exact
    host comparison.
    """
    for url in (url1, url2):
        ensure_valid(url)
    # Scheme and port must both agree before looking at hosts.
    if url1.scheme() != url2.scheme() or url1.port() != url2.port():
        return False
    # topLevelDomain() only exists in the Qt5 API.
    assert machinery.IS_QT5, machinery.INFO
    tld1 = url1.topLevelDomain()
    tld2 = url2.topLevelDomain()
    if not tld1:
        # No public-suffix information: require identical hosts.
        return url1.host() == url2.host()
    if tld1 != tld2:
        return False

    def registrable_label(url, tld):
        # Label immediately to the left of the top-level domain.
        return url.host()[:-len(tld)].split('.')[-1]

    return registrable_label(url1, tld1) == registrable_label(url2, tld2)
def train_callback(batch_idx, outer_loss, list_out, stats):
    """Aggregate per-sample (accuracy, cross-entropy) pairs and feed `stats`.

    Returns a short progress message; `batch_idx` and `outer_loss` are
    accepted for the callback signature but unused here.
    """
    accuracies = [pair[0] for pair in list_out]
    entropies = [pair[1] for pair in list_out]
    mean_acc = np.mean(accuracies)
    mean_ce = np.mean(entropies)
    count = len(accuracies)
    # stats expects summed values plus the step count.
    stats.step(np.array([mean_acc * count, mean_ce * count]), n_step=count)
    return 'train batch acc: %.2f, ce: %.4f' % (mean_acc * 100, mean_ce)
def _train(params: Dict, dtrain: RayDMatrix, *args, evals=(), ray_params: RayParams, cpus_per_actor: int, gpus_per_actor: int, _training_state: _TrainingState, **kwargs) -> Tuple[(xgb.Booster, Dict, Dict)]:
    """Run one (possibly restarted) distributed XGBoost training attempt.

    Recreates any failed Ray actors, assigns data shards, starts a Rabit
    tracker, launches `actor.train` on every live actor and polls until all
    futures complete, draining the result queue along the way.

    Returns:
        (trained booster, evals_result dict, additional results dict).

    Raises:
        RayActorError: if an actor dies during data loading or training.
    """
    # Imported lazily to avoid a circular import with xgboost_ray.elastic.
    from xgboost_ray.elastic import _get_actor_alive_status, _maybe_schedule_new_actors, _update_scheduled_actor_states
    params = params.copy()
    _training_state.restart_training_at = None
    # Thread config: explicit nthread/n_jobs must not exceed the per-actor CPU
    # budget; if neither is given, default both to cpus_per_actor.
    if (('nthread' in params) or ('n_jobs' in params)):
        if ((('nthread' in params) and (params['nthread'] > cpus_per_actor)) or (('n_jobs' in params) and (params['n_jobs'] > cpus_per_actor))):
            raise ValueError('Specified number of threads greater than number of CPUs. \nFIX THIS by passing a lower value for the `nthread` parameter or a higher number for `cpus_per_actor`.')
    else:
        params['nthread'] = cpus_per_actor
        params['n_jobs'] = cpus_per_actor
    # Verbosity of both our logging and XGBoost itself follow ray_params.verbose.
    if ray_params.verbose:
        maybe_log = logger.info
        params.setdefault('verbosity', 1)
    else:
        maybe_log = logger.debug
        params.setdefault('verbosity', 0)

    def handle_actor_failure(actor_id):
        # Record the dead actor's rank so it gets recreated on the next restart.
        rank = _training_state.actors.index(actor_id)
        _training_state.failed_actor_ranks.add(rank)
        _training_state.actors[rank] = None

    # (Re-)create an actor for every rank previously marked as failed.
    newly_created = 0
    for i in list(_training_state.failed_actor_ranks):
        if (_training_state.actors[i] is not None):
            raise RuntimeError(f'Trying to create actor with rank {i}, but it already exists.')
        actor = _create_actor(rank=i, num_actors=ray_params.num_actors, num_cpus_per_actor=cpus_per_actor, num_gpus_per_actor=gpus_per_actor, resources_per_actor=ray_params.resources_per_actor, placement_group=_training_state.placement_group, queue=_training_state.queue, checkpoint_frequency=ray_params.checkpoint_frequency, distributed_callbacks=ray_params.distributed_callbacks)
        _training_state.actors[i] = actor
        _training_state.failed_actor_ranks.remove(i)
        newly_created += 1
    alive_actors = sum((1 for a in _training_state.actors if (a is not None)))
    maybe_log(f'[RayXGBoost] Created {newly_created} new actors ({alive_actors} total actors). Waiting until actors are ready for training.')
    # Distribute data shards: training matrix plus every evaluation matrix.
    dtrain.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
    dtrain.assign_shards_to_actors(_training_state.actors)
    for (deval, _) in evals:
        deval.assert_enough_shards_for_actors(num_actors=ray_params.num_actors)
        deval.assign_shards_to_actors(_training_state.actors)
    load_data = ([dtrain] + [eval[0] for eval in evals])  # NOTE(review): `eval` shadows the builtin
    prepare_actor_tasks = [_PrepareActorTask(actor, queue=_training_state.queue, stop_event=_training_state.stop_event, load_data=load_data) for actor in _training_state.actors if (actor is not None)]
    # Poll until every live actor has loaded its data, logging progress
    # at most once per STATUS_FREQUENCY_S seconds.
    start_wait = time.time()
    last_status = start_wait
    try:
        ready_states = [task.is_ready() for task in prepare_actor_tasks]
        while (not all(ready_states)):
            if (time.time() >= (last_status + ENV.STATUS_FREQUENCY_S)):
                wait_time = (time.time() - start_wait)
                logger.info(f'Waiting until actors are ready ({wait_time:.0f} seconds passed).')
                last_status = time.time()
            time.sleep(0.1)
            ready_states = [task.is_ready() for task in prepare_actor_tasks]
    except Exception as exc:
        # Stop all workers and record which actors died, then surface the error.
        _training_state.stop_event.set()
        _get_actor_alive_status(_training_state.actors, handle_actor_failure)
        raise RayActorError from exc
    maybe_log('[RayXGBoost] Starting XGBoost training.')
    # The Rabit tracker coordinates the distributed XGBoost workers.
    (rabit_process, rabit_args) = _start_rabit_tracker(alive_actors)
    if _training_state.checkpoint.value:
        # Resume from a previously stored checkpointed model.
        kwargs['xgb_model'] = pickle.loads(_training_state.checkpoint.value)
        if (_training_state.checkpoint.iteration == (- 1)):
            # Checkpoint says training already finished — return it as-is.
            logger.error('Trying to load continue from checkpoint, but the checkpointindicates training already finished. Returning lastcheckpointed model instead.')
            return (kwargs['xgb_model'], {}, _training_state.additional_results)
    # Per-actor callback result buckets survive restarts via additional_results.
    callback_returns = _training_state.additional_results.get('callback_returns')
    if (callback_returns is None):
        callback_returns = [list() for _ in range(len(_training_state.actors))]
        _training_state.additional_results['callback_returns'] = callback_returns
    _training_state.training_started_at = time.time()
    live_actors = [actor for actor in _training_state.actors if (actor is not None)]
    # Rank 0 (i == 0) is the actor that returns the trained booster.
    training_futures = [actor.train.remote(rabit_args, (i == 0), params, dtrain, evals, *args, **kwargs) for (i, actor) in enumerate(live_actors)]
    start_wait = time.time()
    last_status = start_wait
    has_queue_been_handled = False
    try:
        not_ready = training_futures
        while not_ready:
            # Drain checkpoints/callback results produced by the actors.
            if _training_state.queue:
                has_queue_been_handled = True
                _handle_queue(queue=_training_state.queue, checkpoint=_training_state.checkpoint, callback_returns=callback_returns)
            # Elastic mode: opportunistically bring replacement actors up mid-run.
            if (ray_params.elastic_training and (not ENV.ELASTIC_RESTART_DISABLED)):
                _maybe_schedule_new_actors(training_state=_training_state, num_cpus_per_actor=cpus_per_actor, num_gpus_per_actor=gpus_per_actor, resources_per_actor=ray_params.resources_per_actor, ray_params=ray_params, load_data=load_data)
                _update_scheduled_actor_states(_training_state)
            if (time.time() >= (last_status + ENV.STATUS_FREQUENCY_S)):
                wait_time = (time.time() - start_wait)
                logger.info(f'Training in progress ({wait_time:.0f} seconds since last restart).')
                last_status = time.time()
            # ray.get(ready) re-raises any actor exception promptly.
            (ready, not_ready) = ray.wait(not_ready, num_returns=len(not_ready), timeout=1)
            ray.get(ready)
        # Final queue drain; give late messages a moment if we never drained before.
        if (not has_queue_been_handled):
            time.sleep(1)
        if _training_state.queue:
            _handle_queue(queue=_training_state.queue, checkpoint=_training_state.checkpoint, callback_returns=callback_returns)
    except Exception as exc:
        logger.debug(f'Caught exception in training loop: {exc}')
        _training_state.stop_event.set()
        _get_actor_alive_status(_training_state.actors, handle_actor_failure)
        _stop_rabit_tracker(rabit_process)
        raise RayActorError from exc
    _stop_rabit_tracker(rabit_process)
    all_results: List[Dict[(str, Any)]] = ray.get(training_futures)
    # Rank 0's results carry the booster and the evaluation history.
    bst = all_results[0]['bst']
    evals_result = all_results[0]['evals_result']
    if callback_returns:
        _training_state.additional_results['callback_returns'] = callback_returns
    total_n = sum(((res['train_n'] or 0) for res in all_results))
    _training_state.additional_results['total_n'] = total_n
    return (bst, evals_result, _training_state.additional_results)
def _as_scalar(res, dtype=None):
    """Try to interpret `res` as a scalar of `dtype` (default: config.floatX).

    Returns the scalar variable, a cast of it, or None when `res` is not
    broadcastable everywhere or its integer dtype cannot be upcast losslessly.
    """
    if dtype is None:
        dtype = config.floatX
    # Only variables broadcastable in every dimension can act as scalars.
    if any(dim != 1 for dim in res.type.shape):
        return None
    # Peel off any chain of DimShuffles to reach the underlying variable.
    while res.owner and isinstance(res.owner.op, DimShuffle):
        res = res.owner.inputs[0]
    # Collapse to 0-d if needed.
    rval = res.dimshuffle() if res.type.ndim > 0 else res
    if rval.type.dtype not in integer_dtypes:
        return rval
    # Integer scalar: usable only when upcasting to `dtype` loses nothing.
    if pytensor.scalar.upcast(res.dtype, dtype) == dtype:
        return ptb.cast(rval, dtype)
    return None
class BFSCluster(Function):
    """Autograd wrapper around the PG_OP.bfs_cluster breadth-first clustering op."""

    # NOTE(review): torch.autograd.Function normally declares forward/backward
    # as @staticmethod; the decorators may have been stripped from this copy —
    # confirm against the original file.
    def forward(ctx, semantic_label, ball_query_idxs, start_len, threshold):
        """Run BFS clustering; returns (cluster_idxs, cluster_offsets)."""
        N = start_len.size(0)
        # The native kernel requires contiguous input tensors.
        assert semantic_label.is_contiguous()
        assert ball_query_idxs.is_contiguous()
        assert start_len.is_contiguous()
        # Empty tensors with semantic_label's dtype/device; filled in-place by the op.
        cluster_idxs = semantic_label.new()
        cluster_offsets = semantic_label.new()
        PG_OP.bfs_cluster(semantic_label, ball_query_idxs, start_len, cluster_idxs, cluster_offsets, N, threshold)
        return (cluster_idxs, cluster_offsets)

    def backward(ctx, a=None):
        # Clustering is non-differentiable: no gradient flows back.
        return None
class TestDiskUsageCollector(CollectorTestCase):
    """Tests for DiskUsageCollector using /proc/diskstats fixtures.

    Each collection test patches open() to serve a fixture and time.time()
    to a fixed value, collects twice (so rate-based metrics get a delta),
    and compares against pickled expected results.

    NOTE(review): the bare tuple expressions between methods, e.g.
    ``('os.access', Mock(return_value=True))`` and ``(Collector, 'publish')``,
    look like the arguments of stripped ``@patch(...)`` /
    ``@patch.object(...)`` decorators — confirm against the original file.
    """

    def setUp(self):
        # 512-byte sectors reported in kilobytes, 10-second interval.
        config = get_collector_config('DiskUsageCollector', {'interval': 10, 'sector_size': '512', 'byte_unit': 'kilobyte'})
        self.collector = DiskUsageCollector(config, None)

    def test_config(self):
        self.assertFalse(self.collector.config['send_zero'])

    def test_import(self):
        self.assertTrue(DiskUsageCollector)
    ('os.access', Mock(return_value=True))

    def test_get_disk_statistics(self):
        """Parse the diskstats fixture and check the (major, minor) device keys."""
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('diskstats')))
        open_mock = patch_open.start()
        result = self.collector.get_disk_statistics()
        patch_open.stop()
        open_mock.assert_called_once_with('/proc/diskstats')
        self.assertEqual(sorted(result.keys()), [(8, 0), (8, 1), (8, 16), (8, 17), (8, 32), (8, 33), (8, 48), (8, 49), (9, 0), (259, 0), (259, 1), (259, 2)])
        return result
    ('os.access', Mock(return_value=True))
    (Collector, 'publish')

    def test_should_work_with_real_data(self, publish_mock):
        """Two collections 10s apart must publish the pickled expected metrics."""
        # First pass at t=10 only primes the internal state: nothing published.
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_1')))
        patch_time = patch('time.time', Mock(return_value=10))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        self.assertPublishedMany(publish_mock, {})
        # Second pass at t=20 yields deltas and therefore real metrics.
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_2')))
        patch_time = patch('time.time', Mock(return_value=20))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        metrics = self.getPickledResults('test_should_work_with_real_data.pkl')
        self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
    ('os.access', Mock(return_value=True))
    (Collector, 'publish')

    def test_verify_supporting_vda_and_xvdb(self, publish_mock):
        """Same two-pass scheme against fixtures containing vda/xvdb devices."""
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_1_vda_xvdb')))
        patch_time = patch('time.time', Mock(return_value=10))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        self.assertPublishedMany(publish_mock, {})
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_2_vda_xvdb')))
        patch_time = patch('time.time', Mock(return_value=20))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        metrics = self.getPickledResults('test_verify_supporting_vda_and_xvdb.pkl')
        self.assertPublishedMany(publish_mock, metrics)
    ('os.access', Mock(return_value=True))
    (Collector, 'publish')

    def test_verify_supporting_nvme(self, publish_mock):
        """Same two-pass scheme against fixtures containing NVMe devices."""
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_1_nvme')))
        patch_time = patch('time.time', Mock(return_value=10))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        self.assertPublishedMany(publish_mock, {})
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_2_nvme')))
        patch_time = patch('time.time', Mock(return_value=20))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        metrics = self.getPickledResults('test_verify_supporting_nvme.pkl')
        self.assertPublishedMany(publish_mock, metrics)
    ('os.access', Mock(return_value=True))
    (Collector, 'publish')

    def test_verify_supporting_md_dm(self, publish_mock):
        """Same two-pass scheme against fixtures containing md/dm devices."""
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_1_md_dm')))
        patch_time = patch('time.time', Mock(return_value=10))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        self.assertPublishedMany(publish_mock, {})
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_2_md_dm')))
        patch_time = patch('time.time', Mock(return_value=20))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        metrics = self.getPickledResults('test_verify_supporting_md_dm.pkl')
        self.assertPublishedMany(publish_mock, metrics)
    ('os.access', Mock(return_value=True))
    (Collector, 'publish')

    def test_verify_supporting_disk(self, publish_mock):
        """Same two-pass scheme against plain-disk fixtures."""
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_1_disk')))
        patch_time = patch('time.time', Mock(return_value=10))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        self.assertPublishedMany(publish_mock, {})
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_2_disk')))
        patch_time = patch('time.time', Mock(return_value=20))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        metrics = self.getPickledResults('test_verify_supporting_disk.pkl')
        self.assertPublishedMany(publish_mock, metrics)
    ('os.access', Mock(return_value=True))
    (Collector, 'publish')

    def test_service_Time(self, publish_mock):
        """Service-time fixtures use a 60-second gap (t=10 to t=70)."""
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_1_service_time')))
        patch_time = patch('time.time', Mock(return_value=10))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        self.assertPublishedMany(publish_mock, {})
        patch_open = patch('__builtin__.open', Mock(return_value=self.getFixture('proc_diskstats_2_service_time')))
        patch_time = patch('time.time', Mock(return_value=70))
        patch_open.start()
        patch_time.start()
        self.collector.collect()
        patch_open.stop()
        patch_time.stop()
        metrics = self.getPickledResults('test_service_Time.pkl')
        self.assertPublishedMany(publish_mock, metrics)
def test_get_program_id_3():
    """Over-long program ids are compressed into a deterministic, non-UUID string."""
    circuit = cirq.Circuit(cirq.H(cirq.LineQubit(0)))
    circuit.program_id = 'my_fancy_var_1/my_fancy_var_2/my_fancy_var_3/my_fancy_var_4/my_fancy_var_5/my_fancy_var_6'
    assert len(circuit.program_id) > 64
    program_id = _get_program_id(circuit)
    assert isinstance(program_id, str)
    # The result is a compressed name, not a random UUID: parsing must fail.
    with pytest.raises(ValueError):
        uuid.UUID(program_id, version=4)
    assert program_id.startswith('my_far_1_my_far_2_my_far_3_my_far_4_my_far_5_my_far_6_')
class AbstractImageSequence():
    """Abstract interface for a sequence of images (e.g. animation frames).

    Subclasses must implement indexing, assignment, length and texture access.
    Fix: the `slice` parameter of `__getitem__`/`__setitem__` shadowed the
    `slice` builtin; renamed to `item` (positional-only via dunder dispatch,
    so callers are unaffected).
    """

    def get_texture_sequence(self):
        """Return this sequence as a texture sequence. Abstract."""
        raise NotImplementedError('abstract')

    def get_animation(self, period, loop=True):
        """Build an Animation showing each image for `period` seconds."""
        return Animation.from_image_sequence(self, period, loop)

    def __getitem__(self, item):
        """Return the image(s) at `item` (index or slice). Abstract."""
        raise NotImplementedError('abstract')

    def __setitem__(self, item, image):
        """Replace the image(s) at `item` with `image`. Abstract."""
        raise NotImplementedError('abstract')

    def __len__(self):
        raise NotImplementedError('abstract')

    def __iter__(self):
        raise NotImplementedError('abstract')
class GettextLexer(RegexLexer):
    """Lexer for Gettext catalogs (.po / .pot files)."""
    name = 'Gettext Catalog'
    aliases = ['pot', 'po']
    filenames = ['*.pot', '*.po']
    mimetypes = ['application/x-gettext', 'text/x-gettext', 'text/gettext']
    # Fix: the url literal was truncated (`url = '`), which is a syntax error.
    # NOTE(review): restored to the GNU gettext homepage — confirm against
    # upstream Pygments.
    url = 'https://www.gnu.org/software/gettext'
    version_added = '0.9'
    tokens = {'root': [('^#,\\s.*?$', Keyword.Type), ('^#:\\s.*?$', Keyword.Declaration), ('^(#|#\\.\\s|#\\|\\s|#~\\s|#\\s).*$', Comment.Single), ('^(")([A-Za-z-]+:)(.*")$', bygroups(String, Name.Property, String)), ('^".*"$', String), ('^(msgid|msgid_plural|msgstr|msgctxt)(\\s+)(".*")$', bygroups(Name.Variable, Text, String)), ('^(msgstr\\[)(\\d)(\\])(\\s+)(".*")$', bygroups(Name.Variable, Number.Integer, Name.Variable, Text, String))]}
def main():
    """cpplint entry point: lint all requested files and exit non-zero on errors."""
    filenames = ParseArguments(sys.argv[1:])
    # Bundled third-party sources are never linted.
    for excluded in ('include/caffe/3rdparty/hungarian.h', 'src/caffe/3rdparty/hungarian.cpp'):
        if excluded in filenames:
            filenames.remove(excluded)
    # Re-wrap stderr so lint messages survive non-UTF-8 terminals.
    sys.stderr = codecs.StreamReaderWriter(sys.stderr, codecs.getreader('utf8'), codecs.getwriter('utf8'), 'replace')
    _cpplint_state.ResetErrorCounts()
    for source_file in filenames:
        ProcessFile(source_file, _cpplint_state.verbose_level)
    _cpplint_state.PrintErrorCounts()
    # Exit status 1 (True) when any error was reported, 0 otherwise.
    sys.exit(_cpplint_state.error_count > 0)
class AttrVI_ATTR_MEM_BASE(RangeAttribute):
    """VI_ATTR_MEM_BASE range attribute for VXI INSTR/SERVANT resources."""
    resources = [(constants.InterfaceType.vxi, 'INSTR'), (constants.InterfaceType.vxi, 'SERVANT')]
    py_name = ''
    visa_name = 'VI_ATTR_MEM_BASE'
    # 64-bit builds use the wider bus-address type.
    visa_type = ('ViBusAddress64' if constants.is_64bits else 'ViUInt32')
    default = NotAvailable
    # Readable, not writable, not local.
    (read, write, local) = (True, False, False)
    # Fix: the two max-value literals were dropped from this copy, leaving
    # invalid syntax `( if ... else )`.
    # NOTE(review): restored to the full unsigned range of the visa_type —
    # confirm against upstream pyvisa / the NI-VISA attribute reference.
    (min_value, max_value, values) = (0, (0xFFFFFFFFFFFFFFFF if constants.is_64bits else 0xFFFFFFFF), None)
def _cast_to_dtype(data, dtype):
    """Cast `data` to a numpy array of `dtype`, printing diagnostics on failure.

    On a ValueError, tries each row individually to locate the offending row,
    then prints each field of that row with its target sub-dtype, and finally
    re-raises with the original error chained.
    """
    dt = _format_dtype(dtype)
    try:
        data = np.array(data, dtype=dt)
    except ValueError as err:
        try:
            print('Cant cast data to specific dtype. Trying row by row:')
            # Stop at the first row that fails to cast on its own.
            for r in range(len(data)):
                try:
                    np.array(data[r], dtype=dt)
                except ValueError:
                    break
            # NOTE(review): if no single row fails, `r` is just the last index
            # and the diagnostics below point at an innocent row.
            print(f'Error may be on row {r}:')
            print(('-' * 30))
            # Cast field by field; the failing field raises here, ending the loop.
            for i in range(len(data[r])):
                print(i, dtype[i], '\t\t', np.array(data[r][i], dtype=dt[i]))
            print(('-' * 30))
            print('>>> Next param:', dtype[i], '. Value:', data[r][i], '\n')
        finally:
            # Re-raise with the original error chained, even if the
            # diagnostics above raised themselves.
            raise ValueError('Cant cast data to specific dtype. Tried column by column. See results above') from err
    return data
def test__getting_started__pendulum():
    """Smoke-test building the getting-started pendulum OCP; passes if no exception."""
    from bioptim.examples.getting_started import pendulum as ocp_module

    example_dir = os.path.dirname(ocp_module.__file__)
    ocp_module.prepare_ocp(
        biorbd_model_path=example_dir + '/models/pendulum.bioMod',
        final_time=3,
        n_shooting=100,
        phase_dynamics=PhaseDynamics.SHARED_DURING_THE_PHASE,
        expand_dynamics=False,
    )
def _execute_compaction_round(source_partition_locator: PartitionLocator, destination_partition_locator: PartitionLocator, primary_keys: Set[str], compaction_artifact_s3_bucket: str, last_stream_position_to_compact: int, hash_bucket_count: Optional[int], sort_keys: List[SortKey], records_per_compacted_file: int, input_deltas_stats: Dict[(int, DeltaStats)], min_hash_bucket_chunk_size: int, compacted_file_content_type: ContentType, pg_config: Optional[PlacementGroupConfig], schema_on_read: Optional[pa.schema], rebase_source_partition_locator: Optional[PartitionLocator], rebase_source_partition_high_watermark: Optional[int], enable_profiler: Optional[bool], metrics_config: Optional[MetricsConfig], list_deltas_kwargs: Optional[Dict[(str, Any)]], read_kwargs_provider: Optional[ReadKwargsProvider], s3_table_writer_kwargs: Optional[Dict[(str, Any)]], object_store: Optional[IObjectStore], s3_client_kwargs: Optional[Dict[(str, Any)]], deltacat_storage=unimplemented_deltacat_storage, deltacat_storage_kwargs: Optional[Dict[(str, Any)]]=None, **kwargs) -> Tuple[(Optional[Partition], Optional[RoundCompletionInfo], Optional[str])]:
if (deltacat_storage_kwargs is None):
deltacat_storage_kwargs = {}
rcf_source_partition_locator = (rebase_source_partition_locator if rebase_source_partition_locator else source_partition_locator)
base_audit_url = rcf_source_partition_locator.path(f's3://{compaction_artifact_s3_bucket}/compaction-audit')
audit_url = f'{base_audit_url}.json'
logger.info(f'Compaction audit will be written to {audit_url}')
compaction_audit = CompactionSessionAuditInfo(deltacat.__version__, ray.__version__, audit_url).set_compactor_version(CompactorVersion.V1.value)
compaction_start = time.monotonic()
if (not primary_keys):
raise NotImplementedError('Compaction only supports tables with 1 or more primary keys')
if (sort_keys is None):
sort_keys = []
bit_width_of_sort_keys = check_preconditions(source_partition_locator, destination_partition_locator, sort_keys, records_per_compacted_file, hash_bucket_count, deltacat_storage, deltacat_storage_kwargs, **kwargs)
primary_keys = sorted(primary_keys)
node_resource_keys = None
if pg_config:
cluster_resources = pg_config.resource
cluster_cpus = cluster_resources['CPU']
else:
cluster_resources = ray.cluster_resources()
logger.info(f'Total cluster resources: {cluster_resources}')
logger.info(f'Available cluster resources: {ray.available_resources()}')
cluster_cpus = int(cluster_resources['CPU'])
logger.info(f'Total cluster CPUs: {cluster_cpus}')
node_resource_keys = live_node_resource_keys()
logger.info(f'Found {len(node_resource_keys)} live cluster nodes: {node_resource_keys}')
logger.info(f'Setting round robin scheduling with node id:{node_resource_keys}')
round_robin_opt_provider = functools.partial(round_robin_options_provider, resource_keys=node_resource_keys, pg_config=(pg_config.opts if pg_config else None))
max_parallelism = int(cluster_cpus)
logger.info(f'Max parallelism: {max_parallelism}')
round_completion_info = None
if (not rebase_source_partition_locator):
round_completion_info = rcf.read_round_completion_file(compaction_artifact_s3_bucket, source_partition_locator, **s3_client_kwargs)
if (not round_completion_info):
logger.info(f'Both rebase partition and round completion file not found. Performing an entire backfill on source.')
logger.info(f'Round completion file: {round_completion_info}')
enable_manifest_entry_copy_by_reference = (False if rebase_source_partition_locator else True)
logger.info(f'Enable manifest entry copy by reference is set to: {enable_manifest_entry_copy_by_reference}')
high_watermark = (round_completion_info.high_watermark if round_completion_info else None)
delta_discovery_start = time.monotonic()
(input_deltas, previous_last_stream_position_compacted_on_destination_table) = io.discover_deltas(source_partition_locator, high_watermark, last_stream_position_to_compact, destination_partition_locator, rebase_source_partition_locator, rebase_source_partition_high_watermark, deltacat_storage, deltacat_storage_kwargs, list_deltas_kwargs)
delta_discovery_end = time.monotonic()
compaction_audit.set_delta_discovery_time_in_seconds((delta_discovery_end - delta_discovery_start))
s3_utils.upload(compaction_audit.audit_url, str(json.dumps(compaction_audit)), **s3_client_kwargs)
if (not input_deltas):
logger.info('No input deltas found to compact.')
return (None, None, None)
(uniform_deltas, hash_bucket_count, last_stream_position_compacted, require_multiple_rounds) = (io.fit_input_deltas(input_deltas, cluster_resources, compaction_audit, hash_bucket_count, deltacat_storage=deltacat_storage, deltacat_storage_kwargs=deltacat_storage_kwargs, **kwargs) if (input_deltas_stats is None) else io.limit_input_deltas(input_deltas, cluster_resources, hash_bucket_count, min_hash_bucket_chunk_size, compaction_audit=compaction_audit, input_deltas_stats=input_deltas_stats, deltacat_storage=deltacat_storage, deltacat_storage_kwargs=deltacat_storage_kwargs, **kwargs))
compaction_audit.set_uniform_deltas_created(len(uniform_deltas))
assert ((hash_bucket_count is not None) and (hash_bucket_count > 0)), f'Expected hash bucket count to be a positive integer, but found `{hash_bucket_count}`'
if ((not round_completion_info) and rebase_source_partition_locator):
dest_delta_locator = DeltaLocator.of(partition_locator=rebase_source_partition_locator, stream_position=None)
round_completion_info = RoundCompletionInfo.of(None, dest_delta_locator, None, 0, None)
if require_multiple_rounds:
logger.info(f'Compaction can not be completed in one round. Either increase cluster size or decrease input')
raise AssertionError('Multiple rounds are not supported. Please increase the cluster size and run again.')
hb_start = time.monotonic()
hb_tasks_pending = invoke_parallel(items=uniform_deltas, ray_task=hb.hash_bucket, max_parallelism=max_parallelism, options_provider=round_robin_opt_provider, round_completion_info=round_completion_info, primary_keys=primary_keys, sort_keys=sort_keys, num_buckets=hash_bucket_count, num_groups=max_parallelism, enable_profiler=enable_profiler, metrics_config=metrics_config, read_kwargs_provider=read_kwargs_provider, object_store=object_store, deltacat_storage=deltacat_storage, deltacat_storage_kwargs=deltacat_storage_kwargs, **kwargs)
hb_invoke_end = time.monotonic()
logger.info(f'Getting {len(hb_tasks_pending)} hash bucket results...')
hb_results: List[HashBucketResult] = ray.get(hb_tasks_pending)
logger.info(f'Got {len(hb_results)} hash bucket results.')
hb_end = time.monotonic()
hb_results_retrieved_at = time.time()
telemetry_time_hb = compaction_audit.save_step_stats(CompactionSessionAuditInfo.HASH_BUCKET_STEP_NAME, hb_results, hb_results_retrieved_at, (hb_invoke_end - hb_start), (hb_end - hb_start))
s3_utils.upload(compaction_audit.audit_url, str(json.dumps(compaction_audit)), **s3_client_kwargs)
all_hash_group_idx_to_obj_id = defaultdict(list)
for hb_result in hb_results:
for (hash_group_index, object_id) in enumerate(hb_result.hash_bucket_group_to_obj_id):
if object_id:
all_hash_group_idx_to_obj_id[hash_group_index].append(object_id)
hash_group_count = len(all_hash_group_idx_to_obj_id)
logger.info(f'Hash bucket groups created: {hash_group_count}')
total_hb_record_count = sum([hb_result.hb_record_count for hb_result in hb_results])
logger.info(f'Got {total_hb_record_count} hash bucket records from hash bucketing step...')
compaction_audit.set_input_records(total_hb_record_count.item())
compacted_stream_locator = destination_partition_locator.stream_locator
stream = deltacat_storage.get_stream(compacted_stream_locator.namespace, compacted_stream_locator.table_name, compacted_stream_locator.table_version, **deltacat_storage_kwargs)
partition = deltacat_storage.stage_partition(stream, destination_partition_locator.partition_values, **deltacat_storage_kwargs)
new_compacted_partition_locator = partition.locator
num_materialize_buckets = max_parallelism
logger.info(f'Materialize Bucket Count: {num_materialize_buckets}')
dedupe_start = time.monotonic()
dd_max_parallelism = int((max_parallelism * kwargs.get('dd_max_parallelism_ratio', DEFAULT_DEDUPE_MAX_PARALLELISM_RATIO_ARG)))
logger.info(f'dd max_parallelism is set to {dd_max_parallelism}, max_parallelism is {max_parallelism}')
dd_tasks_pending = invoke_parallel(items=all_hash_group_idx_to_obj_id.values(), ray_task=dd.dedupe, max_parallelism=dd_max_parallelism, options_provider=round_robin_opt_provider, kwargs_provider=(lambda index, item: {'dedupe_task_index': index, 'object_ids': item}), sort_keys=sort_keys, num_materialize_buckets=num_materialize_buckets, enable_profiler=enable_profiler, metrics_config=metrics_config, object_store=object_store)
dedupe_invoke_end = time.monotonic()
logger.info(f'Getting {len(dd_tasks_pending)} dedupe results...')
dd_results: List[DedupeResult] = ray.get(dd_tasks_pending)
logger.info(f'Got {len(dd_results)} dedupe results.')
dedupe_results_retrieved_at = time.time()
dedupe_end = time.monotonic()
total_dd_record_count = sum([ddr.deduped_record_count for ddr in dd_results])
logger.info(f'Deduped {total_dd_record_count} records...')
telemetry_time_dd = compaction_audit.save_step_stats(CompactionSessionAuditInfo.DEDUPE_STEP_NAME, dd_results, dedupe_results_retrieved_at, (dedupe_invoke_end - dedupe_start), (dedupe_end - dedupe_start))
compaction_audit.set_records_deduped(total_dd_record_count.item())
all_mat_buckets_to_obj_id = defaultdict(list)
for dd_result in dd_results:
for (bucket_idx, dd_task_index_and_object_id_tuple) in dd_result.mat_bucket_idx_to_obj_id.items():
all_mat_buckets_to_obj_id[bucket_idx].append(dd_task_index_and_object_id_tuple)
logger.info(f'Getting {len(dd_tasks_pending)} dedupe result stat(s)...')
logger.info(f'Materialize buckets created: {len(all_mat_buckets_to_obj_id)}')
compaction_audit.set_materialize_buckets(len(all_mat_buckets_to_obj_id))
s3_utils.upload(compaction_audit.audit_url, str(json.dumps(compaction_audit)), **s3_client_kwargs)
materialize_start = time.monotonic()
mat_tasks_pending = invoke_parallel(items=all_mat_buckets_to_obj_id.items(), ray_task=mat.materialize, max_parallelism=max_parallelism, options_provider=round_robin_opt_provider, kwargs_provider=(lambda index, mat_bucket_index_to_obj_id: {'mat_bucket_index': mat_bucket_index_to_obj_id[0], 'dedupe_task_idx_and_obj_id_tuples': mat_bucket_index_to_obj_id[1]}), schema=schema_on_read, round_completion_info=round_completion_info, source_partition_locator=source_partition_locator, partition=partition, enable_manifest_entry_copy_by_reference=enable_manifest_entry_copy_by_reference, max_records_per_output_file=records_per_compacted_file, compacted_file_content_type=compacted_file_content_type, enable_profiler=enable_profiler, metrics_config=metrics_config, read_kwargs_provider=read_kwargs_provider, s3_table_writer_kwargs=s3_table_writer_kwargs, object_store=object_store, deltacat_storage=deltacat_storage, deltacat_storage_kwargs=deltacat_storage_kwargs)
materialize_invoke_end = time.monotonic()
logger.info(f'Getting {len(mat_tasks_pending)} materialize result(s)...')
mat_results: List[MaterializeResult] = ray.get(mat_tasks_pending)
logger.info(f'Got {len(mat_results)} materialize result(s).')
materialize_end = time.monotonic()
materialize_results_retrieved_at = time.time()
telemetry_time_materialize = compaction_audit.save_step_stats(CompactionSessionAuditInfo.MATERIALIZE_STEP_NAME, mat_results, materialize_results_retrieved_at, (materialize_invoke_end - materialize_start), (materialize_end - materialize_start))
mat_results = sorted(mat_results, key=(lambda m: m.task_index))
deltas = [m.delta for m in mat_results]
merged_delta = Delta.merge_deltas(deltas, stream_position=last_stream_position_to_compact)
record_info_msg = f'Hash bucket records: {total_hb_record_count}, Deduped records: {total_dd_record_count}, Materialized records: {merged_delta.meta.record_count}'
logger.info(record_info_msg)
assert ((total_hb_record_count - total_dd_record_count) == merged_delta.meta.record_count), f'''Number of hash bucket records minus the number of deduped records does not match number of materialized records.
{record_info_msg}'''
compacted_delta = deltacat_storage.commit_delta(merged_delta, properties=kwargs.get('properties', DEFAULT_PROPERTIES_ARG), **deltacat_storage_kwargs)
logger.info(f'Committed compacted delta: {compacted_delta}')
compaction_end = time.monotonic()
compaction_audit.set_compaction_time_in_seconds((compaction_end - compaction_start))
new_compacted_delta_locator = DeltaLocator.of(new_compacted_partition_locator, compacted_delta.stream_position)
last_rebase_source_partition_locator = (rebase_source_partition_locator or (round_completion_info.rebase_source_partition_locator if round_completion_info else None))
pyarrow_write_result = PyArrowWriteResult.union([m.pyarrow_write_result for m in mat_results])
session_peak_memory = get_current_node_peak_memory_usage_in_bytes()
compaction_audit.set_peak_memory_used_bytes_by_compaction_session_process(session_peak_memory)
compaction_audit.save_round_completion_stats(mat_results, ((telemetry_time_hb + telemetry_time_dd) + telemetry_time_materialize))
s3_utils.upload(compaction_audit.audit_url, str(json.dumps(compaction_audit)), **s3_client_kwargs)
new_round_completion_info = RoundCompletionInfo.of(last_stream_position_compacted, new_compacted_delta_locator, pyarrow_write_result, bit_width_of_sort_keys, last_rebase_source_partition_locator, compaction_audit.untouched_file_ratio, audit_url, hash_bucket_count, None, CompactorVersion.V1.value)
logger.info(f'partition-{source_partition_locator.partition_values},compacted at: {last_stream_position_compacted},last position: {last_stream_position_to_compact}')
return (partition, new_round_completion_info, rcf_source_partition_locator) |
def read_detections(path, drop_detection_prob: float=0.0, add_detection_noise: float=0.0):
    """Yield ``(frame_idx, detections)`` pairs read from a MOT-style CSV file.

    Args:
        path: CSV file with columns ``COL_NAMES`` (bounding boxes per frame).
        drop_detection_prob: probability of randomly dropping each detection
            (simulates detector misses).
        add_detection_noise: if > 0, each box coordinate is jittered by a
            uniform offset in ``[-add_detection_noise, add_detection_noise]``.

    Yields:
        Tuples of ``(frame_idx, list_of_Detection)`` for every frame index
        from 0 up to and including the maximum frame index in the file.

    Raises:
        ValueError: if *path* does not point to an existing file.
    """
    path = os.path.expanduser(path)
    logger.debug(f'reading detections from {path}')
    if not os.path.isfile(path):
        raise ValueError('file does not exist')
    df = pd.read_csv(path, names=COL_NAMES)
    max_frame = df.frame_idx.max()
    # BUG FIX: range(max_frame) stopped one short, silently dropping every
    # detection in the final frame; +1 makes the upper bound inclusive.
    for frame_idx in range(max_frame + 1):
        detections = []
        for _, row in df[df.frame_idx == frame_idx].iterrows():
            # Optionally simulate detector misses.
            if random.random() < drop_detection_prob:
                continue
            # Convert (left, top, width, height) to (x1, y1, x2, y2).
            box = [row.bb_left, row.bb_top, row.bb_left + row.bb_width, row.bb_top + row.bb_height]
            if add_detection_noise > 0:
                for i in range(4):
                    box[i] += random.uniform(-add_detection_noise, add_detection_noise)
            detections.append(Detection(box=box))
        yield frame_idx, detections
def download_city(path):
    """Verify and extract the Cityscapes archives found in ``<path>/downloads``.

    NOTE(review): despite its name, this function performs no download — it
    expects the zip archives to already be present; confirm with callers.

    Args:
        path: dataset root; archives are looked up in ``<path>/downloads`` and
            extracted directly into *path*.

    Raises:
        UserWarning: if an archive's SHA1 does not match the expected checksum.
    """
    _CITY_DOWNLOAD_URLS = [('gtFine_trainvaltest.zip', '99f532cb1af174f5fcc4c5bc8feea8c66246ddbc'), ('leftImg8bit_trainvaltest.zip', '2c0b77ce9933cc635adda307fbba5566f5d9d404')]
    download_dir = os.path.join(path, 'downloads')
    mkdir(download_dir)
    for (filename, checksum) in _CITY_DOWNLOAD_URLS:
        # BUG FIX: the archive lives inside download_dir; the original passed
        # the bare filename, which resolved relative to the current working
        # directory instead of the directory created above.
        filepath = os.path.join(download_dir, filename)
        if not check_sha1(filepath, checksum):
            raise UserWarning('File {} is downloaded but the content hash does not match. The repo may be outdated or download may be incomplete. If the "repo_url" is overridden, consider switching to the default repo.'.format(filename))
        with zipfile.ZipFile(filepath, 'r') as zip_ref:
            zip_ref.extractall(path=path)
        print('Extracted', filename)
class Scoresheet(BaseScoresheet):
    """Scoresheet whose keys are normalized to ``Pair`` objects and whose
    values are stored as floats."""

    def __getitem__(self, key):
        # Normalize the key to a Pair before delegating to the base class.
        return BaseScoresheet.__getitem__(self, Pair(key))

    def __setitem__(self, key, val):
        BaseScoresheet.__setitem__(self, Pair(key), float(val))

    def __delitem__(self, key):
        return dict.__delitem__(self, Pair(key))

    def process_data(self, data, weight='weight'):
        """Turn a dict, a networkx graph, or an iterable of (key, value)
        records into a ``{Pair: float}`` mapping."""
        if isinstance(data, dict):
            return {Pair(key): float(score) for key, score in data.items()}
        if isinstance(data, nx.Graph):
            return {Pair(u, v): float(attrs[weight]) for u, v, attrs in data.edges(data=True)}
        return {Pair(key): float(score) for key, score in data}

    def from_record(line, delimiter='\t'):
        # NOTE(review): defined without `self` — presumably used as a static
        # helper via the class; confirm before adding @staticmethod.
        u, v, score = line.rstrip('\n').split(delimiter)
        return ((u, v), score)

    def to_record(key, value, delimiter='\t'):
        u, v = key
        u, v, score = map(make_qstr, (u, v, value))
        return f'{u}{delimiter}{v}{delimiter}{score}\n'
class Tooltip(MacroElement):
    """Attach a Leaflet tooltip to the parent map element.

    The tooltip shows *text* inside a ``<div>`` (optionally styled with an
    inline CSS string) and forwards the remaining keyword options to
    ``bindTooltip`` after validating them against ``valid_options``.
    """
    _template = Template('\n {% macro script(this, kwargs) %}\n {{ this._parent.get_name() }}.bindTooltip(\n `<div{% if this.style %} style={{ this.style|tojson }}{% endif %}>\n {{ this.text }}\n </div>`,\n {{ this.options|tojson }}\n );\n {% endmacro %}\n ')
    valid_options: Dict[(str, Tuple[(Type, ...)])] = {'pane': (str,), 'offset': (tuple,), 'direction': (str,), 'permanent': (bool,), 'sticky': (bool,), 'interactive': (bool,), 'opacity': (float, int), 'attribution': (str,), 'className': (str,)}

    def __init__(self, text: str, style: Optional[str]=None, sticky: bool=True, **kwargs: TypeJsonValue):
        super().__init__()
        self._name = 'Tooltip'
        self.text = str(text)
        # `sticky` is just another bindTooltip option; fold it into kwargs.
        kwargs['sticky'] = sticky
        self.options = self.parse_options(kwargs)
        if style:
            assert isinstance(style, str), 'Pass a valid inline HTML style property string to style.'
            self.style = style

    def parse_options(self, kwargs: Dict[(str, TypeJsonValue)]) -> Dict[(str, TypeJsonValue)]:
        """Camelize option names and validate names and value types."""
        options = {camelize(name): value for name, value in kwargs.items()}
        for key, value in options.items():
            assert (key in self.valid_options), 'The option {} is not in the available options: {}.'.format(key, ', '.join(self.valid_options))
            assert isinstance(value, self.valid_options[key]), f'The option {key} must be one of the following types: {self.valid_options[key]}.'
        return options
def test_geodesic_gradient_descent(metric, data_x, data_y, high_rank_x, high_rank_y):
    """Check that gradient-descent geodesics agree with the closed form.

    Picks a random interpolation fraction, computes the closed-form geodesic
    point, then runs the generic gradient-descent geodesic initialized at that
    point and asserts the two stay within ``atol`` of each other.
    """
    x, y = (high_rank_x, high_rank_y) if metric.test_high_rank_data else (data_x, data_y)
    pt_a = metric.neural_data_to_point(x)
    pt_b = metric.neural_data_to_point(y)
    t = np.random.rand(1)[0]
    closed_form = GeodesicLengthSpace.geodesic(metric, pt_a, pt_b, frac=t)
    descended = LengthSpace.geodesic(metric, pt_a, pt_b, init_pt=closed_form, frac=t, pt_tol=atol / 100, fn_tol=1e-06)
    assert metric.length(descended, closed_form) < atol, 'Closed-form and grad-descent geodesics are not close!'
def calculate_fid_given_paths(paths, batch_size, size, length, dims, device):
    """Compute the FID between the two video directories in *paths*.

    Loads a pretrained ResNeXt-101 Kinetics model from
    ``resnext-101-kinetics.pth``, computes activation statistics for each
    path, and returns their Frechet distance.

    Raises:
        RuntimeError: if either path does not exist.
    """
    for candidate in paths:
        if not os.path.exists(candidate):
            raise RuntimeError('Invalid path: %s' % candidate)
    # Feature extractor (expects CUDA to be available).
    model = nn.DataParallel(resnet101(sample_duration=16).cuda())
    checkpoint = torch.load('resnext-101-kinetics.pth')
    model.load_state_dict(checkpoint['state_dict'])
    m1, s1 = _compute_statistics_of_path(paths[0], model, batch_size, size, length, dims, device)
    m2, s2 = _compute_statistics_of_path(paths[1], model, batch_size, size, length, dims, device)
    return calculate_frechet_distance(m1, s1, m2, s2)
_metaclass(Singleton)
class IATSPI(object):
    """Thin ctypes wrapper around the AT-SPI2 accessibility library.

    Loads ``libatspi``, initializes it, and builds lookup tables mapping
    ATSPI role names (as CamelCase control-type strings) to their ids.
    """
    LIB = 'libatspi'
    DEFAULT_LIB_NAME = 'libatspi.so'

    def __get_roles(self):
        """Return the list of ATSPI role names, CamelCased for use as control types."""
        control_types = []
        get_role_name = self.atspi.atspi_role_get_name
        # Declare the C signature so ctypes marshals int -> char* correctly.
        get_role_name.argtypes = [c_int]
        get_role_name.restype = c_char_p
        for i in range(ATSPI_ROLE_COUNT):
            role = get_role_name(i)
            if role is not None:
                # e.g. b"push button" -> "PushButton"
                role = ''.join([part.capitalize() for part in role.decode('utf-8').split()])
                control_types.append(role)
        return control_types

    def __init__(self):
        try:
            self.atspi = cdll.LoadLibrary(_find_library([self.LIB, self.DEFAULT_LIB_NAME]))
            self.atspi.atspi_init()
            if not self.atspi.atspi_is_initialized():
                raise Exception('Cannot initialize atspi module')
            self._control_types = self.__get_roles()
            # Bidirectional name <-> id maps for role lookup.
            self.known_control_types = {}
            self.known_control_type_ids = {}
            for type_id, ctrl_type in enumerate(self._control_types):
                self.known_control_types[ctrl_type] = type_id
                self.known_control_type_ids[type_id] = ctrl_type
        except Exception as exc:
            message = 'atspi library not installed. Please install at-spi2 library or choose another backend'
            # BUG FIX: chain the original exception so the real failure
            # (missing lib, init error, ...) is not silently discarded.
            raise Exception(message) from exc

    def get_iface_func(self, func_name):
        """Return the ``<func_name>_iface`` variant if the library exports it,
        else the plain function, else None (with a warning printed)."""
        if hasattr(self.atspi, (func_name + '_iface')):
            return getattr(self.atspi, (func_name + '_iface'))
        elif hasattr(self.atspi, func_name):
            return getattr(self.atspi, func_name)
        else:
            print('Warning! method: {} not found in libatspi.'.format(func_name))
            return None
def create_logger(root_output_path, cfg, image_set):
    """Create a file-backed INFO logger under ``<root>/<cfg_name>/<image_sets>/``.

    Args:
        root_output_path: base output directory (created if missing).
        cfg: config file path; its basename (sans extension) names a subdir.
        image_set: ';'-separated image set names, joined with '_' for a subdir.

    Returns:
        Tuple of (logger, final_output_path, log file name without '.log').
    """
    if not os.path.exists(root_output_path):
        os.makedirs(root_output_path)
    assert os.path.exists(root_output_path), '{} does not exist'.format(root_output_path)
    cfg_name = os.path.basename(cfg).split('.')[0]
    joined_sets = '_'.join(image_set.split(';'))
    final_output_path = os.path.join(root_output_path, '{}'.format(cfg_name), '{}'.format(joined_sets))
    # makedirs creates the intermediate <cfg_name> directory as well.
    if not os.path.exists(final_output_path):
        os.makedirs(final_output_path)
    # Timestamped log file name, e.g. mycfg_2024-01-01-12-00.log
    log_file = '{}_{}.log'.format(cfg_name, time.strftime('%Y-%m-%d-%H-%M'))
    logging.basicConfig(filename=os.path.join(final_output_path, log_file), format='%(asctime)-15s %(message)s')
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    return logger, final_output_path, log_file[:-4]
def solar_position_numba(unixtime, lat, lon, elev, pressure, temp, delta_t, atmos_refract, numthreads, sst=False, esd=False):
    """Compute solar positions for *unixtime* using threaded numba loops.

    Args:
        unixtime: 1-D array of unix timestamps.
        lat, lon, elev, pressure, temp, delta_t, atmos_refract: scalar
            location/atmosphere parameters packed into ``loc_args``.
        numthreads: number of worker threads (capped at len(unixtime)).
        sst: if True, compute only the 3 sun-position-for-tracking rows.
        esd: if True (and not sst), compute only the earth-sun distance row.

    Returns:
        ndarray of shape (dims, len(unixtime)) filled by solar_position_loop.
    """
    loc_args = np.array([lat, lon, elev, pressure, temp, delta_t, atmos_refract, sst, esd])
    ulength = unixtime.shape[0]
    # Number of output rows depends on which quantities were requested.
    if sst:
        dims = 3
    elif esd:
        dims = 1
    else:
        dims = 6
    result = np.empty((dims, ulength), dtype=np.float64)
    if unixtime.dtype != np.float64:
        unixtime = unixtime.astype(np.float64)
    if ulength < numthreads:
        # BUG FIX: the original string used a printf-style %s placeholder with
        # str.format, so the warning printed a literal "%s" instead of ulength.
        warnings.warn('The number of threads is more than the length of the time array. Only using {} threads.'.format(ulength))
        numthreads = ulength
    if numthreads <= 1:
        # Single-threaded fast path: run in the calling thread.
        solar_position_loop(unixtime, loc_args, result)
        return result
    # Split the time array and the output columns across the worker threads;
    # each thread writes into its own view of `result`.
    split0 = np.array_split(unixtime, numthreads)
    split2 = np.array_split(result, numthreads, axis=1)
    chunks = [[a0, loc_args, split2[i]] for i, a0 in enumerate(split0)]
    threads = [threading.Thread(target=solar_position_loop, args=chunk) for chunk in chunks]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
    return result
class TestLoad(BaseTestLoading):
    """Tests for label/feature loading transforms (HVU labels, localization
    features, proposals, audio features)."""

    def test_load_hvu_label(self):
        """LoadHVULabel must build flat label/mask tensors plus a per-category
        mask, and its repr must reflect lazy initialization state."""
        hvu_label_example1 = copy.deepcopy(self.hvu_label_example1)
        hvu_label_example2 = copy.deepcopy(self.hvu_label_example2)
        categories = hvu_label_example1['categories']
        category_nums = hvu_label_example1['category_nums']
        num_tags = sum(category_nums)
        num_categories = len(categories)
        loader = LoadHVULabel()
        # Before the first call the loader is not yet initialized.
        assert (repr(loader) == f'{loader.__class__.__name__}(hvu_initialized={False})')
        result1 = loader(hvu_label_example1)
        label1 = torch.zeros(num_tags)
        mask1 = torch.zeros(num_tags)
        category_mask1 = torch.zeros(num_categories)
        # The first call flips hvu_initialized to True.
        assert (repr(loader) == f'{loader.__class__.__name__}(hvu_initialized={True})')
        # Expected values for example 1 (tags 0,4,5,7,8 set; first 10 tags /
        # first 3 categories are present in the annotation).
        label1[[0, 4, 5, 7, 8]] = 1.0
        mask1[:10] = 1.0
        category_mask1[:3] = 1.0
        assert torch.all(torch.eq(label1, result1['label']))
        assert torch.all(torch.eq(mask1, result1['mask']))
        assert torch.all(torch.eq(category_mask1, result1['category_mask']))
        result2 = loader(hvu_label_example2)
        label2 = torch.zeros(num_tags)
        mask2 = torch.zeros(num_tags)
        category_mask2 = torch.zeros(num_categories)
        # Expected values for example 2 (different tag/category coverage).
        label2[[1, 8, 9, 11]] = 1.0
        mask2[:2] = 1.0
        mask2[7:] = 1.0
        category_mask2[[0, 2, 3]] = 1.0
        assert torch.all(torch.eq(label2, result2['label']))
        assert torch.all(torch.eq(mask2, result2['mask']))
        assert torch.all(torch.eq(category_mask2, result2['category_mask']))

    def test_load_localization_feature(self):
        """LoadLocalizationFeature must reject unknown extensions and load a
        (400, 5) raw feature from the default .csv source."""
        target_keys = ['raw_feature']
        action_result = copy.deepcopy(self.action_results)
        with pytest.raises(NotImplementedError):
            load_localization_feature = LoadLocalizationFeature('unsupport_ext')
        load_localization_feature = LoadLocalizationFeature()
        load_localization_feature_result = load_localization_feature(action_result)
        assert assert_dict_has_keys(load_localization_feature_result, target_keys)
        assert (load_localization_feature_result['raw_feature'].shape == (400, 5))
        assert (repr(load_localization_feature) == f'{load_localization_feature.__class__.__name__}(raw_feature_ext=.csv)')

    def test_load_proposals(self):
        """LoadProposals must reject unknown proposal/feature extensions and
        load top-5 proposals with the expected boundary/score arrays."""
        target_keys = ['bsp_feature', 'tmin', 'tmax', 'tmin_score', 'tmax_score', 'reference_temporal_iou']
        action_result = copy.deepcopy(self.action_results)
        with pytest.raises(NotImplementedError):
            load_proposals = LoadProposals(5, self.proposals_dir, self.bsp_feature_dir, 'unsupport_ext')
        with pytest.raises(NotImplementedError):
            load_proposals = LoadProposals(5, self.proposals_dir, self.bsp_feature_dir, '.csv', 'unsupport_ext')
        load_proposals = LoadProposals(5, self.proposals_dir, self.bsp_feature_dir)
        load_proposals_result = load_proposals(action_result)
        assert assert_dict_has_keys(load_proposals_result, target_keys)
        assert (load_proposals_result['bsp_feature'].shape[0] == 5)
        assert (load_proposals_result['tmin'].shape == (5,))
        assert_array_almost_equal(load_proposals_result['tmin'], np.arange(0.1, 0.6, 0.1), decimal=4)
        assert (load_proposals_result['tmax'].shape == (5,))
        assert_array_almost_equal(load_proposals_result['tmax'], np.arange(0.2, 0.7, 0.1), decimal=4)
        assert (load_proposals_result['tmin_score'].shape == (5,))
        assert_array_almost_equal(load_proposals_result['tmin_score'], np.arange(0.95, 0.9, (- 0.01)), decimal=4)
        assert (load_proposals_result['tmax_score'].shape == (5,))
        assert_array_almost_equal(load_proposals_result['tmax_score'], np.arange(0.96, 0.91, (- 0.01)), decimal=4)
        assert (load_proposals_result['reference_temporal_iou'].shape == (5,))
        assert_array_almost_equal(load_proposals_result['reference_temporal_iou'], np.arange(0.85, 0.8, (- 0.01)), decimal=4)
        assert (repr(load_proposals) == f'{load_proposals.__class__.__name__}(top_k={5}, pgm_proposals_dir={self.proposals_dir}, pgm_features_dir={self.bsp_feature_dir}, proposal_ext=.csv, feature_ext=.npy)')

    def test_load_audio_feature(self):
        """LoadAudioFeature must load stored features and zero-pad to
        (640, 80) when the feature file is missing."""
        target_keys = ['audios']
        inputs = copy.deepcopy(self.audio_feature_results)
        load_audio_feature = LoadAudioFeature()
        results = load_audio_feature(inputs)
        assert assert_dict_has_keys(results, target_keys)
        # Non-existent path: the loader falls back to its pad_method.
        inputs = copy.deepcopy(self.audio_feature_results)
        inputs['audio_path'] = 'foo/foo/bar.npy'
        load_audio_feature = LoadAudioFeature()
        results = load_audio_feature(inputs)
        assert (results['audios'].shape == (640, 80))
        assert assert_dict_has_keys(results, target_keys)
        assert (repr(load_audio_feature) == f'{load_audio_feature.__class__.__name__}(pad_method=zero)')
class NormalizePathTest(TestCase):
    """Tests for FakeFilesystem.absnormpath path normalization."""

    def setUp(self):
        self.filesystem = fake_filesystem.FakeFilesystem(path_separator='/')
        self.root_name = self.filesystem.root_dir_name

    def test_empty_path_should_get_normalized_to_root_path(self):
        self.assertEqual(self.root_name, self.filesystem.absnormpath(''))

    def test_root_path_remains_unchanged(self):
        self.assertEqual(self.root_name, self.filesystem.absnormpath(self.root_name))

    def test_relative_path_forced_to_cwd(self):
        # A relative path is resolved against the filesystem's cwd.
        self.filesystem.cwd = '/foo'
        self.assertEqual('/foo/bar', self.filesystem.absnormpath('bar'))

    def test_absolute_path_remains_unchanged(self):
        self.assertEqual(self.root_name + 'foo/bar', self.filesystem.absnormpath('foo/bar'))

    def test_dotted_path_is_normalized(self):
        # '..' components are collapsed during normalization.
        self.assertEqual(self.filesystem.root_dir_name, self.filesystem.absnormpath('/foo/..'))
        self.assertEqual(f'{self.filesystem.root_dir_name}bar', self.filesystem.absnormpath('foo/../bar'))

    def test_dot_path_is_normalized(self):
        self.assertEqual(self.root_name, self.filesystem.absnormpath('.'))
class VNet(MetaModule):
    """Two-layer MLP (linear -> ReLU -> linear -> sigmoid) producing outputs
    in (0, 1); used as a meta-learned weighting network.

    Args:
        input: input feature dimension.
        hidden1: hidden layer width.
        output: output dimension.
    """

    def __init__(self, input, hidden1, output):
        super(VNet, self).__init__()
        self.linear1 = MetaLinear(input, hidden1)
        self.relu = nn.ReLU(inplace=True)
        self.linear2 = MetaLinear(hidden1, output)

    def forward(self, x):
        """Return sigmoid(linear2(relu(linear1(x))))."""
        x = self.linear1(x)
        x = self.relu(x)
        out = self.linear2(x)
        # FIX: F.sigmoid is deprecated (emits a warning); Tensor.sigmoid()
        # is the supported equivalent and needs no extra import.
        return out.sigmoid()
class TestProjectExplicit():
    """CLI tests for explicit project selection via -p/--project or the
    project environment variable."""

    # NOTE(review): the bare `.parametrize(...)` lines below look like
    # `@pytest.mark.parametrize` decorators whose `@pytest.mark` prefix was
    # lost during extraction — confirm against the upstream source.
    .parametrize('file_name', ['pyproject.toml', 'setup.py'])
    def test_found_project_flag(self, hatch, temp_dir, config_file, helpers, file_name):
        """-p <name> resolves a configured project and prints its status."""
        project_file = (temp_dir / file_name)
        project_file.touch()
        project = 'foo'
        config_file.model.projects = {project: str(temp_dir)}
        config_file.save()
        result = hatch('-p', project, 'status')
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent(f'''
            [Project] - {project}
            [Location] - {temp_dir}
            [Config] - {config_file.path}
            '''))

    .parametrize('file_name', ['pyproject.toml', 'setup.py'])
    def test_found_project_env(self, hatch, temp_dir, config_file, helpers, file_name):
        """The project env var resolves a configured project the same way."""
        project_file = (temp_dir / file_name)
        project_file.touch()
        project = 'foo'
        config_file.model.projects = {project: str(temp_dir)}
        config_file.save()
        with EnvVars({ConfigEnvVars.PROJECT: project}):
            result = hatch('status')
        assert (result.exit_code == 0), result.output
        assert (result.output == helpers.dedent(f'''
            [Project] - {project}
            [Location] - {temp_dir}
            [Config] - {config_file.path}
            '''))

    def test_unknown_project(self, hatch):
        """An unconfigured project name fails with exit code 1."""
        project = 'foo'
        result = hatch('-p', project, 'status')
        assert (result.exit_code == 1)
        assert (result.output == f'''Unable to locate project {project}
''')

    def test_not_a_project(self, hatch, temp_dir, config_file):
        """A configured directory that contains no project files also fails."""
        project = 'foo'
        config_file.model.project = project
        config_file.model.projects = {project: str(temp_dir)}
        config_file.save()
        result = hatch('-p', project, 'status')
        assert (result.exit_code == 1)
        assert (result.output == f'''Unable to locate project {project}
''')
class MaxOrderCount(TradingControl):
    """Trading control that limits the number of orders placed per day."""

    def __init__(self, on_error, max_count):
        super(MaxOrderCount, self).__init__(on_error, max_count=max_count)
        self.orders_placed = 0
        self.max_count = max_count
        self.current_date = None

    def validate(self, asset, amount, portfolio, algo_datetime, algo_current_data):
        """Fail if placing this order would exceed the daily order limit.

        The counter resets whenever the algorithm date rolls over; every call
        counts as one placed order, violation or not.
        """
        algo_date = algo_datetime.date()
        # New trading day: start counting from zero again.
        if self.current_date is not None and self.current_date != algo_date:
            self.orders_placed = 0
        self.current_date = algo_date
        if self.orders_placed >= self.max_count:
            self.handle_violation(asset, amount, algo_datetime)
        self.orders_placed += 1
def get_args():
    """Build and parse the command-line arguments for the symbolizer."""
    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description='Symbolizes OP-TEE abort dumps or function graphs',
        epilog=epilog)
    arg_parser.add_argument(
        '-d', '--dir', action='append', nargs='+',
        help='Search for ELF file in DIR. tee.elf is needed to decode a TEE Core or pseudo-TA abort, while <TA_uuid>.elf is required if a user-mode TA has crashed. For convenience, ELF files may also be given.')
    arg_parser.add_argument(
        '-s', '--strip_path', nargs='?',
        help='Strip STRIP_PATH from file paths (default: current directory, use -s with no argument to show full paths)',
        default=os.getcwd())
    return arg_parser.parse_args()
class TBMagicTurnHandler(DefaultScript):
    """Evennia script that runs turn-based combat in the room it is attached
    to: rolls initiative, tracks whose turn it is, enforces a per-turn
    timeout, and ends combat on full disengage or defeat."""

    def at_script_creation(self):
        """Set up combat: gather fighters, roll initiative, start turn 0."""
        self.key = 'Combat Turn Handler'
        self.interval = 5
        self.persistent = True
        self.db.fighters = []
        # Anything in the room with an 'hp' attribute joins the fight.
        for thing in self.obj.contents:
            if thing.db.hp:
                self.db.fighters.append(thing)
        for fighter in self.db.fighters:
            self.initialize_for_combat(fighter)
        # Let the room reference this handler.
        self.obj.db.combat_turnhandler = self
        # Initiative: highest roll acts first.
        ordered_by_roll = sorted(self.db.fighters, key=roll_init, reverse=True)
        self.db.fighters = ordered_by_roll
        self.obj.msg_contents(('Turn order is: %s ' % ', '.join((obj.key for obj in self.db.fighters))))
        self.start_turn(self.db.fighters[0])
        self.db.turn = 0
        self.db.timer = TURN_TIMEOUT

    def at_stop(self):
        """Clean up all fighters and detach from the room when combat ends."""
        for fighter in self.db.fighters:
            combat_cleanup(fighter)
        self.obj.db.combat_turnhandler = None

    def at_repeat(self):
        """Tick every `interval` seconds: time out idle turns and warn the
        current fighter shortly before the timeout."""
        currentchar = self.db.fighters[self.db.turn]
        self.db.timer -= self.interval
        if (self.db.timer <= 0):
            # Timed out: force a disengage, which also ends the turn.
            self.obj.msg_contents(("%s's turn timed out!" % currentchar))
            spend_action(currentchar, 'all', action_name='disengage')
            return
        elif ((self.db.timer <= 10) and (not self.db.timeout_warning_given)):
            currentchar.msg('WARNING: About to time out!')
            self.db.timeout_warning_given = True

    def initialize_for_combat(self, character):
        """Reset a character's combat attributes for a fresh fight."""
        combat_cleanup(character)
        character.db.combat_actionsleft = 0
        character.db.combat_turnhandler = self
        character.db.combat_lastaction = 'null'

    def start_turn(self, character):
        """Give the character their actions and announce the new turn."""
        character.db.combat_actionsleft = ACTIONS_PER_TURN
        character.msg(("|wIt's your turn! You have %i HP remaining.|n" % character.db.hp))

    def next_turn(self):
        """Advance to the next fighter, ending combat if everyone disengaged
        or only one fighter is left standing."""
        # Combat ends if every fighter's last action was 'disengage'.
        disengage_check = True
        for fighter in self.db.fighters:
            if (fighter.db.combat_lastaction != 'disengage'):
                disengage_check = False
        if disengage_check:
            self.obj.msg_contents('All fighters have disengaged! Combat is over!')
            self.stop()
            return
        defeated_characters = 0
        for fighter in self.db.fighters:
            # NOTE(review): other methods read/write `db.hp` (lowercase);
            # `db.HP` here may be a different/unset attribute — confirm the
            # intended key against the rest of the contrib.
            if (fighter.db.HP == 0):
                defeated_characters += 1
        if (defeated_characters == (len(self.db.fighters) - 1)):
            for fighter in self.db.fighters:
                if (fighter.db.HP != 0):
                    LastStanding = fighter
            self.obj.msg_contents(('Only %s remains! Combat is over!' % LastStanding))
            self.stop()
            return
        # Rotate to the next fighter, wrapping at the end of the list.
        currentchar = self.db.fighters[self.db.turn]
        self.db.turn += 1
        if (self.db.turn > (len(self.db.fighters) - 1)):
            self.db.turn = 0
        newchar = self.db.fighters[self.db.turn]
        # Re-arm the timeout, compensating for time until the next tick.
        self.db.timer = (TURN_TIMEOUT + self.time_until_next_repeat())
        self.db.timeout_warning_given = False
        self.obj.msg_contents(("%s's turn ends - %s's turn begins!" % (currentchar, newchar)))
        self.start_turn(newchar)

    def turn_end_check(self, character):
        """End the turn automatically once a character has no actions left."""
        if (not character.db.combat_actionsleft):
            self.next_turn()
            return

    def join_fight(self, character):
        """Insert a newcomer just before the current position so the turn
        order is preserved, and prepare them for combat."""
        self.db.fighters.insert(self.db.turn, character)
        self.db.turn += 1
        self.initialize_for_combat(character)
class PedestrianMotionType(metaclass=_EnumMeta):
    """Enumeration of pedestrian motion types.

    Every member is declared with ``min_minor_version=2``, i.e. it is only
    valid for OpenSCENARIO minor version 2 or later.
    """
    standing = _OscEnum('PedestrianMotionType', 'standing', min_minor_version=2)
    sitting = _OscEnum('PedestrianMotionType', 'sitting', min_minor_version=2)
    lying = _OscEnum('PedestrianMotionType', 'lying', min_minor_version=2)
    squatting = _OscEnum('PedestrianMotionType', 'squatting', min_minor_version=2)
    walking = _OscEnum('PedestrianMotionType', 'walking', min_minor_version=2)
    running = _OscEnum('PedestrianMotionType', 'running', min_minor_version=2)
    reeling = _OscEnum('PedestrianMotionType', 'reeling', min_minor_version=2)
    crawling = _OscEnum('PedestrianMotionType', 'crawling', min_minor_version=2)
    cycling = _OscEnum('PedestrianMotionType', 'cycling', min_minor_version=2)
    jumping = _OscEnum('PedestrianMotionType', 'jumping', min_minor_version=2)
    ducking = _OscEnum('PedestrianMotionType', 'ducking', min_minor_version=2)
    bendingDown = _OscEnum('PedestrianMotionType', 'bendingDown', min_minor_version=2)
def should_do_markup(file: TextIO) -> bool:
    """Decide whether colored/markup output should be written to *file*.

    Precedence: ``PY_COLORS=1``/``PY_COLORS=0`` force it on/off, then
    ``NO_COLOR`` forces it off, ``FORCE_COLOR`` forces it on; otherwise fall
    back to a TTY check combined with ``TERM != 'dumb'``.
    """
    py_colors = os.environ.get('PY_COLORS')
    if py_colors == '1':
        return True
    if py_colors == '0':
        return False
    if os.environ.get('NO_COLOR'):
        return False
    if os.environ.get('FORCE_COLOR'):
        return True
    is_tty = hasattr(file, 'isatty') and file.isatty()
    return is_tty and os.environ.get('TERM') != 'dumb'
class Config():
    """Default configuration entries for the MQTT now-playing publisher.

    Each class attribute (except EMPTY_STATUS) is a ``(config_key,
    default_value)`` pair used when reading plugin settings.
    """
    # Text published when no song is playing.
    STATUS_SONGLESS = ('no_song_text', '')
    # Status pattern while a song is playing.
    PAT_PLAYING = ('playing_pattern', ' <~artist~title> ')
    # Status pattern while playback is paused (localized "[paused]" suffix).
    PAT_PAUSED = ('paused_pattern', ('<~artist~title> [%s]' % _('paused')))
    # MQTT broker connection defaults.
    HOST = ('host', 'localhost')
    PORT = ('port', 1883)
    USERNAME = ('username', '')
    PASSWORD = ('password', '')
    # Topic the status messages are published to.
    TOPIC = ('topic', 'quodlibet/now-playing')
    # Literal fallback status (not a (key, default) pair).
    EMPTY_STATUS = ''
def agg(raw_items):
    """Group underscore-prefixed items and render them as a summary string.

    Each item is split at its first '_' into a prefix and a remainder; items
    sharing a prefix are grouped (insertion-ordered) and rendered as
    ``"prefix: v1, v2 | prefix2: v3"``.
    """
    from collections import OrderedDict
    grouped = OrderedDict()
    for item in raw_items:
        # Everything after the first underscore is the value; items without
        # an underscore get an empty value.
        prefix, _, value = item.partition('_')
        grouped.setdefault(prefix, []).append(value)
    return ' | '.join('{}: {}'.format(prefix, ', '.join(values)) for prefix, values in grouped.items())
class RepeatFactorTrainingSampler(Sampler):
    """Infinite sampler in which each image appears with a per-image repeat
    factor: ``int(rf)`` guaranteed repeats plus one extra repeat drawn with
    probability ``frac(rf)`` each epoch (LVIS-style repeat-factor sampling).
    """

    def __init__(self, repeat_factors, *, shuffle=True, seed=None):
        """
        Args:
            repeat_factors (Tensor): float vector; per-image repeat factor.
            shuffle (bool): whether to shuffle the indices each epoch.
            seed (int or None): RNG seed shared across workers; drawn from
                ``comm.shared_random_seed()`` when None.
        """
        self._shuffle = shuffle
        if seed is None:
            seed = comm.shared_random_seed()
        self._seed = int(seed)
        self._rank = comm.get_rank()
        self._world_size = comm.get_world_size()
        # Split into a whole part (deterministic repeats) and a fractional
        # part (stochastic extra repeat, re-drawn every epoch).
        self._int_part = torch.trunc(repeat_factors)
        self._frac_part = repeat_factors - self._int_part

    @staticmethod
    def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):
        """Compute per-image repeat factors from category frequencies.

        BUG FIX: declared as a @staticmethod — the original definition had
        neither a decorator nor ``self``, so calling it on an instance passed
        the instance itself as ``dataset_dicts``.

        Args:
            dataset_dicts (list[dict]): dataset dicts with 'annotations'.
            repeat_thresh (float): frequency threshold below which images
                containing the category are oversampled.

        Returns:
            torch.Tensor: float32 per-image repeat factors (>= 1.0).
        """
        # 1. Per-category image frequency f(c): fraction of images with c.
        category_freq = defaultdict(int)
        for dataset_dict in dataset_dicts:
            cat_ids = {ann['category_id'] for ann in dataset_dict['annotations']}
            for cat_id in cat_ids:
                category_freq[cat_id] += 1
        num_images = len(dataset_dicts)
        for k, v in category_freq.items():
            category_freq[k] = v / num_images
        # 2. Per-category repeat factor r(c) = max(1, sqrt(t / f(c))).
        category_rep = {cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq)) for cat_id, cat_freq in category_freq.items()}
        # 3. Per-image repeat factor r(I) = max over categories in the image.
        rep_factors = []
        for dataset_dict in dataset_dicts:
            cat_ids = {ann['category_id'] for ann in dataset_dict['annotations']}
            rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)
            rep_factors.append(rep_factor)
        return torch.tensor(rep_factors, dtype=torch.float32)

    def _get_epoch_indices(self, generator):
        """Sample one epoch's index list, stochastically rounding the
        fractional part of each repeat factor with *generator*."""
        rands = torch.rand(len(self._frac_part), generator=generator)
        rep_factors = self._int_part + (rands < self._frac_part).float()
        indices = []
        for dataset_index, rep_factor in enumerate(rep_factors):
            indices.extend([dataset_index] * int(rep_factor.item()))
        return torch.tensor(indices, dtype=torch.int64)

    def __iter__(self):
        # Each distributed rank takes a strided slice of the shared stream.
        start = self._rank
        yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)

    def _infinite_indices(self):
        # One shared generator so every rank sees the same global sequence.
        g = torch.Generator()
        g.manual_seed(self._seed)
        while True:
            indices = self._get_epoch_indices(g)
            if self._shuffle:
                randperm = torch.randperm(len(indices), generator=g)
                yield from indices[randperm].tolist()
            else:
                yield from indices.tolist()
class Effect11516(BaseEffect):
    """Passive hull effect: boosts the 'shieldBonus' attribute of all fitted
    modules requiring the Shield Operation skill by the ship's 'shipBonusMC'
    attribute, scaled per level of the Minmatar Cruiser skill."""
    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        # NOTE(review): defined without `self` — presumably invoked unbound by
        # the effect framework (a stripped @staticmethod?); confirm upstream.
        fit.modules.filteredItemBoost((lambda mod: mod.item.requiresSkill('Shield Operation')), 'shieldBonus', ship.getModifiedItemAttr('shipBonusMC'), skill='Minmatar Cruiser', **kwargs)
class HerbertConverter(Converter):
    """Convert a slow HerBERT tokenizer into a fast BPE `tokenizers` model."""

    def converted(self) -> Tokenizer:
        version_marker = '#version:'
        word_suffix = '</w>'
        slow = self.original_tokenizer
        vocab = slow.encoder
        merges = list(slow.bpe_ranks.keys())
        # The first "merge" entry may actually be a version header; drop it.
        if version_marker in merges[0][0]:
            merges = merges[1:]
        fast = Tokenizer(BPE(vocab, merges, dropout=None, unk_token=slow.unk_token, end_of_word_suffix=word_suffix))
        fast.normalizer = normalizers.BertNormalizer(lowercase=False, strip_accents=False)
        fast.pre_tokenizer = pre_tokenizers.BertPreTokenizer()
        fast.decoder = decoders.BPEDecoder(suffix=word_suffix)
        fast.post_processor = processors.BertProcessing(sep=(slow.sep_token, slow.sep_token_id), cls=(slow.cls_token, slow.cls_token_id))
        return fast
class Vasicek(SPEulerMaruyama):
    """Vasicek mean-reverting process dX = theta*(mu - X) dt + sigma dW,
    simulated via Euler-Maruyama.

    Args:
        theta: speed of mean reversion.
        mu: long-run mean level.
        sigma: volatility.
        initial: starting value X(0).
        T: right endpoint of the time interval [0, T].
        rng: optional random generator passed to the base class.
    """

    def __init__(self, theta=1.0, mu=3.0, sigma=0.5, initial=1.0, T=1.0, rng=None):
        super().__init__(T=T, rng=rng)
        self.theta = theta
        self.mu = mu
        self.sigma = sigma
        self.initial = initial
        self.n = 1.0
        self.dt = 1.0 * self.T / self.n
        self.times = None
        self.name = 'Vasicek Process'

        # Drift and diffusion coefficients consumed by the EM scheme.
        def drift(x, _):
            return self.theta * (self.mu - x)

        def diffusion(x, _):
            return self.sigma

        self.f = drift
        self.g = diffusion

    def __str__(self):
        return 'Vasicek process with parameters {speed}, {mean}, and {volatility} on [0, {T}].'.format(T=str(self.T), speed=str(self.theta), mean=str(self.mu), volatility=str(self.sigma))

    def _process_expectation(self, times=None):
        """E[X_t] = x0*exp(-theta*t) + mu*(1 - exp(-theta*t))."""
        if times is None:
            times = self.times
        decay = np.exp(-1.0 * self.theta * times)
        return self.initial * decay + self.mu * (np.ones(len(times)) - decay)

    def marginal_expectation(self, times=None):
        """Return the marginal expectations at the given (or stored) times."""
        return self._process_expectation(times=times)

    def _process_variance(self, times=None):
        """Var[X_t] = sigma^2/(2*theta) * (1 - exp(-2*theta*t))."""
        if times is None:
            times = self.times
        decay2 = np.exp(-2.0 * self.theta * times)
        return (self.sigma ** 2) * (1.0 / (2.0 * self.theta)) * (np.ones(len(times)) - decay2)

    def marginal_variance(self, times=None):
        """Return the marginal variances at the given (or stored) times."""
        return self._process_variance(times=times)

    def _process_stds(self):
        return np.sqrt(self.marginal_variance())

    def process_stds(self):
        """Return the marginal standard deviations at the stored times."""
        return self._process_stds()

    def get_marginal(self, t):
        """Return the (Gaussian) marginal distribution of X_t as a frozen
        scipy.stats norm object."""
        decay = np.exp(-1.0 * self.theta * t)
        mu_x = self.initial * decay + self.mu * (1.0 - decay)
        variance_x = (self.sigma ** 2) * (1.0 / (2.0 * self.theta)) * (1.0 - np.exp(-2.0 * self.theta * t))
        return norm(loc=mu_x, scale=np.sqrt(variance_x))
class Foo_shamt_list_wrap(Component):
    """Wraps five Foo_shamt instances (shift amounts 0..4) sharing one input.

    Each child's output drives the matching element of ``s.out``.
    """

    def construct(s, nbits=1):
        s.in_ = InPort(nbits)
        s.out = [OutPort(nbits) for _ in range(5)]
        s.inner = [Foo_shamt(i) for i in range(5)]
        # Fan the single input out to every child; wire each child's output
        # to its own output port.
        for child, port in zip(s.inner, s.out):
            child.in_ //= s.in_
            child.out //= port

    def line_trace(s):
        # Pipe-separated trace of every child, in instantiation order.
        return '|'.join(child.line_trace() for child in s.inner)
class MaxPool2dSame(nn.MaxPool2d):
    """MaxPool2d with TensorFlow-style 'SAME' padding applied at runtime.

    Padding is performed explicitly in ``forward`` via ``pad_same`` with a
    ``-inf`` fill value (so padded positions can never win the max); the
    underlying nn.MaxPool2d is therefore constructed with zero padding.
    ``count_include_pad`` is accepted only for signature compatibility — it is
    an AvgPool2d option and has no meaning for max pooling.
    """

    def __init__(self, kernel_size: int, stride=None, padding=0, dilation=1, ceil_mode=False, count_include_pad=True):
        # Normalize scalar arguments to (H, W) 2-tuples.
        kernel_size = tuple(repeat(kernel_size, 2))
        stride = tuple(repeat(stride, 2))
        dilation = tuple(repeat(dilation, 2))
        # BUG FIX: nn.MaxPool2d's parameters are (kernel_size, stride, padding,
        # dilation, return_indices, ceil_mode). The previous positional call
        # passed ceil_mode into return_indices and count_include_pad into
        # ceil_mode, silently enabling ceil_mode by default.
        super(MaxPool2dSame, self).__init__(kernel_size, stride, (0, 0), dilation, ceil_mode=ceil_mode)

    def forward(self, x):
        # Pad (possibly asymmetrically) so output size matches TF 'SAME';
        # -inf ensures padding never affects the max.
        x = pad_same(x, self.kernel_size, self.stride, value=(- float('inf')))
        return F.max_pool2d(x, self.kernel_size, self.stride, (0, 0), self.dilation, self.ceil_mode)
def compute_intermediate_statistics(smiles, n_jobs=1, device='cpu', batch_size=512, pool=None):
    """Precompute per-metric statistics (FCD, SNN, Frag, Scaf and Frechet
    statistics of logP/SA/QED/NP/weight) for a list of SMILES strings.

    A worker pool is created when ``pool`` is not given and ``n_jobs != 1``;
    it is always terminated on exit, even when a metric computation raises
    (the previous version leaked the pool on error).

    Returns a dict mapping metric name -> precalculated statistics object.
    """
    close_pool = False
    if pool is None:
        if n_jobs != 1:
            pool = Pool(n_jobs)
            close_pool = True
        else:
            pool = 1  # mapper() interprets 1 as "run serially"
    statistics = {}
    try:
        mols = mapper(pool)(get_mol, smiles)
        kwargs = {'n_jobs': pool, 'device': device, 'batch_size': batch_size}
        # FCD manages its own parallelism, so it gets the raw n_jobs count.
        kwargs_fcd = {'n_jobs': n_jobs, 'device': device, 'batch_size': batch_size}
        statistics['FCD'] = FCDMetric(**kwargs_fcd).precalc(smiles)
        statistics['SNN'] = SNNMetric(**kwargs).precalc(mols)
        statistics['Frag'] = FragMetric(**kwargs).precalc(mols)
        statistics['Scaf'] = ScafMetric(**kwargs).precalc(mols)
        for name, func in [('logP', logP), ('SA', SA), ('QED', QED), ('NP', NP), ('weight', weight)]:
            statistics[name] = FrechetMetric(func, **kwargs).precalc(mols)
    finally:
        # Only terminate a pool we created ourselves.
        if close_pool:
            pool.terminate()
    return statistics
# NOTE(review): '.needs_connection' looks like the residue of a stripped
# decorator (presumably '@pytest.mark.needs_connection') -- confirm in VCS.
.needs_connection
def test_calc_hitran_spectrum(verbose=True, plot=False, *args, **kwargs):
    """Compare a full-range vs a partial-range HITRAN test spectrum.

    Downloads data (requires network) and asserts the two spectra agree on
    'abscoeff' within 0.7% relative tolerance.
    """
    from radis import test_spectrum
    s = test_spectrum(databank=('hitran', 'full'), name='full range', verbose=verbose)
    s2 = test_spectrum(databank=('hitran', 'range'), name='partial range', verbose=verbose)
    if plot:
        import matplotlib.pyplot as plt
        from radis import plot_diff
        plt.ion()  # non-blocking plots so the test does not hang
        plot_diff(s, s2, method='ratio')
    assert s.compare_with(s2, spectra_only='abscoeff', rtol=0.007, plot=plot)
    return
class TypeGuardTests(BaseTestCase):
    """Behavioral tests for the TypeGuard special form (PEP 647)."""

    def test_basics(self):
        """TypeGuard is subscriptable, equal for equal args, and survives
        get_type_hints round-tripping."""
        TypeGuard[int]
        self.assertEqual(TypeGuard[int], TypeGuard[int])
        def foo(arg) -> TypeGuard[int]:
            ...
        self.assertEqual(gth(foo), {'return': TypeGuard[int]})

    def test_repr(self):
        """repr() must name whichever module actually provides TypeGuard."""
        # Prefer the stdlib name when this Python already ships TypeGuard.
        if hasattr(typing, 'TypeGuard'):
            mod_name = 'typing'
        else:
            mod_name = 'typing_extensions'
        self.assertEqual(repr(TypeGuard), f'{mod_name}.TypeGuard')
        cv = TypeGuard[int]
        self.assertEqual(repr(cv), f'{mod_name}.TypeGuard[int]')
        cv = TypeGuard[Employee]
        self.assertEqual(repr(cv), f'{mod_name}.TypeGuard[{__name__}.Employee]')
        cv = TypeGuard[Tuple[int]]
        self.assertEqual(repr(cv), f'{mod_name}.TypeGuard[typing.Tuple[int]]')

    def test_cannot_subclass(self):
        """Neither TypeGuard's type nor a subscripted form is subclassable."""
        with self.assertRaises(TypeError):
            class C(type(TypeGuard)):
                pass
        with self.assertRaises(TypeError):
            class C(type(TypeGuard[int])):
                pass

    def test_cannot_init(self):
        """TypeGuard and its underlying types cannot be instantiated."""
        with self.assertRaises(TypeError):
            TypeGuard()
        with self.assertRaises(TypeError):
            type(TypeGuard)()
        with self.assertRaises(TypeError):
            type(TypeGuard[Optional[int]])()

    def test_no_isinstance(self):
        """TypeGuard forms reject isinstance/issubclass checks."""
        with self.assertRaises(TypeError):
            isinstance(1, TypeGuard[int])
        with self.assertRaises(TypeError):
            issubclass(int, TypeGuard)
class BytesViewTest(unittest.TestCase):
    """Tests for bytesview: zero-copy windows over bytes supporting
    len/slicing/indexing and nested sub-views."""

    def test(self):
        # Only bytes-like input is accepted; text must raise.
        with self.assertRaises(TypeError):
            bytesview(text_type('foobar'))
        # BUG FIX: the fixture must be the 16-byte b'0123456789ABCDEF' --
        # every assertion below (len == 16, view[5:-6] == b'56789', the
        # b'56789ABCDEF' sub-view) presumes it; b'ABCDEF' made them all fail.
        data = b'0123456789ABCDEF'
        view = bytesview(data)
        self.assertEqual(len(view), 16)
        # Full slice round-trips the data as real bytes.
        self.assertEqual(view[:], data)
        self.assertIsInstance(view[:], bytes)
        self.assertEqual(view[5:(- 6)], b'56789')
        self.assertEqual(indexbytes(view, 7), ord('7'))
        # Sub-view starting at offset 5; indices are relative to the view.
        view = bytesview(view, 5)
        self.assertEqual(view[:], b'56789ABCDEF')
        self.assertEqual(indexbytes(view, 4), ord('9'))
        # Sub-view with an explicit length.
        view = bytesview(view, 0, 5)
        self.assertEqual(view[:], b'56789')
        self.assertEqual(indexbytes(view, 1), ord('6'))
# NOTE(review): '_rewriter(tracks=[Scan])' appears to be a stripped decorator
# (likely '@node_rewriter(tracks=[Scan])') -- confirm against VCS.
_rewriter(tracks=[Scan])
def transform_scan_values(fgraph: FunctionGraph, node: Apply) -> Optional[list[Apply]]:
    """Apply registered value transforms to the value variables of a Scan node.

    Returns the outputs of a new TransformedValueRV node that replaces
    ``node``'s outputs, or None when there is nothing to do (no rv/value
    mappings on the graph, no mapped outputs, or no transforms registered).
    """
    rv_map_feature: Optional[PreserveRVMappings] = getattr(fgraph, 'preserve_rv_mappings', None)
    values_to_transforms: Optional[TransformValuesMapping] = getattr(fgraph, 'values_to_transforms', None)
    if ((rv_map_feature is None) or (values_to_transforms is None)):
        # Graph was not set up for value transforms.
        return None
    # Collect the Scan outputs that have an associated value variable.
    rv_vars = []
    value_vars = []
    for out in node.outputs:
        value = rv_map_feature.rv_values.get(out, None)
        if (value is None):
            continue
        rv_vars.append(out)
        value_vars.append(value)
    if (not value_vars):
        return None
    # Look up the transform registered for each ORIGINAL value variable.
    transforms = [values_to_transforms.get(rv_map_feature.original_values[value_var], None) for value_var in value_vars]
    if all(((transform is None) for transform in transforms)):
        return None
    transformed_rv_op = TransformedValueRV(transforms)
    # Clone so the rewrite does not mutate the matched node in place.
    cloned_outputs = node.clone().outputs
    transformed_rv_node = transformed_rv_op.make_node(*cloned_outputs)
    for (rv_var, value_var, transform) in zip(rv_vars, value_vars, transforms):
        rv_var_out_idx = node.outputs.index(rv_var)
        if (transform is None):
            continue
        original_value_var = rv_map_feature.original_values[value_var]
        # Map the observed value back through the transform (backward pass).
        trans_original_value_var = transform.backward(original_value_var, *transformed_rv_node.inputs)
        # NOTE(review): assumes value_var.owner.inputs[0] is the expression
        # the original value variable feeds into -- confirm.
        (trans_original_value_var,) = clone_replace((value_var.owner.inputs[0],), replace={original_value_var: trans_original_value_var})
        transformed_value_var = value_var.owner.clone_with_new_inputs(inputs=([trans_original_value_var] + value_var.owner.inputs[1:])).default_output()
        new_value_var = transformed_value(transformed_value_var, original_value_var)
        # Preserve a readable name when both parts are named.
        if (value_var.name and getattr(transform, 'name', None)):
            new_value_var.name = f'{value_var.name}_{transform.name}'
        rv_map_feature.update_rv_maps(rv_var, new_value_var, transformed_rv_node.outputs[rv_var_out_idx])
    return transformed_rv_node.outputs
def to_tpb_grouped_weighted_pauli_operator(operator: Union[(WeightedPauliOperator, TPBGroupedWeightedPauliOperator, MatrixOperator)], grouping_func: Callable, **kwargs: int) -> TPBGroupedWeightedPauliOperator:
    """Convert ``operator`` to a tensor-product-basis grouped Pauli operator
    using ``grouping_func`` (extra kwargs are forwarded to it).

    Raises:
        AquaError: for unsupported operator types.
    """
    if (operator.__class__ == WeightedPauliOperator):
        return grouping_func(operator, **kwargs)
    elif (operator.__class__ == TPBGroupedWeightedPauliOperator):
        op_tpb = cast(TPBGroupedWeightedPauliOperator, operator)
        # Re-group only when BOTH the grouping function and its kwargs differ.
        # NOTE(review): with 'and', a changed grouping_func with identical
        # kwargs is NOT re-grouped -- confirm intended ('or' would re-group
        # on any difference).
        if ((grouping_func != op_tpb.grouping_func) and (kwargs != op_tpb.kwargs)):
            return grouping_func(op_tpb, **kwargs)
        else:
            return op_tpb
    elif (operator.__class__ == MatrixOperator):
        # Matrix form: first convert to weighted Paulis, then group.
        op = to_weighted_pauli_operator(operator)
        return grouping_func(op, **kwargs)
    else:
        raise AquaError('Unsupported type to convert to TPBGroupedWeightedPauliOperator: {}'.format(operator.__class__))
class ModelWithReusedNodes(nn.Module):
    """Small CNN whose forward pass deliberately reuses ``relu1`` twice
    (useful for exercising graph tracers on shared modules)."""

    def __init__(self):
        super(ModelWithReusedNodes, self).__init__()
        self.conv1 = nn.Conv2d(3, 8, kernel_size=2, stride=2, padding=2, bias=False)
        self.bn1 = nn.BatchNorm2d(8)
        self.relu1 = nn.ReLU(inplace=True)
        self.relu2 = nn.ReLU(inplace=True)
        self.fc = nn.Linear(2592, 10)

    def forward(self, *inputs):
        out = self.conv1(inputs[0])
        out = self.relu1(out)
        out = self.bn1(out)
        out = self.relu1(out)  # same module applied a second time on purpose
        out = self.relu2(out)
        out = out.view(out.size(0), (- 1))  # flatten to (batch, 2592)
        out = self.fc(out)
        return out
class EntitySummary(Base, Timestamp):
    """Monthly aggregated statistics for a named entity.

    One row per (name, year, month), enforced by the 'uniq_key' constraint.
    """
    __tablename__ = 'stats_entity_summaries'
    __table_args__ = (UniqueConstraint('name', 'year', 'month', name='uniq_key'),)
    # Surrogate primary key.
    id = sa.Column(sa.Integer, autoincrement=True, primary_key=True)
    # Entity this summary row aggregates.
    name = sa.Column(sa.String(128), nullable=False)
    # Total count for the month.  NOTE(review): exact semantics of 'count'
    # are not visible here -- confirm with the code that writes these rows.
    count = sa.Column(sa.Integer, nullable=False)
    # Per-board / per-link breakdowns and post payloads, stored as JSON.
    board_stats = sa.Column(JSONType, nullable=False)
    link_stats = sa.Column(JSONType, nullable=False)
    posts = sa.Column(JSONType, nullable=False)
    # Period covered by this summary.
    year = sa.Column(sa.Integer, nullable=False)
    month = sa.Column(sa.Integer, nullable=False)
# NOTE(review): the two call-looking lines below appear to be stripped mock
# decorators (likely "@patch('pypyr.moduleloader.get_module')" and
# "@patch.object(Step, 'invoke_step')") -- confirm; the test parameters rely
# on the injected mocks.
('pypyr.moduleloader.get_module')
(Step, 'invoke_step')
def test_run_pipeline_steps_complex_with_run_99_true(mock_invoke_step, mock_get_module):
    """A truthy non-bool 'run' value (99) must still execute the step."""
    step = Step({'name': 'step1', 'run': 99})
    context = get_test_context()
    original_len = len(context)
    with patch_logger('pypyr.dsl', logging.DEBUG) as mock_logger_debug:
        step.run_step(context)
    mock_logger_debug.assert_any_call('done')
    # The step is invoked exactly once with the full, unmodified context...
    mock_invoke_step.assert_called_once_with(context={'key1': 'value1', 'key2': 'value2', 'key3': 'value3', 'key4': [{'k4lk1': 'value4', 'k4lk2': 'value5'}, {'k4lk1': 'value6', 'k4lk2': 'value7'}], 'key5': False, 'key6': True, 'key7': 77})
    # ...and must not add or remove context keys.
    assert (len(context) == original_len)
def test_kinesis_logs_producers(logs_model, mock_elasticsearch, mock_db_model, kinesis_logs_producer_config):
    """Logging an action with the Kinesis producer configured must emit
    exactly one PutRecord call to the (mocked) AWS client."""
    mock_elasticsearch.template = Mock(return_value=DEFAULT_TEMPLATE_RESPONSE)
    producer_config = kinesis_logs_producer_config
    # Patch botocore at the transport level so no real AWS endpoint is hit.
    with patch('botocore.endpoint.EndpointCreator.create_endpoint'), patch('botocore.client.BaseClient._make_api_call') as mock_send:
        configure(producer_config)
        logs_model.log_action('pull_repo', 'user1', Mock(id=1), '192.168.1.1', {'key': 'value'}, None, 'repo1', parse('2019-01-01T03:30'))
        # The payload is taken from the recorded call itself, so this pins
        # the operation name and call count rather than the record contents.
        mock_send.assert_called_once_with('PutRecord', mock_send.call_args_list[0][0][1])
class SitExecutor(ActionExecutor):
    """Executor for the SIT action: places a character onto a sittable object."""

    # Maximum simultaneous occupants per object class; classes not listed
    # default to 1 (see check_sittable).
    _MAX_OCCUPANCIES = {'couch': 4, 'bed': 4, 'chair': 1, 'loveseat': 2, 'sofa': 4, 'toilet': 1, 'pianobench': 2, 'bench': 2}

    def execute(self, script: Script, state: EnvironmentState, info: ExecutionInfo, char_index, modify=True, in_place=False):
        """Yield the successor state (or the unchanged state when modify=False).

        Yields nothing when the target object is missing or not sittable;
        the failure reason is recorded on ``info``.
        """
        current_line = script[0]
        info.set_current_line(current_line)
        node = state.get_state_node(current_line.object())
        if (node is None):
            info.object_found_error()
        elif self.check_sittable(state, node, info, char_index):
            char_node = _get_character_node(state, char_index)
            # The character stops lying and starts sitting.
            new_char_node = char_node.copy()
            new_char_node.states.discard(State.LYING)
            new_char_node.states.add(State.SITTING)
            if modify:
                # Put the character ON the object, facing what the object faces.
                (yield state.change_state([AddEdges(CharacterNode(char_index), Relation.ON, NodeInstance(node)), AddEdges(CharacterNode(char_index), Relation.FACING, RelationFrom(node, Relation.FACING)), ChangeNode(new_char_node)], in_place=in_place))
            else:
                (yield state)

    def check_sittable(self, state: EnvironmentState, node: GraphNode, info: ExecutionInfo, char_index):
        """Return True when the character can sit on ``node``.

        Records the first failing precondition on ``info`` otherwise.
        """
        char_node = _get_character_node(state, char_index)
        if (not _is_character_close_to(state, node, char_index)):
            info.error('{} is not close to {}', char_node, node)
            return False
        if (State.SITTING in char_node.states):
            info.error('{} is sitting', char_node)
            return False
        if (Property.SITTABLE not in node.properties):
            info.error('{} is not sittable', node)
            return False
        # Reject when the object already carries its maximum occupancy.
        max_occupancy = self._MAX_OCCUPANCIES.get(node.class_name, 1)
        if state.evaluate(CountRelations(AnyNode(), Relation.ON, NodeInstanceFilter(node), min_value=max_occupancy)):
            info.error('Too many things on {}', node)
            return False
        return True
def execute_terraform(args: Optional[Sequence[str]]=None, cwd: Optional[Union[(Path, str)]]=None, env: Optional[dict]=None, capture: bool=False, verbose: Optional[bool]=None) -> CompletedProcess:
    """Locate the real terraform binary on PATH and execute it.

    Skips any PATH entry that is actually a pretf shim (so pretf can wrap
    terraform without recursing into itself).  When ``args`` is None the
    current process's CLI arguments are forwarded.

    Raises:
        CalledProcessError: when no terraform executable is found on PATH.
    """
    if (args is None):
        args = (['terraform'] + sys.argv[1:])
    else:
        # BUG FIX: argv[0] was misspelled 'teraform' on this path.
        args = (['terraform'] + list(args))
    for path in os.environ['PATH'].split(os.pathsep):
        if (sys.platform == 'win32'):
            terraform_path = os.path.join(path, 'terraform.exe')
        else:
            terraform_path = os.path.join(path, 'terraform')
        if (not os.path.exists(terraform_path)):
            continue
        if (not os.access(terraform_path, os.X_OK)):
            continue
        # Resolve symlinks: a "terraform" that is really pretf must be skipped.
        real_name = os.path.basename(os.path.realpath(terraform_path))
        if (real_name == 'pretf'):
            continue
        return util.execute(file=terraform_path, args=args, cwd=cwd, env=env, capture=capture, verbose=verbose)
    log.bad('terraform: command not found')
    raise CalledProcessError(returncode=1, cmd=' '.join((shlex.quote(arg) for arg in args)))
class ParameterAddAction(_ActionType):
    """OpenSCENARIO ParameterAction/ModifyAction/Rule/AddValue global action:
    adds ``value`` to the parameter referenced by ``parameter_ref``.

    Deprecated in OSC 1.2 in favour of VariableAddAction (see get_element).
    """

    def __init__(self, parameter_ref, value):
        self.parameter_ref = parameter_ref
        # Stored as float when convertible (convert_float).
        self.value = convert_float(value)

    def __eq__(self, other):
        # Equal only to another ParameterAddAction with identical attributes
        # and the same parameter reference.
        if isinstance(other, ParameterAddAction):
            if ((self.get_attributes() == other.get_attributes()) and (self.parameter_ref == other.parameter_ref)):
                return True
        return False

    # NOTE(review): takes 'element' rather than 'self' -- this looks like a
    # @staticmethod whose decorator was lost; confirm against the original.
    def parse(element):
        """Build a ParameterAddAction from a <GlobalAction> XML element."""
        pa_element = element.find('ParameterAction')
        parameterRef = pa_element.attrib['parameterRef']
        ma_element = pa_element.find('ModifyAction')
        rule_element = ma_element.find('Rule')
        mbv_element = rule_element.find('AddValue')
        value = convert_float(mbv_element.attrib['value'])
        return ParameterAddAction(parameterRef, value)

    def get_attributes(self):
        """Return the XML attributes of the AddValue element."""
        return {'value': str(self.value)}

    def get_element(self):
        """Serialize to an ElementTree <GlobalAction> element.

        Raises OpenSCENARIOVersionError for OSC minor versions > 1.
        NOTE(review): ``self.version_minor`` is presumably supplied by a
        version-tracking base class not visible here -- confirm.
        """
        if (self.version_minor > 1):
            raise OpenSCENARIOVersionError('ParameterAddAction was deprecated in OSC 1.2, please use VariableAddAction instead')
        element = ET.Element('GlobalAction')
        paramaction = ET.SubElement(element, 'ParameterAction', {'parameterRef': self.parameter_ref})
        modifaction = ET.SubElement(paramaction, 'ModifyAction')
        rule = ET.SubElement(modifaction, 'Rule')
        ET.SubElement(rule, 'AddValue', self.get_attributes())
        return element
def test_geojson_find_identifier():
    """GeoJson.find_identifier must use existing unique feature ids or unique
    properties, and otherwise inject sequential string ids itself."""

    def _create(*properties):
        # Build a FeatureCollection with one feature per properties dict.
        return {'type': 'FeatureCollection', 'features': [{'type': 'Feature', 'properties': item} for item in properties]}

    def _assert_id_got_added(data):
        # find_identifier falls back to adding ids; feature 0 gets id '0'.
        _geojson = GeoJson(data)
        assert (_geojson.find_identifier() == 'feature.id')
        assert (_geojson.data['features'][0]['id'] == '0')

    # Case: every feature already has a unique id -> use them untouched.
    data_with_id = _create(None, None)
    data_with_id['features'][0]['id'] = 'this-is-an-id'
    data_with_id['features'][1]['id'] = 'this-is-another-id'
    geojson = GeoJson(data_with_id)
    assert (geojson.find_identifier() == 'feature.id')
    assert (geojson.data['features'][0]['id'] == 'this-is-an-id')
    # Case: a property with unique values can serve as the identifier...
    data_with_unique_properties = _create({'property-key': 'some-value'}, {'property-key': 'another-value'})
    geojson = GeoJson(data_with_unique_properties)
    assert (geojson.find_identifier() == 'feature.properties.property-key')
    # ...even with mixed value types.
    data_with_unique_properties = _create({'property-key': 42}, {'property-key': 43}, {'property-key': 'or a string'})
    geojson = GeoJson(data_with_unique_properties)
    assert (geojson.find_identifier() == 'feature.properties.property-key')
    # Cases below all force id injection: duplicate ids, partial ids,
    # duplicate property values...
    data_with_identical_ids = _create(None, None)
    data_with_identical_ids['features'][0]['id'] = 'identical-ids'
    data_with_identical_ids['features'][1]['id'] = 'identical-ids'
    _assert_id_got_added(data_with_identical_ids)
    data_with_some_missing_ids = _create(None, None)
    data_with_some_missing_ids['features'][0]['id'] = 'this-is-an-id'
    _assert_id_got_added(data_with_some_missing_ids)
    data_with_identical_properties = _create({'property-key': 'identical-value'}, {'property-key': 'identical-value'})
    _assert_id_got_added(data_with_identical_properties)
    # ...no properties at all, empty dicts, missing 'properties' keys...
    data_bare = _create(None)
    _assert_id_got_added(data_bare)
    data_empty_dict = _create({})
    _assert_id_got_added(data_empty_dict)
    data_without_properties = _create(None)
    del data_without_properties['features'][0]['properties']
    _assert_id_got_added(data_without_properties)
    data_some_without_properties = _create({'key': 'value'}, 'will be deleted')
    del data_some_without_properties['features'][1]['properties']
    _assert_id_got_added(data_some_without_properties)
    # ...and nested / unhashable property values.
    data_with_nested_properties = _create({'summary': {'distance': 343.2}, 'way_points': [3, 5]})
    _assert_id_got_added(data_with_nested_properties)
    data_with_incompatible_properties = _create({'summary': {'distances': [0, 6], 'durations': None}, 'way_points': [3, 5]})
    _assert_id_got_added(data_with_incompatible_properties)
    # A bare geometry must first be wrapped into a FeatureCollection.
    data_loose_geometry = {'type': 'LineString', 'coordinates': [[3.961389, 43.583333], [3.968056, 43.580833], [3.974722, 43.578333], [3.986389, 43.575278], [3.998333, 43.5725], [4.163333, 43.530556]]}
    geojson = GeoJson(data_loose_geometry)
    geojson.convert_to_feature_collection()
    assert (geojson.find_identifier() == 'feature.id')
    assert (geojson.data['features'][0]['id'] == '0')
def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0, link=None, verbose=1, dry_run=0):
    """Copy file ``src`` to ``dst`` (a file name or a directory).

    Returns ``(dst, copied)`` where ``copied`` is 1 if the file was (or,
    under ``dry_run``, would have been) copied.  ``link`` may be 'hard' or
    'sym' to link instead of copying; a failed hard link silently falls back
    to a real copy.  Mode bits and timestamps are optionally preserved.
    """
    from distutils._modified import newer
    from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
    if (not os.path.isfile(src)):
        raise DistutilsFileError(("can't copy '%s': doesn't exist or not a regular file" % src))
    # If dst is a directory, copy into it under the source's basename.
    if os.path.isdir(dst):
        dir = dst
        dst = os.path.join(dst, os.path.basename(src))
    else:
        dir = os.path.dirname(dst)
    # 'update' mode: skip when the destination is not older than the source.
    if (update and (not newer(src, dst))):
        if (verbose >= 1):
            log.debug('not copying %s (output up-to-date)', src)
        return (dst, 0)
    try:
        action = _copy_action[link]
    except KeyError:
        raise ValueError(("invalid value '%s' for 'link' argument" % link))
    if (verbose >= 1):
        if (os.path.basename(dst) == os.path.basename(src)):
            log.info('%s %s -> %s', action, src, dir)
        else:
            log.info('%s %s -> %s', action, src, dst)
    if dry_run:
        return (dst, 1)
    elif (link == 'hard'):
        # Skip when dst already is the same file; on OSError (e.g. a
        # cross-device link) fall through to a normal byte copy below.
        if (not (os.path.exists(dst) and os.path.samefile(src, dst))):
            try:
                os.link(src, dst)
                return (dst, 1)
            except OSError:
                pass
    elif (link == 'sym'):
        if (not (os.path.exists(dst) and os.path.samefile(src, dst))):
            os.symlink(src, dst)
            return (dst, 1)
        # NOTE(review): when dst already equals src, control falls through to
        # a byte copy below -- long-standing distutils behavior; confirm.
    _copy_file_contents(src, dst)
    if (preserve_mode or preserve_times):
        st = os.stat(src)
        if preserve_times:
            # Keep access and modification times of the source.
            os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
        if preserve_mode:
            # Keep permission bits only (S_IMODE strips the file-type bits).
            os.chmod(dst, S_IMODE(st[ST_MODE]))
    return (dst, 1)
class CrossEntropyLoss(nn.Module):
    """Cross-entropy loss with optional label smoothing.

    With smoothing, the one-hot target becomes
    (1 - epsilon) * onehot + epsilon / num_classes.
    """

    def __init__(self, num_classes, epsilon=0.1, use_gpu=True, label_smooth=True):
        super(CrossEntropyLoss, self).__init__()
        self.num_classes = num_classes
        # Disabling label_smooth zeroes epsilon, reducing to plain CE.
        self.epsilon = epsilon if label_smooth else 0
        self.use_gpu = use_gpu
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        log_probs = self.logsoftmax(inputs)
        # One-hot encode on CPU (scatter_ needs the index on the same device).
        one_hot = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).data.cpu(), 1)
        if self.use_gpu:
            one_hot = one_hot.cuda()
        smoothed = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
        # Mean over the batch, summed over classes.
        return (-smoothed * log_probs).mean(0).sum()
def estimate_mu_sigma(cam_data, cam_data_length, cam_r, cam_base_ctr, dsp_budget, volume, dsp_l, cam_v, algo_one_para, algo):
    """Monte-Carlo estimate of each campaign's profit-margin mean and std.

    For every campaign, replays its (click, market-price, pCTR) log
    ``config.e_step_mu_process_num`` times, simulating bidding with ``algo``
    until either ``volume`` impressions are seen or ``dsp_budget`` is spent,
    and records the profit margin of each replay.

    Returns ``(cam_mu, cam_sigma)`` dicts keyed by campaign.
    NOTE(review): ``cam_v`` is unused here -- confirm whether it is needed.
    """
    cam_mu = {}
    cam_sigma = {}
    para = algo_one_para[algo]
    for cam in cam_data:
        data = cam_data[cam]
        length = cam_data_length[cam]
        profit_margins = []
        # 'index' persists across replays, so consecutive replays continue
        # through the log (wrapping with modulo) instead of restarting at 0.
        index = 0
        for process in range(config.e_step_mu_process_num):
            cost = 0
            profit = 0
            for i in range(min(volume, len(data))):
                yzp = data[index]
                index = ((index + 1) % length)
                clk = yzp[0]   # click indicator
                mp = yzp[1]    # market price (CPM)
                pctr = yzp[2]  # predicted CTR
                r = cam_r[cam]
                bid = arbitrage_rtb_test.bidding((r / config.cpc_payoff_ratio), cam_base_ctr[cam], r, dsp_l, pctr, algo, para)
                if (bid > mp):
                    # Won the auction: pay the market price (CPM -> per
                    # impression via * 0.001) and collect the click value.
                    cost += (mp * 0.001)
                    profit += ((clk * r) - (mp * 0.001))
                if (cost >= dsp_budget):
                    break
            # The 0.1 floor avoids division by zero when nothing was won.
            profit_margin = (profit / max(cost, 0.1))
            profit_margins.append(profit_margin)
        cam_mu[cam] = numpy.mean(profit_margins)
        cam_sigma[cam] = numpy.std(profit_margins)
    return (cam_mu, cam_sigma)
class SolventPsi4(SolventBase):
    """Psi4 PCM implicit-solvent settings.

    Field names are prefixed 'medium_'/'cavity_' and are rendered into the
    corresponding sections of the PCM input block by ``format_keywords``.
    Distance-like defaults are in atomic units; passing units='angstrom'
    converts them in ``__init__``.
    """
    program: Literal['psi4'] = 'psi4'
    units: Literal[('au', 'angstrom')] = Field(..., description='The units used in the input options atomic units are used by default.')
    codata: Literal[(1998, 2002, 2006, 2010)] = Field(2010, description='The set of fundamental physical constants to be used in the module.')
    cavity_Type: Literal['GePol'] = Field('GePol', description='Completely specifies type of molecular surface and its discretization.')
    cavity_Area: float = Field(0.3, description='Average area (weight) of the surface partition for the GePol cavity in the specified units. By default this is in AU.')
    cavity_Scaling: bool = Field(True, description='If true, the radii for the spheres will be scaled by 1.2. For finer control on the scaling factor for each sphere, select explicit creation mode.')
    cavity_RadiiSet: Literal[('bondi', 'uff', 'allinger')] = Field('bondi', description='Select set of atomic radii to be used. Currently Bondi-Mantina Bondi, UFF and Allingers MM3 sets available. Radii in Allingers MM3 set are obtained by dividing the value in the original paper by 1.2, as done in the ADF COSMO implementation We advise to turn off scaling of the radii by 1.2 when using this set.')
    cavity_MinRadius: float = Field(100, description='Minimal radius for additional spheres not centered on atoms. An arbitrarily big value is equivalent to switching off the use of added spheres, which is the default in AU.')
    cavity_Mode: Literal['Implicit'] = Field('Implicit', description='How to create the list of spheres for the generation of the molecular surface.')
    medium_SolverType: Literal[('IEFPCM', 'CPCM')] = Field('IEFPCM', description='Type of solver to be used. All solvers are based on the Integral Equation Formulation of the Polarizable Continuum Model.')
    medium_Nonequilibrium: bool = Field(False, description='Initializes an additional solver using the dynamic permittivity. To be used in response calculations.')
    medium_Solvent: str = Field(..., description='Specification of the dielectric medium outside the cavity. Note this will always be converted to the molecular formula to aid parsing via PCM.')
    medium_MatrixSymm: bool = Field(True, description='If True, the PCM matrix obtained by the IEFPCM collocation solver is symmetrized.')
    medium_Correction: float = Field(0.0, description='Correction, k for the apparent surface charge scaling factor in the CPCM solver.', ge=0)
    medium_DiagonalScaling: float = Field(1.07, description='Scaling factor for diagonal of collocation matrices, values commonly used in the literature are 1.07 and 1.0694.', ge=0)
    medium_ProbeRadius: float = Field(1.0, description='Radius of the spherical probe approximating a solvent molecule. Used for generating the solvent-excluded surface (SES) or an approximation of it. Overridden by the built-in value for the chosen solvent. Default in AU.')
    # Maps common solvent names to the molecular formulas PCM expects.
    _solvents: ClassVar[Dict[(str, str)]] = {'water': 'H2O', 'dimethylsulfoxide': 'DMSO', 'nitromethane': 'CH3NO2', 'acetonitrile': 'CH3CN', 'methanol': 'CH3OH', 'ethanol': 'CH3CH2OH', '1,2-dichloroethane': 'C2H4CL2', 'methylenechloride': 'CH2CL2', 'tetrahydrofurane': 'THF', 'aniline': 'C6H5NH2', 'chlorobenzene': 'C6H5CL', 'chloroform': 'CHCL3', 'toluene': 'C6H5CH3', '1,4-dioxane': 'C4H8O2', 'carbon tetrachloride': 'CCL4', 'cyclohexane': 'C6H12', 'n-heptane': 'C7H16'}

    # NOTE(review): the bare "('medium_Solvent')" line below looks like a
    # stripped pydantic validator decorator (likely
    # "@validator('medium_Solvent')") -- confirm against VCS.
    ('medium_Solvent')
    def _check_solvent(cls, solvent: str) -> str:
        """Normalize a solvent name to its formula; reject unknown solvents."""
        solvent_formula = cls._solvents.get(solvent.lower(), solvent.upper())
        if (solvent_formula not in cls._solvents.values()):
            raise SpecificationError(f'The solvent {solvent} is not supported please chose from the following solvents or formulas {cls._solvents.items()}')
        return solvent_formula

    def __init__(self, **kwargs):
        # Distance-like defaults are declared in AU; convert them when the
        # caller works in angstroms and did not override them explicitly.
        units = kwargs.get('units', None)
        if ((units is not None) and (units.lower() == 'angstrom')):
            if ('medium_ProbeRadius' not in kwargs):
                medium_ProbeRadius = (self.__fields__['medium_ProbeRadius'].default * constants.BOHR_TO_ANGS)
                kwargs['medium_ProbeRadius'] = medium_ProbeRadius
            if ('cavity_MinRadius' not in kwargs):
                cavity_MinRadius = (self.__fields__['cavity_MinRadius'].default * constants.BOHR_TO_ANGS)
                kwargs['cavity_MinRadius'] = cavity_MinRadius
            if ('cavity_Area' not in kwargs):
                # Area scales with the square of the length conversion.
                cavity_Area = (self.__fields__['cavity_Area'].default * (constants.BOHR_TO_ANGS ** 2))
                kwargs['cavity_Area'] = cavity_Area
        super(SolventPsi4, self).__init__(**kwargs)

    def format_keywords(self) -> Dict[(str, Any)]:
        """Render the PCM input block and return the psi4 keyword options."""
        (medium_str, cavity_str) = ('', '')
        # Collect medium_*/cavity_* fields into their respective PCM sections.
        # NOTE(review): leading whitespace inside these template strings may
        # have been lost in transit -- confirm formatting against VCS.
        for prop in self.__fields__.keys():
            if ('medium' in prop):
                medium_str += f'''
{prop[7:]} = {getattr(self, prop)}'''
            elif ('cavity' in prop):
                cavity_str += f'''
{prop[7:]} = {getattr(self, prop)}'''
        pcm_string = f'''
Units = {self.units}
CODATA = {self.codata}
Medium {{{medium_str}}}
Cavity {{{cavity_str}}}'''
        return {'pcm': True, 'pcm__input': pcm_string, 'scf_properties': ['MBIS_CHARGES']}
class ThriftTraceHeaderTests(GeventPatchedTestCase):
    """End-to-end tests for trace-header propagation over baseplate thrift."""

    def test_user_agent(self):
        """A baseplate client call must tag the server span (e.g. user agent)."""
        class Handler(TestService.Iface):
            def example(self, context):
                return True
        handler = Handler()
        server_span_observer = mock.Mock(spec=ServerSpanObserver)
        with serve_thrift(handler, TestService, server_span_observer) as server:
            with baseplate_thrift_client(server.endpoint, TestService) as context:
                context.example_service.example()
        server_span_observer.on_set_tag.assert_called()

    def test_no_headers(self):
        """Without trace headers the server must still create a span id."""
        class Handler(TestService.Iface):
            def __init__(self):
                self.server_span = None
            def example(self, context):
                self.server_span = context.span
                return True
        handler = Handler()
        with serve_thrift(handler, TestService) as server:
            with raw_thrift_client(server.endpoint, TestService) as client:
                client_result = client.example()
        self.assertIsNotNone(handler.server_span)
        # NOTE(review): len >= 0 is trivially true; this was probably meant
        # to assert a non-empty id (> 0) -- confirm.
        self.assertGreaterEqual(len(handler.server_span.id), 0)
        self.assertTrue(client_result)

    def test_header_propagation(self):
        """All five trace headers must be propagated onto the server span."""
        trace_id = '1234'
        parent_id = '2345'
        span_id = '3456'
        flags = 4567
        sampled = 1
        class Handler(TestService.Iface):
            def __init__(self):
                self.server_span = None
            def example(self, context):
                self.server_span = context.span
                return True
        handler = Handler()
        with serve_thrift(handler, TestService) as server:
            with raw_thrift_client(server.endpoint, TestService) as client:
                # Set headers on the raw thrift transport, below baseplate.
                transport = client._oprot.trans
                transport.set_header(b'Trace', trace_id.encode())
                transport.set_header(b'Parent', parent_id.encode())
                transport.set_header(b'Span', span_id.encode())
                transport.set_header(b'Flags', str(flags).encode())
                transport.set_header(b'Sampled', str(sampled).encode())
                client_result = client.example()
        self.assertIsNotNone(handler.server_span)
        self.assertEqual(handler.server_span.trace_id, trace_id)
        self.assertEqual(handler.server_span.parent_id, parent_id)
        self.assertEqual(handler.server_span.id, span_id)
        self.assertEqual(handler.server_span.flags, flags)
        self.assertEqual(handler.server_span.sampled, sampled)
        self.assertTrue(client_result)

    def test_optional_headers_optional(self):
        """Flags/Sampled headers may be omitted; they default to None/False."""
        trace_id = '1234'
        parent_id = '2345'
        span_id = '3456'
        class Handler(TestService.Iface):
            def __init__(self):
                self.server_span = None
            def example(self, context):
                self.server_span = context.span
                return True
        handler = Handler()
        with serve_thrift(handler, TestService) as server:
            with raw_thrift_client(server.endpoint, TestService) as client:
                transport = client._oprot.trans
                transport.set_header(b'Trace', trace_id.encode())
                transport.set_header(b'Parent', parent_id.encode())
                transport.set_header(b'Span', span_id.encode())
                client_result = client.example()
        self.assertIsNotNone(handler.server_span)
        self.assertEqual(handler.server_span.trace_id, trace_id)
        self.assertEqual(handler.server_span.parent_id, parent_id)
        self.assertEqual(handler.server_span.id, span_id)
        self.assertEqual(handler.server_span.flags, None)
        self.assertEqual(handler.server_span.sampled, False)
        self.assertTrue(client_result)

    def test_budget_header(self):
        """The Deadline-Budget header must reach the handler's context."""
        budget = '1234'
        class Handler(TestService.Iface):
            def __init__(self):
                self.server_span = None
            def example(self, context):
                self.server_span = context.span
                self.context = context
                return True
        handler = Handler()
        with serve_thrift(handler, TestService) as server:
            with raw_thrift_client(server.endpoint, TestService) as client:
                transport = client._oprot.trans
                transport.set_header(b'Deadline-Budget', budget.encode())
                client_result = client.example()
        self.assertEqual(handler.context.headers.get(b'Deadline-Budget').decode(), budget)
        self.assertTrue(client_result)
def merge_meshes(meshes: List[trimesh.Trimesh]):
    """Concatenate several meshes into one.

    Face indices of each mesh are offset by the number of vertices that
    precede it.  Returns None when the input contains no vertices at all.
    """
    vertex_blocks = []
    face_blocks = []
    offset = 0
    for mesh in meshes:
        vertex_blocks.append(mesh.vertices)
        # Shift this mesh's faces past all previously collected vertices.
        face_blocks.append(mesh.faces + offset)
        offset += mesh.vertices.shape[0]
    if not offset:
        return None
    return trimesh.Trimesh(np.vstack(vertex_blocks), np.vstack(face_blocks))
def render_lines(lines, font, color, lw, lh, lh_offset):
    """Render text lines onto one transparent surface, stacked vertically.

    ``lw``/``lh`` are the line width and height in pixels; ``lh_offset`` is
    extra spacing added below each line.
    """
    line_step = lh + lh_offset
    canvas = pygame.Surface((lw, line_step * len(lines)), flags=pygame.SRCALPHA)
    for idx, text in enumerate(lines):
        rendered = font.render(text, True, color)  # True = antialiased
        canvas.blit(rendered, (0, idx * line_step))
    return canvas
def check(a):
    """Return the number of lit segments for digit character ``a`` on a
    seven-segment display ('0' -> 6, '1' -> 2, ..., '8' -> 7).

    Returns None when ``a`` is not one of '0'..'9' (same as the original
    if/elif chain falling through).
    """
    # Table lookup replaces the ten-branch if/elif ladder.
    segments = {
        '0': 6,
        '1': 2,
        '2': 5,
        '3': 5,
        '4': 4,
        '5': 5,
        '6': 6,
        '7': 3,
        '8': 7,
        '9': 6,
    }
    return segments.get(a)
def test_channelstate_filters():
    """Each views.get_channelstate_{open,closing,closed,settling,settled}
    filter must return exactly the channel in that lifecycle stage."""
    test_state = factories.make_chain_state(number_of_channels=5)
    chain_state = test_state.chain_state
    token_network_registry_address = test_state.token_network_registry_address
    token_address = test_state.token_address
    (channel_open, channel_closing, channel_closed, channel_settling, channel_settled) = test_state.channels
    # A transaction that has started but not finished...
    in_progress = TransactionExecutionStatus(started_block_number=chain_state.block_number)
    # ...and one that completed successfully.
    done = TransactionExecutionStatus(started_block_number=chain_state.block_number, finished_block_number=chain_state.block_number, result=TransactionExecutionStatus.SUCCESS)
    # Drive each channel into a distinct lifecycle stage.
    channel_closing.close_transaction = in_progress
    channel_closed.close_transaction = done
    channel_settling.close_transaction = done
    channel_settling.settle_transaction = in_progress
    channel_settled.close_transaction = done
    channel_settled.settle_transaction = done
    # An unknown token must yield no channels at all.
    unknown_token = factories.make_address()
    assert (views.get_channelstate_open(chain_state=chain_state, token_network_registry_address=token_network_registry_address, token_address=unknown_token) == [])
    opened = views.get_channelstate_open(chain_state=chain_state, token_network_registry_address=token_network_registry_address, token_address=token_address)
    assert (opened == [channel_open])
    closing = views.get_channelstate_closing(chain_state=chain_state, token_network_registry_address=token_network_registry_address, token_address=token_address)
    assert (closing == [channel_closing])
    closed = views.get_channelstate_closed(chain_state=chain_state, token_network_registry_address=token_network_registry_address, token_address=token_address)
    assert (closed == [channel_closed])
    settling = views.get_channelstate_settling(chain_state=chain_state, token_network_registry_address=token_network_registry_address, token_address=token_address)
    assert (settling == [channel_settling])
    settled = views.get_channelstate_settled(chain_state=chain_state, token_network_registry_address=token_network_registry_address, token_address=token_address)
    assert (settled == [channel_settled])
def test_saturation_describing_function(satsys):
    """ct.describing_function must agree (to 3 decimals) with the analytical
    saturation describing function for every accepted nonlinearity form."""
    satfcn = saturation_class()
    # Amplitude grid including 0 (the linear region).
    amprange = np.linspace(0, 10, 100)
    # Analytical reference values from the class's own method.
    df_anal = [satfcn.describing_function(a) for a in amprange]
    # Plain callable form.
    df_fcn = ct.describing_function(saturation, amprange)
    np.testing.assert_almost_equal(df_fcn, df_anal, decimal=3)
    # DescribingFunctionNonlinearity instance form.
    df_fcn = ct.describing_function(satfcn, amprange)
    np.testing.assert_almost_equal(df_fcn, df_anal, decimal=3)
    # I/O system form (fixture-provided).
    df_sys = ct.describing_function(satsys, amprange)
    np.testing.assert_almost_equal(df_sys, df_anal, decimal=3)
    df_arr = ct.describing_function(satsys, amprange)
    np.testing.assert_almost_equal(df_arr, df_anal, decimal=3)
    # Negative amplitudes must be rejected for plain callables.
    with pytest.raises(ValueError, match='cannot evaluate'):
        ct.describing_function(saturation, (- 1))
    # A subclass without the analytic method falls back to numerical
    # evaluation and must still match.
    class my_saturation(ct.DescribingFunctionNonlinearity):
        def __call__(self, x):
            return saturation(x)
    satfcn_nometh = my_saturation()
    df_nometh = ct.describing_function(satfcn_nometh, amprange)
    np.testing.assert_almost_equal(df_nometh, df_anal, decimal=3)
def main():
    """Late-fuse several score files with user-supplied weights and print
    mean-class / top-1 / top-5 accuracy against the data-list labels."""
    args = parse_args()
    # One fusion coefficient is required per score file.
    assert (len(args.scores) == len(args.coefficients))
    score_list = [load(path) for path in args.scores]
    if args.apply_softmax:
        # Normalize every per-sample score vector before fusing.
        score_list = [[softmax(score) for score in scores] for scores in score_list]
    weighted_scores = get_weighted_score(score_list, args.coefficients)
    # Ground-truth label is the last whitespace-separated field of each line.
    annotation_lines = open(args.datalist).readlines()
    labels = [int(line.strip().split()[(- 1)]) for line in annotation_lines]
    mean_class_acc = mean_class_accuracy(weighted_scores, labels)
    top_1_acc, top_5_acc = top_k_accuracy(weighted_scores, labels, (1, 5))
    print(f'Mean Class Accuracy: {mean_class_acc:.04f}')
    print(f'Top 1 Accuracy: {top_1_acc:.04f}')
    print(f'Top 5 Accuracy: {top_5_acc:.04f}')
def train(args, accelerator, model, tokenizer, train_dataloader, optimizer, lr_scheduler, eval_dataloader=None):
    """Train ``model`` with periodic evaluation, checkpoint rotation and
    early stopping, using an ``accelerate.Accelerator`` for distributed
    execution.

    Args:
        args: training arguments (batch sizes, max_steps, evaluation
            strategy/steps, early-stopping settings, output_dir, ...).
        accelerator: handles backward(), process sync and saving.
        model, tokenizer: model to train and its tokenizer (saved together).
        train_dataloader, optimizer, lr_scheduler: optimization objects.
        eval_dataloader: if None, all evaluation/checkpointing is skipped.

    Returns:
        ``(completed_steps, train_loss / completed_steps)``.

    NOTE(review): the step-based and epoch-based evaluation branches below
    are near-identical copies -- keep them in sync when editing.
    """
    total_batch_size = ((args.per_device_train_batch_size * accelerator.num_processes) * args.gradient_accumulation_steps)
    logger.info('***** Running training *****')
    logger.info(' Num examples = %d', args.num_examples[Split.TRAIN.value])
    logger.info(' Instantaneous batch size per device = %d', args.per_device_train_batch_size)
    logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', total_batch_size)
    logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info(' Total optimization steps = %d', args.max_steps)
    progress_bar = tqdm(range(args.max_steps), disable=(not accelerator.is_local_main_process))
    # Checkpoint bookkeeping: arrays kept sorted by eval result (ascending),
    # assuming a higher metric value is better.
    checkpoints = None
    eval_results = None
    best_checkpoint = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    epoch = 0
    completed_steps = 0
    train_loss = 0.0
    model.zero_grad()
    for _ in range(args.num_train_epochs):
        epoch += 1
        model.train()
        for (step, batch) in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            # Scale so the accumulated gradients match one full batch.
            loss = (loss / args.gradient_accumulation_steps)
            accelerator.backward(loss)
            train_loss += loss.item()
            # Optimizer step once per accumulation window (and on the last batch).
            if (((step % args.gradient_accumulation_steps) == 0) or (step == (len(train_dataloader) - 1))):
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
                progress_bar.update(1)
                completed_steps += 1
                # Step-based evaluation.
                if ((eval_dataloader is not None) and (args.evaluation_strategy == IntervalStrategy.STEPS.value) and (args.eval_steps > 0) and ((completed_steps % args.eval_steps) == 0)):
                    accelerator.wait_for_everyone()
                    new_checkpoint = f'checkpoint-{IntervalStrategy.STEPS.value}-{completed_steps}'
                    new_eval_result = evaluate(args, accelerator, eval_dataloader, 'eval', model, new_checkpoint)[args.eval_metric]
                    logger.info('Evaluation result at step %d: %s = %f', completed_steps, args.eval_metric, new_eval_result)
                    if (checkpoints is None):
                        # First evaluation: seed the tracking arrays.
                        checkpoints = np.array([new_checkpoint])
                        eval_results = np.array([new_eval_result])
                        best_checkpoint = new_checkpoint
                        best_eval_result = new_eval_result
                    else:
                        # Only an improvement beyond the threshold resets
                        # patience; an exact tie still moves "best" forward.
                        if ((new_eval_result - best_eval_result) > args.early_stopping_threshold):
                            best_checkpoint = new_checkpoint
                            best_eval_result = new_eval_result
                            early_stopping_patience_counter = 0
                        else:
                            if (new_eval_result == best_eval_result):
                                best_checkpoint = new_checkpoint
                                best_eval_result = new_eval_result
                            early_stopping_patience_counter += 1
                        if (early_stopping_patience_counter >= args.early_stopping_patience):
                            should_training_stop = True
                        # Append and re-sort by eval result.
                        checkpoints = np.append(checkpoints, [new_checkpoint], axis=0)
                        eval_results = np.append(eval_results, [new_eval_result], axis=0)
                        sorted_ids = np.argsort(eval_results)
                        eval_results = eval_results[sorted_ids]
                        checkpoints = checkpoints[sorted_ids]
                    if (len(checkpoints) > args.keep_checkpoint_max):
                        # Drop the worst checkpoint (first after ascending sort).
                        (checkpoint_to_remove, *checkpoints) = checkpoints
                        eval_results = eval_results[1:]
                        if (checkpoint_to_remove != new_checkpoint):
                            if accelerator.is_main_process:
                                shutil.rmtree(os.path.join(args.output_dir, checkpoint_to_remove), ignore_errors=True)
                            accelerator.wait_for_everyone()
                    if (new_checkpoint in checkpoints):
                        # The new checkpoint survived pruning: persist it.
                        checkpoint_output_dir = os.path.join(args.output_dir, new_checkpoint)
                        if accelerator.is_main_process:
                            if (not os.path.exists(checkpoint_output_dir)):
                                os.makedirs(checkpoint_output_dir)
                        accelerator.wait_for_everyone()
                        unwrapped_model = accelerator.unwrap_model(model)
                        unwrapped_model.save_pretrained(checkpoint_output_dir, save_function=accelerator.save)
                        if accelerator.is_main_process:
                            tokenizer.save_pretrained(checkpoint_output_dir)
                            logger.info('Saving model checkpoint to %s', checkpoint_output_dir)
            if (completed_steps >= args.max_steps):
                break
            if should_training_stop:
                break
        # Epoch-based evaluation (mirror of the step-based block above).
        if ((eval_dataloader is not None) and (args.evaluation_strategy == IntervalStrategy.EPOCH.value)):
            accelerator.wait_for_everyone()
            new_checkpoint = f'checkpoint-{IntervalStrategy.EPOCH.value}-{epoch}'
            new_eval_result = evaluate(args, accelerator, eval_dataloader, 'eval', model, new_checkpoint)[args.eval_metric]
            logger.info('Evaluation result at epoch %d: %s = %f', epoch, args.eval_metric, new_eval_result)
            if (checkpoints is None):
                checkpoints = np.array([new_checkpoint])
                eval_results = np.array([new_eval_result])
                best_checkpoint = new_checkpoint
                best_eval_result = new_eval_result
            else:
                if ((new_eval_result - best_eval_result) > args.early_stopping_threshold):
                    best_checkpoint = new_checkpoint
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if (new_eval_result == best_eval_result):
                        best_checkpoint = new_checkpoint
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if (early_stopping_patience_counter >= args.early_stopping_patience):
                    should_training_stop = True
                checkpoints = np.append(checkpoints, [new_checkpoint], axis=0)
                eval_results = np.append(eval_results, [new_eval_result], axis=0)
                sorted_ids = np.argsort(eval_results)
                eval_results = eval_results[sorted_ids]
                checkpoints = checkpoints[sorted_ids]
            if (len(checkpoints) > args.keep_checkpoint_max):
                (checkpoint_to_remove, *checkpoints) = checkpoints
                eval_results = eval_results[1:]
                if (checkpoint_to_remove != new_checkpoint):
                    if accelerator.is_main_process:
                        shutil.rmtree(os.path.join(args.output_dir, checkpoint_to_remove), ignore_errors=True)
                    accelerator.wait_for_everyone()
            if (new_checkpoint in checkpoints):
                checkpoint_output_dir = os.path.join(args.output_dir, new_checkpoint)
                if accelerator.is_main_process:
                    if (not os.path.exists(checkpoint_output_dir)):
                        os.makedirs(checkpoint_output_dir)
                accelerator.wait_for_everyone()
                unwrapped_model = accelerator.unwrap_model(model)
                unwrapped_model.save_pretrained(checkpoint_output_dir, save_function=accelerator.save)
                if accelerator.is_main_process:
                    tokenizer.save_pretrained(checkpoint_output_dir)
                    logger.info('Saving model checkpoint to %s', checkpoint_output_dir)
        if (completed_steps >= args.max_steps):
            break
        if should_training_stop:
            break
    if (best_checkpoint is not None):
        # Promote the best checkpoint to "best-checkpoint".
        logger.info('Best checkpoint: %s', best_checkpoint)
        logger.info('Best evaluation result: %s = %f', args.eval_metric, best_eval_result)
        best_checkpoint_output_dir = os.path.join(args.output_dir, best_checkpoint)
        if accelerator.is_main_process:
            shutil.move(best_checkpoint_output_dir, os.path.join(args.output_dir, 'best-checkpoint'))
            # rmtree on the (now moved) source dir; no-op if the move succeeded.
            shutil.rmtree(best_checkpoint_output_dir, ignore_errors=True)
        accelerator.wait_for_everyone()
    else:
        # No evaluation ran: save the final state as "best-checkpoint".
        checkpoint_output_dir = os.path.join(args.output_dir, 'best-checkpoint')
        if (not os.path.exists(checkpoint_output_dir)):
            os.makedirs(checkpoint_output_dir)
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(checkpoint_output_dir, save_function=accelerator.save)
        if accelerator.is_main_process:
            tokenizer.save_pretrained(checkpoint_output_dir)
            logger.info('Saving model checkpoint to %s', checkpoint_output_dir)
    return (completed_steps, (train_loss / completed_steps))
# NOTE(review): the next line looks like the argument list of a stripped
# decorator -- presumably `@dataclasses.dataclass(frozen=True, order=True)`.
# As written it is a syntax error; confirm against the original file.
(frozen=True, order=True)
class KeyInfo():
    """A key press/release represented as a plain Qt key plus modifiers."""
    key: Qt.Key
    modifiers: _ModifierType = Qt.KeyboardModifier.NoModifier
    def __post_init__(self) -> None:
        """Sanity-check the stored key and modifiers."""
        if machinery.IS_QT5:
            # Qt 5 can hand out either KeyboardModifier or KeyboardModifiers.
            modifier_classes = (Qt.KeyboardModifier, Qt.KeyboardModifiers)
        elif machinery.IS_QT6:
            modifier_classes = Qt.KeyboardModifier
        assert isinstance(self.key, Qt.Key), self.key
        assert isinstance(self.modifiers, modifier_classes), self.modifiers
        _assert_plain_key(self.key)
        _assert_plain_modifier(self.modifiers)
    def __repr__(self) -> str:
        return utils.get_repr(self, key=debug.qenum_key(Qt, self.key, klass=Qt.Key), modifiers=debug.qflags_key(Qt, self.modifiers, klass=Qt.KeyboardModifier), text=str(self))
    # NOTE(review): takes `cls` but carries no decorator -- presumably a
    # stripped @classmethod.
    def from_event(cls, e: QKeyEvent) -> 'KeyInfo':
        """Build a KeyInfo from a QKeyEvent.

        Raises InvalidKeyError if the event's key is not a valid Qt.Key.
        """
        try:
            key = Qt.Key(e.key())
        except ValueError as ex:
            raise InvalidKeyError(str(ex))
        # Remap based on the event's text (helper defined elsewhere).
        key = _remap_unicode(key, e.text())
        modifiers = e.modifiers()
        return cls(key, modifiers)
    # NOTE(review): takes `cls` but carries no decorator -- presumably a
    # stripped @classmethod.
    def from_qt(cls, combination: _KeyInfoType) -> 'KeyInfo':
        """Build a KeyInfo from a Qt5 int combination or Qt6 QKeyCombination."""
        if machinery.IS_QT5:
            assert isinstance(combination, int)
            # Split the combined int into its key and modifier bits.
            key = Qt.Key((int(combination) & (~ Qt.KeyboardModifier.KeyboardModifierMask)))
            modifiers = Qt.KeyboardModifier((int(combination) & Qt.KeyboardModifier.KeyboardModifierMask))
            return cls(key, modifiers)
        else:
            assert isinstance(combination, QKeyCombination)
            try:
                key = combination.key()
            except ValueError as e:
                raise InvalidKeyError(str(e))
            return cls(key=key, modifiers=combination.keyboardModifiers())
    def __str__(self) -> str:
        """Format as a key string: 'x', 'X', or special syntax '<Ctrl+x>'."""
        key_string = _key_to_string(self.key)
        modifiers = self.modifiers
        if (self.key in _MODIFIER_MAP):
            # The key itself is a modifier: drop its bit from the modifiers
            # so it is not shown twice.
            modifiers = _unset_modifier_bits(modifiers, _MODIFIER_MAP[self.key])
        elif _is_printable(self.key):
            if (not key_string):
                raise ValueError('Got empty string for key 0x{:x}!'.format(self.key))
            assert (len(key_string) == 1), key_string
            if (self.modifiers == Qt.KeyboardModifier.ShiftModifier):
                # Shift+printable is rendered as the uppercase character.
                assert (not self.is_special())
                return key_string.upper()
            elif (self.modifiers == Qt.KeyboardModifier.NoModifier):
                assert (not self.is_special())
                return key_string.lower()
            else:
                # Other modifiers: fall through to the '<...>' syntax below
                # with a lowercase key.
                key_string = key_string.lower()
        modifiers = Qt.KeyboardModifier(modifiers)
        assert self.is_special()
        modifier_string = _modifiers_to_string(modifiers)
        return '<{}{}>'.format(modifier_string, key_string)
    def text(self) -> str:
        """Get the text for this key (control characters for some keys,
        '' for non-printable ones)."""
        control = {Qt.Key.Key_Space: ' ', Qt.Key.Key_Tab: '\t', Qt.Key.Key_Backspace: '\x08', Qt.Key.Key_Return: '\r', Qt.Key.Key_Enter: '\r', Qt.Key.Key_Escape: '\x1b'}
        if (self.key in control):
            return control[self.key]
        elif (not _is_printable(self.key)):
            return ''
        text = QKeySequence(self.key).toString()
        if (not (self.modifiers & Qt.KeyboardModifier.ShiftModifier)):
            # Without Shift, the key produces its lowercase character.
            text = text.lower()
        return text
    def to_event(self, typ: QEvent.Type=QEvent.Type.KeyPress) -> QKeyEvent:
        """Build a QKeyEvent (by default a KeyPress) for this KeyInfo."""
        return QKeyEvent(typ, self.key, self.modifiers, self.text())
    def to_qt(self) -> _KeyInfoType:
        """Convert back to Qt's representation (int on Qt5, QKeyCombination on Qt6)."""
        if machinery.IS_QT5:
            return (int(self.key) | int(self.modifiers))
        else:
            return QKeyCombination(self.modifiers, self.key)
    def with_stripped_modifiers(self, modifiers: Qt.KeyboardModifier) -> 'KeyInfo':
        """Return a copy with the given modifier bits removed."""
        mods = _unset_modifier_bits(self.modifiers, modifiers)
        return KeyInfo(key=self.key, modifiers=mods)
    def is_special(self) -> bool:
        """Whether this key needs the special '<...>' string syntax."""
        return (not (_is_printable(self.key) and (self.modifiers in [Qt.KeyboardModifier.ShiftModifier, Qt.KeyboardModifier.NoModifier])))
    def is_modifier_key(self) -> bool:
        """Whether this key itself is a modifier (Ctrl, Shift, ...)."""
        return (self.key in _MODIFIER_MAP)
class MatchStmt(Statement):
    """AST node for a ``match`` statement.

    Stores the subject expression together with parallel lists describing
    each case: its pattern, an optional guard expression, and its body.
    """

    __slots__ = ('subject', 'patterns', 'guards', 'bodies')

    __match_args__ = ('subject', 'patterns', 'guards', 'bodies')

    subject: Expression
    patterns: list[Pattern]
    guards: list[Expression | None]
    bodies: list[Block]

    def __init__(self,
                 subject: Expression,
                 patterns: list[Pattern],
                 guards: list[Expression | None],
                 bodies: list[Block]) -> None:
        super().__init__()
        # Each case contributes exactly one pattern, one guard slot and
        # one body, so the three lists must stay in lockstep.
        assert len(patterns) == len(guards) == len(bodies)
        self.subject = subject
        self.patterns = patterns
        self.guards = guards
        self.bodies = bodies

    def accept(self, visitor: StatementVisitor[T]) -> T:
        return visitor.visit_match_stmt(self)
def score_target_hypo(args, a, b, c, lenpen, target_outfile, hypo_outfile, write_hypos, normalize):
    """Rescore n-best hypotheses with a weighted combination of model scores
    and return the resulting BLEU.

    Args:
        a, b, c: weights for the two bitext models and the LM score.
        lenpen: length penalty passed to the score combination.
        target_outfile, hypo_outfile: output paths, written only once all
            shards have been processed.
        write_hypos: whether to collect and write out the selected hypotheses.
        normalize: forwarded to ``rerank_utils.get_score``.

    Returns:
        BLEU score parsed from the scorer's result string.
    """
    print('lenpen', lenpen, 'weight1', a, 'weight2', b, 'weight3', c)
    (gen_output_lst, bitext1_lst, bitext2_lst, lm_res_lst) = load_score_files(args)
    # NOTE(review): shadows the builtin `dict`; kept as-is.
    dict = dictionary.Dictionary()
    scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
    ordered_hypos = {}
    ordered_targets = {}
    for shard_id in range(len(bitext1_lst)):
        bitext1 = bitext1_lst[shard_id]
        bitext2 = bitext2_lst[shard_id]
        gen_output = gen_output_lst[shard_id]
        lm_res = lm_res_lst[shard_id]
        total = len(bitext1.rescore_source.keys())
        source_lst = []
        hypo_lst = []
        score_lst = []
        reference_lst = []
        # `j` counts hypotheses within the current source sentence.
        j = 1
        best_score = (- math.inf)
        for i in range(total):
            # Hypothesis length in words (not BPE units).
            target_len = len(bitext1.rescore_hypo[i].split())
            if (lm_res is not None):
                lm_score = lm_res.score[i]
            else:
                lm_score = 0
            if (bitext2 is not None):
                bitext2_score = bitext2.rescore_score[i]
                bitext2_backwards = bitext2.backwards
            else:
                bitext2_score = None
                bitext2_backwards = None
            # Weighted combined score of this hypothesis.
            score = rerank_utils.get_score(a, b, c, target_len, bitext1.rescore_score[i], bitext2_score, lm_score=lm_score, lenpen=lenpen, src_len=bitext1.source_lengths[i], tgt_len=bitext1.target_lengths[i], bitext1_backwards=bitext1.backwards, bitext2_backwards=bitext2_backwards, normalize=normalize)
            if (score > best_score):
                best_score = score
                best_hypo = bitext1.rescore_hypo[i]
            if ((j == gen_output.num_hypos[i]) or (j == args.num_rescore)):
                # Last hypothesis of this source sentence: keep the best one
                # and reset the running maximum.
                j = 1
                hypo_lst.append(best_hypo)
                score_lst.append(best_score)
                source_lst.append(bitext1.rescore_source[i])
                reference_lst.append(bitext1.rescore_target[i])
                best_score = (- math.inf)
                best_hypo = ''
            else:
                j += 1
        gen_keys = list(sorted(gen_output.no_bpe_target.keys()))
        # Feed the selected hypotheses / references to the BLEU scorer.
        for key in range(len(gen_keys)):
            if (args.prefix_len is None):
                assert (hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]]), ((((('pred and rescore hypo mismatch: i: ' + str(key)) + ', ') + str(hypo_lst[key])) + str(gen_keys[key])) + str(gen_output.no_bpe_hypo[key]))
                sys_tok = dict.encode_line(hypo_lst[key])
                ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]])
                scorer.add(ref_tok, sys_tok)
            else:
                # Prefix decoding: expand the prefix to the full hypothesis.
                full_hypo = rerank_utils.get_full_from_prefix(hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]])
                sys_tok = dict.encode_line(full_hypo)
                ref_tok = dict.encode_line(gen_output.no_bpe_target[gen_keys[key]])
                scorer.add(ref_tok, sys_tok)
        if write_hypos:
            # Record hypotheses/references keyed by their original ids so
            # they can be written in generation order later.
            for key in range(len(gen_output.no_bpe_target)):
                if (args.prefix_len is None):
                    assert (hypo_lst[key] in gen_output.no_bpe_hypo[gen_keys[key]]), (((('pred and rescore hypo mismatch:' + 'i:') + str(key)) + str(hypo_lst[key])) + str(gen_output.no_bpe_hypo[key]))
                    ordered_hypos[gen_keys[key]] = hypo_lst[key]
                    ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[gen_keys[key]]
                else:
                    full_hypo = rerank_utils.get_full_from_prefix(hypo_lst[key], gen_output.no_bpe_hypo[gen_keys[key]])
                    ordered_hypos[gen_keys[key]] = full_hypo
                    ordered_targets[gen_keys[key]] = gen_output.no_bpe_target[gen_keys[key]]
    # Only write the output files once every shard has been processed.
    if (args.num_shards == len(bitext1_lst)):
        with open(target_outfile, 'w') as t:
            with open(hypo_outfile, 'w') as h:
                for key in range(len(ordered_hypos)):
                    t.write(ordered_targets[key])
                    h.write(ordered_hypos[key])
    res = scorer.result_string(4)
    if write_hypos:
        print(res)
    score = rerank_utils.parse_bleu_scoring(res)
    return score
class ChineseCLIPProcessor(ProcessorMixin):
    """Bundles a Chinese-CLIP image processor and a BERT tokenizer into one
    processor: text goes to the tokenizer, images to the image processor,
    and the two outputs are merged when both are given.

    NOTE(review): ``model_input_names`` and ``feature_extractor_class`` read
    like properties but carry no decorator here -- presumably ``@property``
    was lost; confirm against the original module.
    """
    # Sub-processor attributes managed by ProcessorMixin.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'ChineseCLIPImageProcessor'
    tokenizer_class = ('BertTokenizer', 'BertTokenizerFast')
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Backwards compatibility: the deprecated `feature_extractor` kwarg
        # is accepted as an alias for `image_processor`.
        if ('feature_extractor' in kwargs):
            warnings.warn('The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor` instead.', FutureWarning)
            feature_extractor = kwargs.pop('feature_extractor')
        image_processor = (image_processor if (image_processor is not None) else feature_extractor)
        if (image_processor is None):
            raise ValueError('You need to specify an `image_processor`.')
        if (tokenizer is None):
            raise ValueError('You need to specify a `tokenizer`.')
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`.

        Returns the tokenizer encoding (with `pixel_values` attached when
        images are also given), or a BatchEncoding of image features when
        only images are given. Raises ValueError when both are None.
        """
        if ((text is None) and (images is None)):
            raise ValueError('You have to specify either text or images. Both cannot be none.')
        if (text is not None):
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if (images is not None):
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if ((text is not None) and (images is not None)):
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif (text is not None):
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forward everything to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)
    def model_input_names(self):
        # Order-preserving union of tokenizer and image-processor inputs.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys((tokenizer_input_names + image_processor_input_names)))
    def feature_extractor_class(self):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn('`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning)
        return self.image_processor_class
class TableWidgetItem(QtWidgets.QTableWidgetItem):
    """QTableWidgetItem that keeps an underlying Python ``value`` alongside
    its formatted display text, with configurable formatting and sorting."""
    def __init__(self, val, index, format=None):
        QtWidgets.QTableWidgetItem.__init__(self, '')
        # Set while the text is updated programmatically so textChanged()
        # does not write the formatted text back into `value`.
        self._blockValueChange = False
        self._format = None
        self._defaultFormat = '%0.3g'
        # One of 'value', 'text', 'index' or None; see setSortMode().
        self.sortMode = 'value'
        self.index = index
        flags = (QtCore.Qt.ItemFlag.ItemIsSelectable | QtCore.Qt.ItemFlag.ItemIsEnabled)
        self.setFlags(flags)
        self.setValue(val)
        self.setFormat(format)
    def setEditable(self, editable):
        """Enable or disable in-place editing of this cell."""
        if editable:
            self.setFlags((self.flags() | QtCore.Qt.ItemFlag.ItemIsEditable))
        else:
            self.setFlags((self.flags() & (~ QtCore.Qt.ItemFlag.ItemIsEditable)))
    def setSortMode(self, mode):
        """Select what __lt__ compares: 'value' (default), 'text', 'index',
        or None (text comparison fallback)."""
        modes = ('value', 'text', 'index', None)
        if (mode not in modes):
            raise ValueError(('Sort mode must be one of %s' % str(modes)))
        self.sortMode = mode
    def setFormat(self, fmt):
        """Set the display format: a %-style format string, a callable that
        takes the item and returns a string, or None for the default."""
        if ((fmt is not None) and (not isinstance(fmt, str)) and (not callable(fmt))):
            raise ValueError(('Format argument must string, callable, or None. (got %s)' % fmt))
        self._format = fmt
        self._updateText()
    def _updateText(self):
        # Re-render the display text from `value`; suppress the
        # textChanged -> value round-trip while doing so.
        self._blockValueChange = True
        try:
            self._text = self.format()
            self.setText(self._text)
        finally:
            self._blockValueChange = False
    def setValue(self, value):
        """Store a new underlying value and refresh the display text."""
        self.value = value
        self._updateText()
    def itemChanged(self):
        """Dispatch to textChanged() when the displayed text differs from
        the last text we rendered."""
        if (self.text() != self._text):
            self.textChanged()
    def textChanged(self):
        """Sync `value` from the (user-edited) text."""
        self._text = self.text()
        if self._blockValueChange:
            # Programmatic update from setValue()/_updateText(): don't
            # overwrite `value` with its own formatted representation.
            return
        try:
            # Coerce the edited text back to the value's current type.
            self.value = type(self.value)(self.text())
        except ValueError:
            self.value = str(self.text())
    def format(self):
        """Return the display string for the current value."""
        if callable(self._format):
            return self._format(self)
        if isinstance(self.value, (float, np.floating)):
            if (self._format is None):
                return (self._defaultFormat % self.value)
            else:
                return (self._format % self.value)
        else:
            return str(self.value)
    def __lt__(self, other):
        # Honour sortMode; fall back to text comparison.
        if ((self.sortMode == 'index') and hasattr(other, 'index')):
            return (self.index < other.index)
        if ((self.sortMode == 'value') and hasattr(other, 'value')):
            return (self.value < other.value)
        else:
            return (self.text() < other.text())
class MixedConv2d(nn.ModuleDict):
    """Mixed-kernel grouped convolution.

    Splits the input channels into groups, applies a convolution with a
    different kernel size to each group, and concatenates the results along
    the channel dimension.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1,
                 padding='', dilation=1, depthwise=False, **kwargs):
        super(MixedConv2d, self).__init__()
        # Accept a single kernel size or a list of per-group sizes.
        kernel_sizes = kernel_size if isinstance(kernel_size, list) else [kernel_size]
        num_groups = len(kernel_sizes)
        in_splits = _split_channels(in_channels, num_groups)
        out_splits = _split_channels(out_channels, num_groups)
        self.in_channels = sum(in_splits)
        self.out_channels = sum(out_splits)
        for idx, (k, in_ch, out_ch) in enumerate(zip(kernel_sizes, in_splits, out_splits)):
            # Depthwise: one filter per input channel within this split.
            conv_groups = in_ch if depthwise else 1
            self.add_module(
                str(idx),
                create_conv2d_pad(
                    in_ch, out_ch, k, stride=stride, padding=padding,
                    dilation=dilation, groups=conv_groups, **kwargs))
        self.splits = in_splits

    def forward(self, x):
        # Slice the channels, run each slice through its own convolution,
        # then re-join along the channel axis.
        chunks = torch.split(x, self.splits, 1)
        outputs = [conv(chunk) for conv, chunk in zip(self.values(), chunks)]
        return torch.cat(outputs, 1)
def centroid_and_bbox_from_coords(coords):
    """Return the centroid and axis-aligned bounding box of a set of points.

    Parameters
    ----------
    coords : list of (x, y) pairs, a pandas Series whose first element is
        such a list, or a DataFrame with ``X`` and ``Y`` columns.

    Returns
    -------
    ((xc, yc), [x1, y1, x2, y2])
        Centroid coordinates and the [min_x, min_y, max_x, max_y] box.

    Raises
    ------
    ValueError
        If no coordinates are supplied (the original code crashed with a
        ZeroDivisionError here).
    """
    # Normalize the accepted input forms to a plain list of (x, y) pairs.
    if isinstance(coords, pd.Series):
        coords = coords.to_list()[0]
    elif isinstance(coords, pd.DataFrame):
        coords = list(zip(coords.X, coords.Y))
    if not coords:
        raise ValueError('coords must contain at least one (x, y) pair')
    # Single pass to separate the axes instead of two list comprehensions.
    xs, ys = zip(*coords)
    xc = sum(xs) / len(xs)
    yc = sum(ys) / len(ys)
    return ((xc, yc), [min(xs), min(ys), max(xs), max(ys)])
class ModelSingleTagFieldInvalidTest(TagTestManager, TransactionTestCase):
    """Invalid SingleTagField definitions must raise ValueError at
    model-definition time."""
    manage_models = [test_models.SingleTagFieldModel]
    def test_invalid_to_model(self):
        # `to` must be a TagModel subclass, not an arbitrary model.
        with self.assertRaises(ValueError) as cm:
            class FailModel_invalid_to(models.Model):
                to_model = tag_models.SingleTagField(test_models.SingleTagFieldModel)
        self.assertEqual(str(cm.exception), 'Tag model must be a subclass of TagModel')
    def test_forbidden_to_field(self):
        # `to_field` is not a supported argument.
        with self.assertRaises(ValueError) as cm:
            class FailModel_forbidden_to(models.Model):
                to_field = tag_models.SingleTagField(to_field='fail')
        self.assertEqual(str(cm.exception), "Invalid argument 'to_field' for SingleTagField")
    def test_forbidden_rel_class(self):
        # `rel_class` is not a supported argument.
        with self.assertRaises(ValueError) as cm:
            class FailModel_forbidden_rel(models.Model):
                rel_class = tag_models.SingleTagField(rel_class='fail')
        self.assertEqual(str(cm.exception), "Invalid argument 'rel_class' for SingleTagField")
    def test_forbidden_max_count(self):
        # `max_count` is not a supported argument.
        with self.assertRaises(ValueError) as cm:
            class FailModel_forbidden_max_count(models.Model):
                max_count = tag_models.SingleTagField(max_count='fail')
        self.assertEqual(str(cm.exception), "Invalid argument 'max_count' for SingleTagField")
    def test_value_from_object_none(self):
        # With no tag set, value_from_object must return '' (not None).
        t1 = test_models.SingleTagFieldModel.objects.create(name='Test 1')
        field = test_models.SingleTagFieldModel._meta.get_field('title')
        self.assertEqual(field.value_from_object(t1), '')
class TabBar(QtWidgets.QTabBar):
    """Tab bar with compact, non-expanding tabs whose labels elide on the
    right, and a maximum tab width of 150 px."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setDrawBase(False)
        self.setExpanding(False)
        self.setElideMode(Qt.ElideRight)

    def tabSizeHint(self, index):
        # Use the default hint but cap the width at 150 px.
        hint = QtWidgets.QTabBar.tabSizeHint(self, index)
        width = min(hint.width(), 150)
        return QSize(width, hint.height())
class TagReader():
    """Reader for '####'-separated ABSA files: each line holds the tokens,
    token=TARGET-TAG pairs and token=OPINION-TAG pairs, which are converted
    into BIEOS sequences plus (target, opinion) pair information.

    ``label2id_map`` is a class-level label vocabulary shared across calls;
    it grows while reading labeled data.
    """
    label2id_map = {'<START>': 0}
    # NOTE(review): takes `cls` but carries no decorator -- presumably a
    # stripped @classmethod.
    def read_inst(cls, file, is_labeled, number, opinion_offset):
        """Read up to `number` instances (all, if number <= 0) from `file`.

        Target/opinion pairs whose distance is >= `opinion_offset` are
        dropped and the corresponding target tags cleared; for unlabeled
        data the offset is forced to 10000, effectively disabling the filter.
        """
        insts = []
        inputs = []
        outputs = []
        f = open(file, 'r', encoding='utf-8')
        if (not is_labeled):
            opinion_offset = 10000
        for line in f:
            line = line.strip()
            line = line.split('####')
            # Column 0: tokens; column 1: target tags; column 2: opinion tags.
            input = line[0].split()
            t_output = line[1].split()
            o_output = line[2].split()
            output = ['O' for x in range(len(input))]
            polarity = [0 for x in range(len(input))]
            # Collect the target tags and per-token sentiment polarity
            # (0=NEU, 1=POS, 2=NEG).
            for (i, t) in enumerate(t_output):
                t = t.split('=')[1]
                if (t != 'O'):
                    output[i] = t
                    if (t.split('-')[1] == 'POS'):
                        polarity[i] = 1
                    elif (t.split('-')[1] == 'NEG'):
                        polarity[i] = 2
                    elif (t.split('-')[1] == 'NEU'):
                        polarity[i] = 0
            # Group id of each target token, encoded as the length of the
            # tag's prefix (part before '-'); 0 means "no target here".
            output_t_idx = [0 for x in range(len(input))]
            for (i, tag) in enumerate(output):
                if (tag != 'O'):
                    tag = tag.split('-')[0]
                    output_t_idx[i] = len(tag)
            output_t = cls.ot2bieos_ts(output)
            # Same pass over the opinion tags.
            output = ['O' for x in range(len(input))]
            for (i, t) in enumerate(o_output):
                t = t.split('=')[1]
                if (t != 'O'):
                    output[i] = t
            output_o_idx = [0 for x in range(len(input))]
            for (i, tag) in enumerate(output):
                if (tag != 'O'):
                    tag = tag.split('-')[0]
                    output_o_idx[i] = len(tag)
            output_o = cls.ot2bieos_op(output)
            # Rebuild `output` as the BIEOS target sequence.
            output = ['O' for x in range(len(input))]
            for i in range(len(output)):
                if (output_t[i] != 'O'):
                    output[i] = output_t[i]
            # Pair each target group with the opinion tokens sharing its id.
            # Each pair tuple is (target_pos, polarity, direction,
            # distance, distance).
            pairs = {}
            target = [None]
            for (i, t) in enumerate(output_t_idx):
                if ((t != 0) and (t not in target)):
                    opinion_idx = [j for (j, x) in enumerate(output_o_idx) if (x == t)]
                    if (len(opinion_idx) > 0):
                        target.append(t)
                        # dire == 1 when the opinion lies to the right of
                        # the target token.
                        dire = 0
                        if ((opinion_idx[0] - i) > 0):
                            dire = 1
                        if (len(opinion_idx) == 1):
                            if (t not in pairs.keys()):
                                pairs[t] = [(i, polarity[i], dire, abs((opinion_idx[0] - i)), abs((opinion_idx[0] - i)))]
                            else:
                                pairs[t].append((i, polarity[i], dire, abs((opinion_idx[0] - i)), abs((opinion_idx[0] - i))))
                        elif (len(opinion_idx) > 1):
                            # Multi-token opinion: find gaps between
                            # non-contiguous opinion indices.
                            split = []
                            for idx in range(0, (len(opinion_idx) - 1)):
                                if ((opinion_idx[(idx + 1)] - opinion_idx[idx]) != 1):
                                    split.append(idx)
                            span = []  # NOTE(review): unused in this excerpt.
                            start = 0
                            if (len(split) > 0):
                                for s in split:
                                    if (dire == 0):
                                        if (t not in pairs.keys()):
                                            pairs[t] = [(i, polarity[i], dire, abs((opinion_idx[0] - i)), abs((opinion_idx[0] - i)))]
                                        else:
                                            pairs[t].append((i, polarity[i], dire, abs((opinion_idx[0] - i)), abs((opinion_idx[0] - i))))
                                    elif (t not in pairs.keys()):
                                        pairs[t] = [(i, polarity[i], dire, abs((opinion_idx[0] - i)), abs((opinion_idx[0] - i)))]
                                    else:
                                        pairs[t].append((i, polarity[i], dire, abs((opinion_idx[0] - i)), abs((opinion_idx[0] - i))))
                                    start = (s + 1)
                            elif (dire == 0):
                                if (t not in pairs.keys()):
                                    pairs[t] = [(i, polarity[i], dire, abs((opinion_idx[0] - i)), abs((opinion_idx[0] - i)))]
                                else:
                                    pairs[t].append((i, polarity[i], dire, abs((opinion_idx[0] - i)), abs((opinion_idx[0] - i))))
                            elif (t not in pairs.keys()):
                                pairs[t] = [(i, polarity[i], dire, abs((opinion_idx[0] - i)), abs((opinion_idx[0] - i)))]
                            else:
                                pairs[t].append((i, polarity[i], dire, abs((opinion_idx[0] - i)), abs((opinion_idx[0] - i))))
                    else:
                        # No matching opinion: clear this target's tags.
                        for (i, idx) in enumerate(output_t_idx):
                            if (idx == t):
                                output[i] = 'O'
            # Strip sentiment suffixes: keep only B/I/E/S/O.
            new_output = []
            for o in output:
                if (o != 'O'):
                    label = o.split('-')
                    new_output.append(label[0])
                else:
                    new_output.append(o)
            # Apply the opinion-offset filter; removed targets also have
            # their whole B..E span cleared from the tag sequence.
            new_pairs = []
            for key in pairs.keys():
                if (pairs[key][0][(- 1)] < opinion_offset):
                    new_pairs.append(pairs[key][0])
                else:
                    remove_target_id = pairs[key][0][0]
                    if (new_output[remove_target_id] == 'S'):
                        new_output[remove_target_id] = 'O'
                    elif (new_output[remove_target_id] == 'B'):
                        new_output[remove_target_id] = 'O'
                        for i in range(remove_target_id, len(input)):
                            if (new_output[i] != 'E'):
                                new_output[i] = 'O'
                            elif (new_output[i] == 'E'):
                                new_output[i] = 'O'
                                break
            output = (new_output, new_pairs)
            inst = LinearInstance((len(insts) + 1), 1, input, output)
            # Grow the shared label vocabulary when reading labeled data.
            for label in output[0]:
                if ((not (label in TagReader.label2id_map)) and is_labeled):
                    output_id = len(TagReader.label2id_map)
                    TagReader.label2id_map[label] = output_id
            if is_labeled:
                inst.set_labeled()
            else:
                inst.set_unlabeled()
            insts.append(inst)
            if ((len(insts) >= number) and (number > 0)):
                break
        return insts
    # NOTE(review): no `self`/`cls` parameter -- presumably a stripped
    # @staticmethod.
    def ot2bieos_ts(ts_tag_sequence):
        """Convert OT-style target tags ('X-SENTIMENT'/'O') to BIEOS tags
        keeping the sentiment suffix (e.g. 'B-POS')."""
        n_tags = len(ts_tag_sequence)
        new_ts_sequence = []
        prev_pos = '$$$'
        for i in range(n_tags):
            cur_ts_tag = ts_tag_sequence[i]
            if (cur_ts_tag == 'O'):
                new_ts_sequence.append('O')
                cur_pos = 'O'
            else:
                (cur_pos, cur_sentiment) = cur_ts_tag.split('-')
                if (cur_pos != prev_pos):
                    # First token of an entity: S if it is also the last.
                    if (i == (n_tags - 1)):
                        new_ts_sequence.append(('S-%s' % cur_sentiment))
                    else:
                        next_ts_tag = ts_tag_sequence[(i + 1)]
                        if (next_ts_tag == 'O'):
                            new_ts_sequence.append(('S-%s' % cur_sentiment))
                        else:
                            new_ts_sequence.append(('B-%s' % cur_sentiment))
                elif (i == (n_tags - 1)):
                    new_ts_sequence.append(('E-%s' % cur_sentiment))
                else:
                    next_ts_tag = ts_tag_sequence[(i + 1)]
                    if (next_ts_tag == 'O'):
                        new_ts_sequence.append(('E-%s' % cur_sentiment))
                    else:
                        new_ts_sequence.append(('I-%s' % cur_sentiment))
            prev_pos = cur_pos
        return new_ts_sequence
    # NOTE(review): no `self`/`cls` parameter -- presumably a stripped
    # @staticmethod.
    def ot2bieos_op(ts_tag_sequence):
        """Convert OT-style opinion tags to lowercase bieos tags with a
        generic '-o' suffix (e.g. 'b-o', 's-o')."""
        n_tags = len(ts_tag_sequence)
        new_ts_sequence = []
        prev_pos = '$$$'
        for i in range(n_tags):
            cur_ts_tag = ts_tag_sequence[i]
            if (cur_ts_tag == 'O'):
                new_ts_sequence.append('O')
                cur_pos = 'O'
            else:
                cur_pos = cur_ts_tag
                if (cur_pos != prev_pos):
                    if (i == (n_tags - 1)):
                        new_ts_sequence.append('s-o')
                    else:
                        next_ts_tag = ts_tag_sequence[(i + 1)]
                        if (next_ts_tag == 'O'):
                            new_ts_sequence.append('s-o')
                        else:
                            new_ts_sequence.append('b-o')
                elif (i == (n_tags - 1)):
                    new_ts_sequence.append('e-o')
                else:
                    next_ts_tag = ts_tag_sequence[(i + 1)]
                    if (next_ts_tag == 'O'):
                        new_ts_sequence.append('e-o')
                    else:
                        new_ts_sequence.append('i-o')
            prev_pos = cur_pos
        return new_ts_sequence
def run_procedure():
    """Run the whole mining pipeline for the algorithm selected by the
    module-level `flag`, then report results to the console and to the
    `text01` GUI widget.

    Relies on module-level state: `freArr` (patterns per level), `frenum`
    and `compnum` (counters), `sigma`/`sigmasize` (presumably thresholds --
    confirm), `text01` (tkinter text widget) and the timing globals.
    """
    global frenum
    global begin_time
    global end_time
    global flag
    # Load the data and build the first level of patterns.
    read_file()
    min_sigma()
    store_into_vec()
    begin_time = time.perf_counter()
    dealingfirstlevel(freArr)
    global f_level
    if ((flag == 1) or (flag == 4)):
        # Level-wise candidate generation until a level comes up empty.
        f_level = 1
        gen_candidate(f_level)
        while (len(freArr[f_level]) != 0):
            f_level = (f_level + 1)
            gen_candidate(f_level)
    elif (flag == 3):
        f_level = 0
        gen_candidateone(f_level)
        f_level = (f_level + 1)
        while (len(freArr[f_level]) != 0):
            gen_candidateone(f_level)
            f_level = (f_level + 1)
    elif (flag == 2):
        # Mine starting from each level-0 pattern.
        for e in range(len(freArr[0])):
            mineFre(freArr[0][e])
    end_time = time.perf_counter()
    # ---- console report ----
    if ((flag == 1) or (flag == 4) or (flag == 3)):
        for i in range(f_level):
            for j in range(len(freArr[i])):
                print((freArr[i][j].name + ' '), end='')
                frenum = (frenum + 1)
            print()
        print('\n')
        print('The number of frequent patterns:', frenum, '\n')
        print('The time-consuming:', (end_time - begin_time), '\n')
        print('The number of calculation:', compnum, '\n')
    elif (flag == 2):
        for i in range(2):
            for j in range(len(freArr[i])):
                print((freArr[i][j].name + ' '), end='')
                frenum = (frenum + 1)
        print('\n')
        print('The number of frequent patterns:', frenum, '\n')
        print('The time-consuming:', (end_time - begin_time), '\n')
        print('The number of calculation:', compnum, '\n')
    # NOTE(review): the nesting of this loop is ambiguous in the mangled
    # source; placed at function level so the sigma values always print.
    for i in range(sigmasize):
        print(sigma[i], '\t')
    # ---- GUI report: clear the widget and refill it ----
    text01.delete('1.0', 'end')
    if ((flag == 1) or (flag == 4) or (flag == 3)):
        for i in range(f_level):
            for j in range(len(freArr[i])):
                var = ((freArr[i][j].name + ' '),)
                text01.insert('insert', var)
            text01.insert('insert', '\n')
        text01.insert('insert', '\n')
        ss = 'The number of frequent patterns: '
        text01.insert('insert', ss)
        text01.insert('insert', frenum)
        text01.insert('insert', '\n')
        ss1 = 'The number of calculation: '
        text01.insert('insert', ss1)
        text01.insert('insert', compnum)
        text01.insert('insert', '\n')
        ss2 = 'The time-consuming: '
        text01.insert('insert', ss2)
        # Elapsed time in milliseconds.
        ee = ((end_time - begin_time) * 1000)
        ee1 = str(ee)
        text01.insert('insert', ee1)
        text01.insert('insert', 'ms')
        text01.insert('insert', '\n')
    elif (flag == 2):
        for i in range(2):
            for j in range(len(freArr[i])):
                var = ((freArr[i][j].name + ' '),)
                text01.insert('insert', var)
            text01.insert('insert', '\n')
        text01.insert('insert', '\n')
        ss = 'The number of frequent patterns: '
        text01.insert('insert', ss)
        text01.insert('insert', frenum)
        text01.insert('insert', '\n')
        ss1 = 'The number of calculation: '
        text01.insert('insert', ss1)
        text01.insert('insert', compnum)
        text01.insert('insert', '\n')
        ss2 = 'The time-consuming: '
        text01.insert('insert', ss2)
        ee = ((end_time - begin_time) * 1000)
        ee1 = str(ee)
        text01.insert('insert', ee1)
        text01.insert('insert', 'ms')
        text01.insert('insert', '\n')
class UploadedFile():
    """In-memory uploaded file: name, raw bytes, and MIME type."""
    def __init__(self, filename, contents, mime_type):
        # `contents` must be raw bytes, never str.
        assert isinstance(contents, bytes)
        self.contents = contents
        self.filename = filename
        self.mime_type = mime_type
    # NOTE(review): no decorator here -- possibly a stripped @property.
    def size(self):
        """Size of the contents in bytes."""
        return len(self.contents)
    # NOTE(review): this is a generator yielding an open stream; it was
    # presumably decorated with @contextlib.contextmanager originally.
    def open(self):
        """Yield a BytesIO stream over the contents."""
        with io.BytesIO(self.contents) as f:
            (yield f)
def run_data_migration(apps, schema_editor):
    """Data migration: replace NULL with '' on the text columns of the
    questions-app models (Catalog, Section, QuestionSet, Question)."""
    # The lang1..lang5 field families repeat across models; build them once.
    langs = range(1, 6)
    titles = ['title_lang%i' % i for i in langs]
    helps = ['help_lang%i' % i for i in langs]
    texts = ['text_lang%i' % i for i in langs]

    Catalog = apps.get_model('questions', 'Catalog')
    Section = apps.get_model('questions', 'Section')
    QuestionSet = apps.get_model('questions', 'QuestionSet')
    Question = apps.get_model('questions', 'Question')

    set_null_to_blank(Catalog.objects.all(),
                      ['uri', 'uri_prefix', 'key', 'comment'] + titles)
    set_null_to_blank(Section.objects.all(),
                      ['uri', 'uri_prefix', 'key', 'path', 'comment'] + titles)
    set_null_to_blank(QuestionSet.objects.all(),
                      ['uri', 'uri_prefix', 'key', 'path', 'comment'] + titles + helps)
    set_null_to_blank(Question.objects.all(),
                      ['uri', 'uri_prefix', 'key', 'comment'] + texts + helps
                      + ['value_type', 'widget_type', 'unit'])
def _interpolate(name, dim, interpolate_mode):
    """Build an ONNX symbolic function for an interpolation op.

    Returns ``symbolic_fn(g, input, output_size, *args)`` which emits an
    ONNX ``Resize`` node for the given ``interpolate_mode``.
    """
    def symbolic_fn(g, input, output_size, *args):
        (scales, align_corners) = sym_help._get_interpolate_attributes(g, interpolate_mode, args)
        align_corners = sym_help._maybe_get_scalar(align_corners)
        # Map PyTorch's align_corners semantics onto ONNX Resize's
        # coordinate_transformation_mode.
        transformation_mode = ('asymmetric' if (interpolate_mode == 'nearest') else ('align_corners' if align_corners else 'pytorch_half_pixel'))
        # Empty tensor passed where Resize expects its `roi` input.
        empty_tensor = g.op('Constant', value_t=torch.tensor([], dtype=torch.float32))
        if (scales is None):
            if (('ONNX_BACKEND' in os.environ) and (os.environ['ONNX_BACKEND'] == 'TensorRT')):
                # TensorRT path: build a constant full output size by
                # prepending the static first two input dims (batch/channel)
                # to the requested spatial size.
                input_size = input.type().sizes()
                input_size = input_size[:2]
                output_size = sym_help._maybe_get_const(output_size, 'is')
                input_size.extend(output_size)
                output_size = g.op('Constant', value_t=torch.tensor(input_size, dtype=torch.int64))
            else:
                # Dynamic path: concat Shape(input)[0:2] with the runtime
                # output size (cast to int64).
                input_size = g.op('Shape', input)
                input_size_beg = sym_help._slice_helper(g, input_size, axes=[0], ends=[2], starts=[0])
                output_size = g.op('Cast', output_size, to_i=sym_help.cast_pytorch_to_onnx['Long'])
                output_size = g.op('Concat', input_size_beg, output_size, axis_i=0)
            # Resize still requires a scales input; pass an empty one.
            scales = g.op('Constant', value_t=torch.tensor([], dtype=torch.float32))
            return g.op('Resize', input, empty_tensor, scales, output_size, coordinate_transformation_mode_s=transformation_mode, cubic_coeff_a_f=(- 0.75), mode_s=interpolate_mode, nearest_mode_s='floor')
        else:
            # Scales given explicitly: the size input is omitted.
            return g.op('Resize', input, empty_tensor, scales, coordinate_transformation_mode_s=transformation_mode, cubic_coeff_a_f=(- 0.75), mode_s=interpolate_mode, nearest_mode_s='floor')
    return symbolic_fn
class XlibWindow(BaseWindow):
    """Xlib implementation of the window: one top-level X window plus an
    inner "view" child window that receives input and GL output."""

    _x_display = None                   # X display connection
    _x_screen_id = None
    _x_ic = None                        # X input context (XIM) for text input
    _window = None                      # top-level X window handle
    _override_redirect = False

    _x = 0
    _y = 0

    # Position the pointer is warped back to while mouse-exclusive.
    _mouse_exclusive_client = None
    _mouse_buttons = [False] * 6

    _active = True
    _applied_mouse_exclusive = False
    _applied_keyboard_exclusive = False
    _mapped = False
    _lost_context = False
    _lost_context_state = False

    _enable_xsync = False
    _current_sync_value = None
    _current_sync_valid = False

    # All X event types (0x1ffffff covers every event mask bit) except the
    # ones we never want delivered.  The base constant had been dropped from
    # this expression, leaving it syntactically invalid.
    _default_event_mask = (0x1ffffff
                           & ~xlib.PointerMotionHintMask
                           & ~xlib.ResizeRedirectMask
                           & ~xlib.SubstructureNotifyMask)
    def __init__(self, *args, **kwargs):
        """Build the per-instance platform-event handler tables, then defer
        to the base window constructor.

        Handlers are methods carrying ``_platform_event_data`` (attached by
        decorators elsewhere in this module); ones marked ``_view`` go to the
        inner view window, the rest to the top-level window.
        """
        self._event_handlers = {}
        self._view_event_handlers = {}
        for name in self._platform_event_names:
            if (not hasattr(self, name)):
                continue
            func = getattr(self, name)
            for message in func._platform_event_data:
                if hasattr(func, '_view'):
                    self._view_event_handlers[message] = func
                else:
                    self._event_handlers[message] = func
        super(XlibWindow, self).__init__(*args, **kwargs)
        # Probe once, process-wide, whether XKB detectable auto-repeat is
        # available; if so key repeats can be filtered via pressed_keys.
        global _can_detect_autorepeat
        if (_can_detect_autorepeat is None):
            supported_rtrn = c_int()
            _can_detect_autorepeat = XkbSetDetectableAutoRepeat(self.display._display, c_int(1), byref(supported_rtrn))
        if _can_detect_autorepeat:
            self.pressed_keys = set()
        # Cache of the last text this window placed on the CLIPBOARD selection.
        self._clipboard_str: Optional[str] = None
    def _recreate(self, changes):
        """Tear down as much state as the listed attribute *changes* require,
        then rebuild the window via _create()."""
        # Fullscreen / resizable switches need a brand-new X window.
        if (('fullscreen' in changes) or ('resizable' in changes)):
            self.context.detach()
            xlib.XDestroyWindow(self._x_display, self._window)
            del self.display._window_map[self._window]
            del self.display._window_map[self._view]
            self._window = None
            self._mapped = False
        # A context change invalidates GL objects and state.
        if ('context' in changes):
            self._lost_context = True
            self._lost_context_state = True
        self._create()
def _create_xdnd_atoms(self, display):
self._xdnd_atoms = {'XdndAware': xlib.XInternAtom(display, asbytes('XdndAware'), False), 'XdndEnter': xlib.XInternAtom(display, asbytes('XdndEnter'), False), 'XdndTypeList': xlib.XInternAtom(display, asbytes('XdndTypeList'), False), 'XdndDrop': xlib.XInternAtom(display, asbytes('XdndDrop'), False), 'XdndFinished': xlib.XInternAtom(display, asbytes('XdndFinished'), False), 'XdndSelection': xlib.XInternAtom(display, asbytes('XdndSelection'), False), 'XdndPosition': xlib.XInternAtom(display, asbytes('XdndPosition'), False), 'XdndStatus': xlib.XInternAtom(display, asbytes('XdndStatus'), False), 'XdndActionCopy': xlib.XInternAtom(display, asbytes('XdndActionCopy'), False), 'text/uri-list': xlib.XInternAtom(display, asbytes('text/uri-list'), False)}
    def _create(self):
        """Create (or re-configure) the top-level X window and its view child,
        attach the GL context and set up WM properties, XSync, XDND,
        clipboard atoms and the X input method."""
        if (self._window and self._mapped):
            self._unmap()
        self._x_display = self.display._display
        self._x_screen_id = self.display.x_screen
        if (not self._window):
            # --- Window creation: pick a visual, colormap and attributes. ---
            root = xlib.XRootWindow(self._x_display, self._x_screen_id)
            visual_info = self.config.get_visual_info()
            if (self.style in ('transparent', 'overlay')):
                # 32-bit TrueColor visual needed for an alpha channel.
                xlib.XMatchVisualInfo(self._x_display, self._x_screen_id, 32, xlib.TrueColor, visual_info)
            visual = visual_info.visual
            visual_id = xlib.XVisualIDFromVisual(visual)
            default_visual = xlib.XDefaultVisual(self._x_display, self._x_screen_id)
            default_visual_id = xlib.XVisualIDFromVisual(default_visual)
            window_attributes = xlib.XSetWindowAttributes()
            if (visual_id != default_visual_id):
                # Non-default visual requires its own colormap.
                window_attributes.colormap = xlib.XCreateColormap(self._x_display, root, visual, xlib.AllocNone)
            else:
                window_attributes.colormap = xlib.XDefaultColormap(self._x_display, self._x_screen_id)
            window_attributes.bit_gravity = xlib.StaticGravity
            mask = ((xlib.CWColormap | xlib.CWBitGravity) | xlib.CWBackPixel)
            if (self.style in ('transparent', 'overlay')):
                mask |= xlib.CWBorderPixel
                window_attributes.border_pixel = 0
                window_attributes.background_pixel = 0
            if self._fullscreen:
                # Center the view inside the full screen area.
                (width, height) = (self.screen.width, self.screen.height)
                self._view_x = ((width - self._width) // 2)
                self._view_y = ((height - self._height) // 2)
            else:
                (width, height) = (self._width, self._height)
                self._view_x = self._view_y = 0
            self._window = xlib.XCreateWindow(self._x_display, root, 0, 0, width, height, 0, visual_info.depth, xlib.InputOutput, visual, mask, byref(window_attributes))
            self._view = xlib.XCreateWindow(self._x_display, self._window, self._view_x, self._view_y, self._width, self._height, 0, visual_info.depth, xlib.InputOutput, visual, mask, byref(window_attributes))
            xlib.XMapWindow(self._x_display, self._view)
            xlib.XSelectInput(self._x_display, self._view, self._default_event_mask)
            self.display._window_map[self._window] = self.dispatch_platform_event
            self.display._window_map[self._view] = self.dispatch_platform_event_view
            self.canvas = XlibCanvas(self.display, self._view)
            self.context.attach(self.canvas)
            self.context.set_vsync(self._vsync)
            self._enable_xsync = (pyglet.options['xsync'] and self.display._enable_xsync and self.config.double_buffer)
            # --- WM protocols (close button, optional XSync resize). ---
            protocols = []
            protocols.append(xlib.XInternAtom(self._x_display, asbytes('WM_DELETE_WINDOW'), False))
            if self._enable_xsync:
                protocols.append(xlib.XInternAtom(self._x_display, asbytes('_NET_WM_SYNC_REQUEST'), False))
            protocols = (c_ulong * len(protocols))(*protocols)
            xlib.XSetWMProtocols(self._x_display, self._window, protocols, len(protocols))
            if self._enable_xsync:
                # Publish the XSync counter used for synchronized resizes.
                value = xsync.XSyncValue()
                self._sync_counter = xlib.XID(xsync.XSyncCreateCounter(self._x_display, value))
                atom = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_SYNC_REQUEST_COUNTER'), False)
                ptr = pointer(self._sync_counter)
                xlib.XChangeProperty(self._x_display, self._window, atom, XA_CARDINAL, 32, xlib.PropModeReplace, cast(ptr, POINTER(c_ubyte)), 1)
            # --- Drag & drop and clipboard atoms. ---
            self._create_xdnd_atoms(self._x_display)
            self._clipboard_atom = xlib.XInternAtom(self._x_display, asbytes('CLIPBOARD'), False)
            self._utf8_atom = xlib.XInternAtom(self._x_display, asbytes('UTF8_STRING'), False)
            self._target_atom = xlib.XInternAtom(self._x_display, asbytes('TARGETS'), False)
            self._incr_atom = xlib.XInternAtom(self._x_display, asbytes('INCR'), False)
            if self._file_drops:
                # Announce XDND support on this window.
                self._xdnd_source = None
                self._xdnd_version = None
                self._xdnd_format = None
                self._xdnd_position = (0, 0)
                VERSION = c_ulong(int(XDND_VERSION))
                ptr = pointer(VERSION)
                xlib.XChangeProperty(self._x_display, self._window, self._xdnd_atoms['XdndAware'], XA_ATOM, 32, xlib.PropModeReplace, cast(ptr, POINTER(c_ubyte)), 1)
        # --- (Re-)configuration applied on every call. ---
        attributes = xlib.XSetWindowAttributes()
        attributes_mask = 0
        self._override_redirect = False
        if self._fullscreen:
            if pyglet.options['xlib_fullscreen_override_redirect']:
                # Bypass the window manager entirely.
                attributes.override_redirect = self._fullscreen
                attributes_mask |= xlib.CWOverrideRedirect
                self._override_redirect = True
            else:
                self._set_wm_state('_NET_WM_STATE_FULLSCREEN')
        if self._fullscreen:
            xlib.XMoveResizeWindow(self._x_display, self._window, self.screen.x, self.screen.y, self.screen.width, self.screen.height)
        else:
            xlib.XResizeWindow(self._x_display, self._window, self._width, self._height)
        xlib.XChangeWindowAttributes(self._x_display, self._window, attributes_mask, byref(attributes))
        # --- Window style / decorations. ---
        styles = {self.WINDOW_STYLE_DEFAULT: '_NET_WM_WINDOW_TYPE_NORMAL', self.WINDOW_STYLE_DIALOG: '_NET_WM_WINDOW_TYPE_DIALOG', self.WINDOW_STYLE_TOOL: '_NET_WM_WINDOW_TYPE_UTILITY'}
        if (self._style in styles):
            self._set_atoms_property('_NET_WM_WINDOW_TYPE', (styles[self._style],))
        elif (self._style in (self.WINDOW_STYLE_BORDERLESS, self.WINDOW_STYLE_OVERLAY)):
            # Motif hints: turn off all decorations.
            MWM_HINTS_DECORATIONS = (1 << 1)
            PROP_MWM_HINTS_ELEMENTS = 5
            mwmhints = mwmhints_t()
            mwmhints.flags = MWM_HINTS_DECORATIONS
            mwmhints.decorations = 0
            name = xlib.XInternAtom(self._x_display, asbytes('_MOTIF_WM_HINTS'), False)
            xlib.XChangeProperty(self._x_display, self._window, name, name, 32, xlib.PropModeReplace, cast(pointer(mwmhints), POINTER(c_ubyte)), PROP_MWM_HINTS_ELEMENTS)
        if ((not self._resizable) and (not self._fullscreen)):
            # Pin min == max to make the WM treat the window as fixed-size.
            self.set_minimum_size(self._width, self._height)
            self.set_maximum_size(self._width, self._height)
        self.set_caption(self._caption)
        self.set_wm_class(self._caption)
        # Ask the compositor to get out of the way when fullscreen.
        _NET_WM_BYPASS_COMPOSITOR_HINT_ON = c_ulong(int(self._fullscreen))
        name = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_BYPASS_COMPOSITOR'), False)
        ptr = pointer(_NET_WM_BYPASS_COMPOSITOR_HINT_ON)
        xlib.XChangeProperty(self._x_display, self._window, name, XA_CARDINAL, 32, xlib.PropModeReplace, cast(ptr, POINTER(c_ubyte)), 1)
        # --- X input method / input context for text events. ---
        if (_have_utf8 and (not self._x_ic)):
            if (not self.display._x_im):
                xlib.XSetLocaleModifiers(asbytes('=none'))
                self.display._x_im = xlib.XOpenIM(self._x_display, None, None, None)
                xlib.XFlush(self._x_display)
            xlib.XCreateIC.argtypes = [xlib.XIM, c_char_p, c_int, c_char_p, xlib.Window, c_char_p, xlib.Window, c_void_p]
            self._x_ic = xlib.XCreateIC(self.display._x_im, asbytes('inputStyle'), (xlib.XIMPreeditNothing | xlib.XIMStatusNothing), asbytes('clientWindow'), self._window, asbytes('focusWindow'), self._window, None)
            filter_events = c_ulong()
            xlib.XGetICValues(self._x_ic, 'filterEvents', byref(filter_events), None)
            self._default_event_mask |= filter_events.value
            xlib.XSetICFocus(self._x_ic)
        self.switch_to()
        if self._visible:
            self.set_visible(True)
        self.set_mouse_platform_visible()
        # Force _update_exclusivity to re-apply grabs for the new window.
        self._applied_mouse_exclusive = None
        self._update_exclusivity()
    def _map(self):
        """Map (show) the window and block until the server confirms it,
        then fire on_resize / on_show / on_expose."""
        if self._mapped:
            return
        # Narrow the event mask so the wait loop only sees structure events.
        xlib.XSelectInput(self._x_display, self._window, xlib.StructureNotifyMask)
        xlib.XMapRaised(self._x_display, self._window)
        e = xlib.XEvent()
        while True:
            xlib.XNextEvent(self._x_display, e)
            if (e.type == xlib.ConfigureNotify):
                # The WM may have adjusted our size while mapping.
                self._width = e.xconfigure.width
                self._height = e.xconfigure.height
            elif (e.type == xlib.MapNotify):
                break
        xlib.XSelectInput(self._x_display, self._window, self._default_event_mask)
        self._mapped = True
        if self._override_redirect:
            # No WM involved, so claim focus ourselves.
            self.activate()
        self._update_view_size()
        self.dispatch_event('on_resize', self._width, self._height)
        self.dispatch_event('on_show')
        self.dispatch_event('on_expose')
    def _unmap(self):
        """Unmap (hide) the window and block until UnmapNotify arrives."""
        if (not self._mapped):
            return
        xlib.XSelectInput(self._x_display, self._window, xlib.StructureNotifyMask)
        xlib.XUnmapWindow(self._x_display, self._window)
        e = xlib.XEvent()
        while True:
            xlib.XNextEvent(self._x_display, e)
            if (e.type == xlib.UnmapNotify):
                break
        xlib.XSelectInput(self._x_display, self._window, self._default_event_mask)
        self._mapped = False
def _get_root(self):
attributes = xlib.XWindowAttributes()
xlib.XGetWindowAttributes(self._x_display, self._window, byref(attributes))
return attributes.root
def _is_reparented(self):
root = c_ulong()
parent = c_ulong()
children = pointer(c_ulong())
n_children = c_uint()
xlib.XQueryTree(self._x_display, self._window, byref(root), byref(parent), byref(children), byref(n_children))
return (root.value != parent.value)
    def close(self):
        """Destroy the GL context, the X window and the input context, and
        unregister from the display's window map."""
        if (not self._window):
            # Already closed (or never created).
            return
        self.context.destroy()
        self._unmap()
        if self._window:
            xlib.XDestroyWindow(self._x_display, self._window)
        del self.display._window_map[self._window]
        del self.display._window_map[self._view]
        self._window = None
        self._view_event_handlers.clear()
        self._event_handlers.clear()
        if _have_utf8:
            xlib.XDestroyIC(self._x_ic)
            self._x_ic = None
        super(XlibWindow, self).close()
def switch_to(self):
if self.context:
self.context.set_current()
    def flip(self):
        """Swap the GL buffers.

        The software mouse cursor is drawn first so it lands on the
        presented frame; a pending XSync resize acknowledgement is sent after.
        """
        self.draw_mouse_cursor()
        if self.context:
            self.context.flip()
        self._sync_resize()
def set_vsync(self, vsync: bool) -> None:
if (pyglet.options['vsync'] is not None):
vsync = pyglet.options['vsync']
super().set_vsync(vsync)
self.context.set_vsync(vsync)
def set_caption(self, caption):
if (caption is None):
caption = ''
self._caption = caption
self._set_text_property('WM_NAME', caption, allow_utf8=False)
self._set_text_property('WM_ICON_NAME', caption, allow_utf8=False)
self._set_text_property('_NET_WM_NAME', caption)
self._set_text_property('_NET_WM_ICON_NAME', caption)
    def set_wm_class(self, name):
        """Set the WM_CLASS hint (res_class/res_name) from *name*;
        non-ASCII names fall back to 'pyglet'."""
        try:
            name = name.encode('ascii')
        except UnicodeEncodeError:
            name = 'pyglet'
        hint = xlib.XAllocClassHint()
        hint.contents.res_class = asbytes(name)
        hint.contents.res_name = asbytes(name.lower())
        xlib.XSetClassHint(self._x_display, self._window, hint.contents)
        xlib.XFree(hint)
    def get_caption(self):
        """Return the current window title string."""
        return self._caption
    def set_size(self, width: int, height: int) -> None:
        """Resize the window (and view) to width x height pixels."""
        super().set_size(width, height)
        if (not self._resizable):
            # Keep min == max so the WM continues to forbid user resizing.
            self.set_minimum_size(width, height)
            self.set_maximum_size(width, height)
        xlib.XResizeWindow(self._x_display, self._window, width, height)
        self._update_view_size()
        self.dispatch_event('on_resize', width, height)
    def _update_view_size(self):
        """Resize the inner view window to match the logical window size."""
        xlib.XResizeWindow(self._x_display, self._view, self._width, self._height)
def set_location(self, x, y):
if self._is_reparented():
attributes = xlib.XWindowAttributes()
xlib.XGetWindowAttributes(self._x_display, self._window, byref(attributes))
x -= attributes.x
y -= attributes.y
xlib.XMoveWindow(self._x_display, self._window, x, y)
def get_location(self):
child = xlib.Window()
x = c_int()
y = c_int()
xlib.XTranslateCoordinates(self._x_display, self._window, self._get_root(), 0, 0, byref(x), byref(y), byref(child))
return (x.value, y.value)
    def activate(self):
        """Give this window keyboard focus (no-op when not yet created)."""
        if (self._x_display and self._window):
            xlib.XSetInputFocus(self._x_display, self._window, xlib.RevertToParent, xlib.CurrentTime)
def set_visible(self, visible: bool=True) -> None:
super().set_visible(visible)
if visible:
self._map()
else:
self._unmap()
    def set_minimum_size(self, width: int, height: int) -> None:
        """Set the minimum client size and push it to the WM size hints."""
        super().set_minimum_size(width, height)
        self._set_wm_normal_hints()
    def set_maximum_size(self, width: int, height: int) -> None:
        """Set the maximum client size and push it to the WM size hints."""
        super().set_maximum_size(width, height)
        self._set_wm_normal_hints()
    def minimize(self):
        """Iconify (minimize) the window."""
        xlib.XIconifyWindow(self._x_display, self._window, self._x_screen_id)
    def maximize(self):
        """Maximize the window in both dimensions via _NET_WM_STATE."""
        self._set_wm_state('_NET_WM_STATE_MAXIMIZED_HORZ', '_NET_WM_STATE_MAXIMIZED_VERT')
def _downsample_1bit(pixelarray):
byte_list = []
value = 0
for (i, pixel) in enumerate(pixelarray):
index = (i % 8)
if pixel:
value |= (1 << index)
if (index == 7):
byte_list.append(value)
value = 0
return bytes(byte_list)
    # NOTE(review): `_cache()` looks like the remnant of a stripped decorator
    # (possibly functools.lru_cache) — confirm against upstream history.
    _cache()
    def _create_cursor_from_image(self, cursor):
        """Build an X pixmap cursor (1-bit colour plane + 1-bit mask) from an
        image cursor's texture and return the cursor XID."""
        texture = cursor.texture
        width = texture.width
        height = texture.height
        # 'AL' yields interleaved alpha/luminance pairs; the negative pitch
        # flips rows so the origin matches X's top-left bitmap layout.
        alpha_luma_bytes = texture.get_image_data().get_data('AL', ((- width) * 2))
        mask_data = self._downsample_1bit(alpha_luma_bytes[0::2])
        bmp_data = self._downsample_1bit(alpha_luma_bytes[1::2])
        bitmap = xlib.XCreateBitmapFromData(self._x_display, self._window, bmp_data, width, height)
        mask = xlib.XCreateBitmapFromData(self._x_display, self._window, mask_data, width, height)
        white = xlib.XColor(red=65535, green=65535, blue=65535)
        black = xlib.XColor()
        # Clamp the hotspot into the image; y is flipped to X's convention.
        hot_x = min(max(0, int(self._mouse_cursor.hot_x)), width)
        hot_y = min(max(0, int((height - self._mouse_cursor.hot_y))), height)
        cursor = xlib.XCreatePixmapCursor(self._x_display, bitmap, mask, white, black, hot_x, hot_y)
        xlib.XFreePixmap(self._x_display, bitmap)
        xlib.XFreePixmap(self._x_display, mask)
        return cursor
    def set_mouse_platform_visible(self, platform_visible=None):
        """Show or hide the hardware cursor over this window.

        With platform_visible=None the state is derived from the logical
        mouse visibility and whether the current cursor is GL-drawn.
        """
        if (not self._window):
            return
        if (platform_visible is None):
            platform_visible = (self._mouse_visible and (not self._mouse_cursor.gl_drawable))
        if (platform_visible is False):
            # Hide: install a fully transparent 8x8 pixmap cursor.
            black = xlib.XColor()
            bitmap = xlib.XCreateBitmapFromData(self._x_display, self._window, bytes(8), 8, 8)
            cursor = xlib.XCreatePixmapCursor(self._x_display, bitmap, bitmap, black, black, 0, 0)
            xlib.XDefineCursor(self._x_display, self._window, cursor)
            xlib.XFreeCursor(self._x_display, cursor)
            xlib.XFreePixmap(self._x_display, bitmap)
        elif (isinstance(self._mouse_cursor, ImageMouseCursor) and self._mouse_cursor.hw_drawable):
            # Custom image cursor rendered by the X server.
            cursor = self._create_cursor_from_image(self._mouse_cursor)
            xlib.XDefineCursor(self._x_display, self._window, cursor)
        elif isinstance(self._mouse_cursor, XlibMouseCursor):
            # Pre-built native cursor.
            xlib.XDefineCursor(self._x_display, self._window, self._mouse_cursor.cursor)
        else:
            # Fall back to the parent window's (default) cursor.
            xlib.XUndefineCursor(self._x_display, self._window)
    def set_mouse_position(self, x, y):
        """Warp the pointer to (x, y) in window coordinates; y is measured
        from the bottom, hence the height - y flip for X."""
        xlib.XWarpPointer(self._x_display, 0, self._window, 0, 0, 0, 0, x, (self._height - y))
    def _update_exclusivity(self):
        """Apply or release pointer/keyboard grabs so the actual X grab state
        matches the requested exclusivity (only while the window is active)."""
        mouse_exclusive = (self._active and self._mouse_exclusive)
        keyboard_exclusive = (self._active and self._keyboard_exclusive)
        if (mouse_exclusive != self._applied_mouse_exclusive):
            if mouse_exclusive:
                self.set_mouse_platform_visible(False)
                xlib.XGrabPointer(self._x_display, self._window, True, 0, xlib.GrabModeAsync, xlib.GrabModeAsync, self._window, 0, xlib.CurrentTime)
                # Park the pointer at the window centre; motion events are
                # then interpreted relative to this point.
                x = (self._width // 2)
                y = (self._height // 2)
                self._mouse_exclusive_client = (x, y)
                self.set_mouse_position(x, y)
            elif (self._fullscreen and (not self.screen._xinerama)):
                # Non-exclusive fullscreen: confine the pointer to the view.
                self.set_mouse_position(0, 0)
                r = xlib.XGrabPointer(self._x_display, self._view, True, 0, xlib.GrabModeAsync, xlib.GrabModeAsync, self._view, 0, xlib.CurrentTime)
                if r:
                    # Grab failed; retry on the next event dispatch.
                    self._applied_mouse_exclusive = None
                    return
                self.set_mouse_platform_visible()
            else:
                xlib.XUngrabPointer(self._x_display, xlib.CurrentTime)
                self.set_mouse_platform_visible()
            self._applied_mouse_exclusive = mouse_exclusive
        if (keyboard_exclusive != self._applied_keyboard_exclusive):
            if keyboard_exclusive:
                xlib.XGrabKeyboard(self._x_display, self._window, False, xlib.GrabModeAsync, xlib.GrabModeAsync, xlib.CurrentTime)
            else:
                xlib.XUngrabKeyboard(self._x_display, xlib.CurrentTime)
            self._applied_keyboard_exclusive = keyboard_exclusive
def set_exclusive_mouse(self, exclusive=True):
if (exclusive == self._mouse_exclusive):
return
super().set_exclusive_mouse(exclusive)
self._update_exclusivity()
def set_exclusive_keyboard(self, exclusive=True):
if (exclusive == self._keyboard_exclusive):
return
super().set_exclusive_keyboard(exclusive)
self._update_exclusivity()
    def get_system_mouse_cursor(self, name):
        """Return a native cursor object for the named system cursor.

        Raises MouseCursorException for unknown names.
        """
        if (name == self.CURSOR_DEFAULT):
            return DefaultMouseCursor()
        # Map pyglet cursor names onto the X cursor-font glyphs.
        cursor_shapes = {self.CURSOR_CROSSHAIR: cursorfont.XC_crosshair, self.CURSOR_HAND: cursorfont.XC_hand2, self.CURSOR_HELP: cursorfont.XC_question_arrow, self.CURSOR_NO: cursorfont.XC_pirate, self.CURSOR_SIZE: cursorfont.XC_fleur, self.CURSOR_SIZE_UP: cursorfont.XC_top_side, self.CURSOR_SIZE_UP_RIGHT: cursorfont.XC_top_right_corner, self.CURSOR_SIZE_RIGHT: cursorfont.XC_right_side, self.CURSOR_SIZE_DOWN_RIGHT: cursorfont.XC_bottom_right_corner, self.CURSOR_SIZE_DOWN: cursorfont.XC_bottom_side, self.CURSOR_SIZE_DOWN_LEFT: cursorfont.XC_bottom_left_corner, self.CURSOR_SIZE_LEFT: cursorfont.XC_left_side, self.CURSOR_SIZE_UP_LEFT: cursorfont.XC_top_left_corner, self.CURSOR_SIZE_UP_DOWN: cursorfont.XC_sb_v_double_arrow, self.CURSOR_SIZE_LEFT_RIGHT: cursorfont.XC_sb_h_double_arrow, self.CURSOR_TEXT: cursorfont.XC_xterm, self.CURSOR_WAIT: cursorfont.XC_watch, self.CURSOR_WAIT_ARROW: cursorfont.XC_watch}
        if (name not in cursor_shapes):
            raise MouseCursorException(('Unknown cursor name "%s"' % name))
        cursor = xlib.XCreateFontCursor(self._x_display, cursor_shapes[name])
        return XlibMouseCursor(cursor)
    def set_icon(self, *images):
        """Publish the given image(s) on the _NET_WM_ICON property.

        Each entry is a (width, height) header followed by ARGB pixel data;
        the channel order depends on host endianness and sizeof(c_ulong)
        (each 32-bit pixel is padded to a c_ulong).
        """
        import sys
        fmt = {('little', 4): 'BGRA', ('little', 8): 'BGRAAAAA', ('big', 4): 'ARGB', ('big', 8): 'AAAAARGB'}[(sys.byteorder, sizeof(c_ulong))]
        data = asbytes('')
        for image in images:
            image = image.get_image_data()
            # Negative pitch: X wants rows top-down.
            pitch = (- (image.width * len(fmt)))
            s = c_buffer((sizeof(c_ulong) * 2))
            memmove(s, cast((c_ulong * 2)(image.width, image.height), POINTER(c_ubyte)), len(s))
            data += (s.raw + image.get_data(fmt, pitch))
        buffer = (c_ubyte * len(data))()
        memmove(buffer, data, len(data))
        atom = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_ICON'), False)
        xlib.XChangeProperty(self._x_display, self._window, atom, XA_CARDINAL, 32, xlib.PropModeReplace, buffer, (len(data) // sizeof(c_ulong)))
    def set_clipboard_text(self, text: str):
        """Claim the CLIPBOARD selection and cache *text* as its content.

        The UTF-8 bytes are also stored on our own window property; actual
        transfers happen when another client sends a SelectionRequest.
        """
        xlib.XSetSelectionOwner(self._x_display, self._clipboard_atom, self._window, xlib.CurrentTime)
        if (xlib.XGetSelectionOwner(self._x_display, self._clipboard_atom) == self._window):
            self._clipboard_str = text
            str_bytes = text.encode('utf-8')
            size = len(str_bytes)
            xlib.XChangeProperty(self._x_display, self._window, self._clipboard_atom, self._utf8_atom, 8, xlib.PropModeReplace, (c_ubyte * size).from_buffer_copy(str_bytes), size)
        elif _debug:
            print("X11: Couldn't become owner of clipboard.")
    def get_clipboard_text(self) -> str:
        """Return the current CLIPBOARD selection as a string ('' if empty).

        Uses the cached copy when we own the selection; otherwise performs a
        synchronous XConvertSelection round-trip with the owning client.
        INCR (chunked) transfers are not implemented.
        """
        if (self._clipboard_str is not None):
            return self._clipboard_str
        owner = xlib.XGetSelectionOwner(self._x_display, self._clipboard_atom)
        if (not owner):
            return ''
        text = ''
        if (owner == self._window):
            (data, size, actual_atom) = self.get_single_property(self._window, self._clipboard_atom, self._utf8_atom)
        else:
            # Ask the owner to convert to UTF-8 and wait for SelectionNotify,
            # dispatching other events while we block.
            notification = xlib.XEvent()
            xlib.XConvertSelection(self._x_display, self._clipboard_atom, self._utf8_atom, self._clipboard_atom, self._window, xlib.CurrentTime)
            while (not xlib.XCheckTypedWindowEvent(self._x_display, self._window, xlib.SelectionNotify, byref(notification))):
                self.dispatch_platform_event(notification)
            if (not notification.xselection.property):
                # Conversion refused by the owner.
                return ''
            (data, size, actual_atom) = self.get_single_property(notification.xselection.requestor, notification.xselection.property, self._utf8_atom)
        if (actual_atom == self._incr_atom):
            if _debug:
                print('X11: Clipboard data is too large, not implemented.')
        elif (actual_atom == self._utf8_atom):
            if data:
                text_bytes = string_at(data, size)
                text = text_bytes.decode('utf-8')
                self._clipboard_str = text
        xlib.XFree(data)
        return text
    def _set_wm_normal_hints(self):
        """Push the current min/max size constraints to the WM_NORMAL_HINTS
        property (only flags whose sizes are set are advertised)."""
        hints = xlib.XAllocSizeHints().contents
        if self._minimum_size:
            hints.flags |= xlib.PMinSize
            (hints.min_width, hints.min_height) = self._minimum_size
        if self._maximum_size:
            hints.flags |= xlib.PMaxSize
            (hints.max_width, hints.max_height) = self._maximum_size
        xlib.XSetWMNormalHints(self._x_display, self._window, byref(hints))
    def _set_text_property(self, name, value, allow_utf8=True):
        """Set a text property *name* on the window to *value*.

        Encodes as UTF-8 when the input method supports it and allow_utf8 is
        set; otherwise falls back to ASCII (unencodable chars dropped).
        Raises XlibException when the atom or text property can't be created.
        """
        atom = xlib.XInternAtom(self._x_display, asbytes(name), False)
        if (not atom):
            raise XlibException(('Undefined atom "%s"' % name))
        text_property = xlib.XTextProperty()
        if (_have_utf8 and allow_utf8):
            buf = create_string_buffer(value.encode('utf8'))
            result = xlib.Xutf8TextListToTextProperty(self._x_display, cast(pointer(buf), c_char_p), 1, xlib.XUTF8StringStyle, byref(text_property))
            if (result < 0):
                raise XlibException('Could not create UTF8 text property')
        else:
            buf = create_string_buffer(value.encode('ascii', 'ignore'))
            result = xlib.XStringListToTextProperty(cast(pointer(buf), c_char_p), 1, byref(text_property))
            if (result < 0):
                raise XlibException('Could not create text property')
        xlib.XSetTextProperty(self._x_display, self._window, byref(text_property), atom)
    def _set_atoms_property(self, name, values, mode=xlib.PropModeReplace):
        """Set window property *name* to a list of atoms interned from the
        *values* strings."""
        name_atom = xlib.XInternAtom(self._x_display, asbytes(name), False)
        atoms = []
        for value in values:
            atoms.append(xlib.XInternAtom(self._x_display, asbytes(value), False))
        atom_type = xlib.XInternAtom(self._x_display, asbytes('ATOM'), False)
        if len(atoms):
            atoms_ar = (xlib.Atom * len(atoms))(*atoms)
            xlib.XChangeProperty(self._x_display, self._window, name_atom, atom_type, 32, mode, cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
        else:
            # NOTE(review): with an empty value list this deletes
            # _NET_WM_STATE regardless of `name` — looks suspicious; confirm
            # whether `name_atom` was intended here.
            net_wm_state = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_STATE'), False)
            if net_wm_state:
                xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)
    def _set_wm_state(self, *states):
        """Add the named _NET_WM_STATE_* atoms to the window state, and tell
        the window manager about the change via a root ClientMessage."""
        net_wm_state = xlib.XInternAtom(self._x_display, asbytes('_NET_WM_STATE'), False)
        atoms = []
        for state in states:
            atoms.append(xlib.XInternAtom(self._x_display, asbytes(state), False))
        atom_type = xlib.XInternAtom(self._x_display, asbytes('ATOM'), False)
        if len(atoms):
            atoms_ar = (xlib.Atom * len(atoms))(*atoms)
            xlib.XChangeProperty(self._x_display, self._window, net_wm_state, atom_type, 32, xlib.PropModePrepend, cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
        else:
            xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)
        # Notify the WM (required for mapped windows per EWMH).
        e = xlib.XEvent()
        e.xclient.type = xlib.ClientMessage
        e.xclient.message_type = net_wm_state
        e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
        e.xclient.window = self._window
        e.xclient.format = 32
        e.xclient.data.l[0] = xlib.PropModePrepend
        for (i, atom) in enumerate(atoms):
            e.xclient.data.l[(i + 1)] = atom
        xlib.XSendEvent(self._x_display, self._get_root(), False, xlib.SubstructureRedirectMask, byref(e))
def dispatch_events(self):
self.dispatch_pending_events()
self._allow_dispatch_event = True
e = xlib.XEvent()
_x_display = self._x_display
_window = self._window
_view = self._view
while xlib.XCheckWindowEvent(_x_display, _window, , byref(e)):
if (e.xany.type not in (xlib.KeyPress, xlib.KeyRelease)):
if xlib.XFilterEvent(e, 0):
continue
self.dispatch_platform_event(e)
while xlib.XCheckWindowEvent(_x_display, _view, , byref(e)):
if (e.xany.type not in (xlib.KeyPress, xlib.KeyRelease)):
if xlib.XFilterEvent(e, 0):
continue
self.dispatch_platform_event_view(e)
while xlib.XCheckTypedWindowEvent(_x_display, _window, xlib.ClientMessage, byref(e)):
self.dispatch_platform_event(e)
self._allow_dispatch_event = False
    def dispatch_pending_events(self):
        """Flush the queued application events, then emit context-lost
        notifications recorded during a window re-creation."""
        while self._event_queue:
            EventDispatcher.dispatch_event(self, *self._event_queue.pop(0))
        # Context-lost events are dispatched here (not queued) so their
        # handlers run outside the platform event loop.
        if self._lost_context:
            self._lost_context = False
            EventDispatcher.dispatch_event(self, 'on_context_lost')
        if self._lost_context_state:
            self._lost_context_state = False
            EventDispatcher.dispatch_event(self, 'on_context_state_lost')
def dispatch_platform_event(self, e):
if (self._applied_mouse_exclusive is None):
self._update_exclusivity()
event_handler = self._event_handlers.get(e.type)
if event_handler:
event_handler(e)
def dispatch_platform_event_view(self, e):
event_handler = self._view_event_handlers.get(e.type)
if event_handler:
event_handler(e)
def _translate_modifiers(state):
modifiers = 0
if (state & xlib.ShiftMask):
modifiers |= key.MOD_SHIFT
if (state & xlib.ControlMask):
modifiers |= key.MOD_CTRL
if (state & xlib.LockMask):
modifiers |= key.MOD_CAPSLOCK
if (state & xlib.Mod1Mask):
modifiers |= key.MOD_ALT
if (state & xlib.Mod2Mask):
modifiers |= key.MOD_NUMLOCK
if (state & xlib.Mod4Mask):
modifiers |= key.MOD_WINDOWS
if (state & xlib.Mod5Mask):
modifiers |= key.MOD_SCROLLLOCK
return modifiers
'\n def _event_symbol(self, event):\n # pyglet.self.key keysymbols are identical to X11 keysymbols, no\n # need to map the keysymbol.\n symbol = xlib.XKeycodeToKeysym(self._x_display, event.xkey.keycode, 0)\n if symbol == 0:\n # XIM event\n return None\n elif symbol not in key._key_names.keys():\n symbol = key.user_key(event.xkey.keycode)\n return symbol\n '
    def _event_text_symbol(self, ev):
        """Decode a key event into ``(text, symbol)``.

        *text* is the composed character string (None for control chars other
        than carriage return, or when the event was filtered); *symbol* is
        the pyglet key symbol (None for pure XIM events with keycode 0).
        """
        text = None
        symbol = xlib.KeySym()
        buffer = create_string_buffer(128)
        # Look up the keysym first; text lookup below may overwrite `count`.
        count = xlib.XLookupString(ev.xkey, buffer, (len(buffer) - 1), byref(symbol), None)
        # Let the input method filter/compose the event.
        filtered = xlib.XFilterEvent(ev, ev.xany.window)
        if ((ev.type == xlib.KeyPress) and (not filtered)):
            status = c_int()
            if _have_utf8:
                encoding = 'utf8'
                count = xlib.Xutf8LookupString(self._x_ic, ev.xkey, buffer, (len(buffer) - 1), byref(symbol), byref(status))
                if (status.value == xlib.XBufferOverflow):
                    raise NotImplementedError('TODO: XIM buffer resize')
            else:
                encoding = 'ascii'
                count = xlib.XLookupString(ev.xkey, buffer, (len(buffer) - 1), byref(symbol), None)
                if count:
                    status.value = xlib.XLookupBoth
            if (status.value & (xlib.XLookupChars | xlib.XLookupBoth)):
                text = buffer.value[:count].decode(encoding)
            # Drop control characters, but keep carriage return.
            if (text and (unicodedata.category(text) == 'Cc') and (text != '\r')):
                text = None
        symbol = symbol.value
        # Keycode 0 without filtering means a pure XIM (composed) event.
        if ((ev.xkey.keycode == 0) and (not filtered)):
            symbol = None
        if (symbol and (symbol not in key._key_names) and ev.xkey.keycode):
            # Fold alphabetic keysyms to lowercase; anything still unknown
            # becomes a user-defined key based on the raw keycode.
            try:
                symbol = ord(chr(symbol).lower())
            except ValueError:
                symbol = key.user_key(ev.xkey.keycode)
            else:
                if (symbol not in key._key_names):
                    symbol = key.user_key(ev.xkey.keycode)
        if filtered:
            # The event was consumed by the input method: no text.
            return (None, symbol)
        return (text, symbol)
def _event_text_motion(symbol, modifiers):
if (modifiers & key.MOD_ALT):
return None
ctrl = ((modifiers & key.MOD_CTRL) != 0)
return _motion_map.get((symbol, ctrl), None)
    # NOTE(review): the two bare expressions below look like remnants of
    # stripped event-registration decorators — confirm against upstream.
    (xlib.KeyPress)
    (xlib.KeyRelease)
    def _event_key_view(self, ev):
        """Handle a key press/release on the view: dispatch on_key_press /
        on_key_release, text motion and on_text events, with manual
        auto-repeat collapsing when XKB detectable auto-repeat is missing."""
        global _can_detect_autorepeat
        if ((not _can_detect_autorepeat) and (ev.type == xlib.KeyRelease)):
            # Look ahead for the matching KeyPress that X emits for an
            # auto-repeat; if found, treat the pair as a repeat (text only).
            saved = []
            while True:
                auto_event = xlib.XEvent()
                result = xlib.XCheckWindowEvent(self._x_display, self._window, (xlib.KeyPress | xlib.KeyRelease), byref(auto_event))
                if (not result):
                    break
                saved.append(auto_event)
                if (auto_event.type == xlib.KeyRelease):
                    continue
                if (ev.xkey.keycode == auto_event.xkey.keycode):
                    # Found a repeat: emit the text/motion, swallow the pair.
                    (text, symbol) = self._event_text_symbol(auto_event)
                    modifiers = self._translate_modifiers(ev.xkey.state)
                    modifiers_ctrl = (modifiers & (key.MOD_CTRL | key.MOD_ALT))
                    motion = self._event_text_motion(symbol, modifiers)
                    if motion:
                        if (modifiers & key.MOD_SHIFT):
                            self.dispatch_event('on_text_motion_select', motion)
                        else:
                            self.dispatch_event('on_text_motion', motion)
                    elif (text and (not modifiers_ctrl)):
                        self.dispatch_event('on_text', text)
                    # Drop the repeat KeyPress, requeue the rest.
                    ditched = saved.pop()
                    for auto_event in reversed(saved):
                        xlib.XPutBackEvent(self._x_display, byref(auto_event))
                    return
                else:
                    # Different key: not a repeat, bail out.
                    break
            # Not an auto-repeat: put everything back and fall through.
            for auto_event in reversed(saved):
                xlib.XPutBackEvent(self._x_display, byref(auto_event))
        (text, symbol) = self._event_text_symbol(ev)
        modifiers = self._translate_modifiers(ev.xkey.state)
        modifiers_ctrl = (modifiers & (key.MOD_CTRL | key.MOD_ALT))
        motion = self._event_text_motion(symbol, modifiers)
        if (ev.type == xlib.KeyPress):
            # With detectable auto-repeat, suppress repeated on_key_press.
            if (symbol and ((not _can_detect_autorepeat) or (symbol not in self.pressed_keys))):
                self.dispatch_event('on_key_press', symbol, modifiers)
                if _can_detect_autorepeat:
                    self.pressed_keys.add(symbol)
            if motion:
                if (modifiers & key.MOD_SHIFT):
                    self.dispatch_event('on_text_motion_select', motion)
                else:
                    self.dispatch_event('on_text_motion', motion)
            elif (text and (not modifiers_ctrl)):
                self.dispatch_event('on_text', text)
        elif (ev.type == xlib.KeyRelease):
            if symbol:
                self.dispatch_event('on_key_release', symbol, modifiers)
                if (_can_detect_autorepeat and (symbol in self.pressed_keys)):
                    self.pressed_keys.remove(symbol)
    # NOTE(review): bare expressions below look like stripped decorator
    # remnants — confirm against upstream.
    (xlib.KeyPress)
    (xlib.KeyRelease)
    def _event_key(self, ev):
        """Key events on the top-level window delegate to the view handler."""
        return self._event_key_view(ev)
    # NOTE(review): bare expression below looks like a stripped decorator
    # remnant — confirm against upstream.
    (xlib.MotionNotify)
    def _event_motionnotify_view(self, ev):
        """Handle pointer motion inside the view: dispatch on_mouse_motion
        or on_mouse_drag (y converted to bottom-left origin)."""
        x = ev.xmotion.x
        y = ((self.height - ev.xmotion.y) - 1)
        if self._mouse_in_window:
            dx = (x - self._mouse_x)
            dy = (y - self._mouse_y)
        else:
            dx = dy = 0
        if (self._applied_mouse_exclusive and ((ev.xmotion.x, ev.xmotion.y) == self._mouse_exclusive_client)):
            # Ignore the event caused by our own warp back to centre.
            self._mouse_x = x
            self._mouse_y = y
            return
        if self._applied_mouse_exclusive:
            # Keep the pointer parked at the centre while exclusive.
            (ex, ey) = self._mouse_exclusive_client
            xlib.XWarpPointer(self._x_display, 0, self._window, 0, 0, 0, 0, ex, ey)
        self._mouse_x = x
        self._mouse_y = y
        self._mouse_in_window = True
        buttons = 0
        if (ev.xmotion.state & xlib.Button1MotionMask):
            buttons |= mouse.LEFT
        if (ev.xmotion.state & xlib.Button2MotionMask):
            buttons |= mouse.MIDDLE
        if (ev.xmotion.state & xlib.Button3MotionMask):
            buttons |= mouse.RIGHT
        if buttons:
            # Motion with a button held is a drag.
            modifiers = self._translate_modifiers(ev.xmotion.state)
            self.dispatch_event('on_mouse_drag', x, y, dx, dy, buttons, modifiers)
        else:
            self.dispatch_event('on_mouse_motion', x, y, dx, dy)
    # NOTE(review): bare expression below looks like a stripped decorator
    # remnant — confirm against upstream.
    (xlib.MotionNotify)
    def _event_motionnotify(self, ev):
        """Handle motion on the top-level window (outside the view): only
        drags (button held) are reported, translated into view coordinates."""
        buttons = 0
        if (ev.xmotion.state & xlib.Button1MotionMask):
            buttons |= mouse.LEFT
        if (ev.xmotion.state & xlib.Button2MotionMask):
            buttons |= mouse.MIDDLE
        if (ev.xmotion.state & xlib.Button3MotionMask):
            buttons |= mouse.RIGHT
        if buttons:
            x = (ev.xmotion.x - self._view_x)
            # NOTE(review): the y grouping differs by one from the view
            # handler's `(height - y) - 1` — confirm this offset is intended.
            y = (self._height - ((ev.xmotion.y - self._view_y) - 1))
            if self._mouse_in_window:
                dx = (x - self._mouse_x)
                dy = (y - self._mouse_y)
            else:
                dx = dy = 0
            self._mouse_x = x
            self._mouse_y = y
            modifiers = self._translate_modifiers(ev.xmotion.state)
            self.dispatch_event('on_mouse_drag', x, y, dx, dy, buttons, modifiers)
    # NOTE(review): bare expression below looks like a stripped decorator
    # remnant — confirm against upstream.
    (xlib.ClientMessage)
    def _event_clientmessage(self, ev):
        """Dispatch WM protocol messages: window close, XSync resize
        requests, and the XDND drag-and-drop client messages."""
        atom = ev.xclient.data.l[0]
        if (atom == xlib.XInternAtom(ev.xclient.display, asbytes('WM_DELETE_WINDOW'), False)):
            self.dispatch_event('on_close')
        elif (self._enable_xsync and (atom == xlib.XInternAtom(ev.xclient.display, asbytes('_NET_WM_SYNC_REQUEST'), False))):
            # Store the 64-bit sync value to acknowledge after the next flip.
            lo = ev.xclient.data.l[2]
            hi = ev.xclient.data.l[3]
            self._current_sync_value = xsync.XSyncValue(hi, lo)
        elif (ev.xclient.message_type == self._xdnd_atoms['XdndPosition']):
            self._event_drag_position(ev)
        elif (ev.xclient.message_type == self._xdnd_atoms['XdndDrop']):
            self._event_drag_drop(ev)
        elif (ev.xclient.message_type == self._xdnd_atoms['XdndEnter']):
            self._event_drag_enter(ev)
def _event_drag_drop(self, ev):
    """Handle an XdndDrop message: fetch the dragged data or refuse the drop."""
    if (self._xdnd_version > XDND_VERSION):
        # Source speaks a newer XDND protocol than we support; ignore.
        return
    time = xlib.CurrentTime
    if self._xdnd_format:
        # A usable format was negotiated during XdndEnter: ask the source
        # to convert the XdndSelection; data arrives via SelectionNotify.
        if (self._xdnd_version >= 1):
            time = ev.xclient.data.l[2]
        xlib.XConvertSelection(self._x_display, self._xdnd_atoms['XdndSelection'], self._xdnd_format, self._xdnd_atoms['XdndSelection'], self._window, time)
        xlib.XFlush(self._x_display)
    elif (self._xdnd_version >= 2):
        # No acceptable format: reply XdndFinished with the "rejected" flag.
        e = xlib.XEvent()
        e.xclient.type = xlib.ClientMessage
        e.xclient.message_type = self._xdnd_atoms['XdndFinished']
        e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
        e.xclient.window = self._window
        e.xclient.format = 32
        e.xclient.data.l[0] = self._window
        e.xclient.data.l[1] = 0
        # NOTE(review): assigning None to a ctypes long array slot looks
        # suspect -- confirm this shouldn't be 0 (TypeError risk).
        e.xclient.data.l[2] = None
        xlib.XSendEvent(self._x_display, self._xdnd_source, False, xlib.NoEventMask, byref(e))
        xlib.XFlush(self._x_display)
def _event_drag_position(self, ev):
    """Answer an XdndPosition message, telling the source whether we accept."""
    if self._xdnd_version > XDND_VERSION:
        return
    # Root-window coordinates are packed into data.l[2]: x in the high
    # 16 bits, y in the low 16 bits.
    packed = ev.xclient.data.l[2]
    root_x = (packed >> 16) & 65535
    root_y = packed & 65535
    child = xlib.Window()
    local_x = c_int()
    local_y = c_int()
    xlib.XTranslateCoordinates(self._x_display, self._get_root(), self._window, root_x, root_y, byref(local_x), byref(local_y), byref(child))
    # Remember where a subsequent drop would land (window coordinates).
    self._xdnd_position = (local_x.value, local_y.value)
    # Build the XdndStatus reply for the drag source.
    reply = xlib.XEvent()
    reply.xclient.type = xlib.ClientMessage
    reply.xclient.message_type = self._xdnd_atoms['XdndStatus']
    reply.xclient.display = cast(self._x_display, POINTER(xlib.Display))
    reply.xclient.window = ev.xclient.data.l[0]
    reply.xclient.format = 32
    reply.xclient.data.l[0] = self._window
    reply.xclient.data.l[2] = 0
    reply.xclient.data.l[3] = 0
    if self._xdnd_format:
        # We can take the drop; advertise the copy action for protocol >= 2.
        reply.xclient.data.l[1] = 1
        if self._xdnd_version >= 2:
            reply.xclient.data.l[4] = self._xdnd_atoms['XdndActionCopy']
    xlib.XSendEvent(self._x_display, self._xdnd_source, False, xlib.NoEventMask, byref(reply))
    xlib.XFlush(self._x_display)
def _event_drag_enter(self, ev):
    """Record a drag source entering the window and pick a supported format."""
    self._xdnd_source = ev.xclient.data.l[0]
    # The protocol version is stored in the top byte of data.l[1].
    self._xdnd_version = (ev.xclient.data.l[1] >> 24)
    self._xdnd_format = None
    if (self._xdnd_version > XDND_VERSION):
        return
    # Bit 0 of data.l[1] set => more than three types; fetch XdndTypeList.
    three_or_more = (ev.xclient.data.l[1] & 1)
    if three_or_more:
        (data, count, _) = self.get_single_property(self._xdnd_source, self._xdnd_atoms['XdndTypeList'], XA_ATOM)
        data = cast(data, POINTER(xlib.Atom))
    else:
        # Up to three type atoms are carried inline in data.l[2..4].
        count = 3
        # NOTE(review): arithmetic on a ctypes array -- confirm `.l + 2`
        # yields the intended view of the three inline type atoms.
        data = (ev.xclient.data.l + 2)
    for i in range(count):
        if (data[i] == self._xdnd_atoms['text/uri-list']):
            self._xdnd_format = self._xdnd_atoms['text/uri-list']
            break
    if data:
        # NOTE(review): XFree is also reached for the inline (non-property)
        # data from the else branch above -- verify that's safe/intended.
        xlib.XFree(data)
def get_single_property(self, window, atom_property, atom_type):
    """Fetch a window property in a single request.

    :param window: X window to read the property from.
    :param atom_property: atom naming the property.
    :param atom_type: expected property type atom.
    :return: ``(data, item_count, actual_atom)``; ``data`` is a
        ``POINTER(c_ubyte)`` the caller must release with ``XFree`` when
        non-null.
    """
    actualAtom = xlib.Atom()
    actualFormat = c_int()
    itemCount = c_ulong()
    bytesAfter = c_ulong()
    data = POINTER(c_ubyte)()
    # Bug fix: the long_length argument was missing entirely (a syntax
    # error: `0, , False`).  It is measured in 32-bit multiples; pass the
    # maximum so the whole property is returned in one call.
    # NOTE(review): `data` is passed by value here -- confirm the xlib
    # prototype doesn't require byref(data) for the prop_return argument.
    xlib.XGetWindowProperty(self._x_display, window, atom_property, 0, 2147483647, False, atom_type, byref(actualAtom), byref(actualFormat), byref(itemCount), byref(bytesAfter), data)
    return (data, itemCount.value, actualAtom.value)
(xlib.SelectionNotify)
def _event_selection_notification(self, ev):
    """Receive converted XdndSelection data and dispatch ``on_file_drop``."""
    # property == 0 means the conversion failed; only react to our XDND
    # selection, not other selections (e.g. clipboard).
    if ((ev.xselection.property != 0) and (ev.xselection.selection == self._xdnd_atoms['XdndSelection'])):
        if self._xdnd_format:
            # Read the property the drag source stored the data into.
            (data, count, _) = self.get_single_property(ev.xselection.requestor, ev.xselection.property, ev.xselection.target)
            buffer = create_string_buffer(count)
            memmove(buffer, data, count)
            formatted_paths = self.parse_filenames(buffer.value.decode())
            # Acknowledge completion to the drag source (XdndFinished,
            # accepted flag set, copy action reported).
            e = xlib.XEvent()
            e.xclient.type = xlib.ClientMessage
            e.xclient.message_type = self._xdnd_atoms['XdndFinished']
            e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
            e.xclient.window = self._window
            e.xclient.format = 32
            e.xclient.data.l[0] = self._xdnd_source
            e.xclient.data.l[1] = 1
            e.xclient.data.l[2] = self._xdnd_atoms['XdndActionCopy']
            xlib.XSendEvent(self._x_display, self._get_root(), False, xlib.NoEventMask, byref(e))
            xlib.XFlush(self._x_display)
            xlib.XFree(data)
            # Y is flipped from X11's top-left origin to a bottom-left one.
            self.dispatch_event('on_file_drop', self._xdnd_position[0], (self._height - self._xdnd_position[1]), formatted_paths)
def parse_filenames(decoded_string):
import sys
different_files = decoded_string.splitlines()
parsed = []
for filename in different_files:
if filename:
filename = urllib.parse.urlsplit(filename).path
encoding = sys.getfilesystemencoding()
parsed.append(urllib.parse.unquote(filename, encoding))
return parsed
def _sync_resize(self):
    """Acknowledge a pending _NET_WM_SYNC_REQUEST once a resize completes."""
    if not (self._enable_xsync and self._current_sync_valid):
        return
    if xsync.XSyncValueIsZero(self._current_sync_value):
        # Nothing to acknowledge; just clear the pending flag.
        self._current_sync_valid = False
        return
    # Bump the sync counter so the WM knows this frame is finished.
    xsync.XSyncSetCounter(self._x_display, self._sync_counter, self._current_sync_value)
    self._current_sync_value = None
    self._current_sync_valid = False
(xlib.ButtonPress)
(xlib.ButtonRelease)
def _event_button(self, ev):
    """Translate X button press/release into pyglet mouse/scroll events."""
    x = ev.xbutton.x
    # Flip Y: X11 uses a top-left origin, events are reported bottom-left.
    y = (self.height - ev.xbutton.y)
    # Zero-based button index (X buttons are 1-based).
    button = (ev.xbutton.button - 1)
    # NOTE(review): X buttons 4-7 are scroll; 8/9 (indices 7/8) appear to
    # be remapped down by 4 to become the 4th/5th pyglet buttons -- confirm.
    if ((button == 7) or (button == 8)):
        button -= 4
    modifiers = self._translate_modifiers(ev.xbutton.state)
    if (ev.type == xlib.ButtonPress):
        # Override-redirect (fullscreen) windows don't get WM focus; grab
        # it ourselves on the first click.
        if (self._override_redirect and (not self._active)):
            self.activate()
        if (ev.xbutton.button == 4):
            self.dispatch_event('on_mouse_scroll', x, y, 0, 1)
        elif (ev.xbutton.button == 5):
            self.dispatch_event('on_mouse_scroll', x, y, 0, (- 1))
        elif (ev.xbutton.button == 6):
            self.dispatch_event('on_mouse_scroll', x, y, (- 1), 0)
        elif (ev.xbutton.button == 7):
            self.dispatch_event('on_mouse_scroll', x, y, 1, 0)
        elif (button < 5):
            # Regular button press; bit position encodes the button.
            self.dispatch_event('on_mouse_press', x, y, (1 << button), modifiers)
    elif (button < 5):
        # Release path (scroll buttons generate no release events we care about).
        self.dispatch_event('on_mouse_release', x, y, (1 << button), modifiers)
(xlib.Expose)
def _event_expose(self, ev):
    """Dispatch ``on_expose`` for the last Expose in a contiguous series."""
    # A nonzero count means more Expose events for this window follow;
    # defer the redraw until the final one arrives.
    if (ev.xexpose.count > 0):
        return
    self.dispatch_event('on_expose')
(xlib.EnterNotify)
def _event_enternotify(self, ev):
    """Track the pointer entering the window and notify listeners."""
    # Flip Y: X11 reports top-left-origin coordinates.
    x = ev.xcrossing.x
    y = self.height - ev.xcrossing.y
    self._mouse_x = x
    self._mouse_y = y
    self._mouse_in_window = True
    self.dispatch_event('on_mouse_enter', x, y)
(xlib.LeaveNotify)
def _event_leavenotify(self, ev):
    """Track the pointer leaving the window and notify listeners."""
    # Flip Y: X11 reports top-left-origin coordinates.
    x = ev.xcrossing.x
    y = self.height - ev.xcrossing.y
    self._mouse_x = x
    self._mouse_y = y
    self._mouse_in_window = False
    self.dispatch_event('on_mouse_leave', x, y)
(xlib.ConfigureNotify)
def _event_configurenotify(self, ev):
    """Track size/position changes; dispatch ``on_resize`` / ``on_move``."""
    if (self._enable_xsync and self._current_sync_value):
        # A _NET_WM_SYNC_REQUEST preceded this configure; mark its counter
        # value ready so _sync_resize can acknowledge it.
        self._current_sync_valid = True
    if self._fullscreen:
        # Fullscreen geometry is managed elsewhere; ignore configure events.
        return
    self.switch_to()
    (w, h) = (ev.xconfigure.width, ev.xconfigure.height)
    (x, y) = (ev.xconfigure.x, ev.xconfigure.y)
    if ((self._width != w) or (self._height != h)):
        self._width = w
        self._height = h
        self._update_view_size()
        self.dispatch_event('on_resize', self._width, self._height)
    if ((self._x != x) or (self._y != y)):
        # Note: the event fires before the cached position is updated, so
        # handlers reading self._x/_y during dispatch see the old values.
        self.dispatch_event('on_move', x, y)
        self._x = x
        self._y = y
(xlib.FocusIn)
def _event_focusin(self, ev):
    """Window gained keyboard focus: restore grabs, notify, focus the IME."""
    self._active = True
    # Re-apply any mouse/keyboard exclusivity that was suspended on focus loss.
    self._update_exclusivity()
    self.dispatch_event('on_activate')
    # Point the X input context at us so IME composition targets this window.
    xlib.XSetICFocus(self._x_ic)
(xlib.FocusOut)
def _event_focusout(self, ev):
    """Window lost keyboard focus: release grabs, notify, unfocus the IME."""
    self._active = False
    # Drop any mouse/keyboard exclusivity while we're not focused.
    self._update_exclusivity()
    self.dispatch_event('on_deactivate')
    xlib.XUnsetICFocus(self._x_ic)
(xlib.MapNotify)
def _event_mapnotify(self, ev):
    """Window became visible (mapped): notify and re-apply exclusivity."""
    self._mapped = True
    self.dispatch_event('on_show')
    self._update_exclusivity()
(xlib.UnmapNotify)
def _event_unmapnotify(self, ev):
    """Window was hidden (unmapped): notify listeners."""
    self._mapped = False
    self.dispatch_event('on_hide')
(xlib.SelectionClear)
def _event_selection_clear(self, ev):
    """Drop our cached clipboard text when another client takes the selection."""
    if ev.xselectionclear.selection != self._clipboard_atom:
        return
    self._clipboard_str = None
(xlib.SelectionRequest)
def _event_selection_request(self, ev):
    """Serve our clipboard contents to a client requesting the selection.

    Replies to TARGETS requests by advertising UTF8_STRING, serves the
    UTF-8 encoded clipboard text for UTF8_STRING requests, and always
    sends a SelectionNotify back (property == 0 signals refusal).
    """
    request = ev.xselectionrequest
    if _debug:
        rt = xlib.XGetAtomName(self._x_display, request.target)
        rp = xlib.XGetAtomName(self._x_display, request.property)
        print(f'X11 debug: request target {rt}')
        print(f'X11 debug: request property {rp}')
    out_event = xlib.XEvent()
    out_event.xany.type = xlib.SelectionNotify
    out_event.xselection.selection = request.selection
    out_event.xselection.display = request.display
    # target/property stay 0 (refusal) unless we satisfy the request below.
    out_event.xselection.target = 0
    out_event.xselection.property = 0
    out_event.xselection.requestor = request.requestor
    out_event.xselection.time = request.time
    # NOTE(review): comparing ev.xselection.target against the *selection*
    # atom looks suspicious -- confirm it shouldn't test request.selection.
    if ((xlib.XGetSelectionOwner(self._x_display, self._clipboard_atom) == self._window) and (ev.xselection.target == self._clipboard_atom)):
        if (request.target == self._target_atom):
            # TARGETS request: report the one format we can provide.
            atoms_ar = (xlib.Atom * 1)(self._utf8_atom)
            ptr = cast(pointer(atoms_ar), POINTER(c_ubyte))
            xlib.XChangeProperty(self._x_display, request.requestor, request.property, XA_ATOM, 32, xlib.PropModeReplace, ptr, (sizeof(atoms_ar) // sizeof(c_ulong)))
            out_event.xselection.property = request.property
            out_event.xselection.target = request.target
        elif (request.target == self._utf8_atom):
            text = self._clipboard_str.encode('utf-8')
            # Bug fix: for a format-8 property, nelements is the number of
            # *bytes*.  len(self._clipboard_str) counted characters, which
            # truncated any clipboard text containing non-ASCII characters.
            size = len(text)
            xlib.XChangeProperty(self._x_display, request.requestor, request.property, request.target, 8, xlib.PropModeReplace, (c_ubyte * size).from_buffer_copy(text), size)
            out_event.xselection.property = request.property
            out_event.xselection.target = request.target
    xlib.XSendEvent(self._x_display, request.requestor, 0, 0, byref(out_event))
def all_gather_list(data, group=None, max_size=16384):
    """Gather an arbitrary picklable object from every worker onto all workers.

    Each rank pickles its payload into its own fixed-size slot of a shared
    CUDA byte buffer; a single all_reduce over the zero-initialized buffer
    then acts as a concatenation, after which every rank decodes all slots.

    :param data: any picklable object (moved to CPU before pickling).
    :param group: process group to gather over; defaults to the global group.
    :param max_size: per-rank slot size in bytes; the 4-byte length header
        plus the pickled payload must fit within it.
    :return: list with one decoded object per rank, indexed by rank.
    """
    if (group is None):
        group = get_global_group()
    rank = get_rank(group=group)
    world_size = get_world_size(group=group)
    buffer_size = (max_size * world_size)
    # Buffers are cached as function attributes and only reallocated when
    # a larger world size / max_size requires it.
    if ((not hasattr(all_gather_list, '_buffer')) or (all_gather_list._buffer.numel() < buffer_size)):
        all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
        all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
    buffer = all_gather_list._buffer
    # Zeroing is essential: all_reduce summing disjoint nonzero slots over
    # zeros reproduces each rank's bytes verbatim.
    buffer.zero_()
    cpu_buffer = all_gather_list._cpu_buffer
    data = utils.move_to_cpu(data)
    enc = pickle.dumps(data)
    enc_size = len(enc)
    # 4-byte big-endian length header ('>I') precedes the pickled payload.
    header_size = 4
    size = (header_size + enc_size)
    if (size > max_size):
        raise ValueError('encoded data size ({}) exceeds max_size ({})'.format(size, max_size))
    header = struct.pack('>I', enc_size)
    cpu_buffer[:size] = torch.ByteTensor(list((header + enc)))
    # Copy this rank's bytes into its slot of the shared device buffer.
    start = (rank * max_size)
    buffer[start:(start + size)].copy_(cpu_buffer[:size])
    all_reduce(buffer, group=group)
    buffer = buffer.cpu()
    try:
        result = []
        for i in range(world_size):
            out_buffer = buffer[(i * max_size):((i + 1) * max_size)]
            (enc_size,) = struct.unpack('>I', bytes(out_buffer[:header_size].tolist()))
            if (enc_size > 0):
                result.append(pickle.loads(bytes(out_buffer[header_size:(header_size + enc_size)].tolist())))
        return result
    except pickle.UnpicklingError:
        raise Exception('Unable to unpickle data from other workers. all_gather_list requires all workers to enter the function together, so this error usually indicates that the workers have fallen out of sync somehow. Workers can fall out of sync if one of them runs out of memory, or if there are other conditions in your training script that can cause one worker to finish an epoch while other workers are still iterating over their portions of the data. Try rerunning with --ddp-backend=no_c10d and see if that helps.')
class TabbedBrowserStub(QObject):
    """Stub replacement for TabbedBrowser used in tests."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.widget = TabWidgetStub()
        self.is_shutting_down = False
        self.loaded_url = None  # last URL passed to tabopen/load_url
        self.cur_url = None  # value served by current_url()
        self.undo_stack = None

    def on_tab_close_requested(self, idx):
        del self.widget.tabs[idx]

    def widgets(self):
        return self.widget.tabs

    def tabopen(self, url):
        self.loaded_url = url

    def load_url(self, url, *, newtab):
        self.loaded_url = url

    def current_url(self):
        # Bug fix: the original guard tested `self.current_url is None`,
        # i.e. the bound method itself, which is never None -- the check
        # could never fire.  The intent is to validate the stored URL.
        if self.cur_url is None:
            raise ValueError('current_url got called with cur_url None!')
        return self.cur_url
class TripletsNet6c(VGGNet):
    """VGG-style model pairing a ClusterNet6c trunk with a triplets head."""

    def __init__(self, config):
        super(TripletsNet6c, self).__init__()
        self.trunk = ClusterNet6cTrunk(config)
        self.head = TripletsNet6cHead(config)
        self._initialize_weights()

    def forward(self, x, kmeans_use_features=False):
        # Extract trunk features, then apply the head (which may serve raw
        # features when running k-means).
        features = self.trunk(x)
        return self.head(features, kmeans_use_features=kmeans_use_features)
class MultiTree():
    """Prefix tree whose nodes carry a string value plus parent/children links.

    New nodes are additionally registered with an automaton-like index
    (``ac``) that supports ``in``, ``get`` and ``add_word``.
    """

    def __init__(self, value, parent):
        self.parent = parent
        self.value = value
        self.children = []

    def add_children(self, children):
        # Attach one child node (name kept for backward compatibility).
        self.children.append(children)

    def add_value(self, values: str, ac, separators: str='-'):
        """Insert the separator-delimited path ``values`` under this node,
        reusing existing children and indexing each newly created node."""
        node = self
        for part in values.split(separators):
            existing = next((c for c in node.children if c.value == part), None)
            if existing is not None:
                # Path segment already present: descend without creating.
                node = existing
                continue
            child = MultiTree(value=part, parent=node)
            node.add_children(child)
            # Index the new node under its value; group duplicates together.
            if part in ac:
                ac.get(part).append(child)
            else:
                ac.add_word(part, [child])
            node = child
def _check_structure_field(name: str, dtype_tuple: Tuple[(np.dtype, int)], target: 'Structure', type_per_name_with_wildcard: Dict[(str, type)]) -> bool:
    """Return True when the dtype recorded for `name` matches the type (and
    optional shape suffix) the target Structure declares for that field."""
    dtype = dtype_tuple[0]
    expected_name = target.get_type(name)
    shape_match = re.search(_REGEX_FIELD_SHAPE, expected_name)
    element_type = dtype.type
    if shape_match:
        # A shape suffix in the declaration means the field must be a
        # sub-array dtype whose shape matches.
        if not dtype.subdtype:
            return False
        element_type = dtype.subdtype[0].type
        declared_shape = shape_match.group(1)
        if not check_shape(dtype.shape, Shape[declared_shape]):
            return False
        # Strip the shape suffix before resolving the scalar type name.
        expected_name = expected_name.replace(shape_match.group(0), '')
    check_type_name(expected_name, type_per_name_with_wildcard)
    return issubclass(element_type, type_per_name_with_wildcard[expected_name])
class DataTrainingArguments():
    """Arguments controlling which data to train/evaluate on and how to
    preprocess it (question-answering style pipeline).

    NOTE(review): fields use ``dataclasses.field`` and ``__post_init__`` --
    this looks like a ``@dataclass`` whose decorator was stripped; confirm.
    """
    # Dataset selection: either a hub dataset name or explicit files.
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a text file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to evaluate the perplexity on (a text file).'})
    # Preprocessing knobs.
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    max_seq_length: int = field(default=384, metadata={'help': 'The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.'})
    pad_to_max_length: bool = field(default=False, metadata={'help': 'Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU).'})
    # Optional truncation of each split for debugging / fast runs.
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of evaluation examples to this value if set.'})
    max_predict_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of prediction examples to this value if set.'})
    # SQuAD-v2-style unanswerable questions and answer decoding parameters.
    version_2_with_negative: bool = field(default=False, metadata={'help': 'If true, some of the examples do not have an answer.'})
    null_score_diff_threshold: float = field(default=0.0, metadata={'help': 'The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example. Only useful when `version_2_with_negative=True`.'})
    doc_stride: int = field(default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'})
    n_best_size: int = field(default=20, metadata={'help': 'The total number of n-best predictions to generate when looking for an answer.'})
    max_answer_length: int = field(default=30, metadata={'help': 'The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another.'})
    def __post_init__(self):
        # Require at least one data source, and restrict file types to
        # csv/json (assertions, so stripped under `python -O`).
        if ((self.dataset_name is None) and (self.train_file is None) and (self.validation_file is None) and (self.test_file is None)):
            raise ValueError('Need either a dataset name or a training/validation file/test_file.')
        else:
            if (self.train_file is not None):
                extension = self.train_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`train_file` should be a csv or a json file.'
            if (self.validation_file is not None):
                extension = self.validation_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`validation_file` should be a csv or a json file.'
            if (self.test_file is not None):
                extension = self.test_file.split('.')[(- 1)]
                assert (extension in ['csv', 'json']), '`test_file` should be a csv or a json file.'
def test_realloc_3_1_5_4_6():
    """Cycle ten regions through shrink/grow reallocations (3->1->5->4->6),
    free them all, and verify the allocator is fully free again."""
    allocator = RegionAllocator(1000)
    regions = [allocator.alloc(3) for _ in range(10)]
    # Resize every region to each size in turn (same order as the test name).
    for new_size in (1, 5, 4, 6):
        for region in regions:
            allocator.realloc(region, new_size)
    for region in regions:
        allocator.dealloc(region)
    assert allocator.get_free_size() == allocator.capacity
def prototype_view(proto, org_members):
    """Serialize a permission prototype into an API response dict."""
    def _user_view(user):
        # Robots always count as organization members.
        return {
            'name': user.username,
            'is_robot': user.robot,
            'kind': 'user',
            'is_org_member': user.robot or user.username in org_members,
            'avatar': avatar.get_data_for_user(user),
        }

    if proto.delegate_user:
        delegate = _user_view(proto.delegate_user)
    else:
        delegate = {
            'name': proto.delegate_team.name,
            'kind': 'team',
            'avatar': avatar.get_data_for_team(proto.delegate_team),
        }

    activating = _user_view(proto.activating_user) if proto.activating_user else None
    return {
        'activating_user': activating,
        'delegate': delegate,
        'role': proto.role.name,
        'id': proto.uuid,
    }
def is_torch_tpu_available():
    """Return True when torch is available and torch_xla's XLA model module
    can be located (i.e. TPU support is installed)."""
    if not _torch_available:
        return False
    # Probe parent packages first: find_spec on a dotted name fails when an
    # ancestor package is missing, so check each level in order.
    for module_name in ('torch_xla', 'torch_xla.core', 'torch_xla.core.xla_model'):
        if importlib.util.find_spec(module_name) is None:
            return False
    return True
def evaluate_one_dataset(LOG, dataloader, model, opt, save=True, give_return=True, epoch=0):
    """Evaluate `model` on one dataset: compute NMI/F1/Recall@k, optionally
    checkpoint on a new best Recall@1, and log/print the results.

    :param LOG: logging wrapper exposing `progress_saver`, `metrics_to_log`,
        `prop.save_path` and `log(...)`; may be None to skip logging.
    :param dataloader: torch DataLoader whose dataset exposes `image_list`.
    :param model: network to evaluate (run without gradient tracking).
    :param opt: options namespace (at least `device` and `k_vals`).
    :param save: checkpoint + nearest-neighbour plot when Recall@1 improves.
    :param give_return: when True return `(recall_at_ks, NMI, F1)`.
    :param epoch: epoch number, used only for logging/printing.
    """
    start = time.time()
    image_paths = np.array(dataloader.dataset.image_list)
    with torch.no_grad():
        (F1, NMI, recall_at_ks, feature_matrix_all) = aux.eval_metrics_one_dataset(model, dataloader, device=opt.device, k_vals=opt.k_vals, opt=opt)
    result_str = ', '.join(('{0}: {1:.4f}'.format(k, rec) for (k, rec) in zip(opt.k_vals, recall_at_ks)))
    result_str = 'Epoch (Test) {0}: NMI [{1:.4f}] | F1 [{2:.4f}] | Recall [{3}]'.format(epoch, NMI, F1, result_str)
    if (LOG is not None):
        if save:
            # Save a checkpoint on the first evaluation or when Recall@1
            # beats the best value seen so far.
            if ((not len(LOG.progress_saver['val']['Recall 1'])) or (recall_at_ks[0] > np.max(LOG.progress_saver['val']['Recall 1']))):
                aux.set_checkpoint(model, opt, LOG.progress_saver, (LOG.prop.save_path + '/checkpoint.pth.tar'))
                aux.recover_closest_one_dataset(feature_matrix_all, image_paths, (LOG.prop.save_path + '/sample_recoveries.png'))
        LOG.log('val', LOG.metrics_to_log['val'], ([epoch, np.round((time.time() - start)), NMI, F1] + recall_at_ks))
    print(result_str)
    if give_return:
        return (recall_at_ks, NMI, F1)
    else:
        # No-op expression: function falls through and implicitly returns None.
        None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.