code
stringlengths
281
23.7M
@pytest.mark.parametrize(
    # NOTE(review): the "@pytest.mark" / "@" decorator markers were stripped
    # to bare expressions in the dump and are reconstructed here -- confirm.
    'replication_globs, expected_replicated_paths',
    [
        (
            [['**']] * _WORLD_SIZE,
            ['0/my_stateful/foo', '0/my_stateful/bar', '0/my_stateful/baz/0',
             '0/my_stateful/baz/1', '0/my_stateful/qux/quux',
             '0/my_stateful/qux/quuz'],
        ),
        (
            [['my_stateful/baz/*', 'my_stateful/qux/*']] * _WORLD_SIZE,
            ['0/my_stateful/baz/0', '0/my_stateful/baz/1',
             '0/my_stateful/qux/quux', '0/my_stateful/qux/quuz'],
        ),
        (
            # Ranks disagree on the second glob, so only the common match
            # ("my_stateful/foo") ends up fully replicated.
            [['my_stateful/foo', 'my_stateful/qux/*'],
             ['my_stateful/foo', 'my_stateful/bax/*']],
            ['0/my_stateful/foo'],
        ),
    ],
)
@_with_pet(nproc=_WORLD_SIZE)
def test_replication_glob(
    replication_globs: List[List[str]],
    expected_replicated_paths: List[str],
    tmp_path: Path,
) -> None:
    """Each rank takes a snapshot with its own replication globs; an entry is
    marked fully replicated only when every rank's globs select it."""
    dist.init_process_group(backend='gloo')
    app_state = {'my_stateful': _TestStateful()}
    snapshot = Snapshot.take(
        path=str(tmp_path),
        app_state=app_state,
        replicated=replication_globs[dist.get_rank()],
    )
    replicated_paths = [
        path
        for path, entry in snapshot.get_manifest().items()
        if is_fully_replicated_entry(entry)
    ]
    assert set(replicated_paths) == set(expected_replicated_paths)
class Dev(Cog):
    """Developer-only commands: blocklist management, app-command tree sync,
    maintenance mode, and command-usage statistics.

    NOTE(review): every decorator in this class was stripped to a bare call
    expression in the dump (e.g. ``(hidden=True, ...) async def bl``); they
    have been reconstructed as discord.py command/group decorators following
    the visible keyword arguments -- confirm exact names against the
    original project.
    """

    def __init__(self, bot: Quotient):
        self.bot = bot

    def cog_check(self, ctx: Context):
        # Every command in this cog is restricted to configured developers.
        return ctx.author.id in ctx.config.DEVS

    @commands.group(hidden=True, invoke_without_command=True)
    async def bl(self, ctx: Context):
        await ctx.send_help(ctx.command)

    @bl.command(name='add')
    async def bl_add(self, ctx: Context, item: discord.User | int, *, reason: str = None):
        """Block a user (discord.User argument) or a guild (bare int id)."""
        block_id_type = BlockIdType.USER if isinstance(item, discord.User) else BlockIdType.GUILD
        block_id = item.id if isinstance(item, discord.User) else item
        record = await BlockList.get_or_none(block_id=block_id, block_id_type=block_id_type)
        if record:
            return await ctx.error(f'{item} is already blocked.')
        await BlockList.create(block_id=block_id, block_id_type=block_id_type, reason=reason)
        # Keep the in-memory cache in sync with the database row.
        self.bot.cache.blocked_ids.add(block_id)
        await ctx.success(f'{item} has been blocked.')

    @bl.command(name='remove')
    async def bl_remove(self, ctx: Context, item: discord.User | int):
        """Remove a user/guild from the blocklist."""
        block_id = item.id if isinstance(item, discord.User) else item
        record = await BlockList.get_or_none(block_id=block_id)
        if not record:
            return await ctx.error(f'{item} is not blocked.')
        await record.delete()
        self.bot.cache.blocked_ids.remove(block_id)
        await ctx.success(f'{item} has been unblocked.')

    @commands.command(hidden=True)
    async def sync(
        self,
        ctx: commands.Context,
        guilds: commands.Greedy[discord.Object],
        spec: T.Optional[T.Literal['~', '*', '^']] = None,
    ) -> None:
        """Sync the app-command tree.

        Without guild arguments: ``~`` syncs the current guild, ``*`` copies
        global commands to the guild then syncs, ``^`` clears the guild's
        commands, and no spec syncs globally.  With guild arguments, sync
        each given guild and report how many succeeded.
        """
        if not guilds:
            if spec == '~':
                synced = await self.bot.tree.sync(guild=ctx.guild)
            elif spec == '*':
                self.bot.tree.copy_global_to(guild=ctx.guild)
                synced = await self.bot.tree.sync(guild=ctx.guild)
            elif spec == '^':
                self.bot.tree.clear_commands(guild=ctx.guild)
                await self.bot.tree.sync(guild=ctx.guild)
                synced = []
            else:
                synced = await self.bot.tree.sync()
            await ctx.send(
                f"Synced {len(synced)} commands {'globally' if spec is None else 'to the current guild.'}"
            )
            return
        ret = 0
        for guild in guilds:
            try:
                await self.bot.tree.sync(guild=guild)
            except discord.HTTPException:
                pass
            else:
                ret += 1
        await ctx.send(f'Synced the tree to {ret}/{len(guilds)}.')

    @commands.group(hidden=True, invoke_without_command=True)
    async def botupdate(self, ctx: Context):
        await ctx.send_help(ctx.command)

    @botupdate.command(name='on')
    async def botmaintenance_on(self, ctx: Context, *, msg: str = None):
        """Enter lockdown, then reboot after a 2-minute grace period unless
        lockdown was cancelled in the meantime."""
        self.bot.lockdown = True
        self.bot.lockdown_msg = msg
        await ctx.success('Now in maintenance mode')
        await asyncio.sleep(120)
        if not self.bot.lockdown:
            return await ctx.error('Lockdown mode has been cancelled')
        await ctx.success('Reloading...')
        self.bot.reboot()

    @botupdate.command(name='off')
    async def botmaintenance_off(self, ctx: Context):
        self.bot.lockdown, self.bot.lockdown_msg = False, None
        await ctx.success('Okay, stopped reload.')

    @commands.command(hidden=True)
    async def cmds(self, ctx: Context):
        """Show the 15 most-used commands plus overall invoke statistics."""
        total_uses = await Commands.all().count()
        records = await ctx.db.fetch(
            'SELECT cmd, COUNT(*) AS uses FROM commands GROUP BY cmd ORDER BY uses DESC LIMIT 15 '
        )
        table = PrettyTable()
        table.field_names = ['Command', 'Invoke Count']
        for record in records:
            table.add_row([record['cmd'], record['uses']])
        table = table.get_string()
        embed = self.bot.embed(ctx, title=f'Command Usage ({total_uses})')
        embed.description = f'```{table}```'
        cmds = sum(1 for i in self.bot.walk_commands())
        embed.set_footer(
            text='Total Commands: {} | Invoke rate per minute: {}'.format(
                cmds, round(get_ipm(ctx.bot), 2)
            )
        )
        await ctx.send(embed=embed)

    @commands.group(hidden=True, invoke_without_command=True, name='history')
    async def command_history(self, ctx):
        """Most recent command invocations; '[!]' marks failed runs."""
        query = 'SELECT\n CASE failed\n WHEN TRUE THEN cmd || \' [!]\'\n ELSE cmd\n END AS "cmd",\n to_char(used_at, \'Mon DD HH12:MI:SS AM\') AS "invoked",\n user_id,\n guild_id\n FROM commands\n ORDER BY used_at DESC\n LIMIT 15;\n '
        await tabulate_query(ctx, query)

    @command_history.command(name='for')
    async def command_history_for(self, ctx, days: T.Optional[int] = 7, *, command: str):
        """Per-guild success/failure totals for one command over N days."""
        query = 'SELECT *, t.success + t.failed AS "total"\n FROM (\n SELECT guild_id,\n SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",\n SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"\n FROM commands\n WHERE cmd=$1\n AND used_at > (CURRENT_TIMESTAMP - $2::interval)\n GROUP BY guild_id\n ) AS t\n ORDER BY "total" DESC\n LIMIT 30;\n '
        await tabulate_query(ctx, query, command, datetime.timedelta(days=days))

    @command_history.command(name='guild', aliases=['server'])
    async def command_history_guild(self, ctx, guild_id: int):
        """Recent command invocations within one guild."""
        query = 'SELECT\n CASE failed\n WHEN TRUE THEN cmd || \' [!]\'\n ELSE cmd\n END AS "cmd",\n channel_id,\n user_id,\n used_at\n FROM commands\n WHERE guild_id=$1\n ORDER BY used_at DESC\n LIMIT 15;\n '
        await tabulate_query(ctx, query, guild_id)

    @command_history.command(name='user', aliases=['member'])
    async def command_history_user(self, ctx, user_id: int):
        """Recent command invocations by one user."""
        query = 'SELECT\n CASE failed\n WHEN TRUE THEN cmd || \' [!]\'\n ELSE cmd\n END AS "cmd",\n guild_id,\n used_at\n FROM commands\n WHERE user_id=$1\n ORDER BY used_at DESC\n LIMIT 20;\n '
        await tabulate_query(ctx, query, user_id)

    @command_history.command(name='cog')
    async def command_history_cog(self, ctx, days: T.Optional[int] = 7, *, cog: str = None):
        """Success/failure totals for every command of a cog over N days.

        NOTE(review): when ``cog`` is None this falls through to the query
        with an unresolved cog object, as in the dump -- the original likely
        had an all-commands fallback branch; confirm.
        """
        interval = datetime.timedelta(days=days)
        if cog is not None:
            cog = self.bot.get_cog(cog)
            if cog is None:
                return await ctx.send(f'Unknown cog: {cog}')
        query = 'SELECT *, t.success + t.failed AS "total"\n FROM (\n SELECT command,\n SUM(CASE WHEN failed THEN 0 ELSE 1 END) AS "success",\n SUM(CASE WHEN failed THEN 1 ELSE 0 END) AS "failed"\n FROM commands\n WHERE cmd = any($1::text[])\n AND used_at > (CURRENT_TIMESTAMP - $2::interval)\n GROUP BY cmd\n ) AS t\n ORDER BY "total" DESC\n LIMIT 30;\n '
        return await tabulate_query(
            ctx, query, [c.qualified_name for c in cog.walk_commands()], interval
        )
@ddt
class TestBernoulli(QiskitAquaTestCase):
    """Amplitude-estimation tests on the single-qubit Bernoulli A/Q operators.

    NOTE(review): the ``@ddt`` / ``@idata`` / ``@unpack`` / ``@data``
    decorators below were stripped to bare tuple/list expressions in the
    dump and have been reconstructed -- confirm against the original module.
    """

    def setUp(self):
        super().setUp()
        warnings.filterwarnings(action='ignore', category=DeprecationWarning)
        # Seeded simulators so the reference estimates below are reproducible.
        self._statevector = QuantumInstance(
            backend=BasicAer.get_backend('statevector_simulator'),
            seed_simulator=2, seed_transpiler=2)
        self._unitary = QuantumInstance(
            backend=BasicAer.get_backend('unitary_simulator'),
            shots=1, seed_simulator=42, seed_transpiler=91)

        def qasm(shots=100):
            return QuantumInstance(
                backend=BasicAer.get_backend('qasm_simulator'),
                shots=shots, seed_simulator=2, seed_transpiler=2)
        self._qasm = qasm

    def tearDown(self):
        super().tearDown()
        warnings.filterwarnings(action='always', category=DeprecationWarning)

    @idata([[0.2, AmplitudeEstimation(2), {'estimation': 0.5, 'mle': 0.2}], [0.4, AmplitudeEstimation(4), {'estimation': 0.30866, 'mle': 0.4}], [0.82, AmplitudeEstimation(5), {'estimation': 0.85355, 'mle': 0.82}], [0.49, AmplitudeEstimation(3), {'estimation': 0.5, 'mle': 0.49}], [0.2, MaximumLikelihoodAmplitudeEstimation(2), {'estimation': 0.2}], [0.4, MaximumLikelihoodAmplitudeEstimation(4), {'estimation': 0.4}], [0.82, MaximumLikelihoodAmplitudeEstimation(5), {'estimation': 0.82}], [0.49, MaximumLikelihoodAmplitudeEstimation(3), {'estimation': 0.49}], [0.2, IterativeAmplitudeEstimation(0.1, 0.1), {'estimation': 0.2}], [0.4, IterativeAmplitudeEstimation(1e-05, 0.01), {'estimation': 0.4}], [0.82, IterativeAmplitudeEstimation(1e-05, 0.05), {'estimation': 0.82}], [0.49, IterativeAmplitudeEstimation(0.001, 0.01), {'estimation': 0.49}]])
    @unpack
    def test_statevector(self, prob, qae, expect):
        """Exact (statevector) runs must reproduce the tabulated estimates."""
        qae.a_factory = BernoulliAFactory(prob)
        qae.q_factory = BernoulliQFactory(qae.a_factory)
        result = qae.run(self._statevector)
        for key, value in expect.items():
            self.assertAlmostEqual(value, result[key], places=3,
                                   msg='estimate `{}` failed'.format(key))

    @idata([[0.2, 100, AmplitudeEstimation(4), {'estimation': 0.14644, 'mle': 0.193888}], [0.0, 1000, AmplitudeEstimation(2), {'estimation': 0.0, 'mle': 0.0}], [0.8, 10, AmplitudeEstimation(7), {'estimation': 0.79784, 'mle': 0.801612}], [0.2, 100, MaximumLikelihoodAmplitudeEstimation(4), {'estimation': 0.199606}], [0.4, 1000, MaximumLikelihoodAmplitudeEstimation(6), {'estimation': 0.399488}], [0.2, 100, IterativeAmplitudeEstimation(0.0001, 0.01), {'estimation': 0.199987}], [0.4, 1000, IterativeAmplitudeEstimation(0.001, 0.05), {'estimation': 0.400071}], [0.8, 10, IterativeAmplitudeEstimation(0.1, 0.05), {'estimation': 0.811711}]])
    @unpack
    def test_qasm(self, prob, shots, qae, expect):
        """Sampled (qasm) runs with seeded simulators match the references."""
        qae.a_factory = BernoulliAFactory(prob)
        qae.q_factory = BernoulliQFactory(qae.a_factory)
        result = qae.run(self._qasm(shots))
        for key, value in expect.items():
            self.assertAlmostEqual(value, result[key], places=3,
                                   msg='estimate `{}` failed'.format(key))

    @data(True, False)
    def test_qae_circuit(self, efficient_circuit):
        """QAE's constructed circuit equals a hand-built phase-estimation
        circuit (both for the efficient Bernoulli Q and the generic Q)."""
        prob = 0.5
        for m in range(2, 7):
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            qae = AmplitudeEstimation(m, a_factory=BernoulliAFactory(prob))
            angle = 2 * np.arcsin(np.sqrt(prob))
            q_ancilla = QuantumRegister(m, 'a')
            q_objective = QuantumRegister(1, 'q')
            circuit = QuantumCircuit(q_ancilla, q_objective)
            for i in range(m):
                circuit.h(q_ancilla[i])
            circuit.ry(angle, q_objective)
            if efficient_circuit:
                # For Bernoulli, Q^(2^k) is a single controlled RY rotation.
                qae.q_factory = BernoulliQFactory(qae.a_factory)
                for power in range(m):
                    circuit.cry((2 * (2 ** power)) * angle,
                                q_ancilla[power], q_objective[0])
            else:
                q_factory = QFactory(qae.a_factory, i_objective=0)
                for power in range(m):
                    for _ in range(2 ** power):
                        q_factory.build_controlled(circuit, q_objective, q_ancilla[power])
            warnings.filterwarnings('always', category=DeprecationWarning)
            iqft = QFT(m, do_swaps=False).inverse().reverse_bits()
            circuit.append(iqft.to_instruction(), q_ancilla)
            expected_unitary = self._unitary.execute(circuit).get_unitary()
            actual_circuit = qae.construct_circuit(measurement=False)
            actual_unitary = self._unitary.execute(actual_circuit).get_unitary()
            diff = np.sum(np.abs(actual_unitary - expected_unitary))
            self.assertAlmostEqual(diff, 0)

    @data(True, False)
    def test_iqae_circuits(self, efficient_circuit):
        """IQAE's Q^k circuit equals k manual applications of Q."""
        prob = 0.5
        for k in range(2, 7):
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            qae = IterativeAmplitudeEstimation(0.01, 0.05, a_factory=BernoulliAFactory(prob))
            angle = 2 * np.arcsin(np.sqrt(prob))
            q_objective = QuantumRegister(1, 'q')
            circuit = QuantumCircuit(q_objective)
            circuit.ry(angle, q_objective)
            if efficient_circuit:
                qae.q_factory = BernoulliQFactory(qae.a_factory)
                circuit.ry((2 * k) * angle, q_objective[0])
            else:
                q_factory = QFactory(qae.a_factory, i_objective=0)
                for _ in range(k):
                    q_factory.build(circuit, q_objective)
            warnings.filterwarnings('always', category=DeprecationWarning)
            expected_unitary = self._unitary.execute(circuit).get_unitary()
            actual_circuit = qae.construct_circuit(k, measurement=False)
            actual_unitary = self._unitary.execute(actual_circuit).get_unitary()
            diff = np.sum(np.abs(actual_unitary - expected_unitary))
            self.assertAlmostEqual(diff, 0)

    @data(True, False)
    def test_mlae_circuits(self, efficient_circuit):
        """MLAE's circuit list (A, Q^1 A, Q^2 A, ...) matches manual builds."""
        prob = 0.5
        for k in range(1, 7):
            warnings.filterwarnings('ignore', category=DeprecationWarning)
            qae = MaximumLikelihoodAmplitudeEstimation(k, a_factory=BernoulliAFactory(prob))
            angle = 2 * np.arcsin(np.sqrt(prob))
            circuits = []
            q_objective = QuantumRegister(1, 'q')
            circuit = QuantumCircuit(q_objective)
            circuit.ry(angle, q_objective)
            circuits += [circuit]
            for power in range(k):
                q_objective = QuantumRegister(1, 'q')
                circuit = QuantumCircuit(q_objective)
                circuit.ry(angle, q_objective)
                if efficient_circuit:
                    qae.q_factory = BernoulliQFactory(qae.a_factory)
                    circuit.ry((2 * (2 ** power)) * angle, q_objective[0])
                else:
                    q_factory = QFactory(qae.a_factory, i_objective=0)
                    for _ in range(2 ** power):
                        q_factory.build(circuit, q_objective)
                # NOTE(review): this append was missing from the dump but is
                # required for the zip comparison below to cover every power;
                # restored per the upstream test -- confirm.
                circuits += [circuit]
            warnings.filterwarnings('always', category=DeprecationWarning)
            actual_circuits = qae.construct_circuits(measurement=False)
            for actual, expected in zip(actual_circuits, circuits):
                expected_unitary = self._unitary.execute(expected).get_unitary()
                actual_unitary = self._unitary.execute(actual).get_unitary()
                diff = np.sum(np.abs(actual_unitary - expected_unitary))
                self.assertAlmostEqual(diff, 0)
def _perform_date_checks(self, date_checks): errors = {} for (model_class, lookup_type, field, unique_for) in date_checks: lookup_kwargs = {} date = getattr(self, unique_for) if (date is None): continue if (lookup_type == 'date'): lookup_kwargs[('%s__day' % unique_for)] = date.day lookup_kwargs[('%s__month' % unique_for)] = date.month lookup_kwargs[('%s__year' % unique_for)] = date.year else: lookup_kwargs[('%s__%s' % (unique_for, lookup_type))] = getattr(date, lookup_type) lookup_kwargs[field] = getattr(self, field) qs = model_class._default_manager.filter(**lookup_kwargs) if ((not self._state.adding) and (self.pk is not None)): qs = qs.exclude(pk=self.pk) if qs.exists(): errors.setdefault(field, []).append(self.date_error_message(lookup_type, field, unique_for)) return errors
class EchoesRemoteConnector(PrimeRemoteConnector):
    """Remote-memory connector for Metroid Prime 2: Echoes multiworld.

    Reads/writes game state through ``executor`` using struct offsets taken
    from ``version`` (a specific game build's symbol layout).
    """

    def __init__(self, version: EchoesDolVersion, executor: MemoryOperationExecutor):
        super().__init__(version, executor)

    def _asset_id_format(self):
        # Echoes asset ids are big-endian unsigned 32-bit integers.
        return '>I'

    def multiworld_magic_item(self) -> ItemResourceInfo:
        return self.game.resource_database.get_item(echoes_items.MULTIWORLD_ITEM)

    async def current_game_status(self) -> tuple[(bool, (Region | None))]:
        """Return (has_pending_op, current_region_or_None) in one batch read."""
        cstate_manager_global = self.version.cstate_manager_global
        asset_id_size = struct.calcsize(self._asset_id_format())
        # NOTE(review): 4 (mlvl inside the game-state struct) and 5372
        # (CPlayer pointer inside CStateManager) are offsets taken from the
        # game binary -- confirm against this version's symbol map.
        mlvl_offset = 4
        cplayer_offset = 5372
        memory_ops = [
            # Current world (MLVL) asset id.
            MemoryOperation(self.version.game_state_pointer, offset=mlvl_offset, read_byte_count=asset_id_size),
            # Single "pending operation" flag byte.
            MemoryOperation((cstate_manager_global + 2), read_byte_count=1),
            # CPlayer pointer (non-null while in-game).
            MemoryOperation((cstate_manager_global + cplayer_offset), offset=0, read_byte_count=4),
        ]
        results = (await self.executor.perform_memory_operations(memory_ops))
        pending_op_byte = results[memory_ops[1]]
        has_pending_op = (pending_op_byte != b'\x00')
        return (has_pending_op, self._current_status_world(results.get(memory_ops[0]), results.get(memory_ops[2])))

    async def _memory_op_for_items(self, items: list[ItemResourceInfo]) -> list[MemoryOperation]:
        """One 8-byte read (amount + capacity) per requested powerup."""
        # NOTE(review): 5388 targets CPlayerState inside CStateManager --
        # confirm against the version's symbol map.
        player_state_pointer = (self.version.cstate_manager_global + 5388)
        return [MemoryOperation(address=player_state_pointer, offset=_echoes_powerup_offset(item.extra['item_id']), read_byte_count=8) for item in items]

    async def _patches_for_pickup(self, provider_name: str, pickup: PickupEntry, inventory: Inventory):
        """Build DOL patches granting the pickup's resources, plus the HUD
        message text shown to the receiving player."""
        (item_name, resources_to_give) = self._resources_to_give_for_pickup(pickup, inventory)
        self.logger.debug(f'Resource changes for {pickup.name} from {provider_name}: {resources_to_give}')
        patches = [all_prime_dol_patches.adjust_item_amount_and_capacity_patch(self.version.powerup_functions, self.version.game, item.extra['item_id'], delta) for (item, delta) in resources_to_give.as_resource_gain()]
        return (patches, format_received_item(item_name, provider_name))

    def _resources_to_give_for_pickup(self, pickup: PickupEntry, inventory: Inventory) -> tuple[(str, ResourceCollection)]:
        # Echoes tracks the item-completion 'Percent' separately; remove it
        # from the grant computed by the base class.
        (item_name, resources_to_give) = super()._resources_to_give_for_pickup(pickup, inventory)
        resources_to_give.remove_resource(self.game.resource_database.get_item('Percent'))
        return (item_name, resources_to_give)

    async def get_inventory(self) -> Inventory:
        """Base inventory plus an 'ObjectCount' pseudo-item counting the set
        bits of a 1024-bit in-game bit array."""
        inventory = (await super().get_inventory())
        # Read the 128-byte bit array in two passes to keep each read small.
        # NOTE(review): address 2240+4+12 inside CStateManager -- confirm
        # against the version's symbol map.
        PASSES = 2
        arr_raws = [(await self.executor.perform_single_memory_operation(MemoryOperation(address=(((self.version.cstate_manager_global + 2240) + 4) + 12), read_byte_count=(4 * (32 // PASSES)), offset=((i * 4) * (32 // PASSES))))) for i in range(PASSES)]
        # 32 big-endian uint32 words == 1024 bits.
        arr = struct.unpack('>32L', b''.join(arr_raws))
        count = 0
        for i in range(1024):
            f0 = arr[(i // 32)]
            f4 = (1 << (i % 32))
            if ((f4 & f0) != 0):
                count += 1
        inventory[self.game.resource_database.get_item('ObjectCount')] = InventoryItem(count, 1024)
        return inventory
class HBaseCollector(diamond.collector.Collector):
    """Diamond collector that parses HBase context-file metric logs.

    Each configured glob is expanded; every matching file is parsed, its
    values published, and the file truncated so metrics are not re-published
    on the next cycle.
    """

    # One metrics line: "<timestamp> <context name>: k=v, k=v, ...".
    re_log = re.compile('^(?P<timestamp>\\d+) (?P<name>\\S+): (?P<metrics>.*)$')

    def get_default_config_help(self):
        config_help = super(HBaseCollector, self).get_default_config_help()
        config_help.update({'metrics': 'List of paths to process metrics from'})
        return config_help

    def get_default_config(self):
        config = super(HBaseCollector, self).get_default_config()
        config.update({'path': 'hbase', 'metrics': ['/var/log/hbase/*.metrics']})
        return config

    def collect(self):
        metrics = self.config['metrics']
        # Config may supply a single path instead of a list.
        if not isinstance(metrics, list):
            metrics = [str(metrics)]
        for pattern in metrics:
            for filename in glob.glob(pattern):
                self.collect_from(filename)

    def collect_from(self, filename):
        """Parse one metrics file, publish its values, then truncate it.

        Returns False when the file is unreadable, None otherwise.
        """
        if not os.access(filename, os.R_OK):
            self.log.error('HBaseCollector unable to read "%s"', filename)
            return False
        # 'r+' so the file can be truncated in place after a full read; the
        # context manager guarantees the handle is closed even if a publish
        # raises (the original leaked the descriptor in that case).
        with open(filename, 'r+') as fd:
            for line in fd:
                match = self.re_log.match(line)
                if not match:
                    continue
                metrics = {}
                data = match.groupdict()
                for metric in data['metrics'].split(','):
                    metric = metric.strip()
                    if '=' in metric:
                        key, value = metric.split('=', 1)
                        metrics[key] = value
                for metric in metrics.keys():
                    try:
                        # Context-specific metric paths include identifying
                        # tags (host, process, port, ...) pulled from the
                        # same line.
                        if data['name'] == 'jvm.metrics':
                            path = self.get_metric_path('.'.join([data['name'], metrics['hostName'].replace('.', '_'), metrics['processName'].replace(' ', '_'), metric]))
                        elif data['name'] == 'mapred.job':
                            path = self.get_metric_path('.'.join([data['name'], metrics['hostName'].replace('.', '_'), metrics['group'].replace(' ', '_'), metrics['counter'].replace(' ', '_'), metric]))
                        elif data['name'] == 'rpc.metrics':
                            if metric == 'port':
                                # 'port' is a tag, not a measurement.
                                continue
                            path = self.get_metric_path('.'.join([data['name'], metrics['hostName'].replace('.', '_'), metrics['port'], metric]))
                        else:
                            path = self.get_metric_path('.'.join([data['name'], metric]))
                        value = float(metrics[metric])
                        self.publish_metric(Metric(path, value, timestamp=(int(data['timestamp']) / 1000)))
                    except ValueError:
                        # Non-numeric values (hostName, processName, ...) are
                        # tags, not metrics -- skip them.
                        pass
            # Truncate so already-published metrics are not re-read next run.
            fd.seek(0)
            fd.truncate()
@resource('/v1/user/starred')
class StarredRepositoryList(ApiResource):
    """Operations on the authenticated user's starred repositories.

    NOTE(review): every decorator here was stripped to a bare expression in
    the dump (e.g. ``('listStarredRepos') _args() _user_admin()``); they are
    reconstructed following Quay's API-resource conventions -- confirm the
    exact names against the original project.
    """

    schemas = {'NewStarredRepository': {'type': 'object', 'required': ['namespace', 'repository'], 'properties': {'namespace': {'type': 'string', 'description': 'Namespace in which the repository belongs'}, 'repository': {'type': 'string', 'description': 'Repository name'}}}}

    @nickname('listStarredRepos')
    @parse_args()
    @require_user_admin()
    @page_support()
    def get(self, page_token, parsed_args):
        """List the user's starred repositories, paginated."""
        repo_query = model.repository.get_user_starred_repositories(get_authenticated_user())
        (repos, next_page_token) = model.modelutil.paginate(
            repo_query, RepositoryTable, page_token=page_token, limit=REPOS_PER_PAGE)

        def repo_view(repo_obj):
            # Minimal serialized view of a repository row.
            return {
                'namespace': repo_obj.namespace_user.username,
                'name': repo_obj.name,
                'description': repo_obj.description,
                'is_public': model.repository.is_repository_public(repo_obj),
            }
        return ({'repositories': [repo_view(repo) for repo in repos]}, next_page_token)

    @require_scope(scopes.READ_REPO)
    @nickname('createStar')
    @validate_json_request('NewStarredRepository')
    @require_user_admin()
    def post(self):
        """Star a repository; an already-existing star is silently ignored."""
        user = get_authenticated_user()
        req = request.get_json()
        namespace = req['namespace']
        repository = req['repository']
        repo = model.repository.get_repository(namespace, repository)
        if repo:
            try:
                model.repository.star_repository(user, repo)
            except IntegrityError:
                # Duplicate star -- treat as success (idempotent).
                pass
        return ({'namespace': namespace, 'repository': repository}, 201)
@pytest.mark.parametrize(
    # NOTE(review): the "@pytest.mark" prefix was stripped in the dump and
    # has been reconstructed.
    ('test_input', 'expected'),
    [
        # mailto references are obfuscated into an HTML raw node.
        (nodes.reference('', text='', refuri='mailto:'),
         '<raw format="html" xml:space="preserve">user&#32;&#97;t&#32;example.com</raw>'),
        # Internal references pass through unchanged.
        (nodes.reference('', text='Introduction', refid='introduction'),
         '<reference refid="introduction">Introduction</reference>'),
    ],
)
def test_generate_list_url(test_input, expected):
    """_mask_email obfuscates mailto refs and leaves internal refs alone."""
    out = pep_zero._mask_email(test_input)
    assert str(out) == expected
def collect_results_cpu(result_part, size, tmpdir=None):
    """Gather per-rank partial results on rank 0 via a shared temp directory.

    Each rank dumps ``result_part`` to ``tmpdir`` (created by rank 0 and
    broadcast to the other ranks when not given); rank 0 loads every part,
    interleaves them back into dataset order, trims to ``size`` and removes
    the directory.  Non-zero ranks return None.

    NOTE(review): assumes all ranks share a filesystem for ``tmpdir`` and
    that CUDA is available for the broadcast tensor -- confirm.
    """
    (rank, world_size) = get_dist_info()
    if (tmpdir is None):
        MAX_LEN = 512
        # Fixed-length uint8 tensor padded with ASCII spaces (32) so the
        # broadcast has a deterministic size; padding is rstrip'ed after
        # decoding.
        dir_tensor = torch.full((MAX_LEN,), 32, dtype=torch.uint8, device='cuda')
        if (rank == 0):
            mmcv.mkdir_or_exist('.dist_test')
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            tmpdir = torch.tensor(bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        # All ranks participate; rank 0's directory name wins.
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # Every rank writes its own shard, then waits for the others.
    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    dist.barrier()
    if (rank != 0):
        return None
    else:
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_result = mmcv.load(part_file)
            if part_result:
                part_list.append(part_result)
        # Interleave shards (samples were dealt round-robin across ranks),
        # restoring the original dataset ordering.
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # Drop padding samples the distributed sampler added to equalize
        # shard sizes.
        ordered_results = ordered_results[:size]
        shutil.rmtree(tmpdir)
        return ordered_results
@nox.session(python=USE_PYTHON_VERSIONS)
@nox.parametrize('command_a', install_commands)
@nox.parametrize('command_b', install_commands)
def session_cross_pkg_resources_pkgutil(session, command_a, command_b):
    """Check a pkg_resources-style and a pkgutil-style namespace package
    interoperate under every pairing of install commands.

    NOTE(review): the stripped decorator markers were reconstructed as nox
    session/parametrize decorators -- confirm against the original file.
    """
    session.install('--upgrade', 'setuptools', 'pip')
    install_packages(session, 'pkg_resources/pkg_a', 'pkgutil/pkg_b', command_a, command_b)
    session.run('python', 'verify_packages.py')
def distort_color(image):
    """Apply one randomly chosen ordering of brightness/saturation/contrast
    distortions to ``image`` (a TF tensor), or leave it unchanged.

    NOTE(review): the fn1..fn6 closures read the *current* binding of
    ``image``, which is rebound by every tf.cond below -- the chain is
    deliberately order-sensitive; do not reorder.  Assumes random_num(6)
    yields at most one matching value per call -- TODO confirm its range.
    """

    def fn1():
        return contrast(saturation(brightness(image)))

    def fn2():
        return saturation(contrast(brightness(image)))

    def fn3():
        return contrast(brightness(saturation(image)))

    def fn4():
        return brightness(contrast(saturation(image)))

    def fn5():
        return saturation(brightness(contrast(image)))

    def fn6():
        return brightness(saturation(contrast(image)))

    def fn():
        # Fallback branch: pass the (current) image through unchanged.
        return image
    cond = random_num(6)
    # Exactly one of the six orderings fires, depending on the sampled value.
    image = tf.cond(tf.equal(cond, 1), fn1, fn)
    image = tf.cond(tf.equal(cond, 2), fn2, fn)
    image = tf.cond(tf.equal(cond, 3), fn3, fn)
    image = tf.cond(tf.equal(cond, 4), fn4, fn)
    image = tf.cond(tf.equal(cond, 5), fn5, fn)
    image = tf.cond(tf.equal(cond, 6), fn6, fn)
    return image
class BaseInstance(Instance):
    """Concrete Instance carrying input/output payloads and an optional
    prediction slot."""

    def __init__(self, instance_id, weight, input, output):
        super().__init__(instance_id, weight, input, output)

    def size(self):
        # Size is defined as the length of the input sequence.
        return len(self.input)

    def duplicate(self):
        """Return a shallow copy sharing the input/output references."""
        return BaseInstance(self.instance_id, self.weight, self.input, self.output)

    def removeOutput(self):
        self.output = None

    def removePrediction(self):
        self.prediction = None

    def get_input(self):
        return self.input

    def get_output(self):
        return self.output

    def get_prediction(self):
        return self.prediction

    def set_prediction(self, prediction):
        self.prediction = prediction

    def has_output(self):
        # Identity check instead of `!= None`: the original comparison breaks
        # for payloads overriding __eq__ elementwise (e.g. numpy arrays).
        return self.output is not None

    def has_prediction(self):
        return self.prediction is not None

    def __str__(self):
        return f'input:{self.input}\toutput:{self.output} is_labeled:{self.is_labeled}'
class ResnetUtilsTest(tf.test.TestCase):
    """Tests for resnet_utils (TF1 slim): subsampling, the 'SAME'-padding
    conv2d_same helper, and dense block stacking with atrous rates."""

    def testSubsampleThreeByThree(self):
        # Stride-2 subsampling of a 3x3 grid keeps the four corner values.
        x = tf.reshape(tf.to_float(tf.range(9)), [1, 3, 3, 1])
        x = resnet_utils.subsample(x, 2)
        expected = tf.reshape(tf.constant([0, 2, 6, 8]), [1, 2, 2, 1])
        with self.test_session():
            self.assertAllClose(x.eval(), expected.eval())

    def testSubsampleFourByFour(self):
        x = tf.reshape(tf.to_float(tf.range(16)), [1, 4, 4, 1])
        x = resnet_utils.subsample(x, 2)
        expected = tf.reshape(tf.constant([0, 2, 8, 10]), [1, 2, 2, 1])
        with self.test_session():
            self.assertAllClose(x.eval(), expected.eval())

    def testConv2DSameEven(self):
        # n: input spatial size; n2: size after stride-2 downsampling.
        (n, n2) = (4, 2)
        x = create_test_input(1, n, n, 1)
        w = create_test_input(1, 3, 3, 1)
        w = tf.reshape(w, [3, 3, 1, 1])
        # Pre-create the conv variables so every scoped conv below reuses
        # the same deterministic weights/biases.
        tf.get_variable('Conv/weights', initializer=w)
        tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
        tf.get_variable_scope().reuse_variables()
        y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
        y1_expected = tf.to_float([[14, 28, 43, 26], [28, 48, 66, 37], [43, 66, 84, 46], [26, 37, 46, 22]])
        y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
        y2 = resnet_utils.subsample(y1, 2)
        y2_expected = tf.to_float([[14, 43], [43, 84]])
        y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
        # conv2d_same must equal stride-1 conv followed by subsampling.
        y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
        y3_expected = y2_expected
        # For even input sizes, plain stride-2 'SAME' conv differs.
        y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
        y4_expected = tf.to_float([[48, 37], [37, 22]])
        y4_expected = tf.reshape(y4_expected, [1, n2, n2, 1])
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            self.assertAllClose(y1.eval(), y1_expected.eval())
            self.assertAllClose(y2.eval(), y2_expected.eval())
            self.assertAllClose(y3.eval(), y3_expected.eval())
            self.assertAllClose(y4.eval(), y4_expected.eval())

    def testConv2DSameOdd(self):
        (n, n2) = (5, 3)
        x = create_test_input(1, n, n, 1)
        w = create_test_input(1, 3, 3, 1)
        w = tf.reshape(w, [3, 3, 1, 1])
        tf.get_variable('Conv/weights', initializer=w)
        tf.get_variable('Conv/biases', initializer=tf.zeros([1]))
        tf.get_variable_scope().reuse_variables()
        y1 = slim.conv2d(x, 1, [3, 3], stride=1, scope='Conv')
        y1_expected = tf.to_float([[14, 28, 43, 58, 34], [28, 48, 66, 84, 46], [43, 66, 84, 102, 55], [58, 84, 102, 120, 64], [34, 46, 55, 64, 30]])
        y1_expected = tf.reshape(y1_expected, [1, n, n, 1])
        y2 = resnet_utils.subsample(y1, 2)
        y2_expected = tf.to_float([[14, 43, 34], [43, 84, 55], [34, 55, 30]])
        y2_expected = tf.reshape(y2_expected, [1, n2, n2, 1])
        y3 = resnet_utils.conv2d_same(x, 1, 3, stride=2, scope='Conv')
        y3_expected = y2_expected
        # For odd input sizes all three stride-2 variants agree.
        y4 = slim.conv2d(x, 1, [3, 3], stride=2, scope='Conv')
        y4_expected = y2_expected
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            self.assertAllClose(y1.eval(), y1_expected.eval())
            self.assertAllClose(y2.eval(), y2_expected.eval())
            self.assertAllClose(y3.eval(), y3_expected.eval())
            self.assertAllClose(y4.eval(), y4_expected.eval())

    def _resnet_plain(self, inputs, blocks, output_stride=None, scope=None):
        # Stack the blocks without the resnet head/tail, collecting conv
        # end-points from the 'end_points' collection.
        with tf.variable_scope(scope, values=[inputs]):
            with slim.arg_scope([slim.conv2d], outputs_collections='end_points'):
                net = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
                end_points = dict(tf.get_collection('end_points'))
        return (net, end_points)

    def testEndPointsV2(self):
        """Verify the naming of the end points of a tiny two-block network."""
        bottleneck = resnet_v2.bottleneck
        blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]), resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 1)])]
        inputs = create_test_input(2, 32, 16, 3)
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            (_, end_points) = self._resnet_plain(inputs, blocks, scope='tiny')
        # Only the first unit of each block has a (projection) shortcut conv.
        expected = ['tiny/block1/unit_1/bottleneck_v2/shortcut', 'tiny/block1/unit_1/bottleneck_v2/conv1', 'tiny/block1/unit_1/bottleneck_v2/conv2', 'tiny/block1/unit_1/bottleneck_v2/conv3', 'tiny/block1/unit_2/bottleneck_v2/conv1', 'tiny/block1/unit_2/bottleneck_v2/conv2', 'tiny/block1/unit_2/bottleneck_v2/conv3', 'tiny/block2/unit_1/bottleneck_v2/shortcut', 'tiny/block2/unit_1/bottleneck_v2/conv1', 'tiny/block2/unit_1/bottleneck_v2/conv2', 'tiny/block2/unit_1/bottleneck_v2/conv3', 'tiny/block2/unit_2/bottleneck_v2/conv1', 'tiny/block2/unit_2/bottleneck_v2/conv2', 'tiny/block2/unit_2/bottleneck_v2/conv3']
        self.assertItemsEqual(expected, end_points)

    def _stack_blocks_nondense(self, net, blocks):
        """Reference implementation: stack blocks without atrous (rate=1)
        dense extraction -- units subsample normally."""
        for block in blocks:
            with tf.variable_scope(block.scope, 'block', [net]):
                for (i, unit) in enumerate(block.args):
                    (depth, depth_bottleneck, stride) = unit
                    with tf.variable_scope(('unit_%d' % (i + 1)), values=[net]):
                        net = block.unit_fn(net, depth=depth, depth_bottleneck=depth_bottleneck, stride=stride, rate=1)
        return net

    def _atrousValues(self, bottleneck):
        """For every output_stride, dense (atrous) extraction followed by
        subsampling must equal plain strided extraction."""
        blocks = [resnet_utils.Block('block1', bottleneck, [(4, 1, 1), (4, 1, 2)]), resnet_utils.Block('block2', bottleneck, [(8, 2, 1), (8, 2, 2)]), resnet_utils.Block('block3', bottleneck, [(16, 4, 1), (16, 4, 2)]), resnet_utils.Block('block4', bottleneck, [(32, 8, 1), (32, 8, 1)])]
        nominal_stride = 8
        # Odd sizes deliberately exercise boundary effects.
        height = 30
        width = 31
        with slim.arg_scope(resnet_utils.resnet_arg_scope()):
            with slim.arg_scope([slim.batch_norm], is_training=False):
                for output_stride in [1, 2, 4, 8, None]:
                    with tf.Graph().as_default():
                        with self.test_session() as sess:
                            tf.set_random_seed(0)
                            inputs = create_test_input(1, height, width, 3)
                            output = resnet_utils.stack_blocks_dense(inputs, blocks, output_stride)
                            if (output_stride is None):
                                factor = 1
                            else:
                                factor = (nominal_stride // output_stride)
                            output = resnet_utils.subsample(output, factor)
                            # Reuse the same weights for the reference graph.
                            tf.get_variable_scope().reuse_variables()
                            expected = self._stack_blocks_nondense(inputs, blocks)
                            sess.run(tf.global_variables_initializer())
                            (output, expected) = sess.run([output, expected])
                            self.assertAllClose(output, expected, atol=0.0001, rtol=0.0001)

    def testAtrousValuesBottleneck(self):
        self._atrousValues(resnet_v2.bottleneck)
def test_while_with_if_complex() -> None:
    """Reachability in a while body mixing if/elif/else with continue/break.

    The statement after ``continue`` ("print('unr')") and the loop-body tail
    after the if/elif/else ("print('bye')", cut off by the ``break`` branch
    analysis) must be unreachable; everything else, including the post-loop
    "print('after')", stays reachable.
    """
    src = "\n while n > 10:\n print(n)\n if n > 20:\n print('hi')\n elif n > 25:\n continue\n print('unr')\n else:\n break\n print('bye')\n print('after')\n "
    cfg = build_cfg(src)
    (unreachable, reachable) = extract_blocks(cfg)
    assert ({"print('bye')", "print('unr')"} == unreachable)
    # '' is the implicit empty exit block of the CFG.
    assert ({'n > 10', 'print(n)', '', "print('hi')", 'break', "print('after')", 'continue', 'n > 25'} == reachable)
class SelfAttention(nn.Module):
    """Single self-attention block: separate Q/K/V input projections feed a
    (optionally downsampled/gated) multi-head attention, followed by a
    residual connection and layer normalization."""

    def __init__(self, out_channels, embed_dim, num_heads, project_input=False, gated=False, downsample=False):
        super().__init__()
        self.attention = DownsampledMultiHeadAttention(
            out_channels,
            embed_dim,
            num_heads,
            dropout=0,
            bias=True,
            project_input=project_input,
            gated=gated,
            downsample=downsample,
        )
        # Independent linear projections producing query/key/value.
        self.in_proj_q = Linear(out_channels, embed_dim)
        self.in_proj_k = Linear(out_channels, embed_dim)
        self.in_proj_v = Linear(out_channels, embed_dim)
        self.ln = LayerNorm(out_channels)

    def forward(self, x):
        shortcut = x
        q = self.in_proj_q(x)
        k = self.in_proj_k(x)
        v = self.in_proj_v(x)
        # Causal attention with the scalar-bias trick.
        attn_out, _ = self.attention(q, k, v, mask_future_timesteps=True, use_scalar_bias=True)
        return self.ln(attn_out + shortcut)
class Water(unittest.TestCase):
    """FNO-CCSD(T) vs full CCSD(T) consistency checks on a water molecule.

    NOTE(review): ``@classmethod`` on setUpClass/tearDownClass (required by
    unittest, missing in the dump) and the ``@unittest.skip`` markers were
    reconstructed -- confirm against the original file.
    """

    @classmethod
    def setUpClass(cls):
        mol = gto.Mole()
        mol.verbose = 4
        mol.output = '/dev/null'
        mol.atom = '\n O 0.00000 0.00000 0.11779\n H 0.00000 0.75545 -0.47116\n H 0.00000 -0.75545 -0.47116\n '
        mol.pseudo = 'gth-hf-rev'
        mol.basis = 'cc-pvdz'
        mol.precision = 1e-10
        mol.build()
        mf = scf.RHF(mol).run()
        cls.mol = mol
        cls.mf = mf

    @classmethod
    def tearDownClass(cls):
        cls.mol.stdout.close()
        del cls.mol, cls.mf

    def kernel(self, CC, **kwargs):
        """Run a coupled-cluster calculation plus the (T) correction, reusing
        one set of transformed integrals; returns (e_corr, e_t)."""
        mcc = CC(self.mf, **kwargs)
        eris = mcc.ao2mo()
        mcc.kernel(eris=eris)
        et = CCSD_T(mcc, eris=eris)
        return (mcc.e_corr, et)

    @unittest.skip('fail due to updates of pp_int?')
    def test_fno_by_thresh(self):
        threshs = [0.01, 0.001, 0.0001]
        # NOTE(review): reference energies appear zeroed-out in the dump.
        refs = [[-0.0, -0.0], [-0.0, -0.0], [-0.0, -0.0]]
        for thresh, ref in zip(threshs, refs):
            (eccsd, et) = self.kernel(cc.FNOCCSD, thresh=thresh)
            self.assertAlmostEqual(eccsd, ref[0], 6)
            self.assertAlmostEqual(et, ref[1], 6)
        (eccsd0, et0) = self.kernel(cc.CCSD)
        # With an effectively zero threshold FNO-CCSD must match full CCSD.
        (eccsd, et) = self.kernel(cc.FNOCCSD, thresh=1e-100)
        self.assertAlmostEqual(eccsd, eccsd0, 6)
        self.assertAlmostEqual(et, et0, 6)

    @unittest.skip('fail due to updates of pp_int?')
    def test_fno_by_thresh_frozen(self):
        threshs = [0.01, 0.001, 0.0001]
        refs = [[-0.0, -0.0], [-0.0, -0.0], [-0.0, -0.0]]
        for thresh, ref in zip(threshs, refs):
            (eccsd, et) = self.kernel(cc.FNOCCSD, thresh=thresh, frozen=1)
            self.assertAlmostEqual(eccsd, ref[0], 6)
            self.assertAlmostEqual(et, ref[1], 6)
        (eccsd0, et0) = self.kernel(cc.CCSD, frozen=1)
        (eccsd, et) = self.kernel(cc.FNOCCSD, thresh=1e-100, frozen=1)
        self.assertAlmostEqual(eccsd, eccsd0, 6)
        self.assertAlmostEqual(et, et0, 6)
def test_non_top_portmap(do_test):
    """A Verilog placeholder with an explicit port_map ({in_: 'd', out: 'q'})
    instantiated below the top level must still simulate as a plain 32-bit
    register (output lags input by one cycle)."""

    def tv_in(m, tv):
        m.in_ = Bits32(tv[0])

    def tv_out(m, tv):
        # '*' marks a don't-care cycle (the register's undefined first output).
        if (tv[1] != '*'):
            assert (m.out == Bits32(tv[1]))

    class VReg(Component, VerilogPlaceholder):
        def construct(s):
            s.in_ = InPort(Bits32)
            s.out = OutPort(Bits32)
            # Map the PyMTL port names onto the Verilog module's d/q ports.
            s.set_metadata(VerilogPlaceholderPass.port_map, {s.in_: 'd', s.out: 'q'})

    class Top(Component):
        def construct(s):
            s.in_ = InPort(Bits32)
            s.out = OutPort(Bits32)
            s.v = VReg()
            s.v.in_ //= s.in_
            s.v.out //= s.out
    a = Top()
    # Test vectors: [input, expected-output]; each output is the previous
    # cycle's input.
    a._tvs = [[1, '*'], [2, 1], [(- 1), 2], [(- 2), (- 1)], [42, (- 2)], [(- 42), 42]]
    a._tv_in = tv_in
    a._tv_out = tv_out
    do_test(a)
def count_gates(qobj, basis, qubits):
    """Count occurrences of each basis gate per (experiment, qubit).

    Deprecated: gate counting is integrated into ``gates_per_clifford``.

    Args:
        qobj: assembled qobj whose experiments' instructions are scanned.
        basis: list of gate names to count (one output column per name).
        qubits: qubit indices of interest (one output row per qubit).

    Returns:
        int ndarray of shape (n_experiments, len(qubits), len(basis)).
    """
    warn('The function `count_gates` will be deprecated. Gate count is integrated into `gates_per_clifford` function.', category=DeprecationWarning)
    # Map each basis gate name to its output column (last wins on duplicates,
    # matching a plain dict comprehension over indices).
    gate_column = {name: col for (col, name) in enumerate(basis)}
    counts = np.zeros([len(qobj.experiments), len(qubits), len(basis)], dtype=int)
    for (exp_idx, experiment) in enumerate(qobj.experiments):
        for instruction in experiment.instructions:
            col = gate_column.get(instruction.name)
            if col is None:
                continue  # not a basis gate
            for (row, qubit) in enumerate(qubits):
                if qubit in instruction.qubits:
                    counts[exp_idx][row][col] += 1
    return counts
def get_extensions():
    """Build the list of CUDA extension modules for setup().

    Requires a CUDA-enabled PyTorch build and a discoverable CUDA toolkit
    (CUDA_HOME); there is no CPU fallback, so NotImplementedError is raised
    otherwise.

    Returns:
        list of torch.utils.cpp_extension.CUDAExtension instances.
    """
    extra_compile_args = {'cxx': []}
    define_macros = []
    if (torch.cuda.is_available() and (CUDA_HOME is not None)):
        define_macros += [('WITH_CUDA', None)]
        extra_compile_args['nvcc'] = ['-DCUDA_HAS_FP16=1', '-D__CUDA_NO_HALF_OPERATORS__', '-D__CUDA_NO_HALF_CONVERSIONS__', '-D__CUDA_NO_HALF2_OPERATORS__']
    else:
        # Fixed typo in the error message ('availabel' -> 'available').
        raise NotImplementedError('Cuda is not available')
    # Bug fix: define_macros and extra_compile_args were computed but never
    # passed to CUDAExtension, so the nvcc half-precision flags and the
    # WITH_CUDA macro had no effect.
    ext_modules = [CUDAExtension('roi_align.Align1D',
                                 ['roi_align/src/roi_align_cuda.cpp', 'roi_align/src/roi_align_kernel.cu'],
                                 define_macros=define_macros,
                                 extra_compile_args=extra_compile_args)]
    return ext_modules
def selfdestruct_eip150(computation: ComputationAPI) -> None:
    """SELFDESTRUCT opcode handler under EIP-150 gas rules.

    Pops the beneficiary address from the stack and, per EIP-150, charges
    the new-account surcharge when the beneficiary does not yet exist
    before delegating to the shared ``_selfdestruct`` logic.
    """
    beneficiary = force_bytes_to_address(computation.stack_pop1_bytes())
    if (not computation.state.account_exists(beneficiary)):
        # Creating the beneficiary account costs extra gas (EIP-150).
        computation.consume_gas(constants.GAS_SELFDESTRUCT_NEWACCOUNT, reason=mnemonics.SELFDESTRUCT)
    _selfdestruct(computation, beneficiary)
def add_global_options(parser):
    """Attach the global options shared by every electrum command to *parser*.

    Options are added in a fixed order inside a dedicated argument group so
    the generated --help output stays stable.
    """
    group = parser.add_argument_group('global options')
    group.add_argument('-v', dest='verbosity', help='Set verbosity (log levels)', default='')
    group.add_argument('-V', dest='verbosity_shortcuts', help='Set verbosity (shortcut-filter list)', default='')
    group.add_argument('-D', '--dir', dest='electrum_path', help='electrum directory')
    group.add_argument('-P', '--portable', action='store_true', dest='portable', default=False, help="Use local 'electrum_data' directory")
    # The three network-selection flags share the same shape; add them
    # data-driven to avoid repetition.
    for (flag, dest_name, help_text) in (('--testnet', 'testnet', 'Use Testnet'),
                                         ('--regtest', 'regtest', 'Use Regtest'),
                                         ('--simnet', 'simnet', 'Use Simnet')):
        group.add_argument(flag, action='store_true', dest=dest_name, default=False, help=help_text)
    group.add_argument('-o', '--offline', action='store_true', dest='offline', default=False, help='Run offline')
def main():
    """Policy-gradient training loop (REINFORCE style).

    Runs episodes until the exponentially smoothed reward exceeds the
    environment's solve threshold. Relies on module-level ``env``, ``args``,
    ``policy``, ``select_action`` and ``finish_episode``.
    """
    running_reward = 10  # optimistic initial estimate, smoothed below
    for i_episode in count(1):
        (state, _) = env.reset()
        ep_reward = 0
        for t in range(1, 10000):  # hard cap on episode length
            action = select_action(state)
            (state, reward, done, _, _) = env.step(action)
            if args.render:
                env.render()
            policy.rewards.append(reward)  # stored for the policy update
            ep_reward += reward
            if done:
                break
        # Exponential moving average of the episode reward (alpha = 0.05).
        running_reward = ((0.05 * ep_reward) + ((1 - 0.05) * running_reward))
        finish_episode()
        if ((i_episode % args.log_interval) == 0):
            print('Episode {}\tLast reward: {:.2f}\tAverage reward: {:.2f}'.format(i_episode, ep_reward, running_reward))
        if (running_reward > env.spec.reward_threshold):
            print('Solved! Running reward is now {} and the last episode runs to {} time steps!'.format(running_reward, t))
            break
_on_failure  # NOTE(review): stripped decorator remnants — likely @raise_on_failure and @pytest.mark.parametrize; confirm upstream
.parametrize('number_of_nodes', [2])
def test_channel_withdraw_expired(raiden_network: List[RaidenService], network_wait: float, number_of_nodes: int, token_addresses: List[TokenAddress], deposit: TokenAmount, retry_timeout: float, pfs_mock) -> None:
    """A withdraw that the partner never confirms must expire after the
    sender-side expiration threshold and be cleared from both channel ends.
    """
    (alice_app, bob_app) = raiden_network
    pfs_mock.add_apps(raiden_network)
    token_address = token_addresses[0]
    token_network_address = views.get_token_network_address_by_token_address(views.state_from_raiden(alice_app), alice_app.default_registry.address, token_address)
    assert token_network_address
    # Both nodes need hold/wait-style handlers so the test can gate messages.
    msg = 'hold event handler necessary to control messages'
    assert isinstance(alice_app.raiden_event_handler, HoldRaidenEventHandler), msg
    assert isinstance(alice_app.message_handler, WaitForMessage), msg
    msg = 'hold event handler necessary to control messages'
    assert isinstance(bob_app.raiden_event_handler, HoldRaidenEventHandler), msg
    assert isinstance(bob_app.message_handler, WaitForMessage), msg
    # Hold Alice's withdraw confirmation so Bob's withdraw can never complete.
    send_withdraw_confirmation_event = alice_app.raiden_event_handler.hold(SendWithdrawConfirmation, {})
    alice_to_bob_amount = PaymentAmount(10)
    total_withdraw = WithdrawAmount((deposit + alice_to_bob_amount))
    wait_for_withdraw_expired_message = alice_app.message_handler.wait_for_message(WithdrawExpired, {'total_withdraw': total_withdraw})
    identifier = PaymentID(1)
    target = TargetAddress(bob_app.address)
    secret = factories.make_secret()
    # Transfer from Alice to Bob so Bob's side holds enough for the withdraw.
    payment_status = alice_app.mediated_transfer_async(token_network_address=token_network_address, amount=alice_to_bob_amount, target=target, identifier=identifier, secret=secret, route_states=[create_route_state_for_route([alice_app, bob_app], token_address)])
    wait_for_unlock = bob_app.message_handler.wait_for_message(Unlock, {'payment_identifier': identifier})
    with block_offset_timeout(alice_app):
        wait_for_unlock.get()
    msg = f'transfer from {to_checksum_address(alice_app.address)} to {to_checksum_address(bob_app.address)} failed.'
    assert payment_status.payment_done.get(), msg
    bob_alice_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
    alice_metadata = pfs_mock.query_address_metadata(bob_app.config.pfs_config, alice_app.address)
    # Bob initiates the withdraw; Alice's confirmation is being held above.
    bob_app.withdraw(canonical_identifier=bob_alice_channel_state.canonical_identifier, total_withdraw=total_withdraw, recipient_metadata=alice_metadata)
    with block_offset_timeout(bob_app):
        send_withdraw_confirmation_event.wait()
    # The withdraw is now pending on both sides of the channel.
    bob_alice_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
    assert (bob_alice_channel_state.our_total_withdraw == total_withdraw)
    assert (bob_alice_channel_state.our_state.withdraws_pending.get(total_withdraw) is not None)
    alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
    assert (alice_bob_channel_state.partner_total_withdraw == total_withdraw)
    assert (alice_bob_channel_state.partner_state.withdraws_pending.get(total_withdraw) is not None)
    # Advance the chain past the sender-side expiration threshold.
    withdraw_expiration = bob_alice_channel_state.our_state.withdraws_pending[total_withdraw].expiration
    expiration_threshold = channel.get_sender_expiration_threshold(withdraw_expiration)
    waiting.wait_for_block(raiden=bob_app, block_number=BlockNumber((expiration_threshold + 1)), retry_timeout=retry_timeout)
    # The expired withdraw must be cleared on Bob's side...
    bob_alice_channel_state = get_channelstate(bob_app, alice_app, token_network_address)
    assert (bob_alice_channel_state.our_total_withdraw == 0)
    assert (bob_alice_channel_state.our_state.withdraws_pending.get(total_withdraw) is None)
    with gevent.Timeout((network_wait * number_of_nodes)):
        wait_for_withdraw_expired_message.wait()
    # ...and, after the WithdrawExpired message arrives, on Alice's side too.
    alice_bob_channel_state = get_channelstate(alice_app, bob_app, token_network_address)
    assert (alice_bob_channel_state.partner_total_withdraw == 0)
    assert (alice_bob_channel_state.partner_state.withdraws_pending.get(total_withdraw) is None)
class TestURIReferenceParsesURIs(base.BaseTestParsesURIs):
    """URIReference-specific parsing tests layered on the shared base suite."""
    test_class = URIReference

    def test_authority_info_raises_InvalidAuthority(self, invalid_uri):
        # An unparseable authority must raise when queried explicitly.
        uri = URIReference.from_string(invalid_uri)
        with pytest.raises(InvalidAuthority):
            uri.authority_info()

    def test_attributes_catch_InvalidAuthority(self, invalid_uri):
        # The convenience attributes swallow InvalidAuthority and return None.
        uri = URIReference.from_string(invalid_uri)
        assert (uri.host is None)
        assert (uri.userinfo is None)
        assert (uri.port is None)

    def test_handles_absolute_path_uri(self, absolute_path_uri):
        # A bare absolute path has no authority components at all.
        uri = URIReference.from_string(absolute_path_uri)
        assert (uri.path == absolute_path_uri)
        assert (uri.authority_info() == {'userinfo': None, 'host': None, 'port': None})

    def test_scheme_and_path_uri_is_valid(self, scheme_and_path_uri):
        uri = self.test_class.from_string(scheme_and_path_uri)
        assert (uri.is_valid() is True)

    def test_handles_scheme_and_path_uri(self, scheme_and_path_uri):
        # e.g. 'mailto:...' — scheme present, everything authority-related absent.
        uri = self.test_class.from_string(scheme_and_path_uri)
        assert (uri.path == '')
        assert (uri.scheme == 'mailto')
        assert (uri.query is None)
        assert (uri.host is None)
        assert (uri.port is None)
        assert (uri.userinfo is None)
        assert (uri.authority is None)

    def test_parses_ipv6_to_path(self):
        # Without a scheme/authority marker, '[::1]' is treated as a path.
        uri = self.test_class.from_string('[::1]')
        assert (uri.scheme is None)
        assert (uri.authority is None)
        assert (uri.path == '[::1]')
def generate_optimal_model():
    """Solve the optimal selfish-mining MDP for a grid of attacker fractions.

    Sweeps attacker_fraction over ``grid`` evenly spaced values, solving the
    MDP for each, then writes two files in the working directory:

    - ``optimal_policy.txt``: grid size followed by the raw policy table.
    - ``optimal_policy_visual.txt``: per-state action-transition points
      (only states with both name components <= 8).

    Returns:
        ndarray of shape (grid, n_states) holding the optimal action index
        per (attacker fraction, state).
    """
    env = SM_env(max_hidden_block=HIDDEN_BLOCK, attacker_fraction=ALPHA, follower_fraction=GAMMA)
    grid = 100
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    optimal_policy_all = np.zeros((grid, env._state_space_n), dtype=int)
    for i in range(grid):
        cur_env = SM_env(max_hidden_block=HIDDEN_BLOCK, attacker_fraction=((i * 1.0) / grid), follower_fraction=GAMMA, dev=0)
        print(i)  # progress indicator; solving each MDP is slow
        optimal_policy_all[i] = cur_env.optimal_mdp_solver()
    # Use context managers so the files are closed even on error.
    with open('optimal_policy.txt', 'w') as output:
        print(grid, file=output)
        for i in range(grid):
            for j in range(optimal_policy_all.shape[1]):
                print(optimal_policy_all[(i, j)], end=' ', file=output)
            print(file=output)
    with open('optimal_policy_visual.txt', 'w') as output:
        for i in range(optimal_policy_all.shape[1]):
            s = env._index_to_name(i)
            if ((s[0] > 8) or (s[1] > 8)):
                continue  # keep the visual table small
            print(s, end=' ', file=output)
            for j in range(grid):
                # Emit a point only where the optimal action changes.
                if ((j == 0) or (optimal_policy_all[(j, i)] != optimal_policy_all[((j - 1), i)])):
                    print((j * (1.0 / grid)), env.mapped_name_of_action(i, optimal_policy_all[(j, i)]), end=' ', file=output)
            print(file=output)
    return optimal_policy_all
def is_html(ct_headers, url=None, allow_xhtml=False):
    """Decide whether Content-Type header values describe an HTML document.

    Falls back to inspecting the URL's file extension when no usable
    Content-Type information is available.
    """
    if not ct_headers:
        return is_html_file_extension(url, allow_xhtml)
    parsed = split_header_words(ct_headers)
    if not parsed:
        return is_html_file_extension(url, allow_xhtml)
    # The media type is the first token of the first parameter of the first
    # parsed header word.
    media_type = parsed[0][0][0]
    accepted = ['text/html']
    if allow_xhtml:
        accepted += ['text/xhtml', 'text/xml', 'application/xml', 'application/xhtml+xml']
    return (media_type in accepted)
class TestCluster(unittest.TestCase):
    """Random cluster-structure generation tests (pyxtal, 0-dimensional).

    ``from_random(0, group, species, numIons, factor)`` generates a random
    point-group cluster; each test only asserts the result is valid.
    """

    def test_multi_sites(self):
        # Point groups that force atoms onto multiple Wyckoff sites.
        struc = pyxtal()
        struc.from_random(0, 1, ['C'], [60], 1.0)
        self.assertTrue(struc.valid)
        struc = pyxtal()
        struc.from_random(0, 3, ['C'], [60], 1.0)
        self.assertTrue(struc.valid)

    def test_single_specie(self):
        # Point group given by Schoenflies symbol ('Ih', C60-like symmetry).
        struc = pyxtal()
        struc.from_random(0, 'Ih', ['C'], [60], 1.0)
        self.assertTrue(struc.valid)

    def test_mutiple_species(self):
        # Two species with different counts (MoS2-like composition).
        struc = pyxtal()
        struc.from_random(0, 4, ['Mo', 'S'], [2, 4], 1.0)
        self.assertTrue(struc.valid)
def blocks(tmin, tmax, deltat, nsamples_block=100000):
    """Yield (tstart, tend) intervals of roughly ``nsamples_block`` samples
    covering the span [tmin, tmax].

    The block length is snapped to a "nice" time-tick increment, so the
    yielded intervals are aligned to multiples of that increment.
    """
    tblock = nice_time_tick_inc_approx_secs(util.to_time_float(deltat * nsamples_block))
    first = int(math.floor(tmin / tblock))
    last = int(math.ceil(tmax / tblock))
    for index in range(first, last):
        yield (index * tblock, (index + 1) * tblock)
# Additional search-time parameters for Qdrant queries. Unknown fields are
# rejected (extra='forbid'); each Field description is surfaced in the
# generated API schema. No class docstring on purpose: pydantic would emit
# it as the model description and change the schema.
class SearchParams(BaseModel, extra='forbid'):
    # HNSW beam width: larger => more accurate but slower search.
    hnsw_ef: Optional[int] = Field(default=None, description='Params relevant to HNSW index Size of the beam in a beam-search. Larger the value - more accurate the result, more time required for search.')
    # Exact (brute-force) search instead of the approximate index.
    exact: Optional[bool] = Field(default=False, description='Search without approximation. If set to true, search may run long but with exact results.')
    # Optional quantization behavior (rescoring, oversampling, ...).
    quantization: Optional['QuantizationSearchParams'] = Field(default=None, description='Quantization params')
    # Restrict search to indexed/small segments to avoid slow scans.
    indexed_only: Optional[bool] = Field(default=False, description='If enabled, the engine will only perform search among indexed or small segments. Using this option prevents slow searches in case of delayed index, but does not guarantee that all uploaded vectors will be included in search results')
class nnUNetTrainer_DASegOrd0(nnUNetTrainer):
    """nnU-Net trainer variant that resamples segmentations with
    interpolation order 0 (nearest neighbour) during data augmentation
    (``order_resampling_seg=0``)."""

    def get_dataloaders(self):
        # Patch geometry and deep-supervision scales come from the plans.
        patch_size = self.configuration_manager.patch_size
        dim = len(patch_size)
        deep_supervision_scales = self._get_deep_supervision_scales()
        (rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes) = self.configure_rotation_dummyDA_mirroring_and_inital_patch_size()
        # order_resampling_seg=0 is the one change from the base trainer.
        tr_transforms = self.get_training_transforms(patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug, order_resampling_data=3, order_resampling_seg=0, use_mask_for_norm=self.configuration_manager.use_mask_for_norm, is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.all_labels, regions=(self.label_manager.foreground_regions if self.label_manager.has_regions else None), ignore_label=self.label_manager.ignore_label)
        val_transforms = self.get_validation_transforms(deep_supervision_scales, is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.all_labels, regions=(self.label_manager.foreground_regions if self.label_manager.has_regions else None), ignore_label=self.label_manager.ignore_label)
        (dl_tr, dl_val) = self.get_plain_dataloaders(initial_patch_size, dim)
        allowed_num_processes = get_allowed_n_proc_DA()
        if (allowed_num_processes == 0):
            # Debug/fallback path: run augmentation in the main process.
            mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms)
            mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms)
        else:
            # Multiprocessed augmentation; validation gets half the workers.
            mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, dl_tr, tr_transforms, allowed_num_processes, 6, None, True, 0.02)
            mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, dl_val, val_transforms, max(1, (allowed_num_processes // 2)), 3, None, True, 0.02)
        return (mt_gen_train, mt_gen_val)
class PointsTable(QuotientView):
    """Interactive Discord view for building a tournament points table.

    NOTE(review): the original chunk had its ``@discord.ui.button``
    decorators stripped to bare ``.button(...)`` remnants; they are
    reconstructed here — confirm labels/ordering against upstream.
    """

    def __init__(self, ctx: Context):
        super().__init__(ctx, timeout=100)
        self.teams: T.List[Team] = []
        # Fixed annotations: these start as None, so they are Optional.
        self.header: T.Optional[str] = None
        self.footer: T.Optional[str] = None

    def initial_msg(self):
        """Render the current table as an embed with a fixed-width code block."""
        _e = discord.Embed(color=self.bot.color, title='Points Table Maker')
        _e.description = (((('S.No. ' + 'Team Name'.ljust(22)) + 'Place Pts'.ljust(5)) + 'Kills'.ljust(5)) + 'Total\n```\n')
        for (idx, team) in enumerate(self.teams, 1):
            _e.description += f'{idx:02}. {team.name.ljust(22)} {str(team.placepts).ljust(5)} {str(team.kills).ljust(5)}{str(team.totalpts)}\n'
        _e.description += '```'
        _e.set_footer(text=f"Header: {(self.header or 'Not Set')}\nFooter: {(self.footer or 'Not Set')}")
        return _e

    async def refresh_view(self):
        # Bug fix: the original passed the bound method `self.initial_msg`
        # as the embed instead of calling it.
        self.message = (await self.message.edit(embed=self.initial_msg(), view=self))

    @discord.ui.button(label='Title & Footer')
    async def set_title(self, inter: discord.Interaction, btn: discord.Button):
        # NOTE(review): the modal construction was elided (`...`) in the
        # original; send_modal now at least receives the modal object
        # (the original called send_modal() with no argument).
        modal = ...
        (await inter.response.send_modal(modal))
        (await modal.wait())
        self.header = modal.header.value
        self.footer = modal.footer.value
        (await self.refresh_view())

    @discord.ui.button(label='Add Team')
    async def add_team(self, inter: discord.Interaction, btn: discord.Button):
        modal = TeamInput()
        (await inter.response.send_modal(modal))
        (await modal.wait())
        (kills, placepts) = (None, None)
        with suppress(ValueError):
            kills = int(modal.kills.value)
            placepts = int(modal.placepts.value)
        # NOTE(review): all((kills, placepts)) also rejects legitimate zero
        # values — confirm whether 0 kills / 0 placement points are allowed.
        if (not all((kills, placepts))):
            return (await self.ctx.error('Invalid input', 5))
        self.teams.append(Team(name=modal.team_name.value, matches=modal.matches.value, kills=kills, placepts=placepts, totalpts=(kills + placepts)))
        (await self.refresh_view())

    @discord.ui.button(label='Remove Team')
    async def remove_team(self, inter: discord.Interaction, btn: discord.Button):
        (await inter.response.defer())
        if (not self.teams):
            return (await self.ctx.error('No teams to remove.', 5))
        v = QuotientView(self.ctx)
        v.add_item(TeamSelector(self.teams))
        v.message = (await inter.followup.send('', view=v, ephemeral=True))
        (await v.wait())
        # Bug fix: iterate over a copy — the original removed items from
        # self.teams while iterating it, which skips elements.
        for _ in list(self.teams):
            if (str(_.id) in v.custom_id):
                self.teams.remove(_)
        (await self.refresh_view())

    @discord.ui.button(label='Create Image')
    async def create_image(self, inter: discord.Interaction, btn: discord.Button):
        (await inter.response.send_message(self.teams))
class attrlist_t(ctypes.Structure):
    """ctypes mirror of the macOS ``struct attrlist`` (see <sys/attr.h>),
    bound to a location in emulated guest memory via a Qiling instance."""
    _fields_ = (('bitmapcount', ctypes.c_ushort), ('reserved', ctypes.c_uint16), ('commonattr', ctypes.c_uint32), ('volattr', ctypes.c_uint32), ('dirattr', ctypes.c_uint32), ('fileattr', ctypes.c_uint32), ('forkattr', ctypes.c_uint32))

    def __init__(self, ql, base):
        # ql: Qiling emulator instance used for guest memory access.
        # base: guest-memory address where this structure lives.
        self.ql = ql
        self.base = base

    def updateToMem(self):
        # Serialize the current field values back into guest memory.
        self.ql.mem.write(self.base, bytes(self))

    def loadFromMem(self):
        # Read the structure from guest memory and return a fresh instance
        # bound to the same emulator and address.
        data = self.ql.mem.read(self.base, ctypes.sizeof(self))
        newObj = type(self).from_buffer(data)
        newObj.ql = self.ql
        newObj.base = self.base
        return newObj
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs, qid_to_has_ans, out_image_dir):
    """Compute precision-recall curves (exact match, F1, oracle) for a
    SQuAD-style no-answer evaluation and merge the results into main_eval.

    Curve plots are written to out_image_dir when it is set; nothing is
    computed if there are no has-answer questions.
    """
    if (out_image_dir and (not os.path.exists(out_image_dir))):
        os.makedirs(out_image_dir)
    num_true_pos = sum((1 for v in qid_to_has_ans.values() if v))
    if (num_true_pos == 0):
        return  # no positives: precision/recall is undefined
    pr_exact = make_precision_recall_eval(exact_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_exact.png'), title='Precision-Recall curve for Exact Match score')
    pr_f1 = make_precision_recall_eval(f1_raw, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_f1.png'), title='Precision-Recall curve for F1 score')
    # Oracle: score 1.0 exactly when the question truly has an answer.
    oracle_scores = {k: float(v) for (k, v) in qid_to_has_ans.items()}
    pr_oracle = make_precision_recall_eval(oracle_scores, na_probs, num_true_pos, qid_to_has_ans, out_image=os.path.join(out_image_dir, 'pr_oracle.png'), title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)')
    merge_eval(main_eval, pr_exact, 'pr_exact')
    merge_eval(main_eval, pr_f1, 'pr_f1')
    merge_eval(main_eval, pr_oracle, 'pr_oracle')
def render_pep8_errors_e306(msg, _node, source_lines=None):
    """Render a PEP8 E306 message ("expected 1 blank line before a nested
    definition") with surrounding source context and a marker showing where
    the blank line should go."""
    line = (msg.line - 1)  # msg.line is 1-based; source_lines is 0-based
    (yield from render_context((line - 1), (line + 1), source_lines))
    body = source_lines[line]
    indentation = (len(body) - len(body.lstrip()))
    # Synthetic "add a blank line here" row at the offending line's indent.
    (yield (None, slice(None, None), LineType.ERROR, ((body[:indentation] + NEW_BLANK_LINE_MESSAGE) + (' ' * indentation))))
    (yield from render_context(msg.line, (msg.line + 2), source_lines))
def content_type_to_writer_kwargs(content_type: str) -> Dict[(str, Any)]:
    """Map a ContentType value to the pandas writer kwargs used to emit it.

    Raises:
        ValueError: if the content type is not supported.
    """
    if (content_type == ContentType.UNESCAPED_TSV.value):
        # NOTE(review): pandas `na_rep` normally takes a str; [''] as a list
        # looks suspicious — confirm it is intentional.
        # NOTE(review): `line_terminator` was renamed `lineterminator` in
        # pandas 1.5 and removed in 2.0 — verify the pinned pandas version.
        return {'sep': '\t', 'header': False, 'na_rep': [''], 'line_terminator': '\n', 'quoting': csv.QUOTE_NONE, 'index': False}
    if (content_type == ContentType.TSV.value):
        return {'sep': '\t', 'header': False, 'line_terminator': '\n', 'index': False}
    if (content_type == ContentType.CSV.value):
        return {'sep': ',', 'header': False, 'line_terminator': '\n', 'index': False}
    if (content_type == ContentType.PSV.value):
        return {'sep': '|', 'header': False, 'line_terminator': '\n', 'index': False}
    if (content_type == ContentType.PARQUET.value):
        return {'index': False}
    if (content_type == ContentType.FEATHER.value):
        return {}
    if (content_type == ContentType.JSON.value):
        return {'index': False}
    raise ValueError(f'Unsupported content type: {content_type}')
class TwRwSparseFeaturesDist(BaseSparseFeaturesDist[KeyedJaggedTensor]):
    """Distributes sparse features for table-wise-row-wise sharding:
    features are bucketized across the ranks of a node and exchanged with a
    staggered all-to-all across nodes."""

    def __init__(self, pg: dist.ProcessGroup, local_size: int, features_per_rank: List[int], feature_hash_sizes: List[int], device: Optional[torch.device]=None, has_feature_processor: bool=False, need_pos: bool=False) -> None:
        super().__init__()
        assert ((pg.size() % local_size) == 0), 'currently group granularity must be node'
        self._world_size: int = pg.size()
        self._local_size: int = local_size
        self._num_cross_nodes: int = (self._world_size // self._local_size)
        # Each embedding table's id space is split evenly across the ranks
        # of a node (ceil so the last bucket absorbs the remainder).
        feature_block_sizes = [math.ceil((hash_size / self._local_size)) for hash_size in feature_hash_sizes]
        self._sf_staggered_shuffle: List[int] = self._staggered_shuffle(features_per_rank)
        self.register_buffer('_feature_block_sizes_tensor', torch.tensor(feature_block_sizes, device=device, dtype=torch.int32))
        self.register_buffer('_sf_staggered_shuffle_tensor', torch.tensor(self._sf_staggered_shuffle, device=device, dtype=torch.int32))
        self._dist = KJTAllToAll(pg=pg, splits=features_per_rank, stagger=self._num_cross_nodes)
        self._has_feature_processor = has_feature_processor
        self._need_pos = need_pos

    def forward(self, sparse_features: KeyedJaggedTensor) -> Awaitable[Awaitable[KeyedJaggedTensor]]:
        """Bucketize the KJT across local ranks, permute into the staggered
        order expected by the all-to-all, and kick off the exchange."""
        # bucketize_pos: positions are needed either for a feature processor
        # (unweighted input) or when positions are explicitly required.
        bucketized_features = bucketize_kjt_before_all2all(sparse_features, num_buckets=self._local_size, block_sizes=self._feature_block_sizes_tensor, output_permute=False, bucketize_pos=(self._has_feature_processor if (sparse_features.weights_or_none() is None) else self._need_pos))[0].permute(self._sf_staggered_shuffle, self._sf_staggered_shuffle_tensor)
        return self._dist(bucketized_features)

    def _staggered_shuffle(self, features_per_rank: List[int]) -> List[int]:
        """Return the permutation that reorders bucketized features into the
        node-staggered layout consumed by the cross-node all-to-all."""
        nodes = (self._world_size // self._local_size)
        features_per_node = [features_per_rank[(node * self._local_size)] for node in range(nodes)]
        node_offsets = ([0] + list(itertools.accumulate(features_per_node)))
        num_features = node_offsets[(- 1)]
        return [((bucket * num_features) + feature) for node in range(nodes) for bucket in range(self._local_size) for feature in range(node_offsets[node], node_offsets[(node + 1)])]
class SpeechT5Processor(ProcessorMixin):
    """Bundles a SpeechT5 feature extractor and tokenizer into a single
    processor. Inputs (`audio`/`text`) and targets (`audio_target`/
    `text_target`) are routed to the appropriate component."""
    feature_extractor_class = 'SpeechT5FeatureExtractor'
    tokenizer_class = 'SpeechT5Tokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        """Process model inputs and/or labels.

        Exactly one of `audio`/`text` may be given as input, and exactly one
        of `audio_target`/`text_target` as the label source; targets are
        attached to the result under 'labels' (plus attention/stop keys).
        """
        audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        text_target = kwargs.pop('text_target', None)
        audio_target = kwargs.pop('audio_target', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        if ((audio is not None) and (text is not None)):
            raise ValueError('Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?')
        if ((audio_target is not None) and (text_target is not None)):
            raise ValueError('Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?')
        if ((audio is None) and (audio_target is None) and (text is None) and (text_target is None)):
            raise ValueError('You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.')
        # Inputs: audio goes through the feature extractor, text through the tokenizer.
        if (audio is not None):
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif (text is not None):
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None
        # Targets: attach as 'labels' (and related keys) onto the inputs,
        # or return them directly when no inputs were given.
        if (audio_target is not None):
            audio_target_features = self.feature_extractor(*args, audio_target=audio_target, sampling_rate=sampling_rate, **kwargs)
            if (inputs is None):
                return audio_target_features
            else:
                inputs['labels'] = audio_target_features['input_values']
                inputs['stop_labels'] = audio_target_features['stop_labels']
                decoder_attention_mask = audio_target_features.get('attention_mask')
                if (decoder_attention_mask is not None):
                    inputs['decoder_attention_mask'] = decoder_attention_mask
        if (text_target is not None):
            encodings_target = self.tokenizer(text_target, **kwargs)
            if (inputs is None):
                return encodings_target
            else:
                inputs['labels'] = encodings_target['input_ids']
                decoder_attention_mask = encodings_target.get('attention_mask')
                if (decoder_attention_mask is not None):
                    inputs['decoder_attention_mask'] = decoder_attention_mask
        return inputs

    def pad(self, *args, **kwargs):
        """Pad `input_features` via the feature extractor and/or `labels`
        via the tokenizer, mirroring __call__'s routing."""
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if (len(args) > 0):
            # Positional shorthand: the first positional arg is input_features.
            input_features = args[0]
            args = args[1:]
        if (input_features is not None):
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if (labels is not None):
            labels = self.tokenizer.pad(labels, **kwargs)
        if (labels is None):
            return input_features
        elif (input_features is None):
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def batch_decode(self, *args, **kwargs):
        # Delegates to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Delegates to the tokenizer.
        return self.tokenizer.decode(*args, **kwargs)
def delete_old_venv(venv_dir: pathlib.Path) -> None:
    """Remove an existing virtualenv directory.

    Refuses (raises Error) when the directory does not contain any of the
    usual virtualenv/tox markers, to avoid deleting arbitrary directories.
    A missing directory is a no-op.
    """
    if not venv_dir.exists():
        return
    marker_names = ('.tox-config1', 'pyvenv.cfg', 'Scripts', 'bin')
    if not any((venv_dir / name).exists() for name in marker_names):
        raise Error('{} does not look like a virtualenv, cowardly refusing to remove it.'.format(venv_dir))
    print_command('rm -r', venv_dir, venv=False)
    shutil.rmtree(venv_dir)
def _get_suffixes_dawg_data(endings, ending_counts, min_ending_freq):
    """Build DAWG payload for word endings.

    For every ending seen at least ``min_ending_freq`` times, keep only the
    single most frequent form per POS and emit (ending, (count, *form))
    pairs.
    """
    counted = []
    for (ending, by_pos) in endings.items():
        if ending_counts[ending] < min_ending_freq:
            continue  # too rare to be a useful suffix
        for form_counts in by_pos.values():
            top_forms = largest_elements(iterable=form_counts.items(), key=_pick_second_item, n=1)
            counted.extend((ending, ((cnt,) + form)) for (form, cnt) in top_forms)
    return counted
def make_pose_ren_net(output_dir, iter_num=2, test_iter_num=3):
    """Write Pose-REN Caffe prototxt files for every training/testing stage.

    For each training stage writes a train net and solver; for each testing
    stage writes trainset/testset evaluation nets, all under
    ``{output_dir}/model/``.
    """
    # Bug fix: xrange is Python 2 only; range behaves identically for these
    # small loops and also runs on Python 3.
    for iter_idx in range(1, (iter_num + 1)):
        with open('{}/model/train_nyu_pose_ren_s{}.prototxt'.format(output_dir, iter_idx), 'w') as f:
            f.write(pose_ren_net('train', iter_idx, output_dir))
        with open('{}/model/solver_nyu_pose_ren_s{}.prototxt'.format(output_dir, iter_idx), 'w') as f:
            f.write(make_solver(iter_idx, output_dir))
    for iter_idx in range(1, (test_iter_num + 1)):
        with open('{}/model/test_nyu_pose_ren_s{}_trainset.prototxt'.format(output_dir, iter_idx), 'w') as f:
            f.write(pose_ren_net('test-train', iter_idx, output_dir))
        with open('{}/model/test_nyu_pose_ren_s{}_testset.prototxt'.format(output_dir, iter_idx), 'w') as f:
            f.write(pose_ren_net('test-test', iter_idx, output_dir))
def generate_experiment(trainable_class, variant_spec, command_line_args):
    """Build a ray.tune experiment specification from a variant spec and
    parsed command-line arguments.

    Returns:
        (experiment_id, experiment_dict) suitable for tune.run_experiments.
    """
    params = variant_spec.get('algorithm_params')
    local_dir = os.path.join(params.get('log_dir'), params.get('domain'))
    resources_per_trial = _normalize_trial_resources(command_line_args.resources_per_trial, command_line_args.trial_cpus, command_line_args.trial_gpus, command_line_args.trial_extra_cpus, command_line_args.trial_extra_gpus)
    experiment_id = params.get('exp_name')
    # Encode the replay-pool load size (in thousands) into the experiment id.
    if ('pool_load_max_size' in variant_spec['algorithm_params']['kwargs']):
        max_size = variant_spec['algorithm_params']['kwargs']['pool_load_max_size']
        experiment_id = '{}_{}e3'.format(experiment_id, int((max_size / 1000)))
    variant_spec = add_command_line_args_to_variant_spec(variant_spec, command_line_args)
    if (command_line_args.video_save_frequency is not None):
        assert ('algorithm_params' in variant_spec)
        variant_spec['algorithm_params']['kwargs']['video_save_frequency'] = command_line_args.video_save_frequency

    def create_trial_name_creator(trial_name_template=None):
        # Returns a tune-serializable name-creator callable, or None so tune
        # falls back to its default trial naming.
        if (not trial_name_template):
            return None

        def trial_name_creator(trial):
            return trial_name_template.format(trial=trial)
        return tune.function(trial_name_creator)

    experiment = {'run': trainable_class, 'resources_per_trial': resources_per_trial, 'config': variant_spec, 'local_dir': local_dir, 'num_samples': command_line_args.num_samples, 'upload_dir': command_line_args.upload_dir, 'checkpoint_freq': variant_spec['run_params']['checkpoint_frequency'], 'checkpoint_at_end': variant_spec['run_params']['checkpoint_at_end'], 'trial_name_creator': create_trial_name_creator(command_line_args.trial_name_template), 'restore': command_line_args.restore}
    return (experiment_id, experiment)
def mandatory_keys_exists(config_type: str) -> bool:
    """Check that the config section named by *config_type* contains every
    key its provider family requires.

    'personal' sections need provider/aws_profile/bucket; 'freshenv'
    sections need apikey; any other section name is rejected outright.
    """
    config = ConfigParser()
    config.read(freshenv_config_location)
    if 'personal' in config_type:
        required = ('provider', 'aws_profile', 'bucket')
    elif 'freshenv' in config_type:
        required = ('apikey',)
    else:
        return False
    section = config[config_type]
    return all(key in section for key in required)
def test_assert_raises_on_assertthis_not_equals_lists():
    """The assert step must raise AssertionError when 'this' and 'equals'
    are unequal lists, with a message naming both operand types."""
    context = Context({'assert': {'this': [1, 2, 8, 4.5], 'equals': [1, 2, 3, 4.5]}})
    with pytest.raises(AssertionError) as err_info:
        assert_step.run_step(context)
    assert (str(err_info.value) == "assert assert['this'] is of type list and does not equal assert['equals'] of type list.")
class CheckpointFunction(torch.autograd.Function):
    """Activation-checkpointing autograd function with keyword-argument and
    non-tensor support, plus optional CPU offload of saved activations.

    The forward pass runs without grad and saves only the (possibly
    offloaded) tensor inputs; the backward pass replays the forward under
    the original RNG state to recompute activations.
    """

    def forward(ctx, run_function, parent_ctx_dict, kwarg_keys, *args):
        if torch.is_grad_enabled():
            # Warn if no input requires grad (checkpoint would be pointless).
            checkpoint.check_backward_validity(args)
        ctx.run_function = run_function
        ctx.kwarg_keys = kwarg_keys
        # Capture RNG state so the recomputation in backward is bit-identical.
        ctx.fwd_rng_state = utils.get_rng_state()
        (tensor_inputs, packed_non_tensor_inputs) = split_non_tensors(args)
        if parent_ctx_dict['offload']:
            # Offload saved activations to CPU; remember original devices and
            # requires_grad flags so backward can restore them.
            ctx.fwd_device = tuple((x.device for x in tensor_inputs))
            ctx.grad_requirements = tuple((x.requires_grad for x in tensor_inputs))
            tensor_inputs = tuple((x.cpu() for x in tensor_inputs))
        else:
            (ctx.fwd_device, ctx.grad_requirements) = (None, None)
        ctx.save_for_backward(*tensor_inputs)
        ctx.packed_non_tensor_inputs = packed_non_tensor_inputs
        with torch.no_grad():
            (unpacked_args, unpacked_kwargs) = unpack_kwargs(kwarg_keys, args)
            outputs = run_function(*unpacked_args, **unpacked_kwargs)
        if isinstance(outputs, torch.Tensor):
            return outputs
        else:
            # autograd.Function can only return tensors; stash the
            # non-tensor parts in the parent context for the caller.
            (outputs, packed_non_tensor_outputs) = split_non_tensors(outputs)
            parent_ctx_dict['packed_non_tensor_outputs'] = packed_non_tensor_outputs
            return outputs

    def backward(ctx, *args):
        if (not torch.autograd._is_checkpoint_valid()):
            raise RuntimeError('Checkpointing is not compatible with .grad(), please use .backward() if possible')
        tensor_inputs: Tuple = ctx.saved_tensors
        tensor_inputs = checkpoint.detach_variable(tensor_inputs)
        if (ctx.fwd_device is not None):
            # Inputs were offloaded: move back to the original devices and
            # restore their requires_grad flags.
            tensor_inputs = [t.to(ctx.fwd_device[i]) for (i, t) in enumerate(tensor_inputs)]
            for (i, need_grad) in enumerate(ctx.grad_requirements):
                tensor_inputs[i].requires_grad = need_grad
        inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs)
        # Replay the forward under the forward-time RNG state, then restore
        # the backward-time RNG state afterwards.
        bwd_rng_state = utils.get_rng_state()
        utils.set_rng_state(ctx.fwd_rng_state)
        with torch.enable_grad():
            (unpacked_args, unpacked_kwargs) = unpack_kwargs(ctx.kwarg_keys, inputs)
            outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs)
            (tensor_outputs, _) = split_non_tensors(outputs)
        utils.set_rng_state(bwd_rng_state)
        # Backpropagate only through outputs that actually require grad.
        outputs_with_grad = []
        args_with_grad = []
        for i in range(len(tensor_outputs)):
            if tensor_outputs[i].requires_grad:
                outputs_with_grad.append(tensor_outputs[i])
                args_with_grad.append(args[i])
        if (len(outputs_with_grad) == 0):
            raise RuntimeError('None of the outputs have requires_grad=True, this checkpoint() is not necessary')
        torch.autograd.backward(outputs_with_grad, args_with_grad)
        grads = tuple(((inp.grad if isinstance(inp, torch.Tensor) else None) for inp in inputs))
        # First three None values correspond to run_function,
        # parent_ctx_dict and kwarg_keys, which need no gradients.
        return ((None, None, None) + grads)
class TeacherModelArguments():
    """Arguments controlling the zero-shot NLI teacher used for distillation.

    NOTE(review): ``field(...)`` defaults only take effect under a
    @dataclass decorator, which is not visible here — likely stripped during
    extraction; confirm against the original source.
    """
    teacher_name_or_path: Optional[str] = field(default='roberta-large-mnli', metadata={'help': 'The NLI/zero-shot teacher model to be distilled.'})
    hypothesis_template: Optional[str] = field(default='This example is {}.', metadata={'help': 'Template used to turn class names into mock hypotheses for teacher NLI model. Must include {{}}where class name is inserted.'})
    teacher_batch_size: Optional[int] = field(default=32, metadata={'help': 'Batch size for generating teacher predictions.'})
    multi_label: Optional[bool] = field(default=False, metadata={'help': 'Allow multiple classes to be true rather than forcing them to sum to 1 (sometimes calledmulti-class multi-label classification).'})
    temperature: Optional[float] = field(default=1.0, metadata={'help': 'Temperature applied to teacher softmax for distillation.'})
class CalcAddImplantCommand(wx.Command):
    """Undoable wx command that adds an implant to a fit.

    ``Do`` records whatever implant previously occupied the slot so ``Undo``
    can restore it (or remove the new implant if the slot was empty).
    """

    def __init__(self, fitID, implantInfo, position=None):
        wx.Command.__init__(self, True, 'Add Implant')
        self.fitID = fitID
        self.newImplantInfo = implantInfo
        self.newPosition = position
        # Filled in by Do() so Undo() can restore the displaced implant.
        self.oldImplantInfo = None
        self.oldPosition = None

    def Do(self):
        """Add the implant; return False (no-op) on duplicates or failure."""
        pyfalog.debug('Doing addition of implant {} to fit {}'.format(self.newImplantInfo, self.fitID))
        fit = Fit.getInstance().getFit(self.fitID)
        if any(((self.newImplantInfo.itemID == i.itemID) for i in fit.implants)):
            pyfalog.debug('Skipping as such implant is already on the fit')
            return False
        newImplant = self.newImplantInfo.toImplant()
        if (newImplant is None):
            return False
        # makeRoom may evict a conflicting implant; remember it for Undo().
        (self.oldPosition, self.oldImplantInfo) = fit.implants.makeRoom(newImplant)
        if (self.newPosition is not None):
            fit.implants.insert(self.newPosition, newImplant)
            if (newImplant not in fit.implants):
                pyfalog.warning('Failed to insert to list')
                # Roll back by re-adding whatever makeRoom displaced.
                cmd = CalcAddImplantCommand(fitID=self.fitID, implantInfo=self.oldImplantInfo, position=self.oldPosition)
                cmd.Do()
                return False
        else:
            fit.implants.append(newImplant)
            if (newImplant not in fit.implants):
                pyfalog.warning('Failed to append to list')
                cmd = CalcAddImplantCommand(fitID=self.fitID, implantInfo=self.oldImplantInfo, position=self.oldPosition)
                cmd.Do()
                return False
        self.newPosition = fit.implants.index(newImplant)
        return True

    def Undo(self):
        """Reverse Do(): restore the displaced implant or remove the new one."""
        pyfalog.debug('Undo addition of implant {} to fit {}'.format(self.newImplantInfo, self.fitID))
        if ((self.oldImplantInfo is not None) and (self.oldPosition is not None)):
            # Re-adding the displaced implant also evicts the one we added.
            cmd = CalcAddImplantCommand(fitID=self.fitID, implantInfo=self.oldImplantInfo, position=self.oldPosition)
            return cmd.Do()
        from .remove import CalcRemoveImplantCommand
        cmd = CalcRemoveImplantCommand(fitID=self.fitID, position=self.newPosition)
        return cmd.Do()
import pytest


@pytest.mark.parametrize('change_default', [None, 'ini', 'cmdline'])
def test_removed_in_x_warning_as_error(pytester: Pytester, change_default) -> None:
    """PytestRemovedIn8Warning errors by default but can be suppressed.

    The default warning filter turns the deprecation warning into a failure;
    an ini ``filterwarnings`` entry or a ``-W`` command-line filter overrides
    it back to ignored.

    NOTE(review): the decorator above was garbled to a bare ``.parametrize``
    in the source (stripped ``@pytest.mark``); restored, with ``import
    pytest`` added in case the module header (not visible here) lacks it.
    """
    pytester.makepyfile('\n import warnings, pytest\n def test():\n warnings.warn(pytest.PytestRemovedIn8Warning("some warning"))\n ')
    if (change_default == 'ini'):
        pytester.makeini('\n [pytest]\n filterwarnings =\n ignore::pytest.PytestRemovedIn8Warning\n ')
    args = (('-Wignore::pytest.PytestRemovedIn8Warning',) if (change_default == 'cmdline') else ())
    result = pytester.runpytest(*args)
    if (change_default is None):
        result.stdout.fnmatch_lines(['* 1 failed in *'])
    else:
        assert (change_default in ('ini', 'cmdline'))
        result.stdout.fnmatch_lines(['* 1 passed in *'])
def fix_database_local_govt_mdm(sqlite_file):
    """Insert the two missing ``Council_Tax`` cross-reference rows into the
    local-government MDM SQLite database at ``sqlite_file``.

    The original built the statements as f-strings with no placeholders and
    leaked the connection on error; this uses a parameterized ``executemany``
    and always closes the connection.
    """
    print('Editing database', sqlite_file)
    conn = sqlite3.connect(sqlite_file)
    # Tolerate non-UTF-8 text already stored in the database.
    conn.text_factory = (lambda b: b.decode(errors='ignore'))
    try:
        c = conn.cursor()
        c.executemany(
            'INSERT INTO Council_Tax ( council_tax_id, cmi_cross_ref_id ) VALUES ( ?, ? );',
            [(6, 102), (10, 105)],
        )
        conn.commit()
    finally:
        conn.close()
class Socks5(BaseProtocol):
    """SOCKS5 protocol handler (RFC 1928; RFC 1929 username/password auth):
    detection, server-side accept, client-side connect and UDP framing."""

    async def guess(self, reader, **kw):
        # SOCKS5 exchanges always begin with the version byte 0x05.
        header = (await reader.read_w(1))
        if (header == b'\x05'):
            return True
        reader.rollback(header)

    async def accept(self, reader, user, writer, users, authtable, **kw):
        """Negotiate auth and read the CONNECT request; return (user, host, port)."""
        # Method list: one count byte followed by that many method ids.
        methods = (await reader.read_n((await reader.read_n(1))[0]))
        user = authtable.authed()
        if (users and ((not user) or (b'\x00' not in methods))):
            # Username/password sub-negotiation (method 0x02).
            if (b'\x02' not in methods):
                raise Exception(f'Unauthorized SOCKS')
            writer.write(b'\x05\x02')
            assert ((await reader.read_n(1))[0] == 1), 'Unknown SOCKS auth'
            u = (await reader.read_n((await reader.read_n(1))[0]))
            p = (await reader.read_n((await reader.read_n(1))[0]))
            user = ((u + b':') + p)
            if (user not in users):
                raise Exception(f'Unauthorized SOCKS {u}:{p}')
            writer.write(b'\x01\x00')
        elif (users and (not user)):
            raise Exception(f'Unauthorized SOCKS')
        else:
            # No authentication required.
            writer.write(b'\x05\x00')
        if users:
            authtable.set_authed(user)
        # Request header: VER=5, CMD=1 (CONNECT), RSV=0.
        assert ((await reader.read_n(3)) == b'\x05\x01\x00'), 'Unknown SOCKS protocol'
        header = (await reader.read_n(1))
        (host_name, port, data) = (await socks_address_stream(reader, header[0]))
        # Success reply echoes the address bytes the client sent.
        writer.write(((b'\x05\x00\x00' + header) + data))
        return (user, host_name, port)

    async def connect(self, reader_remote, writer_remote, rauth, host_name, port, **kw):
        """Client side: authenticate to the remote proxy and issue CONNECT."""
        if rauth:
            writer_remote.write(b'\x05\x01\x02')
            assert ((await reader_remote.read_n(2)) == b'\x05\x02')
            writer_remote.write((b'\x01' + b''.join((packstr(i) for i in rauth.split(b':', 1)))))
            assert ((await reader_remote.read_n(2)) == b'\x01\x00'), 'Unknown SOCKS auth'
        else:
            writer_remote.write(b'\x05\x01\x00')
            assert ((await reader_remote.read_n(2)) == b'\x05\x00')
        # CONNECT with a domain-name address (ATYP=0x03).
        writer_remote.write(((b'\x05\x01\x00\x03' + packstr(host_name.encode())) + port.to_bytes(2, 'big')))
        assert ((await reader_remote.read_n(3)) == b'\x05\x00\x00')
        # Consume the bound-address field: 4+2 bytes for IPv4 (ATYP=1),
        # 16+2 for IPv6 (ATYP=4), length-prefixed string + 2 for a domain.
        header = (await reader_remote.read_n(1))[0]
        (await reader_remote.read_n((6 if (header == 1) else (18 if (header == 4) else ((await reader_remote.read_n(1))[0] + 2)))))

    def udp_accept(self, data, **kw):
        """Parse a SOCKS5 UDP datagram; return (True, host, port, payload) or None."""
        reader = io.BytesIO(data)
        # RSV(2 bytes) + FRAG(1 byte) must all be zero (no fragmentation).
        if (reader.read(3) != b'\x00\x00\x00'):
            return
        n = reader.read(1)[0]
        if (n not in (1, 3, 4)):
            return
        (host_name, port) = socks_address(reader, n)
        return (True, host_name, port, reader.read())

    def udp_connect(self, rauth, host_name, port, data, **kw):
        """Wrap ``data`` in a SOCKS5 UDP header using a domain-name address."""
        return (((b'\x00\x00\x00\x03' + packstr(host_name.encode())) + port.to_bytes(2, 'big')) + data)
class Voc2007Classification(data.Dataset):
    """PASCAL VOC 2007 multi-label classification dataset.

    Downloads the devkit if needed, caches per-split labels in a CSV, and
    yields (image, target) pairs.
    """

    def __init__(self, root, set, transform=None, target_transform=None):
        self.root = root
        self.path_devkit = os.path.join(root, 'VOCdevkit')
        self.path_images = os.path.join(root, 'VOCdevkit', 'VOC2007', 'JPEGImages')
        self.set = set
        self.transform = transform
        self.target_transform = target_transform
        download_voc2007(self.root)
        # Build the per-split label CSV once; later runs reuse the cache.
        path_csv = os.path.join(self.root, 'files', 'VOC2007')
        file_csv = os.path.join(path_csv, (('classification_' + set) + '.csv'))
        if (not os.path.exists(file_csv)):
            if (not os.path.exists(path_csv)):
                os.makedirs(path_csv)
            labeled_data = read_object_labels(self.root, 'VOC2007', self.set)
            write_object_labels_csv(file_csv, labeled_data)
        self.classes = object_categories
        self.images = read_object_labels_csv(file_csv)

    def __getitem__(self, index):
        """Return the (transformed) RGB image and its label vector."""
        (path, target) = self.images[index]
        img = Image.open(os.path.join(self.path_images, (path + '.jpg'))).convert('RGB')
        if (self.transform is not None):
            img = self.transform(img)
        if (self.target_transform is not None):
            target = self.target_transform(target)
        return (img, target)

    def __len__(self):
        return len(self.images)

    def get_number_classes(self):
        return len(self.classes)
class GRUNetwork(object):
    """GRU recurrent network (Lasagne layers) exposing both a full-sequence
    output layer and single-step layers for incremental rollout.

    NOTE(review): the accessor methods below (``input_layer`` etc.) read like
    stripped ``@property`` definitions — confirm the decorators were not lost
    in transit.
    """

    def __init__(self, input_shape, output_dim, hidden_dim, hidden_nonlinearity=LN.rectify, output_nonlinearity=None, name=None, input_var=None, input_layer=None):
        if (input_layer is None):
            # (batch, time) are both unknown in advance.
            l_in = L.InputLayer(shape=((None, None) + input_shape), input_var=input_var, name='input')
        else:
            l_in = input_layer
        # Single-step layers; they share weights with the sequence network.
        l_step_input = L.InputLayer(shape=((None,) + input_shape))
        l_step_prev_hidden = L.InputLayer(shape=(None, hidden_dim))
        l_gru = GRULayer(l_in, num_units=hidden_dim, hidden_nonlinearity=hidden_nonlinearity, hidden_init_trainable=False)
        l_gru_flat = L.ReshapeLayer(l_gru, shape=((- 1), hidden_dim))
        l_output_flat = L.DenseLayer(l_gru_flat, num_units=output_dim, nonlinearity=output_nonlinearity)
        # Reshape flat outputs back to (batch, time, output_dim).
        l_output = OpLayer(l_output_flat, op=(lambda flat_output, l_input: flat_output.reshape((l_input.shape[0], l_input.shape[1], (- 1)))), shape_op=(lambda flat_output_shape, l_input_shape: (l_input_shape[0], l_input_shape[1], flat_output_shape[(- 1)])), extras=[l_in])
        l_step_hidden = l_gru.get_step_layer(l_step_input, l_step_prev_hidden)
        # Step output reuses the sequence output weights (W, b).
        l_step_output = L.DenseLayer(l_step_hidden, num_units=output_dim, nonlinearity=output_nonlinearity, W=l_output_flat.W, b=l_output_flat.b)
        self._l_in = l_in
        self._hid_init_param = l_gru.h0
        self._l_gru = l_gru
        self._l_out = l_output
        self._l_step_input = l_step_input
        self._l_step_prev_hidden = l_step_prev_hidden
        self._l_step_hidden = l_step_hidden
        self._l_step_output = l_step_output

    def input_layer(self):
        return self._l_in

    def input_var(self):
        return self._l_in.input_var

    def output_layer(self):
        return self._l_out

    def step_input_layer(self):
        return self._l_step_input

    def step_prev_hidden_layer(self):
        return self._l_step_prev_hidden

    def step_hidden_layer(self):
        return self._l_step_hidden

    def step_output_layer(self):
        return self._l_step_output

    def hid_init_param(self):
        return self._hid_init_param
def create_cfg():
    """Generator fixture: render the CFG for the fixture script, yield the
    generated file paths plus an open DOT-file handle, then clean up.

    NOTE(review): the yield/teardown pattern suggests this is a
    ``@pytest.fixture`` whose decorator is not visible in this copy — confirm.
    """
    script_name = 'file_fixtures/my_file.py'
    # Output names derive from the script's basename (my_file -> my_file.svg).
    dot_file_path = os.path.splitext(os.path.basename(script_name))[0]
    svg_file_path = (dot_file_path + '.svg')
    cfg_generator.generate_cfg(mod=script_name, auto_open=False)
    gv_file_io = open(dot_file_path)
    (yield (dot_file_path, svg_file_path, gv_file_io))
    # Teardown: close the handle before removing the generated artifacts.
    gv_file_io.close()
    os.remove(dot_file_path)
    os.remove(svg_file_path)
class Block_Resnet_GCN(nn.Module):
    """Global Convolutional Network block: two parallel branches of separable
    k x 1 / 1 x k convolutions (each conv followed by BN + ReLU), summed."""

    def __init__(self, kernel_size, in_channels, out_channels, stride=1):
        super(Block_Resnet_GCN, self).__init__()
        # Shared shapes for the two separable orientations.
        tall = (kernel_size, 1)
        wide = (1, kernel_size)
        pad_tall = ((kernel_size // 2), 0)
        pad_wide = (0, (kernel_size // 2))
        # Branch 1: tall conv then wide conv.
        self.conv11 = nn.Conv2d(in_channels, out_channels, bias=False, stride=stride, kernel_size=tall, padding=pad_tall)
        self.bn11 = nn.BatchNorm2d(out_channels)
        self.relu11 = nn.ReLU(inplace=True)
        self.conv12 = nn.Conv2d(out_channels, out_channels, bias=False, stride=stride, kernel_size=wide, padding=pad_wide)
        self.bn12 = nn.BatchNorm2d(out_channels)
        self.relu12 = nn.ReLU(inplace=True)
        # Branch 2: wide conv then tall conv.
        self.conv21 = nn.Conv2d(in_channels, out_channels, bias=False, stride=stride, kernel_size=wide, padding=pad_wide)
        self.bn21 = nn.BatchNorm2d(out_channels)
        self.relu21 = nn.ReLU(inplace=True)
        self.conv22 = nn.Conv2d(out_channels, out_channels, bias=False, stride=stride, kernel_size=tall, padding=pad_tall)
        self.bn22 = nn.BatchNorm2d(out_channels)
        self.relu22 = nn.ReLU(inplace=True)

    def forward(self, x):
        """Run both branches and return their element-wise sum."""
        branch_a = self.relu12(self.bn12(self.conv12(self.relu11(self.bn11(self.conv11(x))))))
        branch_b = self.relu22(self.bn22(self.conv22(self.relu21(self.bn21(self.conv21(x))))))
        return branch_a + branch_b
def periodicity(f):
    """Decorator: normalize the ``periodicity`` query parameter before
    delegating to ``f``.

    Takes the last supplied value; purely numeric strings (e.g. "3600")
    become floats, anything else passes through unchanged.

    NOTE(review): the original showed a bare ``(f)`` above ``wrapper`` — the
    remnant of a stripped ``@functools.wraps(f)`` decorator; restored so the
    wrapper keeps ``f``'s name and docstring.
    """
    from functools import wraps

    @wraps(f)
    def wrapper(business_logic, query):
        if ('periodicity' in query):
            periodicity = query['periodicity'][(- 1)]
            if re.match(r'[\d.]+$', periodicity):
                query['periodicity'] = float(periodicity)
            else:
                query['periodicity'] = periodicity
        return f(business_logic, query)
    return wrapper
def test_complicated_target_register():
    """A multi-target bloq decomposes into the expected cirq circuit."""
    bloq = TestMultiCNOT()
    cbloq = qlt_testing.assert_valid_bloq_decomposition(bloq)
    assert (len(cbloq.bloq_instances) == (2 * 3))
    binst_graph = _create_binst_graph(cbloq.connections)
    # 2*3 bloq generations plus the left/right boundary generations.
    assert (len(list(nx.topological_generations(binst_graph))) == ((2 * 3) + 2))
    (circuit, _) = cbloq.to_cirq_circuit(**get_named_qubits(bloq.signature.lefts()))
    # NOTE(review): this diagram literal appears stripped of its connective
    # characters (control dots / wire segments) — verify against real output.
    cirq.testing.assert_has_diagram(circuit, 'control: \n \ntarget[0, 0]: X\n \ntarget[0, 1]: X\n \ntarget[0, 2]: X\n \ntarget[1, 0]: X\n \ntarget[1, 1]: X\n \ntarget[1, 2]: X\n')
class TestRelativeEntropy():
    """Tests for ``qutip.entropy_relative`` against a direct implementation."""

    def _simple_relative_entropy_implementation(self, rho, sigma, log_base=np.log, tol=1e-12):
        # Reference: S(rho||sigma) = tr(rho log rho) - tr(rho log sigma),
        # computed elementwise in the eigenbases of both states.
        (rvals, rvecs) = rho.eigenstates()
        (svals, svecs) = sigma.eigenstates()
        rvecs = np.hstack([vec.full() for vec in rvecs]).T
        svecs = np.hstack([vec.full() for vec in svecs]).T
        S = 0
        for i in range(len(rvals)):
            if (abs(rvals[i]) >= tol):
                S += (rvals[i] * log_base(rvals[i]))
            for j in range(len(svals)):
                # Overlap |<r_i|s_j>|^2 between eigenvectors.
                P_ij = (np.dot(rvecs[i], svecs[j].conjugate()) * np.dot(svecs[j], rvecs[i].conjugate()))
                # Infinite divergence: sigma has a zero eigenvalue with
                # support overlapping rho's.
                if ((abs(svals[j]) < tol) and (not ((abs(rvals[i]) < tol) or (abs(P_ij) < tol)))):
                    return np.inf
                if (abs(svals[j]) >= tol):
                    S -= ((rvals[i] * P_ij) * log_base(svals[j]))
        return np.real(S)

    def test_rho_or_sigma_not_oper(self):
        # Bras/kets are rejected; only density operators are accepted.
        rho = qutip.bra('00')
        sigma = qutip.bra('01')
        with pytest.raises(TypeError) as exc:
            qutip.entropy_relative(rho.dag(), sigma)
        assert (str(exc.value) == 'Inputs must be density matrices.')
        with pytest.raises(TypeError) as exc:
            qutip.entropy_relative(rho, sigma.dag())
        assert (str(exc.value) == 'Inputs must be density matrices.')
        with pytest.raises(TypeError) as exc:
            qutip.entropy_relative(rho, sigma)
        assert (str(exc.value) == 'Inputs must be density matrices.')

    def test_rho_and_sigma_have_different_shape_and_dims(self):
        rho = qutip.ket('00')
        sigma = qutip.ket('0')
        with pytest.raises(ValueError) as exc:
            qutip.entropy_relative(rho, sigma)
        assert (str(exc.value) == 'Inputs must have the same shape and dims.')
        # Same shape but different tensor-factor dims must also be rejected.
        rho = qutip.basis([2, 3], [0, 0])
        sigma = qutip.basis([3, 2], [0, 0])
        with pytest.raises(ValueError) as exc:
            qutip.entropy_relative(rho, sigma)
        assert (str(exc.value) == 'Inputs must have the same shape and dims.')

    def test_base_not_2_or_e(self):
        rho = qutip.ket('00')
        sigma = qutip.ket('01')
        with pytest.raises(ValueError) as exc:
            qutip.entropy_relative(rho, sigma, base=3)
        assert (str(exc.value) == 'Base must be 2 or e.')

    def test_infinite_relative_entropy(self):
        # Orthogonal pure states have infinite relative entropy.
        rho = qutip.ket('00')
        sigma = qutip.ket('01')
        assert (qutip.entropy_relative(rho, sigma) == np.inf)

    def test_base_2_or_e(self):
        rho = qutip.ket2dm(qutip.ket('00'))
        sigma = (rho + qutip.ket2dm(qutip.ket('01')))
        sigma = sigma.unit()
        # S(rho||sigma) = log 2 in nats = 1 bit.
        assert (qutip.entropy_relative(rho, sigma) == pytest.approx(np.log(2)))
        assert (qutip.entropy_relative(rho, sigma, base=np.e) == pytest.approx(0.))
        assert (qutip.entropy_relative(rho, sigma, base=2) == pytest.approx(1))

    def test_pure_vs_maximally_mixed_state(self):
        # Pure state vs the 2-qubit maximally mixed state: 2 bits.
        rho = qutip.ket('00')
        sigma = sum((qutip.ket2dm(qutip.ket(psi)) for psi in ['00', '01', '10', '11'])).unit()
        assert (qutip.entropy_relative(rho, sigma, base=2) == pytest.approx(2))

    def test_density_matrices_with_non_real_eigenvalues(self):
        rho = qutip.ket2dm(qutip.ket('00'))
        sigma = qutip.ket2dm(qutip.ket('01'))
        with pytest.raises(ValueError) as exc:
            qutip.entropy_relative((rho + 1j), sigma)
        assert (str(exc.value) == 'Input rho has non-real eigenvalues.')
        with pytest.raises(ValueError) as exc:
            qutip.entropy_relative((rho - 1j), sigma)
        assert (str(exc.value) == 'Input rho has non-real eigenvalues.')
        with pytest.raises(ValueError) as exc:
            qutip.entropy_relative(rho, (sigma + 1j))
        assert (str(exc.value) == 'Input sigma has non-real eigenvalues.')
        with pytest.raises(ValueError) as exc:
            qutip.entropy_relative(rho, (sigma - 1j))
        assert (str(exc.value) == 'Input sigma has non-real eigenvalues.')

    # NOTE(review): the line below is the remnant of a stripped decorator
    # (likely @pytest.mark.repeat(20)) — restore before running.
    .repeat(20)
    def test_random_dm_with_self(self):
        rho = qutip.rand_dm(8)
        rel = qutip.entropy_relative(rho, rho)
        # S(rho||rho) must be (numerically) zero.
        assert (abs(rel) < 1e-13)

    # NOTE(review): the line below is the remnant of a stripped decorator
    # (likely @pytest.mark.repeat(20)) — restore before running.
    .repeat(20)
    def test_random_rho_sigma(self):
        rho = qutip.rand_dm(8)
        sigma = qutip.rand_dm(8)
        rel = qutip.entropy_relative(rho, sigma)
        # Relative entropy is non-negative and matches the reference.
        assert (rel >= 0)
        assert (rel == pytest.approx(self._simple_relative_entropy_implementation(rho, sigma, np.log)))
import pytest


class TestParsePep440Version():
    """Semver pre-release strings convert to their PEP 440 equivalents.

    NOTE(review): the parametrize decorator was garbled to a bare
    ``.parametrize`` in the source (stripped ``@pytest.mark``); restored,
    with ``import pytest`` added in case the module header lacks it.
    """

    @pytest.mark.parametrize('original, expected', [('0.7.0-alpha.1', '0.7.0a1'), ('0.8.0-alpha.2', '0.8.0a2')])
    def test_semver2_to_pep440(self, original: str, expected: str):
        from reana.reana_dev.utils import parse_pep440_version
        assert (str(parse_pep440_version(original)) == expected)
from unittest import mock


@mock.patch('sys.stderr', new_callable=StringIO)
@mock.patch('sys.stdout', new_callable=StringIO)
def test_logger_stdout_vs_stderr(mock_stdout, mock_stderr):
    """Log records split by level: DEBUG/INFO/NOTIFY go to stdout, WARNING
    and above go to stderr.

    NOTE(review): the two @mock.patch decorators were garbled to bare tuples
    in the source; restored — the bottom-most patch maps to the first
    parameter (mock_stdout), matching the signature.
    """
    with temp_logger('pypyr.xxx') as logger:
        for handler in pypyr.log.logger.get_log_handlers(logging.DEBUG, None):
            logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        logger.debug('to debug')
        logger.info('to info')
        logger.notify('to notify')
        logger.warning('to warning')
        logger.error('to error')
        logger.critical('to critical')
    assert (mock_stdout.getvalue() == 'to debug\nto info\nto notify\n')
    assert (mock_stderr.getvalue() == 'to warning\nto error\nto critical\n')
def power_iteration(W, u_, update=True, eps=1e-12):
    """One power-iteration step per vector in ``u_`` for the leading singular
    values of ``W``, deflating each new vector against the previous ones via
    Gram-Schmidt.

    Returns (singular_values, left_vectors, right_vectors); when ``update``
    is true the entries of ``u_`` are refreshed in place.
    """
    singular_values = []
    left_vectors = []
    right_vectors = []
    for idx, u in enumerate(u_):
        with torch.no_grad():
            # Right vector: deflate and normalize u @ W.
            v = F.normalize(gram_schmidt(torch.matmul(u, W), right_vectors), eps=eps)
            right_vectors.append(v)
            # Left vector: deflate and normalize v @ W^T.
            u = F.normalize(gram_schmidt(torch.matmul(v, W.t()), left_vectors), eps=eps)
            left_vectors.append(u)
            if update:
                u_[idx][:] = u
        # Singular value estimate u (W v^T)^T; computed outside no_grad so it
        # stays differentiable.
        singular_values.append(torch.squeeze(torch.matmul(torch.matmul(v, W.t()), u.t())))
    return (singular_values, left_vectors, right_vectors)
def pretty_seq(args: Sequence[str], conjunction: str) -> str:
    """Render ``args`` as a human-readable, double-quoted list joined with
    ``conjunction`` (Oxford comma for three or more items)."""
    quoted = [f'"{item}"' for item in args]
    if len(quoted) == 1:
        return quoted[0]
    if len(quoted) == 2:
        return f'{quoted[0]} {conjunction} {quoted[1]}'
    head = ', '.join(quoted[:-1])
    return f'{head}, {conjunction} {quoted[-1]}'
class SettingsCM(object):
    """Context manager that temporarily applies ``settings_to_set`` to
    ``database.settings`` and restores the previous values on exit."""

    def __init__(self, database, settings_to_set):
        self.database = database
        self.settings_to_set = settings_to_set

    def __enter__(self):
        # A second entry would clobber the saved snapshot, so forbid reuse.
        if hasattr(self, 'stored_settings'):
            raise RuntimeError('cannot re-use setting CMs')
        settings = self.database.settings
        # getset() returns the current values of exactly the keys we override.
        self.stored_settings = settings.getset(self.settings_to_set.keys())
        settings.update(self.settings_to_set)

    def __exit__(self, typ, val, tb):
        self.database.settings.update(self.stored_settings)
class Poll(models.Model):
    """A poll question with its publication date."""

    question = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __unicode__(self):
        return self.question

    def was_published_recently(self):
        # True only for dates within the last day and not in the future.
        now = timezone.now()
        return ((now - datetime.timedelta(days=1)) <= self.pub_date <= now)
    # Admin change-list display configuration for the method column.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Preprocessor(object):
    """Numericalizes ALFRED-style annotations: language (goal/instruction
    tokens) and action sequences (low- and high-level) into vocab indices.

    NOTE(review): ``numericalize`` was defined without ``self`` yet invoked as
    ``self.numericalize(...)`` — its ``@staticmethod`` decorator was evidently
    stripped; restored here, otherwise every call passes ``self`` as ``vocab``.
    """

    def __init__(self, vocab, subgoal_ann=False, is_test_split=False, frame_size=300):
        self.subgoal_ann = subgoal_ann
        self.is_test_split = is_test_split
        self.frame_size = frame_size
        if (vocab is None):
            # Fresh vocabularies seeded with the special control tokens.
            self.vocab = {'word': Vocab(['<<pad>>', '<<seg>>', '<<goal>>', '<<mask>>']), 'action_low': Vocab(['<<pad>>', '<<seg>>', '<<stop>>', '<<mask>>']), 'action_high': Vocab(['<<pad>>', '<<seg>>', '<<stop>>', '<<mask>>'])}
        else:
            self.vocab = vocab
        self.word_seg = self.vocab['word'].word2index('<<seg>>', train=False)

    @staticmethod
    def numericalize(vocab, words, train=True):
        """Map ``words`` to indices; at eval time unknown words become <<pad>>."""
        if (not train):
            new_words = (set(words) - set(vocab.counts.keys()))
            if new_words:
                words = [(w if (w not in new_words) else '<<pad>>') for w in words]
        return vocab.word2index(words, train=train)

    def process_language(self, ex, traj, r_idx):
        """Tokenize the r_idx-th annotation (or synthesize one from subgoal
        actions) and store both tokens and indices on ``traj``."""
        if (not self.subgoal_ann):
            goal_ann = ex['turk_annotations']['anns'][r_idx]['task_desc']
            instr_anns = ex['turk_annotations']['anns'][r_idx]['high_descs']
            goal_ann = revtok.tokenize(py_util.remove_spaces_and_lower(goal_ann))
            instr_anns = [revtok.tokenize(py_util.remove_spaces_and_lower(instr_ann)) for instr_ann in instr_anns]
            goal_ann = [w.strip().lower() for w in goal_ann]
            instr_anns = [[w.strip().lower() for w in instr_ann] for instr_ann in instr_anns]
        else:
            # Subgoal mode: build pseudo-instructions from high-level actions.
            goal_ann = ['<<seg>>']
            instr_anns = [([a['action']] + a['action_high_args']) for a in traj['num']['action_high']]
            instr_anns = [[self.vocab['action_high'].index2word(w) for w in instr_ann] for instr_ann in instr_anns]
        traj['ann'] = {'goal': (goal_ann + ['<<goal>>']), 'instr': [(instr_ann + ['<<instr>>']) for instr_ann in instr_anns], 'repeat_idx': r_idx}
        if (not self.subgoal_ann):
            traj['ann']['instr'] += [['<<stop>>']]
        if ('num' not in traj):
            traj['num'] = {}
        traj['num']['lang_goal'] = self.numericalize(self.vocab['word'], traj['ann']['goal'], train=(not self.is_test_split))
        traj['num']['lang_instr'] = [self.numericalize(self.vocab['word'], x, train=(not self.is_test_split)) for x in traj['ann']['instr']]

    def process_actions(self, ex, traj):
        """Numericalize low- and high-level actions, grouping low actions by
        their parent high-level index and aligning segment counts."""
        self.fix_missing_high_pddl_end_action(ex)
        # Synthetic terminal action appended after the real low actions.
        end_action = {'api_action': {'action': 'NoOp'}, 'discrete_action': {'action': '<<stop>>', 'args': {}}, 'high_idx': ex['plan']['high_pddl'][(- 1)]['high_idx']}
        num_hl_actions = len(ex['plan']['high_pddl'])
        if ('num' not in traj):
            traj['num'] = {}
        traj['num']['action_low'] = [list() for _ in range(num_hl_actions)]
        traj['num']['action_high'] = []
        low_to_high_idx = []
        for a in (ex['plan']['low_actions'] + [end_action]):
            high_idx = a['high_idx']
            low_to_high_idx.append(high_idx)
            traj['num']['action_low'][high_idx].append({'high_idx': a['high_idx'], 'action': self.vocab['action_low'].word2index(a['discrete_action']['action'], train=True), 'action_high_args': a['discrete_action']['args']})
            if ('bbox' in a['discrete_action']['args']):
                # Normalized bbox centroid; 'NULL' coordinates become -1.
                (xmin, ymin, xmax, ymax) = [(float(x) if (x != 'NULL') else (- 1)) for x in a['discrete_action']['args']['bbox']]
                traj['num']['action_low'][high_idx][(- 1)]['centroid'] = [((xmin + ((xmax - xmin) / 2)) / self.frame_size), ((ymin + ((ymax - ymin) / 2)) / self.frame_size)]
            else:
                traj['num']['action_low'][high_idx][(- 1)]['centroid'] = [(- 1), (- 1)]
            if ('mask' in a['discrete_action']['args']):
                mask = a['discrete_action']['args']['mask']
            else:
                mask = None
            traj['num']['action_low'][high_idx][(- 1)]['mask'] = mask
            valid_interact = (1 if model_util.has_interaction(a['discrete_action']['action']) else 0)
            traj['num']['action_low'][high_idx][(- 1)]['valid_interact'] = valid_interact
        traj['num']['low_to_high_idx'] = low_to_high_idx
        for a in ex['plan']['high_pddl']:
            if ('parameter' in a['discrete_action']):
                # Flatten per-step parameter args into one list.
                low_len = len(a['discrete_action']['parameter'])
                full_args = []
                for l_idx in range(low_len):
                    full_args += a['discrete_action']['parameter'][l_idx]['args']
                a['discrete_action']['args'] = [w.strip().lower() for w in full_args]
            else:
                a['discrete_action']['args'] = [w.strip().lower() for w in a['discrete_action']['args']]
            traj['num']['action_high'].append({'high_idx': a['high_idx'], 'action': self.vocab['action_high'].word2index(a['discrete_action']['action'], train=True), 'action_high_args': self.numericalize(self.vocab['action_high'], a['discrete_action']['args'])})
        # Align action segments with language segments; tolerate exactly one
        # extra low-action segment by merging the last two.
        action_low_seg_len = len(traj['num']['action_low'])
        if self.subgoal_ann:
            lang_instr_seg_len = len(traj['num']['action_high'])
        else:
            lang_instr_seg_len = (len(ex['turk_annotations']['anns'][0]['high_descs']) + 1)
        seg_len_diff = (action_low_seg_len - lang_instr_seg_len)
        if (seg_len_diff != 0):
            assert (seg_len_diff == 1)
            self.merge_last_two_low_actions(traj)

    def fix_missing_high_pddl_end_action(self, ex):
        """Append an 'End' high-level action when the planner omitted it."""
        if (ex['plan']['high_pddl'][(- 1)]['planner_action']['action'] != 'End'):
            ex['plan']['high_pddl'].append({'discrete_action': {'action': 'NoOp', 'args': []}, 'planner_action': {'value': 1, 'action': 'End'}, 'high_idx': len(ex['plan']['high_pddl'])})

    def merge_last_two_low_actions(self, conv):
        """Fold the second-to-last low-action segment into the one before it
        and re-point the final (stop) segment's high index."""
        extra_seg = copy.deepcopy(conv['num']['action_low'][(- 2)])
        for sub in extra_seg:
            sub['high_idx'] = conv['num']['action_low'][(- 3)][0]['high_idx']
            conv['num']['action_low'][(- 3)].append(sub)
        del conv['num']['action_low'][(- 2)]
        conv['num']['action_low'][(- 1)][0]['high_idx'] = (len(conv['plan']['high_pddl']) - 1)
class DonutFeatureExtractor(DonutImageProcessor):
    """Deprecated alias for ``DonutImageProcessor``, kept for backward
    compatibility; emits a FutureWarning on construction."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn('The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please use DonutImageProcessor instead.', FutureWarning)
        super().__init__(*args, **kwargs)
def check_extras(dist, attr, value):
    """Validate the ``extras_require`` mapping for a distribution, raising
    DistutilsSetupError on any malformed extra name or requirement list."""
    try:
        for extra, requirements in value.items():
            _check_extra(extra, requirements)
    except (TypeError, ValueError, AttributeError) as e:
        # Non-mapping values or bad entries all funnel into one setup error.
        raise DistutilsSetupError("'extras_require' must be a dictionary whose values are strings or lists of strings containing valid project/version requirement specifiers.") from e
class HeatMap(base.ScriptBase):
    """Render a heat-map PNG from JSON records read on stdin.

    Expects three positional args naming the index (row), column, and value
    fields used to pivot the data.
    """

    ARGS_HELP = '<index> <column> <value>'
    VERSION = '1.0'
    COPYRIGHT = u'Copyright (c) 2018 PyroScope Project'
    # Lower bound for the colour-map range so small data sets still span
    # the full gradient.
    CMAP_MIN_MAX = 4.0

    def add_options(self):
        super(HeatMap, self).add_options()
        self.add_bool_option('-o', '--open', help='open the resulting image file in your viewer')

    def heatmap(self, df, imagefile):
        """Pivot ``df`` by the three CLI args and save a heat map to ``imagefile``."""
        import seaborn as sns
        import matplotlib.ticker as tkr
        import matplotlib.pyplot as plt
        from matplotlib.colors import LinearSegmentedColormap
        sns.set()
        with sns.axes_style('whitegrid'):
            (fig, ax) = plt.subplots(figsize=(5, 11))
            # Colour stops are anchored at fixed fractions of the data max.
            cmax = max(df[self.args[2]].max(), self.CMAP_MIN_MAX)
            csteps = {0.0: 'darkred', (0.3 / cmax): 'red', (0.6 / cmax): 'orangered', (0.9 / cmax): 'coral', (1.0 / cmax): 'skyblue', (1.5 / cmax): 'blue', (1.9 / cmax): 'darkblue', (2.0 / cmax): 'darkgreen', (3.0 / cmax): 'green', ((self.CMAP_MIN_MAX - 0.1) / cmax): 'palegreen', 1.0: 'yellow'}
            cmap = LinearSegmentedColormap.from_list('RdGrYl', sorted(csteps.items()), N=256)
            dataset = df.pivot(*self.args)
            sns.heatmap(dataset, mask=dataset.isnull(), annot=False, linewidths=0.5, square=True, ax=ax, cmap=cmap, annot_kws=dict(stretch='condensed'))
            ax.tick_params(axis='y', labelrotation=30, labelsize=8)
            plt.savefig(imagefile)

    def mainloop(self):
        """Entry point: read records from stdin, aggregate, render, optionally open."""
        if (len(self.args) != 3):
            self.fatal('You MUST provide names for index (row), column, and value!')
        df = pd.read_json(sys.stdin, orient='records')
        # Trim long row labels and average duplicate (index, column) cells.
        df[self.args[0]] = df[self.args[0]].str.slice(0, 42)
        df = df.groupby(self.args[:2], as_index=False).mean()
        imagefile = 'heatmap.png'
        self.heatmap(df, imagefile)
        if self.options.open:
            subprocess.call('xdg-open {} &'.format(quoted(imagefile)), shell=True)
class KannelBackend(BackendBase):
    """RapidSMS backend that delivers messages through a Kannel SMS gateway
    and (optionally) requests delivery reports back from it."""

    def configure(self, sendsms_url='http://127.0.0.1:13013/cgi-bin/sendsms',
                  sendsms_params=None, charset=None, coding=None,
                  encode_errors=None, delivery_report_url=None, **kwargs):
        """Store gateway settings with sane defaults.

        NOTE(review): the ``sendsms_url`` default was garbled in the source
        (a bare quote with no content); restored to Kannel's conventional
        local sendsms endpoint — confirm against the deployment config.
        """
        self.sendsms_url = sendsms_url
        self.sendsms_params = (sendsms_params or {})
        self.charset = (charset or 'ascii')
        self.coding = (coding or 0)
        self.encode_errors = (encode_errors or 'ignore')
        self.delivery_report_url = delivery_report_url

    def prepare_request(self, id_, text, identities, context):
        """Build the requests.get kwargs for one outgoing message."""
        kwargs = {'url': self.sendsms_url}
        query = copy.copy(self.sendsms_params)
        query['to'] = ' '.join(identities)
        query['text'] = text.encode(self.charset, self.encode_errors)
        query['coding'] = self.coding
        query['charset'] = self.charset
        if self.delivery_report_url:
            # dlr-mask 31 requests all delivery-report event types; the %-codes
            # are Kannel escapes substituted by the gateway, not by Python.
            query['dlr-mask'] = 31
            dlr_url_params = (('message_id=%s' % id_), 'status=%d', 'status_text=%A', 'smsc=%i', 'sms_id=%I', 'date_sent=%t', 'identity=%p')
            dlr_url_params = '&'.join(dlr_url_params)
            dlr_url = ('%s%s' % (self.delivery_report_url, reverse('kannel-delivery-report')))
            query['dlr-url'] = '?'.join([dlr_url, dlr_url_params])
        kwargs['params'] = query
        return kwargs

    def send(self, id_, text, identities, context=None):
        """Send ``text`` to ``identities``; raise on non-OK gateway response."""
        logger.debug('Sending message: %s', text)
        context = (context or {})
        kwargs = self.prepare_request(id_, text, identities, context)
        r = requests.get(**kwargs)
        if (r.status_code != requests.codes.ok):
            r.raise_for_status()
def settings_converter(loaded_settings: dict, input_data: str) -> dict[(str, Any)]:
    """Parse delimiter-separated ``name=value`` pairs into a settings dict.

    Each name must exist in ``loaded_settings``; values are converted with
    ``parse_value`` using the declared type. Raises BadArgument on malformed
    input, unknown settings, or unconvertible values.

    Fix: the original popped and re-inserted keys while iterating the dict,
    which can raise RuntimeError or visit entries inconsistently; conversion
    now happens over a snapshot of the items.
    """
    if (not input_data):
        return {}
    parsed = SETTINGS_DELIMITER.split(input_data)
    if (not parsed):
        return {}
    try:
        settings = {setting: value for (setting, value) in [part.split('=', maxsplit=1) for part in parsed]}
    except ValueError:
        raise BadArgument('The settings provided are not in the correct format.')
    for setting in settings:
        if (setting not in loaded_settings):
            raise BadArgument(f'{setting!r} is not a recognized setting.')
    # Convert on a snapshot so the dict is never mutated mid-iteration.
    for (setting, raw_value) in list(settings.items()):
        type_ = loaded_settings[setting][2]
        try:
            settings[setting] = parse_value(raw_value, type_)
        except (TypeError, ValueError) as e:
            raise BadArgument(e)
    return settings
def get_pipeline_definition(pipeline_name, parent=None):
    """Load ``pipeline_name``'s yaml and wrap it, with file metadata, in a
    PipelineDefinition."""
    logger.debug('starting')
    pipeline_yaml = get_pipeline_yaml(pipeline_name)
    file_info = PipelineFileInfo(pipeline_name='', parent=parent, loader=__name__, path=None)
    pipeline_definition = PipelineDefinition(pipeline=pipeline_yaml, info=file_info)
    logger.debug('found %d stages in pipeline.', len(pipeline_definition.pipeline))
    logger.debug('done')
    return pipeline_definition
import pytest


@pytest.mark.parametrize('repo_name, extended_repo_names, expected_failure', [('something', False, None), ('something', True, None), ('some/slash', False, Failures.SLASH_REPOSITORY), ('some/slash', True, None), ('some/more/slash', False, Failures.SLASH_REPOSITORY), ('some/more/slash', True, None), pytest.param(('x' * 255), False, None, id='Valid long name'), pytest.param(('x' * 255), True, None, id='Valid long name'), pytest.param(('x' * 256), False, Failures.INVALID_REPOSITORY, id='Name too long'), pytest.param(('x' * 256), True, Failures.INVALID_REPOSITORY, id='Name too long')])
def test_v2_push_reponame(repo_name, extended_repo_names, expected_failure, v2_pusher, v2_puller, basic_images, liveserver_session, app_reloader, liveserver, registry_server_executor):
    """Push (and on success pull) repositories whose names exercise the
    EXTENDED_REPOSITORY_NAMES feature flag.

    NOTE(review): the decorator above was garbled to a bare ``.parametrize``
    in the source (stripped ``@pytest.mark``); restored, with ``import
    pytest`` added in case the module header (not visible here) lacks it.
    """
    credentials = ('devtable', 'password')
    with FeatureFlagValue('EXTENDED_REPOSITORY_NAMES', extended_repo_names, registry_server_executor.on(liveserver)):
        v2_pusher.push(liveserver_session, 'devtable', repo_name, 'latest', basic_images, credentials=credentials, expected_failure=expected_failure)
        if (expected_failure is None):
            v2_puller.pull(liveserver_session, 'devtable', repo_name, 'latest', basic_images, credentials=credentials)
class PyramidPooling(nn.Module):
    """PSPNet-style pyramid pooling: pool the input to several bin sizes,
    project each to in_channels/4 with conv+BN+ReLU, upsample back, and
    concatenate everything (including the input) along channels."""

    def __init__(self, in_channels, sizes=(1, 2, 3, 6), norm_layer=nn.BatchNorm2d, **kwargs):
        super(PyramidPooling, self).__init__()
        branch_channels = int(in_channels / 4)
        self.avgpools = nn.ModuleList()
        self.convs = nn.ModuleList()
        for bin_size in sizes:
            self.avgpools.append(nn.AdaptiveAvgPool2d(bin_size))
            self.convs.append(_ConvBNReLU(in_channels, branch_channels, 1, norm_layer=norm_layer))

    def forward(self, x):
        spatial_size = x.size()[2:]
        # Start with the identity branch, then add each upsampled pyramid level.
        pyramid = [x]
        pyramid.extend(
            F.interpolate(conv(pool(x)), spatial_size, mode='bilinear', align_corners=True)
            for (pool, conv) in zip(self.avgpools, self.convs)
        )
        return torch.cat(pyramid, dim=1)
_test
# NOTE(review): the bare name above is almost certainly the remnant of a
# stripped decorator (e.g. @keras_test) — restore before running.
def test_layer_sharing_at_heterogenous_depth():
    """Layers shared at different depths survive a config/weights round-trip."""
    x_val = np.random.random((10, 5))
    x = Input(shape=(5,))
    A = Dense(5, name='A')
    B = Dense(5, name='B')
    # A and B each appear twice at different depths in the graph.
    output = A(B(A(B(x))))
    M = Model(x, output)
    output_val = M.predict(x_val)
    config = M.get_config()
    weights = M.get_weights()
    M2 = Model.from_config(config)
    M2.set_weights(weights)
    output_val_2 = M2.predict(x_val)
    np.testing.assert_allclose(output_val, output_val_2, atol=1e-06)
def chain_species_base(base, basesite, subunit, site1, site2, size, comp=1):
    """Build a species pattern for a chain of ``size`` copies of ``subunit``
    linked site1->site2 and anchored to ``base`` at ``basesite``.

    Uses pysb-style ``%`` complex-binding between monomer patterns; integer
    site values are bond numbers, ``None`` marks a free site.
    """
    _verify_sites(base, basesite)
    _verify_sites(subunit, site1, site2)
    if (size <= 0):
        raise ValueError('size must be an integer greater than 0')
    if (comp == 1):
        compbase = base({basesite: 1})
    else:
        # Attach the base to an existing complex pattern.
        compbase = (comp % base({basesite: 1}))
    if (size == 1):
        # Single subunit: bound to base via bond 1, tail site free.
        chainlink = (compbase % subunit({site1: 1, site2: None}))
    elif (size == 2):
        chainlink = ((compbase % subunit({site1: 1, site2: 2})) % subunit({site1: 2, site2: None}))
    else:
        # Bond numbers 1..size link consecutive subunits; last tail is free.
        chainbase = compbase
        chainlink = (chainbase % subunit({site1: 1, site2: 2}))
        for i in range(2, size):
            chainlink %= subunit({site1: i, site2: (i + 1)})
        chainlink %= subunit({site1: size, site2: None})
    chainlink.match_once = True
    return chainlink
def exec_cmd_in_pod(command, pod_name, namespace, container=None, base_command='bash'):
    """Run ``command`` inside a pod via the Kubernetes exec API.

    Returns the command's output stream, or False if the exec call failed.
    The original duplicated the entire ``stream()`` call for the
    with/without-container cases; the kwargs are now built once.
    """
    exec_command = [base_command, '-c', command]
    kwargs = dict(command=exec_command, stderr=True, stdin=False, stdout=True, tty=False)
    if container:
        kwargs['container'] = container
    try:
        ret = stream(cli.connect_get_namespaced_pod_exec, pod_name, namespace, **kwargs)
    except Exception:
        # Best-effort: callers treat False as "exec failed".
        return False
    return ret
def test_from_snc_profiler():
    """SNC Profiler axes parse with matching metadata and centre values."""
    file_name = get_data_file('test_varian_open.prs')
    x_profile = Profile().from_snc_profiler(file_name, 'tvs')
    y_profile = Profile().from_snc_profiler(file_name, 'rad')
    # Both axes read 45.0 at the beam axis for this open field.
    assert np.isclose(x_profile.get_y(0), 45.)
    assert np.isclose(y_profile.get_y(0), 45.)
    # Metadata comes from the same file header for either axis.
    assert (x_profile.meta['SSD'] == y_profile.meta['SSD'])
class OneofPattern(Pattern):
    """Composite pattern that matches when any one sub-pattern matches."""

    def __init__(self, sub_patterns):
        self._sub_patterns = sub_patterns

    def match(self, op, tensor):
        """Return the first non-None sub-pattern match result, else None."""
        candidates = (pattern.match(op, tensor) for pattern in self._sub_patterns)
        return next((result for result in candidates if result is not None), None)
def test_requests_pool_one_param(vk):
    """vk_request_one_param_pool fans a single method call out over values."""
    (users, error) = vk_request_one_param_pool(vk, 'users.get', key='user_ids', values=['durov', 'python273'], default_values={'fields': 'city'})
    assert (error == {})
    assert isinstance(users, dict)
    assert (users['durov'][0]['city']['id'] == 2)
    # NOTE(review): the expected id literal was lost from the line below in
    # this copy (it is a syntax error as-is) — restore the intended value.
    assert (users['python273'][0]['id'] == )
class KnownValues(unittest.TestCase):
    """Regression values for ADC(2)/ADC(2)-x/ADC(3) IP calculations on the
    module-level ``myadc`` object.

    NOTE(review): the expected literals below (``(- 0.)``, ``0.``, ``1.``)
    appear truncated in this copy — restore the reference numbers before
    trusting these tests.
    """

    def test_ip_adc2(self):
        (e, t_amp1, t_amp2) = myadc.kernel_gs()
        self.assertAlmostEqual(e, (- 0.), 6)
        (e, v, p, x, es) = myadc.ip_adc(nroots=3)
        es.analyze()
        self.assertAlmostEqual(e[0], 0., 6)
        self.assertAlmostEqual(e[1], 0., 6)
        self.assertAlmostEqual(e[2], 0., 6)
        self.assertAlmostEqual(p[0], 1., 6)
        self.assertAlmostEqual(p[1], 1., 6)
        self.assertAlmostEqual(p[2], 1., 6)

    def test_ip_adc2_oneroot(self):
        (e, v, p, x) = myadc.kernel()
        self.assertAlmostEqual(e[0], 0., 6)
        self.assertAlmostEqual(p[0], 1., 6)

    def test_ip_adc2x(self):
        myadc.method = 'adc(2)-x'
        (e, v, p, x) = myadc.kernel(nroots=3)
        e_corr = myadc.e_corr
        self.assertAlmostEqual(e_corr, (- 0.), 6)
        self.assertAlmostEqual(e[0], 0., 6)
        self.assertAlmostEqual(e[1], 0., 6)
        self.assertAlmostEqual(e[2], 0., 6)
        self.assertAlmostEqual(p[0], 1., 6)
        self.assertAlmostEqual(p[1], 1., 6)
        self.assertAlmostEqual(p[2], 1., 6)

    def test_ip_adc3(self):
        myadc.method = 'adc(3)'
        myadc.method_type = 'ip'
        (e, v, p, x) = myadc.kernel(nroots=3)
        e_corr = myadc.e_corr
        self.assertAlmostEqual(e_corr, (- 0.), 6)
        self.assertAlmostEqual(e[0], 0., 6)
        self.assertAlmostEqual(e[1], 0., 6)
        self.assertAlmostEqual(e[2], 0., 6)
        self.assertAlmostEqual(p[0], 1., 6)
        self.assertAlmostEqual(p[1], 1., 6)
        self.assertAlmostEqual(p[2], 1., 6)
def read_dataset(lang_id='eng'):
    """Load the lexicon for ``lang_id`` and build a grapheme/phoneme Dataset."""
    lexicon = read_lexicon(lang_id)
    phoneme_seqs, grapheme_seqs = [], []
    phoneme_symbols, grapheme_symbols = set(), set()
    for word, phonemes in lexicon.word2phoneme.items():
        graphemes = list(word)  # each character of the word is one grapheme
        phoneme_seqs.append(phonemes)
        grapheme_seqs.append(graphemes)
        phoneme_symbols.update(phonemes)
        grapheme_symbols.update(graphemes)
    return Dataset(phoneme_seqs, grapheme_seqs,
                   Vocab(phoneme_symbols), Vocab(grapheme_symbols))
def apply_tree(x, func, path=()):
    """Depth-first walk over an ``Object`` tree, calling ``func(path, node)``.

    Children are visited before their parent.  List/tuple and dict members
    contribute a ``(name, index_or_key)`` path element; scalar members
    contribute just ``name``.
    """
    if isinstance(x, Object):
        for name, val in x.inamevals():
            if isinstance(val, (list, tuple)):
                children = enumerate(val)
            elif isinstance(val, dict):
                children = val.items()
            else:
                # Scalar member: recurse with a bare name path element.
                apply_tree(val, func, path=path + (name,))
                continue
            for key, child in children:
                apply_tree(child, func, path=path + ((name, key),))
    func(path, x)
_fixtures(WebFixture, PanelSwitchFixture, TabbedPanelAjaxFixture)
def test_clicking_on_multi_tab(web_fixture, panel_switch_fixture, tabbed_panel_ajax_fixture):
    """Opening/closing a multi-tab must not change which tab is active."""
    fixture = tabbed_panel_ajax_fixture
    if not panel_switch_fixture.enable_js:
        panel_switch_fixture.ensure_disabled_js_files_not_cached()
    web_fixture.reahl_server.set_app(fixture.new_wsgi_app(enable_js=panel_switch_fixture.enable_js))
    browser = web_fixture.driver_browser
    browser.open('/')

    tab2_link = XPath.link().with_text('tab 2 name')
    tab3_link = XPath.link().with_text('tab 3 name')
    multitab_link = XPath.link().with_text('multitab name')

    def assert_tab3_active():
        # Tab 3 stays the active tab with its own contents shown.
        assert browser.wait_for(fixture.tab_is_active, 'tab 3 name')
        assert browser.wait_for(fixture.tab_contents_equals, '<p>tab 3 content</p>')

    browser.click(tab3_link)
    assert_tab3_active()
    assert browser.wait_for_element_not_visible(tab2_link)

    # Opening the multitab reveals its sub-tabs but does not switch tabs.
    browser.click(multitab_link)
    assert browser.wait_for_element_visible(tab2_link)
    assert_tab3_active()

    # Closing it again also leaves the active tab untouched.
    browser.click(multitab_link)
    assert_tab3_active()
class SetPartitioner(object):
    """Partition a set of items among the live members of a ZooKeeper party.

    Each participant joins ``<path>/party``, computes its share of ``set``
    with ``partition_func``, and claims every member of that share via a
    lock under ``<path>/locks``.  The partitioner moves through
    ``PartitionState`` values (ALLOCATING -> ACQUIRED / RELEASE / FAILURE)
    and re-allocates whenever party membership changes.

    NOTE(review): in upstream kazoo ``failed``/``release``/``allocating``/
    ``acquired`` are ``@property``; the decorators appear stripped in this
    copy, so expressions like ``if self.failed:`` below test a bound method
    (always truthy).  Confirm decorators were not lost in extraction.
    """

    def __init__(self, client, path, set, partition_func=None, identifier=None, time_boundary=30, max_reaction_time=1, state_change_event=None):
        # state_id is bumped on every state change so stale async callbacks
        # can detect that the world has moved on (see _allocate_transition).
        self.state_id = 0
        self.state = PartitionState.ALLOCATING
        self.state_change_event = (state_change_event or client.handler.event_object())
        self._client = client
        self._path = path
        self._set = set  # full set of items to partition
        self._partition_set = []  # this member's current share
        self._partition_func = (partition_func or self._partitioner)
        # Default identity: host-pid, unique per process.
        self._identifier = (identifier or ('%s-%s' % (socket.getfqdn(), os.getpid())))
        self._locks = []
        self._lock_path = '/'.join([path, 'locks'])
        self._party_path = '/'.join([path, 'party'])
        # How long party membership must stay stable before allocating.
        self._time_boundary = time_boundary
        self._max_reaction_time = max_reaction_time
        self._acquire_event = client.handler.event_object()
        client.ensure_path(path)
        client.ensure_path(self._lock_path)
        client.ensure_path(self._party_path)
        self._party = client.ShallowParty(self._party_path, identifier=self._identifier)
        self._party.join()
        self._state_change = client.handler.rlock_object()
        client.add_listener(self._establish_sessionwatch)
        # Kick off the first allocation once children have settled.
        self._child_watching(self._allocate_transition, client_handler=True)

    def __iter__(self):
        """Iterate over the members of this participant's current share."""
        for partition in self._partition_set:
            (yield partition)

    def failed(self):
        # True once the partitioner has given up (session loss, lock trouble).
        return (self.state == PartitionState.FAILURE)

    def release(self):
        # True when membership changed and held locks should be released.
        return (self.state == PartitionState.RELEASE)

    def allocating(self):
        return (self.state == PartitionState.ALLOCATING)

    def acquired(self):
        return (self.state == PartitionState.ACQUIRED)

    def wait_for_acquire(self, timeout=30):
        """Block until the partition set is acquired (or ``timeout`` seconds)."""
        self._acquire_event.wait(timeout)

    def release_set(self):
        """Release the locked share and rejoin allocation."""
        self._release_locks()
        if self._locks:
            # Not all locks could be released: fail out rather than risk
            # two members owning the same partition.
            self._fail_out()
            return
        else:
            with self._state_change:
                if self.failed:
                    return
                self._set_state(PartitionState.ALLOCATING)
            self._child_watching(self._allocate_transition, client_handler=True)

    def finish(self):
        """Release all locks and leave the party permanently."""
        self._release_locks()
        self._fail_out()

    def _fail_out(self):
        with self._state_change:
            self._set_state(PartitionState.FAILURE)
        if self._party.participating:
            try:
                self._party.leave()
            except KazooException:
                pass

    def _allocate_transition(self, result):
        """Party membership has settled: compute and lock our share."""
        if result.exception:
            self._fail_out()
            return
        (children, async_result) = result.get()
        children_changed = self._client.handler.event_object()

        def updated(result):
            # Membership changed while (or after) we were acquiring locks.
            with self._state_change:
                children_changed.set()
                if self.acquired:
                    self._set_state(PartitionState.RELEASE)
        with self._state_change:
            if (not self.allocating):
                return
            # Remember which "generation" of state we are acquiring for.
            state_id = self.state_id
        async_result.rawlink(updated)

        def abort_if_needed():
            # Returns True when acquisition must stop: either membership
            # changed underneath us, or the state generation moved on.
            if (self.state_id == state_id):
                if children_changed.is_set():
                    self._abort_lock_acquisition()
                    return True
                else:
                    return False
            else:
                if (self.allocating or self.acquired):
                    with self._state_change:
                        self._set_state(PartitionState.RELEASE)
                return True
        partition_set = self._partition_func(self._identifier, list(self._party), self._set)
        for member in partition_set:
            lock = self._client.Lock(((self._lock_path + '/') + str(member)))
            while True:
                try:
                    # Bounded acquire so we keep noticing membership changes.
                    lock.acquire(timeout=self._max_reaction_time)
                except LockTimeout:
                    if abort_if_needed():
                        return
                except KazooException:
                    return self.finish()
                else:
                    break
            self._locks.append(lock)
            if abort_if_needed():
                return
        with self._state_change:
            if ((self.state_id == state_id) and (not children_changed.is_set())):
                # Same generation and stable membership: share is ours.
                self._partition_set = partition_set
                self._set_state(PartitionState.ACQUIRED)
                self._acquire_event.set()
                return
        if (not abort_if_needed()):
            # State moved but no abort action applied: treat as failure.
            self._fail_out()

    def _release_locks(self):
        """Release every held lock; locks that fail to release stay listed."""
        self._acquire_event.clear()
        for lock in self._locks[:]:
            try:
                lock.release()
            except KazooException:
                # Kept in self._locks so callers can detect the failure.
                pass
            else:
                self._locks.remove(lock)

    def _abort_lock_acquisition(self):
        # Membership changed mid-acquisition: drop locks and re-watch.
        self._release_locks()
        if self._locks:
            self._fail_out()
            return
        self._child_watching(self._allocate_transition, client_handler=True)

    def _child_watching(self, func=None, client_handler=False):
        """Watch the party path until children settle, then invoke ``func``."""
        watcher = PatientChildrenWatch(self._client, self._party_path, self._time_boundary)
        asy = watcher.start()
        if (func is not None):
            if client_handler:
                # Run the callback on the client's handler, not the watch thread.
                func = partial(self._client.handler.spawn, func)
            asy.rawlink(func)
        return asy

    def _establish_sessionwatch(self, state):
        """Client state listener: fail on session loss, release on disruption."""
        with self._state_change:
            if self.failed:
                pass
            elif (state == KazooState.LOST):
                self._client.handler.spawn(self._fail_out)
            elif (not self.release):
                self._set_state(PartitionState.RELEASE)
        # Kazoo removes a listener that returns True -- i.e. only on LOST.
        return (state == KazooState.LOST)

    def _partitioner(self, identifier, members, partitions):
        # Default strategy: deterministic round-robin over sorted members.
        all_partitions = sorted(partitions)
        workers = sorted(members)
        i = workers.index(identifier)
        return all_partitions[i::len(workers)]

    def _set_state(self, state):
        self.state = state
        self.state_id += 1
        self.state_change_event.set()
def test_wr_As_rd_A_rd_At_can_schedule():
    """A slice writer plus whole-wire and overlapping-slice readers schedule.

    NOTE(review): in upstream PyMTL3 the three inner functions are decorated
    as update blocks (e.g. ``@update``); the decorators appear stripped in
    this copy -- confirm before relying on this file.
    """
    class Top(ComponentLevel3):

        def construct(s):
            s.A = Wire(Bits32)

            def up_wr_As():
                # Write 2 (0b10) into bits [1:3] -> sets bit 2, clears bit 1.
                s.A[1:3] = Bits2(2)

            def up_rd_A():
                # Reads the whole wire (value intentionally unused).
                z = s.A

            def up_rd_As():
                # Bits [2:4] == 0b01 because only bit 2 was set above.
                assert (s.A[2:4] == 1)
    _test_model(Top)
def convertMesh(case, mesh_file, scale):
    """Convert a third-party mesh file into OpenFOAM format inside ``case``
    and optionally scale it.

    :param case: OpenFOAM case directory the converters run in.
    :param mesh_file: path to a ``.unv`` (I-DEAS), ``.msh`` (GMSH) or
        ``.cas`` (Fluent) mesh file.
    :param scale: uniform scaling ratio; 1.0 means no scaling.

    Fixes: the second extension check previously repeated ``'.msh'``, so a
    GMSH file was converted twice (gmshToFoam then fluentMeshToFoam); the
    branches are now mutually exclusive and the Fluent branch keys on
    ``.cas`` (NOTE(review): confirm the intended Fluent extension upstream).
    Also, ``scale == 1.0`` previously fell into the error branch although it
    is a valid no-op.
    """
    mesh_file = translatePath(mesh_file)
    if (mesh_file.find('.unv') > 0):
        # I-DEAS universal mesh.
        cmdline = ['ideasUnvToFoam', '"{}"'.format(mesh_file)]
        runFoamCommand(cmdline, case)
        changeBoundaryType(case, 'defaultFaces', 'wall')
    elif (mesh_file[(- 4):] == '.msh'):
        # GMSH mesh.
        cmdline = ['gmshToFoam', '"{}"'.format(mesh_file)]
        runFoamCommand(cmdline, case)
        changeBoundaryType(case, 'defaultFaces', 'wall')
    elif (mesh_file[(- 4):] == '.cas'):
        # Fluent mesh.
        cmdline = ['fluentMeshToFoam', '"{}"'.format(mesh_file)]
        runFoamCommand(cmdline, case)
        changeBoundaryType(case, 'defaultFaces', 'wall')
        print('Info: boundary exported from named selection, started with lower case')
    if (scale and isinstance(scale, numbers.Number)):
        if (float(scale) != 1.0):
            cmdline = ['transformPoints', '-scale', '"({} {} {})"'.format(scale, scale, scale)]
            runFoamApplication(cmdline, case)
        # scale == 1.0 is valid: nothing to do.
    else:
        print('Error: mesh scaling ratio must be a float or integer\n')
def rescale_absorbance(spec, rescaled, initial, old_mole_fraction, new_mole_fraction, old_path_length_cm, new_path_length_cm, waveunit, rescaled_units, extra, true_path_length):
    """Recompute the absorbance array for new mole fraction / path length.

    Tries, in order: reuse an already-rescaled absorbance; rescale the
    spectrum's stored absorbance (A ~ x * L); rebuild from a rescaled
    absorption coefficient (A = k * L); or rebuild from the stored
    no-slit transmittance (A = -ln(T)).  Results are written into
    ``rescaled``/``rescaled_units``; absorbance is dimensionless ('').

    :param rescaled: dict of quantities already rescaled (mutated in place).
    :param initial: quantities present in the original spectrum.
    :param extra: quantities whose failure to compute is tolerated (set to
        None instead of raising).
    :param true_path_length: whether the stored path length is physical,
        i.e. whether L-based formulas are valid.
    :return: ``(rescaled, rescaled_units)``.
    """
    unit = None
    if ('absorbance' in rescaled):
        # Already rescaled earlier in the pipeline: nothing to do.
        if __debug__:
            printdbg('... rescale: absorbance was scaled already')
        assert ('absorbance' in rescaled_units)
        return (rescaled, rescaled_units)
    if ('absorbance' in initial):
        # Linear rescaling of the stored absorbance.
        if __debug__:
            printdbg('... rescale: absorbance A_2 = A_1*(x_2/x_1)*(L_2/L_1)')
        (_, absorbance) = spec.get('absorbance', wunit=waveunit, Iunit=spec.units['absorbance'], copy=True)
        assert (spec.units['absorbance'] == '')
        absorbance *= (new_mole_fraction / old_mole_fraction)
        absorbance *= (new_path_length_cm / old_path_length_cm)
        unit = ''
    elif (('abscoeff' in rescaled) and true_path_length):
        # Rebuild from the (already rescaled) absorption coefficient.
        if __debug__:
            printdbg('... rescale: absorbance A_2 = k_2*L_2')
        abscoeff = rescaled['abscoeff']
        absorbance = (abscoeff * new_path_length_cm)
        assert (rescaled_units['abscoeff'] == 'cm-1')
        unit = ''
    elif (('transmittance_noslit' in initial) and true_path_length):
        # Rebuild from Beer-Lambert: A = -ln(T), then rescale linearly.
        if __debug__:
            printdbg('... rescale: absorbance A_2 = -ln(T1)*(x_2/x_1)*(L_2/L_1)')
        (_, T1) = spec.get('transmittance_noslit', wunit=waveunit, Iunit=spec.units['transmittance_noslit'], copy=False)
        b = (T1 == 0)
        if (b.sum() > 0):
            # T == 0 anywhere -> -ln(T) diverges.
            msg = "Transmittance is saturated. Can't calculate absorbance"
            if ('absorbance' in extra):
                absorbance = None
                if __debug__:
                    printdbg(msg)
            else:
                raise ValueError(msg)
        # NOTE(review): when 'absorbance' is in `extra` and T is saturated,
        # the None sentinel set just above is immediately overwritten here
        # (yielding inf at saturated points) -- confirm against upstream.
        absorbance = (- ln(T1))
        absorbance *= (new_mole_fraction / old_mole_fraction)
        absorbance *= (new_path_length_cm / old_path_length_cm)
        unit = ''
    else:
        msg = ('Cant recalculate absorbance if scaled abscoeff ' + '({0}) and true path_length ({1}) are not given'.format(('abscoeff' in rescaled), true_path_length))
        if ('absorbance' in extra):
            # Optional quantity: tolerate failure, store nothing.
            absorbance = None
            if __debug__:
                printdbg(msg)
        else:
            raise ValueError(msg)
    if (absorbance is not None):
        rescaled['absorbance'] = absorbance
        if (rescaled_units is not None):
            rescaled_units['absorbance'] = unit
    return (rescaled, rescaled_units)
class PrecipCloudsRGB(GenericCompositor):
    """RGB compositor for precipitation probability layers.

    Channels are (intense, moderate, light) probabilities mapped to R, G, B,
    with per-channel scaling chosen from the status flag.
    """

    def __call__(self, projectables, *args, **kwargs):
        """Build the RGB composite from the four input datasets."""
        projectables = self.match_data_arrays(projectables)
        light = projectables[0]
        moderate = projectables[1]
        intense = projectables[2]
        status_flag = projectables[3]

        # Bit 2 of the status flag selects the alternative stretch limits.
        if np.bitwise_and(status_flag, 4).any():
            max_light, max_moderate, max_intense = 70, 70, 100
        else:
            max_light, max_moderate, max_intense = 30, 50, 40

        def _scaled(channel, max_value):
            # Scale to [0, 1), masking zeros and fill values.
            factor = (1.0 / max_value) - (1 / 255.0)
            scaled = (channel * factor).where(channel != 0)
            scaled = scaled.where(channel != channel.attrs['_FillValue'])
            scaled.attrs = channel.attrs
            return scaled

        red = _scaled(intense, max_intense)
        green = _scaled(moderate, max_moderate)
        blue = _scaled(light, max_light)
        return super(PrecipCloudsRGB, self).__call__((red, green, blue), *args, **kwargs)
class ProjectResourceGroupManager(RetrieveMixin, UpdateMixin, RESTManager):
    """Manager for the resource groups of a single project."""

    _path = '/projects/{project_id}/resource_groups'
    _obj_cls = ProjectResourceGroup
    _from_parent_attrs = {'project_id': 'id'}
    _list_filters = ('order_by', 'sort', 'include_html_description')
    _update_attrs = RequiredOptional(optional=('process_mode',))

    def get(self, id: Union[(str, int)], lazy: bool=False, **kwargs: Any) -> ProjectResourceGroup:
        """Retrieve a single resource group, narrowing the return type."""
        resource_group = super().get(id=id, lazy=lazy, **kwargs)
        return cast(ProjectResourceGroup, resource_group)
def convert_hit_area(filename, savename):
    """Convert a raw rally CSV into per-hit records with court areas.

    Reads ``filename`` (columns: Set, Rally, Frame, Time, X, Y,
    Getpoint_player, Lose_reason), maps each hit position to a court area,
    derives each shot's landing point from the *next* row's hit point, and
    writes the result to ``savename``.

    Fix: removed the many locals that were initialised and never used
    (``landing_x``/``landing_y``/``landing_area`` lists, ``time``,
    ``frame``, etc.).  Output columns and values are unchanged.
    """
    data = pd.read_csv(filename)
    # NOTE(review): X/Y are deliberately swapped (hit_x <- 'Y'); presumably
    # the source CSV uses image coordinates -- confirm with the annotator.
    hit_x = data['Y']
    hit_y = data['X']
    lose_reason = [map_reason(reason) for reason in data['Lose_reason']]
    hit_area = convert.to_area(hit_x, hit_y)
    output = pd.DataFrame([])
    output['set'] = data['Set']
    output['rally'] = data['Rally']
    output['frame_num'] = data['Frame']
    output['time'] = data['Time']
    output['hit_area'] = hit_area
    output['hit_x'] = hit_x
    output['hit_y'] = hit_y
    # A shot's landing point is the next shot's hit point (last row -> NaN).
    output['landing_area'] = pd.Series(hit_area[1:])
    output['landing_x'] = pd.Series(hit_x.values[1:])
    output['landing_y'] = pd.Series(hit_y.values[1:])
    output['lose_reason'] = pd.Series(lose_reason)
    output['getpoint_player'] = pd.Series(data['Getpoint_player'])
    # Ball type was never populated in the original; keep the empty column.
    output['type'] = pd.Series([])
    output.to_csv(savename, index=False, encoding='utf-8')
def register_events(path_or_typeclass):
    """Register a typeclass' declared ``_events`` with the event handler.

    Accepts either a dotted path or the class itself, and returns the class
    (usable as a decorator).  Event specs may carry 2-4 fields; missing
    trailing fields default to None.
    """
    if isinstance(path_or_typeclass, str):
        typeclass = class_from_module(path_or_typeclass)
    else:
        typeclass = path_or_typeclass
    typeclass_name = '.'.join((typeclass.__module__, typeclass.__name__))
    try:
        # Prefer the live event-handler script when it is usable.
        storage = ScriptDB.objects.get(db_key='event_handler')
        assert storage.is_active
        assert (storage.ndb.events is not None)
    except (ScriptDB.DoesNotExist, AssertionError):
        # Fall back to the module-level queue until the script exists.
        storage = EVENTS
    for (name, tup) in getattr(typeclass, '_events', {}).items():
        if len(tup) in (2, 3, 4):
            # Pad short specs with None so all four slots are filled.
            variables, help_text, custom_call, custom_add = (tuple(tup) + (None, None))[:4]
        else:
            variables = help_text = custom_call = custom_add = None
        if isinstance(storage, list):
            storage.append((typeclass_name, name, variables, help_text, custom_call, custom_add))
        else:
            storage.add_event(typeclass_name, name, variables, help_text, custom_call, custom_add)
    return typeclass
def gather_files_in_dirs(rootdir, targetdir, searchfilename, newfilesuffix='_Impact.png'):
    """Collect every ``searchfilename`` under ``rootdir`` into ``targetdir``.

    Each match is copied to ``targetdir/<model_id><newfilesuffix>``, where
    ``model_id`` is the name of the grandparent directory of the file.
    """
    for dirpath, _dirnames, filenames in os.walk(rootdir):
        for filename in filenames:
            if os.path.basename(filename) != searchfilename:
                continue
            source_path = os.path.join(dirpath, filename)
            containing_dir = os.path.dirname(source_path)
            # The grandparent directory name identifies the model.
            model_id = os.path.basename(os.path.dirname(containing_dir))
            destination = os.path.join(targetdir, model_id + newfilesuffix)
            shutil.copyfile(source_path, destination)
class Command(BaseCommand):
    """Bulk-delete projects listed by id in a plain-text file."""

    def add_arguments(self, parser):
        help_text = ('required list of project ids to delete in plain text format, '
                     'project ids have to be at the beginning of the line, '
                     'supports commenting lines out: if a line does '
                     'not start with an integer it will be skipped')
        parser.add_argument('id_list_file', type=str, help=help_text)

    def handle(self, *args, **options):
        """Parse the id file, confirm interactively, then delete."""
        project_ids = set()
        with open(options['id_list_file']) as fp:
            for line in fp:
                # Only lines starting with digits count; everything else
                # is treated as a comment.
                match = re.search('^([0-9]+)', line)
                if match:
                    project_ids.add(int(match.group(1)))
        prompt = (f"You are about to delete {len(project_ids)} projects. Are you sure? "
                  f"If so please enter 'yes' to continue: ")
        if input(prompt) != 'yes':
            print('Aborted!')
            return
        for project in Project.objects.filter(id__in=project_ids):
            project.delete()
            print(f'Project {project} deleted.')
class QnliProcessor(DataProcessor):
    """Processor for the QNLI data set (GLUE version)."""

    def get_example_from_tensor_dict(self, tensor_dict):
        """Build an InputExample from a tensorflow_datasets-style dict."""
        return InputExample(
            tensor_dict['idx'].numpy(),
            tensor_dict['question'].numpy().decode('utf-8'),
            tensor_dict['sentence'].numpy().decode('utf-8'),
            str(tensor_dict['label'].numpy()),
        )

    def get_train_examples(self, data_dir):
        """Read train.tsv and build training examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'train.tsv')), 'train')

    def get_dev_examples(self, data_dir):
        """Read dev.tsv and build dev examples."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, 'dev.tsv')), 'dev_matched')

    def get_labels(self):
        """Return the label set for QNLI."""
        return ['entailment', 'not_entailment']

    def _create_examples(self, lines, set_type):
        """Turn TSV rows into InputExamples, skipping the header row."""
        rows = iter(lines)
        next(rows, None)  # header
        return [
            InputExample(guid=('%s-%s' % (set_type, row[0])),
                         text_a=row[1], text_b=row[2], label=row[(- 1)])
            for row in rows
        ]
def _prepare_instance_masks_thicken(instances, semantic_mapping, distance_field, frustum_mask) -> Dict[(int, Tuple[(torch.Tensor, int)])]:
    """Build a thickened occupancy grid per instance.

    For each instance id in ``semantic_mapping``, the distance field is
    masked to that instance (everything else set to 3.0), thresholded to
    the near-surface band (|d| < 1), thickened, and returned together with
    the instance's semantic class.
    """
    instance_information = {}
    # squeeze() is pure, so hoist it out of the loop.
    squeezed_field = distance_field.squeeze()
    for instance_id, semantic_class in semantic_mapping.items():
        mask = instances == instance_id
        # 3.0 acts as "far from surface" everywhere outside the instance.
        sdf = torch.full_like(mask, dtype=torch.float, fill_value=3.0)
        sdf[mask] = squeezed_field[mask]
        near_surface = sdf.abs() < 1.0
        grid = thicken_grid(near_surface, [256, 256, 256], frustum_mask)
        instance_information[instance_id] = (grid, semantic_class)
    return instance_information
def update_usage_list_schemas() -> None:
    """Regenerate the vendored-schema list between markers in docs/usage.rst."""
    print('updating docs/usage.rst -- list schemas')
    start_marker = '.. vendored-schema-list-start\n'
    end_marker = '\n.. vendored-schema-list-end'
    with open('docs/usage.rst') as fp:
        content = fp.read()
    # Everything before the first start marker / after the last end marker
    # is preserved untouched.
    head = content.partition(start_marker)[0]
    tail = content.rpartition(end_marker)[2]
    schema_lines = [f'- ``vendor.{n}``' for n in SCHEMA_CATALOG]
    generated = '\n'.join([start_marker] + schema_lines + [end_marker])
    with open('docs/usage.rst', 'w') as fp:
        fp.write(head + generated + tail)
class CLIPVisionCfg():
    """Configuration for the vision tower of a CLIP model.

    NOTE(review): this looks like an upstream ``@dataclass`` whose decorator
    was stripped in this copy -- as written these are class attributes, not
    per-instance fields.  Confirm against upstream before use.
    """
    # Backbone architecture identifier.
    backbone: str = 'ModifiedRN50'
    # Per-stage block counts (ResNet-style tuple) or a single depth (ViT).
    layers: Union[(Tuple[(int, int, int, int)], int)] = 12
    # Base channel width of the tower.
    width: int = 64
    # Width per attention head.
    head_width: int = 64
    # MLP hidden size as a multiple of width.
    mlp_ratio: float = 4.0
    # Patch size (ViT-style towers).
    patch_size: int = 16
    # Input resolution, square int or (H, W).
    image_size: Union[(Tuple[(int, int)], int)] = 512
    # Optional timm backbone: model name, pretrained flag, pooling and
    # projection types.  None/defaults mean "do not use timm".
    timm_model_name: str = None
    timm_model_pretrained: bool = False
    timm_pool: str = 'avg'
    timm_proj: str = 'linear'
    # Fraction of patches dropped during training (0 disables).
    patch_dropout: float = 0.0
    # Dropout rate applied inside attention (0 disables).
    drop_attention_rate: float = 0.0
def reset_defaults():
    """Reset all configuration values to their default (initial) values.

    Each submodule's ``_*_defaults`` dict is merged into the package-level
    ``defaults`` dict.  The imports are deliberately local (inside the
    function) rather than at module top level -- presumably to avoid
    circular imports at package load time; the import/update order is
    therefore preserved exactly.  # NOTE(review): confirm ordering matters.
    """
    defaults.update(_control_defaults)
    from .freqplot import _freqplot_defaults, _nyquist_defaults
    defaults.update(_freqplot_defaults)
    defaults.update(_nyquist_defaults)
    from .nichols import _nichols_defaults
    defaults.update(_nichols_defaults)
    from .pzmap import _pzmap_defaults
    defaults.update(_pzmap_defaults)
    from .rlocus import _rlocus_defaults
    defaults.update(_rlocus_defaults)
    from .sisotool import _sisotool_defaults
    defaults.update(_sisotool_defaults)
    from .iosys import _iosys_defaults
    defaults.update(_iosys_defaults)
    from .xferfcn import _xferfcn_defaults
    defaults.update(_xferfcn_defaults)
    from .statesp import _statesp_defaults
    defaults.update(_statesp_defaults)
    from .optimal import _optimal_defaults
    defaults.update(_optimal_defaults)
    from .timeplot import _timeplot_defaults
    defaults.update(_timeplot_defaults)
def test_unused_udp_port_factory_selects_unused_port(pytester: Pytester):
    """The ``unused_udp_port_factory`` fixture hands out distinct free UDP ports.

    Writes a generated test module in which three factory-provided ports are
    each bound once successfully, and binding any of them a second time
    (``reuse_port=False``) raises ``IOError``.

    NOTE(review): the embedded module source begins with ``.asyncio``, which
    looks like a stripped ``@pytest.mark.asyncio`` marker, and no
    ``pytester.runpytest()`` call is visible in this copy -- confirm against
    the upstream pytest-asyncio test suite.
    """
    pytester.makepyfile(dedent(' .asyncio\n async def test_unused_udp_port_factory_fixture(unused_udp_port_factory):\n class Closer:\n def connection_made(self, transport):\n pass\n\n def connection_lost(self, *arg, **kwd):\n pass\n\n port1, port2, port3 = (\n unused_udp_port_factory(),\n unused_udp_port_factory(),\n unused_udp_port_factory(),\n )\n\n event_loop = asyncio.get_running_loop()\n transport1, _ = await event_loop.create_datagram_endpoint(\n Closer,\n local_addr=("127.0.0.1", port1),\n reuse_port=False,\n )\n transport2, _ = await event_loop.create_datagram_endpoint(\n Closer,\n local_addr=("127.0.0.1", port2),\n reuse_port=False,\n )\n transport3, _ = await event_loop.create_datagram_endpoint(\n Closer,\n local_addr=("127.0.0.1", port3),\n reuse_port=False,\n )\n\n for port in port1, port2, port3:\n with pytest.raises(IOError):\n await event_loop.create_datagram_endpoint(\n Closer,\n local_addr=("127.0.0.1", port),\n reuse_port=False,\n )\n\n transport1.abort()\n transport2.abort()\n transport3.abort()\n '))
class TourneySlotSelec(discord.ui.Select):
    """Multi-select listing a user's tourney slots (e.g. for cancellation)."""

    view: QuotientView

    def __init__(self, slots: T.List[TMSlot], placeholder: str='Select a slot to cancel'):
        # One option per slot; the value encodes "<slot_id>:<tourney_id>".
        select_options = [
            discord.SelectOption(
                emoji=emote.TextChannel,
                label=f'Slot {slot.num} - {slot.team_name}',
                description=f"#{getattr(slot.tourney.registration_channel, 'name', 'channel-deleted')} - (ID:{slot.tourney.id})",
                value=f'{slot.id}:{slot.tourney.id}',
            )
            for slot in slots
        ]
        super().__init__(options=select_options, placeholder=placeholder, max_values=len(select_options))

    async def callback(self, interaction: discord.Interaction):
        """Record the chosen values on the view and stop it."""
        await interaction.response.defer()
        self.view.stop()
        self.view.custom_id = interaction.data['values']
def get_data_points(data):
    """Split ``data`` into chunks, one per date match in DATE_PATTERN.

    Each chunk starts 8 characters before its date match and runs up to the
    start of the next chunk (the last one runs to the end of the string).
    """
    starts = [match.start() - 8 for match in DATE_PATTERN.finditer(data)]
    ends = starts[1:] + [None]
    return [data[start:end] for start, end in zip(starts, ends)]
def keras_model_functional():
    """Build a small functional Keras CNN exercising mixed BatchNorm modes.

    The three BatchNorm layers use training=True, a placeholder-driven
    training flag, and training=False respectively; all layers after the
    first BatchNorm live inside variable scope 'scope_1'.
    """
    is_training = tf.compat.v1.placeholder_with_default(tf.constant(True), shape=(), name='is_training')
    inputs = tf.keras.Input(shape=(32, 32, 3))
    net = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
    # BN pinned to training mode.
    net = tf.keras.layers.BatchNormalization(momentum=0.3, epsilon=0.65)(net, training=True)
    with tf.compat.v1.variable_scope('scope_1'):
        net = tf.keras.layers.Conv2D(16, (2, 2), activation=tf.nn.tanh)(net)
        # BN driven by the is_training placeholder.
        net = tf.keras.layers.BatchNormalization(momentum=0.4, epsilon=0.25)(net, training=is_training)
        net = tf.keras.layers.Conv2D(8, (2, 2), activation=tf.nn.tanh)(net)
        # BN pinned to inference mode.
        net = tf.keras.layers.BatchNormalization(momentum=0.5, epsilon=0.35)(net, training=False)
        net = tf.keras.layers.Conv2D(4, (2, 2), activation=tf.nn.relu6)(net)
        net = tf.keras.layers.Flatten()(net)
        outputs = tf.keras.layers.Dense(10, activation=tf.nn.softmax, name='keras_model_functional')(net)
    return tf.keras.Model(inputs=inputs, outputs=outputs)