class ImportPlugin(MdfInfo):
    name = 'Import plugin'
    author = 'Aymeric Rateau'
    description = 'Import MDF files'
    promote_tab = 'MDF'
    file_extensions = set(['.dat', '.mf4', '.mdf'])

    def __init__(self):
        self.fields = []

    def getPreview(self, params):
        info = MdfInfo()
        if info.mdfversion < 400:
            f = ''
            f += 'Time: ' + info['HDBlock']['Date'] + ' '
            f += info['HDBlock']['Time'] + '\n'
            f += 'Author: ' + info['HDBlock']['Author'] + '\n'
            f += 'Organisation: ' + info['HDBlock']['Organization'] + '\n'
            f += 'Project Name: ' + info['HDBlock']['ProjectName'] + '\n'
            f += 'Subject: ' + info['HDBlock']['Subject'] + '\nChannel List:\n'
        else:
            from time import gmtime, strftime
            # Divisor was stripped in the source; 1e9 (ns -> s) is assumed here.
            fileDateTime = gmtime(info['HDBlock']['hd_start_time_ns'] / 1000000000)
            date = strftime('%Y-%m-%d', fileDateTime)
            time = strftime('%H:%M:%S', fileDateTime)
            f = ''
            f += 'Date Time: ' + date + ' ' + time + '\n'
            if 'Comment' in info['HDBlock']:
                Comment = info['HDBlock']['Comment']
                if 'author' in Comment:
                    f += 'Author: ' + Comment['author'] + '\n'
                if 'department' in Comment:
                    f += 'Organisation: ' + Comment['department'] + '\n'
                if 'project' in Comment:
                    f += 'Project Name: ' + Comment['project'] + '\n'
                if 'subject' in Comment:
                    f += 'Subject: ' + Comment['subject'] + '\nChannel List:\n'
        for channelName in info.list_channels():
            f += ' ' + channelName + '\n'
        return (f, True)

    def doImport(self, params):
        return []
def parse_input_update_spec(spec):
    for key in spec:
        assert key not in {'action', 'buttons', 'code', 'inline', 'max_size',
                           'max_total_size', 'multiple', 'name', 'onchange',
                           'type', 'validate'}, '%r can not be updated' % key
    attributes = dict((k, v) for (k, v) in spec.items() if v is not None)
    if 'options' in spec:
        attributes['options'] = _parse_select_options(spec['options'])
    return attributes
def _convert_advanced_activation(inexpr, keras_layer, etab):
    act_type = type(keras_layer).__name__
    if act_type == 'Softmax':
        axis = keras_layer.axis
        dims = len(keras_layer.input_shape)
        if isinstance(axis, list):
            raise tvm.error.OpAttributeUnImplemented(
                'Softmax with axes {} is not supported.'.format(axis))
        if axis == -1:
            axis = 1
        else:
            axis = axis + 1 if axis < dims - 1 else 1
        return _op.nn.softmax(inexpr, axis=axis)
    if act_type == 'ReLU':
        threshold = _expr.const(keras_layer.threshold, dtype='float32')
        if keras_layer.max_value and float(keras_layer.threshold) == 0:
            return _op.clip(inexpr, a_min=0.0, a_max=float(keras_layer.max_value))
        if keras_layer.max_value and _op.greater(threshold, inexpr).astype('float32'):
            negative_slope = _expr.const(keras_layer.negative_slope, dtype='float32')
            return _op.multiply(negative_slope, _op.subtract(inexpr, threshold))
        return _op.nn.relu(inexpr)
    if act_type == 'LeakyReLU':
        return _op.nn.leaky_relu(inexpr, alpha=float(keras_layer.alpha))
    if act_type == 'ELU':
        alpha = keras_layer.alpha if hasattr(keras_layer, 'alpha') else 1.0
        alpha = _expr.const(alpha, dtype='float32')
        return _get_elu(inexpr, alpha)
    if act_type == 'PReLU':
        assert hasattr(keras_layer, 'alpha'), 'alpha required for PReLU.'
        _check_data_format(keras_layer)
        size = len(keras_layer.alpha.shape)
        alpha = etab.new_const(
            keras_layer.get_weights()[0].transpose(np.roll(range(size), 1)))
        return (_op.negative(alpha) * _op.nn.relu(_op.negative(inexpr))
                + _op.nn.relu(inexpr))
    if act_type == 'ThresholdedReLU':
        theta = keras_layer.theta if hasattr(keras_layer, 'theta') else 1.0
        return _op.multiply(
            inexpr,
            _op.greater(inexpr, _expr.const(theta, dtype='float32')).astype('float32'))
    raise tvm.error.OpNotImplemented(
        'Operator {} is not supported in frontend Keras.'.format(act_type))
def conv2d_transposed(x, shape, outshape, name, strides=[1, 1, 1, 1]):
    weight = weight_variable(shape, '{}_W'.format(name))
    bias = bias_variable([shape[-2]], '{}_b'.format(name))
    return tf.nn.conv2d_transpose(x, weight, output_shape=outshape,
                                  strides=strides, padding='SAME', name=name) + bias
class JciHitachiMonthlyPowerConsumptionSensorEntity(JciHitachiEntity, SensorEntity):
    def __init__(self, thing, coordinator):
        super().__init__(thing, coordinator)

    # @property decorators restored below (stripped in the source); these are
    # the standard Home Assistant SensorEntity attribute properties.
    @property
    def name(self):
        return f'{self._thing.name} Monthly Power Consumption'

    @property
    def native_value(self):
        monthly_data = self._thing.monthly_data
        if monthly_data:
            return monthly_data[0]['PowerConsumption_Sum'] / 10
        return -1

    @property
    def device_class(self):
        return SensorDeviceClass.ENERGY

    @property
    def native_unit_of_measurement(self):
        return UnitOfEnergy.KILO_WATT_HOUR

    @property
    def unique_id(self):
        return f'{self._thing.gateway_mac_address}_monthly_power_consumption_sensor'

    @property
    def state_class(self):
        return None
class Iptables(InstanceModule):
    def __init__(self):
        super().__init__()
        self._has_w_argument = None

    def _iptables_command(self, version):
        if version == 4:
            iptables = 'iptables'
        elif version == 6:
            iptables = 'ip6tables'
        else:
            raise RuntimeError('Invalid version: {}'.format(version))
        if self._has_w_argument is False:
            return iptables
        else:
            return '{} -w 90'.format(iptables)

    def _run_iptables(self, version, cmd, *args):
        ipt_cmd = '{} {}'.format(self._iptables_command(version), cmd)
        if self._has_w_argument is None:
            result = self.run_expect([0, 2], ipt_cmd, *args)
            if result.rc == 2:
                self._has_w_argument = False
                return self._run_iptables(version, cmd, *args)
            else:
                self._has_w_argument = True
                return result.stdout.rstrip('\r\n')
        else:
            return self.check_output(ipt_cmd, *args)

    def rules(self, table='filter', chain=None, version=4):
        cmd, args = '-t %s -S', [table]
        if chain:
            cmd += ' %s'
            args += [chain]
        rules = []
        for line in self._run_iptables(version, cmd, *args).splitlines():
            line = line.replace('\t', ' ')
            rules.append(line)
        return rules
class AsmCmdImportMulti(AsmCmdImportSingle):
    _id = 26
    _menuText = QT_TRANSLATE_NOOP('asm3', 'Import as multi-document')
    _tooltip = QT_TRANSLATE_NOOP(
        'asm3', 'Import assemblies from STEP file into separate document')
    _iconName = 'Assembly_ImportMulti.svg'

    @classmethod  # decorator restored; the method takes cls
    def importMode(cls):
        params = FreeCAD.ParamGet('User parameter:BaseApp/Preferences/Mod/Import')
        mode = params.GetInt('ImportMode', 0)
        if not mode:
            mode = 2
        return mode
# Decorator truncated to "_tokenizers" in the source; transformers'
# @custom_tokenizers test marker is the likely original.
@custom_tokenizers
class CpmTokenizationTest(XLNetModelTest):
    def test_pre_tokenization(self):
        tokenizer = CpmTokenizer.from_pretrained('TsinghuaAI/CPM-Generate')
        text = 'Hugging Face,'
        normalized_text = 'Hugging Face,<unk>'
        bpe_tokens = 'Hu gg ing F ace , '.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [13789, 13283, 1421, 8, 10, 1164, 13608, 16528, 63,
                            8, 9, 440, 108, 440, 121, 90, 8, 12, 0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens),
                             input_bpe_tokens)
        reconstructed_text = tokenizer.decode(input_bpe_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
class QuoPageView(QuotientView):
    def __init__(self, ctx: Context, *, pages: T.List[PageLine],
                 items: T.Optional[T.List[discord.ui.Item]] = None,
                 embed: discord.Embed, show_count: bool, need_skip: bool):
        super().__init__(ctx, timeout=40)
        self.pages = pages
        self.items = items
        self.current_page = 1
        self.embed = embed
        self.show_count = show_count
        self.need_skip = need_skip
        self.__input_lock = asyncio.Lock()
        self.clear_items()
        self.fill_items()

    def fill_items(self) -> None:
        for item in self.items:
            self.add_item(item)
        self.add_item(self.first_page)
        self.add_item(self.previous_page)
        if self.need_skip:
            self.add_item(self.skip_page)
        self.add_item(self.next_page)
        self.add_item(self.last_page)

    def update_embed(self):
        if self.show_count:
            self.embed.set_footer(text=f'Page {self.current_page} of {len(self.pages)}')
        self.embed.description = self.pages[self.current_page - 1].line
        if self.pages[self.current_page - 1].image:
            self.embed.set_image(url=self.pages[self.current_page - 1].image)

    # The @discord.ui.button decorators below survived only as bare ".button(...)"
    # in the source and are restored here; the custom emoji IDs were already elided.
    @discord.ui.button(style=discord.ButtonStyle.green, custom_id='first',
                       emoji='<:double_left:>')
    async def first_page(self, interaction: discord.Interaction, button: discord.Button):
        self.current_page = 1
        self.update_embed()
        await interaction.response.edit_message(embed=self.embed, view=self)

    @discord.ui.button(style=discord.ButtonStyle.green, custom_id='previous',
                       emoji='<:left:>')
    async def previous_page(self, interaction: discord.Interaction, button: discord.Button):
        if self.current_page == 1:
            return
        self.current_page -= 1
        self.update_embed()
        await interaction.response.edit_message(embed=self.embed, view=self)

    @discord.ui.button(style=discord.ButtonStyle.green, custom_id='skipto',
                       label='Skip to page ...')
    async def skip_page(self, interaction: discord.Interaction, button: discord.Button):
        if self.__input_lock.locked():
            return await interaction.response.send_message(
                'Already waiting for your response...', ephemeral=True)
        if self.message is None:
            return
        async with self.__input_lock:
            channel = self.message.channel
            author_id = interaction.user and interaction.user.id
            await interaction.response.send_message(
                'Please enter the page number you want to skip to.', ephemeral=True)

            def _msg_check(m: discord.Message) -> bool:
                return (m.author.id == author_id and channel == m.channel
                        and m.content.isdigit())

            try:
                msg = await self.ctx.bot.wait_for('message', check=_msg_check,
                                                  timeout=30.0)
            except asyncio.TimeoutError:
                await interaction.followup.send('Took too long.', ephemeral=True)
                await asyncio.sleep(5)
            else:
                page = int(msg.content)
                await msg.delete()
                if page > len(self.pages):
                    await interaction.followup.send('Page number too high.',
                                                    ephemeral=True)
                    return
                self.current_page = page
                self.update_embed()
                if interaction.response.is_done():
                    if self.message:
                        await self.message.edit(embed=self.embed, view=self)
                else:
                    await interaction.response.edit_message(embed=self.embed, view=self)

    @discord.ui.button(style=discord.ButtonStyle.green, custom_id='next',
                       emoji='<:right:>')
    async def next_page(self, interaction: discord.Interaction, button: discord.Button):
        if self.current_page == len(self.pages):
            return
        self.current_page += 1
        self.update_embed()
        await interaction.response.edit_message(embed=self.embed, view=self)

    @discord.ui.button(style=discord.ButtonStyle.green, custom_id='last',
                       emoji='<:double_right:>')
    async def last_page(self, interaction: discord.Interaction, button: discord.Button):
        self.current_page = len(self.pages)
        self.update_embed()
        await interaction.response.edit_message(embed=self.embed, view=self)
class DirectionalGridCRF(GridCRF, EdgeFeatureGraphCRF):
    def __init__(self, n_states=None, n_features=None, inference_method=None,
                 neighborhood=4):
        self.neighborhood = neighborhood
        n_edge_features = 2 if neighborhood == 4 else 4
        EdgeFeatureGraphCRF.__init__(self, n_states, n_features, n_edge_features,
                                     inference_method=inference_method)

    def _set_size_joint_feature(self):
        if self.n_features is not None and self.n_states is not None:
            self.size_joint_feature = (self.n_states * self.n_features
                                       + self.n_edge_features * self.n_states ** 2)

    def _check_size_x(self, x):
        GridCRF._check_size_x(self, x)

    def initialize(self, X, Y):
        CRF.initialize(self, X, Y)

    def _get_edges(self, x, flat=True):
        return make_grid_edges(x, neighborhood=self.neighborhood,
                               return_lists=not flat)

    def joint_feature(self, x, y):
        return EdgeFeatureGraphCRF.joint_feature(self, x, y)

    def _get_edge_features(self, x):
        return edge_list_to_features(self._get_edges(x, flat=False))
class _NetD(nn.Module):
    def __init__(self):
        super(_NetD, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=64, kernel_size=5, stride=1,
                      padding=2, bias=True),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=64, out_channels=64, kernel_size=4, stride=2,
                      padding=1, bias=True),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1,
                      padding=1, bias=True),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=128, out_channels=128, kernel_size=4, stride=2,
                      padding=1, bias=True),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1,
                      padding=1, bias=True),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=4, stride=2,
                      padding=1, bias=True),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1,
                      padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=4, stride=2,
                      padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1,
                      padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=4, stride=2,
                      padding=1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.LeakyReLU = nn.LeakyReLU(0.2, inplace=True)
        self.fc1 = nn.Linear(2048, 64)
        self.fc2 = nn.Linear(64, 1)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0.0, 0.02)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.normal_(1.0, 0.02)
                m.bias.data.fill_(0)

    def forward(self, input):
        out = self.features(input)
        out = out.view(out.size(0), -1)
        out = self.fc1(out)
        out = self.LeakyReLU(out)
        out = self.fc2(out)
        return out.view(-1, 1).squeeze(1)
def menu_setattr(menu, choice, obj, string):
    attr = getattr(choice, 'attr', None) if choice else None
    if choice is None or string is None or attr is None or menu is None:
        # The flattened source chained a stray .strip() onto log_err(...),
        # which would fail if log_err returns None; it is dropped here.
        log_err(dedent('''
            The `menu_setattr` function was called to set the attribute {} of object {} to {},
            but the choice {} of menu {} or another information is missing.
            '''.format(attr, obj, repr(string), choice, menu)).strip('\n'))
        return
    for part in attr.split('.')[:-1]:
        obj = getattr(obj, part)
    setattr(obj, attr.split('.')[-1], string)
    return True
class TestOutputParser(TestCase):
    def setUp(self) -> None:
        self.parser = OutputParser([])

    def test_parse_pytest(self):
        output = get_output('pytest')
        failed = list(self.parser.parse_failed('python#pytest', output))
        self.assertEqual(failed, [
            ParseResult(name='test_d', namespaces=['TestMyClass']),
            ParseResult(name='test_parametrize', namespaces=[]),
            ParseResult(name='test_a', namespaces=[]),
        ])

    def test_parse_pyunit(self):
        output = get_output('pyunit')
        failed = list(self.parser.parse_failed('python#pyunit', output))
        self.assertEqual(failed, [ParseResult(name='test_d', namespaces=['TestMyClass'])])

    def test_parse_gotest(self):
        output = get_output('gotest')
        failed = list(self.parser.parse_failed('go#gotest', output))
        self.assertEqual(failed, [ParseResult(name='TestA', namespaces=[]),
                                  ParseResult(name='TestB', namespaces=[])])

    def test_parse_jest(self):
        output = get_output('jest')
        failed = list(self.parser.parse_failed('javascript#jest', output))
        self.assertEqual(failed, [
            ParseResult(name="it shouldn't pass",
                        namespaces=['First namespace', 'Another namespace']),
            ParseResult(name="it shouldn't pass again", namespaces=[]),
        ])

    def test_parse_exunit(self):
        output = get_output('exunit')
        failed = list(self.parser.parse_failed('elixir#exunit', output))
        self.assertEqual(failed, [ParseResult(name='the world', namespaces=[]),
                                  ParseResult(name='greets the world', namespaces=[])])

    def test_parse_richgo(self):
        output = get_output('richgo')
        failed = list(self.parser.parse_failed('go#richgo', output))
        self.assertEqual(failed, [ParseResult(name='TestA', namespaces=[]),
                                  ParseResult(name='TestAAAB', namespaces=[])])
class AlexNet(nn.Module):
    def __init__(self, num_classes=100):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
def is_html(ct_headers, url, allow_xhtml=False):
    if not ct_headers:
        return is_html_file_extension(url, allow_xhtml)
    headers = split_header_words(ct_headers)
    if len(headers) < 1:
        return is_html_file_extension(url, allow_xhtml)
    first_header = headers[0]
    first_parameter = first_header[0]
    ct = first_parameter[0]
    html_types = ['text/html']
    if allow_xhtml:
        html_types += ['text/xhtml', 'text/xml', 'application/xml',
                       'application/xhtml+xml']
    return ct in html_types
@pytest.mark.parametrize('q', [quantize(symmetric=True, initialized=False),
                               quantize_dequantize(symmetric=True, initialized=False)])
def test_compute_encodings_updates_parameters_upon_exit(q: _QuantizerBase, x: torch.Tensor):
    assert q.get_min() is None
    assert q.get_max() is None
    assert q.get_scale() is None
    assert q.get_offset() is None
    with q.compute_encodings():
        assert q.get_min() is None
        assert q.get_max() is None
        assert q.get_scale() is None
        assert q.get_offset() is None
        _ = q(x)
        # Encodings stay unset until the context manager exits.
        assert q.get_min() is None
        assert q.get_max() is None
        assert q.get_scale() is None
        assert q.get_offset() is None
    assert q.get_min() is not None
    assert q.get_max() is not None
    assert q.get_scale() is not None
    assert q.get_offset() is not None
class Migration(migrations.Migration):
    dependencies = [('jobs', '0009_auto__1815')]
    operations = [
        migrations.AlterField(
            model_name='job',
            name='company_description_markup_type',
            field=models.CharField(
                max_length=30,
                choices=[('', '--'), ('html', 'HTML'), ('plain', 'Plain'),
                         ('markdown', 'Markdown'),
                         ('restructuredtext', 'Restructured Text')],
                default='restructuredtext', blank=True),
            preserve_default=True),
        migrations.AlterField(
            model_name='job',
            name='description_markup_type',
            field=models.CharField(
                max_length=30, default='restructuredtext',
                choices=[('', '--'), ('html', 'HTML'), ('plain', 'Plain'),
                         ('markdown', 'Markdown'),
                         ('restructuredtext', 'Restructured Text')]),
            preserve_default=True),
        migrations.AlterField(
            model_name='job',
            name='requirements_markup_type',
            field=models.CharField(
                max_length=30, default='restructuredtext',
                choices=[('', '--'), ('html', 'HTML'), ('plain', 'Plain'),
                         ('markdown', 'Markdown'),
                         ('restructuredtext', 'Restructured Text')]),
            preserve_default=True),
    ]
def example_interaction_with_policy(policy='random'):
    assert policy in ('random', 'user')

    def random_policy(state):
        from helper import get_candidates
        lfs = get_candidates(state)
        path = random.choice(lfs)[0]
        return path

    def user_policy(state):
        from helper import get_candidates
        from helper import get_candidate
        lfs = get_candidates(state)
        while True:
            print('Choices: ')
            for idx, (lf_path, lf_prog) in enumerate(lfs):
                print(idx, lf_prog)
            index = input('Choices: ')
            try:
                path = lfs[int(index)][0]
            except:
                # Invalid input: prompt again. (Loop structure reconstructed;
                # the flattened source left the placement of `return path` ambiguous.)
                print('ERROR. TRY AGAIN.')
                continue
            return path

    problem = '(q-transform/hint (quote (lambda (cdr (cdr (var ()))))) (quote ((() y . 1) (#f y () . #t) (#f b () b . y) (x #f (#f . #f) . #t) (a #f y x s . a))))'
    print('Starting problem:', problem)
    print('Policy:', policy)
    step = 0
    max_steps = 50
    policy_fn = {'random': random_policy, 'user': user_policy}[policy]
    with Interaction(lisp.parse(problem)) as env:
        signal = None
        while signal != 'solved' and step < max_steps:
            path = policy_fn(env.state)
            is_good_path = (path == env.good_path)
            signal = env.follow_path(path)
            step += 1
            print('Step:', step, 'GT Path?:', is_good_path, 'Signal:', signal)
    print('Completed.')
def format_xlabel(wunit, plot_medium):
    if wunit == 'cm-1':
        xlabel = 'Wavenumber (cm-1)'
    elif wunit == 'nm':
        if plot_medium and plot_medium != 'vacuum_only':
            xlabel = 'Wavelength [air] (nm)'
        else:
            xlabel = 'Wavelength (nm)'
    elif wunit == 'nm_vac':
        if plot_medium and plot_medium != 'vacuum_only':
            xlabel = 'Wavelength [vacuum] (nm)'
        else:
            xlabel = 'Wavelength (nm)'
    else:
        raise ValueError(wunit)
    return make_up(xlabel)
def weight2subspace(weight, ratio=0.7, num=-1):
    dim = len(weight)
    threshold = ratio * np.sum(weight)
    sorted_idx = np.argsort(weight)
    sorted_idx = [sorted_idx[dim - i - 1] for i in range(dim)]  # descending order
    if num != -1:
        exp_subspace = sorted_idx[:num]
        exp_subspace = list(np.sort(exp_subspace))
        return exp_subspace
    tmp_s = 0
    exp_subspace = []
    for idx in sorted_idx:
        tmp_s += weight[idx]
        exp_subspace.append(idx)
        if tmp_s >= threshold:
            break
    exp_subspace = list(np.sort(exp_subspace))
    return exp_subspace
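# A quick worked example of weight2subspace (the weight values below are
# illustrative, not from the source): it greedily takes the highest-weight
# dimensions until their cumulative weight reaches `ratio` of the total.
import numpy as np

weight = np.array([0.1, 0.5, 0.2, 0.2])
# Descending order is [1, 3, 2, 0]; 0.5 + 0.2 = 0.7 meets the 0.7 threshold,
# so dimensions 1 and 3 are selected and returned sorted.
print(weight2subspace(weight, ratio=0.7))  # -> [1, 3]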
def _test_sharding(tables: List[EmbeddingBagConfig],
                   initial_state_dict: Dict[str, Any],
                   rank: int,
                   world_size: int,
                   kjt_input_per_rank: List[KeyedJaggedTensor],
                   sharder: ModuleSharder[nn.Module],
                   backend: str,
                   constraints: Optional[Dict[str, ParameterConstraints]] = None,
                   local_size: Optional[int] = None,
                   is_data_parallel: bool = False,
                   use_apply_optimizer_in_backward: bool = False) -> None:
    trec_dist.comm_ops.set_gradient_division(False)
    with MultiProcessContext(rank, world_size, backend, local_size) as ctx:
        kjt_input_per_rank = [kjt.to(ctx.device) for kjt in kjt_input_per_rank]
        initial_state_dict = {fqn: tensor.to(ctx.device)
                              for fqn, tensor in initial_state_dict.items()}
        planner = EmbeddingShardingPlanner(
            topology=Topology(world_size, ctx.device.type,
                              local_world_size=ctx.local_size),
            constraints=constraints)
        model = EmbeddingBagCollection(tables=tables, device=ctx.device)
        unsharded_model = EmbeddingBagCollection(tables=tables, device=ctx.device)
        if use_apply_optimizer_in_backward:
            apply_optimizer_in_backward(
                torch.optim.SGD, model.embedding_bags['table_0'].parameters(),
                {'lr': 1.0})
            apply_optimizer_in_backward(
                torch.optim.SGD, model.embedding_bags['table_1'].parameters(),
                {'lr': 4.0})
            apply_optimizer_in_backward(
                torch.optim.SGD, unsharded_model.embedding_bags['table_0'].parameters(),
                {'lr': 1.0})
            apply_optimizer_in_backward(
                torch.optim.SGD, unsharded_model.embedding_bags['table_1'].parameters(),
                {'lr': 4.0})
        plan: ShardingPlan = planner.collective_plan(model, [sharder], ctx.pg)
        sharded_model = shard(module=model,
                              env=ShardingEnv.from_process_group(ctx.pg),
                              plan=plan.get_plan_for_module(''),
                              sharder=sharder,
                              device=ctx.device)
        if not use_apply_optimizer_in_backward:
            unsharded_model_optimizer = torch.optim.SGD(unsharded_model.parameters(),
                                                        lr=0.01)
            sharded_model_optimizer = torch.optim.SGD(sharded_model.parameters(),
                                                      lr=0.01)
        assert isinstance(sharded_model, ShardedEmbeddingBagCollection)
        unsharded_model.load_state_dict(copy.deepcopy(initial_state_dict))
        copy_state_dict(sharded_model.state_dict(), copy.deepcopy(initial_state_dict))
        feature_keys = []
        for table in tables:
            feature_keys.extend(table.feature_names)
        for _it in range(5):
            unsharded_model_params = dict(unsharded_model.named_parameters())
            if not use_apply_optimizer_in_backward:
                unsharded_model_optimizer.zero_grad()
                sharded_model_optimizer.zero_grad()
            if is_data_parallel:
                for fqn, param in sharded_model.named_parameters():
                    assert _optional_equals(param.grad, unsharded_model_params[fqn].grad)
            unsharded_model_pred_kt = []
            for unsharded_rank in range(ctx.world_size):
                unsharded_model_pred_kt.append(
                    unsharded_model(kjt_input_per_rank[unsharded_rank]))
            all_unsharded_preds = []
            for unsharded_rank in range(ctx.world_size):
                unsharded_model_pred_kt_mini_batch = \
                    unsharded_model_pred_kt[unsharded_rank].to_dict()
                all_unsharded_preds.extend(
                    [unsharded_model_pred_kt_mini_batch[feature]
                     for feature in feature_keys])
                if unsharded_rank == ctx.rank:
                    unsharded_model_pred = torch.stack(
                        [unsharded_model_pred_kt_mini_batch[feature]
                         for feature in feature_keys])
            sharded_model_pred_kt = sharded_model(kjt_input_per_rank[ctx.rank]).to_dict()
            sharded_model_pred = torch.stack(
                [sharded_model_pred_kt[feature] for feature in feature_keys])
            torch.testing.assert_close(sharded_model_pred.cpu(),
                                       unsharded_model_pred.cpu())
            sharded_model_pred.sum().backward()
            all_unsharded_preds = torch.stack(all_unsharded_preds)
            _sum = all_unsharded_preds.sum()
            if is_data_parallel:
                _sum /= world_size
            _sum.backward()
            if is_data_parallel:
                for fqn, param in sharded_model.named_parameters():
                    assert _optional_equals(param.grad, unsharded_model_params[fqn].grad)
            if not use_apply_optimizer_in_backward:
                unsharded_model_optimizer.step()
                sharded_model_optimizer.step()
        assert_state_buffers_parameters_equal(unsharded_model, sharded_model)
        for fqn in unsharded_model.state_dict():
            unsharded_state = unsharded_model.state_dict()[fqn]
            sharded_state = sharded_model.state_dict()[fqn]
            if is_data_parallel:
                torch.testing.assert_close(unsharded_state, sharded_state)
            else:
                out = (torch.zeros(size=unsharded_state.shape, device=ctx.device)
                       if ctx.rank == 0 else None)
                sharded_state.gather(out=out)
                if ctx.rank == 0:
                    torch.testing.assert_close(unsharded_state, out)
def parse_fatal_stacktrace(text):
    lines = [
        '(?P<type>Fatal Python error|Windows fatal exception): (?P<msg>.*)',
        ' *',
        '(Current )?[Tt]hread [^ ]* \\(most recent call first\\): *',
        ' File ".*", line \\d+ in (?P<func>.*)',
    ]
    m = re.search('\n'.join(lines), text)
    if m is None:
        return ('', '')
    else:
        msg = m.group('msg')
        typ = m.group('type')
        func = m.group('func')
        if typ == 'Windows fatal exception':
            msg = 'Windows ' + msg
        return (msg, func)
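# A minimal sketch of how parse_fatal_stacktrace matches a crash dump
# (the input text below is hypothetical):
example = ('Fatal Python error: Aborted\n'
           '\n'
           'Current thread 0x00007f00 (most recent call first):\n'
           ' File "foo.py", line 10 in bar\n')
print(parse_fatal_stacktrace(example))  # -> ('Aborted', 'bar')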
# Decorator stripped to "(scope='class')" in the source; a pytest fixture is assumed.
@pytest.fixture(scope='class')
def request_chat():
    return KeyboardButtonRequestChat(
        TestKeyboardButtonRequestChatBase.request_id,
        TestKeyboardButtonRequestChatBase.chat_is_channel,
        TestKeyboardButtonRequestChatBase.chat_is_forum,
        TestKeyboardButtonRequestChatBase.chat_has_username,
        TestKeyboardButtonRequestChatBase.chat_is_created,
        TestKeyboardButtonRequestChatBase.user_administrator_rights,
        TestKeyboardButtonRequestChatBase.bot_administrator_rights,
        TestKeyboardButtonRequestChatBase.bot_is_member,
    )
def adj_loglikelihood_scalar(disp, X, y, mu, sign):
    n = 1 / disp
    p = n / (n + mu)
    loglik = sum(nbinom.logpmf(y, n, p))
    diagVec = mu / (1 + mu * disp)
    diagWM = np.diag(diagVec)
    xtwx = np.dot(np.dot(X.T, diagWM), X)
    coxreid = 0.5 * np.log(np.linalg.det(xtwx))
    ret = (loglik - coxreid) * sign
    if isinstance(ret, complex):
        raise complexException()
    return ret
def get_address_metadata(address: Address,
                         route_states: List[RouteState]) -> Optional[AddressMetadata]:
    for route_state in route_states:
        recipient_metadata = route_state.address_to_metadata.get(address, None)
        if recipient_metadata is not None:
            return recipient_metadata
    return None
def _decode(inputpath, coder, show, device, output=None):
    decode_func = {CodecType.IMAGE_CODEC: decode_image,
                   CodecType.VIDEO_CODEC: decode_video}
    compressai.set_entropy_coder(coder)
    dec_start = time.time()
    with Path(inputpath).open('rb') as f:
        model, metric, quality = parse_header(read_uchars(f, 2))
        original_size = read_uints(f, 2)
        original_bitdepth = read_uchars(f, 1)[0]
        start = time.time()
        model_info = models[model]
        net = model_info(quality=quality, metric=metric,
                         pretrained=True).to(device).eval()
        codec_type = (CodecType.IMAGE_CODEC if model in image_models
                      else CodecType.VIDEO_CODEC)
        load_time = time.time() - start
        print(f'Model: {model:s}, metric: {metric:s}, quality: {quality:d}')
        stream_info = CodecInfo(None, original_size, original_bitdepth, net, device)
        out = decode_func[codec_type](f, stream_info, output)
    dec_time = time.time() - dec_start
    print(f'Decoded in {dec_time:.2f}s (model loading: {load_time:.2f}s)')
    if show:
        show_image(out['img'])
def get_ms():
    # Assumption: `cms` was defined outside this snippet in the source;
    # a local list is used here so the function stands alone.
    cms = []
    try:
        client = docker.DockerClient(base_url='tcp://%s:2376' % docker_host_ip)
        for ms in client.containers.list():
            if ms.name == sys.argv[1]:
                cms.append(ms)
        return cms[0]
    except:
        print("Can't connect to docker API, Exiting!")
        print(traceback.format_exc())
        sys.exit(1)
class FocalLoss(nn.Module):
    def __init__(self, num_classes, w, epsilon=0.1, use_gpu=True,
                 label_smooth=True, gamma=0.5):
        super(FocalLoss, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon if label_smooth else 0
        self.use_gpu = use_gpu
        self.sigmoid = nn.Sigmoid()
        self.gamma = gamma
        self.w = w

    def forward(self, inputs, targets):
        if self.use_gpu:
            targets = targets.cuda()
            w = torch.tensor(self.w).float().cuda()
        else:
            w = torch.tensor(self.w).float().cpu()
        targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
        inputs_pos = inputs.clamp(min=0)
        inputs_neg = inputs.clamp(max=0)
        sigmoid_pos = (self.gamma * inputs_neg
                       - self.gamma * (1 + (-inputs.abs()).exp()).log()).exp()
        sigmoid_neg = (-self.gamma * inputs_pos
                       - self.gamma * (1 + (-inputs.abs()).exp()).log()).exp()
        first_pos = -sigmoid_pos * inputs_pos * (1 - targets)
        first_neg = sigmoid_neg * inputs_neg * targets
        loss = -((first_pos + first_neg)
                 - sigmoid_neg * (1 + (-inputs.abs()).exp()).log() * targets
                 - sigmoid_pos * (1 + (-inputs.abs()).exp()).log() * (1 - targets))
        return (w * loss.mean(0)).sum()
# Decorator truncated to "_fixtures(...)" in the source; reahl's @with_fixtures
# marker is the likely original.
@with_fixtures(SqlAlchemyFixture, DeferredActionFixture)
def test_deferred_action_completes_with_shared_requirements(sql_alchemy_fixture,
                                                            deferred_action_fixture):
    fixture = deferred_action_fixture
    with sql_alchemy_fixture.persistent_test_classes(fixture.MyDeferredAction,
                                                     fixture.SomeObject):
        requirements1 = [Requirement()]
        requirements2 = [Requirement(), Requirement()]
        deferred_action1 = fixture.MyDeferredAction(fixture.one_object,
                                                    requirements=requirements2,
                                                    deadline=fixture.future_time)
        Session.add(deferred_action1)
        deferred_action2 = fixture.MyDeferredAction(
            fixture.another_object,
            requirements=requirements1 + requirements2,
            deadline=fixture.future_time)
        Session.add(deferred_action2)
        Session.flush()
        for requirement in requirements2:
            assert set(requirement.deferred_actions) == {deferred_action1,
                                                         deferred_action2}
        for requirement in requirements1:
            assert set(requirement.deferred_actions) == {deferred_action2}
        for requirement in requirements2:
            requirement.set_fulfilled()
        assert fixture.one_object.done_flag
        assert not fixture.another_object.done_flag
        for requirement in (requirements1 + requirements2):
            assert set(requirement.deferred_actions) == {deferred_action2}
        assert Session.query(Requirement).count() == 3
        assert Session.query(DeferredAction).count() == 1
        for requirement in requirements1:
            requirement.set_fulfilled()
        assert fixture.one_object.done_flag
        assert fixture.another_object.done_flag
        assert Session.query(Requirement).count() == 0
        assert Session.query(DeferredAction).count() == 0
def test_Array_dc_ohms_from_percent(mocker):
    mocker.spy(pvsystem, 'dc_ohms_from_percent')
    expected = 0.1425
    array = pvsystem.Array(pvsystem.FixedMount(0, 180),
                           array_losses_parameters={'dc_ohmic_percent': 3},
                           module_parameters={'I_mp_ref': 8, 'V_mp_ref': 38})
    out = array.dc_ohms_from_percent()
    pvsystem.dc_ohms_from_percent.assert_called_with(
        dc_ohmic_percent=3, vmp_ref=38, imp_ref=8, modules_per_string=1, strings=1)
    assert_allclose(out, expected)
    array = pvsystem.Array(pvsystem.FixedMount(0, 180),
                           array_losses_parameters={'dc_ohmic_percent': 3},
                           module_parameters={'Impo': 8, 'Vmpo': 38})
    out = array.dc_ohms_from_percent()
    pvsystem.dc_ohms_from_percent.assert_called_with(
        dc_ohmic_percent=3, vmp_ref=38, imp_ref=8, modules_per_string=1, strings=1)
    assert_allclose(out, expected)
    array = pvsystem.Array(pvsystem.FixedMount(0, 180),
                           array_losses_parameters={'dc_ohmic_percent': 3},
                           module_parameters={'Impp': 8, 'Vmpp': 38})
    out = array.dc_ohms_from_percent()
    pvsystem.dc_ohms_from_percent.assert_called_with(
        dc_ohmic_percent=3, vmp_ref=38, imp_ref=8, modules_per_string=1, strings=1)
    assert_allclose(out, expected)
    with pytest.raises(ValueError,
                       match='Parameters for Vmp and Imp could not be found in the array '
                             'module parameters. Module parameters must include one set '
                             'of {"V_mp_ref", "I_mp_Ref"}, {"Vmpo", "Impo"}, or '
                             '{"Vmpp", "Impp"}.'):
        array = pvsystem.Array(pvsystem.FixedMount(0, 180),
                               array_losses_parameters={'dc_ohmic_percent': 3})
        out = array.dc_ohms_from_percent()
# @dataclass decorator restored (stripped in the source); the bare
# field(...) annotations below require it.
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default=None,
        metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(
        default=None,
        metadata={'help': 'The configuration name of the dataset to use '
                          '(via the datasets library).'})
    data_dir: Optional[str] = field(
        default=None,
        metadata={'help': 'The data directory containing input files.'})
    image_column: Optional[str] = field(
        default='image_path',
        metadata={'help': 'The name of the column in the datasets containing the '
                          'full image file paths.'})
    caption_column: Optional[str] = field(
        default='caption',
        metadata={'help': 'The name of the column in the datasets containing the '
                          'image captions.'})
    train_file: Optional[str] = field(
        default=None,
        metadata={'help': 'The input training data file (a jsonlines file).'})
    validation_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input evaluation data file (a jsonlines file).'})
    test_file: Optional[str] = field(
        default=None,
        metadata={'help': 'An optional input testing data file (a jsonlines file).'})
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={'help': 'The maximum total input sequence length after tokenization. '
                          'Sequences longer than this will be truncated, sequences '
                          'shorter will be padded.'})
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={'help': 'For debugging purposes or quicker training, truncate the '
                          'number of training examples to this value if set.'})
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={'help': 'For debugging purposes or quicker training, truncate the '
                          'number of evaluation examples to this value if set.'})
    overwrite_cache: bool = field(
        default=False,
        metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={'help': 'The number of processes to use for the preprocessing.'})

    def __post_init__(self):
        if (self.dataset_name is None and self.train_file is None
                and self.validation_file is None):
            raise ValueError('Need either a dataset name or a training/validation file.')
        else:
            if self.train_file is not None:
                extension = self.train_file.split('.')[-1]
                assert extension in ['csv', 'json'], \
                    '`train_file` should be a csv or a json file.'
            if self.validation_file is not None:
                extension = self.validation_file.split('.')[-1]
                assert extension in ['csv', 'json'], \
                    '`validation_file` should be a csv or a json file.'
            if self.validation_file is not None:
                extension = self.validation_file.split('.')[-1]
                assert extension == 'json', '`validation_file` should be a json file.'
def create_table(table: str,
                 namespace: Optional[str] = None,
                 lifecycle_state: Optional[LifecycleState] = None,
                 schema: Optional[Union[pa.Schema, str, bytes]] = None,
                 schema_consistency: Optional[Dict[str, SchemaConsistencyType]] = None,
                 partition_keys: Optional[List[Dict[str, Any]]] = None,
                 primary_keys: Optional[Set[str]] = None,
                 sort_keys: Optional[List[SortKey]] = None,
                 description: Optional[str] = None,
                 properties: Optional[Dict[str, str]] = None,
                 permissions: Optional[Dict[str, Any]] = None,
                 content_types: Optional[List[ContentType]] = None,
                 replace_existing_table: bool = False,
                 *args, **kwargs) -> TableDefinition:
    raise NotImplementedError('create_table not implemented')
class PreviewForm(QDialog):
    def __init__(self, parent):
        super(PreviewForm, self).__init__(parent)
        self.encodingComboBox = QComboBox()
        encodingLabel = QLabel('&Encoding:')
        encodingLabel.setBuddy(self.encodingComboBox)
        self.textEdit = QTextEdit()
        self.textEdit.setLineWrapMode(QTextEdit.NoWrap)
        self.textEdit.setReadOnly(True)
        buttonBox = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        self.encodingComboBox.activated.connect(self.updateTextEdit)
        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)
        mainLayout = QGridLayout()
        mainLayout.addWidget(encodingLabel, 0, 0)
        mainLayout.addWidget(self.encodingComboBox, 0, 1)
        mainLayout.addWidget(self.textEdit, 1, 0, 1, 2)
        mainLayout.addWidget(buttonBox, 2, 0, 1, 2)
        self.setLayout(mainLayout)
        self.setWindowTitle('Choose Encoding')
        self.resize(400, 300)

    def setCodecList(self, codecs):
        self.encodingComboBox.clear()
        for codec in codecs:
            self.encodingComboBox.addItem(codec_name(codec), codec.mibEnum())

    def setEncodedData(self, data):
        self.encodedData = data
        self.updateTextEdit()

    def decodedString(self):
        return self.decodedStr

    def updateTextEdit(self):
        mib = self.encodingComboBox.itemData(self.encodingComboBox.currentIndex())
        codec = QTextCodec.codecForMib(mib)
        data = QTextStream(self.encodedData)
        data.setAutoDetectUnicode(False)
        data.setCodec(codec)
        self.decodedStr = data.readAll()
        self.textEdit.setPlainText(self.decodedStr)
def test_huge_dataset():
    candidates = CompletedKeys(1024 * 1024)
    start_time = datetime.now()
    iterations = 0
    with pytest.raises(NoAvailableKeysError):
        while datetime.now() - start_time < timedelta(seconds=10):
            start = candidates.get_block_start_index(1024)
            assert candidates.is_available(start)
            candidates.mark_completed(start, start + random.randint(512, 1024))
            iterations += 1
    assert iterations > 1024
    assert candidates.num_remaining == 0
def make_rotating_equity_info(num_assets, first_start, frequency,
                              periods_between_starts, asset_lifetime,
                              exchange='TEST'):
    return pd.DataFrame(
        {'symbol': [chr(ord('A') + i) for i in range(num_assets)],
         'start_date': pd.date_range(first_start,
                                     freq=periods_between_starts * frequency,
                                     periods=num_assets),
         'end_date': pd.date_range(first_start + asset_lifetime * frequency,
                                   freq=periods_between_starts * frequency,
                                   periods=num_assets),
         'exchange': exchange},
        index=range(num_assets))
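# Hypothetical usage of make_rotating_equity_info: three assets whose listings
# start two days apart and live for five days each (values chosen for
# illustration only, not from the source).
import pandas as pd

info = make_rotating_equity_info(num_assets=3,
                                 first_start=pd.Timestamp('2020-01-01'),
                                 frequency=pd.offsets.Day(),
                                 periods_between_starts=2,
                                 asset_lifetime=5)
print(info[['symbol', 'start_date', 'end_date']])
# -> symbols A, B, C starting 2020-01-01/03/05 and ending 2020-01-06/08/10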
class Effect11947(BaseEffect):
    type = ('projected', 'passive')

    # @staticmethod assumed (stripped in the source): the handler takes no self/cls.
    @staticmethod
    def handler(fit, beacon, context, projectionRange, **kwargs):
        fit.modules.filteredItemMultiply(
            lambda mod: mod.item.requiresSkill('Vorton Projector Operation'),
            'aoeCloudSize',
            beacon.getModifiedItemAttr('aoeCloudSizeMultiplier'),
            stackingPenalties=True,
            penaltyGroup='postMul',
            **kwargs)
class ProxySimple(ProxyDirect):
    def __init__(self, jump, protos, cipher, users, rule, bind, host_name, port,
                 unix, lbind, sslclient, sslserver):
        super().__init__(lbind)
        self.protos = protos
        self.cipher = cipher
        self.users = users
        self.rule = compile_rule(rule) if rule else None
        self.bind = bind
        self.host_name = host_name
        self.port = port
        self.unix = unix
        self.sslclient = sslclient
        self.sslserver = sslserver
        self.jump = jump

    def logtext(self, host, port):
        return (f" -> {self.rproto.name + ('+ssl' if self.sslclient else '')} {self.bind}"
                + self.jump.logtext(host, port))

    def match_rule(self, host, port):
        return self.rule is None or self.rule(host) or self.rule(str(port))

    # @property decorators assumed for the two accessors below (stripped in the
    # source); they are used as plain attributes (self.rproto.name, rauth=self.auth)
    # elsewhere in this class.
    @property
    def rproto(self):
        return self.protos[0]

    @property
    def auth(self):
        return self.users[0] if self.users else b''

    def udp_packet_unpack(self, data):
        data = self.cipher.datagram.decrypt(data) if self.cipher else data
        return self.jump.udp_packet_unpack(self.rproto.udp_unpack(data))

    def destination(self, host, port):
        return (self.host_name, self.port)

    def udp_prepare_connection(self, host, port, data):
        data = self.jump.udp_prepare_connection(host, port, data)
        whost, wport = self.jump.destination(host, port)
        data = self.rproto.udp_connect(rauth=self.auth, host_name=whost,
                                       port=wport, data=data)
        if self.cipher:
            data = self.cipher.datagram.encrypt(data)
        return data

    def udp_start_server(self, args):
        class Protocol(asyncio.DatagramProtocol):
            def connection_made(prot, transport):
                prot.transport = transport

            def datagram_received(prot, data, addr):
                asyncio.ensure_future(datagram_handler(prot.transport, data, addr,
                                                       **vars(self), **args))
        return asyncio.get_event_loop().create_datagram_endpoint(
            Protocol, local_addr=(self.host_name, self.port))

    def wait_open_connection(self, host, port, local_addr, family):
        if self.unix:
            return asyncio.open_unix_connection(path=self.bind)
        else:
            return asyncio.open_connection(host=self.host_name, port=self.port,
                                           local_addr=local_addr, family=family)

    async def prepare_connection(self, reader_remote, writer_remote, host, port):
        reader_remote, writer_remote = proto.sslwrap(
            reader_remote, writer_remote, self.sslclient, False, self.host_name)
        _, writer_cipher_r = await prepare_ciphers(self.cipher, reader_remote,
                                                   writer_remote, self.bind)
        whost, wport = self.jump.destination(host, port)
        await self.rproto.connect(reader_remote=reader_remote,
                                  writer_remote=writer_remote,
                                  rauth=self.auth, host_name=whost, port=wport,
                                  writer_cipher_r=writer_cipher_r,
                                  myhost=self.host_name,
                                  sock=writer_remote.get_extra_info('socket'))
        return await self.jump.prepare_connection(reader_remote, writer_remote,
                                                  host, port)

    def start_server(self, args, stream_handler=stream_handler):
        handler = functools.partial(stream_handler, **vars(self), **args)
        if self.unix:
            return asyncio.start_unix_server(handler, path=self.bind)
        else:
            return asyncio.start_server(handler, host=self.host_name, port=self.port,
                                        reuse_port=args.get('ruport'))
def test_load_backoff_callable_bare():
    with pytest.raises(ValueError) as err:
        backoffcache.load_backoff_callable('local_test_arb_callable')
    assert str(err.value) == (
        "Trying to find back-off strategy 'local_test_arb_callable'. "
        "If this is a built-in back-off strategy, are you sure you got the name right?\n"
        "If you're trying to load a custom callable, name should be in format "
        "'package.module.ClassName' or 'mod.ClassName'.")
@pytest.mark.skipif(kvikio.defaults.compat_mode(),
                    reason='cannot test `set_compat_mode` when already running '
                           'in compatibility mode')
def test_set_compat_mode_between_io(tmp_path):
    with kvikio.defaults.set_compat_mode(False):
        f = kvikio.CuFile(tmp_path / 'test-file', 'w')
        assert not f.closed
        assert (f.open_flags() & os.O_WRONLY) != 0
        with kvikio.defaults.set_compat_mode(True):
            a = cupy.arange(10)
            assert f.write(a) == a.nbytes
# Decorator callable stripped to "(ORDERS_PATH)" in the source; a POST route
# registration (e.g. Powertools' @app.post) is assumed for a create-order handler.
@app.post(ORDERS_PATH)
def handle_create_order() -> dict[str, Any]:
    env_vars: MyHandlerEnvVars = get_environment_variables(model=MyHandlerEnvVars)
    logger.debug('environment variables', env_vars=env_vars.model_dump())
    my_configuration = parse_configuration(model=MyConfiguration)
    logger.debug('fetched dynamic configuration',
                 configuration=my_configuration.model_dump())
    create_input: CreateOrderRequest = parse(event=app.current_event.raw_event,
                                             model=CreateOrderRequest,
                                             envelope=ApiGatewayEnvelope)
    logger.info('got create order request', order=create_input.model_dump())
    metrics.add_metric(name='ValidCreateOrderEvents', unit=MetricUnit.Count, value=1)
    response: CreateOrderOutput = create_order(order_request=create_input,
                                               table_name=env_vars.TABLE_NAME,
                                               context=app.lambda_context)
    logger.info('finished handling create order request')
    return response.model_dump()
class Instrument:
    def __init__(self, ip_addr: str, timeout: Optional[float] = None,
                 port: int = PORT, sub_address: str = 'hislip0') -> None:
        timeout = timeout or 5.0
        self._sync = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._sync.connect((ip_addr, port))
        self._sync.settimeout(timeout)
        self._sync.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        init = self.initialize(sub_address=sub_address.encode('ascii'))
        if init.overlap != 0:
            print('**** prefer overlap = %d' % init.overlap)
        self._async = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self._async.connect((ip_addr, port))
        self._async.settimeout(timeout)
        self._async.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self._async_init = self.async_initialize(session_id=init.session_id)
        self.max_msg_size = DEFAULT_MAX_MSG_SIZE
        self.keepalive = False
        self.timeout = timeout
        self._rmt = 0
        # Numeric literal stripped in the source; 0xFFFF_FF00 is the initial
        # MessageID value mandated by the HiSLIP specification.
        self._message_id = 0xFFFF_FF00
        self._last_message_id: Optional[int] = None
        self._msg_type: str = ''
        self._payload_remaining: int = 0

    def close(self) -> None:
        self._sync.close()
        self._async.close()

    # The property/setter decorators below were stripped in the source (e.g.
    # "@max_msg_size.setter" survived only as "_msg_size.setter"); they are
    # restored to match the paired getter/setter definitions.
    @property
    def timeout(self) -> float:
        return self._timeout

    @timeout.setter
    def timeout(self, val: float) -> None:
        self._timeout = val
        self._sync.settimeout(self._timeout)
        self._async.settimeout(self._timeout)

    @property
    def max_msg_size(self) -> int:
        return self._max_msg_size

    @max_msg_size.setter
    def max_msg_size(self, size: int) -> None:
        self._max_msg_size = self.async_maximum_message_size(size)

    @property
    def last_message_id(self) -> Optional[int]:
        return self._last_message_id

    @last_message_id.setter
    def last_message_id(self, message_id: Optional[int]) -> None:
        self._last_message_id = message_id
        self._rmt = 0
        self._payload_remaining = 0
        self._msg_type = ''

    @property
    def keepalive(self) -> bool:
        return self._keepalive

    @keepalive.setter
    def keepalive(self, keepalive: bool) -> None:
        self._keepalive = bool(keepalive)
        self._sync.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, bool(keepalive))
        self._async.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, bool(keepalive))

    def send(self, data: bytes) -> int:
        data_view = memoryview(data)
        num_bytes_to_send = len(data)
        max_payload_size = self._max_msg_size - HEADER_SIZE
        while num_bytes_to_send > 0:
            if num_bytes_to_send <= max_payload_size:
                assert len(data_view) == num_bytes_to_send
                self._send_data_end_packet(data_view)
                bytes_sent = num_bytes_to_send
            else:
                self._send_data_packet(data_view[:max_payload_size])
                bytes_sent = max_payload_size
            data_view = data_view[bytes_sent:]
            num_bytes_to_send -= bytes_sent
        return len(data)

    def receive(self, max_len: int = 4096) -> bytes:
        recv_buffer = bytearray(max_len)
        view = memoryview(recv_buffer)
        bytes_recvd = 0
        while bytes_recvd < max_len:
            if self._payload_remaining <= 0:
                if self._msg_type == 'DataEnd':
                    recv_buffer = recv_buffer[:bytes_recvd]
                    break
                self._msg_type, self._payload_remaining = self._next_data_header()
            request_size = min(self._payload_remaining, max_len - bytes_recvd)
            receive_exact_into(self._sync, view[:request_size])
            self._payload_remaining -= request_size
            bytes_recvd += request_size
            view = view[request_size:]
        if bytes_recvd > max_len:
            raise MemoryError('scribbled past end of recv_buffer')
        if self._payload_remaining == 0 and self._msg_type == 'DataEnd':
            self._rmt = 1
        return recv_buffer

    def _next_data_header(self) -> Tuple[str, int]:
        while True:
            header = RxHeader(self._sync)
            if header.msg_type in ('Data', 'DataEnd'):
                # First comparand stripped in the source; matching against the
                # current message id is assumed here.
                if (header.message_parameter == self._message_id
                        or header.message_parameter == self.last_message_id):
                    break
            receive_flush(self._sync, header.payload_length)
        return (header.msg_type, header.payload_length)

    def device_clear(self) -> None:
        feature = self.async_device_clear()
        time.sleep(0.1)
        self.device_clear_complete(feature)
        # Literal stripped in the source; the HiSLIP spec resets the MessageID
        # to 0xFFFF_FF00 after a device clear.
        self._message_id = 0xFFFF_FF00

    def initialize(self, version: tuple = (1, 0), vendor_id: bytes = b'xx',
                   sub_address: bytes = b'hislip0') -> InitializeResponse:
        major, minor = version
        header = struct.pack('!2sBBBB2sQ', b'HS', MESSAGETYPE['Initialize'], 0,
                             major, minor, vendor_id, len(sub_address))
        self._sync.sendall(header + sub_address)
        return InitializeResponse(self._sync)

    def async_initialize(self, session_id: int) -> AsyncInitializeResponse:
        send_msg(self._async, 'AsyncInitialize', 0, session_id)
        return AsyncInitializeResponse(self._async)

    def async_maximum_message_size(self, size: int) -> int:
        payload = struct.pack('!Q', size)
        send_msg(self._async, 'AsyncMaxMsgSize', 0, 0, payload)
        response = AsyncMaxMsgSizeResponse(self._async)
        return response.max_msg_size

    def async_lock_info(self) -> int:
        send_msg(self._async, 'AsyncLockInfo', 0, 0)
        response = AsyncLockInfoResponse(self._async)
        return response.exclusive_lock

    def async_lock_request(self, timeout: float, lock_string: str = '') -> str:
        ctrl_code = LOCKCONTROLCODE['request']
        timeout_ms = int(1000.0 * timeout)
        send_msg(self._async, 'AsyncLock', ctrl_code, timeout_ms, lock_string.encode())
        response = AsyncLockResponse(self._async)
        return response.lock_response

    def async_lock_release(self) -> str:
        ctrl_code = LOCKCONTROLCODE['release']
        send_msg(self._async, 'AsyncLock', ctrl_code, self.last_message_id)
        response = AsyncLockResponse(self._async)
        return response.lock_response

    def async_remote_local_control(self, remotelocalcontrol: str) -> None:
        ctrl_code = REMOTELOCALCONTROLCODE[remotelocalcontrol]
        send_msg(self._async, 'AsyncRemoteLocalControl', ctrl_code,
                 self.last_message_id)
        AsyncRemoteLocalResponse(self._async)

    def async_status_query(self) -> int:
        send_msg(self._async, 'AsyncStatusQuery', self._rmt, self._message_id)
        self._rmt = 0
        response = AsyncStatusResponse(self._async)
        return response.server_status

    def async_device_clear(self) -> int:
        send_msg(self._async, 'AsyncDeviceClear', 0, 0)
        response = AsyncDeviceClearAcknowledge(self._async)
        return response.feature_bitmap

    def device_clear_complete(self, feature_bitmap: int) -> int:
        send_msg(self._sync, 'DeviceClearComplete', feature_bitmap, 0)
        response = DeviceClearAcknowledge(self._sync)
        return response.feature_bitmap

    def trigger(self) -> None:
        send_msg(self._sync, 'Trigger', self._rmt, self._message_id)
        self.last_message_id = self._message_id
        # Mask stripped in the source; MessageID increments by 2 modulo 2**32
        # per the HiSLIP spec.
        self._message_id = (self._message_id + 2) & 0xFFFF_FFFF

    def _send_data_packet(self, payload: bytes) -> None:
        send_msg(self._sync, 'Data', self._rmt, self._message_id, payload)
        self.last_message_id = self._message_id
        self._message_id = (self._message_id + 2) & 0xFFFF_FFFF

    def _send_data_end_packet(self, payload: bytes) -> None:
        send_msg(self._sync, 'DataEnd', self._rmt, self._message_id, payload)
        self.last_message_id = self._message_id
        self._message_id = (self._message_id + 2) & 0xFFFF_FFFF

    def fatal_error(self, error: str, error_message: str = '') -> None:
        err_msg = (error_message or error).encode()
        send_msg(self._sync, 'FatalError', FATALERRORCODE[error], 0, err_msg)

    def error(self, error: str, error_message: str = '') -> None:
        err_msg = (error_message or error).encode()
        send_msg(self._sync, 'Error', ERRORCODE[error], 0, err_msg)
@pytest.mark.remote_data
@pytest.mark.flaky(reruns=RERUNS, reruns_delay=RERUNS_DELAY)
def test_get_acis_station_data():
    df, meta = get_acis_station_data('ORD', '2020-01-10', '2020-01-12',
                                     trace_val=-99)
    expected = pd.DataFrame(
        [[10.0, 2.0, 6.0, np.nan, 21.34, 0.0, 0.0, 0.0, 59.0, 0.0],
         [3.0, -4.0, -0.5, np.nan, 9.4, 5.3, 0.0, 0.0, 65.0, 0.0],
         [-1.0, -5.0, -3.0, np.nan, -99, -99, 5.0, 0.0, 68.0, 0.0]],
        columns=['temp_air_max', 'temp_air_min', 'temp_air_average',
                 'temp_air_observation', 'precipitation', 'snowfall', 'snowdepth',
                 'cooling_degree_days', 'heating_degree_days',
                 'growing_degree_days'],
        index=pd.to_datetime(['2020-01-10', '2020-01-11', '2020-01-12']))
    assert_frame_equal(df, expected)
    expected_meta = {'uid': 48, 'state': 'IL', 'name': 'CHICAGO OHARE INTL AP',
                     'altitude': 204.8256, 'latitude': 41.96017,
                     'longitude': -87.93164}
    expected_meta = {
        'valid_daterange': [['1958-11-01', '2023-06-15'], ['1958-11-01', '2023-06-15'],
                            ['1958-11-01', '2023-06-15'], [],
                            ['1958-11-01', '2023-06-15'], ['1958-11-01', '2023-06-15'],
                            ['1958-11-01', '2023-06-15'], ['1958-11-01', '2023-06-15'],
                            ['1958-11-01', '2023-06-15'], ['1958-11-01', '2023-06-15']],
        'name': 'CHICAGO OHARE INTL AP',
        'sids': ['94846 1', '111549 2', 'ORD 3', '72530 4', 'KORD 5', 'USW 6',
                 'ORD 7', 'USW 32'],
        'county': '17031',
        'state': 'IL',
        'climdiv': 'IL02',
        'uid': 48,
        'tzo': -6.0,
        'sid_dates': [['94846 1', '1989-01-19', '9999-12-31'],
                      ['94846 1', '1958-10-30', '1989-01-01'],
                      ['111549 2', '1989-01-19', '9999-12-31'],
                      ['111549 2', '1958-10-30', '1989-01-01'],
                      ['ORD 3', '1989-01-19', '9999-12-31'],
                      ['ORD 3', '1958-10-30', '1989-01-01'],
                      ['72530 4', '1989-01-19', '9999-12-31'],
                      ['72530 4', '1958-10-30', '1989-01-01'],
                      ['KORD 5', '1989-01-19', '9999-12-31'],
                      ['KORD 5', '1958-10-30', '1989-01-01'],
                      ['USW 6', '1989-01-19', '9999-12-31'],
                      ['USW 6', '1958-10-30', '1989-01-01'],
                      ['ORD 7', '1989-01-19', '9999-12-31'],
                      ['ORD 7', '1958-10-30', '1989-01-01'],
                      ['USW 32', '1989-01-19', '9999-12-31'],
                      ['USW 32', '1958-10-30', '1989-01-01']],
        'altitude': 204.8256,
        'longitude': -87.93164,
        'latitude': 41.96017,
    }
    meta.pop('valid_daterange')
    expected_meta.pop('valid_daterange')
    assert meta == expected_meta
    df, meta = get_acis_station_data('ORD', '2020-01-10', '2020-01-12',
                                     trace_val=-99, map_variables=False)
    expected.columns = ['maxt', 'mint', 'avgt', 'obst', 'pcpn', 'snow', 'snwd',
                        'cdd', 'hdd', 'gdd']
    assert_frame_equal(df, expected)
    expected_meta['lat'] = expected_meta.pop('latitude')
    expected_meta['lon'] = expected_meta.pop('longitude')
    expected_meta['elev'] = expected_meta.pop('altitude')
    meta.pop('valid_daterange')
    assert meta == expected_meta
def makefile(filepath, mode=448, size=None, exist_ok=False):
    dirname, _ = os.path.split(filepath)
    makedirs(dirname, mode, exist_ok=True)
    try:
        mkfile(filepath, size)
    except OSError as exc:
        if not os.path.isfile(filepath) or not exist_ok:
            raise OSError(exc)
def un_serialize(folder):
    txt_lst = []
    msg = ''
    folder_lst = os.listdir(folder)
    for item in folder_lst:
        _file = os.path.join(folder, item)
        if os.path.isfile(_file):
            if _file.endswith('.txt'):
                txt_lst.append(_file)
    record_file = get_minimum_file(txt_lst)
    if not record_file:
        msg = ' txt '
        return (False, msg)
    record = {}
    try:
        with open(record_file, 'rb') as f_handle:
            _record = load(f_handle)
            if isinstance(_record, dict):
                record = _record
    except Exception:
        pass
    if not record:
        msg = f'{record_file} : '
        return (False, msg)
    else:
        files_info = {}
        not_complete = False
        for _file in record['parts']:
            full_path = os.path.join(folder, _file)
            if not os.path.isfile(full_path):
                not_complete = True
                files_info[_file] = False
        if not_complete:
            msg = ''
            return (False, msg)
        merged_file_name = os.path.join(folder, record['name'])
        with open(merged_file_name, 'ab') as merge_f:
            for _file in record['parts']:
                part_file_name = os.path.join(folder, _file)
                with open(part_file_name, 'rb') as f:
                    for data in f:
                        merge_f.write(data)
        if os.path.getsize(merged_file_name) == record['size']:
            for _file in record['parts']:
                part_file_name = os.path.join(folder, _file)
                os.remove(part_file_name)
            os.remove(record_file)
            return (True, '')
        else:
            msg = ''
            return (False, msg)
class Question:
    def __init__(self, data: QuestionData):
        self._data = data
        self._guesses: dict[int, UserGuess] = {}
        self._started = None

    # @property decorators assumed for the read-only accessors below
    # (stripped in the source).
    @property
    def number(self) -> str:
        return self._data['number']

    @property
    def description(self) -> str:
        return self._data['description']

    @property
    def answers(self) -> list[tuple[str, str]]:
        return [(ascii_uppercase[i], q) for i, q in enumerate(self._data['answers'])]

    @property
    def correct(self) -> str:
        return self._data['correct']

    @property
    def max_points(self) -> int:
        return self._data.get('points') or DEFAULT_QUESTION_POINTS

    @property
    def time(self) -> float:
        return self._data.get('time') or DEFAULT_QUESTION_TIME

    def start(self) -> float:
        self._started = time.perf_counter()
        return self._started

    def _update_guess(self, user: int, answer: str) -> UserGuess:
        if self._started is None:
            raise QuestionClosedError('Question is not open for answers.')
        if self._guesses[user][1] is False:
            raise AlreadyUpdatedError(
                f'User({user}) has already updated their guess once.')
        self._guesses[user] = (answer, False, time.perf_counter() - self._started)
        return self._guesses[user]

    def guess(self, user: int, answer: str) -> UserGuess:
        if user in self._guesses:
            return self._update_guess(user, answer)
        if self._started is None:
            raise QuestionClosedError('Question is not open for answers.')
        self._guesses[user] = (answer, True, time.perf_counter() - self._started)
        return self._guesses[user]

    def stop(self) -> dict[int, UserGuess]:
        guesses = self._guesses
        self._started = None
        self._guesses = {}
        return guesses
def build_encoder(cfg, default_args=None):
    backbone = build_from_cfg(cfg['backbone'], BACKBONES, default_args)
    enhance_cfg = cfg.get('enhance')
    if enhance_cfg:
        enhance_module = build_from_cfg(enhance_cfg, ENHANCE_MODULES, default_args)
        encoder = nn.Sequential(backbone, enhance_module)
    else:
        encoder = backbone
    return encoder
class LimitTest(ConfigurableNodeTest, TestCase):
    @classmethod  # decorator restored; setUpClass takes cls
    def setUpClass(cls):
        cls.NodeType = bonobo.Limit

    def test_execution_default(self):
        object_list = [object() for _ in range(42)]
        with self.execute() as context:
            context.write_sync(*object_list)
        assert context.get_buffer() == list(map(ensure_tuple, object_list[:10]))

    def test_execution_custom(self):
        object_list = [object() for _ in range(42)]
        with self.execute(21) as context:
            context.write_sync(*object_list)
        assert context.get_buffer() == list(map(ensure_tuple, object_list[:21]))

    def test_manual(self):
        limit = self.NodeType(5)
        buffer = []
        for x in range(10):
            buffer += list(limit(x))
        assert len(buffer) == 5

    def test_underflow(self):
        limit = self.NodeType(10)
        buffer = []
        for x in range(5):
            buffer += list(limit(x))
        assert len(buffer) == 5
class JpegXr(Codec):
    codec_id = 'imagecodecs_jpegxr'

    def __init__(self, level=None, photometric=None, hasalpha=None,
                 resolution=None, fp2int=None):
        self.level = level
        self.photometric = photometric
        self.hasalpha = hasalpha
        self.resolution = resolution
        self.fp2int = fp2int

    def encode(self, buf):
        buf = protective_squeeze(numpy.asarray(buf))
        return imagecodecs.jpegxr_encode(buf, level=self.level,
                                         photometric=self.photometric,
                                         hasalpha=self.hasalpha,
                                         resolution=self.resolution)

    def decode(self, buf, out=None):
        return imagecodecs.jpegxr_decode(buf, fp2int=self.fp2int, out=out)
class RatingOutcomeModelGenerator(keras.utils.Sequence):
    def __init__(self, data_root, phase, batch_size, use_feature=True,
                 use_exposure=True, shuffle=True):
        assert phase in ['train', 'val', 'test']
        self.phase = phase
        self.batch_size = batch_size
        self.use_feature = use_feature
        self.use_exposure = use_exposure
        self.__load_data(data_root)
        self.shuffle = shuffle
        if self.shuffle:
            self.on_epoch_end()

    def __load_data(self, data_root):
        exposure_root = os.path.join(data_root, 'exposure')
        self.subs_conf = np.load(os.path.join(
            exposure_root, '{}_subs_conf.npy'.format(self.phase)))
        self.conf_dim = self.subs_conf.shape[-1]
        names, dims = ['subs_conf'], [self.conf_dim]
        if self.use_exposure:
            self.exposure = sparse.load_npz(os.path.join(
                data_root, '{}_obs_rat.npz'.format(self.phase))).A
            self.exposure = (self.exposure > 0).astype(np.int32)
            self.exposure_dim = self.exposure.shape[-1]
            names.append('exposure')
            dims.append(self.exposure_dim)
        if self.use_feature:
            self.features = np.load(os.path.join(
                data_root, '{}_feat.npy'.format(self.phase)))
            self.feature_dim = self.features.shape[-1]
            names.append('features')
            dims.append(self.feature_dim)
        if self.phase == 'train':
            self.ratings = sparse.load_npz(
                os.path.join(data_root, 'train_obs_rat.npz')).A
        elif self.phase == 'val':
            self.ratings = sparse.load_npz(
                os.path.join(data_root, 'val_hdt_rat.npz')).A
        else:
            self.ratings = np.load(os.path.join(data_root, 'test_unk_rat.npy'))
        self.num_users, self.num_items = self.ratings.shape
        self.indexes = np.arange(self.num_users)
        names.append('ratings')
        dims.append(self.num_items)
        self._name_dim_dict = dict(zip(names, dims))

    def on_epoch_end(self):
        if self.shuffle:
            np.random.shuffle(self.indexes)

    def __len__(self):
        batch_num = self.num_users // self.batch_size
        if self.num_users % self.batch_size != 0:
            batch_num += 1
        return batch_num

    def __getitem__(self, i):
        batch_idxes = self.indexes[i * self.batch_size:(i + 1) * self.batch_size]
        batch_X = [self.subs_conf[batch_idxes]]
        if self.use_exposure:
            batch_X += [self.exposure[batch_idxes]]
        if self.use_feature:
            batch_X += [self.features[batch_idxes]]
        batch_Y = self.ratings[batch_idxes]
        return (batch_X, batch_Y)

    # @property assumed (stripped in the source): used as a read-only accessor.
    @property
    def name_dim_dict(self):
        return self._name_dim_dict
@pytest.mark.ddblocal def test_transaction_write_with_version_attribute_condition_failure(connection): foo = Foo(21) foo.save() foo2 = Foo(21) with pytest.raises(TransactWriteError) as exc_info: with TransactWrite(connection=connection) as transaction: transaction.save(Foo(21)) assert (exc_info.value.cause_response_code == TRANSACTION_CANCELLED) assert (len(exc_info.value.cancellation_reasons) == 1) assert (exc_info.value.cancellation_reasons[0].code == 'ConditionalCheckFailed') assert isinstance(exc_info.value.cause, botocore.exceptions.ClientError) assert (Foo.Meta.table_name in exc_info.value.cause.MSG_TEMPLATE) with pytest.raises(TransactWriteError) as exc_info: with TransactWrite(connection=connection) as transaction: transaction.update(foo2, actions=[Foo.star.set('birdistheword')]) assert (exc_info.value.cause_response_code == TRANSACTION_CANCELLED) assert (len(exc_info.value.cancellation_reasons) == 1) assert (exc_info.value.cancellation_reasons[0].code == 'ConditionalCheckFailed') assert (Foo.Meta.table_name in exc_info.value.cause.MSG_TEMPLATE) assert (foo2.version is None) with pytest.raises(TransactWriteError) as exc_info: with TransactWrite(connection=connection) as transaction: transaction.delete(foo2) assert (exc_info.value.cause_response_code == TRANSACTION_CANCELLED) assert (len(exc_info.value.cancellation_reasons) == 1) assert (exc_info.value.cancellation_reasons[0].code == 'ConditionalCheckFailed') assert (Foo.Meta.table_name in exc_info.value.cause.MSG_TEMPLATE)
@pytest.mark.django_db def test_cannot_propose_a_talk_as_unlogged_user(graphql_client, conference_factory): conference = conference_factory(topics=('my-topic',), languages=('it',), submission_types=('talk',), durations=('50',), audience_levels=('Beginner',)) (resp, _) = _submit_talk(graphql_client, conference) assert (resp['errors'][0]['message'] == 'User not logged in')
def test_get_rect_from_points_given_topright_bottomleft(): rect = utils.get_rect_from_points(QtCore.QPointF(50, (- 20)), QtCore.QPointF((- 30), 40)) assert (rect.topLeft().x() == (- 30)) assert (rect.topLeft().y() == (- 20)) assert (rect.bottomRight().x() == 50) assert (rect.bottomRight().y() == 40)
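For reference, a minimal sketch of what the utils.get_rect_from_points helper exercised above could look like; the qtpy import and the body are assumptions, not the tested library's code:

from qtpy import QtCore

def get_rect_from_points(p1, p2):
    # normalized() reorders the corners, so topLeft()/bottomRight() are
    # well-defined whichever pair of opposite corners the caller passes
    return QtCore.QRectF(p1, p2).normalized()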
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None): end_points = {} def add_and_check_final(name, net): end_points[name] = net return (name == final_endpoint) with tf.variable_scope(scope, 'InceptionV4', [inputs]): with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, padding='SAME'): net = slim.conv2d(inputs, 32, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3') if add_and_check_final('Conv2d_1a_3x3', net): return (net, end_points) net = slim.conv2d(net, 32, [3, 3], padding='VALID', scope='Conv2d_2a_3x3') if add_and_check_final('Conv2d_2a_3x3', net): return (net, end_points) net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3') if add_and_check_final('Conv2d_2b_3x3', net): return (net, end_points) with tf.variable_scope('Mixed_3a'): with tf.variable_scope('Branch_0'): branch_0 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_0a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, 96, [3, 3], stride=2, padding='VALID', scope='Conv2d_0a_3x3') net = tf.concat(axis=3, values=[branch_0, branch_1]) if add_and_check_final('Mixed_3a', net): return (net, end_points) with tf.variable_scope('Mixed_4a'): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1') branch_0 = slim.conv2d(branch_0, 96, [3, 3], padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.conv2d(net, 64, [1, 1], scope='Conv2d_0a_1x1') branch_1 = slim.conv2d(branch_1, 64, [1, 7], scope='Conv2d_0b_1x7') branch_1 = slim.conv2d(branch_1, 64, [7, 1], scope='Conv2d_0c_7x1') branch_1 = slim.conv2d(branch_1, 96, [3, 3], padding='VALID', scope='Conv2d_1a_3x3') net = tf.concat(axis=3, values=[branch_0, branch_1]) if add_and_check_final('Mixed_4a', net): return (net, end_points) with tf.variable_scope('Mixed_5a'): with tf.variable_scope('Branch_0'): branch_0 = slim.conv2d(net, 192, [3, 3], stride=2, padding='VALID', scope='Conv2d_1a_3x3') with tf.variable_scope('Branch_1'): branch_1 = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='MaxPool_1a_3x3') net = tf.concat(axis=3, values=[branch_0, branch_1]) if add_and_check_final('Mixed_5a', net): return (net, end_points) for idx in range(4): block_scope = ('Mixed_5' + chr((ord('b') + idx))) net = block_inception_a(net, block_scope) if add_and_check_final(block_scope, net): return (net, end_points) net = block_reduction_a(net, 'Mixed_6a') if add_and_check_final('Mixed_6a', net): return (net, end_points) for idx in range(7): block_scope = ('Mixed_6' + chr((ord('b') + idx))) net = block_inception_b(net, block_scope) if add_and_check_final(block_scope, net): return (net, end_points) net = block_reduction_b(net, 'Mixed_7a') if add_and_check_final('Mixed_7a', net): return (net, end_points) for idx in range(3): block_scope = ('Mixed_7' + chr((ord('b') + idx))) net = block_inception_c(net, block_scope) if add_and_check_final(block_scope, net): return (net, end_points) raise ValueError(('Unknown final endpoint %s' % final_endpoint))
def correlated_to_datum_inner(e): if isinstance(e, W_Correlated): return correlated_to_datum_inner(e.get_obj()) elif isinstance(e, W_List): a = correlated_to_datum_inner(e.car()) d = correlated_to_datum_inner(e.cdr()) if ((a is e.car()) and (d is e.cdr())): return e else: return W_Cons.make(a, d) else: return e
@then('document.paragraphs is a list containing three paragraphs') def then_document_paragraphs_is_a_list_containing_three_paragraphs(context): document = context.document paragraphs = document.paragraphs assert isinstance(paragraphs, list) assert (len(paragraphs) == 3) for paragraph in paragraphs: assert isinstance(paragraph, Paragraph)
class Line(): def __init__(self, v1, v2): self.a = (v2.y - v1.y) self.b = (v1.x - v2.x) self.c = v2.cross(v1) def __call__(self, p): return (((self.a * p.x) + (self.b * p.y)) + self.c) def intersection(self, other): if (not isinstance(other, Line)): return NotImplemented w = ((self.a * other.b) - (self.b * other.a)) return Vector((((self.b * other.c) - (self.c * other.b)) / w), (((self.c * other.a) - (self.a * other.c)) / w))
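A usage sketch for the Line class above, assuming a Vector type with x/y attributes and a 2D cross product v.cross(u) == v.x * u.y - v.y * u.x:

axis = Line(Vector(0, 0), Vector(1, 0))       # a=0, b=-1, c=0   ->  -y = 0
vertical = Line(Vector(2, -1), Vector(2, 1))  # a=2, b=0,  c=-4  ->  2x - 4 = 0
p = axis.intersection(vertical)               # Vector(2.0, 0.0)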
def test_lambert_cylindrical_equal_area_scale_operation__defaults(): lceaop = LambertCylindricalEqualAreaScaleConversion() assert (lceaop.name == 'unknown') assert (lceaop.method_name == 'Lambert Cylindrical Equal Area') assert (_to_dict(lceaop) == {'Latitude of 1st standard parallel': 0.0, 'Longitude of natural origin': 0.0, 'False easting': 0.0, 'False northing': 0.0})
class TestClique(QiskitOptimizationTestCase): def setUp(self): super().setUp() self.k = 5 self.seed = 100 aqua_globals.random_seed = self.seed self.num_nodes = 5 self.w = random_graph(self.num_nodes, edge_prob=0.8, weight_range=10) (self.qubit_op, self.offset) = clique.get_operator(self.w, self.k) def _brute_force(self): def bitfield(n, length): result = np.binary_repr(n, length) return [int(digit) for digit in result] nodes = self.num_nodes maximum = (2 ** nodes) has_sol = False for i in range(maximum): cur = bitfield(i, nodes) cur_v = clique.satisfy_or_not(np.array(cur), self.w, self.k) if cur_v: has_sol = True break return has_sol def test_clique(self): algo = NumPyMinimumEigensolver(self.qubit_op, aux_operators=[]) result = algo.run() x = sample_most_likely(result.eigenstate) ising_sol = clique.get_graph_solution(x) np.testing.assert_array_equal(ising_sol, [1, 1, 1, 1, 1]) oracle = self._brute_force() self.assertEqual(clique.satisfy_or_not(ising_sol, self.w, self.k), oracle) def test_clique_vqe(self): aqua_globals.random_seed = 10598 result = VQE(self.qubit_op, RealAmplitudes(reps=5, entanglement='linear'), COBYLA(), max_evals_grouped=2).run(QuantumInstance(BasicAer.get_backend('statevector_simulator'), seed_simulator=aqua_globals.random_seed, seed_transpiler=aqua_globals.random_seed)) x = sample_most_likely(result.eigenstate) ising_sol = clique.get_graph_solution(x) np.testing.assert_array_equal(ising_sol, [1, 1, 1, 1, 1]) oracle = self._brute_force() self.assertEqual(clique.satisfy_or_not(ising_sol, self.w, self.k), oracle)
class MarkGenerator(): if TYPE_CHECKING: skip: _SkipMarkDecorator skipif: _SkipifMarkDecorator xfail: _XfailMarkDecorator parametrize: _ParametrizeMarkDecorator usefixtures: _UsefixturesMarkDecorator filterwarnings: _FilterwarningsMarkDecorator def __init__(self, *, _ispytest: bool=False) -> None: check_ispytest(_ispytest) self._config: Optional[Config] = None self._markers: Set[str] = set() def __getattr__(self, name: str) -> MarkDecorator: if (name[0] == '_'): raise AttributeError('Marker name must NOT start with underscore') if (self._config is not None): if (name not in self._markers): for line in self._config.getini('markers'): marker = line.split(':')[0].split('(')[0].strip() self._markers.add(marker) if (name not in self._markers): if (self._config.option.strict_markers or self._config.option.strict): fail(f'{name!r} not found in `markers` configuration option', pytrace=False) if (name in ['parameterize', 'parametrise', 'parameterise']): __tracebackhide__ = True fail(f"Unknown '{name}' mark, did you mean 'parametrize'?") warnings.warn(('Unknown pytest.mark.%s - is this a typo? You can register custom marks to avoid this warning - for details, see https://docs.pytest.org/en/stable/how-to/mark.html' % name), PytestUnknownMarkWarning, 2) return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True)
class TaggedInlineSingleAdminTest(AdminTestManager, TagTestManager, TestCase): admin_cls = test_admin.SimpleMixedTestSingletagAdmin tagged_model = test_models.SimpleMixedTest model = test_models.SimpleMixedTest.singletag.tag_model def setUpExtra(self): self.site = admin.AdminSite(name='tagulous_admin') tag_admin.register(self.model, admin_class=self.admin_cls, site=self.site) self.ma = self.site._registry[self.model] self.cl = None User.objects.create_superuser('test', '', 'user') result = self.client.login(username='test', password='user') self.assertEqual(result, True) def tearDownExtra(self): test_urls.urlpatterns = self.old_urls self.client.logout() def get_url(self, name, *args, **kwargs): content_type = ContentType.objects.get_for_model(self.model) return reverse(('admin:%s_%s_%s' % (content_type.app_label, content_type.model, name)), args=args, kwargs=kwargs) def test_add_renders(self): response = self.client.get(self.get_url('add')) self.assertContains(response, '<h2>Simple mixed tests</h2>') self.assertContains(response, 'id="id_simplemixedtest_set-TOTAL_FORMS') self.assertContains(response, 'id="id_simplemixedtest_set-0-singletag"') self.assertContains(response, 'id="id_simplemixedtest_set-0-name"') self.assertContains(response, 'id="id_simplemixedtest_set-0-tags"') self.assertContains(response, 'id="id_simplemixedtest_set-2-singletag"') self.assertNotContains(response, 'id="id_simplemixedtest_set-3-singletag"') def test_add_submits(self): data = {'simplemixedtest_set-TOTAL_FORMS': 3, 'simplemixedtest_set-INITIAL_FORMS': 0, 'simplemixedtest_set-MAX_NUM_FORMS': 1000, '_save': 'Save', 'name': 'Mr', 'slug': 'mr', 'simplemixedtest_set-0-name': 'Test 1', 'simplemixedtest_set-0-tags': 'tag1, tag2'} response = self.client.post(self.get_url('add'), data) self.assertEqual(response.status_code, 302) self.assertEqual(self.tagged_model.objects.count(), 1) t1 = self.tagged_model.objects.get(name='Test 1') self.assertInstanceEqual(t1, name='Test 1', singletag='Mr', tags=['tag1', 'tag2']) self.assertTagModel(self.model, {'Mr': 1}) self.assertTagModel(self.tagged_model.tags.tag_model, {'tag1': 1, 'tag2': 1}) def test_edit_renders(self): obj1 = self.tagged_model.objects.create(name='Test 1', singletag='Mr', tags=['tag1', 'tag2']) response = self.client.get(self.get_url('change', obj1.singletag.pk)) self.assertContains(response, '<h2>Simple mixed tests</h2>') self.assertContains(response, 'id="id_simplemixedtest_set-TOTAL_FORMS') self.assertContains(response, 'id="id_simplemixedtest_set-0-singletag"') self.assertContains(response, 'id="id_simplemixedtest_set-0-name"') self.assertContains(response, 'id="id_simplemixedtest_set-0-tags"') self.assertContains(response, 'id="id_simplemixedtest_set-3-singletag"') self.assertNotContains(response, 'id="id_simplemixedtest_set-4-singletag"') html = response.content.decode('utf-8') i_name = html.index('name="simplemixedtest_set-0-singletag"') i_open = html.rindex('<', 0, i_name) i_close = html.index('>', i_name) self.assertHTMLEqual(html[i_open:(i_close + 1)], ('<input type="hidden" name="simplemixedtest_set-0-singletag" value="%d" id="id_simplemixedtest_set-0-singletag" />' % obj1.singletag.pk)) def test_edit_saves(self): obj1 = self.tagged_model.objects.create(name='Test 1', singletag='Mr', tags=['tag1', 'tag2']) obj2 = self.tagged_model.objects.create(name='Test 2', singletag='Mr', tags=['tag3', 'tag4']) tag1 = obj1.singletag self.assertEqual(self.tagged_model.objects.count(), 2) self.assertTagModel(self.model, {'Mr': 2}) 
self.assertTagModel(self.tagged_model.tags.tag_model, {'tag1': 1, 'tag2': 1, 'tag3': 1, 'tag4': 1}) data = {'simplemixedtest_set-TOTAL_FORMS': 5, 'simplemixedtest_set-INITIAL_FORMS': 2, 'simplemixedtest_set-MAX_NUM_FORMS': 1000, '_save': 'Save', 'name': 'Mr', 'slug': 'mr', 'simplemixedtest_set-0-name': 'Test 1e', 'simplemixedtest_set-0-tags': 'tag1, tag2e', 'simplemixedtest_set-0-id': ('%d' % obj1.pk), 'simplemixedtest_set-0-singletag': ('%d' % tag1.pk), 'simplemixedtest_set-0-DELETE': '', 'simplemixedtest_set-1-name': 'Test 2e', 'simplemixedtest_set-1-tags': 'tag3, tag4e', 'simplemixedtest_set-1-id': ('%d' % obj2.pk), 'simplemixedtest_set-1-singletag': ('%d' % tag1.pk), 'simplemixedtest_set-1-DELETE': ''} response = self.client.post(self.get_url('change', tag1.pk), data) self.assertEqual(response.status_code, 302) self.assertEqual(self.tagged_model.objects.count(), 2) self.assertInstanceEqual(obj1, name='Test 1e', singletag='Mr', tags=['tag1', 'tag2e']) self.assertInstanceEqual(obj2, name='Test 2e', singletag='Mr', tags=['tag3', 'tag4e']) self.assertTagModel(self.model, {'Mr': 2}) self.assertTagModel(self.tagged_model.tags.tag_model, {'tag1': 1, 'tag2e': 1, 'tag3': 1, 'tag4e': 1})
@common_params(node='x') def test_outside_if(condition: str, satisfy_val: (int | None), fail_val: (int | None)) -> None: nodes_ = builder.extract_node(f''' def f1(x = {fail_val}): if {condition}: pass return ( x # ) def f2(x = {satisfy_val}): if {condition}: pass return ( x # ) ''') for (node, val) in zip(nodes_, (fail_val, satisfy_val)): inferred = node.inferred() assert (len(inferred) == 2) assert isinstance(inferred[0], nodes.Const) assert (inferred[0].value == val) assert (inferred[1] is Uninferable)
def unit_impulse(shape, idx=None, dtype=float): shape = np.atleast_1d(shape) if (idx is None): idx = ((0,) * len(shape)) elif (idx == 'mid'): idx = tuple((shape // 2)) elif (not hasattr(idx, '__iter__')): idx = ((idx,) * len(shape)) return _unit_impulse_kernel(idx[0], size=shape[0])
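The variant above hands the actual write to a device kernel; for reference, a pure-NumPy sketch of the SciPy-style semantics it mirrors (an all-zeros array with a single unit sample at idx):

import numpy as np

def unit_impulse_reference(shape, idx=None, dtype=float):
    shape = np.atleast_1d(shape)
    if idx is None:
        idx = (0,) * len(shape)
    elif idx == 'mid':
        idx = tuple(shape // 2)
    elif not hasattr(idx, '__iter__'):
        idx = (idx,) * len(shape)
    out = np.zeros(shape, dtype)
    out[tuple(idx)] = 1  # everything else stays 0
    return out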
def ql_syscall_socketpair(ql: Qiling, domain: int, socktype: int, protocol: int, sv: int): unpopulated_fd = (i for i in range(NR_OPEN) if (ql.os.fd[i] is None)) idx1 = next(unpopulated_fd, (- 1)) idx2 = next(unpopulated_fd, (- 1)) regreturn = (- 1) if ((idx1 != (- 1)) and (idx2 != (- 1))): vsock_type = socktype hsock_type = __host_socket_type(vsock_type, ql.arch.type) ql.log.debug(f'Converted emulated socket type {vsock_type} to host socket type {hsock_type}') try: (sock1, sock2) = ql_socket.socketpair(domain, hsock_type, protocol) except OSError as e: ql.log.debug(f'{e}: domain={domain!r}, socktype={socktype!r}, protocol={protocol!r}, sv={sv!r}') regreturn = (- 1) else: ql.os.fd[idx1] = sock1 ql.os.fd[idx2] = sock2 ql.mem.write_ptr((sv + 0), idx1) ql.mem.write_ptr((sv + 4), idx2) regreturn = 0 s_domain = socket_domain_mapping(domain, ql.arch.type, ql.os.type) s_type = socket_type_mapping(socktype, ql.arch.type) ql.log.debug(('socketpair(%s, %s, %d, %d) = %d' % (s_domain, s_type, protocol, sv, regreturn))) return regreturn
class MinValueConstraint(ValidationConstraint): name = 'minvalue' def __init__(self, min_value, error_message=None): error_message = (error_message or _('$label should be $min_value or greater')) super().__init__(error_message=error_message) self.min_value = min_value def validate_parsed_value(self, parsed_value): if (not (parsed_value >= self.min_value)): raise self
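A usage sketch for the constraint above; raising the failed constraint itself appears to be the surrounding framework's validation convention:

constraint = MinValueConstraint(18)
constraint.validate_parsed_value(21)  # acceptable value: returns silently
try:
    constraint.validate_parsed_value(17)
except MinValueConstraint as failed:
    print(failed.min_value)  # 18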
class AttrVI_ATTR_TCPIP_HOSTNAME(Attribute): resources = [(constants.InterfaceType.tcpip, 'INSTR'), (constants.InterfaceType.tcpip, 'SOCKET')] py_name = '' visa_name = 'VI_ATTR_TCPIP_HOSTNAME' visa_type = 'ViString' default = NotAvailable (read, write, local) = (True, False, False)
class VaultClientFactory(): def __init__(self, base_url: str, role: str, auth_type: Authenticator, mount_point: str): self.base_url = base_url self.role = role self.auth_type = auth_type self.mount_point = mount_point self.session = requests.Session() self.session.headers['User-Agent'] = f'baseplate.py-{self.__class__.__name__}/{baseplate_version}' self.client: Optional['VaultClient'] = None def _make_client(self) -> 'VaultClient': (client_token, lease_duration) = self.auth_type(self) return VaultClient(self.session, self.base_url, client_token, lease_duration) def _vault_kubernetes_auth(self) -> Tuple[(str, datetime.datetime)]: try: with open(K8S_SERVICE_ACCOUNT_TOKEN_FILE, encoding='UTF-8') as f: token = f.read() except OSError: logger.error("Could not read Kubernetes token file '%s'", K8S_SERVICE_ACCOUNT_TOKEN_FILE) raise login_data = {'jwt': token, 'role': self.role} logger.debug('Obtaining Vault token via kubernetes auth.') response = self.session.post(urllib.parse.urljoin(self.base_url, f'v1/auth/{self.mount_point}/login'), json=login_data, timeout=5) response.raise_for_status() auth = response.json()['auth'] return (auth['client_token'], ttl_to_time(auth['lease_duration'])) def _vault_aws_auth(self) -> Tuple[(str, datetime.datetime)]: identity_document = fetch_instance_identity() nonce = load_nonce() if (not nonce): nonce = generate_nonce() store_nonce(nonce) login_data = {'role': self.role, 'pkcs7': identity_document, 'nonce': nonce} logger.debug('Obtaining Vault token via aws auth.') response = self.session.post(urllib.parse.urljoin(self.base_url, f'v1/auth/{self.mount_point}/login'), json=login_data, timeout=5) if (response.status_code == 400): logger.error(REAUTHENTICATION_ERROR_MESSAGE) response.raise_for_status() auth = response.json()['auth'] return (auth['client_token'], ttl_to_time(auth['lease_duration'])) @staticmethod def auth_types() -> Dict[(str, Authenticator)]: return {'aws': VaultClientFactory._vault_aws_auth, 'kubernetes': VaultClientFactory._vault_kubernetes_auth} def get_client(self) -> 'VaultClient': if ((not self.client) or self.client.is_about_to_expire): self.client = self._make_client() return self.client
class SimpleMAStrategy(AbstractStrategy): def __init__(self, ts: BacktestTradingSession, ticker: Ticker): super().__init__(ts) self.broker = ts.broker self.order_factory = ts.order_factory self.data_handler = ts.data_handler self.ticker = ticker def calculate_and_place_orders(self): long_ma_len = 20 short_ma_len = 5 long_ma_series = self.data_handler.historical_price(self.ticker, PriceField.Close, long_ma_len) long_ma_price = long_ma_series.mean() short_ma_series = long_ma_series.tail(short_ma_len) short_ma_price = short_ma_series.mean() if (short_ma_price >= long_ma_price): orders = self.order_factory.target_percent_orders({self.ticker: 1.0}, MarketOrder(), TimeInForce.DAY) else: orders = self.order_factory.target_percent_orders({self.ticker: 0.0}, MarketOrder(), TimeInForce.DAY) self.broker.cancel_all_open_orders() self.broker.place_orders(orders)
def test_run_shortcut_skip_parse(mock_pipe, monkeypatch): shortcuts = {'arb pipe': {'pipeline_name': 'sc pipe', 'skip_parse': True}} monkeypatch.setattr('pypyr.config.config.shortcuts', shortcuts) out = run(pipeline_name='arb pipe') assert (type(out) is Context) assert (out == {}) assert (not out.is_in_pipeline_scope) mock_pipe.assert_called_once_with(name='sc pipe', context_args=None, parse_input=False, groups=None, success_group=None, failure_group=None, loader=None, py_dir=None) mock_pipe.return_value.run.assert_called_once_with(out) assert (shortcuts == {'arb pipe': {'pipeline_name': 'sc pipe', 'skip_parse': True}})
class TestParallel(TestNested): def setUp(self): super(TestParallel, self).setUp() self.states = ['A', 'B', {'name': 'C', 'parallel': [{'name': '1', 'children': ['a', 'b'], 'initial': 'a', 'transitions': [['go', 'a', 'b']]}, {'name': '2', 'children': ['a', 'b'], 'initial': 'a', 'transitions': [['go', 'a', 'b']]}]}] self.transitions = [['reset', 'C', 'A']] def test_init(self): m = self.stuff.machine_cls(states=self.states) m.to_C() self.assertEqual(['C{0}1{0}a'.format(State.separator), 'C{0}2{0}a'.format(State.separator)], m.state) def test_enter(self): m = self.stuff.machine_cls(states=self.states, transitions=self.transitions, initial='A') m.to_C() m.go() self.assertEqual(['C{0}1{0}b'.format(State.separator), 'C{0}2{0}b'.format(State.separator)], m.state) def test_exit(self): class Model(): def __init__(self): self.mock = MagicMock() def on_exit_C(self): self.mock() def on_exit_C_1(self): self.mock() def on_exit_C_2(self): self.mock() model1 = Model() m = self.stuff.machine_cls(model1, states=self.states, transitions=self.transitions, initial='A') m.add_transition('reinit', 'C', 'C') model1.to_C() self.assertEqual(['C{0}1{0}a'.format(State.separator), 'C{0}2{0}a'.format(State.separator)], model1.state) model1.reset() self.assertTrue(model1.is_A()) self.assertEqual(3, model1.mock.call_count) model2 = Model() m.add_model(model2, initial='C') model2.reinit() self.assertEqual(['C{0}1{0}a'.format(State.separator), 'C{0}2{0}a'.format(State.separator)], model2.state) self.assertEqual(3, model2.mock.call_count) model2.reset() self.assertTrue(model2.is_A()) self.assertEqual(6, model2.mock.call_count) for mod in m.models: mod.trigger('to_C') for mod in m.models: mod.trigger('reset') self.assertEqual(6, model1.mock.call_count) self.assertEqual(9, model2.mock.call_count) def test_parent_transition(self): m = self.stuff.machine_cls(states=self.states) m.add_transition('switch', 'C{0}2{0}a'.format(State.separator), 'C{0}2{0}b'.format(State.separator)) m.to_C() m.switch() self.assertEqual(['C{0}1{0}a'.format(State.separator), 'C{0}2{0}b'.format(State.separator)], m.state) def test_shallow_parallel(self): sep = self.state_cls.separator states = [{'name': 'P', 'parallel': ['1', {'name': '2', 'children': ['a', 'b'], 'initial': 'b'}]}, 'X'] m = self.machine_cls(states=states, initial='P') self.assertEqual(['P{0}1'.format(sep), 'P{0}2{0}b'.format(sep)], m.state) m.to_X() self.assertEqual('X', m.state) m.to_P() self.assertEqual(['P{0}1'.format(sep), 'P{0}2{0}b'.format(sep)], m.state) with self.assertRaises(MachineError): m.to('X') def test_multiple(self): states = ['A', {'name': 'B', 'parallel': [{'name': '1', 'parallel': [{'name': 'a', 'children': ['x', 'y', 'z'], 'initial': 'z'}, {'name': 'b', 'children': ['x', 'y', 'z'], 'initial': 'y'}]}, {'name': '2', 'children': ['a', 'b', 'c'], 'initial': 'a'}]}] m = self.stuff.machine_cls(states=states, initial='A') self.assertTrue(m.is_A()) m.to_B() self.assertEqual([['B{0}1{0}a{0}z'.format(State.separator), 'B{0}1{0}b{0}y'.format(State.separator)], 'B{0}2{0}a'.format(State.separator)], m.state) m2 = self.machine_cls(states=states, initial=m.state) self.assertEqual([['B{0}1{0}a{0}z'.format(State.separator), 'B{0}1{0}b{0}y'.format(State.separator)], 'B{0}2{0}a'.format(State.separator)], m2.state) m.to_A() self.assertEqual('A', m.state) m2.to_A() self.assertEqual(m.state, m2.state) def test_deep_initial(self): exit_mock = MagicMock() m = self.machine_cls(initial=['B{0}1'.format(State.separator), 'B{0}2{0}a'.format(State.separator)]) m.on_exit('B', exit_mock) 
m.on_exit('B{0}1'.format(State.separator), exit_mock) m.on_exit('B{0}2'.format(State.separator), exit_mock) m.on_exit('B{0}2{0}a'.format(State.separator), exit_mock) m.to_B() self.assertEqual('B', m.state) self.assertEqual(4, exit_mock.call_count) def test_parallel_initial(self): m = self.machine_cls(states=['A', 'B', {'name': 'C', 'parallel': ['1', '2']}], initial='C') m = self.machine_cls(states=['A', 'B', {'name': 'C', 'parallel': ['1', '2']}], initial=['C_1', 'C_2']) def test_parallel_reflexive(self): exit_c_1_mock = MagicMock() m = self.machine_cls(states=['A', 'B', {'name': 'C', 'parallel': [{'name': '1', 'on_exit': exit_c_1_mock}, '2']}], transitions=[['test', 'C{0}2'.format(State.separator), 'C{0}2'.format(State.separator)]], initial='C') m.test() self.assertEqual(['C{0}1'.format(State.separator), 'C{0}2'.format(State.separator)], m.state) self.assertFalse(exit_c_1_mock.called) def test_multiple_deeper(self): sep = self.state_cls.separator states = ['A', {'name': 'P', 'parallel': ['1', {'name': '2', 'parallel': [{'name': 'a'}, {'name': 'b', 'parallel': [{'name': 'x', 'parallel': ['1', '2']}, 'y']}]}]}] ref_state = ['P{0}1'.format(sep), ['P{0}2{0}a'.format(sep), [['P{0}2{0}b{0}x{0}1'.format(sep), 'P{0}2{0}b{0}x{0}2'.format(sep)], 'P{0}2{0}b{0}y'.format(sep)]]] m = self.stuff.machine_cls(states=states, initial='A') self.assertTrue(m.is_A()) m.to_P() self.assertEqual(ref_state, m.state) m.to_A() def test_model_state_conversion(self): sep = self.state_cls.separator states = ['P{0}1'.format(sep), ['P{0}2{0}a'.format(sep), [['P{0}2{0}b{0}x{0}1'.format(sep), 'P{0}2{0}b{0}x{0}2'.format(sep)], 'P{0}2{0}b{0}y'.format(sep)]]] tree = OrderedDict([('P', OrderedDict([('1', OrderedDict()), ('2', OrderedDict([('a', OrderedDict()), ('b', OrderedDict([('x', OrderedDict([('1', OrderedDict()), ('2', OrderedDict())])), ('y', OrderedDict())]))]))]))]) m = self.machine_cls() self.assertEqual(tree, m.build_state_tree(states, sep)) self.assertEqual(states, _build_state_list(tree, sep))
def token_network_registry_state(chain_state, token_network_registry_address): token_network_registry = TokenNetworkRegistryState(token_network_registry_address, []) chain_state.identifiers_to_tokennetworkregistries[token_network_registry_address] = token_network_registry return token_network_registry
class RNN(nn.Module): def __init__(self, input_size, hidden_size, output_size): super(RNN, self).__init__() self.hidden_size = hidden_size self.i2h = nn.Linear((input_size + hidden_size), hidden_size) self.i2o = nn.Linear((input_size + hidden_size), output_size) self.softmax = nn.LogSoftmax(dim=1) def forward(self, input, hidden): combined = torch.cat((input, hidden), 1) hidden = self.i2h(combined) output = self.i2o(combined) output = self.softmax(output) return (output, hidden) def initHidden(self): return torch.zeros(1, self.hidden_size)
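A usage sketch for the RNN above: one forward call per time step over a toy one-hot sequence, threading the hidden state by hand (the sizes are arbitrary, not prescribed by the class):

import torch

rnn = RNN(input_size=57, hidden_size=128, output_size=18)
hidden = rnn.initHidden()
line = torch.zeros(3, 1, 57)  # 3 time steps, batch of 1
line[0, 0, 0] = line[1, 0, 1] = line[2, 0, 2] = 1
for t in range(line.size(0)):
    output, hidden = rnn(line[t], hidden)  # feed the hidden state back in
# output: (1, 18) log-probabilities from the final LogSoftmax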
def test_check_unique(): with pytest.raises(NameNonUniqueError): repair_names([np.nan], repair='check_unique') with pytest.raises(NameNonUniqueError): repair_names([''], repair='check_unique') with pytest.raises(NameNonUniqueError): repair_names(['a', 'a'], repair='check_unique') with pytest.raises(NameNonUniqueError): repair_names(['__1'], repair='check_unique') with pytest.raises(NameNonUniqueError): repair_names(['__'], repair='check_unique') assert (repair_names(['a', 'b'], repair='check_unique') == ['a', 'b'])
def resnext101_32x8d(deconv, delinear, channel_deconv, pretrained=False, progress=True, **kwargs): kwargs['groups'] = 32 kwargs['width_per_group'] = 8 return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, deconv=deconv, delinear=delinear, channel_deconv=channel_deconv, **kwargs)
def test_plugin_config_repo_override(hatch, devpi, temp_dir_cache, helpers, published_project_name, config_file): config_file.model.publish['index']['user'] = 'foo' config_file.model.publish['index']['auth'] = 'bar' config_file.model.publish['index']['ca-cert'] = 'cert' config_file.model.publish['index']['repo'] = 'dev' config_file.model.publish['index']['repos'] = {'dev': {'url': devpi.repo, 'user': devpi.user, 'auth': devpi.auth, 'ca-cert': devpi.ca_cert}} config_file.save() with temp_dir_cache.as_cwd(): result = hatch('new', published_project_name) assert (result.exit_code == 0), result.output path = (temp_dir_cache / published_project_name) with path.as_cwd(): del os.environ[PublishEnvVars.REPO] current_version = timestamp_to_version(helpers.get_current_timestamp()) result = hatch('version', current_version) assert (result.exit_code == 0), result.output result = hatch('build') assert (result.exit_code == 0), result.output build_directory = (path / 'dist') artifacts = list(build_directory.iterdir()) result = hatch('publish') assert (result.exit_code == 0), result.output assert (result.output == helpers.dedent(f''' {artifacts[0].relative_to(path)} ... success {artifacts[1].relative_to(path)} ... success [{published_project_name}] {devpi.repo}{published_project_name}/{current_version}/ '''))
def require_version(minver: str='0.0.0', maxver: str='4.0.0') -> Callable: def parse(python_version: str) -> tuple[(int, ...)]: try: return tuple((int(v) for v in python_version.split('.'))) except ValueError as e: msg = f'{python_version} is not a correct version : should be X.Y[.Z].' raise ValueError(msg) from e min_version = parse(minver) max_version = parse(maxver) def check_require_version(f): current: tuple[(int, int, int)] = sys.version_info[:3] if (min_version < current <= max_version): return f version: str = '.'.join((str(v) for v in sys.version_info)) @wraps(f) def new_f(*args, **kwargs): if (current <= min_version): pytest.skip(f'Needs Python > {minver}. Current version is {version}.') elif (current > max_version): pytest.skip(f'Needs Python <= {maxver}. Current version is {version}.') return new_f return check_require_version
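A usage sketch for require_version: the decorator returns the test unchanged when the interpreter version is in range, and otherwise substitutes a wrapper that calls pytest.skip with the matching message:

@require_version(minver='3.8.0')
def test_needs_recent_python():
    ...  # runs as-is on interpreters newer than 3.8.0

@require_version(maxver='2.7.0')
def test_legacy_only():
    ...  # replaced by a wrapper that skips on any modern interpreter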
class OCIModel(RegistryDataInterface): def __init__(self): self._legacy_image_id_handler = SyntheticIDHandler() def set_id_hash_salt(self, id_hash_salt): self._legacy_image_id_handler = SyntheticIDHandler(id_hash_salt) def _resolve_legacy_image_id_to_manifest_row(self, legacy_image_id): decoded = self._legacy_image_id_handler.decode(legacy_image_id) if (len(decoded) == 0): return (None, None) (manifest_id, layer_index) = decoded if (manifest_id is None): return (None, None) try: return (database.Manifest.get(id=manifest_id), layer_index) except database.Manifest.DoesNotExist: return (None, None) def _resolve_legacy_image_id(self, legacy_image_id): (manifest, layer_index) = self._resolve_legacy_image_id_to_manifest_row(legacy_image_id) if (manifest is None): return (None, None) return (Manifest.for_manifest(manifest, self._legacy_image_id_handler), layer_index) def get_tag_legacy_image_id(self, repository_ref, tag_name, storage): tag = self.get_repo_tag(repository_ref, tag_name) if (tag is None): return None retriever = RepositoryContentRetriever(repository_ref.id, storage) legacy_image = tag.manifest.lookup_legacy_image(0, retriever) if (legacy_image is None): return None return legacy_image.docker_image_id def get_legacy_tags_map(self, repository_ref, storage): tags = oci.tag.list_alive_tags(repository_ref._db_id) tags_map = {} for tag in tags: root_id = Manifest.for_manifest(tag.manifest, self._legacy_image_id_handler).legacy_image_root_id if (root_id is not None): tags_map[tag.name] = root_id return tags_map def find_matching_tag(self, repository_ref, tag_names): found_tag = oci.tag.find_matching_tag(repository_ref._db_id, tag_names) assert ((found_tag is None) or (not found_tag.hidden)) return Tag.for_tag(found_tag, self._legacy_image_id_handler) def get_most_recent_tag(self, repository_ref): found_tag = oci.tag.get_most_recent_tag(repository_ref._db_id) assert ((found_tag is None) or (not found_tag.hidden)) return Tag.for_tag(found_tag, self._legacy_image_id_handler) def get_manifest_for_tag(self, tag): assert (tag is not None) return tag.manifest def lookup_manifest_by_digest(self, repository_ref, manifest_digest, allow_dead=False, allow_hidden=False, require_available=False, raise_on_error=False): manifest = oci.manifest.lookup_manifest(repository_ref._db_id, manifest_digest, allow_dead=allow_dead, allow_hidden=allow_hidden, require_available=require_available) if (manifest is None): if raise_on_error: raise model.ManifestDoesNotExist() return None return Manifest.for_manifest(manifest, self._legacy_image_id_handler) def create_manifest_label(self, manifest, key, value, source_type_name, media_type_name=None): label_data = dict(key=key, value=value, source_type_name=source_type_name, media_type_name=media_type_name) label = oci.label.create_manifest_label(manifest._db_id, key, value, source_type_name, media_type_name) if (label is None): return None apply_label_to_manifest(label_data, manifest, self) return Label.for_label(label) @contextmanager def batch_create_manifest_labels(self, manifest): labels_to_add = [] def add_label(key, value, source_type_name, media_type_name=None): labels_to_add.append(dict(key=key, value=value, source_type_name=source_type_name, media_type_name=media_type_name)) (yield add_label) for label_data in labels_to_add: with db_transaction(): oci.label.create_manifest_label(manifest._db_id, **label_data) apply_label_to_manifest(label_data, manifest, self) def list_manifest_labels(self, manifest, key_prefix=None): labels = oci.label.list_manifest_labels(manifest._db_id,
prefix_filter=key_prefix) return [Label.for_label(l) for l in labels] def get_manifest_label(self, manifest, label_uuid): return Label.for_label(oci.label.get_manifest_label(label_uuid, manifest._db_id)) def delete_manifest_label(self, manifest, label_uuid): return Label.for_label(oci.label.delete_manifest_label(label_uuid, manifest._db_id)) def lookup_active_repository_tags(self, repository_ref, start_pagination_id, limit): tags = oci.tag.lookup_alive_tags_shallow(repository_ref._db_id, start_pagination_id, limit) return [ShallowTag.for_tag(tag) for tag in tags] def list_all_active_repository_tags(self, repository_ref): tags = list(oci.tag.list_alive_tags(repository_ref._db_id)) return [Tag.for_tag(tag, self._legacy_image_id_handler) for tag in tags] def list_repository_tag_history(self, repository_ref, page=1, size=100, specific_tag_name=None, active_tags_only=False, since_time_ms=None, filter_tag_name=None): (tags, has_more) = oci.tag.list_repository_tag_history(repository_ref._db_id, page, size, specific_tag_name, active_tags_only, since_time_ms, filter_tag_name) return ([Tag.for_tag(tag, self._legacy_image_id_handler) for tag in tags], has_more) def has_expired_tag(self, repository_ref, tag_name): return bool(oci.tag.get_expired_tag(repository_ref._db_id, tag_name)) def get_most_recent_tag_lifetime_start(self, repository_refs): if (not repository_refs): return {} toSeconds = (lambda ms: ((ms // 1000) if (ms is not None) else None)) last_modified = oci.tag.get_most_recent_tag_lifetime_start([r.id for r in repository_refs]) return {repo_id: toSeconds(ms) for (repo_id, ms) in list(last_modified.items())} def get_repo_tag(self, repository_ref, tag_name, raise_on_error=False): assert isinstance(tag_name, str) tag = oci.tag.get_tag(repository_ref._db_id, tag_name) if (tag is None): if raise_on_error: raise model.TagDoesNotExist() return None return Tag.for_tag(tag, self._legacy_image_id_handler) def create_manifest_and_retarget_tag(self, repository_ref, manifest_interface_instance, tag_name, storage, raise_on_error=False, verify_quota=False): with db_disallow_replica_use(): created_manifest = oci.manifest.get_or_create_manifest(repository_ref._db_id, manifest_interface_instance, storage, for_tagging=True, raise_on_error=raise_on_error) if (created_manifest is None): return (None, None) wrapped_manifest = Manifest.for_manifest(created_manifest.manifest, self._legacy_image_id_handler) if (not created_manifest.newly_created): label_dict = next((label.asdict() for label in self.list_manifest_labels(wrapped_manifest, key_prefix='quay') if (label.key == LABEL_EXPIRY_KEY)), None) else: label_dict = next((dict(key=label_key, value=label_value) for (label_key, label_value) in created_manifest.labels_to_apply.items() if (label_key == LABEL_EXPIRY_KEY)), None) expiration_seconds = None if (label_dict is not None): try: expiration_td = convert_to_timedelta(label_dict['value']) expiration_seconds = expiration_td.total_seconds() except ValueError: pass if verify_quota: quota = namespacequota.verify_namespace_quota(repository_ref) if (quota['severity_level'] == 'Warning'): namespacequota.notify_organization_admins(repository_ref, 'quota_warning') elif (quota['severity_level'] == 'Reject'): namespacequota.notify_organization_admins(repository_ref, 'quota_error') if created_manifest.newly_created: oci.tag.create_temporary_tag_outside_timemachine(created_manifest.manifest) raise QuotaExceededException() tag = oci.tag.retarget_tag(tag_name, created_manifest.manifest, raise_on_error=raise_on_error, 
expiration_seconds=expiration_seconds) if (tag is None): return (None, None) return (wrapped_manifest, Tag.for_tag(tag, self._legacy_image_id_handler, manifest_row=created_manifest.manifest)) def retarget_tag(self, repository_ref, tag_name, manifest_or_legacy_image, storage, legacy_manifest_key, is_reversion=False): with db_disallow_replica_use(): assert (legacy_manifest_key is not None) manifest = manifest_or_legacy_image.as_manifest() manifest_id = manifest._db_id if (manifest.media_type in DOCKER_SCHEMA1_CONTENT_TYPES): try: parsed = manifest.get_parsed_manifest() except ManifestException: logger.exception('Could not parse manifest `%s` in retarget_tag', manifest._db_id) return None if (parsed.tag != tag_name): logger.debug('Rewriting manifest `%s` for tag named `%s`', manifest._db_id, tag_name) repository_id = repository_ref._db_id updated = parsed.with_tag_name(tag_name, legacy_manifest_key) assert updated.is_signed created = oci.manifest.get_or_create_manifest(repository_id, updated, storage) if (created is None): return None manifest_id = created.manifest.id label_dict = next((label.asdict() for label in self.list_manifest_labels(manifest, key_prefix='quay') if (label.key == LABEL_EXPIRY_KEY)), None) expiration_seconds = None if (label_dict is not None): try: expiration_td = convert_to_timedelta(label_dict['value']) expiration_seconds = expiration_td.total_seconds() except ValueError: pass tag = oci.tag.retarget_tag(tag_name, manifest_id, is_reversion=is_reversion, expiration_seconds=expiration_seconds) return Tag.for_tag(tag, self._legacy_image_id_handler) def delete_tag(self, repository_ref, tag_name): with db_disallow_replica_use(): deleted_tag = oci.tag.delete_tag(repository_ref._db_id, tag_name) if (deleted_tag is None): return None return Tag.for_tag(deleted_tag, self._legacy_image_id_handler) def delete_tags_for_manifest(self, manifest): with db_disallow_replica_use(): deleted_tags = oci.tag.delete_tags_for_manifest(manifest._db_id) return [ShallowTag.for_tag(tag) for tag in deleted_tags] def change_repository_tag_expiration(self, tag, expiration_date): with db_disallow_replica_use(): return oci.tag.change_tag_expiration(tag._db_id, expiration_date) def reset_security_status(self, manifest_or_legacy_image): with db_disallow_replica_use(): manifest_id = manifest_or_legacy_image.as_manifest()._db_id manifestsecuritystatus = oci.shared.get_manifestsecuritystatus_for_manifest(manifest_id) if (manifestsecuritystatus is not None): manifestsecuritystatus.delete_instance() def list_manifest_layers(self, manifest, storage, include_placements=False): try: manifest_obj = database.Manifest.get(id=manifest._db_id) except database.Manifest.DoesNotExist: logger.exception('Could not find manifest for manifest `%s`', manifest._db_id) return None try: parsed = manifest.get_parsed_manifest() except ManifestException: logger.exception('Could not parse and validate manifest `%s`', manifest._db_id) return None try: layers = self._list_manifest_layers(manifest_obj.repository_id, parsed, storage, include_placements) except Exception: logger.exception('Could not list manifest layers `%s`', manifest._db_id) return None return layers def set_tags_expiration_for_manifest(self, manifest, expiration_sec): with db_disallow_replica_use(): oci.tag.set_tag_expiration_sec_for_manifest(manifest._db_id, expiration_sec) def get_schema1_parsed_manifest(self, manifest, namespace_name, repo_name, tag_name, storage, raise_on_error=False): try: parsed = manifest.get_parsed_manifest() except ManifestException: if 
raise_on_error: raise ManifestException return None try: manifest_row = database.Manifest.get(id=manifest._db_id) except database.Manifest.DoesNotExist: if raise_on_error: raise ManifestDoesNotExist return None retriever = RepositoryContentRetriever(manifest_row.repository_id, storage) schema1 = parsed.get_schema1_manifest(namespace_name, repo_name, tag_name, retriever) if ((schema1 is None) and raise_on_error): raise ManifestException return schema1 def convert_manifest(self, manifest, namespace_name, repo_name, tag_name, allowed_mediatypes, storage): try: parsed = manifest.get_parsed_manifest() except ManifestException as e: return None try: manifest_row = database.Manifest.get(id=manifest._db_id) except database.Manifest.DoesNotExist: return None retriever = RepositoryContentRetriever(manifest_row.repository_id, storage) return parsed.convert_manifest(allowed_mediatypes, namespace_name, repo_name, tag_name, retriever) def create_manifest_with_temp_tag(self, repository_ref, manifest_interface_instance, expiration_sec, storage): with db_disallow_replica_use(): created_manifest = oci.manifest.get_or_create_manifest(repository_ref._db_id, manifest_interface_instance, storage, temp_tag_expiration_sec=expiration_sec) if (created_manifest is None): return None return Manifest.for_manifest(created_manifest.manifest, self._legacy_image_id_handler) def get_repo_blob_by_digest(self, repository_ref, blob_digest, include_placements=False): image_storage = self._get_shared_storage(blob_digest) if (image_storage is None): image_storage = oci.blob.get_repository_blob_by_digest(repository_ref._db_id, blob_digest) if (image_storage is None): return None assert (image_storage.cas_path is not None) placements = None if include_placements: placements = list(model.storage.get_storage_locations(image_storage.uuid)) return Blob.for_image_storage(image_storage, storage_path=model.storage.get_layer_path(image_storage), placements=placements) def list_parsed_manifest_layers(self, repository_ref, parsed_manifest, storage, include_placements=False): return self._list_manifest_layers(repository_ref._db_id, parsed_manifest, storage, include_placements=include_placements) def get_manifest_local_blobs(self, manifest, storage, include_placements=False): try: manifest_row = database.Manifest.get(id=manifest._db_id) except database.Manifest.DoesNotExist: return None return self._get_manifest_local_blobs(manifest, manifest_row.repository_id, storage, include_placements) def find_repository_with_garbage(self, limit_to_gc_policy_s): repo = model.oci.tag.find_repository_with_garbage(limit_to_gc_policy_s) if (repo is None): return None return RepositoryReference.for_repo_obj(repo) def lookup_repository(self, namespace_name, repo_name, kind_filter=None, raise_on_error=False, manifest_ref=None): repo = model.repository.get_repository(namespace_name, repo_name, kind_filter=kind_filter) if (repo is None): if raise_on_error: raise model.RepositoryDoesNotExist() return None state = repo.state return RepositoryReference.for_repo_obj(repo, namespace_name, repo_name, (repo.namespace_user.stripe_id is None), state=state) def is_existing_disabled_namespace(self, namespace_name): namespace = model.user.get_namespace_user(namespace_name) return ((namespace is not None) and (not namespace.enabled)) def is_namespace_enabled(self, namespace_name): namespace = model.user.get_namespace_user(namespace_name) return ((namespace is not None) and namespace.enabled) def lookup_cached_active_repository_tags(self, model_cache, repository_ref,
start_pagination_id, limit): def load_tags(): tags = self.lookup_active_repository_tags(repository_ref, start_pagination_id, limit) return [tag.asdict() for tag in tags] tags_cache_key = cache_key.for_active_repo_tags(repository_ref._db_id, start_pagination_id, limit, model_cache.cache_config) result = model_cache.retrieve(tags_cache_key, load_tags) try: return [ShallowTag.from_dict(tag_dict) for tag_dict in result] except FromDictionaryException: return self.lookup_active_repository_tags(repository_ref, start_pagination_id, limit) def get_cached_namespace_region_blacklist(self, model_cache, namespace_name): def load_blacklist(): restrictions = model.user.list_namespace_geo_restrictions(namespace_name) if (restrictions is None): return None return [restriction.restricted_region_iso_code for restriction in restrictions] blacklist_cache_key = cache_key.for_namespace_geo_restrictions(namespace_name, model_cache.cache_config) result = model_cache.retrieve(blacklist_cache_key, load_blacklist) if (result is None): return None return set(result) def get_cached_repo_blob(self, model_cache, namespace_name, repo_name, blob_digest): def load_blob(): repository_ref = self.lookup_repository(namespace_name, repo_name) if (repository_ref is None): return None blob_found = self.get_repo_blob_by_digest(repository_ref, blob_digest, include_placements=True) if (blob_found is None): return None return blob_found.asdict() blob_cache_key = cache_key.for_repository_blob(namespace_name, repo_name, blob_digest, 2, model_cache.cache_config) blob_dict = model_cache.retrieve(blob_cache_key, load_blob) try: return (Blob.from_dict(blob_dict) if (blob_dict is not None) else None) except FromDictionaryException: repository_ref = self.lookup_repository(namespace_name, repo_name) if (repository_ref is None): return None return self.get_repo_blob_by_digest(repository_ref, blob_digest, include_placements=True) def create_blob_upload(self, repository_ref, new_upload_id, location_name, storage_metadata): with db_disallow_replica_use(): repo = model.repository.lookup_repository(repository_ref._db_id) if (repo is None): return None try: upload_record = model.blob.initiate_upload_for_repo(repo, new_upload_id, location_name, storage_metadata) return BlobUpload.for_upload(upload_record, location_name=location_name) except database.Repository.DoesNotExist: return None def lookup_blob_upload(self, repository_ref, blob_upload_id): with db_disallow_replica_use(): upload_record = model.blob.get_blob_upload_by_uuid(blob_upload_id) if (upload_record is None): return None return BlobUpload.for_upload(upload_record) def update_blob_upload(self, blob_upload, uncompressed_byte_count, storage_metadata, byte_count, chunk_count, sha_state): with db_disallow_replica_use(): upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id) if (upload_record is None): return None upload_record.uncompressed_byte_count = uncompressed_byte_count upload_record.storage_metadata = storage_metadata upload_record.byte_count = byte_count upload_record.chunk_count = chunk_count upload_record.sha_state = sha_state upload_record.save() return BlobUpload.for_upload(upload_record) def delete_blob_upload(self, blob_upload): with db_disallow_replica_use(): upload_record = model.blob.get_blob_upload_by_uuid(blob_upload.upload_id) if (upload_record is not None): upload_record.delete_instance() def commit_blob_upload(self, blob_upload, blob_digest_str, blob_expiration_seconds): with db_disallow_replica_use(): upload_record = 
model.blob.get_blob_upload_by_uuid(blob_upload.upload_id) if (upload_record is None): return None repository_id = upload_record.repository_id location_obj = model.storage.get_image_location_for_name(blob_upload.location_name) blob_record = model.blob.store_blob_record_and_temp_link_in_repo(repository_id, blob_digest_str, location_obj.id, blob_upload.byte_count, blob_expiration_seconds, blob_upload.uncompressed_byte_count) upload_record.delete_instance() return Blob.for_image_storage(blob_record, storage_path=model.storage.get_layer_path(blob_record)) def mount_blob_into_repository(self, blob, target_repository_ref, expiration_sec): with db_disallow_replica_use(): storage = model.blob.temp_link_blob(target_repository_ref._db_id, blob.digest, expiration_sec) return bool(storage) def get_legacy_image(self, repository_ref, docker_image_id, storage, include_blob=False): retriever = RepositoryContentRetriever(repository_ref._db_id, storage) (manifest, layer_index) = self._resolve_legacy_image_id(docker_image_id) if (manifest is None): return None legacy_image = manifest.lookup_legacy_image(layer_index, retriever) if ((legacy_image is None) or (not include_blob)): return legacy_image return legacy_image.with_blob(self.get_repo_blob_by_digest(repository_ref, legacy_image.blob_digest, include_placements=True)) def find_manifests_for_sec_notification(self, manifest_digest): found = model.oci.manifest.find_manifests_for_sec_notification(manifest_digest) for manifest in found: (yield Manifest.for_manifest(manifest, self._legacy_image_id_handler)) def lookup_secscan_notification_severities(self, repository): return model.repository.lookup_secscan_notification_severities(repository.id) def tag_names_for_manifest(self, manifest, limit): return model.oci.tag.tag_names_for_manifest(manifest._db_id, limit) def remove_tag_from_timemachine(self, repo_ref, tag_name, manifest_ref, include_submanifests=False, is_alive=False): return oci.tag.remove_tag_from_timemachine(repo_ref.id, tag_name, manifest_ref.id, include_submanifests, is_alive) def _get_manifest_local_blobs(self, manifest, repo_id, storage, include_placements=False): parsed = manifest.get_parsed_manifest() if (parsed is None): return None local_blob_digests = list(set(parsed.local_blob_digests)) if (not len(local_blob_digests)): return [] blob_query = self._lookup_repo_storages_by_content_checksum(repo_id, local_blob_digests, storage) blobs = [] for image_storage in blob_query: placements = None if include_placements: placements = list(model.storage.get_storage_locations(image_storage.uuid)) blob = Blob.for_image_storage(image_storage, storage_path=model.storage.get_layer_path(image_storage), placements=placements) blobs.append(blob) return blobs def _list_manifest_layers(self, repo_id, parsed, storage, include_placements=False): assert (not parsed.is_manifest_list) retriever = RepositoryContentRetriever(repo_id, storage) requires_empty_blob = parsed.get_requires_empty_layer_blob(retriever) storage_map = {} blob_digests = list(parsed.local_blob_digests) if requires_empty_blob: blob_digests.append(EMPTY_LAYER_BLOB_DIGEST) if blob_digests: blob_query = self._lookup_repo_storages_by_content_checksum(repo_id, blob_digests, storage) storage_map = {blob.content_checksum: blob for blob in blob_query} layers = parsed.get_layers(retriever) if (layers is None): logger.error('Could not load layers for manifest `%s`', parsed.digest) return None manifest_layers = [] for layer in layers: if layer.is_remote: manifest_layers.append(ManifestLayer(layer, None)) 
continue digest_str = str(layer.blob_digest) if (digest_str not in storage_map): logger.error('Missing digest `%s` for manifest `%s`', layer.blob_digest, parsed.digest) return None image_storage = storage_map[digest_str] assert (image_storage.cas_path is not None) assert (image_storage.image_size is not None) placements = None if include_placements: placements = list(model.storage.get_storage_locations(image_storage.uuid)) blob = Blob.for_image_storage(image_storage, storage_path=model.storage.get_layer_path(image_storage), placements=placements) manifest_layers.append(ManifestLayer(layer, blob)) return manifest_layers def _get_shared_storage(self, blob_digest, storage=None): if (blob_digest == EMPTY_LAYER_BLOB_DIGEST): found = model.blob.get_shared_blob(EMPTY_LAYER_BLOB_DIGEST) if ((found is None) and (storage is not None)): try: return model.blob.get_or_create_shared_blob(EMPTY_LAYER_BLOB_DIGEST, EMPTY_LAYER_BYTES, storage) except ReadOnlyModeException: return None return found return None def _lookup_repo_storages_by_content_checksum(self, repo, checksums, storage): checksums = set(checksums) extra_storages = [] for checksum in list(checksums): shared_storage = self._get_shared_storage(checksum, storage=storage) if (shared_storage is not None): extra_storages.append(shared_storage) checksums.remove(checksum) found = [] if checksums: found = list(model.storage.lookup_repo_storages_by_content_checksum(repo, checksums)) return (found + extra_storages)
def BuildHON(InputFileName, OutputNetworkFile): RawTrajectories = ReadSequentialData(InputFileName) (TrainingTrajectory, TestingTrajectory) = BuildTrainingAndTesting(RawTrajectories) VPrint(len(TrainingTrajectory)) Rules = BuildRulesFastParameterFree.ExtractRules(TrainingTrajectory, MaxOrder, MinSupport) Network = BuildNetwork.BuildNetwork(Rules) DumpNetwork(Network, OutputNetworkFile) VPrint(('Done: ' + InputFileName))
def get_perturbation_results(args, data, mask_model, mask_tokenizer, base_model, base_tokenizer, span_length=10, n_perturbations=1, method='DetectGPT'): load_mask_model(args, mask_model) torch.manual_seed(0) np.random.seed(0) train_text = data['train']['text'] train_label = data['train']['label'] test_text = data['test']['text'] test_label = data['test']['label'] if (method in ['DetectGPT', 'NPR']): p_train_text = perturb_texts(args, [x for x in train_text for _ in range(n_perturbations)], mask_model, mask_tokenizer, base_tokenizer, ceil_pct=False) p_test_text = perturb_texts(args, [x for x in test_text for _ in range(n_perturbations)], mask_model, mask_tokenizer, base_tokenizer, ceil_pct=False) for _ in range((args.n_perturbation_rounds - 1)): try: (p_train_text, p_test_text) = (perturb_texts(args, p_train_text, mask_model, mask_tokenizer, base_tokenizer, ceil_pct=False), perturb_texts(args, p_test_text, mask_model, mask_tokenizer, base_tokenizer, ceil_pct=False)) except AssertionError: break assert (len(p_train_text) == (len(train_text) * n_perturbations)), f'Expected {(len(train_text) * n_perturbations)} perturbed samples, got {len(p_train_text)}' assert (len(p_test_text) == (len(test_text) * n_perturbations)), f'Expected {(len(test_text) * n_perturbations)} perturbed samples, got {len(p_test_text)}' train = [] test = [] for idx in range(len(train_text)): train.append({'text': train_text[idx], 'label': train_label[idx], 'perturbed_text': p_train_text[(idx * n_perturbations):((idx + 1) * n_perturbations)]}) for idx in range(len(test_text)): test.append({'text': test_text[idx], 'label': test_label[idx], 'perturbed_text': p_test_text[(idx * n_perturbations):((idx + 1) * n_perturbations)]}) elif (method in ['LRR']): train = [] test = [] for idx in range(len(train_text)): train.append({'text': train_text[idx], 'label': train_label[idx]}) for idx in range(len(test_text)): test.append({'text': test_text[idx], 'label': test_label[idx]}) if (method == 'DetectGPT'): for res in tqdm(train, desc='Computing log likelihoods'): p_ll = get_lls(res['perturbed_text'], base_model, base_tokenizer, args.DEVICE) res['ll'] = get_ll(res['text'], base_model, base_tokenizer, args.DEVICE) res['all_perturbed_ll'] = p_ll res['perturbed_ll_mean'] = np.mean(p_ll) res['perturbed_ll_std'] = (np.std(p_ll) if (len(p_ll) > 1) else 1) for res in tqdm(test, desc='Computing log likelihoods'): p_ll = get_lls(res['perturbed_text'], base_model, base_tokenizer, args.DEVICE) res['ll'] = get_ll(res['text'], base_model, base_tokenizer, args.DEVICE) res['all_perturbed_ll'] = p_ll res['perturbed_ll_mean'] = np.mean(p_ll) res['perturbed_ll_std'] = (np.std(p_ll) if (len(p_ll) > 1) else 1) results = {'train': train, 'test': test} elif (method == 'LRR'): for res in tqdm(train, desc='LRR Train'): res['ll'] = get_ll(res['text'], base_model, base_tokenizer, args.DEVICE) res['logrank'] = get_rank(res['text'], base_model, base_tokenizer, args.DEVICE, log=True) for res in tqdm(test, desc='LRR Test'): res['ll'] = get_ll(res['text'], base_model, base_tokenizer, args.DEVICE) res['logrank'] = get_rank(res['text'], base_model, base_tokenizer, args.DEVICE, log=True) elif (method == 'NPR'): for res in tqdm(train, desc='NPR Train'): res['logrank'] = get_rank(res['text'], base_model, base_tokenizer, args.DEVICE, log=True) res['all_perturbed_logrank'] = get_ranks(res['perturbed_text'], base_model, base_tokenizer, args.DEVICE, log=True) res['perturbed_logrank_mean'] = np.mean(res['all_perturbed_logrank']) for res in tqdm(test, desc='NPR Test'): 
res['logrank'] = get_rank(res['text'], base_model, base_tokenizer, args.DEVICE, log=True) res['all_perturbed_logrank'] = get_ranks(res['perturbed_text'], base_model, base_tokenizer, args.DEVICE, log=True) res['perturbed_logrank_mean'] = np.mean(res['all_perturbed_logrank']) else: raise ValueError(('%s method has not been implemented.' % method)) results = {'train': train, 'test': test} return results
def doctest(): extension = 'sphinx.ext.doctest' doctest_global_setup = '\nimport torch\nfrom torch import nn\n\nimport pystiche\n\nimport warnings\nwarnings.filterwarnings("ignore", category=FutureWarning)\n\nfrom unittest import mock\n\npatcher = mock.patch(\n "pystiche.enc.models.utils.ModelMultiLayerEncoder.load_state_dict_from_url"\n)\npatcher.start()\n' doctest_global_cleanup = '\nmock.patch.stopall()\n' config = dict(doctest_global_setup=doctest_global_setup, doctest_global_cleanup=doctest_global_cleanup) return (extension, config)
def test_register_action(mocker):
    from solcore import registries

    mock_gr = mocker.patch('solcore.registries.generic_register')
    name = 'pre-process'
    overwrite = False
    reason_to_exclude = None

    @registries.register_action(name, overwrite=overwrite, reason_to_exclude=reason_to_exclude)
    def solver(*args, **kwargs):
        pass

    mock_gr.assert_called_once_with(
        name=name,
        registrator_name='Action',
        registry=registries.ACTIONS_REGISTRY,
        signature=registries.ACTIONS_SIGNATURE,
        overwrite=overwrite,
        reason_to_exclude=reason_to_exclude,
    )
def color_jitter_nonrand(image, brightness=0, contrast=0, saturation=0, hue=0):
    """Apply brightness, contrast, saturation and hue jitter in a fixed order."""
    with tf.name_scope('distort_color'):

        def apply_transform(i, x, brightness, contrast, saturation, hue):
            if brightness != 0 and i == 0:
                x = tf.image.random_brightness(x, max_delta=brightness)
            elif contrast != 0 and i == 1:
                x = tf.image.random_contrast(x, lower=1 - contrast, upper=1 + contrast)
            elif saturation != 0 and i == 2:
                x = tf.image.random_saturation(x, lower=1 - saturation, upper=1 + saturation)
            elif hue != 0:
                x = tf.image.random_hue(x, max_delta=hue)
            return x

        for i in range(4):
            image = apply_transform(i, image, brightness, contrast, saturation, hue)
            image = tf.clip_by_value(image, 0.0, 1.0)
        return image
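# Usage sketch (illustrative): jittering a float image in [0, 1]; the jitter
# strengths here are placeholders, not values from the original source.
image = tf.random.uniform([224, 224, 3], minval=0.0, maxval=1.0)
jittered = color_jitter_nonrand(image, brightness=0.4, contrast=0.4,
                                saturation=0.4, hue=0.1)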
def test_dataclass_with_field_init_is_false() -> None:
    first, second, second_child, third_child, third = astroid.extract_node(
        """
    from dataclasses import dataclass, field


    @dataclass
    class First:
        a: int

    @dataclass
    class Second(First):
        a: int = field(init=False, default=1)

    @dataclass
    class SecondChild(Second):
        a: float

    @dataclass
    class ThirdChild(SecondChild):
        a: str

    @dataclass
    class Third(First):
        a: str

    First.__init__  #@
    Second.__init__  #@
    SecondChild.__init__  #@
    ThirdChild.__init__  #@
    Third.__init__  #@
    """
    )

    first_init: bases.UnboundMethod = next(first.infer())
    assert [a.name for a in first_init.args.args] == ['self', 'a']
    assert [a.value for a in first_init.args.defaults] == []

    second_init: bases.UnboundMethod = next(second.infer())
    assert [a.name for a in second_init.args.args] == ['self']
    assert [a.value for a in second_init.args.defaults] == []

    second_child_init: bases.UnboundMethod = next(second_child.infer())
    assert [a.name for a in second_child_init.args.args] == ['self', 'a']
    assert [a.value for a in second_child_init.args.defaults] == [1]

    third_child_init: bases.UnboundMethod = next(third_child.infer())
    assert [a.name for a in third_child_init.args.args] == ['self', 'a']
    assert [a.value for a in third_child_init.args.defaults] == [1]

    third_init: bases.UnboundMethod = next(third.infer())
    assert [a.name for a in third_init.args.args] == ['self', 'a']
    assert [a.value for a in third_init.args.defaults] == []
def test_dependency_from_pep_508_with_not_in_op_marker() -> None:
    name = 'jinja2 (>=2.7,<2.8); python_version not in "3.0,3.1,3.2" and extra == "export"'

    dep = Dependency.create_from_pep_508(name)

    assert dep.name == 'jinja2'
    assert str(dep.constraint) == '>=2.7,<2.8'
    assert dep.in_extras == ['export']
    assert dep.python_versions == '!=3.0.*, !=3.1.*, !=3.2.*'
    assert str(dep.marker) == 'python_version not in "3.0,3.1,3.2" and extra == "export"'
def _apply_bpe(model_path: str, in_path: str, out_path: str):
    # SentencepieceBPE only needs the `sentencepiece_model` attribute from args.
    Args = namedtuple('Args', ['sentencepiece_model'])
    args = Args(sentencepiece_model=model_path)
    tokenizer = SentencepieceBPE(args)
    with open(in_path) as f, open(out_path, 'w') as f_o:
        for s in f:
            f_o.write(tokenizer.encode(s.strip()) + '\n')
@pytest.mark.parametrize('converter_cls', [BaseConverter, Converter])
def test_structure_literal_enum(converter_cls):
    converter = converter_cls()

    class Foo(Enum):
        FOO = 1
        BAR = 2

    @define
    class ClassWithLiteral:
        literal_field: Literal[Foo.FOO] = Foo.FOO

    assert converter.structure({'literal_field': 1}, ClassWithLiteral) == ClassWithLiteral(Foo.FOO)
class DistModel(BaseModel):
    def name(self):
        return self.model_name

    def initialize(self, model='net-lin', net='alex', colorspace='Lab',
                   pnet_rand=False, pnet_tune=False, model_path=None,
                   use_gpu=True, printNet=False, spatial=False,
                   is_train=False, lr=0.0001, beta1=0.5, version='0.1',
                   gpu_ids=[0]):
        BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids)

        self.model = model
        self.net = net
        self.is_train = is_train
        self.spatial = spatial
        self.gpu_ids = gpu_ids
        self.model_name = '%s [%s]' % (model, net)

        if self.model == 'net-lin':  # pretrained network plus learned linear layer
            self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune,
                                        pnet_type=net, use_dropout=True,
                                        spatial=spatial, version=version, lpips=True)
            kw = {}
            if not use_gpu:
                kw['map_location'] = 'cpu'
            if model_path is None:
                import inspect
                model_path = os.path.abspath(
                    os.path.join(inspect.getfile(self.initialize), '..',
                                 'weights/v%s/%s.pth' % (version, net)))
            if not is_train:
                print('Loading model from: %s' % model_path)
                self.net.load_state_dict(torch.load(model_path, **kw), strict=False)
                if use_gpu:
                    self.net.cuda()
        elif self.model == 'net':  # pretrained network only
            self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False)
        elif self.model in ['L2', 'l2']:
            self.net = networks.L2(use_gpu=use_gpu, colorspace=colorspace)
            self.model_name = 'L2'
        elif self.model in ['DSSIM', 'dssim', 'SSIM', 'ssim']:
            self.net = networks.DSSIM(use_gpu=use_gpu, colorspace=colorspace)
            self.model_name = 'SSIM'
        else:
            raise ValueError('Model [%s] not recognized.' % self.model)

        self.parameters = list(self.net.parameters())

        if self.is_train:
            self.rankLoss = networks.BCERankingLoss()
            self.parameters += list(self.rankLoss.net.parameters())
            self.lr = lr
            self.old_lr = lr
            self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999))
        else:
            self.net.eval()

        if use_gpu:
            self.net.to(gpu_ids[0])
            self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids)
            if self.is_train:
                self.rankLoss = self.rankLoss.to(device=gpu_ids[0])

        if printNet:
            print(' Networks initialized ')
            networks.print_network(self.net)
            print('')

    def forward(self, in0, in1, retPerLayer=False):
        return self.net.forward(in0, in1, retPerLayer=retPerLayer)

    def optimize_parameters(self):
        self.forward_train()
        self.optimizer_net.zero_grad()
        self.backward_train()
        self.optimizer_net.step()
        self.clamp_weights()

    def clamp_weights(self):
        # Keep the learned linear weights non-negative (1x1 convolutions only).
        for module in self.net.modules():
            if hasattr(module, 'weight') and getattr(module, 'kernel_size', None) == (1, 1):
                module.weight.data = torch.clamp(module.weight.data, min=0)

    def set_input(self, data):
        self.input_ref = data['ref']
        self.input_p0 = data['p0']
        self.input_p1 = data['p1']
        self.input_judge = data['judge']
        if self.use_gpu:
            self.input_ref = self.input_ref.to(device=self.gpu_ids[0])
            self.input_p0 = self.input_p0.to(device=self.gpu_ids[0])
            self.input_p1 = self.input_p1.to(device=self.gpu_ids[0])
            self.input_judge = self.input_judge.to(device=self.gpu_ids[0])
        self.var_ref = Variable(self.input_ref, requires_grad=True)
        self.var_p0 = Variable(self.input_p0, requires_grad=True)
        self.var_p1 = Variable(self.input_p1, requires_grad=True)

    def forward_train(self):
        self.d0 = self.forward(self.var_ref, self.var_p0)
        self.d1 = self.forward(self.var_ref, self.var_p1)
        self.acc_r = self.compute_accuracy(self.d0, self.d1, self.input_judge)
        self.var_judge = Variable(1.0 * self.input_judge).view(self.d0.size())
        self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge * 2.0 - 1.0)
        return self.loss_total

    def backward_train(self):
        torch.mean(self.loss_total).backward()

    def compute_accuracy(self, d0, d1, judge):
        # judge: fraction of human raters that preferred p1 over p0.
        d1_lt_d0 = (d1 < d0).cpu().data.numpy().flatten()
        judge_per = judge.cpu().numpy().flatten()
        return d1_lt_d0 * judge_per + (1 - d1_lt_d0) * (1 - judge_per)

    def get_current_errors(self):
        retDict = OrderedDict([('loss_total', self.loss_total.data.cpu().numpy()),
                               ('acc_r', self.acc_r)])
        for key in retDict.keys():
            retDict[key] = np.mean(retDict[key])
        return retDict

    def get_current_visuals(self):
        zoom_factor = 256 / self.var_ref.data.size()[2]
        ref_img = util.tensor2im(self.var_ref.data)
        p0_img = util.tensor2im(self.var_p0.data)
        p1_img = util.tensor2im(self.var_p1.data)
        ref_img_vis = zoom(ref_img, [zoom_factor, zoom_factor, 1], order=0)
        p0_img_vis = zoom(p0_img, [zoom_factor, zoom_factor, 1], order=0)
        p1_img_vis = zoom(p1_img, [zoom_factor, zoom_factor, 1], order=0)
        return OrderedDict([('ref', ref_img_vis), ('p0', p0_img_vis), ('p1', p1_img_vis)])

    def save(self, path, label):
        if self.use_gpu:
            self.save_network(self.net.module, path, '', label)
        else:
            self.save_network(self.net, path, '', label)
        self.save_network(self.rankLoss.net, path, 'rank', label)

    def update_learning_rate(self, nepoch_decay):
        lrd = self.lr / nepoch_decay
        lr = self.old_lr - lrd
        for param_group in self.optimizer_net.param_groups:
            param_group['lr'] = lr
        print('update lr decay: %f -> %f' % (self.old_lr, lr))
        self.old_lr = lr
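# Usage sketch (illustrative; relies on the class above and on the default
# pretrained 'net-lin'/'alex' weights being available locally).
model = DistModel()
model.initialize(model='net-lin', net='alex', use_gpu=torch.cuda.is_available())
img0 = torch.rand(1, 3, 64, 64) * 2 - 1  # inputs scaled to [-1, 1]
img1 = torch.rand(1, 3, 64, 64) * 2 - 1
d = model.forward(img0, img1)  # perceptual distance per image pair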
def load_cpp_ext(ext_name):
    root_dir = os.path.split(__file__)[0]
    src_dir = os.path.join(root_dir, 'cpp_ht2im')
    tar_dir = os.path.join(src_dir, 'build', ext_name)
    os.makedirs(tar_dir, exist_ok=True)
    srcs = glob(f'{src_dir}/*.cu') + glob(f'{src_dir}/*.cpp')
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        from torch.utils.cpp_extension import load
        ext = load(
            name=ext_name,
            sources=srcs,
            extra_cflags=['-O3'],
            extra_cuda_cflags=[],
            build_directory=tar_dir,
        )
    return ext
class LockTimeEdit(QWidget):
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        hbox = QHBoxLayout()
        self.setLayout(hbox)
        hbox.setContentsMargins(0, 0, 0, 0)
        hbox.setSpacing(0)

        self.locktime_raw_e = LockTimeRawEdit(self)
        self.locktime_height_e = LockTimeHeightEdit(self)
        self.locktime_date_e = LockTimeDateEdit(self)
        self.editors = [self.locktime_raw_e, self.locktime_height_e, self.locktime_date_e]

        self.combo = QComboBox()
        options = [_('Raw'), _('Block height'), _('Date')]
        option_index_to_editor_map = {
            0: self.locktime_raw_e,
            1: self.locktime_height_e,
            2: self.locktime_date_e,
        }
        default_index = 1
        self.combo.addItems(options)

        def on_current_index_changed(i):
            # Hide all editors, then show the one matching the combo selection,
            # carrying the locktime over when the new editor can represent it.
            for w in self.editors:
                w.setVisible(False)
                w.setEnabled(False)
            prev_locktime = self.editor.get_locktime()
            self.editor = option_index_to_editor_map[i]
            if self.editor.is_acceptable_locktime(prev_locktime):
                self.editor.set_locktime(prev_locktime)
            self.editor.setVisible(True)
            self.editor.setEnabled(True)

        self.editor = option_index_to_editor_map[default_index]
        self.combo.currentIndexChanged.connect(on_current_index_changed)
        self.combo.setCurrentIndex(default_index)
        on_current_index_changed(default_index)

        hbox.addWidget(self.combo)
        for w in self.editors:
            hbox.addWidget(w)
        hbox.addStretch(1)

    def get_locktime(self) -> Optional[int]:
        return self.editor.get_locktime()

    def set_locktime(self, x: Any) -> None:
        self.editor.set_locktime(x)
class LocalResource(SchemaBase):
    cores: PositiveInt = Field(4, description='The number of cores to be allocated to the computation.')
    memory: PositiveInt = Field(10, description='The amount of memory that should be allocated to the computation in GB.')

    def local_options(self) -> Dict[str, int]:
        """Return the resource settings in the format expected downstream."""
        return {'memory': self.memory, 'ncores': self.cores}

    def divide_resource(self, n_tasks: int) -> 'LocalResource':
        """Split the resource evenly between `n_tasks` parallel workers."""
        if n_tasks == 1:
            return self
        cores = int(self.cores / n_tasks)
        memory = int(self.memory / n_tasks)
        return LocalResource(cores=cores, memory=memory)
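# Usage sketch (illustrative): splitting one machine's resources between two
# concurrent tasks with the class defined above.
resource = LocalResource(cores=8, memory=16)
per_task = resource.divide_resource(n_tasks=2)
assert per_task.local_options() == {'memory': 8, 'ncores': 4}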
@pytest.mark.parametrize(
    'file_format, filename, content',
    [
        ('json', 'foo.json', '{"a":\n'),
        ('yaml', 'foo.yaml', 'a: {b\n'),
        ('yaml', 'foo.yaml', 'a: b\nc\n'),
        ('json5', 'foo.json5', '{"a":\n'),
        ('toml', 'foo.toml', 'abc\n'),
    ],
)
def test_instanceloader_invalid_data(tmp_path, file_format, filename, content, open_wide):
    if file_format == 'json5' and not JSON5_ENABLED:
        pytest.skip("test requires 'json5' support")
    if file_format == 'toml' and not TOML_ENABLED:
        pytest.skip("test requires 'toml' support")

    f = tmp_path / filename
    f.write_text(content)
    loader = InstanceLoader(open_wide(f))
    data = list(loader.iter_files())

    assert len(data) == 1
    assert isinstance(data[0], tuple)
    assert len(data[0]) == 2
    assert data[0][0] == str(f)
    assert isinstance(data[0][1], FailedFileLoadError)
class ExecutionUsage(Usage):
    def __init__(self, asynchronous=False):
        super().__init__(asynchronous)
        self._recorder = dict()

    def render(self, flush: bool = False) -> dict:
        records = self._recorder
        if flush:
            self._recorder = dict()
        return records

    def usage_variable(self, name, factory, var_type):
        return ExecutionUsageVariable(name, factory, var_type, self)

    def _execute_and_record(self, variable):
        # Every variable created by this driver is executed immediately and
        # kept so that `render` can report what actually ran.
        variable.execute()
        self._recorder[variable.name] = variable
        return variable

    def init_artifact(self, name, factory):
        return self._execute_and_record(super().init_artifact(name, factory))

    def init_artifact_collection(self, name, factory):
        return self._execute_and_record(super().init_artifact_collection(name, factory))

    def init_metadata(self, name, factory):
        return self._execute_and_record(super().init_metadata(name, factory))

    def init_format(self, name, factory, ext=None):
        return self._execute_and_record(super().init_format(name, factory, ext=ext))

    def import_from_format(self, name, semantic_type, variable, view_type=None):
        return self._execute_and_record(
            super().import_from_format(name, semantic_type, variable, view_type=view_type))

    def merge_metadata(self, name, *variables):
        return self._execute_and_record(super().merge_metadata(name, *variables))

    def get_metadata_column(self, name, column_name, variable):
        return self._execute_and_record(super().get_metadata_column(name, column_name, variable))

    def view_as_metadata(self, name, artifact_variable):
        return self._execute_and_record(super().view_as_metadata(name, artifact_variable))

    def action(self, action, input_opts, output_opts):
        variables = super().action(action, input_opts, output_opts)
        for variable in variables:
            self._execute_and_record(variable)
        return variables
def decode_train(example):
    # Schema of the training TFRecords (TF1-style parsing).
    feature_spec = {
        'label': tf.FixedLenFeature([], tf.int64),
        'FEA_SrcItemId': tf.FixedLenFeature([], tf.string),
        'FEA_SrcItemCp': tf.FixedLenFeature([], tf.string),
        'FEA_SrcItemFirstCat': tf.FixedLenFeature([], tf.string),
        'FEA_SrcItemSecondCat': tf.FixedLenFeature([], tf.string),
        'FEA_ItemId': tf.FixedLenFeature([], tf.string),
        'FEA_ItemCp': tf.FixedLenFeature([], tf.string),
        'FEA_ItemFirstCat': tf.FixedLenFeature([], tf.string),
        'FEA_ItemSecondCat': tf.FixedLenFeature([], tf.string),
        'FEA_ItemDuration': tf.FixedLenFeature([], tf.int64),
        'FEA_VIDAGE': tf.FixedLenFeature([], tf.string),
        'FEA_ALGID': tf.FixedLenFeature([], tf.string),
        'FEA_SrcItemAgeDay': tf.FixedLenFeature([], tf.string),
        'FEA_SrcItemDuration': tf.FixedLenFeature([], tf.int64),
        'FEA_ItemAgeDay': tf.FixedLenFeature([], tf.string),
        'FEA_UserGroup': tf.FixedLenFeature([], tf.string),
        'FEA_UserAge': tf.FixedLenFeature([], tf.int64),
        'FEA_UserCold': tf.FixedLenFeature([], tf.string),
        'FEA_UserEducation': tf.FixedLenFeature([], tf.string),
        'FEA_UserSex': tf.FixedLenFeature([], tf.string),
        'FEA_UserIndustry': tf.FixedLenFeature([], tf.string),
        'FEA_UserStatus': tf.FixedLenFeature([], tf.string),
        'FEA_UserCityGrade': tf.FixedLenFeature([], tf.string),
        'FEA_SrcItemKeywords': tf.VarLenFeature(tf.string),
        'FEA_SrcItemTag': tf.VarLenFeature(tf.string),
        'FEA_ItemXfTag': tf.VarLenFeature(tf.string),
        'FEA_ItemKeywords': tf.VarLenFeature(tf.string),
        'FEA_UserTag': tf.VarLenFeature(tf.string),
        'FEA_UserRTagPos': tf.VarLenFeature(tf.string),
        'FEA_UserRTagNeg': tf.VarLenFeature(tf.string),
        'FEA_UserPvReal': tf.VarLenFeature(tf.string),
        'FEA_UserPvOff': tf.VarLenFeature(tf.string),
        'FEA_UserPvMonth': tf.VarLenFeature(tf.string),
        'FEA_Order': tf.FixedLenFeature([], tf.string),
        'FEA_Uid': tf.FixedLenFeature([], tf.string),
        'FEA_Cquality': tf.FixedLenFeature([], tf.int64),
    }
    features = tf.parse_single_example(example, features=feature_spec)
    # Every parsed feature except the label is passed through as model input.
    other_features = {k: v for k, v in features.items() if k != 'label'}
    labels = features['label']
    return other_features, labels, features['FEA_Uid']
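# Usage sketch (illustrative): wiring decode_train into a TF1-style input
# pipeline; the TFRecord filename and batch sizes are placeholders.
dataset = tf.data.TFRecordDataset(['train.tfrecord'])
dataset = dataset.map(decode_train).shuffle(1024).batch(256)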
def test_get_trail():
    exc = Exception()
    pytest.raises(AttributeError, lambda: _raw_trail(exc))
    assert list(get_trail(exc)) == []

    append_trail(exc, 'foo')
    assert list(get_trail(exc)) == ['foo']

    new_exc = Exception()
    append_trail(new_exc, 'bar')
    assert list(get_trail(new_exc)) == ['bar']
def random_in_unit_spherical_caps(shape, origin, importance_sampled_list):
    # Pick, per sample, one of the importance-sampled objects uniformly at random.
    l = len(importance_sampled_list)
    mask = (np.random.rand(shape) * l).astype(int)

    mask_list = [None] * l
    cosmax_list = [None] * l
    ax_u_list = [None] * l
    ax_v_list = [None] * l
    ax_w_list = [None] * l

    for i in range(l):
        # Build an orthonormal basis (u, v, w) with w pointing at the object.
        ax_w_list[i] = (importance_sampled_list[i].center - origin).normalize()
        a = vec3.where(np.abs(ax_w_list[i].x) > 0.9, vec3(0, 1, 0), vec3(1, 0, 0))
        ax_v_list[i] = ax_w_list[i].cross(a).normalize()
        ax_u_list[i] = ax_w_list[i].cross(ax_v_list[i])
        mask_list[i] = (mask == i)
        # Cosine of the half-angle of the cap subtended by the bounding sphere.
        target_distance = np.sqrt((importance_sampled_list[i].center - origin)
                                  .dot(importance_sampled_list[i].center - origin))
        cosmax_list[i] = np.sqrt(
            1 - np.clip(importance_sampled_list[i].bounded_sphere_radius / target_distance, 0.0, 1.0) ** 2)

    phi = np.random.rand(shape) * 2 * np.pi
    r2 = np.random.rand(shape)

    cosmax = np.select(mask_list, cosmax_list)
    ax_w = vec3.select(mask_list, ax_w_list)
    ax_v = vec3.select(mask_list, ax_v_list)
    ax_u = vec3.select(mask_list, ax_u_list)

    # Sample a direction uniformly within the selected spherical cap.
    z = 1.0 + r2 * (cosmax - 1.0)
    x = np.cos(phi) * np.sqrt(1.0 - z ** 2)
    y = np.sin(phi) * np.sqrt(1.0 - z ** 2)
    ray_dir = ax_u * x + ax_v * y + ax_w * z

    # Mixture PDF: each cap contributes 1 / (its solid angle) wherever the
    # sampled direction falls inside it.
    PDF = 0.0
    for i in range(l):
        PDF += np.where(ray_dir.dot(ax_w_list[i]) > cosmax_list[i],
                        1 / ((1 - cosmax_list[i]) * 2 * np.pi), 0.0)
    PDF = PDF / l
    return ray_dir, PDF
class MultiStepLrUpdater(BaseLrUpdater):
    def __init__(self, milestones=(), gamma=0.1, **kwargs):
        assert isinstance(milestones, (tuple, list))
        self.milestones = milestones
        self.gamma = gamma
        super().__init__(**kwargs)

    def get_lr(self, base_lr, cur_step, steps):
        # Decay by `gamma` once per milestone already passed.
        num_steps = np.sum(cur_step >= np.asarray(self.milestones))
        lr = base_lr * self.gamma ** num_steps
        return lr
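# Standalone check of the schedule math in get_lr above: with milestones
# (30, 60) and gamma 0.1, a base lr of 0.1 steps down at each milestone.
for step, expected in [(0, 0.1), (30, 0.01), (60, 0.001)]:
    n = np.sum(step >= np.asarray((30, 60)))
    assert np.isclose(0.1 * 0.1 ** n, expected)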
def _x_and_y_from_pubkey_bytes(pubkey: bytes) -> Tuple[int, int]:
    assert isinstance(pubkey, bytes), f'pubkey must be bytes, not {type(pubkey)}'
    pubkey_ptr = create_string_buffer(64)
    ret = _libsecp256k1.secp256k1_ec_pubkey_parse(
        _libsecp256k1.ctx, pubkey_ptr, pubkey, len(pubkey))
    if not ret:
        raise InvalidECPointException('public key could not be parsed or is invalid')

    # Re-serialize in uncompressed SEC form: 0x04 || x (32 bytes) || y (32 bytes).
    pubkey_serialized = create_string_buffer(65)
    pubkey_size = c_size_t(65)
    _libsecp256k1.secp256k1_ec_pubkey_serialize(
        _libsecp256k1.ctx, pubkey_serialized, byref(pubkey_size),
        pubkey_ptr, SECP256K1_EC_UNCOMPRESSED)
    pubkey_serialized = bytes(pubkey_serialized)
    assert pubkey_serialized[0] == 0x04, pubkey_serialized

    x = int.from_bytes(pubkey_serialized[1:33], byteorder='big', signed=False)
    y = int.from_bytes(pubkey_serialized[33:65], byteorder='big', signed=False)
    return x, y
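# Minimal sketch of the same decoding without libsecp256k1, assuming the key
# is already in uncompressed SEC form; unlike the round-trip above, this
# shortcut does not validate that the point actually lies on the curve.
def x_y_from_uncompressed(pubkey: bytes):
    assert len(pubkey) == 65 and pubkey[0] == 0x04
    return (int.from_bytes(pubkey[1:33], 'big'),
            int.from_bytes(pubkey[33:65], 'big'))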
def Get_Visual_Response(generator, num_img, layer_id):
    LATENT_DIM = 512
    noise_z = torch.randn(num_img, LATENT_DIM)
    if torch.cuda.is_available():
        noise_z = noise_z.to('cuda')

    layer_response = Get_Layer_Output(generator, noise_z, layer_id)
    img_tensor, _ = generator([noise_z])

    generated_image = []
    for i in range(num_img):
        pil_img = Convert_Tensor_To_Image(img_tensor[i])
        np_img = np.array(pil_img)
        generated_image.append(np_img)
    return layer_response, generated_image
def simulate_full_curve(parameters, Geff, Tcell, ivcurve_pnts=1000):
    # Adjust the single-diode equation parameters for irradiance and temperature.
    sde_args = pvsystem.calcparams_desoto(
        Geff, Tcell,
        alpha_sc=parameters['alpha_sc'],
        a_ref=parameters['a_ref'],
        I_L_ref=parameters['I_L_ref'],
        I_o_ref=parameters['I_o_ref'],
        R_sh_ref=parameters['R_sh_ref'],
        R_s=parameters['R_s'],
    )
    kwargs = {
        'breakdown_factor': parameters['breakdown_factor'],
        'breakdown_exp': parameters['breakdown_exp'],
        'breakdown_voltage': parameters['breakdown_voltage'],
    }
    v_oc = singlediode.bishop88_v_from_i(0.0, *sde_args, **kwargs)
    # Sweep the diode voltage from just above breakdown up to open circuit.
    vd = np.linspace(0.99 * kwargs['breakdown_voltage'], v_oc, ivcurve_pnts)
    ivcurve_i, ivcurve_v, _ = singlediode.bishop88(vd, *sde_args, **kwargs)
    return pd.DataFrame({'i': ivcurve_i, 'v': ivcurve_v})
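# Usage sketch: the parameter values below are placeholders standing in for a
# real De Soto + reverse-bias fit, chosen only to show the expected dict shape.
parameters = {
    'alpha_sc': 0.004, 'a_ref': 1.6, 'I_L_ref': 6.0, 'I_o_ref': 5e-10,
    'R_sh_ref': 300.0, 'R_s': 0.4,
    'breakdown_factor': 2e-3, 'breakdown_exp': 3.0, 'breakdown_voltage': -15.0,
}
curve = simulate_full_curve(parameters, Geff=1000, Tcell=25)
p_mp = (curve['i'] * curve['v']).max()  # maximum power along the curve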
def spatial_svd_cp_example(config: argparse.Namespace):
    data_pipeline = ImageNetDataPipeline(config)

    model = models.resnet18(pretrained=True)
    if config.use_cuda:
        model.to(torch.device('cuda'))
    model.eval()

    accuracy = data_pipeline.evaluate(model, use_cuda=config.use_cuda)
    logger.info('Original Model Top-1 accuracy = %.2f', accuracy)

    logger.info('Starting Spatial SVD')
    compressed_model, stats = aimet_spatial_svd(model=model, evaluator=data_pipeline.evaluate)
    logger.info(stats)
    with open(os.path.join(config.logdir, 'log.txt'), 'w') as outfile:
        outfile.write('%s\n\n' % stats)
    accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)
    logger.info('Spatial SVD Model Top-1 accuracy = %.2f', accuracy)

    logger.info('Starting Model Finetuning')
    data_pipeline.finetune(compressed_model)
    accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)
    logger.info('Finetuned SVD Model Top-1 accuracy = %.2f', accuracy)

    logger.info('Starting Channel Pruning')
    data_loader = ImageNetDataLoader(is_training=True, images_dir=config.dataset_dir,
                                     image_size=224).data_loader
    compressed_model, stats = aimet_channel_pruning(model=compressed_model,
                                                    evaluator=data_pipeline.evaluate,
                                                    data_loader=data_loader)
    logger.info(stats)
    # Append so the Spatial SVD stats written above are not overwritten.
    with open(os.path.join(config.logdir, 'log.txt'), 'a') as outfile:
        outfile.write('%s\n\n' % stats)
    accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)
    logger.info('After Model Channel Pruning, Top-1 accuracy = %.2f', accuracy)
    logger.info('Model Channel Pruning Complete')

    logger.info('Starting Model Finetuning')
    data_pipeline.finetune(compressed_model)
    accuracy = data_pipeline.evaluate(compressed_model, use_cuda=config.use_cuda)
    logger.info('Finetuned Compressed Model Top-1 accuracy = %.2f', accuracy)
    logger.info('Model Finetuning Complete')

    torch.save(compressed_model, os.path.join(config.logdir, 'compressed_model.pth'))
class TestHTTPJSONCollector(CollectorTestCase):
    def setUp(self):
        config = get_collector_config('HTTPJSONCollector', {})
        self.collector = HTTPJSONCollector(config, None)

    def test_import(self):
        self.assertTrue(HTTPJSONCollector)

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        urlopen_mock = patch('urllib2.urlopen',
                             Mock(return_value=self.getFixture('stats.json')))
        urlopen_mock.start()
        self.collector.collect()
        urlopen_mock.stop()

        metrics = self.getPickledResults('real_stat.pkl')
        self.assertPublishedMany(publish_mock, metrics)