code
stringlengths
281
23.7M
class Command(BaseCommand):
    """One-off management command: email every user who bookmarked a
    practice/PCN/CCG that prescribed Esmya (BNF 0604012U0%) since
    2019-09-01 a drug-safety alert."""

    def handle(self, *args, **kwargs):
        # BigQuery: distinct practice ids with matching prescribing rows.
        sql = "\n SELECT\n practice AS items\n FROM\n hscic.normalised_prescribing\n WHERE\n bnf_code LIKE '0604012U0%' AND month >= '2019-09-01'\n GROUP BY practice\n ORDER BY practice\n "
        practice_ids = [row[0] for row in Client().query(sql).rows]
        practices = Practice.objects.filter(pk__in=practice_ids)
        # Parent organisations of the affected practices (sets may contain None).
        pcns = set((practice.pcn for practice in practices))
        ccgs = set((practice.ccg for practice in practices))
        # NOTE(review): the URL literal below was truncated during extraction
        # (unterminated string); restore the analyse-page base URL from VCS
        # before running — the org code(s) are appended to it below.
        url = '
        for practice in practices:
            ctx = {'url': (url + practice.code), 'org_name': practice.name}
            for bm in practice.orgbookmark_set.all():
                self.send_email(bm.user.email, ctx)
        for pcn in pcns:
            if (pcn is None):
                continue
            # One email per PCN bookmark, linking all its affected practices.
            pcn_practices = practices.filter(pcn=pcn)
            ctx = {'url': (url + ','.join((p.code for p in pcn_practices))), 'org_name': pcn.name}
            for bm in pcn.orgbookmark_set.all():
                self.send_email(bm.user.email, ctx)
        for ccg in ccgs:
            if (ccg is None):
                continue
            # Same treatment for CCG-level bookmarks.
            ccg_practices = practices.filter(ccg=ccg)
            ctx = {'url': (url + ','.join((p.code for p in ccg_practices))), 'org_name': ccg.name}
            for bm in ccg.orgbookmark_set.all():
                self.send_email(bm.user.email, ctx)

    def send_email(self, to_addr, ctx):
        """Render the esmya templates with *ctx* and send a multipart
        (text + HTML) alert email to *to_addr*."""
        print('Sending to:', to_addr)
        html_template = get_template('esmya.html')
        html = html_template.render(ctx)
        text_template = get_template('esmya.txt')
        text = text_template.render(ctx)
        subject = 'Esmya (ulipristal acetate) - Drug Safety Alert'
        from_addr = settings.DEFAULT_FROM_EMAIL
        msg = EmailMultiAlternatives(subject, text, from_addr, [to_addr])
        msg.attach_alternative(html, 'text/html')
        msg.send()
def msgheaders(msg):
    """Return the values of the list-management headers present on *msg*.

    Absent (or falsy) headers are skipped; result order follows the fixed
    header list below.
    """
    wanted = ('Archived-At', 'Link', 'List-Archive', 'List-ID',
              'List-Help', 'List-Owner', 'List-Post', 'List-Subscribe',
              'List-Unsubscribe', 'List-Unsubscribe-Post')
    return [value for value in (msg.get(name) for name in wanted) if value]
# NOTE(review): this bare call almost certainly lost a leading '@' in
# extraction — it reads as a decorator registering the class below
# (i.e. `@_ARCH_REGISTRY.register()`); confirm against VCS.
_ARCH_REGISTRY.register()
class GeneralizedRCNN(_GeneralizedRCNN):
    """GeneralizedRCNN variant whose export/quantisation hooks are looked
    up from config-keyed registries, so behaviour is selected via cfg."""

    def prepare_for_export(self, cfg, *args, **kwargs):
        # Dispatch to the implementation registered under the cfg key.
        func = RCNN_PREPARE_FOR_EXPORT_REGISTRY.get(cfg.RCNN_PREPARE_FOR_EXPORT)
        return func(self, cfg, *args, **kwargs)

    def prepare_for_quant(self, cfg, *args, **kwargs):
        # Same registry-dispatch pattern for quantisation preparation.
        func = RCNN_PREPARE_FOR_QUANT_REGISTRY.get(cfg.RCNN_PREPARE_FOR_QUANT)
        return func(self, cfg, *args, **kwargs)

    def custom_prepare_fx(self, cfg, is_qat, example_input=None):
        # FX graph-mode quantisation uses the shared default helper.
        return default_rcnn_custom_prepare_fx(self, cfg, is_qat, example_input)

    def _cast_model_to_device(self, device):
        return _cast_detection_model(self, device)
class TestTorchVisionDataModule(unittest.TestCase):
    """Unit tests for TorchVisionDataModule: hydra instantiation, basic
    dataloading, and train/val splitting against a local MNIST download."""

    # Temp directory holding the MNIST download for the whole class.
    data_path: str

    @classmethod
    def setUpClass(cls) -> None:
        # BUG FIX: setUpClass must be a classmethod — unittest invokes it as
        # cls.setUpClass(), so the undecorated version raised TypeError.
        data_path_ctx = TemporaryDirectory()
        cls.addClassCleanup(data_path_ctx.cleanup)
        cls.data_path = data_path_ctx.name
        MNIST(cls.data_path, train=True, download=True)
        MNIST(cls.data_path, train=False, download=True)

    def test_init_datamodule_with_hydra(self) -> None:
        # Instantiating from a hydra config yields the right module type.
        test_conf = {
            '_target_': 'torchrecipes.vision.core.datamodule.torchvision_data_module.TorchVisionDataModule',
            'datasets': self._get_datasets_config(download=False),
            'batch_size': 32,
            'drop_last': False,
            'normalize': False,
            'num_workers': 16,
            'pin_memory': False,
            'seed': 42,
            'val_split': None,
        }
        torchvision_data_module = hydra.utils.instantiate(test_conf)
        self.assertIsInstance(torchvision_data_module, TorchVisionDataModule)

    def test_creating_datamodule(self) -> None:
        torchvision_data_module = self.get_torchvision_data_module()
        self.assertIsInstance(torchvision_data_module, TorchVisionDataModule)
        torchvision_data_module.setup()
        dataloder = torchvision_data_module.train_dataloader()
        (img, _) = next(iter(dataloder))
        # batch_size=1, single channel, resized to 64x64 by the transform.
        self.assertEqual(img.size(), torch.Size([1, 1, 64, 64]))

    def test_val_split(self) -> None:
        # Integer split: exact number of validation examples carved off 60k.
        torchvision_data_module = self.get_torchvision_data_module(val_split=100)
        torchvision_data_module.setup()
        self.assertEqual(len(torchvision_data_module.datasets['train']), 59900)
        self.assertEqual(len(torchvision_data_module.datasets['val']), 100)
        # Fractional split: 10% of the 60k training examples.
        torchvision_data_module = self.get_torchvision_data_module(val_split=0.1)
        torchvision_data_module.setup()
        self.assertEqual(len(torchvision_data_module.datasets['train']), 54000)
        self.assertEqual(len(torchvision_data_module.datasets['val']), 6000)

    def get_datasets_from_config(self) -> Dict[str, Optional[Union[Subset[VisionDataset], VisionDataset]]]:
        """Build concrete datasets from the config dict; None splits pass through."""
        datasets_conf = self._get_datasets_config(download=False)
        datasets = {}
        for (split, dataset_conf) in datasets_conf.items():
            if dataset_conf is None:
                datasets[split] = None
            else:
                dataset_conf = dict(dataset_conf)
                dataset_conf = build_transforms_from_dataset_config(dataset_conf)
                datasets[split] = instantiate(dataset_conf, _recursive_=False)
        return datasets

    def get_torchvision_data_module(self, batch_size: int = 1, val_split: Optional[Union[int, float]] = None) -> TorchVisionDataModule:
        """Construct a TorchVisionDataModule over the configured datasets."""
        datasets = self.get_datasets_from_config()
        return TorchVisionDataModule(datasets=datasets, val_split=val_split, batch_size=batch_size)

    def _get_datasets_config(self, download: bool = False) -> Dict[str, Any]:
        """Return the hydra dataset config: train/test MNIST with Resize(64)+ToTensor, no val."""
        return {
            'train': {'_target_': 'torchvision.datasets.mnist.MNIST', 'train': True, 'root': self.data_path, 'download': download, 'transform': [{'_target_': 'torchvision.transforms.Resize', 'size': 64}, {'_target_': 'torchvision.transforms.ToTensor'}]},
            'val': None,
            'test': {'_target_': 'torchvision.datasets.mnist.MNIST', 'train': False, 'root': self.data_path, 'download': download, 'transform': [{'_target_': 'torchvision.transforms.Resize', 'size': 64}, {'_target_': 'torchvision.transforms.ToTensor'}]},
        }
class Office(models.Model):
    """Reference row for a federal office, identified by office_code within
    its agency/sub-tier, with role flags for award and funding activity."""

    # Row bookkeeping timestamps.
    created_at = models.DateTimeField(auto_now_add=True, blank=True, null=True)
    updated_at = models.DateTimeField(auto_now=True, null=True)
    # Office identity and parent-organisation codes.
    office_code = models.TextField(null=False)
    office_name = models.TextField(null=True)
    sub_tier_code = models.TextField(null=False)
    agency_code = models.TextField(null=False)
    # Role flags: which award/funding activities this office performs.
    contract_awards_office = models.BooleanField()
    contract_funding_office = models.BooleanField()
    financial_assistance_awards_office = models.BooleanField()
    financial_assistance_funding_office = models.BooleanField()

    class Meta():
        managed = True
        db_table = 'office'

    def __str__(self):
        return ('%s' % self.office_name)
@pytest.mark.django_db
def test_invalid_award_type_codes(client, monkeypatch, helpers, elasticsearch_award_index, awards_and_transactions):
    """A request containing an award_type_code outside the endpoint's allowed
    set must return 400 with a descriptive detail message.

    NOTE(review): the decorator lost its `@pytest.mark` prefix in extraction
    (only `.django_db` survived) — restored here; confirm against VCS.
    """
    setup_elasticsearch_test(monkeypatch, elasticsearch_award_index)
    resp = helpers.post_for_spending_endpoint(client, url, award_type_codes=['ZZ', '08'], def_codes=['L', 'M'])
    assert (resp.status_code == status.HTTP_400_BAD_REQUEST)
    assert (resp.data['detail'] == "Field 'filter|award_type_codes' is outside valid values ['07', '08']")
@method_decorator(login_required(login_url='/login'), name='dispatch')
class ResetUserView(View):
    """Handles a logged-in user's request to change their email address.

    NOTE(review): the class decorator lost its name in extraction (only
    `_decorator(..., name='dispatch')` survived) — restored as django's
    `method_decorator`; confirm against VCS. The user-facing `message`
    strings also appear to have lost their original (non-ASCII) text.
    """

    def post(self, request):
        email = request.POST.get('email')
        username = request.POST.get('username')
        if (email and (username is not None)):
            # Reject addresses already registered to an account.
            if User.objects.filter(email=email):
                return JsonResponse({'status': 400, 'message': ''})
            # Send the confirmation email asynchronously via celery.
            send_register_email.delay(email=email, username=username, send_type='update_email')
            return JsonResponse({'status': 200, 'message': u',30'})
        return JsonResponse({'status': 400, 'message': ''})
# NOTE(review): this generator is presumably a pytest yield-fixture whose
# `@pytest.fixture` decorator was lost in extraction — confirm against VCS.
def automl_model():
    """Yield a Firebase ML model backed by a pre-existing AutoML model,
    deleting it again after the test; skips when no AutoML model exists."""
    assert _AUTOML_ENABLED
    automl_client = automl_v1.AutoMlClient()
    project_id = firebase_admin.get_app().project_id
    parent = automl_client.location_path(project_id, 'us-central1')
    models = automl_client.list_models(parent, filter_='display_name=admin_sdk_integ_test1')
    automl_ref = None
    # Keeps the last match; any model from the filtered list is acceptable.
    for model in models:
        automl_ref = model.name
    if (automl_ref is None):
        pytest.skip('No pre-existing AutoML model found. Skipping test')
    source = ml.TFLiteAutoMlSource(automl_ref)
    tflite_format = ml.TFLiteFormat(model_source=source)
    ml_model = ml.Model(display_name=_random_identifier('TestModel_automl_'), tags=['test_automl'], model_format=tflite_format)
    model = ml.create_model(model=ml_model)
    (yield model)
    # Teardown: remove the model created for this test.
    _clean_up_model(model)
def llama_executable() -> Optional[str]:
    """Return the path to the llama.cpp ``main`` executable, or None.

    The path is read from the ``LLAMA_CPP`` environment variable; when it
    is unset a warning is logged and None returned.
    """
    import os
    if 'LLAMA_CPP' not in os.environ:
        # BUG FIX: logger.warn is a deprecated alias — use logger.warning.
        logger.warning('llama.cpp executable not found. Please set the `LLAMA_CPP` '
                       'environment variable to the path to the `main` executable to '
                       'use the llama.cpp backend.')
        return None
    return os.environ['LLAMA_CPP']
class TestRuleExtractor(UnitTestWithNamespace):
    """RuleExtractor tests: matching events against workflow trigger rules
    and against per-task rules of active workflow executions.

    NOTE(review): indentation was lost in extraction; the nesting below was
    reconstructed from statement order — verify against VCS.
    """

    def test_extract_workflow_rules(self):
        def build_workflows():
            # Three workflows, each registered with two trigger rules; both
            # rules also listen on the shared key 'event'.
            for i in range(3):
                with Workflow(name='workflow_{}'.format(i), namespace=self.namespace_name) as workflow:
                    o1 = Operator(name='op')
                workflow_meta = self.metadata_manager.add_workflow(namespace=self.namespace_name, name=workflow.name, content='', workflow_object=cloudpickle.dumps(workflow))
                self.metadata_manager.flush()
                expect_events_1 = ['event_1', 'event_1_{}'.format(i), 'event']
                expect_events_2 = ['event_2', 'event_2_{}'.format(i), 'event']
                self.metadata_manager.add_workflow_trigger(workflow_id=workflow_meta.id, rule=cloudpickle.dumps(WorkflowRule(condition=Condition(expect_event_keys=expect_events_1))))
                self.metadata_manager.flush()
                self.metadata_manager.add_workflow_trigger(workflow_id=workflow_meta.id, rule=cloudpickle.dumps(WorkflowRule(condition=Condition(expect_event_keys=expect_events_2))))
                self.metadata_manager.flush()
        build_workflows()
        rule_extractor = RuleExtractor()
        # 'event_1' matches exactly one rule per workflow.
        event = Event(key='event_1', value='')
        event.namespace = self.namespace_name
        results = rule_extractor.extract_workflow_rules(event=event)
        self.metadata_manager.flush()
        self.assertEqual(3, len(results))
        for r in results:
            self.assertEqual(1, len(r.rules))
        # The shared 'event' key matches both rules of every workflow.
        event = Event(key='event', value='')
        event.namespace = self.namespace_name
        results = rule_extractor.extract_workflow_rules(event=event)
        self.metadata_manager.flush()
        self.assertEqual(3, len(results))
        for r in results:
            self.assertEqual(2, len(r.rules))

    def test_extract_workflow_execution_rules(self):
        def build_workflows():
            # Three workflows with per-task start conditions; each gets one
            # snapshot and three executions, every other one marked RUNNING.
            for i in range(3):
                expect_events_1 = ['event_1', 'event_1_{}'.format(i), 'event']
                expect_events_2 = ['event_2', 'event_2_{}'.format(i), 'event']
                with Workflow(name='workflow_{}'.format(i)) as workflow:
                    op_1 = Operator(name='op_1')
                    op_2 = Operator(name='op_2')
                    op_3 = Operator(name='op_3')
                    op_1.action_on_condition(action=TaskAction.START, condition=Condition(expect_event_keys=expect_events_1))
                    op_2.action_on_condition(action=TaskAction.START, condition=Condition(expect_event_keys=expect_events_2))
                    op_3.action_on_task_status(TaskAction.START, {op_1: TaskStatus.SUCCESS, op_2: TaskStatus.SUCCESS})
                workflow_meta = self.metadata_manager.add_workflow(namespace=self.namespace_name, name=workflow.name, content='', workflow_object=cloudpickle.dumps(workflow))
                self.metadata_manager.flush()
                snapshot_meta = self.metadata_manager.add_workflow_snapshot(workflow_id=workflow_meta.id, workflow_object=workflow_meta.workflow_object, uri='url', signature=str(i))
                self.metadata_manager.flush()
                for j in range(3):
                    workflow_execution_meta = self.metadata_manager.add_workflow_execution(workflow_id=workflow_meta.id, run_type=ExecutionType.MANUAL, snapshot_id=snapshot_meta.id)
                    self.metadata_manager.flush()
                    if (0 == (j % 2)):
                        # Executions 0 and 2 of each workflow become RUNNING.
                        self.metadata_manager.update_workflow_execution(workflow_execution_id=workflow_execution_meta.id, status=WorkflowStatus.RUNNING.value)
                        self.metadata_manager.flush()
        build_workflows()
        rule_extractor = RuleExtractor()
        # 'event_1': one matching task rule per running execution (2 x 3).
        event = Event(key='event_1', value='')
        event.namespace = 'default'
        results = rule_extractor.extract_workflow_execution_rules(event=event)
        self.metadata_manager.flush()
        self.assertEqual(6, len(results))
        for r in results:
            self.assertEqual(1, len(r.task_rule_wrappers))
        # Shared 'event' key matches both conditional tasks per execution.
        event = Event(key='event', value='')
        event.namespace = 'default'
        results = rule_extractor.extract_workflow_execution_rules(event=event)
        self.metadata_manager.flush()
        self.assertEqual(6, len(results))
        for r in results:
            self.assertEqual(2, len(r.task_rule_wrappers))
        # Scoping the event to one execution id narrows results to that one.
        event.context = json.dumps({EventContextConstant.WORKFLOW_EXECUTION_ID: 1})
        results = rule_extractor.extract_workflow_execution_rules(event=event)
        self.metadata_manager.flush()
        self.assertEqual(1, len(results))
class TicketSchema(TicketSchemaPublic):
    """JSON:API schema for tickets, extending the public schema with
    access-code and attendee relationships."""

    class Meta():
        # JSON:API resource configuration.
        type_ = 'ticket'
        self_view = 'v1.ticket_detail'
        self_view_kwargs = {'id': '<id>'}
        inflect = dasherize

    # Access codes applicable to this ticket.
    access_codes = Relationship(self_view='v1.ticket_access_code', self_view_kwargs={'id': '<id>'}, related_view='v1.access_code_list', related_view_kwargs={'ticket_id': '<id>'}, schema='AccessCodeSchema', many=True, type_='access-code')
    # Attendees holding this ticket.
    attendees = Relationship(self_view='v1.ticket_attendees', self_view_kwargs={'id': '<id>'}, schema='AttendeeSchema', many=True, type_='attendee')
class OptionPlotoptionsHeatmapSonificationTracksMappingPlaydelay(Options):
    """playDelay mapping options for heatmap sonification tracks.

    Each option is a property pair: the getter reads the stored config
    value via ``_config_get`` and the setter writes it via ``_config``.

    NOTE(review): the ``@property``/``@<name>.setter`` decorators were
    stripped in extraction (the duplicated ``def`` names only make sense
    as property pairs) — restored here; confirm against upstream.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def get_new_reminder(stype: str, svalue: str) -> str:
    """Build a reminder stamp string ``<created>|<due>|<type>:<value>``.

    Supported types:
      td — due in a fixed number of days (svalue = day count)
      wd — due on the next of the given weekdays (svalue = digit string)
      id — interval days, same arithmetic as td
      gd — geometric: svalue is 'factor;last'; next interval = factor*last

    Raises ValueError for an unknown *stype* (previously this fell through
    and returned None despite the ``-> str`` annotation).
    """
    now = date_now_stamp()
    if (stype == 'td'):
        next_date_due = (datetime.now() + timedelta(days=int(svalue)))
        return f'{now}|{dt_to_stamp(next_date_due)}|td:{svalue}'
    elif (stype == 'wd'):
        # svalue is a string of weekday digits, e.g. '135'.
        weekdays_due = [int(d) for d in svalue]
        next_date_due = next_instance_of_weekdays(weekdays_due)
        return f'{now}|{dt_to_stamp(next_date_due)}|wd:{svalue}'
    elif (stype == 'id'):
        next_date_due = (datetime.now() + timedelta(days=int(svalue)))
        return f'{now}|{dt_to_stamp(next_date_due)}|id:{svalue}'
    elif (stype == 'gd'):
        factor = float(svalue.split(';')[0])
        last = float(svalue.split(';')[1])
        new = (factor * last)
        next_date_due = (datetime.now() + timedelta(days=int(new)))
        # The stored value carries the grown interval forward.
        return f'{now}|{dt_to_stamp(next_date_due)}|gd:{factor};{new}'
    # BUG FIX: explicit failure instead of silently returning None.
    raise ValueError(f'unknown reminder type: {stype!r}')
class OptionSeriesSplineTooltipDatetimelabelformats(Options):
    """Datetime label formats for spline-series tooltips: one property per
    time unit, each with a Highcharts-compatible default format string.

    NOTE(review): the ``@property``/``@<name>.setter`` decorators were
    stripped in extraction (the duplicated ``def`` names only make sense
    as property pairs) — restored here; confirm against upstream.
    """

    @property
    def day(self):
        return self._config_get('%A, %e %b %Y')

    @day.setter
    def day(self, text: str):
        self._config(text, js_type=False)

    @property
    def hour(self):
        return self._config_get('%A, %e %b, %H:%M')

    @hour.setter
    def hour(self, text: str):
        self._config(text, js_type=False)

    @property
    def millisecond(self):
        return self._config_get('%A, %e %b, %H:%M:%S.%L')

    @millisecond.setter
    def millisecond(self, text: str):
        self._config(text, js_type=False)

    @property
    def minute(self):
        return self._config_get('%A, %e %b, %H:%M')

    @minute.setter
    def minute(self, text: str):
        self._config(text, js_type=False)

    @property
    def month(self):
        return self._config_get('%B %Y')

    @month.setter
    def month(self, text: str):
        self._config(text, js_type=False)

    @property
    def second(self):
        return self._config_get('%A, %e %b, %H:%M:%S')

    @second.setter
    def second(self, text: str):
        self._config(text, js_type=False)

    @property
    def week(self):
        return self._config_get('Week from %A, %e %b %Y')

    @week.setter
    def week(self, text: str):
        self._config(text, js_type=False)

    @property
    def year(self):
        return self._config_get('%Y')

    @year.setter
    def year(self, text: str):
        self._config(text, js_type=False)
class IntBetween(_IntComparison):
    """Comparison validator accepting numeric values in the closed range
    [lower, upper]."""

    def __init__(self, lower: int, upper: int) -> None:
        both_numeric = isinstance(lower, (int, float)) and isinstance(upper, (int, float))
        if not both_numeric:
            # Report the offending argument types by name.
            raise ValueError(f"IntBetween(...) expects two numerical values as arguments while '{type(lower).__name__}' and '{type(upper).__name__}' were provided")
        super().__init__(ge=lower, le=upper)
def printInstanceMethods(cls, showaddr=False, prefix='-'):
    """Pretty-print every instance method of *cls*, one per line, optionally
    appending the implementation address when *showaddr* is set."""
    methods = getMethods(cls)
    if not methods:
        print('No methods were found')
    for method in methods:
        line = (prefix + ' ') + method.prettyPrintString()
        if showaddr:
            line = (line + ' ') + str(method.imp)
        print(line)
@pytest.mark.parametrize('arguments,expected', (({}, [[]]), ({'arg1': 1}, [[]]), ({'arg0': 1}, [[hex_and_pad(1), None, None]]), ({'arg0': [1]}, [[hex_and_pad(1), None, None]]), ({'arg0': [1, 2]}, [[hex_and_pad(1), None, None], [hex_and_pad(2), None, None]]), ({'arg0': [1, 3], 'arg3': [2, 4]}, [[hex_and_pad(1), hex_and_pad(2), None], [hex_and_pad(1), hex_and_pad(4), None], [hex_and_pad(3), hex_and_pad(2), None], [hex_and_pad(3), hex_and_pad(4), None]])))
def test_construct_event_data_set(w3, arguments, expected):
    """construct_event_data_set encodes each argument combination (cartesian
    product for multi-valued args) into padded-hex data rows.

    NOTE(review): the decorator lost its `@pytest.mark` prefix in extraction
    (only `.parametrize` survived) — restored here; confirm against VCS.
    """
    actual = construct_event_data_set(EVENT_1_ABI, w3.codec, arguments)
    assert (actual == expected)
# NOTE(review): presumably decorated with `@pytest.fixture` upstream — the
# decorator did not survive extraction; confirm against VCS.
def poly_template(monkeypatch):
    """Yield a freshly generated poly_template test case directory, with the
    current working directory changed into it for the duration."""
    folder = py.path.local(tempfile.mkdtemp())
    script_path = Path(__file__).parent.resolve()
    # make_poly_example populates the temp dir from the checked-in template
    # using the fixed sizes below and returns the case folder.
    folder = make_poly_example(folder, f'{script_path}/../../test-data/poly_template', gen_data_count=34, gen_data_entries=15, summary_data_entries=100, reals=2, summary_data_count=4000, sum_obs_count=450, gen_obs_count=34, sum_obs_every=10, gen_obs_every=1, parameter_entries=12, parameter_count=8, update_steps=1)
    monkeypatch.chdir(folder)
    (yield folder)
def backprop_hard_swish_mobilenet(dY, X, *, inplace: bool=False, threads_per_block=128, num_blocks=128):
    """Backward pass of mobilenet-style hard-swish on the GPU.

    Writes the gradient into *dY* when *inplace* is set, otherwise into a
    freshly allocated array of the same shape and dtype.
    """
    _is_float_array(dY)
    _is_float_array(X, shape=dY.shape)
    out = dY if inplace else _alloc_like(dY, zeros=False)
    # Select the kernel matching the input precision.
    if dY.dtype == 'float32':
        kernel = backprop_hard_swish_mobilenet_kernel_float
    else:
        kernel = backprop_hard_swish_mobilenet_kernel_double
    kernel((num_blocks,), (threads_per_block,), (out, dY, X, out.size))
    return out
def uninstall(fips_dir):
    """Remove the emscripten SDK installation after user confirmation."""
    emsdk_dir = get_emsdk_dir(fips_dir)
    log.colored(log.YELLOW, '=== uninstalling emscripten SDK')
    remove_old_sdks(fips_dir)
    if not emsdk_dir_exists(fips_dir):
        log.warn('emscripten SDK is not installed, nothing to do')
        return
    # Ask before deleting; highlight the prompt in red.
    prompt = ((log.RED + "Delete emsdk directory at '{}'?".format(emsdk_dir)) + log.DEF)
    if util.confirm(prompt):
        log.info("Deleting '{}'...".format(emsdk_dir))
        shutil.rmtree(emsdk_dir, onerror=remove_readonly)
    else:
        log.info("'No' selected, nothing deleted")
class TestPlayer(TestPlayerBase):
    """Registration tests for the player plugin module."""

    def test_register_plugin(self):
        # Default registration: blueprint, mimetype detector, stylesheet
        # widget and the standard endpoints — but no directory-play endpoint.
        self.module.register_plugin(self.manager)
        self.assertListEqual(self.manager.arguments, [])
        self.assertIn(self.module.player, self.manager.blueprints)
        self.assertIn(self.module.playable.detect_playable_mimetype, self.manager.mimetype_functions)
        widgets = [action['filename'] for action in self.manager.widgets if (action['type'] == 'stylesheet')]
        self.assertIn('css/browse.css', widgets)
        actions = [action['endpoint'] for action in self.manager.widgets]
        self.assertIn('player.static', actions)
        self.assertIn('player.audio', actions)
        self.assertIn('player.playlist', actions)
        self.assertNotIn('player.directory', actions)

    def test_register_plugin_with_arguments(self):
        # Enabling player_directory_play adds the directory endpoint.
        self.manager.argument_values['player_directory_play'] = True
        self.module.register_plugin(self.manager)
        actions = [action['endpoint'] for action in self.manager.widgets]
        self.assertIn('player.directory', actions)

    def test_register_arguments(self):
        # Exactly one CLI flag is registered by the module.
        self.module.register_arguments(self.manager)
        self.assertEqual(len(self.manager.arguments), 1)
        arguments = [arg[0][0] for arg in self.manager.arguments]
        self.assertIn('--player-directory-play', arguments)
def resetLoggingLocks():
    """Best-effort release of the logging module's locks.

    Intended for use after fork(): the module lock or a handler lock may be
    held by a thread that does not exist in the child process. Locks that
    are not currently held raise RuntimeError on release, which is ignored.
    """
    # BUG FIX: logging._releaseLock was removed in Python 3.13, so calling
    # it unconditionally raised AttributeError (not caught by the original
    # RuntimeError handler). Guard the lookup instead.
    release = getattr(logging, '_releaseLock', None)
    if release is not None:
        try:
            release()
        except RuntimeError:
            pass  # module lock was not held
    for handler in logging.Logger.manager.loggerDict.values():
        # loggerDict may contain PlaceHolder objects that have no lock.
        if (hasattr(handler, 'lock') and handler.lock):
            try:
                handler.lock.release()
            except RuntimeError:
                pass  # lock was not held
class HistoricalDdosMeta(ModelNormal):
    """Auto-generated OpenAPI model holding query metadata (start, end,
    downsample, metric) for historical DDoS responses.

    NOTE(review): several decorators were mangled by extraction and are kept
    verbatim below as bare names: ``_property`` (presumably
    ``@cached_property``), ``_js_args_to_python_args`` (presumably
    ``@convert_js_args_to_python_args``); ``_from_openapi_data`` is normally
    also a ``@classmethod``. Restore them from the code generator / VCS
    before executing this module.
    """

    # No enum constraints or validators for this model.
    allowed_values = {}
    validations = {}

    _property
    def additional_properties_type():
        # Types accepted for properties not declared in openapi_types.
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    _property
    def openapi_types():
        # Declared property name -> accepted type tuple.
        return {'start': (str,), 'end': (str,), 'downsample': (str,), 'metric': (str,)}

    _property
    def discriminator():
        # No polymorphic discriminator for this model.
        return None

    # Python attribute name -> JSON field name (identity mapping here).
    attribute_map = {'start': 'start', 'end': 'end', 'downsample': 'downsample', 'metric': 'metric'}
    read_only_vars = {}
    _composed_schemas = {}

    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Deserialization constructor used when building the model from API data."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        # Bypass __init__: state is filled in manually below.
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally drop unknown keys when the configuration says so and
            # the model declares no additional-properties type.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    # Instance attributes that belong to the model machinery, not to data.
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Keyword-argument constructor; rejects positional arguments and
        assignment to read-only attributes."""
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            # Assigning to a read-only attribute is an error.
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
class ServiceStub(object):
    """gRPC client stub for cosmos.base.tendermint.v1beta1.Service — one
    unary-unary callable per RPC, generated-code style."""

    def __init__(self, channel):
        """Bind each Service RPC to *channel*.

        Args:
            channel: the grpc channel the callables are created on.
        """
        self.GetNodeInfo = channel.unary_unary('/cosmos.base.tendermint.v1beta1.Service/GetNodeInfo', request_serializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetNodeInfoRequest.SerializeToString, response_deserializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetNodeInfoResponse.FromString)
        self.GetSyncing = channel.unary_unary('/cosmos.base.tendermint.v1beta1.Service/GetSyncing', request_serializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetSyncingRequest.SerializeToString, response_deserializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetSyncingResponse.FromString)
        self.GetLatestBlock = channel.unary_unary('/cosmos.base.tendermint.v1beta1.Service/GetLatestBlock', request_serializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetLatestBlockRequest.SerializeToString, response_deserializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetLatestBlockResponse.FromString)
        self.GetBlockByHeight = channel.unary_unary('/cosmos.base.tendermint.v1beta1.Service/GetBlockByHeight', request_serializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetBlockByHeightRequest.SerializeToString, response_deserializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetBlockByHeightResponse.FromString)
        self.GetLatestValidatorSet = channel.unary_unary('/cosmos.base.tendermint.v1beta1.Service/GetLatestValidatorSet', request_serializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetLatestValidatorSetRequest.SerializeToString, response_deserializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetLatestValidatorSetResponse.FromString)
        self.GetValidatorSetByHeight = channel.unary_unary('/cosmos.base.tendermint.v1beta1.Service/GetValidatorSetByHeight', request_serializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetValidatorSetByHeightRequest.SerializeToString, response_deserializer=cosmos_dot_base_dot_tendermint_dot_v1beta1_dot_query__pb2.GetValidatorSetByHeightResponse.FromString)
def lambda_handler(event, context):
    """API Gateway Lambda handler: return the authenticated user's sentences.

    Identifies the user via the Cognito ``sub`` claim, pulls their stored
    sentences, and maps backend failures to a 502 response.
    """
    print(event)
    cognito_id = event['requestContext']['authorizer']['claims']['sub']
    try:
        sentences_response = pull_user_sentences(cognito_id)
    except Exception as e:
        # BUG FIX: was an f-string with no placeholders.
        print('Error: Failed to get user sentences.')
        print(e)
        return api_response.response(502, 'Failed to retrieve user sentences.')
    user_sentences = format_user_sentences(sentences_response)
    return api_response.response(200, 'Successfully retrieved user sentences.', user_sentences)
def _test_correct_response_for_award_amounts(client):
    """Shared test helper: spending_by_award filtered by two award-amount
    bands must return exactly the three matching awards in Award ID order."""
    resp = client.post('/api/v2/search/spending_by_award', content_type='application/json', data=json.dumps({'filters': {'award_type_codes': ['A', 'B', 'C', 'D'], 'award_amounts': [{'upper_bound': 1000000}, {'lower_bound': 9013, 'upper_bound': 9017}], 'time_period': [{'start_date': '2007-10-01', 'end_date': '2020-09-30'}]}, 'fields': ['Award ID'], 'page': 1, 'limit': 60, 'sort': 'Award ID', 'order': 'asc', 'subawards': False}))
    # Awards 1, 2 and 5 fall inside the two requested amount bands.
    expected_result = [{'internal_id': 1, 'Award ID': 'abc111', 'generated_internal_id': 'CONT_AWD_TESTING_1'}, {'internal_id': 2, 'Award ID': 'abc222', 'generated_internal_id': 'CONT_AWD_TESTING_2'}, {'internal_id': 5, 'Award ID': 'abcdef123', 'generated_internal_id': 'CONT_AWD_TESTING_5'}]
    assert (resp.status_code == status.HTTP_200_OK)
    assert (len(resp.json().get('results')) == 3)
    assert (resp.json().get('results') == expected_result), 'Award Amounts filter does not match expected result'
def out_edge_node_mapper(nodes: List[LabgraphMonitorNode]) -> Dict[str, LabgraphMonitorNode]:
    """Map each out-edge identifier to the node that produces it.

    BUG FIX: the return annotation previously claimed ``List[...]`` values,
    but the body stores a single node per edge (last writer wins if two
    nodes share an out-edge). If multiple producers per edge were intended,
    the body — not the annotation — needs changing; confirm with callers.
    """
    out_edge_node_map: Dict[str, LabgraphMonitorNode] = {}
    for node in nodes:
        for out_edge in node.out_edges:
            out_edge_node_map[out_edge] = node
    return out_edge_node_map
def main(argv):
    """Drive a javac compile plus optional jar packaging step.

    Parses build options, expands gyp-style list arguments, compiles the
    collected .java files into a temp dir, then optionally writes a jar,
    a source jar, a persistent classes dir, a depfile and a stamp file.
    NOTE(review): original indentation was lost in extraction; the nesting
    below was reconstructed from statement order — verify against VCS
    before relying on edge-case control flow.
    """
    argv = build_utils.ExpandFileArgs(argv)
    parser = optparse.OptionParser()
    build_utils.AddDepfileOption(parser)
    parser.add_option('--src-gendirs', help='Directories containing generated java files.')
    parser.add_option('--java-srcjars', action='append', default=[], help='List of srcjars to include in compilation.')
    parser.add_option('--bootclasspath', action='append', default=[], help='Boot classpath for javac. If this is specified multiple times, they will all be appended to construct the classpath.')
    parser.add_option('--classpath', action='append', help='Classpath for javac. If this is specified multiple times, they will all be appended to construct the classpath.')
    parser.add_option('--javac-includes', help='A list of file patterns. If provided, only java files that matchone of the patterns will be compiled.')
    parser.add_option('--jar-excluded-classes', default='', help='List of .class file patterns to exclude from the jar.')
    parser.add_option('--chromium-code', type='int', help='Whether code being compiled should be built with stricter warnings for chromium code.')
    parser.add_option('--javac-bin', default='javac', help='The javac binary. If empty, the javac binary is resolved from PATH.')
    parser.add_option('--jar-bin', default='jar', help='The jar binary. If empty, the jar binary is resolved from PATH.')
    parser.add_option('--java-version', default='1.8', help='The source and target versions passed to javac.')
    parser.add_option('--classes-dir', help='Directory for compiled .class files.')
    parser.add_option('--jar-path', help='Jar output path.')
    parser.add_option('--jar-source-path', help='Source jar output path.')
    parser.add_option('--jar-source-base-dir', help='Base directory for the source files included in the output source jar.')
    parser.add_option('--main-class', help='The class containing the main method.')
    parser.add_option('--manifest-entry', action='append', help='Key:value pairs to add to the .jar manifest.')
    parser.add_option('--additional-jar-files', dest='additional_jar_files', action='append', help='Additional files to package into jar. By default, only Java .class files are packaged into the jar.')
    parser.add_option('--stamp', help='Path to touch on success.')
    (options, args) = parser.parse_args(argv)
    if (options.main_class and (not options.jar_path)):
        parser.error('--main-class requires --jar-path')
    # Flatten the repeatable gyp-list options into plain lists.
    bootclasspath = []
    for arg in options.bootclasspath:
        bootclasspath += build_utils.ParseGypList(arg)
    classpath = []
    for arg in options.classpath:
        classpath += build_utils.ParseGypList(arg)
    java_srcjars = []
    for arg in options.java_srcjars:
        java_srcjars += build_utils.ParseGypList(arg)
    # Positional args are the explicit .java sources.
    java_files = args
    if options.src_gendirs:
        src_gendirs = build_utils.ParseGypList(options.src_gendirs)
        java_files += build_utils.FindInDirectories(src_gendirs, '*.java')
    additional_jar_files = []
    for arg in (options.additional_jar_files or []):
        additional_jar_files += build_utils.ParseGypList(arg)
    # Everything that feeds the compile, recorded for the depfile.
    input_files = (((bootclasspath + classpath) + java_srcjars) + java_files)
    with build_utils.TempDir() as temp_dir:
        classes_dir = os.path.join(temp_dir, 'classes')
        os.makedirs(classes_dir)
        if java_srcjars:
            # Extract srcjars so their .java files join the compile.
            java_dir = os.path.join(temp_dir, 'java')
            os.makedirs(java_dir)
            for srcjar in java_srcjars:
                build_utils.ExtractAll(srcjar, path=java_dir, pattern='*.java')
            java_files += build_utils.FindInDirectory(java_dir, '*.java')
        if options.javac_includes:
            # Keep only sources matching at least one include pattern.
            javac_includes = build_utils.ParseGypList(options.javac_includes)
            filtered_java_files = []
            for f in java_files:
                for include in javac_includes:
                    if fnmatch.fnmatch(f, include):
                        filtered_java_files.append(f)
                        break
            java_files = filtered_java_files
        if (len(java_files) != 0):
            DoJavac(bootclasspath, classpath, classes_dir, options.chromium_code, options.javac_bin, options.java_version, java_files)
        if options.jar_path:
            # Build a manifest only when something must go into it.
            if (options.main_class or options.manifest_entry):
                if options.manifest_entry:
                    entries = [e.split(':') for e in options.manifest_entry]
                else:
                    entries = []
                manifest_file = os.path.join(temp_dir, 'manifest')
                CreateManifest(manifest_file, classpath, options.main_class, entries)
            else:
                manifest_file = None
            if options.additional_jar_files:
                # Copy extra files into the classes dir, preserving their
                # path relative to --jar-source-base-dir.
                for f in additional_jar_files:
                    shutil.copyfile(f, os.path.join(classes_dir, os.path.relpath(f, options.jar_source_base_dir)))
                additional_jar_files = [os.path.relpath(f, options.jar_source_base_dir) for f in additional_jar_files]
            jar.JarDirectory(classes_dir, build_utils.ParseGypList(options.jar_excluded_classes), options.jar_path, options.jar_bin, manifest_file=manifest_file, additional_jar_files=additional_jar_files)
        if options.jar_source_path:
            jar.Jar(java_files, options.jar_source_base_dir, options.jar_source_path, options.jar_bin)
        if options.classes_dir:
            # Replace any stale persistent classes dir with fresh output.
            build_utils.DeleteDirectory(options.classes_dir)
            shutil.copytree(classes_dir, options.classes_dir)
    if options.depfile:
        build_utils.WriteDepfile(options.depfile, (input_files + build_utils.GetPythonDependencies()))
    if options.stamp:
        build_utils.Touch(options.stamp)
def has_uncommitted_files(proj_dir):
    """Return ``(dirty, output)`` for the git workspace at *proj_dir*.

    *dirty* is True when ``git status -s`` prints anything; when git fails
    the error is logged and ``(False, '')`` returned.
    """
    try:
        # Argv-list form instead of shell=True with a command string —
        # avoids an unnecessary shell and string-interpolation hazards.
        output = subprocess.check_output(['git', 'status', '-s'], cwd=proj_dir).decode('utf-8')
        if (len(output) > 0):
            return (True, output)
        else:
            return (False, output)
    except subprocess.CalledProcessError:
        log.error("failed to call 'git status -s'")
        return (False, '')
class System(Base):
    """ORM row for one star system belonging to a game save."""
    __tablename__ = 'system'
    system_id = Column(Integer, primary_key=True)
    game_id = Column(ForeignKey(Game.game_id))
    country_id = Column(ForeignKey('country.country_id'), index=True)
    name = Column(String(80))
    # The system's id inside the game's own save file (not our PK).
    system_id_in_game = Column(Integer, index=True)
    star_class = Column(String(20))
    coordinate_x = Column(Float)
    coordinate_y = Column(Float)
    game = relationship('Game', back_populates='systems')
    country = relationship('Country', back_populates='systems')
    # Time-ordered ownership intervals; consumed by get_owner_country_at().
    ownership_history = relationship('SystemOwnership', back_populates='system', cascade='all,delete,delete-orphan')
    planets = relationship('Planet', back_populates='system', cascade='all,delete,delete-orphan')
    # A hyperlane row references two endpoint systems; this system may sit on
    # either side, hence the two relationships keyed on different FK columns.
    hyperlanes_one = relationship('HyperLane', foreign_keys=(lambda : [HyperLane.system_one_id]))
    hyperlanes_two = relationship('HyperLane', foreign_keys=(lambda : [HyperLane.system_two_id]))
    bypasses = relationship('Bypass', cascade='all,delete,delete-orphan')
    historical_events = relationship('HistoricalEvent', back_populates='system', cascade='all,delete,delete-orphan')

    def neighbors(self):
        """Yield every system connected to this one by a hyperlane (both directions)."""
        for hl in self.hyperlanes_one:
            (yield hl.system_two)
        for hl in self.hyperlanes_two:
            (yield hl.system_one)

    def get_owner_country_at(self, time_in_days: int) -> Optional['Country']:
        """Return the country owning this system at *time_in_days*, or None.

        Scans ownership intervals in start-date order; falls back to the
        current `country` when *time_in_days* lies after every interval.
        """
        if ((not self.ownership_history) and (self.country is None)):
            return None
        for ownership in sorted(self.ownership_history, key=(lambda oh: oh.start_date_days)):
            # None endpoints encode open-ended intervals.
            start = (ownership.start_date_days or float('-inf'))
            end = (ownership.end_date_days or float('inf'))
            if (start <= time_in_days <= end):
                return ownership.country
            elif (ownership.start_date_days > time_in_days):
                # Intervals are sorted by start; no later interval can match.
                return None
        return self.country

    def rendered_name(self):
        """Name rendered through game_info.render_name (human-readable form)."""
        rendered = game_info.render_name(self.name)
        return rendered

    def __str__(self):
        return f'System "{self.name}" {self.coordinate_x}, {self.coordinate_y}'
# NOTE(review): the decorator was garbled by extraction — only its argument
# survives.  It originally registered this function under the key
# 'cuda.perm021fc_crc.gen_function' in a function registry (likely
# `@registry.reg(...)` — confirm against the original file).
('cuda.perm021fc_crc.gen_function')
def gen_function(func_attrs, exec_cond_template, dim_info_dict):
    """Generate the CUDA function body for a perm021 fused-column-reduce GEMM.

    Builds the GEMM problem arguments (alpha from func_attrs, beta fixed at 0)
    and delegates code generation to the shared bmm_common helper.
    """
    problem_args = bmm_common.PROBLEM_ARGS_TEMPLATE.render(mm_info=_get_problem_info(alpha_value=func_attrs.get('alpha', 1), beta_value=0))
    return bmm_common.gen_function(func_attrs, exec_cond_template, problem_args, dim_info_dict)
class AssetChannel(RFUniverseChannel):
    """Side channel for scene/asset management between Python and the
    RFUniverse simulator.

    Incoming messages are parsed into ``self.data``; the various public
    methods serialize an ``OutgoingMessage`` and send it to the simulator.
    ``self.Messages`` maps a message name to user-registered listener
    callables (see AddListener/RemoveListener).
    """

    def __init__(self, channel_id: str) -> None:
        super().__init__(channel_id)
        self.data = {}      # last parsed payload per message kind
        self.Messages = {}  # message name -> list of listener callables

    @staticmethod
    def _read_floats(msg: IncomingMessage, count: int) -> list:
        # Read `count` consecutive float32 values from the message.
        return [msg.read_float32() for _ in range(count)]

    def _parse_message(self, msg: IncomingMessage) -> None:
        """Dispatch one incoming message by its leading type string."""
        msg_type = msg.read_string()
        if msg_type in self.Messages:
            # User-registered listeners take precedence over built-ins.
            for listener in self.Messages[msg_type]:
                listener(msg)
        elif msg_type == 'PreLoadDone':
            self.data['load_done'] = True
        elif msg_type == 'RFMoveColliders':
            collider = []
            object_count = msg.read_int32()
            for _ in range(object_count):
                one = {'object_id': msg.read_int32(), 'collider': []}
                collider_count = msg.read_int32()
                for _ in range(collider_count):
                    collider_data = {'type': msg.read_string()}
                    collider_data['position'] = self._read_floats(msg, 3)
                    if collider_data['type'] == 'box':
                        # Rotation is a quaternion (4 floats), size a vector3.
                        collider_data['rotation'] = self._read_floats(msg, 4)
                        collider_data['size'] = self._read_floats(msg, 3)
                    elif collider_data['type'] == 'sphere':
                        collider_data['radius'] = msg.read_float32()
                    elif collider_data['type'] == 'capsule':
                        collider_data['rotation'] = self._read_floats(msg, 4)
                        collider_data['direction'] = msg.read_int32()
                        collider_data['radius'] = msg.read_float32()
                        collider_data['height'] = msg.read_float32()
                    one['collider'].append(collider_data)
                collider.append(one)
            self.data['colliders'] = collider
        elif msg_type == 'CurrentCollisionPairs':
            pair_count = msg.read_int32()
            self.data['collision_pairs'] = [
                [msg.read_int32(), msg.read_int32()] for _ in range(pair_count)
            ]
        else:
            # Unknown message types are delegated to the user extension module.
            ext_data = ext.parse_message(msg, msg_type)
            self.data.update(ext_data)

    def set_action(self, action: str, **kwargs) -> None:
        """Invoke a local action method, or an extension action, by name."""
        try:
            if hasattr(self, action):
                # Local handler; receives the kwargs as one dict (legacy API).
                getattr(self, action)(kwargs)
            else:
                # getattr replaces the original eval() — identical lookup for
                # plain identifiers, without arbitrary code execution.
                msg = getattr(ext, action)(kwargs)
                self.send_message(msg)
        except AttributeError:
            print(("There is no action called '%s' or this function has bug, please fix it." % action))
            exit((- 1))

    def PreLoadAssetsAsync(self, names: list) -> None:
        """Ask the simulator to start loading the named assets."""
        msg = OutgoingMessage()
        msg.write_string('PreLoadAssetsAsync')
        msg.write_int32(len(names))
        for name in names:
            msg.write_string(name)
        self.send_message(msg)

    def LoadSceneAsync(self, file: str) -> None:
        """Ask the simulator to load a scene description file."""
        msg = OutgoingMessage()
        msg.write_string('LoadSceneAsync')
        msg.write_string(file)
        self.send_message(msg)

    def SendMessage(self, message: str, *args) -> None:
        """Send a named message followed by a heterogeneous argument list.

        Supported argument types: str, bool, int, float and list[float].
        Exact type equality is deliberate — bool must not be serialized as int.
        """
        msg = OutgoingMessage()
        msg.write_string('SendMessage')
        msg.write_string(message)
        for i in args:
            if (type(i) == str):
                msg.write_string(i)
            elif (type(i) == bool):
                msg.write_bool(i)
            elif (type(i) == int):
                msg.write_int32(i)
            elif (type(i) == float):
                msg.write_float32(i)
            elif ((type(i) == list) and (type(i[0]) == float)):
                msg.write_float32_list(i)
            else:
                print(f'dont support this data type:{type(i)}')
        self.send_message(msg)

    def AddListener(self, message: str, fun):
        """Register *fun* as a listener for *message* (duplicates ignored).

        Fix: the original appended only when the callable was already
        registered, so a second listener on an existing message was lost.
        """
        if message in self.Messages:
            if fun not in self.Messages[message]:
                self.Messages[message].append(fun)
        else:
            self.Messages[message] = [fun]

    def RemoveListener(self, message: str, fun):
        """Unregister *fun*; drop the message entry once no listeners remain."""
        if message in self.Messages:
            if fun in self.Messages[message]:
                self.Messages[message].remove(fun)
            if len(self.Messages[message]) == 0:
                # Fix: original called list.pop(message) with a string key
                # (TypeError); the empty entry must be removed from the dict.
                self.Messages.pop(message)

    def InstanceObject(self, kwargs: dict) -> None:
        """Instantiate a prefab by name with the given object id."""
        compulsory_params = ['name', 'id']
        utility.CheckKwargs(kwargs, compulsory_params)
        msg = OutgoingMessage()
        msg.write_string('InstanceObject')
        msg.write_string(kwargs['name'])
        msg.write_int32(kwargs['id'])
        self.send_message(msg)

    def LoadURDF(self, kwargs: dict) -> None:
        """Load a URDF robot description; `native_ik` toggles the built-in IK."""
        compulsory_params = ['id', 'path', 'native_ik']
        utility.CheckKwargs(kwargs, compulsory_params)
        msg = OutgoingMessage()
        msg.write_string('LoadURDF')
        msg.write_int32(kwargs['id'])
        msg.write_string(kwargs['path'])
        msg.write_bool(kwargs['native_ik'])
        self.send_message(msg)

    def LoadMesh(self, kwargs: dict) -> None:
        """Load a mesh file as a new object."""
        compulsory_params = ['id', 'path']
        utility.CheckKwargs(kwargs, compulsory_params)
        msg = OutgoingMessage()
        msg.write_string('LoadMesh')
        msg.write_int32(kwargs['id'])
        msg.write_string(kwargs['path'])
        self.send_message(msg)

    def IgnoreLayerCollision(self, kwargs: dict) -> None:
        """Enable/disable collision between two physics layers."""
        compulsory_params = ['layer1', 'layer2', 'ignore']
        utility.CheckKwargs(kwargs, compulsory_params)
        msg = OutgoingMessage()
        msg.write_string('IgnoreLayerCollision')
        msg.write_int32(kwargs['layer1'])
        msg.write_int32(kwargs['layer2'])
        msg.write_bool(kwargs['ignore'])
        self.send_message(msg)

    def GetCurrentCollisionPairs(self) -> None:
        """Request the current collision pairs (result arrives via _parse_message)."""
        msg = OutgoingMessage()
        msg.write_string('GetCurrentCollisionPairs')
        self.send_message(msg)

    def GetRFMoveColliders(self) -> None:
        """Request the RFMove collider description (result arrives async)."""
        msg = OutgoingMessage()
        msg.write_string('GetRFMoveColliders')
        self.send_message(msg)

    def SetGravity(self, kwargs: dict) -> None:
        """Set the scene gravity vector."""
        compulsory_params = ['x', 'y', 'z']
        utility.CheckKwargs(kwargs, compulsory_params)
        msg = OutgoingMessage()
        msg.write_string('SetGravity')
        msg.write_float32(kwargs['x'])
        msg.write_float32(kwargs['y'])
        msg.write_float32(kwargs['z'])
        self.send_message(msg)

    def SetGroundPhysicMaterial(self, kwargs: dict) -> None:
        """Configure the physic material of the ground plane."""
        compulsory_params = ['bounciness', 'dynamic_friction', 'static_friction', 'friction_combine', 'bounce_combine']
        utility.CheckKwargs(kwargs, compulsory_params)
        msg = OutgoingMessage()
        msg.write_string('SetGroundPhysicMaterial')
        msg.write_float32(kwargs['bounciness'])
        msg.write_float32(kwargs['dynamic_friction'])
        msg.write_float32(kwargs['static_friction'])
        msg.write_int32(kwargs['friction_combine'])
        msg.write_int32(kwargs['bounce_combine'])
        self.send_message(msg)

    def SetTimeStep(self, kwargs: dict) -> None:
        """Set the physics time step."""
        compulsory_params = ['delta_time']
        utility.CheckKwargs(kwargs, compulsory_params)
        msg = OutgoingMessage()
        msg.write_string('SetTimeStep')
        msg.write_float32(kwargs['delta_time'])
        self.send_message(msg)

    def SetTimeScale(self, kwargs: dict) -> None:
        """Set the simulation time scale."""
        compulsory_params = ['time_scale']
        utility.CheckKwargs(kwargs, compulsory_params)
        msg = OutgoingMessage()
        msg.write_string('SetTimeScale')
        msg.write_float32(kwargs['time_scale'])
        self.send_message(msg)

    def SetResolution(self, kwargs: dict) -> None:
        """Set the render resolution."""
        compulsory_params = ['resolution_x', 'resolution_y']
        utility.CheckKwargs(kwargs, compulsory_params)
        msg = OutgoingMessage()
        msg.write_string('SetResolution')
        msg.write_int32(kwargs['resolution_x'])
        msg.write_int32(kwargs['resolution_y'])
        self.send_message(msg)
class TopicUpdateToAnnounceView(TopicUpdateTypeBaseView):
    """Moderation view that converts a regular topic into an announce."""
    # Confirmation question shown to the moderator (lazily translated).
    question = _('Would you want to change this topic to an announce?')
    target_type = Topic.TOPIC_ANNOUNCE

    def perform_permissions_check(self, user, obj, perms):
        # Delegates to the forum permission handler; `perms` is unused here.
        return self.request.forum_permission_handler.can_update_topics_to_announces(obj, user)
class DataParser(WebMirror.OutputFilters.FilterBase.FilterBase):
    """Filter that turns fetched RSS feed entries into release messages and
    forwards them (raw and parsed) over AMQP."""

    amqpint = None
    amqp_connect = True

    def __init__(self, transfer=True, debug_print=False, write_debug=False, **kwargs):
        super().__init__(**kwargs)
        self.dbg_print = debug_print
        self.transfer = transfer
        self.names = set()
        self.write_debug = write_debug

    def dispatchReleaseDbBacked(self, item):
        """Run the DB-stored feed-filter function for this item's source.

        Returns whatever the stored filter returns; when no filter row exists
        for the source, logs an error and returns a no-op callable (original
        behavior preserved — note the callable itself is returned, not called).
        """
        processor_row = self.db_sess.query(db.RssFeedEntry).filter((db.RssFeedEntry.feed_name == item['srcname'])).scalar()
        if not processor_row:
            self.log.error('No feed filter system found for {} from url {}.'.format(item['srcname'], item['linkUrl']))

            def null_func(item):
                return False
            return null_func
        func = processor_row.get_func()
        ret = func(item)
        return ret

    def dispatchRelease(self, item):
        """Parse one feed item into a release dict; False when filtered out."""
        ret = False
        try:
            feed = getCreateRssSource(self.db_sess, item['srcname'], item['linkUrl'])
            db_func = feed.get_func()
            ret = db_func(item)
        except Exception as e:
            print(("Failure when trying to extract item for source '%s'" % item['srcname']))
            raise e
        if ret is None:
            return False
        (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
        if ret:
            assert ('type' in ret)
            assert ('data' in ret)
            if (ret['type'] == 'parsed-release') or (ret['type'] == 'delete-release'):
                reldata = ret['data']
                # Releases with no chapter/volume/postfix information carry no
                # useful position data — drop them.
                if reldata and not (reldata['vol'] or reldata['chp'] or reldata['postfix']):
                    self.log.info("Skipping item due to no chapter/vol/postfix: '%s', '%s', '%s', '%s', '%s', '%s', '%s'", item['srcname'], item['title'], item['tags'], vol, chp, frag, postfix)
                    ret = False
                if 'preview' in item['title'].lower():
                    self.log.info("Skipping item due to preview string: '%s', '%s', '%s', '%s', '%s', '%s', '%s'", item['srcname'], item['title'], item['tags'], vol, chp, frag, postfix)
                    ret = False
                if reldata:
                    assert ('tl_type' in reldata)
        return ret

    def getProcessedReleaseInfo(self, feedDat):
        """JSON-encode the parsed release, or False/None when filtered."""
        if any((item in feedDat['linkUrl']) for item in common.global_constants.RSS_SKIP_FILTER):
            print('Skipping!')
            return
        release = self.dispatchRelease(feedDat)
        if release:
            return json.dumps(release)
        return False

    def getRawFeedMessage(self, feedDat):
        """Build the raw-feed JSON message (article body stripped)."""
        feedDat = feedDat.copy()
        feedDat.pop('contents')
        ret = {'type': 'raw-feed', 'data': feedDat}
        try:
            return json.dumps(ret)
        except TypeError:
            # Non-serializable payload; caller treats None as "nothing to send".
            return None

    def processFeedData(self, session, feedDat, tx_raw=True, tx_parse=True):
        """Entry point: filter, annotate and transmit one feed item."""
        if any((item in feedDat['linkUrl']) for item in common.global_constants.RSS_SKIP_FILTER):
            print(("LinkURL '%s' contains a filtered string. Not fetching!" % feedDat['linkUrl']))
            return
        if any(feedDat['title'].lower().startswith(item) for item in common.global_constants.RSS_TITLE_FILTER):
            # Fix: this branch filters on the *title*; the original message
            # wrongly reported the link URL.
            print(("Title '%s' starts with a filtered string. Not fetching!" % feedDat['title']))
            return
        if feedDat['title'].lower().startswith('by: '):
            self.log.warning("Skipping due to title: '%s'", feedDat['title'])
            return
        netloc = urllib.parse.urlparse(feedDat['linkUrl']).netloc
        nicename = feedNameLut.getNiceName(session, feedDat['linkUrl'])
        if not nicename:
            nicename = netloc
        feedDat['srcname'] = nicename
        if should_ignore_feed_post(feedDat):
            self.log.warning('Skipping due to should_ignore_feed_post')
            return
        if 'feedproxy.google.com' in netloc:
            print('Not sending data for feedproxy netloc: ', netloc)
            return
        try:
            new = self.getProcessedReleaseInfo(feedDat)
        except AssertionError:
            self.log.error('Exception when processing release!')
            for line in traceback.format_exc().split('\n'):
                self.log.error(line.rstrip())
            return
        if tx_parse:
            if new:
                self.log.info('Sending parsed release!')
                self.put_measurement(measurement_name='chapter_releases', measurement=1, fields={}, extra_tags={'site': 'RSS'})
                self.amqp_put_item(new)
        if not WebMirror.rules.netloc_send_feed(netloc):
            print('Not sending raw feed for netloc due to rules: ', netloc)
            return
        raw = self.getRawFeedMessage(feedDat)
        if tx_raw:
            if raw:
                self.amqp_put_item(raw)
class AddMessageToSlack(object):
    """Formats a Falco alert and posts it to Slack via the injected client."""

    # Attachment color per Falco priority; unknown priorities fall back to grey.
    _COLORS = {'Emergency': '#b12737', 'Alert': '#f24141', 'Critical': '#fc7335', 'Error': '#f28143', 'Warning': '#f9c414', 'Notice': '#397ec3', 'Informational': '#8fc0e7', 'Debug': '#8fc0e7'}

    def __init__(self, slack_client):
        self._slack_client = slack_client

    def run(self, alert):
        """Build the Slack payload for *alert*, post it, and return it."""
        payload = self._build_slack_message(alert)
        self._slack_client.post_message(payload)
        return payload

    def _build_slack_message(self, alert):
        # Assemble the attachment field list first, then wrap it in the
        # Slack message envelope.
        fields = [
            {'title': 'Rule', 'value': alert['rule'], 'short': False},
            {'title': 'Priority', 'value': alert['priority'], 'short': True},
            {'title': 'Time', 'value': pendulum.parse(alert['time']).to_rfc822_string(), 'short': True},
            {'title': 'Kubernetes Pod Name', 'value': alert['output_fields']['k8s.pod.name'], 'short': True},
            {'title': 'Container Id', 'value': alert['output_fields']['container.id'], 'short': True},
        ]
        attachment = {'color': self._color_from(alert['priority']), 'fields': fields}
        return {'text': _output_from_alert(alert), 'attachments': [attachment]}

    def _color_from(self, priority):
        return self._COLORS.get(priority, '#eeeeee')
def upgrade():
    """Alembic migration: create the `Invoices` table.

    `id` is both the primary key and a foreign key to `Entities.id`
    (joined-table pattern); `budget_id`/`client_id` link to their tables.
    """
    op.create_table('Invoices', sa.Column('id', sa.Integer(), nullable=False), sa.Column('budget_id', sa.Integer(), nullable=True), sa.Column('client_id', sa.Integer(), nullable=True), sa.Column('amount', sa.Float(), nullable=True), sa.Column('unit', sa.String(length=64), nullable=True), sa.ForeignKeyConstraint(['budget_id'], ['Budgets.id']), sa.ForeignKeyConstraint(['client_id'], ['Clients.id']), sa.ForeignKeyConstraint(['id'], ['Entities.id']), sa.PrimaryKeyConstraint('id'))
class HumanoidImitation(gym.Env):
    """Single-agent gym wrapper around the project imitation environment."""

    def __init__(self, env_config):
        self.base_env = my_env.Env(env_config)
        # This wrapper only supports the single-agent case (agent index 0).
        assert (self.base_env._num_agent == 1)
        # Symmetric bound used only to declare the Box observation spaces.
        ob_scale = 1000.0
        dim_state = self.base_env.dim_state(0)
        dim_state_body = self.base_env.dim_state_body(0)
        dim_state_task = self.base_env.dim_state_task(0)
        # dim_action is queried but unused here — action bounds come from
        # action_range() below.
        dim_action = self.base_env.dim_action(0)
        (action_range_min, action_range_max) = self.base_env.action_range(0)
        self.observation_space = Box(((- ob_scale) * np.ones(dim_state)), (ob_scale * np.ones(dim_state)), dtype=np.float64)
        # Extra spaces exposing the body-only and task-only state partitions.
        self.observation_space_body = Box(((- ob_scale) * np.ones(dim_state_body)), (ob_scale * np.ones(dim_state_body)), dtype=np.float64)
        self.observation_space_task = Box(((- ob_scale) * np.ones(dim_state_task)), (ob_scale * np.ones(dim_state_task)), dtype=np.float64)
        self.action_space = Box(action_range_min, action_range_max, dtype=np.float64)

    def state(self):
        """Current observation for agent 0."""
        return self.base_env.state(idx=0)

    def reset(self, start_time=None, add_noise=None):
        # Lazily create the underlying environment on first reset.
        if (not self.base_env._initialized):
            self.base_env.create()
        self.base_env.reset({'start_time': start_time, 'add_noise': add_noise})
        return self.base_env.state(idx=0)

    def step(self, action):
        """Standard gym step; unwraps the single-agent reward/info."""
        (rew, info) = self.base_env.step([action])
        obs = self.state()
        eoe = self.base_env._end_of_episode
        return (obs, rew[0], eoe, info[0])
# NOTE(review): the two decorator lines below were garbled by extraction —
# they appear to be `@_patch_bwbuild_object('CANCEL_CHECK_PERIOD', 0.5)` and a
# patch of 'copr_backend.sign.SIGN_BINARY' to the fake sign binary (likely
# `@mock.patch(...)`); confirm against the original test module.
_patch_bwbuild_object('CANCEL_CHECK_PERIOD', 0.5)
('copr_backend.sign.SIGN_BINARY', 'tests/fake-bin-sign')
def test_cancel_build_during_log_download(f_build_rpm_sign_on, caplog):
    """The worker should honor a cancel request that arrives while the
    build log is being downloaded."""
    config = f_build_rpm_sign_on
    worker = config.bw
    # The fake log command triggers the cancel request mid-download.
    config.ssh.set_command('copr-rpmbuild-log', 0, 'canceled stdout\n', 'canceled stderr\n', _CancelFunction(worker))
    config.ssh.set_command('copr-rpmbuild-cancel', 0, 'out', 'err')
    worker.process()
    assert_logs_exist(['Cancel request succeeded\nout:\nouterr:\nerr', 'Build was canceled', COMMON_MSGS['not finished']], caplog)
def coconut_base_exec(exec_func, mode, expression, globals=None, locals=None, state=False, **kwargs):
    """Compile a Coconut *expression* in *mode* and run it via *exec_func*.

    A fresh globals dict is created when none is supplied; the command
    runner's variables are merged into it before execution.  *state* selects
    the compiler state object (see get_state).
    """
    command = get_state(state)
    # Lazily initialize the compiler on first use.
    if (command.comp is None):
        setup()
    command.check_runner(set_sys_vars=False)
    if (globals is None):
        globals = {}
    command.runner.update_vars(globals)
    compiled_python = parse(expression, mode, state, **kwargs)
    return exec_func(compiled_python, globals, locals)
class OptionPlotoptionsDumbbellStatesHoverHalo(Options):
    """Highcharts `plotOptions.dumbbell.states.hover.halo` options.

    Getter/setter pairs are properties; the extracted source had lost the
    decorators, so each setter definition shadowed its getter.
    """

    @property
    def attributes(self):
        """SVG attributes applied to the halo element."""
        return self._config_get(None)

    @attributes.setter
    def attributes(self, value: Any):
        self._config(value, js_type=False)

    @property
    def opacity(self):
        """Halo opacity. Defaults to 0.25."""
        return self._config_get(0.25)

    @opacity.setter
    def opacity(self, num: float):
        self._config(num, js_type=False)

    @property
    def size(self):
        """Halo radius in pixels. Defaults to 10."""
        return self._config_get(10)

    @size.setter
    def size(self, num: float):
        self._config(num, js_type=False)
class OptionLabel(DataClass):
    """Label configuration options stored in the underlying attrs dict.

    Each option is a property; the extracted source had lost the
    decorators, so each setter definition shadowed its getter.
    """

    @property
    def enabled(self):
        return self._attrs['enabled']

    @enabled.setter
    def enabled(self, val):
        self._attrs['enabled'] = val

    @property
    def min(self):
        return self._attrs['min']

    @min.setter
    def min(self, val):
        self._attrs['min'] = val

    @property
    def max(self):
        return self._attrs['max']

    @max.setter
    def max(self, val):
        self._attrs['max'] = val

    @property
    def maxVisible(self):
        return self._attrs['maxVisible']

    @maxVisible.setter
    def maxVisible(self, val):
        self._attrs['maxVisible'] = val

    @property
    def drawThreshold(self):
        return self._attrs['drawThreshold']

    @drawThreshold.setter
    def drawThreshold(self, val):
        self._attrs['drawThreshold'] = val
def assign_citations(participants, case_id):
    """Attach parsed statutory and regulatory citations to each participant.

    Reads the case's violation rows and extends the participant's citation
    list for the corresponding stage.  Rows referencing an unknown entity are
    logged and skipped.
    """
    with db.engine.connect() as conn:
        rs = conn.execute(CASE_VIOLATIONS, case_id)
        for row in rs:
            entity_id = row['entity_id']
            if entity_id not in participants:
                # Logger.warn() is deprecated; warning() is the supported spelling.
                logger.warning('Entity %s from violations not found in participants for case %s', entity_id, case_id)
                continue
            # Both citation kinds accumulate on the same per-stage list.
            citations = participants[entity_id]['citations'][row['stage']]
            citations.extend(parse_statutory_citations(row['statutory_citation'], case_id, entity_id))
            citations.extend(parse_regulatory_citations(row['regulatory_citation'], case_id, entity_id))
# NOTE(review): extraction stripped the decorator prefix — this is most
# likely `@bot.message_handler(commands=['start'])`; confirm against the
# original module.
_handler(commands=['start'])
def start_handler(message: types.Message):
    """Reply to /start with a localized greeting and the command list."""
    # `user_fist_name` (sic) must match the placeholder inside the translated
    # string; renaming it would break existing translation catalogs.
    text = _('Hello, {user_fist_name}!\nThis is the example of multilanguage bot.\nAvailable commands:\n\n/lang - change your language\n/plural - pluralization example\n/menu - text menu example')
    text = text.format(user_fist_name=message.from_user.first_name)
    bot.send_message(message.from_user.id, text)
class CompreFace(object):
    """Facade over the CompreFace REST API: holds connection settings and
    constructs the recognition/verification/detection services.

    `domain`, `port` and `options` are properties; the extracted source had
    lost the decorators, so `self.domain` etc. evaluated to bound methods and
    the service constructors below would have received callables instead of
    values.
    """

    def __init__(self, domain: str, port: str, options: AllOptionsDict = {}):
        # NOTE: the mutable {} default is only ever reassigned, never mutated.
        self._domain: str = domain
        self._port: str = port
        self._options: AllOptionsDict = options
        self.recognition: Optional[RecognitionService] = None
        self.verification: Optional[VerificationService] = None
        self.detection: Optional[DetectionService] = None

    @property
    def domain(self):
        return self._domain

    @domain.setter
    def domain(self, domain: str):
        self._domain = domain

    @property
    def port(self):
        return self._port

    @port.setter
    def port(self, port: str):
        self._port = port

    @property
    def options(self):
        return self._options

    @options.setter
    def options(self, options: AllOptionsDict):
        self._options = options

    def init_face_recognition(self, api_key: str) -> RecognitionService:
        """Create and cache the recognition service for *api_key*."""
        self.recognition = RecognitionService(api_key=api_key, domain=self.domain, port=self.port, options=self.options)
        return self.recognition

    def init_face_verification(self, api_key: str) -> VerificationService:
        """Create and cache the verification service for *api_key*."""
        self.verification = VerificationService(api_key=api_key, domain=self.domain, port=self.port, options=self.options)
        return self.verification

    def init_face_detection(self, api_key: str) -> DetectionService:
        """Create and cache the detection service for *api_key*."""
        self.detection = DetectionService(api_key=api_key, domain=self.domain, port=self.port, options=self.options)
        return self.detection
class TorDetector(QThread):
    """Background probe for a local Tor SOCKS proxy.

    Checks the standard Tor (9050) and Tor Browser (9150) ports and emits
    `found_proxy` with the first (host, port) pair that answers like Tor.
    """
    found_proxy = pyqtSignal(object)

    def __init__(self):
        QThread.__init__(self)

    def run(self):
        for port in (9050, 9150):
            pair = ('localhost', port)
            if TorDetector.is_tor_port(pair):
                self.found_proxy.emit(pair)
                return

    @staticmethod
    def is_tor_port(pair):
        """Return True when *pair* answers a bogus HTTP request with Tor's
        well-known error banner.  Always closes the probe socket (the
        original leaked it)."""
        try:
            # The _socketobject branch kept Python 2 compatibility; on
            # Python 3 the expression resolves to socket.socket.
            s = (socket._socketobject if hasattr(socket, '_socketobject') else socket.socket)(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.settimeout(0.1)
                s.connect(pair)
                s.send(b'GET\n')
                return (b'Tor is not an HTTP Proxy' in s.recv(1024))
            finally:
                s.close()  # fix: release the socket on every path
        except socket.error:
            pass
        return False
def add_sticker_to_set(token, user_id, name, sticker):
    """Telegram Bot API `addStickerToSet` call.

    Serializes *sticker* via its own converter (which may yield file uploads)
    and POSTs the request through the shared _make_request helper.
    """
    method_url = 'addStickerToSet'
    # convert_input_sticker() returns the JSON payload plus any file parts.
    (json_dict, files) = sticker.convert_input_sticker()
    payload = {'user_id': user_id, 'name': name, 'sticker': json_dict}
    return _make_request(token, method_url, params=payload, files=files, method='post')
class OptionSeriesDumbbellSonificationContexttracks(Options):
    """Highcharts `series.dumbbell.sonification.contextTracks` options.

    Getter/setter pairs are properties; the extracted source had lost the
    decorators, so each setter definition shadowed its getter.
    """

    @property
    def activeWhen(self) -> 'OptionSeriesDumbbellSonificationContexttracksActivewhen':
        """Sub-options controlling when this track is active."""
        return self._config_sub_data('activeWhen', OptionSeriesDumbbellSonificationContexttracksActivewhen)

    @property
    def instrument(self):
        """Instrument preset. Defaults to 'piano'."""
        return self._config_get('piano')

    @instrument.setter
    def instrument(self, text: str):
        self._config(text, js_type=False)

    @property
    def mapping(self) -> 'OptionSeriesDumbbellSonificationContexttracksMapping':
        """Sub-options for audio parameter mapping."""
        return self._config_sub_data('mapping', OptionSeriesDumbbellSonificationContexttracksMapping)

    @property
    def midiName(self):
        return self._config_get(None)

    @midiName.setter
    def midiName(self, text: str):
        self._config(text, js_type=False)

    @property
    def pointGrouping(self) -> 'OptionSeriesDumbbellSonificationContexttracksPointgrouping':
        """Sub-options for grouping points."""
        return self._config_sub_data('pointGrouping', OptionSeriesDumbbellSonificationContexttracksPointgrouping)

    @property
    def roundToMusicalNotes(self):
        """Defaults to True."""
        return self._config_get(True)

    @roundToMusicalNotes.setter
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def showPlayMarker(self):
        """Defaults to True."""
        return self._config_get(True)

    @showPlayMarker.setter
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def timeInterval(self):
        return self._config_get(None)

    @timeInterval.setter
    def timeInterval(self, num: float):
        self._config(num, js_type=False)

    @property
    def type(self):
        """Defaults to 'instrument'."""
        return self._config_get('instrument')

    @type.setter
    def type(self, text: str):
        self._config(text, js_type=False)

    @property
    def valueInterval(self):
        return self._config_get(None)

    @valueInterval.setter
    def valueInterval(self, num: float):
        self._config(num, js_type=False)

    @property
    def valueMapFunction(self):
        """Defaults to 'linear'."""
        return self._config_get('linear')

    @valueMapFunction.setter
    def valueMapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def valueProp(self):
        """Defaults to '"x"'."""
        return self._config_get('"x"')

    @valueProp.setter
    def valueProp(self, text: str):
        self._config(text, js_type=False)
class Feed(App):
    """Textual TUI application: an RSS reader with a tree of feeds on the
    left and the selected entry rendered as Markdown on the right."""
    TITLE = 'Meu leitor de RSS!'

    def compose(self):
        # Header with clock, then the feed tree next to a scrollable markdown view.
        (yield Header(show_clock=True))
        with Horizontal():
            (yield feed_parser())
            with VerticalScroll():
                (yield Markdown(id='md'))

    def on_tree_node_selected(self, event: Tree.NodeSelected):
        """Render the selected node's HTML payload as Markdown."""
        self.log('Entrei aqui!!!!')
        # Only leaf nodes carry article data; category nodes have data=None.
        if event.node.data:
            element: Markdown = self.query_one('#md')
            element.action_scroll_home()
            element.update(f'''# {event.node.label}
{markdownify(event.node.data)}''')
class Daemon(amp.AMP):
    """Twisted AMP protocol linking a local client to the daemon process."""

    def __init__(self, client):
        super().__init__()
        self.client = client

    def connectionMade(self):
        super().connectionMade()
        self.client.connected(self)

    def connectionLost(self, reason):
        super().connectionLost(reason)
        self.client.disconnected(self)

    # NOTE(review): the bare `.responder` lines below are garbled decorators —
    # originally `@commands.<SomeCommand>.responder`; confirm which AMP
    # commands these respond to against the original module.
    .responder
    def client_connected(self, handle):
        self.client.amp_client_connected(handle)
        return {}

    .responder
    def client_disconnected(self, handle):
        self.client.amp_client_disconnected(handle)
        return {}

    def enumerate_clients(self):
        """Return a Deferred firing with the list of remote client handles."""
        dfr = self.callRemote(commands.EnumerateClients)
        dfr.addCallback((lambda r: r['handles']))
        return dfr

    def command(self, handle, command):
        """Run *command* on the remote client identified by *handle*.

        Large results arrive as a temp-file path; the file is read and then
        deleted before the contents are returned.
        """
        dfr = self.callRemote(commands.ClientCommand, handle=handle, command=command)

        def got_result(response):
            filename = response['filename']
            if filename:
                result = open(filename, 'rb').read()
                os.unlink(filename)
                return result
            return response['result']
        dfr.addCallback(got_result)
        return dfr
class OptionPlotoptionsSplineSonificationTracksMappingHighpassResonance(Options):
    """Highcharts highpass-filter resonance mapping options.

    Getter/setter pairs are properties; the extracted source had lost the
    decorators, so each setter definition shadowed its getter.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def _is_num_param(names, values, to_float=False): fun = ((to_float and float) or int) out_params = [] for (name, val) in zip(names, values): if (val is None): out_params.append(val) elif isinstance(val, (int, int, float, str)): try: out_params.append(fun(val)) except ValueError as e: raise VdtParamError(name, val) else: raise VdtParamError(name, val) return out_params
class _Border():
    """Border description (width, pen style type, color) applied lazily to a
    graphics item via apply()."""

    def __init__(self):
        self.width = None   # None means "no border"
        self.type = 0       # pen style code consumed by set_pen_style
        self.color = None   # None -> invisible pen

    def apply(self, item):
        """Attach a border rectangle around *item*'s bounding rect.

        Returns the created QGraphicsRectItem, or None when no width is set.
        """
        if self.width is None:
            return None
        rect_item = QGraphicsRectItem(item.boundingRect())
        rect_item.setParentItem(item)
        pen = QPen(QColor(self.color)) if self.color else QPen(Qt.PenStyle.NoPen)
        set_pen_style(pen, self.type)
        pen.setWidth(self.width)
        pen.setCapStyle(Qt.PenCapStyle.FlatCap)
        rect_item.setPen(pen)
        return rect_item
class CssDivPage(CssStyle.Style):
    """Page-like div style: padded container with a themed glow shadow and a
    folded-corner effect drawn via the ::before pseudo element."""
    _attrs = {'position': 'relative', 'padding': '3%'}
    _before = {'content': "''", 'position': 'absolute', 'top': '-4px', 'right': '-4px', 'width': 0}

    def customize(self):
        # Resting shadow uses the darkest grey; hover swaps to the last theme color.
        rgb = Colors.getHexToRgb(self.page.theme.greys[(- 1)])
        rgb_color = Colors.getHexToRgb(self.page.theme.colors[(- 1)])
        self.css({'box-shadow': ('0 0 %(size)spx rgba(%(r)s, %(g)s, %(b)s, %(opac)s)' % {'r': rgb[0], 'g': rgb[1], 'b': rgb[2], 'opac': 0.5, 'size': 5})})
        self.hover.css({'box-shadow': ('0 0 %(size)spx rgba(%(r)s, %(g)s, %(b)s, %(opac)s)' % {'r': rgb_color[0], 'g': rgb_color[1], 'b': rgb_color[2], 'opac': 0.8, 'size': 5})})
        # Two 20px borders on the ::before element form the folded top-right corner.
        self.before.css({'border-top': ('20px solid %s' % self.page.theme.greys[0]), 'border-left': ('20px solid %s' % self.page.theme.colors[2])})
class WeekCal(IntervalModule):
    """i3pystatus module rendering the current week as a one-line calendar,
    with today's date wrapped in highlight characters."""
    settings = (('startofweek', 'First day of the week (0 = Monday, 6 = Sunday), defaults to 0.'), ('prefixformat', 'Prefix in strftime-format'), ('suffixformat', 'Suffix in strftime-format'), ('todayhighlight', "Characters to highlight today's date"))
    startofweek = 0
    interval = 30
    prefixformat = '%a'
    suffixformat = '%b %Y'
    todayhighlight = ('[', ']')

    def __init__(self, *args, **kwargs):
        IntervalModule.__init__(self, *args, **kwargs)
        self.cal = Calendar(self.startofweek)

    def run(self):
        today = date.today()
        yesterday = (today - timedelta(days=1))
        outstr = (today.strftime(self.prefixformat) + ' ')
        weekdays = self.cal.iterweekdays()
        # The opening bracket precedes the first cell only when today is the
        # first day of the displayed week; otherwise it is emitted after
        # yesterday's cell inside the loop.
        if (today.weekday() == self.startofweek):
            outstr += self.todayhighlight[0]
        else:
            outstr += ' '
        nextweek = False
        for w in weekdays:
            # Translate each weekday number into an offset from today,
            # wrapping around the configured start of week.
            if ((w == 0) and (self.startofweek != 0)):
                nextweek = True
            if (nextweek and (today.weekday() >= self.startofweek)):
                w += 7
            elif ((not nextweek) and (today.weekday() < self.startofweek)):
                w -= 7
            weekday_offset = (today.weekday() - w)
            weekday_delta = timedelta(days=weekday_offset)
            weekday = (today - weekday_delta)
            # Highlight brackets are emitted between cells: the opening char
            # after yesterday's number, the closing char after today's.
            if (weekday == yesterday):
                outstr += (weekday.strftime('%d') + self.todayhighlight[0])
            elif (weekday == today):
                outstr += (weekday.strftime('%d') + self.todayhighlight[1])
            else:
                outstr += weekday.strftime('%d ')
        outstr += (' ' + today.strftime(self.suffixformat))
        self.output = {'full_text': outstr, 'urgent': False}
# NOTE(review): extraction dropped the decorator name — this is almost
# certainly `@pytest.fixture()`; confirm against the original test module.
()
def rds_describe_instances() -> Generator:
    """Fixture yielding a canned RDS `describe_db_instances` response with a
    single MySQL instance (used to stub the AWS API in tests)."""
    describe_instances = {'DBInstances': [{'DBInstanceIdentifier': 'database-1', 'Endpoint': {'Address': 'database-1.cjh1qplnnv3b.us-east-1.rds.amazonaws.com', 'Port': 3306}, 'DBInstanceArn': 'arn:aws:rds:us-east-1::db:database-1'}]}
    (yield describe_instances)
# NOTE(review): `_from_env` is a garbled decorator (extraction stripped the
# `@`); it presumably reads defaults from environment variables — confirm.
_from_env
def set_stdoutlog(richoutput: bool=sys.stdout.isatty(), verbose: int=4):
    """Install a treelog stdout logger filtered to *verbose* (1=error .. 4=info).

    NOTE(review): the `richoutput` default is evaluated once at import time,
    not per call — confirm that is intended.
    """
    try:
        Level = treelog.proto.Level
    except AttributeError:
        # Older treelog without proto levels: fall back to numeric codes.
        levels = (4, 3, 2, 1)
    else:
        levels = (Level.error, Level.warning, Level.user, Level.info)
    stdoutlog = (treelog.RichOutputLog() if richoutput else treelog.StdoutLog())
    # Out-of-range verbosity leaves the log unfiltered.
    if (0 <= (verbose - 1) < len(levels)):
        stdoutlog = treelog.FilterLog(stdoutlog, minlevel=levels[(verbose - 1)])
    return treelog.set(stdoutlog)
def getUserToken(username, password):
    """Log in to the OpenSubtitles REST API and return the session token.

    On any failure, prints the error (with the line number it occurred on)
    and implicitly returns None.
    """
    try:
        headers = {'User-Agent': 'OpenSubtitlesDownload v6.0', 'Api-key': f'{API_KEY}', 'Accept': 'application/json', 'Content-Type': 'application/json'}
        payload = {'username': username, 'password': password}
        data = json.dumps(payload).encode('utf-8')
        req = urllib.request.Request(API_URL_LOGIN_ENDPOINT, data=data, headers=headers)
        with urllib.request.urlopen(req) as response:
            response_data = json.loads(response.read().decode('utf-8'))
            return response_data['token']
    except Exception:
        # Broad catch keeps the CLI alive; the traceback line number is
        # included to aid debugging.
        print(((('Unexpected error (line ' + str(sys.exc_info()[(- 1)].tb_lineno)) + '): ') + str(sys.exc_info()[0])))
def _normalize_type_stringmap(k, v):
    """Normalize metadata field *k* with raw value *v* into a
    {name: {locale: description}} dict.

    Accepted shapes for *v*: None (-> empty dict), a scalar (-> name with no
    descriptions), a list/tuple/set of names or one-entry {name: description}
    dicts, or a full {name: descriptions} mapping.
    """
    if (v is None):
        return dict()
    if (isinstance(v, str) or isinstance(v, int) or isinstance(v, float)):
        # Bare scalar: a single name with no localized descriptions.
        return {_normalize_type_string(v): dict()}
    if (isinstance(v, list) or isinstance(v, tuple) or isinstance(v, set)):
        retdict = dict()
        for i in v:
            if isinstance(i, dict):
                # A list entry that is a dict must be exactly {name: description}.
                if (len(i) != 1):
                    _warn_or_exception(_("'{value}' is not a valid {field}, should be {pattern}").format(field=k, value=v, pattern='key: value'))
                afname = _normalize_type_string(next(iter(i)))
                desc = _normalize_type_string(next(iter(i.values())))
                # Plain descriptions are filed under the default locale.
                retdict[afname] = {common.DEFAULT_LOCALE: desc}
            else:
                retdict[_normalize_type_string(i)] = {}
        return retdict
    # Remaining case: a mapping of name -> descriptions (dict or scalar).
    retdict = dict()
    for (af, afdict) in v.items():
        key = _normalize_type_string(af)
        if afdict:
            if isinstance(afdict, dict):
                # Already locale-keyed; keep as-is.
                retdict[key] = afdict
            else:
                retdict[key] = {common.DEFAULT_LOCALE: _normalize_type_string(afdict)}
        else:
            retdict[key] = dict()
    return retdict
class FixGRPCImports(Rule):
    """Refactor rule rewriting `import foo_pb2` into
    `from <package> import foo_pb2`, resolving the package from the
    configured known-imports map or falling back to the current file's
    directory path."""

    def match(self, node: ast.AST) -> actions.Replace:
        # Only single, non-google `*_pb2` imports qualify.  The asserts are
        # presumably treated by the rule framework as "no match" — confirm.
        assert isinstance(node, ast.Import)
        assert (len(node.names) == 1)
        assert (not node.names[0].name.startswith('google'))
        assert node.names[0].name.endswith('_pb2')
        qualified_name = self.context.config.known_imports.get(node.names[0].name[:(- len('_pb2'))])
        if (not qualified_name):
            # Unknown proto: assume the generated module sits next to this file.
            parent_dir = _as_relative_path(self.context.file).parent
            qualified_name = ('.'.join(parent_dir.parts) or '.')
        return actions.Replace(node, ast.ImportFrom(module=qualified_name, names=node.names, level=0))
class ThreadState():
    """Per-thread interpreter state: wraps a State (interp/db/display) with a
    variable namespace, an access level and a call stacktrace.

    `from_components`/`clone` are classmethods and `interp`/`db`/`display`
    are properties; the decorators were lost in the extracted source and are
    restored here (their `cls`-first signatures show the intent).
    """

    def __init__(self, state, ns=None, *, autocommit=False):
        self.state = state
        self.ns = Namespace(ns)
        self.access_level = AccessLevels.WRITE_DB
        self.stacktrace = []
        self.autocommit = autocommit

    @classmethod
    def from_components(cls, interp, db, display, ns=None, *, autocommit):
        """Alternate constructor assembling the underlying State first."""
        state = State(interp, db, display)
        return cls(state, ns, autocommit=autocommit)

    @property
    def interp(self):
        return self.state.interp

    @property
    def db(self):
        return self.state.db

    @property
    def display(self):
        return self.state.display

    @classmethod
    def clone(cls, inst):
        """Shallow clone sharing the State but copying mutable members."""
        s = cls(inst.state)
        s.ns = copy(inst.ns)
        s.access_level = inst.access_level
        s.stacktrace = copy(inst.stacktrace)
        s.autocommit = inst.autocommit
        return s

    def limit_access(self, new_level):
        """Return a copy whose access level is at most *new_level*."""
        return self.reduce_access(min(new_level, self.access_level))

    def reduce_access(self, new_level):
        """Return a copy at the strictly lower-or-equal *new_level*."""
        assert (new_level <= self.access_level)
        s = copy(self)
        s.access_level = new_level
        return s

    def set_autocommit(self, autocommit):
        """Return a copy with autocommit toggled."""
        s = copy(self)
        s.autocommit = autocommit
        return s

    def require_access(self, level):
        """Raise InsufficientAccessLevel unless *level* is permitted."""
        if (self.access_level < level):
            raise InsufficientAccessLevel(level)

    def catch_access(self, level):
        # Hard failure: this should never trigger in correct code paths.
        if (self.access_level < level):
            raise Exception('Bad access. Security risk.')

    def get_all_vars(self):
        return self.ns.get_all_vars()

    def get_all_vars_with_rank(self):
        return self.ns.get_all_vars_with_rank()

    def has_var(self, name):
        """True when *name* resolves in the namespace (builtins not consulted)."""
        try:
            self.ns.get_var(name)
        except NameNotFound:
            return False
        return True

    def get_var(self, name):
        """Resolve *name*, falling back to the __builtins__ module namespace."""
        try:
            return self.ns.get_var(name)
        except NameNotFound:
            builtins = self.ns.get_var('__builtins__')
            assert (builtins.type <= T.module)
            try:
                return builtins.namespace[name]
            except KeyError:
                pass
            raise Signal.make(T.NameError, name, f"Name '{name!r}' is not defined")

    def set_var(self, name, value):
        try:
            return self.ns.set_var(name, value)
        except NameNotFound as e:
            raise Signal.make(T.NameError, None, str(e))

    def use_scope(self, scope: dict):
        return self.ns.use_scope(scope)

    def __copy__(self):
        # copy.copy(state) routes through clone() to deep-copy mutable members.
        return self.clone(self)

    def connect(self, uri, auto_create=False):
        return self.state.connect(uri, auto_create)

    def unique_name(self, obj):
        return self.state.unique_name(obj)
# NOTE(review): the four lines below are click decorators mangled by
# extraction — originally along the lines of
# `@click.group(context_settings=..., invoke_without_command=True, name='fides')`,
# `@click.version_option(version=VERSION)`, two `@click.option(...)` calls and
# `@click.pass_context`; confirm against the original module.
(context_settings=CONTEXT_SETTINGS, invoke_without_command=True, name='fides')
_option(version=VERSION)
('--config-path', '-f', 'config_path', show_default=True, help='Path to a Fides config file. _Defaults to `.fides/fides.toml`._')
('--local', is_flag=True, help='Run in `local_mode`. Where possible, this will force commands to run without the need for a server.')
_context
def cli(ctx: Context, config_path: str, local: bool) -> None:
    """Root CLI group: loads config, enforces local-mode restrictions,
    optionally checks the server version, and wires up analytics."""
    ctx.ensure_object(dict)
    config = get_config(config_path, verbose=True)
    command = (ctx.invoked_subcommand or '')
    # --local flag or config both force local mode.
    if (not (local or config.cli.local_mode)):
        config.cli.local_mode = False
    else:
        config.cli.local_mode = True
    # Commands needing a server are rejected in local mode.
    if (config.cli.local_mode and (command not in LOCAL_COMMAND_NAMES)):
        raise LocalModeException(command)
    # Bare `fides` with no subcommand prints help.
    if (not command):
        echo(cli.get_help(ctx))
    if (command in SERVER_CHECK_COMMAND_NAMES):
        check_server(VERSION, str(config.cli.server_url), quiet=True)
    no_analytics = config.user.analytics_opt_out
    if (not no_analytics):
        ctx.meta['ANALYTICS_CLIENT'] = AnalyticsClient(client_id=config.cli.analytics_id, developer_mode=config.test_mode, os=system(), product_name=(APP + '-cli'), production_version=version(PACKAGE))
    ctx.obj['CONFIG'] = config
class WheelMetadataExtractor(LocalMetadataExtractor):
    """Extracts packaging metadata (requirements, scripts, description, ...)
    from a wheel archive's JSON metadata and RECORD data.

    NOTE(review): several accessors below are used attribute-style elsewhere
    (e.g. ``self.json_metadata.get(...)``) and the bare ``_description`` name
    before ``description`` looks like a stripped decorator — the ``@property``
    (and related) decorators appear to have been lost in extraction.
    """

    def json_metadata(self):
        # Lazily parse and cache the wheel's JSON metadata on first access.
        if (not hasattr(self, '_json_metadata')):
            self._json_metadata = self.archive.json_wheel_metadata
        return self._json_metadata

    def get_requires(self, requires_types):
        """Collect requirement strings from the given metadata keys, skipping
        requirement groups whose environment marker mentions 'win'."""
        if (not isinstance(requires_types, list)):
            requires_types = list(requires_types)
        extracted_requires = []
        for requires_name in requires_types:
            for requires in self.json_metadata.get(requires_name, []):
                # Skip Windows-only requirement groups.
                if ('win' in requires.get('environment', {})):
                    continue
                extracted_requires.extend(requires['requires'])
        return extracted_requires

    def runtime_deps(self):
        """Runtime dependency list; setuptools is always force-included."""
        run_requires = self.get_requires(['run_requires', 'meta_requires'])
        if ('setuptools' not in run_requires):
            run_requires.append('setuptools')
        return self.name_convert_deps_list(deps_from_pydit_json(run_requires))

    def build_deps(self):
        """Build dependency list; test/run requires are added when the
        package ships a test suite."""
        build_requires = self.get_requires(['build_requires'])
        if self.has_test_suite:
            build_requires += self.get_requires(['test_requires', 'run_requires'])
        if ('setuptools' not in build_requires):
            build_requires.append('setuptools')
        return self.name_convert_deps_list(deps_from_pydit_json(build_requires, runtime=False))

    def py_modules(self):
        return self.archive.record.get('modules')

    def scripts(self):
        return self.archive.record.get('scripts', [])

    def home_page(self):
        # First project URL from python.details, if any.
        urls = [url for url in self.json_metadata.get('extensions', {}).get('python.details', {}).get('project_urls', {}).values()]
        if urls:
            return urls[0]
    # NOTE(review): stray name below is likely a stripped decorator remnant.
    _description

    def description(self):
        return self.archive.wheel_description()

    def summary(self):
        return self.json_metadata.get('summary', None)

    def classifiers(self):
        return self.json_metadata.get('classifiers', [])

    def license(self):
        return self.json_metadata.get('license', None)

    def has_test_suite(self):
        # True when test files exist or metadata declares test_requires.
        return (self.has_test_files or (self.json_metadata.get('test_requires', False) is not False))

    def doc_files(self):
        return self.json_metadata.get('extensions', {}).get('python.details', {}).get('document_names', {}).values()
class OptionPlotoptionsWordcloudSonificationTracksMappingLowpassResonance(Options):
    """Accessors for wordcloud sonification lowpass-resonance mapping options.

    NOTE(review): each pair of identically named methods below looks like a
    ``@property`` getter followed by its setter with the decorators lost in
    extraction; as written, the second definition shadows the first.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def test_performative_string_value():
    """Each GymMessage performative must stringify to its lowercase name."""
    expected = [
        (GymMessage.Performative.ACT, 'act'),
        (GymMessage.Performative.PERCEPT, 'percept'),
        (GymMessage.Performative.STATUS, 'status'),
        (GymMessage.Performative.RESET, 'reset'),
        (GymMessage.Performative.CLOSE, 'close'),
    ]
    for performative, text in expected:
        assert str(performative) == text, 'The str value must be {}'.format(text)
class SplitComplementary(Harmony):
    """Harmony of a base color plus its two split-complementary hues
    (base hue rotated by +210 and -210 degrees)."""

    def harmonize(self, color: 'Color', space: str) -> List['Color']:
        """Return [base, +210°, -210°] colors, converted back to the
        requested output space when the working cylinder differs."""
        base = self.get_cylinder(color, space)
        output = space
        space = base.space()
        hue_channel = base._space.hue_name()
        # Bind each offset as a lambda default to avoid late binding.
        colors = [base]
        for offset in (210, -210):
            colors.append(base.clone().set(hue_channel, (lambda x, d=offset: adjust_hue(x, d))))
        if output != space:
            colors = [color.new(c.convert(output, in_place=True)) for c in colors]
        return colors
class ModeField(object):
    """Descriptor for a mode-valued attribute with read/write IO control.

    Values may be assigned as ``Mode`` instances or mode strings; the field's
    IO annotation (looked up from the owning class via ``anno_for``) governs
    whether reads/writes are permitted.
    """
    __slots__ = ('default', 'value', 'name', 'io')

    def __init__(self, default):
        # Default mode returned before any explicit value is set.
        self.default = default

    def __set_name__(self, cls, name):
        if (name is not None):
            self.name = name
            self.value = None
            # IO kind (RO/WO/RW) comes from the owning class's annotations.
            self.io = anno_for(cls, name, Field.RW)

    def __get__(self, instance=None, cls=None):
        if (instance is not None):
            if (self.io is Field.WO):
                raise FieldIOError(f'cant access write-only field {self.name}')
        if isclasstype(cls):
            return self.get()

    def __set__(self, instance, value):
        if (self.io is Field.RO):
            if (value != self.value):
                # BUG FIX: the error was previously constructed but never
                # raised, so writes to read-only fields were silently applied.
                raise FieldIOError(f'cant set read-only field {self.name}')
        self.set(value)

    def value_from_instance(self, instance):
        pass

    def get(self):
        # Current value, falling back to the default (project `attr` helper).
        return attr(self, 'value', 'default')

    def set(self, value):
        """Store a new mode; accepts None, a mode string, or a Mode."""
        if (value is None):
            self.value = value
            return
        # Coerce strings to Mode before validating.
        if (type(value) in string_types):
            value = Mode.for_string(value)
        if Mode.is_mode(value):
            if (value is not self.default):
                self.value = value
                return
        else:
            raise TypeError(('cant set invalid mode: %s (%s)' % (type(value), value)))
def test_example_not_following_basemodel():
    """Spancat task creation must reject examples whose entities field is a
    list instead of the expected dict."""
    bad_examples = [
        {
            'text': "I'm a wrong example. Entities should be a dict, not a list",
            'entities': [('PER', 'Entities'), ('ORG', ('dict', 'list'))],
        }
    ]
    with make_tempdir() as tmpdir:
        example_path = tmpdir / 'wrong_example.yml'
        srsly.write_yaml(example_path, bad_examples)
        with pytest.raises(ValueError):
            make_spancat_task_v2(labels='PER,ORG,LOC', examples=fewshot_reader(example_path))
def main(url, model='small', language=None, interval=5, history_buffer_size=0, preferred_quality='audio_only', use_vad=True, direct_url=False, faster_whisper_args=None, **decode_options):
    """Transcribe a live audio stream.

    Reads raw PCM from an ffmpeg pipe in fixed-size intervals, optionally
    filters silence with VAD, and decodes each chunk with whisper or
    faster-whisper, printing timestamped transcriptions until the stream ends.
    """
    # Bytes per interval: 16-bit (2-byte) samples at SAMPLE_RATE.
    n_bytes = ((interval * SAMPLE_RATE) * 2)
    # Ring buffers keep a bounded history of audio and decoded text used as
    # prefix context for the next whisper pass.
    audio_buffer = RingBuffer(((history_buffer_size // interval) + 1))
    previous_text = RingBuffer((history_buffer_size // interval))
    print('Loading model...')
    if faster_whisper_args:
        from faster_whisper import WhisperModel
        model = WhisperModel(faster_whisper_args['model_path'], device=faster_whisper_args['device'], compute_type=faster_whisper_args['compute_type'])
    else:
        model = whisper.load_model(model)
    if use_vad:
        from vad import VAD
        vad = VAD()
    print('Opening stream...')
    (ffmpeg_process, streamlink_process) = open_stream(url, direct_url, preferred_quality)

    def handler(signum, frame):
        # Ctrl+C: kill child processes before exiting.
        ffmpeg_process.kill()
        if streamlink_process:
            streamlink_process.kill()
        sys.exit(0)
    signal.signal(signal.SIGINT, handler)
    try:
        while (ffmpeg_process.poll() is None):
            in_bytes = ffmpeg_process.stdout.read(n_bytes)
            if (not in_bytes):
                break
            # int16 PCM -> float32 samples in [-1, 1].
            audio = (np.frombuffer(in_bytes, np.int16).flatten().astype(np.float32) / 32768.0)
            # Skip silent chunks entirely (only a timestamp is printed).
            if (use_vad and vad.no_speech(audio)):
                print(f"{datetime.now().strftime('%H:%M:%S')}")
                continue
            audio_buffer.append(audio)
            clear_buffers = False
            if faster_whisper_args:
                (segments, info) = model.transcribe(audio, language=language, **decode_options)
                decoded_language = ('' if language else (('(' + info.language) + ')'))
                decoded_text = ''
                previous_segment = ''
                # De-duplicate consecutive identical segments.
                for segment in segments:
                    if (segment.text != previous_segment):
                        decoded_text += segment.text
                        previous_segment = segment.text
                new_prefix = decoded_text
            else:
                # Classic whisper: decode the buffered history with the prior
                # text as a decoding prefix.
                result = model.transcribe(np.concatenate(audio_buffer.get_all()), prefix=''.join(previous_text.get_all()), language=language, without_timestamps=True, **decode_options)
                decoded_language = ('' if language else (('(' + result.get('language')) + ')'))
                decoded_text = result.get('text')
                new_prefix = ''
                for segment in result['segments']:
                    # Keep only confident segments as prefix context; a bad
                    # segment triggers a buffer reset.
                    if ((segment['temperature'] < 0.5) and (segment['no_speech_prob'] < 0.6)):
                        new_prefix += segment['text']
                    else:
                        clear_buffers = True
            previous_text.append(new_prefix)
            # Reset history on low-confidence output or repetition loops.
            if (clear_buffers or previous_text.has_repetition()):
                audio_buffer.clear()
                previous_text.clear()
            print(f"{datetime.now().strftime('%H:%M:%S')} {decoded_language} {decoded_text}")
        print('Stream ended')
    finally:
        ffmpeg_process.kill()
        if streamlink_process:
            streamlink_process.kill()
class UI_PT_Panel_Colors(Panel):
    """TexTools 'Color ID' panel for the image editor sidebar."""
    bl_label = ' '
    bl_space_type = 'IMAGE_EDITOR'
    bl_region_type = 'UI'
    bl_category = 'TexTools'
    bl_options = {'DEFAULT_CLOSED'}

    def draw_header(self, _):
        """Draw the header: optional help link plus the panel title."""
        layout = self.layout
        row = layout.row(align=True)
        if bpy.context.preferences.addons[__package__].preferences.bool_help:
            # NOTE(review): the help URL literal was lost in extraction —
            # restore the original documentation link before use.
            row.operator('wm.url_open', text='', icon='INFO').url = ''
        row.label(text='Color ID')

    def draw(self, context):
        """Draw the color-ID swatch grid and conversion controls."""
        layout = self.layout
        # Color ID tools require the Cycles or Eevee render engine.
        if ((bpy.context.scene.render.engine != 'CYCLES') and (bpy.context.scene.render.engine != 'BLENDER_EEVEE')):
            row = layout.row(align=True)
            row.alert = True
            row.operator('uv.op_enable_cycles', text="Enable 'CYCLES'", icon='CANCEL')
            return
        box = layout.box()
        col = box.column(align=True)

        def color_mode_icon():
            # Icon reflects the current assign mode (materials vs vertex colors).
            if (context.scene.texToolsSettings.color_assign_mode == 'MATERIALS'):
                return icon_get('op_color_from_materials')
            else:
                return icon_get('op_color_convert_vertex_colors')
        row = col.row(align=True)
        split = row.split(factor=0.25, align=True)
        c = split.column(align=True)
        c.label(text='Mode:')
        c = split.column(align=True)
        c.prop(context.scene.texToolsSettings, 'color_assign_mode', text='', icon_value=color_mode_icon())
        col.separator()
        row = col.row(align=True)
        split = row.split(factor=0.6, align=True)
        c = split.column(align=True)
        c.prop(context.scene.texToolsSettings, 'color_ID_templates', text='')
        c = split.column(align=True)
        c.prop(context.scene.texToolsSettings, 'color_ID_count', text='', expand=False)
        row = box.row(align=True)
        row.operator(op_color_clear.op.bl_idname, text='Clear', icon='X')
        row.menu(UI_MT_op_color_dropdown_io.bl_idname, icon='COLOR')
        # Lay the swatches out in rows of at most five columns, padding the
        # final row with empty cells.
        max_columns = 5
        if (context.scene.texToolsSettings.color_ID_count < max_columns):
            max_columns = context.scene.texToolsSettings.color_ID_count
        count = (math.ceil((context.scene.texToolsSettings.color_ID_count / max_columns)) * max_columns)
        for i in range(count):
            if ((i % max_columns) == 0):
                row = box.row(align=True)
            col = row.column(align=True)
            if (i < context.scene.texToolsSettings.color_ID_count):
                col.prop(context.scene.texToolsSettings, 'color_ID_color_{}'.format(i), text='')
                col.operator(op_color_assign.op.bl_idname, text='', icon='FILE_TICK').index = i
                # Select-by-color is only offered in MATERIALS mode with a
                # single selected mesh object.
                if bpy.context.active_object:
                    if (bpy.context.active_object in bpy.context.selected_objects):
                        if (len(bpy.context.selected_objects) == 1):
                            if ((bpy.context.active_object.type == 'MESH') and (context.scene.texToolsSettings.color_assign_mode == 'MATERIALS')):
                                col.operator(op_color_select.op.bl_idname, text='', icon='FACESEL').index = i
            else:
                col.label(text=' ')
        col = box.column(align=True)
        col.label(text='Convert:')
        row = col.row(align=True)
        row.menu(UI_MT_op_color_dropdown_convert_from.bl_idname)
        row.menu(UI_MT_op_color_dropdown_convert_to.bl_idname)
class OptionSeriesSplineStatesSelect(Options):
    """Accessors for spline-series 'select' state options.

    NOTE(review): the getter/setter pairs below appear to have lost their
    ``@property`` / setter decorators in extraction.
    """

    def animation(self) -> 'OptionSeriesSplineStatesSelectAnimation':
        # Sub-options object for the select-state animation.
        return self._config_sub_data('animation', OptionSeriesSplineStatesSelectAnimation)

    def enabled(self):
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def halo(self) -> 'OptionSeriesSplineStatesSelectHalo':
        return self._config_sub_data('halo', OptionSeriesSplineStatesSelectHalo)

    def lineWidth(self):
        return self._config_get(None)

    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    def lineWidthPlus(self):
        return self._config_get(1)

    def lineWidthPlus(self, num: float):
        self._config(num, js_type=False)

    def marker(self) -> 'OptionSeriesSplineStatesSelectMarker':
        return self._config_sub_data('marker', OptionSeriesSplineStatesSelectMarker)
def bulkCmd(snmpEngine, authData, transportTarget, contextData, nonRepeaters, maxRepetitions, *varBinds, **options):
    """Issue an SNMP GETBULK request and return a Twisted Deferred.

    The Deferred fires with ``(errorStatus, errorIndex, varBindTable)`` on
    success and errbacks with a ``Failure`` wrapping the error indication
    (or any MIB-resolution exception) otherwise.

    Options:
        lookupMib (bool): resolve var-binds against loaded MIBs (default True).
        ignoreNonIncreasingOid (bool): suppress OID-not-increasing errors.
    """

    def __cbFun(snmpEngine, sendRequestHandle, errorIndication, errorStatus, errorIndex, varBindTable, cbCtx):
        (lookupMib, deferred) = cbCtx
        # Optionally tolerate agents that return non-increasing OIDs.
        if (options.get('ignoreNonIncreasingOid', False) and errorIndication and isinstance(errorIndication, errind.OidNotIncreasing)):
            errorIndication = None
        if errorIndication:
            deferred.errback(Failure(errorIndication))
        else:
            try:
                varBindTable = [VB_PROCESSOR.unmakeVarBinds(snmpEngine.cache, varBindTableRow, lookupMib) for varBindTableRow in varBindTable]
            except Exception as e:
                deferred.errback(Failure(e))
            else:
                deferred.callback((errorStatus, errorIndex, varBindTable))
    # Ensure LCD configuration exists for these credentials and transport.
    (addrName, paramsName) = LCD.configure(snmpEngine, authData, transportTarget, contextData.contextName)
    varBinds = VB_PROCESSOR.makeVarBinds(snmpEngine.cache, varBinds)
    deferred = Deferred()
    cmdgen.BulkCommandGenerator().sendVarBinds(snmpEngine, addrName, contextData.contextEngineId, contextData.contextName, nonRepeaters, maxRepetitions, varBinds, __cbFun, (options.get('lookupMib', True), deferred))
    return deferred
class TestResponseToNode(unittest.TestCase):
    """Tests for ``response_to_node``: well-formed responses become resource
    nodes; malformed ones degrade to an '(Empty)' placeholder node while the
    underlying error (KeyError/TypeError/TraitError) is logged."""

    def test_good_response_to_node(self):
        with tempfile.TemporaryDirectory() as temp_dir:
            # An index.rst makes the directory look like a loadable doc root.
            with open(os.path.join(temp_dir, 'index.rst'), 'w'):
                pass
            response = {'version': 1, 'name': 'Amazing Demo', 'root': temp_dir}
            resource = response_to_node(response)
            self.assertEqual(resource.name, temp_dir)
            self.assertEqual(resource.nice_name, 'Amazing Demo')
            self.assertTrue(resource.has_children())

    def test_good_response_but_nonexisting_root_to_node(self):
        # Valid schema but missing root directory: name kept, no children.
        response = {'version': 1, 'name': 'Amazing Demo', 'root': 'I_do_not_exist'}
        with self.assertLogs(LOGGER_NAME) as watcher:
            resource = response_to_node(response)
        self.assertEqual(resource.nice_name, 'Amazing Demo')
        self.assertFalse(resource.has_children())
        self.assertIn('Unable to load data.', resource.description)
        (log_content,) = watcher.output
        self.assertIn('TraitError', log_content)

    def test_bad_response_replaced(self):
        # Empty dict: replaced by the '(Empty)' placeholder, KeyError logged.
        response = {}
        with self.assertLogs(LOGGER_NAME) as watcher:
            resource = response_to_node(response)
        self.assertFalse(resource.has_children())
        self.assertEqual(resource.nice_name, '(Empty)')
        self.assertIn('Unable to load data.', resource.description)
        (log_content,) = watcher.output
        self.assertIn('KeyError', log_content)

    def test_bad_response_type_error(self):
        # Non-dict inputs: placeholder node, TypeError logged.
        bad_values = [None, '1', 1, ()]
        for bad_value in bad_values:
            with self.subTest(bad_value=bad_value):
                with self.assertLogs(LOGGER_NAME) as watcher:
                    resource = response_to_node(bad_value)
                self.assertFalse(resource.has_children())
                self.assertEqual(resource.nice_name, '(Empty)')
                self.assertIn('Unable to load data.', resource.description)
                (log_content,) = watcher.output
                self.assertIn('TypeError', log_content)

    def test_bad_response_missing_name(self):
        with tempfile.TemporaryDirectory() as temp_dir:
            response = {'version': 1, 'root': temp_dir}
            with self.assertLogs(LOGGER_NAME) as watcher:
                resource = response_to_node(response)
            self.assertFalse(resource.has_children())
            self.assertEqual(resource.nice_name, '(Empty)')
            (log_content,) = watcher.output
            self.assertIn("KeyError: 'name'", log_content)

    def test_bad_response_missing_version(self):
        with tempfile.TemporaryDirectory() as temp_dir:
            response = {'name': 'Name', 'root': temp_dir}
            with self.assertLogs(LOGGER_NAME) as watcher:
                resource = response_to_node(response)
            self.assertFalse(resource.has_children())
            (log_content,) = watcher.output
            self.assertIn("KeyError: 'version'", log_content)

    def test_bad_response_bad_version(self):
        # Only version 1 is accepted.
        with tempfile.TemporaryDirectory() as temp_dir:
            response = {'version': 2, 'name': 'Name', 'root': temp_dir}
            with self.assertLogs(LOGGER_NAME) as watcher:
                resource = response_to_node(response)
            self.assertFalse(resource.has_children())
            (log_content,) = watcher.output
            self.assertIn('TraitError', log_content)

    def test_bad_response_bad_name_type(self):
        # Name must be a string.
        with tempfile.TemporaryDirectory() as temp_dir:
            response = {'version': 1, 'name': 1, 'root': temp_dir}
            with self.assertLogs(LOGGER_NAME) as watcher:
                resource = response_to_node(response)
            self.assertFalse(resource.has_children())
            (log_content,) = watcher.output
            self.assertIn('TraitError', log_content)
class InventoryEvents(ABC):
    """Abstract inventory event hooks.

    NOTE(review): the bare ``_epoch_stats(...)`` / ``_episode_stats(...)`` /
    ``_step_stats(...)`` calls below appear to be decorators whose leading
    ``@`` was lost in extraction; they are kept verbatim in their original
    source order and should be restored as decorators on the method each
    group precedes before this class is used.
    """
    _epoch_stats(np.mean, output_name='mean_episode_total')
    _episode_stats(sum)
    _step_stats(len)

    def piece_discarded(self, piece: (int, int)):
        # Event: a piece (coordinate pair) was discarded from the inventory.
        _epoch_stats(np.mean, input_name='step_mean', output_name='step_mean')
        _epoch_stats(max, input_name='step_max', output_name='step_max')
        _episode_stats(np.mean, output_name='step_mean')
        _episode_stats(max, output_name='step_max')
        _step_stats(None)

    def pieces_in_inventory(self, value: int):
        # Event: current number of pieces held.
        _epoch_stats(np.mean, output_name='mean_episode_total')
        _episode_stats(sum)
        _step_stats(len)

    def piece_replenished(self):
        """Event: a piece was replenished."""
def _solve_bezier_x(target: float, a: float, b: float, c: float) -> float:
    """Invert the bezier x-curve: find t with ``_bezier(t, a, b, c)`` close
    to ``target``.

    Newton-Raphson is tried first (up to MAX_ITER steps); if the derivative
    vanishes or iterations run out, fall back to bisection over [0, 1].
    """
    # --- Newton-Raphson phase, seeded with target as the initial guess ---
    t = target
    for _ in range(MAX_ITER):
        err = _bezier(t, a, b, c) - target
        if abs(err) < EPSILON:
            return t
        slope = _derivative_x(t, a, b, c)
        if abs(slope) < EPSILON:
            # Flat derivative: Newton step would blow up; switch methods.
            break
        t -= err / slope
    # --- Bisection fallback over [0, 1] ---
    low, high = 0.0, 1.0
    t = target
    while abs(high - low) > EPSILON:
        x = _bezier(t, a, b, c)
        if abs(x - target) < EPSILON:
            return t
        if x > target:
            high = t
        else:
            low = t
        t = (high + low) * 0.5
    return t
class OptionPlotoptionsSeriesSonificationTracksMappingTremolo(Options):
    """Accessors for series sonification tremolo mapping options."""

    def depth(self) -> 'OptionPlotoptionsSeriesSonificationTracksMappingTremoloDepth':
        # Sub-options object for tremolo depth mapping.
        return self._config_sub_data('depth', OptionPlotoptionsSeriesSonificationTracksMappingTremoloDepth)

    def speed(self) -> 'OptionPlotoptionsSeriesSonificationTracksMappingTremoloSpeed':
        # Sub-options object for tremolo speed mapping.
        return self._config_sub_data('speed', OptionPlotoptionsSeriesSonificationTracksMappingTremoloSpeed)
class group_stats_reply(stats_reply):
    """OpenFlow 1.3 (version=4) group stats reply message (stats_type=6).

    NOTE(review): generated loxi-style code; ``pack`` joins ``str`` pieces
    (including a raw four-byte pad string), which targets Python 2 byte
    semantics. ``unpack`` takes no ``self`` and is presumably a stripped
    ``@staticmethod``.
    """
    version = 4
    type = 19
    stats_type = 6

    def __init__(self, xid=None, flags=None, entries=None):
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (flags != None):
            self.flags = flags
        else:
            self.flags = 0
        if (entries != None):
            self.entries = entries
        else:
            self.entries = []
        return

    def pack(self):
        """Serialize the message; the 16-bit length field (slot 2) is
        back-patched once every part has been packed."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        # Four bytes of padding per the OF 1.3 multipart reply layout.
        packed.append(('\x00' * 4))
        packed.append(loxi.generic_util.pack_list(self.entries))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        """Deserialize a group_stats_reply from a loxi buffer reader."""
        obj = group_stats_reply()
        _version = reader.read('!B')[0]
        assert (_version == 4)
        _type = reader.read('!B')[0]
        assert (_type == 19)
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Constrain further reads to this message's declared length.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 6)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)
        obj.entries = loxi.generic_util.unpack_list(reader, ofp.common.group_stats_entry.unpack)
        return obj

    def __eq__(self, other):
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.flags != other.flags):
            return False
        if (self.entries != other.entries):
            return False
        return True

    def pretty_print(self, q):
        """Render a human-readable dump through the pretty-printer ``q``."""
        q.text('group_stats_reply {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {1: 'OFPSF_REPLY_MORE'}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
                q.text(',')
                q.breakable()
                q.text('entries = ')
                q.pp(self.entries)
            q.breakable()
        q.text('}')
class TestGenerators(TestCase):
    """End-to-end tests for backdoor generators and the ObfPost channel.

    NOTE(review): ``_randomize_bd`` / ``_clean_bd`` take ``cls`` and look
    like stripped ``@classmethod`` decorators.
    """

    def test_generators(self):
        # Generating to '-' should print a PHP phar payload to stdout.
        with TextIOWrapper(buffer=BytesIO()) as buf, redirect_stdout(buf):
            obfuscated = generate('dummy', 'phar')
            save_generated(obfuscated, '-')
            buf.buffer.seek(0)
            output = buf.buffer.read()
            self.assertTrue(output.startswith(b'<?php'))
            self.assertIn(b'__HALT_COMPILER(); ?>', output)
        # Fuzz: repeatedly generate random backdoors and exercise each with
        # echo payloads of increasing size.
        for i in range(0, 200):
            self._randomize_bd()
            obfuscated = generate(self.password.decode('utf-8'), self.obfuscator)
            save_generated(obfuscated, self.path)
            self.channel = Channel('ObfPost', {'url': self.url, 'password': self.password.decode('utf-8')})
            self._incremental_requests(10, 100, 30, 50)
            self._clean_bd()

    def _incremental_requests(self, size_start, size_to, step_rand_start, step_rand_to):
        """Send echo payloads of growing (randomly stepped) size and verify
        the round-trip result matches."""
        for i in range(size_start, size_to, random.randint(step_rand_start, step_rand_to)):
            payload = utils.strings.randstr(i)
            self.assertEqual(self.channel.send(('echo("%s");' % payload.decode('utf-8')))[0], payload, f'Obfuscator failed: {self.obfuscator}')

    def _randomize_bd(cls):
        # Pick a random obfuscator/password and derive the backdoor URL/path.
        cls.obfuscator = ('obfusc1_php' if (random.randint(0, 100) > 50) else 'phar')
        cls.password = utils.strings.randstr(10)
        # NOTE(review): password_hash is computed but never used here.
        password_hash = hashlib.md5(cls.password).hexdigest().lower()
        filename = ('%s_%s.php' % (__name__, cls.password))
        cls.url = os.path.join(base_url, 'generators', filename)
        cls.path = os.path.join(base_folder, 'generators', filename)

    def _clean_bd(cls):
        # Remove the generated backdoor file from disk.
        os.remove(cls.path)
class TestCertificateProviders(unittest.TestCase):
    """Tests that the PC-instance certificate providers surface the server
    and CA certificate content stored on the instance."""

    def setUp(self) -> None:
        self.instance_id = 'test_instance_123'
        self.test_server_cert_content = 'test_server_certificate'
        self.test_ca_cert_content = 'test_ca_certificate'
        self.pc_instance = self._create_pc_instance()

    def test_pc_instance_server_certificate_provider(self) -> None:
        cert_provider = PCInstanceServerCertificateProvider(self.pc_instance)
        actual_cert_content = cert_provider.get_certificate()
        self.assertEqual(self.test_server_cert_content, actual_cert_content)

    def test_pc_instance_ca_certificate_provider(self) -> None:
        cert_provider = PCInstanceCaCertificateProvider(self.pc_instance)
        actual_cert_content = cert_provider.get_certificate()
        self.assertEqual(self.test_ca_cert_content, actual_cert_content)

    def _create_pc_instance(self) -> PrivateComputationInstance:
        """Build a minimal PARTNER lift instance carrying test certificates."""
        infra_config: InfraConfig = InfraConfig(
            instance_id=self.instance_id,
            role=PrivateComputationRole.PARTNER,
            status=PrivateComputationInstanceStatus.PID_PREPARE_COMPLETED,
            # BUG FIX: the checked-in source read `status_update_ts=,` (no
            # value) — a syntax error. These tests don't depend on the
            # timestamp, so any integer works. TODO(review): confirm the
            # originally intended value against VCS history.
            status_update_ts=0,
            instances=[],
            game_type=PrivateComputationGameType.LIFT,
            num_pid_containers=2,
            num_mpc_containers=2,
            num_files_per_mpc_container=4,
            status_updates=[],
            run_id='681ba82c-16d9-11ed-861d-0242ac120002',
            pcs_features={PCSFeature.PCF_TLS},
            server_certificate=self.test_server_cert_content,
            ca_certificate=self.test_ca_cert_content,
        )
        common: CommonProductConfig = CommonProductConfig(input_path='456', output_dir='789')
        product_config: ProductConfig = LiftConfig(common=common)
        return PrivateComputationInstance(infra_config=infra_config, product_config=product_config)
def test_read_mixed_gridhead():
    """EGridReader must reject grids whose GRIDHEAD declares an
    unstructured (type 2) grid."""
    stream = io.BytesIO()
    keywords = [
        ('FILEHEAD', np.zeros((100,), dtype=np.int32)),
        ('GRIDUNIT', ['METRES ', 'MAP ']),
        # GRIDHEAD filled with 2s => unstructured grid type.
        ('GRIDHEAD', 2 * np.ones((100,), dtype=np.int32)),
    ]
    resfo.write(stream, keywords)
    stream.seek(0)
    reader = xtge.EGridReader(stream)
    with pytest.raises(NotImplementedError, match='unstructured'):
        reader.read()
class TestFrozenFieldHook(unittest.TestCase):
    """Frozen-field hooks must block updates and deletes on frozen
    attributes while leaving mutable attributes writable/deletable."""

    def setUp(self) -> None:
        self.dummy_obj = DummyInstance('01', 'Tupper01', '//fbsource', '//fbsource:output', '//fbsource:storage', 'Meta', 'Seattle')

    def test_update_event_frozen_field_hook(self) -> None:
        obj = self.dummy_obj
        # These assignments are permitted by the hook.
        obj.input_path = '//fbcode'
        obj.output_path = '//fbsource:output'
        obj.status = 'complete'
        # Further writes to the frozen fields must raise.
        with self.assertRaises(InstanceFrozenFieldError):
            obj.input_path = '//www'
        with self.assertRaises(InstanceFrozenFieldError):
            obj.output_path = '//www:output'

    def test_delete_event_frozen_field_hook(self) -> None:
        obj = self.dummy_obj
        obj.location = 'Kirkland'
        # A mutable attribute may be deleted, after which access fails.
        del obj.user
        with self.assertRaises(AttributeError):
            obj.user
        # But the (now frozen) location can no longer be reassigned.
        with self.assertRaises(InstanceFrozenFieldError):
            obj.location = 'Bellevue'
def test_fixed_value_encoding(tmpdir, merge_lis_prs):
    """With no encodings configured, reading the encoded TTLR name warns and
    yields raw bytes; after setting 'koi8_r' it decodes to text.

    The global encoding list and the file handle are restored/closed in a
    single ``finally`` so neither leaks when an assertion fails.
    """
    fpath = os.path.join(str(tmpdir), 'encoded-fixed-value.dlis')
    content = ['data/lis/records/RHLR-1.lis.part', 'data/lis/records/THLR-1.lis.part', 'data/lis/records/FHLR-1.lis.part', 'data/lis/records/FTLR-1.lis.part', 'data/lis/records/TTLR-encoded.lis.part', 'data/lis/records/RTLR-1.lis.part']
    merge_lis_prs(fpath, content)
    prev_encodings = dlisio.common.get_encodings()
    dlisio.common.set_encodings([])
    f = None
    try:
        (f,) = lis.load(fpath)
        trailer = f.tape.trailer()
        # No encoding configured: name stays raw bytes and a warning fires.
        with pytest.warns(UnicodeWarning):
            assert (trailer.name == b'\xec\xc5\xce\xd4\xc1001')
        dlisio.common.set_encodings(['koi8_r'])
        trailer = f.tape.trailer()
        # b'\xec\xc5\xce\xd4\xc1' is 'Лента' in KOI8-R (the literal had its
        # non-ASCII characters stripped in the checked-in source).
        assert (trailer.name == 'Лента001')
    finally:
        # BUG FIX: f.close() previously sat *after* the finally block, so any
        # failure inside the try skipped it (leaking the handle) or hit an
        # unbound `f` when load itself failed.
        dlisio.common.set_encodings(prev_encodings)
        if f is not None:
            f.close()
def extractWordsinserialWordpressCom(item):
    """Map a wordsinserial.wordpress.com feed item to a release message.

    Returns None for previews or unnumbered posts, a release message when a
    known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if (not (chp or vol)) or ('preview' in item['title'].lower()):
        return None
    # (tag to match, series name, translation type)
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, name, tl_type in tagmap:
        if tagname not in item['tags']:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def extractRumanshisLair(item):
    """Map a Rumanshi's Lair feed item onto a known series release.

    Returns None for previews/unnumbered posts, a release message when a
    known title/tag pattern matches, and False otherwise.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    if ((not (chp or vol)) or ('preview' in item['title'].lower())):
        return None
    if item['title'].startswith('Jobless'):
        return buildReleaseMessageWithType(item, 'I Aim to Be an Adventurer with the Jobclass of "Jobless"', vol, chp, frag=frag, postfix=postfix)
    if (('The Harem Was a Forced Goal' in item['tags']) or ('THWAFG' in item['title'])):
        # 'SS' posts are side stories unless a postfix was already parsed.
        if (('SS' in item['title']) and (not postfix)):
            postfix = 'Side Story'
        return buildReleaseMessageWithType(item, 'The Harem Was a Forced Goal', vol, chp, frag=frag, postfix=postfix)
    if (('Isekai Cheat' in item['tags']) or ('Isekai Cheat' in item['title'])):
        return buildReleaseMessageWithType(item, 'Different World Reincarnation ~ Enjoying the new world as a cheat ~', vol, chp, frag=frag, postfix=postfix)
    if (('Other Worlds Monster Breeder' in item['tags']) or ('Other Worlds Monster Breeder (PokeGod)'.lower() in item['title'].lower())):
        return buildReleaseMessageWithType(item, "Other World's Monster Breeder", vol, chp, frag=frag, postfix=postfix)
    if ('When I returned home, what I found was fantasy!?'.lower() in item['title'].lower()):
        return buildReleaseMessageWithType(item, 'Kaettekite mo Fantasy!?', vol, chp, frag=frag, postfix=postfix)
    return False
class FutureSOAResponse(Generic[_FR]):
    """Future-like wrapper around a lazy response getter.

    The first successful response is cached. Any non-timeout exception from
    the getter is captured (type, value, traceback) and re-raised with its
    original traceback on every subsequent ``result()`` call; timeouts are
    treated as transient and never cached.
    """
    # Captured exception info so the original traceback can be replayed.
    DelayedException = NamedTuple('DelayedException', (('tp', Type[BaseException]), ('value', BaseException), ('tb', Optional[TracebackType])))

    def __init__(self, get_response):
        self._get_response = get_response
        self._response = None
        self._raise = None

    def result(self, timeout=None):
        """Return the response (blocking up to ``timeout``), or re-raise a
        previously captured exception."""
        if self._raise:
            if six.PY2:
                six.reraise(tp=self._raise.tp, value=self._raise.value, tb=self._raise.tb)
            else:
                raise self._raise.value.with_traceback(self._raise.tb)
        if self._response:
            return self._response
        try:
            self._response = self._get_response(timeout)
            return self._response
        except MessageReceiveTimeout:
            # Transient: do not cache, so callers can retry.
            raise
        except Exception:
            (t, e, tb) = sys.exc_info()
            assert ((t is not None) and (e is not None))
            self._raise = self.DelayedException(t, e, tb)
            raise

    def exception(self, timeout=None):
        """Return the exception raised by the call, if any; None otherwise.
        Timeouts propagate rather than being returned."""
        if self.running():
            try:
                self.result(timeout)
                return None
            except MessageReceiveTimeout:
                raise
            except Exception as e:
                return e
        if self._raise:
            return self._raise.value
        return None

    def running(self):
        # "Running" means neither a response nor an error has been captured.
        return (not self.done())

    def done(self):
        return bool((self._response or self._raise))
class TestComplexLogic(CoprsTestCase):
    """Tests for ComplexLogic project forking and expired-project deletion.

    NOTE(review): the bare ``.usefixtures(...)`` lines below appear to be
    stripped ``@pytest.mark.usefixtures(...)`` decorators for the method
    that follows each of them — restore before running. Several keys in the
    expected ``builds_map`` dicts also look truncated (leading build ids
    missing, producing duplicate keys such as ``'-new-package'``); verify
    the literals against VCS history.
    """

    def test_fork_copr_sends_actions(self, f_users, f_coprs, f_mock_chroots, f_builds, f_db):
        with mock.patch('flask.g') as mc_flask_g:
            mc_flask_g.user.name = self.u2.name
            (fc1, created) = ComplexLogic.fork_copr(self.c1, self.u2, u'dstname')
            self.db.session.commit()
            # Exactly one 'fork' action carrying the builds mapping.
            actions = ActionsLogic.get_many(ActionTypeEnum('fork')).all()
            assert (len(actions) == 1)
            data = json.loads(actions[0].data)
            assert (data['user'] == self.u2.name)
            assert (data['copr'] == 'dstname')
            assert (data['builds_map'] == {'srpm-builds': {'bar': ''}, 'fedora-18-x86_64': {'bar': '-hello-world'}})
    .usefixtures('f_users', 'f_fork_prepare', 'f_db')

    def test_fork_copr_projects_with_more_builds(self):
        flask.g.user = self.u2
        (fc2, created) = ComplexLogic.fork_copr(self.c2, self.u2, u'dstname')
        self.db.session.commit()
        actions = ActionsLogic.get_many(ActionTypeEnum('fork')).all()
        assert (len(actions) == 1)
        data = json.loads(actions[0].data)
        assert (data['user'] == self.u2.name)
        assert (data['copr'] == 'dstname')
        assert (data['builds_map'] == {'srpm-builds': {'-whatsupthere-world': '', '-hello-world': '', '-new-package': '', '-new-package': ''}, 'fedora-17-x86_64': {'8-whatsupthere-world': '-whatsupthere-world', '6-hello-world': '-hello-world', '10-new-package': '-new-package'}, 'fedora-17-i386': {'8-whatsupthere-world': '-whatsupthere-world', '6-hello-world': '-hello-world', '11-new-package': '-new-package'}})
    .usefixtures('f_users', 'f_fork_prepare', 'f_db')

    def test_fork_copr_with_eoled_chroots(self):
        flask.g.user = self.u2
        # Deactivate one chroot; the fork should drop it.
        self.mc3.is_active = False
        self.db.session.add(self.mc3)
        self.db.session.commit()
        (new_copr, created) = ComplexLogic.fork_copr(self.c2, self.u2, u'dstname')
        assert created
        assert ([cc.mock_chroot.name for cc in new_copr.copr_chroots] == ['fedora-17-x86_64'])
        self.db.session.commit()
        actions = ActionsLogic.get_many(ActionTypeEnum('fork')).all()
        assert (len(actions) == 1)
        data = json.loads(actions[0].data)
        assert (data['user'] == self.u2.name)
        assert (data['copr'] == 'dstname')
        assert (data['builds_map'] == {'srpm-builds': {'-whatsupthere-world': '', '-hello-world': '', '-new-package': ''}, 'fedora-17-x86_64': {'8-whatsupthere-world': '-whatsupthere-world', '6-hello-world': '-hello-world', '10-new-package': '-new-package'}})

    def test_delete_expired_coprs(self, f_users, f_mock_chroots, f_coprs, f_builds, f_db):
        query = self.db.session.query(models.Copr)
        assert (len([c for c in query.all() if c.deleted]) == 0)
        # One project expires in the future, one already expired.
        self.c1.delete_after_days = 2
        self.c2.delete_after = (datetime.datetime.now() - datetime.timedelta(days=1))
        ComplexLogic.delete_expired_projects()
        self.db.session.commit()
        query = self.db.session.query(models.Copr)
        assert (len(query.all()) == 3)
        # Nothing deleted yet: the expired project still has a live build.
        assert (len([c for c in query.all() if c.deleted]) == 0)
        b = self.db.session.query(models.Build).get(3)
        b.canceled = True
        ComplexLogic.delete_expired_projects()
        self.db.session.commit()
        assert (len([c for c in query.all() if c.deleted]) == 1)
        assert (not self.db.session.query(models.Build).get(3))
class Bishop(Piece):
    """Diagonal-sliding chess piece.

    Relies on module globals (``see_through``, ``RED_HIGHLIGHT``) and
    helpers (``to_screen_coords``, ``draw_line_round_corners_polygon``)
    from the surrounding file.
    """

    def __init__(self, x, y, c):
        super().__init__(x, y, c)
        # NOTE(review): the letter literal is empty — possibly a lost
        # non-ASCII glyph (e.g. a bishop symbol); confirm against VCS.
        self.set_letter('')

    def drag(self, new_p, pieces):
        # While grabbed, constrain movement to the nearest diagonal path and
        # slide along its unit direction by the dragged distance.
        if self.grabbed:
            (path, dist) = self.select_path((self.start_x, self.start_y), [[1, 1], [(- 1), 1]], new_p)
            path_len = math.sqrt(((path[0] ** 2) + (path[1] ** 2)))
            self.slide(((path[0] * dist) / path_len), ((path[1] * dist) / path_len), pieces)

    def draw_paths(self, pieces):
        # Highlight every reachable diagonal (skipped when targeted).
        if self.targeted:
            return
        # Probe each diagonal with a ghost bishop to find how far it extends.
        fake_piece = Bishop(self.start_x, self.start_y, self.color)
        directions = [[10, 10], [(- 10), (- 10)], [10, (- 10)], [(- 10), 10]]
        end_positions = []
        for d in directions:
            fake_piece.slide(d[0], d[1], [p for p in pieces if (p != self)], fake=True)
            end_positions.append((fake_piece.x, fake_piece.y))
            # Return the ghost to the start before probing the next diagonal.
            # NOTE(review): indentation was lost in extraction — presumably
            # this reset belongs inside the loop; confirm against VCS.
            fake_piece.slide(0, 0, [p for p in pieces if (p != self)], fake=True)
        for end_pos in end_positions:
            draw_line_round_corners_polygon(see_through, to_screen_coords((self.start_x, self.start_y)), to_screen_coords(end_pos), RED_HIGHLIGHT, (((self.radius * 2) * 640) / 8))
class ButtonController(HasTraits):
    """Tracks which plot buttons are selected and drives a shared scatterplot.

    At most two buttons may be active at once. With one active button its own
    overlay is shown; with two, a scatterplot of the first button's series
    against the second's is drawn into ``self.plot``.
    """

    # Modifier key intended for multi-select (see NOTE in notify()).
    modifier = Enum('control', 'shift', 'alt')
    # Currently selected buttons, in selection order (length 0..2).
    active_buttons = List
    # Plot that hosts the two-series scatterplot.
    plot = Instance(Plot)
    plot_overlay = Any
    # Name under which the scatterplot renderer is registered in self.plot.
    _scatterplot_name = 'ButtonControllerPlot'

    def notify(self, button, type, event):
        """Entry point called by a button on mouse events ('down' etc.)."""
        # NOTE(review): `control_down` is hard-coded to True rather than read
        # from `event` (presumably meant to be e.g. event.control_down) -- as
        # written, clicking an already-active button always deselects it and
        # plain-click-replaces-selection never triggers. Confirm intent.
        # Also: the parameter `type` shadows the builtin of the same name.
        control_down = True
        if DEBUG:
            print('[notify]', button.plotname, type, 'control:', control_down)
        if (type == 'down'):
            if (control_down and (button in self.active_buttons)):
                self.button_deselected(button)
            else:
                if (not control_down):
                    # Plain click: drop every other active button first.
                    [self.button_deselected(b) for b in self.active_buttons if (b is not button)]
                self.button_selected(button)
        elif 1:
            # NOTE(review): `elif 1:` is always true -- acts as a bare `else`
            # for any non-'down' event type.
            self.button_deselected(button)

    def button_selected(self, button):
        """Add `button` to the active set, updating overlays/scatterplot."""
        if DEBUG:
            print('active:', [b.plotname for b in self.active_buttons])
            print('new button selected:', button.plotname)
        if (button in self.active_buttons):
            return
        numbuttons = len(self.active_buttons)
        if (numbuttons == 0):
            # First selection: just show this button's own overlay.
            self.active_buttons.append(button)
            button.show_overlay()
        elif (numbuttons == 1):
            # Second selection: swap single-overlay mode for the scatterplot.
            self.active_buttons[0].hide_overlay()
            self.active_buttons.append(button)
            self.show_scatterplot(*self.active_buttons)
        elif (numbuttons == 2):
            # Already two active: replace the most recent one.
            self.active_buttons[1].button_state = 'up'
            self.active_buttons[1] = button
            self.hide_scatterplot()
            self.show_scatterplot(*self.active_buttons)
        else:
            return
        button.button_state = 'down'

    def button_deselected(self, button):
        """Remove `button` from the active set, updating overlays/scatterplot."""
        if DEBUG:
            print('active:', [b.plotname for b in self.active_buttons])
            print('new button deselected:', button.plotname)
        if (button not in self.active_buttons):
            button.button_state = 'up'
            return
        numbuttons = len(self.active_buttons)
        if (numbuttons == 1):
            if (button in self.active_buttons):
                self.active_buttons.remove(button)
                button.hide_overlay()
        elif (numbuttons == 2):
            if (button in self.active_buttons):
                self.active_buttons.remove(button)
                # Back to single-button mode: drop scatterplot, restore the
                # remaining button's own overlay.
                self.hide_scatterplot()
                remaining_button = self.active_buttons[0]
                remaining_button.show_overlay()
        else:
            return
        button.button_state = 'up'

    def show_scatterplot(self, b1, b2):
        """Plot b1's series against b2's series in the shared plot."""
        # Clear any previous renderers before adding the new scatter.
        if (len(self.plot.plots) > 0):
            self.plot.delplot(*list(self.plot.plots.keys()))
        cur_plot = self.plot.plot(((b1.plotname + '_y'), (b2.plotname + '_y')), name=self._scatterplot_name, type='scatter', marker='square', color=tuple(choice(COLOR_PALETTE)), marker_size=8)
        self.plot.index_axis.title = b1.plotname
        self.plot.title = ((b1.plotname + ' vs. ') + b2.plotname)
        self.plot_overlay.visible = True
        self.plot.request_redraw()

    def hide_scatterplot(self):
        """Remove the scatterplot renderer and reset axis bounds to auto."""
        if (self._scatterplot_name in self.plot.plots):
            self.plot.delplot(self._scatterplot_name)
            self.plot.index_range.set_bounds('auto', 'auto')
            self.plot.value_range.set_bounds('auto', 'auto')
        self.plot_overlay.visible = False
def parse_arguments(program_version, arguments=None):
    """Build and evaluate the SVIM command-line interface.

    Parameters
    ----------
    program_version : str
        Version string reported by ``--version``.
    arguments : list of str, optional
        Argument vector to parse. Defaults to ``sys.argv[1:]``, resolved at
        call time.

    Returns
    -------
    argparse.Namespace
        Parsed options; ``args.sub`` holds the chosen mode
        ('reads' or 'alignment').
    """
    # Fix: a default of `sys.argv[1:]` in the signature is evaluated exactly
    # once at import time, freezing whatever argv was then (and hurting
    # testability). Use a None sentinel and resolve lazily instead; passing
    # an explicit list behaves exactly as before.
    if arguments is None:
        arguments = sys.argv[1:]
    parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description='SVIM (pronounced SWIM) is a structural variant caller for long reads. \nIt discriminates six different variant classes: deletions, tandem and interspersed duplications, \ninversions, insertions and translocations. SVIM is unique in its capability of extracting both the genomic origin and \ndestination of duplications.\n\nSVIM consists of four major steps:\n- COLLECT detects signatures for SVs in long read alignments\n- CLUSTER merges signatures that come from the same SV\n- COMBINE combines clusters from different genomic regions and classifies them into distinct SV types\n- GENOTYPE uses alignments spanning SVs to determine their genotype\n\nSVIM can process two types of input. Firstly, it can detect SVs from raw reads by aligning them to a given reference genome first ("SVIM.py reads [options] working_dir reads genome").\nAlternatively, it can detect SVs from existing reads alignments in SAM/BAM format ("SVIM.py alignment [options] working_dir bam_file").\n')
    subparsers = parser.add_subparsers(help='modes', dest='sub')
    parser.add_argument('--version', '-v', action='version', version='%(prog)s {version}'.format(version=program_version))

    # --- 'reads' mode: align raw reads first, then call SVs ---
    parser_fasta = subparsers.add_parser('reads', help='Detect SVs from raw reads. Align reads to given reference genome first.')
    parser_fasta.add_argument('working_dir', type=str, help='Working and output directory. Existing files in the directory are overwritten. If the directory does not exist, it is created.')
    parser_fasta.add_argument('reads', type=str, help='Read file (FASTA, FASTQ, gzipped FASTA, gzipped FASTQ or file list). The read file has to have one of the following supported file endings: FASTA: .fa, .fasta, .FA, .fa.gz, .fa.gzip, .fasta.gz, .fasta.gzip FASTQ: .fq, .fastq, .FQ, .fq.gz, .fq.gzip, .fastq.gz, .fastq.gzip FILE LIST: .fa.fn, fq.fn')
    parser_fasta.add_argument('genome', type=str, help='Reference genome file (FASTA)')
    parser_fasta.add_argument('--verbose', action='store_true', help='Enable more verbose logging (default: %(default)s)')
    group_fasta_align = parser_fasta.add_argument_group('ALIGN')
    group_fasta_align.add_argument('--cores', type=int, default=1, help='CPU cores to use for the alignment (default: %(default)s)')
    group_fasta_align.add_argument('--aligner', type=str, default='ngmlr', choices=['ngmlr', 'minimap2'], help='Tool for read alignment: ngmlr or minimap2 (default: %(default)s)')
    group_fasta_align.add_argument('--nanopore', action='store_true', help='Use Nanopore settings for read alignment (default: %(default)s)')
    group_fasta_collect = parser_fasta.add_argument_group('COLLECT')
    group_fasta_collect.add_argument('--min_mapq', type=int, default=20, help='Minimum mapping quality of reads to consider (default: %(default)s). Reads with a lower mapping quality are ignored.')
    group_fasta_collect.add_argument('--min_sv_size', type=int, default=40, help='Minimum SV size to detect (default: %(default)s). SVIM can potentially detect events of any size but is limited by the signal-to-noise ratio in the input alignments. That means that more accurate reads and alignments enable the detection of smaller events. For current PacBio or Nanopore data, we would recommend a minimum size of 40bp or larger.')
    group_fasta_collect.add_argument('--max_sv_size', type=int, default=100000, help='Maximum SV size to detect (default: %(default)s). This parameter is used to distinguish long deletions (and inversions) from translocations which cannot be distinguished from the alignment alone. Split read segments mapping far apart on the reference could either indicate a very long deletion (inversion) or a translocation breakpoint. SVIM calls a translocation breakpoint if the mapping distance is larger than this parameter and a deletion (or inversion) if it is smaller or equal.')
    group_fasta_collect.add_argument('--segment_gap_tolerance', type=int, default=10, help='Maximum tolerated gap between adjacent alignment segments (default: %(default)s). This parameter applies to gaps on the reference and the read. Example: Deletions are detected from two subsequent segments of a split read that are mapped far apart from each other on the reference. The segment gap tolerance determines the maximum tolerated length of the read gap between both segments. If there is an unaligned read segment larger than this value between the two segments, no deletion is called.')
    group_fasta_collect.add_argument('--segment_overlap_tolerance', type=int, default=5, help='Maximum tolerated overlap between adjacent alignment segments (default: %(default)s). This parameter applies to overlaps on the reference and the read. Example: Deletions are detected from two subsequent segments of a split read that are mapped far apart from each other on the reference. The segment overlap tolerance determines the maximum tolerated length of an overlap between both segments on the read. If the overlap between the two segments on the read is larger than this value, no deletion is called.')
    group_fasta_collect.add_argument('--all_bnds', action='store_true', help="Output all rearrangements additionally in BND notation (default: %(default)s). By default, SV signatures from the read alignments are used to detect complete SVs, such as deletions, insertions and inversions. When this option is enabled, all SVs are also output in breakend (BND) notation as defined in the VCF specs. For instance, a deletion gets two records in the VCF output: 1. the normal <DEL> record and 2. a <BND> record representing the novel adjacency between the deletion's start and end coordinate in the sample genome.")
    group_fasta_cluster = parser_fasta.add_argument_group('CLUSTER')
    group_fasta_cluster.add_argument('--partition_max_distance', type=int, default=1000, help='Maximum distance in bp between SVs in a partition (default: %(default)s). Before clustering, the SV signatures are divided into coarse partitions. This parameter determines the maximum distance between two subsequent signatures in the same partition. If the distance between two subsequent signatures is larger than this parameter, they are distributed into separate partitions.')
    group_fasta_cluster.add_argument('--position_distance_normalizer', type=int, default=900, help='Distance normalizer used for span-position distance (default: %(default)s). SVIM clusters the SV signatures using an hierarchical clustering approach and a novel distance metric called "span-position distance". Span-position distance is the sum of two components, span distance and position distance. The span distance is the difference in lengths between signatures normalized by the greater length and always lies in the interval [0,1]. The position distance is the difference in position between signatures normalized by the distance normalizer (this parameter). For a position difference of 1.8kb and a distance normalizer of 900, the position distance will be 2. A smaller distance normalizer leads to a higher position distance and as a consequence increases the importance of the position distance in the span-position distance relative to the span distance.')
    group_fasta_cluster.add_argument('--edit_distance_normalizer', type=float, default=1.0, help='Distance normalizer used specifically for insertions (default: %(default)s). SVIM clusters insertion signatures using an hierarchical clustering approach and a special distance metric for insertions. This distance is the sum of two components, position distance and edit distance between the insertion sequences. The edit distance is normalized (i.e. divided) by the product of the span of the longer insertion and this normalizer. The position distance is the difference in position between signatures normalized by the position distance normalizer (another parameter). A smaller edit distance normalizer leads to a larger edit distance and as a consequence increases the importance of the edit distance in the clustering process so that only insertions with very similar sequences are clustered together. A larger edit distance normalizer diminishes the importance of the insertion sequences in the clustering process.')
    group_fasta_cluster.add_argument('--cluster_max_distance', type=float, default=0.5, help='Maximum span-position distance between SVs in a cluster (default: %(default)s). This is the most important parameter because it determines the strictness of clustering. Choosing a large value leads to fewer but larger clusters with larger distances between its members. Choosing a small value leads to more but smaller clusters with smaller distances between its members. This parameter determines the height of the cut-off in the hierarchical clustering dendrogram.')
    group_fasta_combine = parser_fasta.add_argument_group('COMBINE')
    group_fasta_combine.add_argument('--del_ins_dup_max_distance', type=float, default=1.0, help='Maximum span-position distance between the origin of an insertion and a deletion to be flagged as a potential cut&paste insertion (default: %(default)s)')
    group_fasta_combine.add_argument('--trans_sv_max_distance', type=int, default=500, help='Maximum distance in bp between a translocation breakpoint and an SV signature to be combined (default: %(default)s)')
    group_fasta_combine.add_argument('--skip_consensus', action='store_true', help='Disable consensus computation for insertions (default: %(default)s). This reduces the time and memory consumption of SVIM and might be useful if consensus sequences are not needed. With this option, insertion calls are represented by symbolic alleles (<INS>) instead of sequence alles in the output VCF. Consensus computation requires a modern CPU with the SSE 4.1 instruction set. For older CPUs missing this instruction set, consensus computation is automatically disabled.')
    group_fasta_combine.add_argument('--max_consensus_length', type=int, default=10000, help='Maximum size of insertion sequences for consensus computation. (default: %(default)s) For insertions longer than this threshold, no consensus is computed to save memory.')
    group_fasta_genotype = parser_fasta.add_argument_group('GENOTYPE')
    group_fasta_genotype.add_argument('--skip_genotyping', action='store_true', help='Disable genotyping (default: %(default)s)')
    group_fasta_genotype.add_argument('--minimum_score', type=int, default=3, help='Minimum score for genotyping (default: %(default)s). Only SV candidates with a higher or equal score are genotyped. Depending on the score distribution among the SV candidates, decreasing this value increases the runtime. We recommend to choose a value close to the score threshold used for filtering the SV candidates.')
    group_fasta_genotype.add_argument('--homozygous_threshold', type=float, default=0.8, help='Minimum variant allele frequency to be called as homozygous (default: %(default)s). Allele frequency is computed as the fraction of reads supporting the variant over the total number of reads covering the variant. Variants with an allele frequence greater than or equal to this threshold are called as homozygous alternative.')
    group_fasta_genotype.add_argument('--heterozygous_threshold', type=float, default=0.2, help='Minimum variant allele frequency to be called as heterozygous (default: %(default)s). Allele frequency is computed as the fraction of reads supporting the variant over the total number of reads covering the variant. Variants with an allele frequence greater than or equal to this threshold but lower than the homozygous threshold are called as heterozygous alternative. Variants with an allele frequence lower than this threshold are called as homozygous reference.')
    group_fasta_genotype.add_argument('--minimum_depth', type=int, default=4, help='Minimum total read depth for genotyping (default: %(default)s). Variants covered by a total number of reads lower than this value are not assigned a genotype (./. in the output VCF file).')
    group_fasta_output = parser_fasta.add_argument_group('OUTPUT')
    group_fasta_output.add_argument('--sample', type=str, default='Sample', help='Sample ID to include in output vcf file (default: %(default)s)')
    group_fasta_output.add_argument('--types', type=str, default='DEL,INS,INV,DUP:TANDEM,DUP:INT,BND', help='SV types to include in output VCF (default: %(default)s). Give a comma-separated list of SV types. The possible SV types are: DEL (deletions), INS (novel insertions), INV (inversions), DUP:TANDEM (tandem duplications), DUP:INT (interspersed duplications), BND (breakends).')
    group_fasta_output.add_argument('--symbolic_alleles', action='store_true', help='Use symbolic alleles, such as <DEL> or <INV> in output VCF (default: %(default)s). By default, all SV alleles are represented by nucleotide sequences.')
    group_fasta_output.add_argument('--insertion_sequences', action='store_true', help='Output insertion sequences in INFO tag of VCF (default: %(default)s). If enabled, the INFO/SEQS tag contains a list of insertion sequences from the supporting reads.')
    group_fasta_output.add_argument('--tandem_duplications_as_insertions', action='store_true', help='Represent tandem duplications as insertions in output VCF (default: %(default)s). By default, tandem duplications are represented by the SVTYPE=DUP:TANDEM and the genomic source is given by the POS and END tags. When enabling this option, duplications are instead represented by the SVTYPE=INS and POS and END both give the insertion point of the duplication.')
    group_fasta_output.add_argument('--interspersed_duplications_as_insertions', action='store_true', help='Represent interspersed duplications as insertions in output VCF (default: %(default)s). By default, interspersed duplications are represented by the SVTYPE=DUP:INT and the genomic source is given by the POS and END tags. When enabling this option, duplications are instead represented by the SVTYPE=INS and POS and END both give the insertion point of the duplication.')
    group_fasta_output.add_argument('--read_names', action='store_true', help='Output names of supporting reads in INFO tag of VCF (default: %(default)s). If enabled, the INFO/READS tag contains the list of names of the supporting reads.')
    group_fasta_output.add_argument('--zmws', action='store_true', help='look for information on ZMWs in PacBio read names (default: %(default)s). If enabled, the INFO/ZMWS tag contains the number of ZMWs that produced supporting reads.')

    # --- 'alignment' mode: call SVs from an existing sorted, indexed BAM ---
    parser_bam = subparsers.add_parser('alignment', help='Detect SVs from an existing alignment')
    parser_bam.add_argument('working_dir', type=os.path.abspath, help='Working and output directory. Existing files in the directory are overwritten. If the directory does not exist, it is created.')
    parser_bam.add_argument('bam_file', type=str, help='Coordinate-sorted and indexed BAM file with aligned long reads')
    parser_bam.add_argument('genome', type=str, help='Reference genome file that the long reads were aligned to (FASTA)')
    parser_bam.add_argument('--verbose', action='store_true', help='Enable more verbose logging (default: %(default)s)')
    group_bam_collect = parser_bam.add_argument_group('COLLECT')
    group_bam_collect.add_argument('--min_mapq', type=int, default=20, help='Minimum mapping quality of reads to consider (default: %(default)s). Reads with a lower mapping quality are ignored.')
    group_bam_collect.add_argument('--min_sv_size', type=int, default=40, help='Minimum SV size to detect (default: %(default)s). SVIM can potentially detect events of any size but is limited by the signal-to-noise ratio in the input alignments. That means that more accurate reads and alignments enable the detection of smaller events. For current PacBio or Nanopore data, we would recommend a minimum size of 40bp or larger.')
    group_bam_collect.add_argument('--max_sv_size', type=int, default=100000, help='Maximum SV size to detect (default: %(default)s). This parameter is used to distinguish long deletions (and inversions) from translocations which cannot be distinguished from the alignment alone. Split read segments mapping far apart on the reference could either indicate a very long deletion (inversion) or a translocation breakpoint. SVIM calls a translocation breakpoint if the mapping distance is larger than this parameter and a deletion (or inversion) if it is smaller or equal.')
    group_bam_collect.add_argument('--segment_gap_tolerance', type=int, default=10, help='Maximum tolerated gap between adjacent alignment segments (default: %(default)s). This parameter applies to gaps on the reference and the read. Example: Deletions are detected from two subsequent segments of a split read that are mapped far apart from each other on the reference. The segment gap tolerance determines the maximum tolerated length of the read gap between both segments. If there is an unaligned read segment larger than this value between the two segments, no deletion is called.')
    group_bam_collect.add_argument('--segment_overlap_tolerance', type=int, default=5, help='Maximum tolerated overlap between adjacent alignment segments (default: %(default)s). This parameter applies to overlaps on the reference and the read. Example: Deletions are detected from two subsequent segments of a split read that are mapped far apart from each other on the reference. The segment overlap tolerance determines the maximum tolerated length of an overlap between both segments on the read. If the overlap between the two segments on the read is larger than this value, no deletion is called.')
    group_bam_cluster = parser_bam.add_argument_group('CLUSTER')
    group_bam_cluster.add_argument('--partition_max_distance', type=int, default=1000, help='Maximum distance in bp between SVs in a partition (default: %(default)s). Before clustering, the SV signatures are divided into coarse partitions. This parameter determines the maximum distance between two subsequent signatures in the same partition. If the distance between two subsequent signatures is larger than this parameter, they are distributed into separate partitions.')
    group_bam_cluster.add_argument('--position_distance_normalizer', type=int, default=900, help='Distance normalizer used for span-position distance (default: %(default)s). SVIM clusters the SV signatures using an hierarchical clustering approach and a novel distance metric called "span-position distance". Span-position distance is the sum of two components, span distance and position distance. The span distance is the difference in lengths between signatures normalized by the greater length and always lies in the interval [0,1]. The position distance is the difference in position between signatures normalized by the distance normalizer (this parameter). For a position difference of 1.8kb and a distance normalizer of 900, the position distance will be 2. A smaller distance normalizer leads to a higher position distance and as a consequence increases the importance of the position distance in the span-position distance relative to the span distance.')
    group_bam_cluster.add_argument('--edit_distance_normalizer', type=float, default=1.0, help='Distance normalizer used specifically for insertions (default: %(default)s). SVIM clusters insertion signatures using an hierarchical clustering approach and a special distance metric for insertions. This distance is the sum of two components, position distance and edit distance between the insertion sequences. The edit distance is normalized (i.e. divided) by the product of the span of the longer insertion and this normalizer. The position distance is the difference in position between signatures normalized by the position distance normalizer (another parameter). A smaller edit distance normalizer leads to a larger edit distance and as a consequence increases the importance of the edit distance in the clustering process so that only insertions with very similar sequences are clustered together. A larger edit distance normalizer diminishes the importance of the insertion sequences in the clustering process.')
    group_bam_cluster.add_argument('--cluster_max_distance', type=float, default=0.5, help='Maximum span-position distance between SVs in a cluster (default: %(default)s). This is the most important parameter because it determines the strictness of clustering. Choosing a large value leads to fewer but larger clusters with larger distances between its members. Choosing a small value leads to more but smaller clusters with smaller distances between its members. This parameter determines the height of the cut-off in the hierarchical clustering dendrogram.')
    group_bam_cluster.add_argument('--all_bnds', action='store_true', help="Output all rearrangements additionally in BND notation (default: %(default)s). By default, SV signatures from the read alignments are used to detect complete SVs, such as deletions, insertions and inversions. When this option is enabled, all SVs are also output in breakend (BND) notation as defined in the VCF specs. For instance, a deletion gets two records in the VCF output: 1. the normal <DEL> record and 2. a <BND> record representing the novel adjacency between the deletion's start and end coordinate in the sample genome.")
    group_bam_combine = parser_bam.add_argument_group('COMBINE')
    group_bam_combine.add_argument('--del_ins_dup_max_distance', type=float, default=1.0, help='Maximum span-position distance between the origin of an insertion and a deletion to be flagged as a potential cut&paste insertion (default: %(default)s)')
    group_bam_combine.add_argument('--trans_sv_max_distance', type=int, default=500, help='Maximum distance in bp between a translocation breakpoint and an SV signature to be combined (default: %(default)s)')
    group_bam_combine.add_argument('--skip_consensus', action='store_true', help='Disable consensus computation for insertions (default: %(default)s). This reduces the time and memory consumption of SVIM and might be useful if consensus sequences are not needed. With this option, insertion calls are represented by symbolic alleles (<INS>) instead of sequence alles in the output VCF. Consensus computation requires a modern CPU with the SSE 4.1 instruction set. For older CPUs missing this instruction set, consensus computation is automatically disabled.')
    group_bam_combine.add_argument('--max_consensus_length', type=int, default=10000, help='Maximum size of insertion sequences for consensus computation. (default: %(default)s) For insertions longer than this threshold, no consensus is computed to save memory.')
    group_bam_genotype = parser_bam.add_argument_group('GENOTYPE')
    group_bam_genotype.add_argument('--skip_genotyping', action='store_true', help='Disable genotyping (default: %(default)s)')
    group_bam_genotype.add_argument('--minimum_score', type=int, default=3, help='Minimum score for genotyping (default: %(default)s). Only SV candidates with a higher or equal score are genotyped. Depending on the score distribution among the SV candidates, decreasing this value increases the runtime. We recommend to choose a value close to the score threshold used for filtering the SV candidates.')
    group_bam_genotype.add_argument('--homozygous_threshold', type=float, default=0.8, help='Minimum variant allele frequency to be called as homozygous (default: %(default)s). Allele frequency is computed as the fraction of reads supporting the variant over the total number of reads covering the variant. Variants with an allele frequence greater than or equal to this threshold are called as homozygous alternative.')
    group_bam_genotype.add_argument('--heterozygous_threshold', type=float, default=0.2, help='Minimum variant allele frequency to be called as heterozygous (default: %(default)s). Allele frequency is computed as the fraction of reads supporting the variant over the total number of reads covering the variant. Variants with an allele frequence greater than or equal to this threshold but lower than the homozygous threshold are called as heterozygous alternative. Variants with an allele frequence lower than this threshold are called as homozygous reference.')
    group_bam_genotype.add_argument('--minimum_depth', type=int, default=4, help='Minimum total read depth for genotyping (default: %(default)s). Variants covered by a total number of reads lower than this value are not assigned a genotype (./. in the output VCF file).')
    group_bam_output = parser_bam.add_argument_group('OUTPUT')
    group_bam_output.add_argument('--sample', type=str, default='Sample', help='Sample ID to include in output vcf file (default: %(default)s)')
    group_bam_output.add_argument('--types', type=str, default='DEL,INS,INV,DUP:TANDEM,DUP:INT,BND', help='SV types to include in output VCF (default: %(default)s). Give a comma-separated list of SV types. The possible SV types are: DEL (deletions), INS (novel insertions), INV (inversions), DUP:TANDEM (tandem duplications), DUP:INT (interspersed duplications), BND (breakends).')
    group_bam_output.add_argument('--symbolic_alleles', action='store_true', help='Use symbolic alleles, such as <DEL> or <INV> in output VCF (default: %(default)s). By default, all SV alleles are represented by nucleotide sequences.')
    group_bam_output.add_argument('--insertion_sequences', action='store_true', help='Output insertion sequences in INFO tag of VCF (default: %(default)s). If enabled, the INFO/SEQS tag contains a list of insertion sequences from the supporting reads.')
    group_bam_output.add_argument('--tandem_duplications_as_insertions', action='store_true', help='Represent tandem duplications as insertions in output VCF (default: %(default)s). By default, tandem duplications are represented by the SVTYPE=DUP:TANDEM and the genomic source is given by the POS and END tags. When enabling this option, duplications are instead represented by the SVTYPE=INS and POS and END both give the insertion point of the duplication.')
    group_bam_output.add_argument('--interspersed_duplications_as_insertions', action='store_true', help='Represent interspersed duplications as insertions in output VCF (default: %(default)s). By default, interspersed duplications are represented by the SVTYPE=DUP:INT and the genomic source is given by the POS and END tags. When enabling this option, duplications are instead represented by the SVTYPE=INS and POS and END both give the insertion point of the duplication.')
    group_bam_output.add_argument('--read_names', action='store_true', help='Output names of supporting reads in INFO tag of VCF (default: %(default)s). If enabled, the INFO/READS tag contains the list of names of the supporting reads.')
    group_bam_output.add_argument('--zmws', action='store_true', help='look for information on ZMWs in PacBio read names (default: %(default)s). If enabled, the INFO/ZMWS tag contains the number of ZMWs that produced supporting reads.')
    return parser.parse_args(arguments)
# Fix: the decorator was truncated to the bare fragment `.parametrize(...)`,
# which is a syntax error; restore the standard `@pytest.mark.parametrize`
# form (the file is a pytest test module, so `pytest` is expected to be
# imported at the top of the file).
@pytest.mark.parametrize('degree', [1, 2])
def test_rtcf_expansion(tpc_quad, degree):
    """An RTCF element on a quad TP cell must equal the enriched HDiv
    tensor-product construction CGxDG + DGxCG of the same degree."""
    actual = FiniteElement('RTCF', tpc_quad, degree)
    C_elt = FiniteElement('CG', interval, degree)
    D_elt = FiniteElement('DG', interval, (degree - 1))
    expected = (HDiv(TensorProductElement(C_elt, D_elt)) + HDiv(TensorProductElement(D_elt, C_elt)))
    assert (expected == actual)
def parse_value_string(type_: Type, value_string: str):
    """Convert the textual representation of a value into a Python value.

    Scalars of primitive type are delegated to parse_primitive_value_string;
    arrays of primitive type are parsed element by element, enforcing the
    declared (or upper-bound) array size. Any other type raises
    NotImplementedError.
    """
    if not type_.is_primitive_type():
        raise NotImplementedError("parsing string values into type '%s' is not supported" % type_)
    if not type_.is_array:
        # Plain primitive scalar.
        return parse_primitive_value_string(type_, value_string)

    # Primitive array: must be bracket-delimited.
    if not (value_string.startswith('[') and value_string.endswith(']')):
        raise InvalidValue(type_, value_string, "array value must start with '[' and end with ']'")
    inner = value_string[1:(- 1)]

    # Strings need quote-aware splitting; everything else splits on commas.
    if type_.type in ('string', 'wstring'):
        element_strings = parse_string_array_value_string(inner, type_.array_size)
    elif inner:
        element_strings = inner.split(',')
    else:
        element_strings = []

    # Enforce fixed size, or upper bound when the size is an upper bound.
    if type_.array_size:
        count = len(element_strings)
        if not type_.is_upper_bound and count != type_.array_size:
            raise InvalidValue(type_, value_string, 'array must have exactly %u elements, not %u' % (type_.array_size, count))
        if type_.is_upper_bound and count > type_.array_size:
            raise InvalidValue(type_, value_string, 'array must have not more than %u elements, not %u' % (type_.array_size, count))

    parsed: List[Union[(bool, int, float, str)]] = []
    for index, raw in enumerate(element_strings):
        raw = raw.strip()
        try:
            # Parse each element as a scalar of the array's base type.
            element_type = Type(BaseType.__str__(type_))
            parsed.append(parse_primitive_value_string(element_type, raw))
        except InvalidValue as e:
            raise InvalidValue(type_, value_string, 'element %u with %s' % (index, e))
    return parsed
class OptionPlotoptionsTreemapSonificationContexttracksMappingTremoloDepth(Options):
    """Accessor for the tremolo-depth mapping options of a sonification track.

    Fix: the original contained duplicate bare ``def`` pairs for each option
    (getter immediately shadowed by a same-named setter, making the getters
    unreachable). The ``@property`` / ``@<name>.setter`` decorators that this
    getter/setter pattern requires are restored, so both reading and writing
    each option work as intended.
    """

    @property
    def mapFunction(self):
        # Mapping function for the option value (configured, else None).
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        # Data property the option is mapped to (configured, else None).
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        # Upper bound of the mapped range (configured, else None).
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        # Lower bound of the mapped range (configured, else None).
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        # Value-range constraint for the mapping (configured, else None).
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def extractSpringraintranslationsWordpressCom(item):
    """Map a springraintranslations.wordpress.com feed item to a release.

    Returns ``None`` for posts without chapter/volume info or previews,
    a release message when a known series tag is present, and ``False``
    when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])

    # Skip non-chapter posts and preview announcements.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None

    # (tag to look for, series name to report, translation type)
    tagmap = [
        ('mary sue does not stick to the plot',
         'mary sue does not stick to the plot', 'translated'),
        ("scheming villainess's counterattack",
         "scheming villainess's counterattack", 'translated'),
        ('Princess and the General', 'Princess and the General', 'translated'),
        ('Shen Yi Di Nu', 'Shen Yi Di Nu', 'translated'),
        ('Epoch of the Dragon', 'X Epoch of the Dragon', 'translated'),
    ]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(
                item, name, vol, chp,
                frag=frag, postfix=postfix, tl_type=tl_type)

    return False
def main():
    """Train and evaluate LDA on the first two classes of the Iris dataset."""
    data = datasets.load_iris()
    X, y = data.data, data.target

    # Keep only classes 0 and 1 — this LDA classifier is binary.
    binary_mask = y != 2
    X, y = X[binary_mask], y[binary_mask]

    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)

    classifier = LDA()
    classifier.fit(X_train, y_train)
    y_pred = classifier.predict(X_test)

    accuracy = accuracy_score(y_test, y_pred)
    print('Accuracy:', accuracy)

    # Visualize the predictions projected into two dimensions.
    Plot().plot_in_2d(X_test, y_pred, title='LDA', accuracy=accuracy)
def get_heatmaps_widget(*, title: str='', primary_data: HeatmapData, secondary_data: Optional[HeatmapData]=None, size: WidgetSize=WidgetSize.FULL, color_options: ColorOptions) -> BaseWidgetInfo:
    """Build a widget with one heatmap, or two side-by-side sharing a y-axis.

    The secondary heatmap is optional; both traces share one color axis so
    their scales are directly comparable.
    """
    if secondary_data is None:
        heatmaps_count = 1
        subplot_titles = ['']
    else:
        heatmaps_count = 2
        subplot_titles = [primary_data.name, secondary_data.name]

    figure = make_subplots(
        rows=1,
        cols=heatmaps_count,
        subplot_titles=subplot_titles,
        shared_yaxes=True,
    )

    for idx, heatmap_data in enumerate((primary_data, secondary_data)):
        if heatmap_data is None:
            continue
        matrix = heatmap_data.matrix
        columns = matrix.columns
        # Annotate cell values only for small matrices; the text would be
        # unreadable on larger ones.
        if len(columns) < 15:
            cell_text = np.round(matrix, 2).astype(str)
            text_template: Optional[str] = '%{text}'
        else:
            cell_text = None
            text_template = None
        figure.add_trace(
            go.Heatmap(
                z=matrix,
                x=columns,
                y=columns,
                text=cell_text,
                texttemplate=text_template,
                coloraxis='coloraxis',
            ),
            1,
            idx + 1,
        )

    figure.update_layout(coloraxis={'colorscale': color_options.heatmap})
    figure.update_yaxes(type='category')
    figure.update_xaxes(tickangle=-45)
    return plotly_figure(title=title, figure=figure, size=size)
def run():
    """Issue a single SNMP GET for sysDescr.0 and print the result.

    NOTE(review): the ``yield trollius.From(...)`` makes this a trollius
    (Python 2 asyncio backport) generator-coroutine; it presumably carries
    a ``@trollius.coroutine`` decorator at the call site — confirm.
    """
    snmpEngine = SnmpEngine()
    # SNMPv1 (mpModel=0) GET against the public demo agent, default port 161.
    iterator = getCmd(snmpEngine, CommunityData('public', mpModel=0), UdpTransportTarget(('demo.snmplabs.com', 161)), ContextData(), ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)))
    # Suspend until the asynchronous command completes.
    (errorIndication, errorStatus, errorIndex, varBinds) = (yield trollius.From(iterator))
    if errorIndication:
        # Engine/transport-level failure (e.g. timeout, unreachable host).
        print(errorIndication)
    elif errorStatus:
        # SNMP protocol-level error; point at the offending varBind if the
        # agent reported an index, otherwise print '?'.
        print(('%s at %s' % (errorStatus.prettyPrint(), ((errorIndex and varBinds[(int(errorIndex) - 1)][0]) or '?'))))
    else:
        # Success: print each OID = value pair.
        for varBind in varBinds:
            print(' = '.join([x.prettyPrint() for x in varBind]))
    # Release the transport resources held by the engine.
    snmpEngine.transportDispatcher.closeDispatcher()
class TestSetRequest(BasePyTestCase):
    """Tests for ``POST /updates/{alias}/request``.

    NOTE(review): the ``@mock.patch`` / ``@mock.patch.dict`` decorators had
    been mangled into bare expressions (e.g. ``('...').dict(...)``), which
    raises at class-creation time; they are restored below. ``mock`` is
    assumed to be imported at module level (``from unittest import mock``)
    — confirm.
    """

    def test_set_request_locked_update(self, *args):
        """Changing the request of a locked update is rejected with 400."""
        nvr = 'bodhi-2.0-1.fc17'
        up = self.db.query(Build).filter_by(nvr=nvr).one().update
        up.locked = True
        post_data = dict(
            update=nvr, request='stable',
            csrf_token=self.app.get('/csrf').json_body['csrf_token'])
        res = self.app.post_json(f'/updates/{up.alias}/request', post_data, status=400)
        assert res.json_body['status'] == 'error'
        assert res.json_body['errors'][0]['description'] == \
            "Can't change request on a locked update"

    def test_set_request_rawhide(self, *args):
        """Requests are rejected for releases not composed by Bodhi."""
        nvr = 'bodhi-2.0-1.fc17'
        up = self.db.query(Build).filter_by(nvr=nvr).one().update
        up.locked = False
        up.test_gating_status = TestGatingStatus.passed
        up.date_testing = datetime.utcnow() - timedelta(days=8)
        up.release.composed_by_bodhi = False
        post_data = dict(
            update=nvr, request='stable',
            csrf_token=self.app.get('/csrf').json_body['csrf_token'])
        res = self.app.post_json(f'/updates/{up.alias}/request', post_data, status=400)
        assert res.json_body['status'] == 'error'
        assert res.json_body['errors'][0]['description'] == \
            'Setting a request on an Update for a Release not composed by Bodhi is not allowed'

    def test_set_request_archived_release(self, *args):
        """Requests are rejected for updates of archived releases."""
        nvr = 'bodhi-2.0-1.fc17'
        up = self.db.query(Build).filter_by(nvr=nvr).one().update
        up.locked = False
        up.release.state = ReleaseState.archived
        post_data = dict(
            update=up.alias, request='stable',
            csrf_token=self.app.get('/csrf').json_body['csrf_token'])
        res = self.app.post_json(f'/updates/{up.alias}/request', post_data, status=400)
        assert res.json_body['status'] == 'error'
        assert res.json_body['errors'][0]['description'] == \
            'cannot edit Update for an archived Release'

    def test_set_request_testing_from_stable(self, *args):
        """A stable update cannot be pushed back to testing."""
        nvr = 'bodhi-2.0-1.fc17'
        up = self.db.query(Build).filter_by(nvr=nvr).one().update
        up.locked = False
        up.status = UpdateStatus.stable
        post_data = dict(
            update=up.alias, request='testing',
            csrf_token=self.app.get('/csrf').json_body['csrf_token'])
        res = self.app.post_json(f'/updates/{up.alias}/request', post_data, status=400)
        assert res.json_body['status'] == 'error'
        assert res.json_body['errors'][0]['description'] == \
            'Pushing back to testing a stable update is not allowed'

    @mock.patch('bodhi.server.services.updates.log.info')
    @mock.patch.dict(config, {'test_gating.required': True})
    def test_test_gating_status_failed(self, info):
        """A failed gating status blocks the stable request and logs why."""
        nvr = 'bodhi-2.0-1.fc17'
        up = self.db.query(Build).filter_by(nvr=nvr).one().update
        up.locked = False
        up.test_gating_status = TestGatingStatus.failed
        up.date_testing = datetime.utcnow() - timedelta(days=8)
        up.request = None
        post_data = dict(update=nvr, request='stable', csrf_token=self.get_csrf_token())
        res = self.app.post_json(f'/updates/{up.alias}/request', post_data, status=400)
        # Re-query: the request must still be unset after the rejection.
        up = self.db.query(Build).filter_by(nvr=nvr).one().update
        assert up.request is None
        assert res.json_body['status'] == 'error'
        assert res.json_body['errors'][0]['description'] == \
            'Requirement not met Required tests did not pass on this update.'
        info_logs = '\n'.join([c[1][0] for c in info.mock_calls])
        assert f'Unable to set request for {up.alias} to stable due to failed requirements: Required tests did not pass on this update.' in info_logs

    @mock.patch.dict(config, {'test_gating.required': True})
    def test_test_gating_status_passed(self):
        """A passed gating status allows the stable request."""
        nvr = 'bodhi-2.0-1.fc17'
        up = self.db.query(Build).filter_by(nvr=nvr).one().update
        up.locked = False
        up.test_gating_status = TestGatingStatus.passed
        up.date_testing = datetime.utcnow() - timedelta(days=8)
        post_data = dict(update=nvr, request='stable', csrf_token=self.get_csrf_token())
        with fml_testing.mock_sends(api.Message):
            res = self.app.post_json(f'/updates/{up.alias}/request', post_data, status=200)
        up = self.db.query(Build).filter_by(nvr=nvr).one().update
        assert up.request == UpdateRequest.stable
        assert res.json['update']['request'] == 'stable'

    @mock.patch('bodhi.server.services.updates.Update.set_request',
                side_effect=BodhiException('BodhiException. oops!'))
    @mock.patch('bodhi.server.services.updates.Update.check_requirements',
                return_value=(True, 'a fake reason'))
    @mock.patch('bodhi.server.services.updates.log.info')
    def test_BodhiException_exception(self, log_info, check_requirements, send_request, *args):
        """A BodhiException from set_request surfaces as a 400 and is logged."""
        nvr = 'bodhi-2.0-1.fc17'
        up = self.db.query(Build).filter_by(nvr=nvr).one().update
        up.locked = False
        up.release.state = ReleaseState.current
        post_data = dict(
            update=up.alias, request='stable',
            csrf_token=self.app.get('/csrf').json_body['csrf_token'])
        res = self.app.post_json(
            f"/updates/{post_data['update']}/request", post_data, status=400)
        assert res.json_body['status'] == 'error'
        assert res.json_body['errors'][0]['description'] == 'BodhiException. oops!'
        assert log_info.call_count == 1
        assert log_info.call_args_list[0][0][0] == 'Failed to set the request: %s'

    @mock.patch('bodhi.server.services.updates.Update.set_request',
                side_effect=IOError('IOError. oops!'))
    @mock.patch('bodhi.server.services.updates.Update.check_requirements',
                return_value=(True, 'a fake reason'))
    @mock.patch('bodhi.server.services.updates.log.exception')
    def test_unexpected_exception(self, log_exception, check_requirements, send_request, *args):
        """An unexpected exception surfaces as a 400 and is logged as such."""
        nvr = 'bodhi-2.0-1.fc17'
        up = self.db.query(Build).filter_by(nvr=nvr).one().update
        up.locked = False
        up.release.state = ReleaseState.current
        post_data = dict(
            update=nvr, request='stable',
            csrf_token=self.app.get('/csrf').json_body['csrf_token'])
        res = self.app.post_json(f'/updates/{up.alias}/request', post_data, status=400)
        assert res.json_body['status'] == 'error'
        assert res.json_body['errors'][0]['description'] == 'IOError. oops!'
        log_exception.assert_called_once_with('Unhandled exception in set_request')
# NOTE(review): the decorator had been mangled to a bare '.parametrize(...)'
# expression (a NameError at import time); restored as
# @pytest.mark.parametrize — confirm 'pytest' is imported at module level.
@pytest.mark.parametrize('fn, geoms, ref_energy', Bm)
def test_birkholz_benchmark(fn, geoms, ref_energy, results_bag):
    """Run precon_pos_rot on a Birkholz benchmark R/P pair and record RMSDs.

    Writes a ``<case>_comp.trj`` file comparing original and preconditioned
    geometries and stores the per-case results in *results_bag*.
    """
    prefix = Path(fn).stem + '_'
    reactants, _, products = geoms
    # Keep untouched copies so the preconditioning effect can be measured.
    rcp = reactants.copy()
    pcp = products.copy()
    rgeom, pgeom = precon_pos_rot(reactants, products, prefix=prefix)
    # RMSD of the preconditioned geometries w.r.t. the originals.
    rr = rgeom.rmsd(rcp)
    pr = pgeom.rmsd(pcp)
    print(f' {fn} R_RMSD={rr:.4f} P_PRMSD={pr:.4f}')
    r_org_xyz = rcp.as_xyz(comment='R, original')
    p_org_xyz = pcp.as_xyz(comment='P, original')
    r_xyz = rgeom.as_xyz(comment='R, precon')
    p_xyz = pgeom.as_xyz(comment='P, precon')
    trj = '\n'.join((r_org_xyz, r_xyz, p_org_xyz, p_xyz))
    with open(prefix + 'comp.trj', 'w') as handle:
        handle.write(trj)
    # Expose results for aggregation (pytest-harvest results_bag fixture).
    results_bag.fn = fn
    results_bag.r_rmsd = rr
    results_bag.p_rmsd = pr
    results_bag.trj = trj
# NOTE(review): the decorator residue '_login' matches Flask-OpenID's
# @oid.after_login hook (the handler receives the OpenID response object);
# restored accordingly — confirm 'oid' is the module-level OpenID instance.
@oid.after_login
def create_or_login(resp):
    """Flask-OpenID after-login hook: map the OpenID response to a user.

    Stores the identity URL in the session, rejects users that are not
    allowed, and otherwise delegates to ``do_create_or_login``.
    """
    flask.session['openid'] = resp.identity_url
    fasusername = FedoraAccounts.fed_raw_name(resp.identity_url)
    if not FedoraAccounts.is_user_allowed(fasusername):
        flask.flash("User '{0}' is not allowed".format(fasusername))
        return flask.redirect(oid.get_next_url())
    user = UserAuth.user_object(oid_resp=resp)
    return do_create_or_login(user)
class LayoutType(enum.Enum):
    """Tensor/matrix memory layouts (CUTLASS-style naming).

    Values are explicit but match the 1-based sequence ``auto()`` would
    assign, so member identities and ``.value``s are unchanged.
    """

    # Matrix layouts.
    ColumnMajor = 1
    RowMajor = 2
    # 1-D layouts (presumably conv1d activation/filter/output — confirm).
    NWC = 3
    KXC = 4
    NWK = 5
    NCW = 6
    KCX = 7
    NKW = 8
    # 2-D layouts.
    NHWC = 9
    KYXC = 10
    NHWK = 11
    NCHW = 12
    KCYX = 13
    # NOTE(review): 'NKWH' looks like a possible typo for 'NKHW' — confirm
    # against consumers before renaming.
    NKWH = 14
    # 3-D layouts.
    NDHWC = 15
    KZYXC = 16
    NDHWK = 17
    NCDHW = 18
    KCZYX = 19
    NKDHW = 20
    # Grouped layouts.
    G_NHW_C = 21
    G_K_YX_C = 22
    G_NHW_K = 23
    NHWGC = 24
    KYXGC = 25
    NHWGK = 26
    GNHWC = 27
    GKYXC = 28
    GNHWK = 29
    GNWC = 30
    GKXC = 31
    GNWK = 32
# NOTE(review): the decorator had been mangled to a bare string expression;
# restored as a dredd @hooks.before transaction hook — confirm 'hooks' is
# the module-level dredd_hooks import used by the sibling hooks in this file.
@hooks.before('Event Types > Event Type of an Event > Event Type Details of an Event')
def event_event_type_get_detail(transaction):
    """Seed the DB with an event type and an event before the detail GET runs."""
    with stash['app'].app_context():
        event_type = EventTypeFactory()
        db.session.add(event_type)
        # The event references the event type created above (id 1).
        event = EventFactoryBasic(event_type_id=1)
        db.session.add(event)
        db.session.commit()