code
stringlengths
281
23.7M
class TaskTestFunction():
    """Context-manager test helper that runs a dummy Hydra task in a temp dir.

    The instance itself is the task function (see ``__call__``); entering the
    context builds a Hydra instance, runs the task, and stores the result in
    ``job_ret``.  Attributes are populated by the caller before entering.
    """

    def __init__(self) -> None:
        self.temp_dir: Optional[str] = None
        self.overrides: Optional[List[str]] = None
        self.calling_file: Optional[str] = None
        self.calling_module: Optional[str] = None
        self.config_path: Optional[str] = None
        self.config_name: Optional[str] = None
        self.hydra: Optional[Hydra] = None
        self.job_ret: Optional[JobReturn] = None
        self.configure_logging: bool = False

    def __call__(self, cfg: DictConfig) -> Any:
        # The "task function" under test: a fixed sentinel return value.
        return 100

    def __enter__(self) -> 'TaskTestFunction':
        try:
            validate_config_path(self.config_path)
            job_name = detect_task_name(self.calling_file, self.calling_module)
            self.hydra = Hydra.create_main_hydra_file_or_module(calling_file=self.calling_file, calling_module=self.calling_module, config_path=self.config_path, job_name=job_name)
            self.temp_dir = tempfile.mkdtemp()
            # Copy so the caller-supplied overrides list is not mutated.
            overrides = copy.deepcopy(self.overrides)
            assert (overrides is not None)
            overrides.append(f'hydra.run.dir={self.temp_dir}')
            self.job_ret = self.hydra.run(config_name=self.config_name, task_function=self, overrides=overrides, with_log_configuration=self.configure_logging)
            return self
        finally:
            # Always reset the global Hydra state, even if setup/run failed.
            GlobalHydra().clear()

    def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
        if self.configure_logging:
            logging.shutdown()
        assert (self.temp_dir is not None)
        shutil.rmtree(self.temp_dir, ignore_errors=True)
# Django migration creating the chat app's emoji/fixed-text models:
# EmojiPack, FixedText, SharedFixedText, SharedEmojiPack and Emoji.
# Depends on the initial 'player' migration for the Player FK/M2M targets.
# NOTE(review): many verbose_name/help_text strings are empty — presumably
# non-ASCII text stripped by an export step; left byte-identical on purpose.
class Migration(migrations.Migration): initial = True dependencies = [('player', '0001_initial')] operations = [migrations.CreateModel(name='EmojiPack', fields=[('id', models.AutoField(help_text='ID', primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(help_text='', max_length=100, verbose_name='')), ('avail_to', models.ManyToManyField(blank=True, help_text='', related_name='emoji_sets', to='player.Player', verbose_name=''))], options={'verbose_name': '', 'verbose_name_plural': ''}), migrations.CreateModel(name='FixedText', fields=[('id', models.AutoField(help_text='ID', primary_key=True, serialize=False, verbose_name='ID')), ('text', models.CharField(help_text='', max_length=200, verbose_name='')), ('voice', models.URLField(blank=True, help_text='', null=True, verbose_name='')), ('actor', models.CharField(blank=True, help_text='', max_length=50, null=True, verbose_name='')), ('character', models.CharField(blank=True, help_text='', max_length=200, null=True, verbose_name='')), ('avail_to', models.ManyToManyField(blank=True, help_text='', related_name='fixed_texts', to='player.Player', verbose_name='')), ('pinned_by', models.ManyToManyField(blank=True, help_text='Pin ', related_name='pinned_fixed_texts', to='player.Player', verbose_name='Pin '))], options={'verbose_name': '', 'verbose_name_plural': ''}), migrations.CreateModel(name='SharedFixedText', fields=[('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('ref', models.OneToOneField(help_text='', on_delete=django.db.models.deletion.CASCADE, related_name='shared', 
to='chat.emojipack', verbose_name=''))], options={'verbose_name': '', 'verbose_name_plural': ''}), migrations.CreateModel(name='Emoji', fields=[('id', models.AutoField(help_text='ID', primary_key=True, serialize=False, verbose_name='ID')), ('name', models.CharField(help_text='', max_length=100, verbose_name='')), ('url', models.URLField(help_text='URL', verbose_name='URL')), ('pack', models.ForeignKey(help_text='', on_delete=django.db.models.deletion.CASCADE, related_name='items', to='chat.emojipack', verbose_name=''))], options={'verbose_name': '', 'verbose_name_plural': ''})]
# Evidently metric producing a predicted-vs-actual scatter for regression.
# calculate() validates that both 'target' and a single 'prediction' column
# exist, drops inf/NaN rows (via _make_df_for_plot), then builds either:
#  - raw scatter dicts (Predicted/Actual/x) when render_options.raw_data, or
#  - time-index-binned aggregates via prepare_df_for_time_index_plot,
#    returning ColumnAggScatterResult in the aggregated case.
# x axis is the datetime column when mapped, otherwise the dataframe index.
# NOTE(review): logic is branch-heavy and order-dependent; left byte-identical,
# comments only.
class RegressionPredictedVsActualPlot(Metric[ColumnScatterResult]): def __init__(self, options: AnyOptions=None): super().__init__(options=options) def calculate(self, data: InputData) -> ColumnScatterResult: dataset_columns = process_columns(data.current_data, data.column_mapping) target_name = dataset_columns.utility_columns.target prediction_name = dataset_columns.utility_columns.prediction datetime_column_name = dataset_columns.utility_columns.date curr_df = data.current_data.copy() ref_df = data.reference_data if ((target_name is None) or (prediction_name is None)): raise ValueError("The columns 'target' and 'prediction' columns should be present") if (not isinstance(prediction_name, str)): raise ValueError('Expect one column for prediction. List of columns was provided.') curr_df = self._make_df_for_plot(curr_df, target_name, prediction_name, datetime_column_name) if (ref_df is not None): ref_df = self._make_df_for_plot(ref_df.copy(), target_name, prediction_name, datetime_column_name) reference_scatter: Optional[Union[(dict, ColumnScatter)]] = None raw_data = self.get_options().render_options.raw_data if raw_data: current_scatter = {} current_scatter['Predicted'] = curr_df[prediction_name] current_scatter['Actual'] = curr_df[target_name] if (datetime_column_name is not None): current_scatter['x'] = curr_df[datetime_column_name] x_name = 'Timestamp' else: current_scatter['x'] = curr_df.index x_name = 'Index' if (ref_df is not None): reference_scatter = {} reference_scatter['Predicted'] = ref_df[prediction_name] reference_scatter['Actual'] = ref_df[target_name] reference_scatter['x'] = (ref_df[datetime_column_name] if datetime_column_name else ref_df.index) return ColumnScatterResult(current=current_scatter, reference=reference_scatter, x_name=x_name) current_scatter = {} (plot_df, prefix) = prepare_df_for_time_index_plot(curr_df, prediction_name, datetime_column_name) current_scatter['Predicted'] = plot_df (current_scatter['Actual'], _) = 
prepare_df_for_time_index_plot(curr_df, target_name, datetime_column_name) x_name_ref: Optional[str] = None if (ref_df is not None): reference_scatter = {} (plot_df, prefix_ref) = prepare_df_for_time_index_plot(ref_df, prediction_name, datetime_column_name) reference_scatter['Predicted'] = plot_df (reference_scatter['Actual'], _) = prepare_df_for_time_index_plot(ref_df, target_name, datetime_column_name) if (datetime_column_name is None): x_name_ref = 'Index binned' else: x_name_ref = (datetime_column_name + f' ({prefix_ref})') if (datetime_column_name is None): x_name = 'Index binned' else: x_name = (datetime_column_name + f' ({prefix})') cls = ColumnScatterResult if (not raw_data): cls = ColumnAggScatterResult return cls(current=current_scatter, reference=reference_scatter, x_name=x_name, x_name_ref=x_name_ref) def _make_df_for_plot(self, df, target_name: str, prediction_name: str, datetime_column_name: Optional[str]): result = df.replace([np.inf, (- np.inf)], np.nan) if (datetime_column_name is not None): result.dropna(axis=0, how='any', inplace=True, subset=[target_name, prediction_name, datetime_column_name]) return result.sort_values(datetime_column_name) result.dropna(axis=0, how='any', inplace=True, subset=[target_name, prediction_name]) return result.sort_index()
class MockDelegate(QStyledItemDelegate):
    """Item-delegate stub: paint() only records the largest item id seen,
    sizeHint() always reports a fixed 50x50 size."""

    def __init__(self, parent=None) -> None:
        super().__init__(parent)
        # Constant size returned for every index.
        self._size = QSize(50, 50)
        # Highest id observed across all paint() calls.
        self._max_id = 0

    def paint(self, painter, option: QStyleOptionViewItem, index: QModelIndex) -> None:
        # No drawing — just track the maximum item id painted so far.
        item_id = int(index.internalPointer().id)
        if item_id > self._max_id:
            self._max_id = item_id

    def sizeHint(self, option, index) -> QSize:
        return self._size
class UsersEventsRoles(db.Model):
    """Association model: assigns a Role to a User within a specific Event."""
    __tablename__ = 'users_events_roles'
    # A given (user, event, role) triple may only exist once.
    __table_args__ = (db.UniqueConstraint('user_id', 'event_id', 'role_id', name='uq_uer_user_event_role'),)
    id = db.Column(db.Integer, primary_key=True)
    # Rows are removed when the referenced event/user/role is deleted.
    event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='CASCADE'), nullable=False)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id', ondelete='CASCADE'), nullable=False)
    user = db.relationship('User')
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id', ondelete='CASCADE'), nullable=False)
    role = db.relationship('Role')

    def __repr__(self):
        return f'<UER {self.user!r}:{self.event_id!r}:{self.role!r}>'
def check_server(cli_version: str, server_url: str, quiet: bool=False) -> None:
    """Ping the server health endpoint and compare server/CLI versions.

    Exits with status 1 on HTTP 429 (rate limited); prints green on a version
    match (unless quiet) and red on a mismatch.
    """
    health_response = check_server_health((str(server_url) or ''))
    if (health_response.status_code == 429):
        echo_red('Server ratelimit reached. Please wait one minute and try again.')
        raise SystemExit(1)
    server_version = health_response.json()['version']
    if compare_application_versions(server_version, cli_version):
        if (not quiet):
            echo_green('Server is reachable and the client/server application versions match.')
    # NOTE(review): indentation reconstructed from a flattened source — the
    # mismatch branch is assumed to pair with the version-comparison check.
    else:
        echo_red(f'''Mismatched versions! Server Version: {server_version} CLI Version: {cli_version}''')
def test_extractionschemanode_without_type_cannot_be_deserialized() -> None:
    """Deserializing a schema whose attribute omits its type discriminator must fail.

    Pydantic v1 surfaces this as a ValueError; later majors raise NotImplementedError.
    """
    payload = '\n {\n "id": "root_object",\n "description": "Deserialization Example",\n "many": true,\n "attributes": [\n {\n "id": "number_attribute",\n "description": "Description for Number",\n "many": true,\n "examples": [\n ["Here is 1 number", 1],\n ["Here are 0 numbers", 0]\n ]\n }\n ]\n }\n '
    exception_class: Type[Exception] = (
        ValueError if PYDANTIC_MAJOR_VERSION == 1 else NotImplementedError
    )
    with pytest.raises(exception_class):
        Object.parse_raw(payload)
class TestFigureTrainingTeiParser():
    """Tests for parsing figure training TEI markup into BIO-tagged token results."""

    def test_should_parse_single_token_labelled_training_tei_lines(self):
        tei_root = _get_training_tei_with_figures([E('figure', E('head', TOKEN_1, E('lb')), '\n', E('figDesc', TOKEN_2, E('lb')), '\n')])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        assert (tag_result == [[(TOKEN_1, 'B-<figure_head>'), (TOKEN_2, 'B-<figDesc>')]])

    def test_should_parse_single_label_with_multiple_lines(self):
        # A label spanning line breaks continues with I- tags.
        tei_root = _get_training_tei_with_figures([E('figure', E('figDesc', TOKEN_1, E('lb'), '\n', TOKEN_2, E('lb')), '\n')])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        assert (tag_result == [[(TOKEN_1, 'B-<figDesc>'), (TOKEN_2, 'I-<figDesc>')]])

    def test_should_parse_figure_head_with_label(self):
        tei_root = _get_training_tei_with_figures([E('figure', E('head', TOKEN_1, ' ', E('label', TOKEN_2, E('lb'))), '\n')])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        assert (tag_result == [[(TOKEN_1, 'B-<figure_head>'), (TOKEN_2, 'B-<label>')]])

    def test_should_parse_figure_head_with_label_and_continued_head(self):
        # Text after a nested <label> resumes the enclosing head as I-.
        tei_root = _get_training_tei_with_figures([E('figure', E('head', TOKEN_1, ' ', E('label', TOKEN_2, E('lb')), ' ', TOKEN_3), '\n')])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        assert (tag_result == [[(TOKEN_1, 'B-<figure_head>'), (TOKEN_2, 'B-<label>'), (TOKEN_3, 'I-<figure_head>')]])

    def test_should_interpret_text_in_figure_as_unlabelled(self):
        tei_root = _get_training_tei_with_figures([E('figure', TOKEN_1, E('lb'), '\n')])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        assert (tag_result == [[(TOKEN_1, 'O')]])

    def test_should_output_multiple_tokens_of_each_unlabelled_lines(self):
        tei_root = _get_training_tei_with_figures([E('figure', TOKEN_1, ' ', TOKEN_2, E('lb'), '\n', TOKEN_3, ' ', TOKEN_4, E('lb'), '\n')])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        LOGGER.debug('tag_result: %r', tag_result)
        assert (tag_result == [[(TOKEN_1, 'O'), (TOKEN_2, 'O'), (TOKEN_3, 'O'), (TOKEN_4, 'O')]])

    def test_should_parse_single_label_with_multiple_tokens_on_multiple_lines(self):
        tei_root = _get_training_tei_with_figures([E('figure', E('figDesc', TOKEN_1, ' ', TOKEN_2, E('lb'), '\n', TOKEN_3, ' ', TOKEN_4, E('lb')), '\n')])
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        LOGGER.debug('tag_result: %r', tag_result)
        assert (tag_result == [[(TOKEN_1, 'B-<figDesc>'), (TOKEN_2, 'I-<figDesc>'), (TOKEN_3, 'I-<figDesc>'), (TOKEN_4, 'I-<figDesc>')]])

    # Restored decorator: the source had the bare fragment `.parametrize(...)`,
    # i.e. the `@pytest.mark` prefix was stripped by the export.
    @pytest.mark.parametrize('tei_label,element_path', list(TRAINING_XML_ELEMENT_PATH_BY_LABEL.items()))
    def test_should_parse_all_supported_labels(self, tei_label: str, element_path: Sequence[str]):
        xml_writer = XmlTreeWriter(E('tei'), element_maker=E)
        xml_writer.require_path(element_path)
        xml_writer.append_all(TOKEN_1, ' ', TOKEN_2, E('lb'), '\n', TOKEN_3, ' ', TOKEN_4, E('lb'))
        tei_root = xml_writer.root
        LOGGER.debug('tei_root: %r', etree.tostring(tei_root))
        tag_result = get_training_tei_parser().parse_training_tei_to_tag_result(tei_root)
        LOGGER.debug('tag_result: %r', tag_result)
        assert (tag_result == [[(TOKEN_1, f'B-{tei_label}'), (TOKEN_2, f'I-{tei_label}'), (TOKEN_3, f'I-{tei_label}'), (TOKEN_4, f'I-{tei_label}')]])
def _compare_public_ids(component_configuration: ComponentConfiguration, package_directory: Path) -> None:
    """Check that the public id in a skill package's ``__init__.py`` matches its config.

    Only skill packages are checked; other package types return immediately.

    :param component_configuration: configuration of the component being checked.
    :param package_directory: directory of the package on disk.
    :raises ValueError: if the two public ids are both present and differ.
    """
    if component_configuration.package_type != PackageType.SKILL:
        return
    filename = '__init__.py'
    public_id_in_init = _get_public_id_from_file(component_configuration, package_directory, filename)
    if (public_id_in_init is not None) and (public_id_in_init != component_configuration.public_id):
        # Fix: the message previously contained the literal "(unknown)" instead of
        # the computed filename, and the typo "specific" for "specified".
        raise ValueError(
            f'The public id specified in {filename} for package {package_directory} '
            f'does not match the one specified in {component_configuration.package_type.value}.yaml'
        )
class AEATestWrapper():
    """Test harness around an AEA agent: builds it from components and drives its lifecycle."""

    def __init__(self, name: str='my_aea', components: List[Component]=None):
        """Build the wrapped AEA from the given name and component instances."""
        self.components = (components or [])
        self.name = name
        self._fake_connection: Optional[FakeConnection] = None
        self.aea = self.make_aea(self.name, self.components)
        self._thread = None

    def make_aea(self, name: Optional[str]=None, components: List[Component]=None) -> AEA:
        """Construct an AEA via AEABuilder with the given components."""
        components = (components or [])
        builder = AEABuilder()
        builder.set_name((name or self.name))
        builder.add_private_key(_FETCHAI_IDENTIFIER, private_key_path=None)
        for component in components:
            builder.add_component_instance(component)
        aea = builder.build()
        return aea

    # Restored decorator: the method takes `cls`, so `@classmethod` was
    # evidently stripped by the export (same pattern as other blocks here).
    @classmethod
    def make_skill(cls, config: SkillConfig=None, context: SkillContext=None, handlers: Optional[Dict[str, Type[Handler]]]=None) -> Skill:
        """Build a Skill with the given handlers, generating a unique name if needed."""
        handlers = (handlers or {})
        context = (context or SkillContext())
        config = (config or SkillConfig(name='skill_{}'.format(uuid.uuid4().hex[0:5]), author='fetchai'))
        handlers_instances = {name: handler_cls(name=name, skill_context=context) for (name, handler_cls) in handlers.items()}
        skill = Skill(configuration=config, skill_context=context, handlers=handlers_instances)
        return skill

    @classmethod
    def dummy_default_message(cls, dialogue_reference: Tuple[str, str]=('', ''), message_id: int=1, target: int=0, performative: DefaultMessage.Performative=DefaultMessage.Performative.BYTES, content: Union[str, bytes]='hello world!') -> Message:
        """Build a DefaultMessage, encoding str content to bytes."""
        if isinstance(content, str):
            content = content.encode('utf-8')
        return DefaultMessage(dialogue_reference=dialogue_reference, message_id=message_id, target=target, performative=performative, content=content)

    @classmethod
    def dummy_envelope(cls, to: str='test', sender: str='test', message: Message=None) -> Envelope:
        """Wrap a (dummy) message in an Envelope."""
        message = (message or cls.dummy_default_message())
        return Envelope(to=to, sender=sender, protocol_specification_id=DefaultMessage.protocol_specification_id, message=DefaultSerializer().encode(message))

    def set_loop_timeout(self, period: float) -> None:
        self.aea._period = period

    def setup(self) -> None:
        self.aea.setup()

    def stop(self) -> None:
        self.aea.stop()

    def put_inbox(self, envelope: Envelope) -> None:
        self.aea.runtime.multiplexer.in_queue.put(envelope)

    def is_inbox_empty(self) -> bool:
        return self.aea.runtime.multiplexer.in_queue.empty()

    def __enter__(self) -> None:
        self.start_loop()

    def __exit__(self, exc_type=None, exc=None, traceback=None) -> None:
        self.stop_loop()
        return None

    def start_loop(self) -> None:
        """Run the agent's main loop in a background thread."""
        self._thread = Thread(target=self.aea.start)
        self._thread.start()

    def stop_loop(self) -> None:
        """Stop the agent and join the loop thread."""
        if (self._thread is None):
            raise ValueError('Thread not set, call start_loop first.')
        self.aea.stop()
        self._thread.join()

    def is_running(self) -> bool:
        # NOTE(review): returns the NEGATION of aea.is_running — preserved as-is,
        # but the name/semantics look inverted; confirm against callers.
        return (not self.aea.is_running)

    def set_fake_connection(self, inbox_num: int, envelope: Optional[Envelope]=None) -> None:
        """Attach a FakeConnection that injects `inbox_num` copies of `envelope`."""
        if self._fake_connection:
            raise Exception('Fake connection is already set!')
        envelope = (envelope or self.dummy_envelope())
        self._fake_connection = FakeConnection(envelope, inbox_num, connection_id='fake_connection')
        self.aea.runtime.multiplexer.add_connection(self._fake_connection)

    def is_messages_in_fake_connection(self) -> bool:
        """Return True while the fake connection still has envelopes to deliver."""
        if (not self._fake_connection):
            raise Exception('Fake connection is not set!')
        return (self._fake_connection.num != 0)
def test_transact_sending_ether_to_nonpayable_function(w3, payable_tester_contract, transact, call):
    """Sending value to a non-payable function must raise and leave state untouched."""
    initial_value = call(contract=payable_tester_contract, contract_function='wasCalled')
    assert (initial_value is False)
    with pytest.raises(Web3ValidationError):
        txn_hash = transact(contract=payable_tester_contract, contract_function='doNoValueCall', tx_params={'value': 1})
        # NOTE(review): the two lines below never execute if transact() raises
        # as expected — they only matter if validation unexpectedly passes.
        txn_receipt = w3.eth.wait_for_transaction_receipt(txn_hash)
        assert (txn_receipt is not None)
    final_value = call(contract=payable_tester_contract, contract_function='wasCalled')
    assert (final_value is False)
class PageAboutStoryComposedBlock(AbstractObject):
    """Graph API node wrapper for a composed block of a page's About story.

    Follows the generated-SDK object pattern: a Field enum of attribute
    names plus a _field_types mapping used for (de)serialization.
    """

    def __init__(self, api=None):
        super(PageAboutStoryComposedBlock, self).__init__()
        self._isPageAboutStoryComposedBlock = True
        self._api = api

    class Field(AbstractObject.Field):
        depth = 'depth'
        entity_ranges = 'entity_ranges'
        inline_style_ranges = 'inline_style_ranges'
        text = 'text'
        type = 'type'

    _field_types = {'depth': 'int', 'entity_ranges': 'list<PageAboutStoryComposedBlockEntityRanges>', 'inline_style_ranges': 'list<PageAboutStoryComposedBlockInlineStyle>', 'text': 'string', 'type': 'string'}

    # Restored decorator: the method takes `cls` but had lost `@classmethod`
    # (the generated-SDK pattern always declares this helper as a classmethod).
    @classmethod
    def _get_field_enum_info(cls):
        field_enum_info = {}
        return field_enum_info
# Verifies filter_time() splits the event list around t=4: by default it keeps
# events strictly after t (plus the null terminal event at inf); with
# after=False it keeps the events at or before t.  Also checks that a
# non-chronological input list raises ValueError.  Left byte-identical —
# the expected lists are large event fixtures.
def test_by_time(events, cue, ball1, ball2, ball3, cushion): assert (filter_time(events, 4) == [sliding_rolling_transition(ball2, 5), rolling_stationary_transition(ball2, 6), ball_ball_collision(ball1, ball3, 7), sliding_rolling_transition(ball1, 8), sliding_rolling_transition(ball3, 9), rolling_stationary_transition(ball1, 10), ball_linear_cushion_collision(ball3, cushion, 12), null_event(inf)]) assert (filter_time(events, 4, after=False) == [null_event(0), stick_ball_collision(cue, ball2, 1), sliding_rolling_transition(ball1, 2), ball_ball_collision(ball1, ball2, 3)]) with pytest.raises(ValueError, match='chronological'): filter_time(events[::(- 1)], 4)
def test_get_timeout_for_common_handshake_exceptions():
    """Every common connection/handshake failure maps to a timeout of at most one hour."""
    handshake_failures = (
        HandshakeFailure,
        HandshakeFailureTooManyPeers,
        MalformedMessage,
        NoMatchingPeerCapabilities,
    )
    one_hour = 60 * 60
    for failure_cls in COMMON_PEER_CONNECTION_EXCEPTIONS + handshake_failures:
        assert get_timeout_for_failure(failure_cls()) <= one_hour
class OptionPlotoptionsFunnel3dSonificationDefaultinstrumentoptionsMappingHighpass(Options):
    """Option wrapper for the sonification highpass-filter mapping (generated code)."""

    def frequency(self) -> 'OptionPlotoptionsFunnel3dSonificationDefaultinstrumentoptionsMappingHighpassFrequency':
        # Returns/creates the nested 'frequency' sub-option object.
        return self._config_sub_data('frequency', OptionPlotoptionsFunnel3dSonificationDefaultinstrumentoptionsMappingHighpassFrequency)

    def resonance(self) -> 'OptionPlotoptionsFunnel3dSonificationDefaultinstrumentoptionsMappingHighpassResonance':
        # Returns/creates the nested 'resonance' sub-option object.
        return self._config_sub_data('resonance', OptionPlotoptionsFunnel3dSonificationDefaultinstrumentoptionsMappingHighpassResonance)
class EmojiExtension(Extension):
    """Markdown extension that converts :shortname: emoji via a pluggable index/generator."""

    def __init__(self, *args, **kwargs):
        # Each config entry is [default, help-text] per python-markdown convention.
        self.config = {
            'emoji_index': [emojione, "Function that returns the desired emoji index. - Default: 'pymdownx.emoji.emojione'"],
            'emoji_generator': [to_png, 'Emoji generator method. - Default: pymdownx.emoji.to_png'],
            'title': ['short', "What title to use on images. You can use 'long' which shows the long name, 'short' which shows the shortname (:short:), or 'none' which shows no title. - Default: 'short'"],
            # Fix: help text previously misspelled "unicode" as "uniocde".
            'alt': ['unicode', "Control alt form. 'short' sets alt to the shortname (:short:), 'unicode' sets alt to the raw Unicode value, and 'html_entity' sets alt to the HTML entity. - Default: 'unicode'"],
            'remove_variation_selector': [False, 'Remove variation selector 16 from unicode. - Default: False'],
            'options': [{}, 'Emoji options see documentation for options for github and emojione.'],
        }
        super().__init__(*args, **kwargs)

    def extendMarkdown(self, md):
        """Register the emoji inline pattern and make ':' escapable."""
        config = self.getConfigs()
        util.escape_chars(md, [':'])
        md.inlinePatterns.register(EmojiPattern(RE_EMOJI, config, md), 'emoji', 75)
class PrintSelectorPatternTest(BowlerTestCase):
    """Tests for print_selector_pattern; output is captured in self.buffer (base class)."""

    def test_print_selector_pattern(self):
        node = self.parse_line('x + 1')
        expected = "arith_expr < 'x' '+' '1' > \n"
        print_selector_pattern(node)
        self.assertMultiLineEqual(expected, self.buffer.getvalue())

    def test_print_selector_pattern_capture(self):
        # A captured single node is rendered as name='value'.
        node = self.parse_line('x + 1')
        expected = "arith_expr < 'x' op='+' '1' > \n"
        print_selector_pattern(node, {'op': node.children[1]})
        self.assertMultiLineEqual(expected, self.buffer.getvalue())

    def test_print_selector_pattern_capture_list(self):
        # A captured list applies the capture name to each member.
        node = self.parse_line('x + 1')
        expected = "arith_expr < 'x' rest='+' rest='1' > \n"
        print_selector_pattern(node, {'rest': node.children[1:]})
        self.assertMultiLineEqual(expected, self.buffer.getvalue())
def test_average_regions_start():
    """Run hicAverageRegions with --coordinatesToBinMapping start and compare to the reference npz."""
    outfile = NamedTemporaryFile(suffix='.npz', prefix='average_region', delete=False)
    matrix = (ROOT + 'small_test_matrix.cool')
    bed_file = (ROOT + 'hicAverageRegions/regions_multi.bed')
    args = '--matrix {} --regions {} -o {} --range 100000 100000 -cb {}'.format(matrix, bed_file, outfile.name, 'start').split()
    log.debug('path: {}'.format(matrix))
    compute(hicAverageRegions.main, args, 5)
    test_file = load_npz((ROOT + 'hicAverageRegions/regions_multi_start.npz'))
    new_file = load_npz(outfile.name)
    # decimal=0: only coarse agreement is required between sparse matrices.
    nt.assert_almost_equal(test_file.data, new_file.data, decimal=0)
    os.remove(outfile.name)
# Red-team automation (RTA) script: serves a local web server, patches the
# callback URL inside an XSL file, then executes msxsl.exe with the XML/XSL
# pair to simulate an MsXsl beacon, and shuts the server down.
# NOTE(review): this row is corrupted by the export — `_os(*metadata.platforms)`
# and `(MS_XSL, XML_FILE, XSL_FILE)` are remnants of stripped decorators /
# assignments, and the `new_callback` string literal is truncated.  Left
# byte-identical; recover the original from the upstream repository.
_os(*metadata.platforms) (MS_XSL, XML_FILE, XSL_FILE) def main(): common.log('MsXsl Beacon') (server, ip, port) = common.serve_web() common.clear_web_cache() new_callback = (' % (ip, port)) common.log(('Updating the callback to %s' % new_callback)) common.patch_regex(XSL_FILE, common.CALLBACK_REGEX, new_callback) common.execute([MS_XSL, XML_FILE, XSL_FILE]) server.shutdown()
# Executes an unrestricted faceted search and checks the full result set:
# total hit count plus the exact contents of the files, (commit) frequency
# and deletions facets.  Left byte-identical — the expectations are bulk
# fixture data tied to the test index contents.
def test_empty_search_finds_everything(data_client, es_version, commit_search_cls): cs = commit_search_cls() r = cs.execute() assert (r.hits.total.value == 52) assert ([('elasticsearch_dsl', 40, False), ('test_elasticsearch_dsl', 35, False), ('elasticsearch_dsl/query.py', 19, False), ('test_elasticsearch_dsl/test_search.py', 15, False), ('elasticsearch_dsl/utils.py', 14, False), ('test_elasticsearch_dsl/test_query.py', 13, False), ('elasticsearch_dsl/search.py', 12, False), ('elasticsearch_dsl/aggs.py', 11, False), ('test_elasticsearch_dsl/test_result.py', 5, False), ('elasticsearch_dsl/result.py', 3, False)] == r.facets.files) assert ([(datetime(2014, 3, 3, 0, 0), 2, False), (datetime(2014, 3, 4, 0, 0), 1, False), (datetime(2014, 3, 5, 0, 0), 3, False), (datetime(2014, 3, 6, 0, 0), 3, False), (datetime(2014, 3, 7, 0, 0), 9, False), (datetime(2014, 3, 10, 0, 0), 2, False), (datetime(2014, 3, 15, 0, 0), 4, False), (datetime(2014, 3, 21, 0, 0), 2, False), (datetime(2014, 3, 23, 0, 0), 2, False), (datetime(2014, 3, 24, 0, 0), 10, False), (datetime(2014, 4, 20, 0, 0), 2, False), (datetime(2014, 4, 22, 0, 0), 2, False), (datetime(2014, 4, 25, 0, 0), 3, False), (datetime(2014, 4, 26, 0, 0), 2, False), (datetime(2014, 4, 27, 0, 0), 2, False), (datetime(2014, 5, 1, 0, 0), 2, False), (datetime(2014, 5, 2, 0, 0), 1, False)] == r.facets.frequency) assert ([('ok', 19, False), ('good', 14, False), ('better', 19, False)] == r.facets.deletions)
class RCR(Layout):
    """GEMM layout: A row-major, B column-major, C row-major ("RCR").

    Class-level strings feed the CUTLASS codegen templates; the helpers
    filter/adjust cutlass_lib ops to match this layout combination.
    """
    cutlass_layout_a = 'cutlass::layout::RowMajor'
    cutlass_layout_b = 'cutlass::layout::ColumnMajor'
    cutlass_layout_c = 'cutlass::layout::RowMajor'
    stride_a = 'K'
    stride_b = 'K'
    stride_c = 'N'
    args_parser = '\n int64_t a_dim0 = M;\n int64_t a_dim1 = K;\n int64_t b_dim0 = N;\n int64_t b_dim1 = K;\n int64_t c_dim0 = M;\n int64_t c_dim1 = N;\n'

    # Restored decorators: these helpers take no `self`/`cls`, so `@staticmethod`
    # was evidently stripped by the export.
    @staticmethod
    def fproc_op(op):
        """Force the op's C layout to row-major."""
        import cutlass_lib
        row_major = cutlass_lib.library.LayoutType.RowMajor
        op.C.layout = row_major

    @staticmethod
    def fcond_op(op):
        """Keep only ops with A row-major and B column-major."""
        import cutlass_lib
        row_major = cutlass_lib.library.LayoutType.RowMajor
        col_major = cutlass_lib.library.LayoutType.ColumnMajor
        return ((op.A.layout == row_major) and (op.B.layout == col_major))

    @staticmethod
    def cutlass_lib_layouts():
        """Return the (A, B, C) cutlass_lib layout types for this class."""
        import cutlass_lib
        return [cutlass_lib.library.LayoutType.RowMajor, cutlass_lib.library.LayoutType.ColumnMajor, cutlass_lib.library.LayoutType.RowMajor]
def version_is_compatible(version: Union[(str, semver.Version)], other: Union[(str, semver.Version)], forgiving: bool=False) -> bool:
    """Return True if the two versions are compatible.

    Strings are parsed as semver; on parse failure the value is only kept
    (coerced) when `forgiving` is set, otherwise the operand is marked
    non-semver.  One semver + one non-semver operand is never compatible;
    two non-semver operands are compared for exact equality via
    packaging.version.  Raises ValueError on an unparseable version there.
    """
    version_is_semver = True
    try:
        if isinstance(version, str):
            version = semver.Version.parse(version)
    except ValueError:
        # Parse failed: semver-ness depends on `forgiving`; fall back to coercion.
        version_is_semver = forgiving
        version = _coerce_version(version)
    other_is_semver = True
    try:
        if isinstance(other, str):
            other = semver.Version.parse(other)
    except ValueError:
        other_is_semver = forgiving
        other = _coerce_version(other)
    # Exactly one side is semver -> incompatible by definition.
    if (version_is_semver ^ other_is_semver):
        return False
    if ((not version_is_semver) and (not other_is_semver)):
        try:
            # Non-semver versions are only "compatible" when exactly equal.
            return (packaging.version.Version(version) == packaging.version.Version(other))
        except packaging.version.InvalidVersion as invalid_version:
            raise ValueError from invalid_version
    # Both semver: delegate to semver's compatibility rules.
    return version.is_compatible(other)
# Restored decorator: the source had the bare fragment `.compilertest`, i.e.
# the `@pytest.mark` prefix was stripped by the export (same pattern as the
# `.parametrize` fragment elsewhere in this dump).
@pytest.mark.compilertest
def test_errorresponse_onemapper_onstatuscode_textformat_contenttype():
    """Exercise text-format error responses with explicit content types for several status codes."""
    _test_errorresponse_onemapper_onstatuscode_textformat_contenttype('503', 'oops', 'text/what')
    _test_errorresponse_onemapper_onstatuscode_textformat_contenttype('429', '<html>too fast, too furious on host %REQ(:authority)%</html>', 'text/html')
    _test_errorresponse_onemapper_onstatuscode_textformat_contenttype('404', "{'error':'notfound'}", 'application/json')
class IEEE802_15_4(object):
    """Hub that fans IEEE 802.15.4 frames out between registered I/O servers.

    Each registered server's 'tx_frame' topic is routed to every *other*
    server's 'rx_frame' topic, and optionally to a host-side socket.
    """

    def __init__(self, ioservers=None):
        # Fix: was `ioservers=[]` — a mutable default argument; None is safe
        # and behaviorally identical for all callers.
        self.ioservers = []
        # Optional host socket; when set, every received frame is also sent there.
        self.host_socket = None
        for server in (ioservers or []):
            self.add_server(server)

    def add_server(self, ioserver):
        """Register *ioserver* and subscribe to its outgoing-frame topic."""
        self.ioservers.append(ioserver)
        ioserver.register_topic('Peripheral.IEEE802_15_4.tx_frame', self.received_frame)

    def received_frame(self, from_server, msg):
        """Forward *msg* to every server except its origin, then to the host socket."""
        for server in self.ioservers:
            if (server != from_server):
                # Fix: was log.info('Forwarding, msg') — logged the literal
                # text ", msg" instead of the message; use lazy %-formatting.
                log.info('Forwarding %s', msg)
                server.send_msg('Peripheral.IEEE802_15_4.rx_frame', msg)
        if (self.host_socket is not None):
            frame = msg['frame']
            self.host_socket.send(frame)

    def shutdown(self):
        """Shut down every registered server."""
        for server in self.ioservers:
            server.shutdown()
class BudgetEntryTestCase(BudgetTestBase):
    """Validation tests for BudgetEntry: each attribute (budget, cost,
    msrp, price, realized_total, unit, amount, good) is checked for its
    default value, its None handling, its type validation (including the
    exact TypeError message), and its normal read/write behavior."""

    # -- budget ------------------------------------------------------------
    def test_budget_argument_is_skipped(self):
        with pytest.raises(TypeError) as cm:
            BudgetEntry(amount=10.0)
        assert (str(cm.value) == 'BudgetEntry.budget should be a Budget instance, not NoneType')

    def test_budget_argument_is_none(self):
        with pytest.raises(TypeError) as cm:
            BudgetEntry(budget=None, amount=10.0)
        assert (str(cm.value) == 'BudgetEntry.budget should be a Budget instance, not NoneType')

    def test_budget_attribute_is_set_to_none(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        with pytest.raises(TypeError) as cm:
            entry.budget = None
        assert (str(cm.value) == 'BudgetEntry.budget should be a Budget instance, not NoneType')

    def test_budget_argument_is_not_a_budget_instance(self):
        with pytest.raises(TypeError) as cm:
            BudgetEntry(budget='not a budget', amount=10.0)
        assert (str(cm.value) == 'BudgetEntry.budget should be a Budget instance, not str')

    def test_budget_attribute_is_not_a_budget_instance(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, amount=10.0)
        with pytest.raises(TypeError) as cm:
            entry.budget = 'not a budget instance'
        assert (str(cm.value) == 'BudgetEntry.budget should be a Budget instance, not str')

    def test_budget_argument_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, amount=10.0)
        assert (entry.budget == self.test_budget)

    def test_budget_attribute_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, amount=10.0)
        new_budget = Budget(name='Test Budget', project=self.test_project, status_list=self.budget_status_list)
        assert (entry.budget != new_budget)
        entry.budget = new_budget
        assert (entry.budget == new_budget)

    # -- cost (copied from the good, then mutable) -------------------------
    def test_cost_attribute_value_will_be_copied_from_the_supplied_good_argument(self):
        good = Good(name='Some Good', cost=10, msrp=20, unit='$/hour')
        entry = BudgetEntry(budget=self.test_budget, good=good)
        assert (entry.cost == good.cost)

    def test_cost_attribute_is_set_to_None(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        assert (entry.cost == self.test_good.cost)
        entry.cost = None
        assert (entry.cost == 0.0)

    def test_cost_attribute_is_not_a_number(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        with pytest.raises(TypeError) as cm:
            entry.cost = 'some string'
        assert (str(cm.value) == 'BudgetEntry.cost should be a number, not str')

    def test_cost_attribute_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        test_value = 5.0
        assert (entry.cost != test_value)
        entry.cost = test_value
        assert (entry.cost == test_value)

    # -- msrp (copied from the good, then mutable) -------------------------
    def test_msrp_attribute_is_set_to_None(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        assert (entry.msrp == self.test_good.msrp)
        entry.msrp = None
        assert (entry.msrp == 0.0)

    def test_msrp_attribute_is_not_a_number(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        with pytest.raises(TypeError) as cm:
            entry.msrp = 'some string'
        assert (str(cm.value) == 'BudgetEntry.msrp should be a number, not str')

    def test_msrp_attribute_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        test_value = 5.0
        assert (entry.msrp != test_value)
        entry.msrp = test_value
        assert (entry.msrp == test_value)

    def test_msrp_attribute_value_will_be_copied_from_the_supplied_good_argument(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        assert (entry.msrp == self.test_good.msrp)

    # -- price (defaults to 0.0) -------------------------------------------
    def test_price_argument_is_skipped(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        assert (entry.price == 0.0)

    def test_price_argument_is_set_to_None(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, price=None)
        assert (entry.price == 0.0)

    def test_price_attribute_is_set_to_None(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, price=10.0)
        assert (entry.price == 10.0)
        entry.price = None
        assert (entry.price == 0.0)

    def test_price_argument_is_not_a_number(self):
        with pytest.raises(TypeError) as cm:
            BudgetEntry(budget=self.test_budget, good=self.test_good, price='some string')
        assert (str(cm.value) == 'BudgetEntry.price should be a number, not str')

    def test_price_attribute_is_not_a_number(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, price=10)
        with pytest.raises(TypeError) as cm:
            entry.price = 'some string'
        assert (str(cm.value) == 'BudgetEntry.price should be a number, not str')

    def test_price_argument_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, price=10)
        assert (entry.price == 10.0)

    def test_price_attribute_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, price=10)
        test_value = 5.0
        assert (entry.price != test_value)
        entry.price = test_value
        assert (entry.price == test_value)

    # -- realized_total (defaults to 0.0) ----------------------------------
    def test_realized_total_argument_is_skipped(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        assert (entry.realized_total == 0.0)

    def test_realized_total_argument_is_set_to_None(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, realized_total=None)
        assert (entry.realized_total == 0.0)

    def test_realized_total_attribute_is_set_to_None(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, realized_total=10.0)
        assert (entry.realized_total == 10.0)
        entry.realized_total = None
        assert (entry.realized_total == 0.0)

    def test_realized_total_argument_is_not_a_number(self):
        with pytest.raises(TypeError) as cm:
            BudgetEntry(budget=self.test_budget, good=self.test_good, realized_total='some string')
        assert (str(cm.value) == 'BudgetEntry.realized_total should be a number, not str')

    def test_realized_total_attribute_is_not_a_number(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, realized_total=10)
        with pytest.raises(TypeError) as cm:
            entry.realized_total = 'some string'
        assert (str(cm.value) == 'BudgetEntry.realized_total should be a number, not str')

    def test_realized_total_argument_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, realized_total=10)
        assert (entry.realized_total == 10.0)

    def test_realized_total_attribute_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, realized_total=10)
        test_value = 5.0
        assert (entry.realized_total != test_value)
        entry.realized_total = test_value
        assert (entry.realized_total == test_value)

    # -- unit (copied from the good, coerced to '') ------------------------
    def test_unit_attribute_is_set_to_None(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        assert (entry.unit == self.test_good.unit)
        entry.unit = None
        assert (entry.unit == '')

    def test_unit_attribute_is_not_a_string(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        with pytest.raises(TypeError) as cm:
            entry.unit = 100.212
        assert (str(cm.value) == 'BudgetEntry.unit should be a string, not float')

    def test_unit_attribute_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        test_value = 'TL/hour'
        assert (entry.unit != test_value)
        entry.unit = test_value
        assert (entry.unit == test_value)

    def test_unit_attribute_value_will_be_copied_from_the_supplied_good(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        assert (entry.unit == self.test_good.unit)

    # -- amount (defaults to 0.0) ------------------------------------------
    def test_amount_argument_is_skipped(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good)
        assert (entry.amount == 0.0)

    def test_amount_argument_is_set_to_None(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, amount=None)
        assert (entry.amount == 0.0)

    def test_amount_attribute_is_set_to_None(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, amount=10.0)
        assert (entry.amount == 10.0)
        entry.amount = None
        assert (entry.amount == 0.0)

    def test_amount_argument_is_not_a_number(self):
        with pytest.raises(TypeError) as cm:
            BudgetEntry(budget=self.test_budget, good=self.test_good, amount='some string')
        assert (str(cm.value) == 'BudgetEntry.amount should be a number, not str')

    def test_amount_attribute_is_not_a_number(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, amount=10)
        with pytest.raises(TypeError) as cm:
            entry.amount = 'some string'
        assert (str(cm.value) == 'BudgetEntry.amount should be a number, not str')

    def test_amount_argument_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, amount=10)
        assert (entry.amount == 10.0)

    def test_amount_attribute_is_working_properly(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, amount=10)
        test_value = 5.0
        assert (entry.amount != test_value)
        entry.amount = test_value
        assert (entry.amount == test_value)

    # -- good (required) ---------------------------------------------------
    def test_good_argument_is_skipped(self):
        with pytest.raises(TypeError) as cm:
            BudgetEntry(budget=self.test_budget)
        assert (str(cm.value) == 'BudgetEntry.good should be a stalker.models.budget.Good instance, not NoneType')

    def test_good_argument_is_None(self):
        with pytest.raises(TypeError) as cm:
            BudgetEntry(budget=self.test_budget, good=None, amount=53)
        assert (str(cm.value) == 'BudgetEntry.good should be a stalker.models.budget.Good instance, not NoneType')

    def test_good_attribute_is_set_to_None(self):
        entry = BudgetEntry(budget=self.test_budget, good=Good(name='Some Good'), amount=53)
        with pytest.raises(TypeError) as cm:
            entry.good = None
        assert (str(cm.value) == 'BudgetEntry.good should be a stalker.models.budget.Good instance, not NoneType')

    def test_good_argument_is_not_a_good_instance(self):
        with pytest.raises(TypeError) as cm:
            entry = BudgetEntry(budget=self.test_budget, good='this is not a Good instance', amount=53)
        assert (str(cm.value) == 'BudgetEntry.good should be a stalker.models.budget.Good instance, not str')

    def test_good_attribute_is_not_a_good_instance(self):
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, amount=53)
        with pytest.raises(TypeError) as cm:
            entry.good = 'this is not a Good instance'
        assert (str(cm.value) == 'BudgetEntry.good should be a stalker.models.budget.Good instance, not str')

    def test_good_argument_is_working_properly(self):
        test_value = Good(name='Some Good')
        entry = BudgetEntry(budget=self.test_budget, good=test_value, amount=53)
        assert (entry.good == test_value)

    def test_good_attribute_is_working_properly(self):
        test_value = Good(name='Some Other Good')
        entry = BudgetEntry(budget=self.test_budget, good=self.test_good, amount=53)
        assert (entry.good != test_value)
        entry.good = test_value
        assert (entry.good == test_value)
def test_validate_yaml():
    """validate_yaml: success path, type errors (with positions), and a
    missing required field, through both Object and Schema validators."""

    def _failing_messages(text, validator):
        # Run validate_yaml expecting a ValidationError; return its messages.
        with pytest.raises(ValidationError) as exc_info:
            validate_yaml(text, validator=validator)
        return exc_info.value.messages()

    # A valid document parses into plain python values.
    assert (validate_yaml('a: 123\nb: 456\n', validator=Object(properties=Integer())) == {'a': 123, 'b': 456})

    # A non-numeric property is reported with its exact source position.
    type_error = Message(text='Must be a number.', code='type', index=['b'], start_position=Position(line_no=2, column_no=4, char_index=10), end_position=Position(line_no=2, column_no=6, char_index=12))
    assert (_failing_messages('a: 123\nb: abc\n', Object(properties=Integer())) == [type_error])

    # The same failure surfaces identically through a Schema validator.
    schema = Schema(fields={'a': Integer(), 'b': Integer()})
    assert (_failing_messages('a: 123\nb: abc\n', schema) == [type_error])

    # A missing required field is anchored to the start of the document.
    required_error = Message(text="The field 'b' is required.", code='required', index=['b'], start_position=Position(line_no=1, column_no=1, char_index=0), end_position=Position(line_no=1, column_no=6, char_index=5))
    assert (_failing_messages('a: 123', schema) == [required_error])
def cached_render(fn):
    """Decorator for plugin ``render(self, region, context=None)`` methods
    that caches the rendered result under ``self.cache_key(region)`` for
    ``self.timeout`` seconds; with no ``cache_key`` it renders uncached.

    Fix: the original body contained a bare ``(fn)`` expression where a
    ``functools.wraps(fn)`` application clearly belonged, so the wrapper
    lost the wrapped function's name and docstring.
    """
    from functools import wraps

    @wraps(fn)
    def render(self, region, context=None):
        key = (self.cache_key(region) if self.cache_key else None)
        if key:
            result = cache.get(key)
            if (result is not None):
                return result
        result = fn(self, region, context)
        if key:
            cache.set(key, result, timeout=self.timeout)
        return result
    return render
def get_srpm(version):
    """Build (once per *version*) and return ``(srpm_path, metadata)`` for
    the quick-package SRPM; results are memoized in the module-level
    ``srpm_cache``.

    Raises RuntimeError when the build script fails.  (The original used a
    bare ``assert`` on the exit status, which is silently stripped under
    ``python -O``.)
    """
    if (version in srpm_cache):
        return srpm_cache[version]
    status = os.system('set -e\n mkdir -p srpm_dir && cd srpm_dir\n export dummy_version={version}\n {script_dir}/generate_qiuck_package\n '.format(script_dir=scriptdir(), version=version))
    if (status != 0):
        raise RuntimeError('generate_qiuck_package failed for version {0} (exit status {1})'.format(version, status))
    srpm_path = os.path.join(os.getcwd(), 'srpm_dir', 'quick-package-{0}-0.src.rpm'.format(version))
    result = (srpm_path, Munch({'package_name': 'quick-package', 'user': 'bob', 'project': 'blah'}))
    srpm_cache[version] = result
    return result
def _find_cast_subexpressions(expression: DataflowObject) -> Iterator[UnaryOperation]:
    """Yield every cast operation found in *expression*'s subtree, except a
    cast that is itself the destination of the top-level Assignment."""
    # Depth-first worklist walk; iterating a node yields its direct
    # sub-expressions, so `extend` pushes the children.
    todo = [expression]
    while (todo and (subexpression := todo.pop())):
        # NOTE(review): the walrus condition also terminates the walk early
        # if a popped node is falsy -- confirm nodes are always truthy.
        todo.extend(subexpression)
        # Skip the assignment's own destination (so `dst = (cast)src` does
        # not report the destination side); yield everything else _is_cast
        # accepts.
        if ((not (isinstance(expression, Assignment) and (expression.destination == subexpression))) and _is_cast(subexpression)):
            (yield subexpression)
def create_user(date, cognito_id, email_address, character_set_preference):
    """Insert a new user item into the DynamoDB table.

    The condition expression rejects the write if an item with the same PK
    already exists, so an existing user is never overwritten.  Returns the
    raw ``put_item`` response.
    """
    user_key = ('USER#' + cognito_id)
    item = {
        'PK': user_key,
        'SK': user_key,
        'Email address': email_address,
        'Date created': date,
        'Last login': date,
        'Character set preference': character_set_preference,
        'User alias': 'Not set',
        'User alias pinyin': 'Not set',
        'User alias emoji': 'Not set',
        # GSI1 groups all users under a single partition for listing.
        'GSI1PK': 'USER',
        'GSI1SK': user_key,
    }
    return table.put_item(Item=item, ConditionExpression='attribute_not_exists(PK)')
class bsn_debug_counter_desc_stats_reply(bsn_stats_reply):
    # Generated loxi (OpenFlow) message class: Big Switch Networks
    # experimenter stats reply carrying debug-counter descriptions.
    # NOTE(review): str-based packing ('\x00' padding, ''.join) suggests
    # this targets Python 2 -- confirm before porting.
    version = 5
    type = 19
    stats_type = 65535
    experimenter = 6035143
    subtype = 13

    def __init__(self, xid=None, flags=None, entries=None):
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (flags != None):
            self.flags = flags
        else:
            self.flags = 0
        if (entries != None):
            self.entries = entries
        else:
            self.entries = []
        return

    def pack(self):
        # Serialize header + body; the length field at offset 2 is written
        # as a placeholder first, then patched once the total is known.
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # length placeholder
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        packed.append(('\x00' * 4))  # 4 bytes of padding
        packed.append(struct.pack('!L', self.experimenter))
        packed.append(struct.pack('!L', self.subtype))
        packed.append(loxi.generic_util.pack_list(self.entries))
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        # NOTE(review): no `self`/`cls`; generated loxi code uses this as a
        # static factory -- confirm a staticmethod wrapper exists upstream.
        # Asserts verify that every fixed header field matches this class.
        obj = bsn_debug_counter_desc_stats_reply()
        _version = reader.read('!B')[0]
        assert (_version == 5)
        _type = reader.read('!B')[0]
        assert (_type == 19)
        _length = reader.read('!H')[0]
        orig_reader = reader
        # Restrict further reads to this message's byte range.
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 65535)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)  # padding
        _experimenter = reader.read('!L')[0]
        assert (_experimenter == 6035143)
        _subtype = reader.read('!L')[0]
        assert (_subtype == 13)
        obj.entries = loxi.generic_util.unpack_list(reader, ofp.common.bsn_debug_counter_desc_stats_entry.unpack)
        return obj

    def __eq__(self, other):
        # Value equality over the mutable fields only (fixed header fields
        # are class constants).
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.flags != other.flags):
            return False
        if (self.entries != other.entries):
            return False
        return True

    def pretty_print(self, q):
        # Render through loxi's pretty-printer queue object.
        q.text('bsn_debug_counter_desc_stats_reply {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {1: 'OFPSF_REPLY_MORE'}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
                q.text(',')
                q.breakable()
                q.text('entries = ')
                q.pp(self.entries)
            q.breakable()
        q.text('}')
class TrackTestCase(unittest.TestCase):
    """Tests for Track XML round-tripping (to_xml / from_xml) and for
    optimize_clips, which de-duplicates File objects shared by clips that
    point at the same pathurl."""

    def test_to_xml_method_is_working_properly(self):
        # Build a three-clip track by hand and compare the serialized XML
        # against the expected literal.
        t = Track()
        t.enabled = True
        t.locked = False
        f = File()
        f.duration = 34
        f.name = 'shot2'
        f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
        c = Clip()
        c.id = 'shot2'
        c.start = 1
        c.end = 35
        c.name = 'shot2'
        c.enabled = True
        c.duration = 34
        c.in_ = 0
        c.out = 34
        c.file = f
        t.clips.append(c)
        f = File()
        f.duration = 30
        f.name = 'shot'
        f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov'
        c = Clip()
        c.id = 'shot'
        c.start = 35
        c.end = 65
        c.name = 'shot'
        c.enabled = True
        c.duration = 30
        c.in_ = 0
        c.out = 30
        c.file = f
        t.clips.append(c)
        f = File()
        f.duration = 45
        f.name = 'shot1'
        f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'
        c = Clip()
        c.id = 'shot1'
        c.start = 65
        c.end = 110
        c.name = 'shot1'
        c.enabled = True
        c.duration = 45
        c.in_ = 0
        c.out = 45
        c.file = f
        t.clips.append(c)
        expected_xml = '<track>\n <locked>FALSE</locked>\n <enabled>TRUE</enabled>\n <clipitem id="shot2">\n <end>35</end>\n <name>shot2</name>\n <enabled>True</enabled>\n <start>1</start>\n <in>0</in>\n <duration>34</duration>\n <out>34</out>\n <file id="shot2.mov">\n <duration>34</duration>\n <name>shot2</name>\n <pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov</pathurl>\n </file>\n </clipitem>\n <clipitem id="shot">\n <end>65</end>\n <name>shot</name>\n <enabled>True</enabled>\n <start>35</start>\n <in>0</in>\n <duration>30</duration>\n <out>30</out>\n <file id="shot.mov">\n <duration>30</duration>\n <name>shot</name>\n <pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov</pathurl>\n </file>\n </clipitem>\n <clipitem id="shot1">\n <end>110</end>\n <name>shot1</name>\n <enabled>True</enabled>\n <start>65</start>\n <in>0</in>\n <duration>45</duration>\n <out>45</out>\n <file id="shot1.mov">\n <duration>45</duration>\n <name>shot1</name>\n <pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov</pathurl>\n </file>\n </clipitem>\n</track>'
        self.assertEqual(expected_xml, t.to_xml())

    def test_from_xml_method_is_working_properly(self):
        # Build the equivalent ElementTree structure and verify from_xml
        # reproduces the same track/clip/file attribute values.
        from xml.etree import ElementTree
        track_node = ElementTree.Element('track')
        locked_node = ElementTree.SubElement(track_node, 'locked')
        locked_node.text = 'FALSE'
        enabled_node = ElementTree.SubElement(track_node, 'enabled')
        enabled_node.text = 'TRUE'
        clip_node = ElementTree.SubElement(track_node, 'clipitem', attrib={'id': 'shot2'})
        end_node = ElementTree.SubElement(clip_node, 'end')
        end_node.text = '35'
        name_node = ElementTree.SubElement(clip_node, 'name')
        name_node.text = 'shot2'
        enabled_node = ElementTree.SubElement(clip_node, 'enabled')
        enabled_node.text = 'True'
        start_node = ElementTree.SubElement(clip_node, 'start')
        start_node.text = '1'
        in_node = ElementTree.SubElement(clip_node, 'in')
        in_node.text = '0'
        duration_node = ElementTree.SubElement(clip_node, 'duration')
        duration_node.text = '34'
        out_node = ElementTree.SubElement(clip_node, 'out')
        out_node.text = '34'
        file_node = ElementTree.SubElement(clip_node, 'file')
        duration_node = ElementTree.SubElement(file_node, 'duration')
        duration_node.text = '34'
        name_node = ElementTree.SubElement(file_node, 'name')
        name_node.text = 'shot2'
        pathurl_node = ElementTree.SubElement(file_node, 'pathurl')
        pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
        pathurl_node.text = pathurl
        clip_node = ElementTree.SubElement(track_node, 'clipitem', attrib={'id': 'shot'})
        end_node = ElementTree.SubElement(clip_node, 'end')
        end_node.text = '65'
        name_node = ElementTree.SubElement(clip_node, 'name')
        name_node.text = 'shot'
        enabled_node = ElementTree.SubElement(clip_node, 'enabled')
        enabled_node.text = 'True'
        start_node = ElementTree.SubElement(clip_node, 'start')
        start_node.text = '35'
        in_node = ElementTree.SubElement(clip_node, 'in')
        in_node.text = '0'
        duration_node = ElementTree.SubElement(clip_node, 'duration')
        duration_node.text = '30'
        out_node = ElementTree.SubElement(clip_node, 'out')
        out_node.text = '30'
        file_node = ElementTree.SubElement(clip_node, 'file')
        duration_node = ElementTree.SubElement(file_node, 'duration')
        duration_node.text = '30'
        name_node = ElementTree.SubElement(file_node, 'name')
        name_node.text = 'shot'
        pathurl_node = ElementTree.SubElement(file_node, 'pathurl')
        pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov'
        pathurl_node.text = pathurl
        clip_node = ElementTree.SubElement(track_node, 'clipitem', attrib={'id': 'shot1'})
        end_node = ElementTree.SubElement(clip_node, 'end')
        end_node.text = '110'
        name_node = ElementTree.SubElement(clip_node, 'name')
        name_node.text = 'shot1'
        enabled_node = ElementTree.SubElement(clip_node, 'enabled')
        enabled_node.text = 'True'
        start_node = ElementTree.SubElement(clip_node, 'start')
        start_node.text = '65'
        in_node = ElementTree.SubElement(clip_node, 'in')
        in_node.text = '0'
        duration_node = ElementTree.SubElement(clip_node, 'duration')
        duration_node.text = '45'
        out_node = ElementTree.SubElement(clip_node, 'out')
        out_node.text = '45'
        file_node = ElementTree.SubElement(clip_node, 'file')
        duration_node = ElementTree.SubElement(file_node, 'duration')
        duration_node.text = '45'
        name_node = ElementTree.SubElement(file_node, 'name')
        name_node.text = 'shot1'
        pathurl_node = ElementTree.SubElement(file_node, 'pathurl')
        pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'
        pathurl_node.text = pathurl
        t = Track()
        t.from_xml(track_node)
        self.assertEqual(False, t.locked)
        self.assertEqual(True, t.enabled)
        # First clip.
        c = t.clips[0]
        self.assertEqual(35, c.end)
        self.assertEqual('shot2', c.name)
        self.assertEqual(True, c.enabled)
        self.assertEqual(1, c.start)
        self.assertEqual(0, c.in_)
        self.assertEqual(34, c.duration)
        self.assertEqual(34, c.out)
        f = c.file
        self.assertEqual(34, f.duration)
        self.assertEqual('shot2', f.name)
        self.assertEqual('file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov', f.pathurl)
        # Second clip.
        c = t.clips[1]
        self.assertEqual(65, c.end)
        self.assertEqual('shot', c.name)
        self.assertEqual(True, c.enabled)
        self.assertEqual(35, c.start)
        self.assertEqual(0, c.in_)
        self.assertEqual(30, c.duration)
        self.assertEqual(30, c.out)
        f = c.file
        self.assertEqual(30, f.duration)
        self.assertEqual('shot', f.name)
        self.assertEqual('file://localhost/home/eoyilmaz/maya/projects/default/data/shot.mov', f.pathurl)
        # Third clip.
        c = t.clips[2]
        self.assertEqual(110, c.end)
        self.assertEqual('shot1', c.name)
        self.assertEqual(True, c.enabled)
        self.assertEqual(65, c.start)
        self.assertEqual(0, c.in_)
        self.assertEqual(45, c.duration)
        self.assertEqual(45, c.out)
        f = c.file
        self.assertEqual(45, f.duration)
        self.assertEqual('shot1', f.name)
        self.assertEqual('file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov', f.pathurl)

    def test_optimize_clips_is_working_properly(self):
        # Clips 0 and 1 point at the same pathurl through distinct File
        # objects; optimize_clips should make them share one File.
        t = Track()
        t.enabled = True
        t.locked = False
        f = File()
        f.duration = 34
        f.name = 'shot2'
        f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
        c = Clip()
        c.id = 'shot2'
        c.start = 1
        c.end = 35
        c.name = 'shot2'
        c.enabled = True
        c.duration = 34
        c.in_ = 0
        c.out = 34
        c.file = f
        t.clips.append(c)
        f = File()
        f.duration = 30
        f.name = 'shot'
        f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
        c = Clip()
        c.id = 'shot'
        c.start = 35
        c.end = 65
        c.name = 'shot'
        c.enabled = True
        c.duration = 30
        c.in_ = 0
        c.out = 30
        c.file = f
        t.clips.append(c)
        f = File()
        f.duration = 45
        f.name = 'shot1'
        f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'
        c = Clip()
        c.id = 'shot1'
        c.start = 65
        c.end = 110
        c.name = 'shot1'
        c.enabled = True
        c.duration = 45
        c.in_ = 0
        c.out = 45
        c.file = f
        t.clips.append(c)
        self.assertNotEqual(t.clips[0].file, t.clips[1].file)
        self.assertNotEqual(t.clips[0].file, t.clips[2].file)
        self.assertNotEqual(t.clips[1].file, t.clips[2].file)
        t.optimize_clips()
        # Only the two clips sharing a pathurl now share a File.
        self.assertEqual(t.clips[0].file, t.clips[1].file)
        self.assertNotEqual(t.clips[0].file, t.clips[2].file)
        self.assertNotEqual(t.clips[1].file, t.clips[2].file)

    def test_to_xml_method_with_optimized_clips_is_working_properly(self):
        # After optimize_clips, the second clip referencing the same file
        # serializes with a self-closing <file id=.../> reference and a
        # deduplicated clipitem id ("shot2 2").
        t = Track()
        t.enabled = True
        t.locked = False
        f = File()
        f.duration = 34
        f.name = 'shot2'
        f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
        c = Clip()
        c.id = 'shot2'
        c.start = 1
        c.end = 35
        c.name = 'shot2'
        c.enabled = True
        c.duration = 34
        c.in_ = 0
        c.out = 34
        c.file = f
        t.clips.append(c)
        f = File()
        f.duration = 30
        f.name = 'shot'
        f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov'
        c = Clip()
        c.id = 'shot2'
        c.start = 35
        c.end = 65
        c.name = 'shot2'
        c.enabled = True
        c.duration = 30
        c.in_ = 0
        c.out = 30
        c.file = f
        t.clips.append(c)
        f = File()
        f.duration = 45
        f.name = 'shot1'
        f.pathurl = 'file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov'
        c = Clip()
        c.id = 'shot1'
        c.start = 65
        c.end = 110
        c.name = 'shot1'
        c.enabled = True
        c.duration = 45
        c.in_ = 0
        c.out = 45
        c.file = f
        t.clips.append(c)
        expected_xml = '<track>\n <locked>FALSE</locked>\n <enabled>TRUE</enabled>\n <clipitem id="shot2">\n <end>35</end>\n <name>shot2</name>\n <enabled>True</enabled>\n <start>1</start>\n <in>0</in>\n <duration>34</duration>\n <out>34</out>\n <file id="shot2.mov">\n <duration>34</duration>\n <name>shot2</name>\n <pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot2.mov</pathurl>\n </file>\n </clipitem>\n <clipitem id="shot2 2">\n <end>65</end>\n <name>shot2</name>\n <enabled>True</enabled>\n <start>35</start>\n <in>0</in>\n <duration>30</duration>\n <out>30</out>\n <file id="shot2.mov"/>\n </clipitem>\n <clipitem id="shot1">\n <end>110</end>\n <name>shot1</name>\n <enabled>True</enabled>\n <start>65</start>\n <in>0</in>\n <duration>45</duration>\n <out>45</out>\n <file id="shot1.mov">\n <duration>45</duration>\n <name>shot1</name>\n <pathurl>file://localhost/home/eoyilmaz/maya/projects/default/data/shot1.mov</pathurl>\n </file>\n </clipitem>\n</track>'
        t.optimize_clips()
        self.assertEqual(expected_xml, t.to_xml())
def variate(oid, tag, value, **context):
    """snmpsim "writecache"-style variation hook.

    Parses per-record settings from *value* on first use (cached in the
    module-level ``recordContext``), stores SET values in a per-module
    cache (``moduleContext['cache']``), optionally raises configured SNMP
    errors, and serves GETs from the cache / record value.
    """
    # Only answer exact or get-next style lookups.
    if ((not context['nextFlag']) and (not context['exactMatch'])):
        return (context['origOid'], tag, context['errorStatus'])
    if ('settings' not in recordContext):
        # First touch: parse 'key=value,...' options from the record value.
        recordContext['settings'] = dict([split(x, '=') for x in split(value, ',')])
        if ('vlist' in recordContext['settings']):
            # vlist is a ':'-separated stream of (op, value, error) triples;
            # values are coerced via the record's type tag.
            vlist = {}
            recordContext['settings']['vlist'] = split(recordContext['settings']['vlist'], ':')
            while recordContext['settings']['vlist']:
                (o, v, e) = recordContext['settings']['vlist'][:3]
                vl = recordContext['settings']['vlist'][3:]
                recordContext['settings']['vlist'] = vl
                (type_tag, _) = SnmprecRecord.unpack_tag(tag)
                v = SnmprecGrammar.TAG_MAP[type_tag](v)
                if (o not in vlist):
                    vlist[o] = {}
                if (o == 'eq'):
                    # Multiple eq entries accumulate into a value->error map.
                    vlist[o][v] = e
                elif (o in ('lt', 'gt')):
                    # lt/gt keep a single (threshold, error) pair.
                    vlist[o] = (v, e)
                else:
                    log.info(('writecache: bad vlist syntax: %s' % recordContext['settings']['vlist']))
            recordContext['settings']['vlist'] = vlist
        if ('status' in recordContext['settings']):
            st = recordContext['settings']['status'].lower()
            recordContext['settings']['status'] = st
    if (oid not in moduleContext):
        # Remember the concrete SNMP type for this OID (from the tag).
        moduleContext[oid] = {}
        (type_tag, _) = SnmprecRecord.unpack_tag(tag)
        moduleContext[oid]['type'] = SnmprecGrammar.TAG_MAP[type_tag]()
    text_oid = str(oid)
    if context['setFlag']:
        if ('vlist' in recordContext['settings']):
            # Match the incoming SET value against the configured
            # eq/lt/gt conditions; a hit raises the mapped SNMP error.
            if (('eq' in recordContext['settings']['vlist']) and (context['origValue'] in recordContext['settings']['vlist']['eq'])):
                e = recordContext['settings']['vlist']['eq'][context['origValue']]
            elif (('lt' in recordContext['settings']['vlist']) and (context['origValue'] < recordContext['settings']['vlist']['lt'][0])):
                e = recordContext['settings']['vlist']['lt'][1]
            elif (('gt' in recordContext['settings']['vlist']) and (context['origValue'] > recordContext['settings']['vlist']['gt'][0])):
                e = recordContext['settings']['vlist']['gt'][1]
            else:
                e = None
            if (e in ERROR_TYPES):
                # idx points at the offending varbind in the request.
                idx = max(0, ((context['varsTotal'] - context['varsRemaining']) - 1))
                raise ERROR_TYPES[e](name=oid, idx=idx)
        # Accept only values of the OID's declared type.
        if moduleContext[oid]['type'].isSameTypeWith(context['origValue']):
            moduleContext['cache'][text_oid] = context['origValue']
        else:
            return (context['origOid'], tag, context['errorStatus'])
    if ('status' in recordContext['settings']):
        # Unconditional status error, filtered by the configured op
        # ('any' / 'set' / 'get').
        if (('op' not in recordContext['settings']) or (recordContext['settings']['op'] == 'any') or ((recordContext['settings']['op'] == 'set') and context['setFlag']) or ((recordContext['settings']['op'] == 'get') and (not context['setFlag']))):
            e = recordContext['settings']['status']
            if (e in ERROR_TYPES):
                idx = max(0, ((context['varsTotal'] - context['varsRemaining']) - 1))
                raise ERROR_TYPES[e](name=oid, idx=idx)
    # Response preference: cached SET value, then hexvalue, then value.
    if (text_oid in moduleContext['cache']):
        return (oid, tag, moduleContext['cache'][text_oid])
    elif ('hexvalue' in recordContext['settings']):
        return (oid, tag, moduleContext[oid]['type'].clone(hexValue=recordContext['settings']['hexvalue']))
    elif ('value' in recordContext['settings']):
        return (oid, tag, moduleContext[oid]['type'].clone(recordContext['settings']['value']))
    else:
        return (oid, tag, context['errorStatus'])
class Locals(LimitedTestCase):
    """Tests for eventlet's greenthread-local storage (corolocal.local)."""

    def passthru(self, *args, **kw):
        # Records every call so tests can inspect what was passed through.
        self.results.append((args, kw))
        return (args, kw)

    def setUp(self):
        self.results = []
        super().setUp()

    def tearDown(self):
        self.results = []
        super().tearDown()

    def test_assignment(self):
        # A value set in one greenthread must not be visible in another.
        my_local = corolocal.local()
        my_local.a = 1

        def do_something():
            my_local.b = 2
            self.assertEqual(my_local.b, 2)
            try:
                my_local.a
                self.fail()
            except AttributeError:
                pass
        eventlet.spawn(do_something).wait()
        self.assertEqual(my_local.a, 1)

    def test_calls_init(self):
        # Subclass __init__ runs once per greenthread that first touches
        # the local object, with the original constructor arguments.
        init_args = []

        class Init(corolocal.local):

            def __init__(self, *args):
                init_args.append((args, eventlet.getcurrent()))
        my_local = Init(1, 2, 3)
        self.assertEqual(init_args[0][0], (1, 2, 3))
        self.assertEqual(init_args[0][1], eventlet.getcurrent())

        def do_something():
            my_local.foo = 'bar'
            self.assertEqual(len(init_args), 2, init_args)
            self.assertEqual(init_args[1][0], (1, 2, 3))
            self.assertEqual(init_args[1][1], eventlet.getcurrent())
        eventlet.spawn(do_something).wait()

    def test_calling_methods(self):
        # Methods observe the calling greenthread's attribute values.
        class Caller(corolocal.local):

            def callme(self):
                return self.foo
        my_local = Caller()
        my_local.foo = 'foo1'
        self.assertEqual('foo1', my_local.callme())

        def do_something():
            my_local.foo = 'foo2'
            self.assertEqual('foo2', my_local.callme())
        eventlet.spawn(do_something).wait()
        my_local.foo = 'foo3'
        self.assertEqual('foo3', my_local.callme())

    def test_no_leaking(self):
        # Objects stored by dead greenthreads must become collectable;
        # after GC only one weakly-tracked object should remain.
        refs = weakref.WeakKeyDictionary()
        my_local = corolocal.local()

        class X():
            pass

        def do_something(i):
            o = X()
            refs[o] = True
            my_local.foo = o
        p = eventlet.GreenPool()
        for i in range(100):
            p.spawn(do_something, i)
        p.waitall()
        del p
        gc.collect()
        eventlet.sleep(0)  # let eventlet run its cleanup hooks
        gc.collect()
        self.assertEqual(len(refs), 1)
class TypeMutualAuthentication(ModelSimple):
    """Generated OpenAPI model for the constant value ``'mutual_authentication'``.

    NOTE(review): the decorators below (``@cached_property``,
    ``@convert_js_args_to_python_args``, ``@classmethod``) were missing their
    ``@``-prefixed names in the checked-in text (extraction artifact — only the
    trailing ``_property`` / ``_js_args_to_python_args`` fragments remained);
    they are restored here following the generated-client convention used by
    ``ModelSimple`` subclasses.  Confirm against the code generator's output.
    """

    # Allowed enum values for the single 'value' attribute.
    allowed_values = {('value',): {'MUTUAL_AUTHENTICATION': 'mutual_authentication'}}
    validations = {}
    additional_properties_type = None
    _nullable = False

    @cached_property
    def openapi_types():
        # Attribute name -> accepted types.
        return {'value': (str,)}

    @cached_property
    def discriminator():
        return None

    attribute_map = {}
    read_only_vars = set()
    _composed_schemas = None
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        """Build the model; 'value' may be given positionally, by keyword, or
        defaulted to 'mutual_authentication'.  Raises ApiTypeError on any
        leftover positional or keyword arguments."""
        _path_to_item = kwargs.pop('_path_to_item', ())
        if ('value' in kwargs):
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            value = 'mutual_authentication'
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        # Track visited classes for composed-schema cycle detection.
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        self.value = value
        if kwargs:
            raise ApiTypeError(('Invalid named arguments=%s passed to %s. Remove those invalid named arguments.' % (kwargs, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        """Alternate constructor used when deserializing server data; mirrors
        ``__init__`` but bypasses it via ``__new__``."""
        _path_to_item = kwargs.pop('_path_to_item', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if ('value' in kwargs):
            value = kwargs.pop('value')
        elif args:
            args = list(args)
            value = args.pop(0)
        else:
            value = 'mutual_authentication'
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        self.value = value
        if kwargs:
            raise ApiTypeError(('Invalid named arguments=%s passed to %s. Remove those invalid named arguments.' % (kwargs, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        return self
# NOTE(review): the route decorator below lost its '@app.route' prefix in the
# checked-in file (extraction artifact); restored here assuming the
# conventional Flask application object named ``app`` -- confirm against the
# module's imports.
@app.route('/chat', methods=['POST'])
def chat():
    """Flask endpoint: answer a chat message about unit conversions or recipes.

    Reads ``message`` from the POSTed form, classifies it via
    ``question_type`` on the Stanford parse, and returns a JSON payload with
    the bot's answer and a timestamp.
    """
    message = request.form['message']
    parse = nltk_stanford_parse(message)
    (response, terms) = question_type(parse)
    if response == 'default':
        answer = 'Sorry, did you have a question about measurements \n or recipes?\n '
    elif response == 'quantity':
        if len(terms) < 2:
            answer = "I can convert between units of measurement if \n you ask 'How many x are in a y?' "
        else:
            (units, source, dest) = conversion(terms[0], terms[1])
            # `conversion` signals an unknown pair with None.
            if units is None:
                answer = "Sorry, I don't know that conversion."
            else:
                engine = inflect.engine()
                answer = 'There are {} {} in a {}.'.format(units, engine.plural(source), dest)
    elif response == 'recipe':
        if len(terms) == 0:
            answer = "I can suggest some recipes if you ask 'What \n can I make with x, y, z...?' "
        else:
            (recs, build_time) = suggest_recipe(message)
            answer = 'Here are a few recipes you might like: '
            for (idx, rec) in enumerate(recs):
                answer += ((' #{}. '.format((idx + 1)) + rec) + ' ')
    return json.dumps({'sender': 'bot', 'message': answer, 'timestamp': datetime.now().strftime('%-I:%M %p')})
def test_asyncio_thread1():
    """get_event_loop() is per-thread: a fresh thread has no loop unless it
    creates one, and the main thread keeps returning the same loop."""
    observed = []
    observed.append(asyncio.get_event_loop())

    # Worker without loop creation: expected to record an error string.
    worker = threading.Thread(target=append_current_loop, args=(observed, False))
    worker.start()
    worker.join()

    # Worker that creates its own loop: records a distinct loop object.
    worker = threading.Thread(target=append_current_loop, args=(observed, True))
    worker.start()
    worker.join()

    observed.append(asyncio.get_event_loop())

    assert len(observed) == 4
    assert isinstance(observed[1], str) and 'no current event loop in thread' in observed[1]
    assert observed[0] is not observed[2]
    assert observed[0] is observed[3]
    return observed
def extractSugaminnyjpWordpressCom(item):
    """Map a release-feed item to a release message via its tags.

    Returns None for non-chapter/preview posts, a built release message for a
    recognised tag, or False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    # Skip items with neither a chapter nor a volume, and previews.
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    known_series = (
        ('The Eternal World', 'The Eternal World', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series_name, release_type in known_series:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=release_type)
    return False
def enable_mxnet():
    """Deprecated shim: import mxnet and flag it as available module-wide."""
    global mxnet, has_mxnet
    warnings.warn(
        'Built-in MXNet support will be removed in Thinc v9. If you need MXNet support in the future, you can transition to using a custom copy of the current MXNetWrapper in your package or project.',
        DeprecationWarning,
    )
    # Bound to the module-level name via the `global` declaration above.
    import mxnet
    has_mxnet = True
def execute(mh, options, extra_options, back_end, process_slx=True, process_tests=False):
    """Drive a MISS_HIT back-end over the files selected by *options*.

    Resolves the work list either from a configured entry point (library) or
    from explicit files/directories, then dispatches each work package to
    *back_end* — sequentially with ``--single`` or via a multiprocessing pool.

    :param mh: message handler collecting diagnostics
    :param options: parsed command-line options
    :param extra_options: back-end specific options forwarded to work packages
    :param back_end: MISS_HIT_Back_End instance
    :param process_slx: include Simulink (.slx) files
    :param process_tests: also walk the entry point's test paths
    """
    assert isinstance(mh, errors.Message_Handler)
    assert isinstance(back_end, MISS_HIT_Back_End)
    try:
        if options.entry_point:
            # Register the project config rooted at the current directory and
            # resolve the named entry point.
            cfg_tree.register_item(mh, pathutil.abspath('.'), options)
            prj_root = cfg_tree.get_root(pathutil.abspath('.'))
            cfg_tree.register_item(mh, prj_root, options)
            cfg_tree.validate_project_config(mh)
            n_ep = cfg_tree.get_entry_point(options.entry_point)
            if n_ep is None:
                mh.command_line_error("Entry point or library '%s' does not exist." % options.entry_point)
            item_list = [(False, item) for item in cfg_tree.get_source_path(n_ep)]
            if process_tests:
                item_list += [(True, item) for item in cfg_tree.get_test_path(n_ep)]
            if options.debug_show_path:
                print('Using the following PATH:')
                for (_, path) in item_list:
                    print('> %s' % os.path.relpath(path))
            # Expand the PATH roots into the set of files/dirs they contain.
            code_in_path = set()
            test_in_path = set()
            for (in_test_dir, path_root) in item_list:
                container = test_in_path if in_test_dir else code_in_path
                for (path, dirs, files) in os.walk(path_root):
                    container.add(os.path.normpath(path))
                    for f in files:
                        if has_relevant_extension(f):
                            container.add(os.path.normpath(os.path.join(path, f)))
                    # Only descend into MATLAB package ('+'), class ('@') and
                    # 'private' directories.
                    # BUGFIX: the second test read d.startswith('') (always
                    # true), which made irrelevant_dirs empty and disabled the
                    # pruning entirely; the intended literal is '@'.
                    irrelevant_dirs = set(d for d in dirs
                                          if not (d.startswith('+') or d.startswith('@') or d == 'private'))
                    for idir in irrelevant_dirs:
                        dirs.remove(idir)
            if options.files:
                # Explicit files must lie inside the entry point's PATH.
                item_list = []
                for item in options.files:
                    if pathutil.abspath(item) in code_in_path:
                        item_list.append((False, item))
                    elif pathutil.abspath(item) in test_in_path:
                        item_list.append((True, item))
                    else:
                        mh.command_line_error("'%s' is not part of entry point %s" % (item, options.entry_point))
            else:
                item_list = [(False, item) for item in sorted(code_in_path) if has_relevant_extension(item)]
                item_list += [(True, item) for item in sorted(test_in_path) if has_relevant_extension(item)]
                item_list = [(in_test_dir, os.path.relpath(item)) for (in_test_dir, item) in item_list]
        else:
            if options.files:
                item_list = [(False, item) for item in list(options.files)]
            else:
                item_list = [(False, '.')]
            for (in_test_dir, item) in item_list:
                if os.path.isdir(item) or os.path.isfile(item):
                    cfg_tree.register_item(mh, pathutil.abspath(item), options)
        mh.reset_seen()
    except errors.Error:
        mh.summary_and_exit()

    # Build the flat list of work packages from files and directory trees.
    work_list = []
    for (in_test_dir, item) in item_list:
        if os.path.isdir(item):
            for (path, dirs, files) in os.walk(item):
                dirs.sort()
                for excluded_dir in cfg_tree.get_excluded_directories(path):
                    dirs.remove(excluded_dir)
                hidden_dirs = [d for d in dirs if d.startswith('.')]
                for hidden_dir in hidden_dirs:
                    dirs.remove(hidden_dir)
                if path == '.':
                    path = ''
                for f in sorted(files):
                    # Guards against dangling symlinks and the like.
                    if not os.path.isfile(os.path.join(path, f)):
                        continue
                    if has_relevant_extension(f, process_slx):
                        work_list.append(work_package.create(in_test_dir, os.path.join(path, f), options.input_encoding, mh, options, extra_options))
        elif has_relevant_extension(item, process_slx):
            work_list.append(work_package.create(in_test_dir, item, options.input_encoding, mh, options, extra_options))
        else:
            # Explicitly-named file with an irrelevant extension: ignore.
            pass

    process_fn = functools.partial(dispatch_wp, back_end.process_wp, back_end.process_simulink_wp)
    if options.single:
        for wp in work_list:
            for result in process_fn(wp):
                assert isinstance(result, work_package.Result)
                mh.integrate(result.wp.mh)
                if result.processed:
                    mh.finalize_file(result.wp.filename)
                back_end.process_result(result)
    else:
        with multiprocessing.Pool() as pool:
            for results in pool.imap(process_fn, work_list, 5):
                for result in results:
                    assert isinstance(result, work_package.Result)
                    mh.integrate(result.wp.mh)
                    if result.processed:
                        mh.finalize_file(result.wp.filename)
                    back_end.process_result(result)
    back_end.post_process()
    mh.summary_and_exit()
class AinneveTestMixin():
    """Test mixin that provisions a standard kit of gear objects and drops the
    account's Developer permission so tests run as a regular player."""

    def setUp(self):
        super().setUp()
        self.account.permissions.remove('Developer')
        # (attribute name, typeclass, key, extra create kwargs)
        gear_spec = (
            ('helmet', Helmet, 'helmet', {}),
            ('shield', Shield, 'shield', {}),
            ('armor', ArmorObject, 'armor', {}),
            ('weapon', WeaponObject, 'weapon', {}),
            ('big_weapon', WeaponObject, 'big_weapon',
             {'attributes': [('inventory_use_slot', enums.WieldLocation.TWO_HANDS)]}),
            ('item', Object, 'backpack item', {}),
        )
        for attr_name, typeclass, key, extra in gear_spec:
            setattr(self, attr_name, create.create_object(typeclass, key=key, **extra))
def validate_deserialize(cls_body):
    """Round-trip a msgpack payload produced by a child process through
    *cls_body*'s code and return the result."""
    print('Starting process')
    transport = multiprocessing.Queue()
    producer = multiprocessing.Process(target=client_thread, args=(transport,))
    producer.start()
    producer.join()
    print('Process generated class')
    payload = transport.get()
    print('Retrieved bytes:', len(payload))
    decoded = msgpack.unpackb(payload, use_list=True, encoding='utf-8')
    result = cls_body.call_code(decoded)
    cls_body.log.info('Return size: %s', len(result))
    return result
def power_spectral_density_von_karman(r0, L0):
    """Return a function evaluating the von Karman turbulence PSD on a grid.

    :param r0: Fried parameter
    :param L0: outer scale
    """
    def func(grid):
        # Small epsilon keeps the radial frequency strictly positive.
        freq = grid.as_('polar').r + 1e-10
        cutoff = (2 * np.pi) / L0
        spectrum = 0.0229 * (((freq ** 2) + (cutoff ** 2)) / ((2 * np.pi) ** 2)) ** ((-11) / 6.0) * (r0 ** ((-5) / 3))
        # Zero out the DC region below the numerical floor.
        spectrum[freq < 1e-09] = 0
        return Field(spectrum, grid)
    return func
class Required(BranchPattern):
    """Branch pattern that matches only if every child matches in sequence."""

    def match(self, left: List['Pattern'], collected: List['Pattern']=None) -> Any:
        if collected is None:
            collected = []
        remaining, gathered = left, collected
        for child in self.children:
            ok, remaining, gathered = child.match(remaining, gathered)
            if not ok:
                # Any child failing fails the whole group, untouched.
                return False, left, collected
        return True, remaining, gathered
def main():
    """Build and/or load the LiteEth bench bitstream for the ColorLight 5A-75B."""
    cli = argparse.ArgumentParser(description='LiteEth Bench on ColorLight 5A-75B')
    cli.add_argument('--build', action='store_true', help='Build bitstream')
    cli.add_argument('--load', action='store_true', help='Load bitstream')
    parsed = cli.parse_args()

    soc = BenchSoC()
    builder = Builder(soc, csr_csv='csr.csv')
    builder.build(run=parsed.build)

    if not parsed.load:
        return
    programmer = soc.platform.create_programmer()
    programmer.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + '.svf'))
# NOTE(review): the parametrize decorator below lost its '@pytest.mark.'
# prefix in the checked-in file (extraction artifact); restored here.
@pytest.mark.parametrize('primitive, hexstr, text, validator_address, expected_signable', ((b'', None, None, (b'\xff' * 20), SignableMessage(b'\x00', (b'\xff' * 20), b'')), (b'', None, None, '0xFFfFfFffFFfffFFfFFfFFFFFffFFFffffFfFFFfF', SignableMessage(b'\x00', (b'\xff' * 20), b'')), (None, '0x', None, (b'\xff' * 20), SignableMessage(b'\x00', (b'\xff' * 20), b'')), (None, '', None, (b'\xff' * 20), SignableMessage(b'\x00', (b'\xff' * 20), b'')), (None, None, '0x', (b'\xff' * 20), SignableMessage(b'\x00', (b'\xff' * 20), b'0x')), (None, None, '', (b'\xff' * 20), SignableMessage(b'\x00', (b'\xff' * 20), b''))))
def test_encode_intended_validator(primitive, hexstr, text, validator_address, expected_signable):
    """EIP-191 version 0x00 encoding: empty messages (given as primitive,
    hexstr or text) produce the expected SignableMessage for both bytes and
    checksummed-string validator addresses."""
    signable_message = encode_intended_validator(validator_address, primitive, hexstr=hexstr, text=text)
    assert signable_message == expected_signable
def go_to_goal(goal):
    """Publish one /cmd_vel command steering the robot toward *goal* (x, y).

    Simple proportional controller: linear speed from the distance to goal,
    angular speed from the heading error.
    """
    global robot_location, robot_rotation
    d = Distance_compute(robot_location, goal)
    theta = robot_rotation[2]
    # Proportional gains (linear, angular).
    kl = 1
    ka = 4
    vx = 0
    va = 0
    heading = math.atan2((goal[1] - robot_location[1]), (goal[0] - robot_location[0]))
    err_theta = (heading - theta)
    if (d > 0.01):
        vx = (kl * abs(d))
        # NOTE(review): this overwrites the proportional value computed on the
        # previous line with a constant speed -- looks like leftover debugging;
        # confirm which behaviour is intended.
        vx = 1
    if (abs(err_theta) > 0.01):
        va = (ka * err_theta)
    # NOTE(review): a new Publisher is created on every call; rospy publishers
    # are usually created once at startup -- messages published immediately
    # after creation can be dropped while the connection establishes.
    vel_1 = rospy.Publisher('/cmd_vel', geometry_msgs.msg.Twist, queue_size=10)
    cmd = geometry_msgs.msg.Twist()
    cmd.linear.x = vx
    cmd.angular.z = va
    vel_1.publish(cmd)
def test_matcher_set_value_operator(en_vocab):
    """An optional ('OP': '?') token with an IN-set matches with and without
    the determiner present."""
    matcher = Matcher(en_vocab)
    det_house = [{'ORTH': {'IN': ['a', 'the']}, 'OP': '?'}, {'ORTH': 'house'}]
    matcher.add('DET_HOUSE', [det_house])

    # Determiner present: one match spanning "a house".
    doc = Doc(en_vocab, words=['In', 'a', 'house'])
    matches = matcher(doc)
    assert len(matches) == 1

    # Determiner absent: still one match on "house" alone.
    doc = Doc(en_vocab, words=['my', 'house'])
    matches = matcher(doc)
    assert len(matches) == 1
# NOTE(review): the three lines below appear to be click decorators whose
# leading '@' (and part of the first decorator's target name) were lost in
# extraction -- most likely '@<ml>_group.command(...)',
# '@click.argument('model-id', required=False)' and '@click.pass_context'.
# Restore them from version control before running this module.
_group.command('remove-model')
('model-id', required=False)
_context
def remove_model(ctx: click.Context, model_id):
    """Delete a trained ML model from Elasticsearch, prompting for the model
    ID (from the known IDs) when it was not supplied."""
    es_client = MlClient(ctx.obj['es'])
    model_ids = MachineLearningClient.get_existing_model_ids(ctx.obj['es'])
    if (not model_id):
        model_id = click.prompt('Model ID to remove', type=click.Choice(model_ids))
    try:
        result = es_client.delete_trained_model(model_id)
    except elasticsearch.ConflictError as e:
        # A conflict means ingest pipelines still reference the model.
        click.echo(f'{e}: try running `remove-scripts-pipelines` first')
        ctx.exit(1)
    table = Table.from_list(['model_id', 'status'], [{'model_id': model_id, 'status': result}])
    click.echo(table)
    return result
def test_kronos_devices_list_devices(client: TestClient, with_registered_device: None):
    """The device listing endpoint returns exactly the one registered sample
    device (provided by the with_registered_device fixture)."""
    from tests.test_database_models import SAMPLE_DEVICE_HID
    response = client.get('/api/v1/kronos/devices')
    assert response.status_code == 200
    devices = response.json()['data']
    assert len(devices) == 1
    assert devices[0]['hid'] == SAMPLE_DEVICE_HID
def test_agent_configuration_dump_multipage():
    """Loading and re-dumping a multipage agent config preserves both YAML
    documents (the agent page and the component-override page)."""
    loader = ConfigLoaders.from_package_type(PackageType.AGENT)
    agent_config = loader.load(Path(CUR_PATH, 'data', 'aea-config.example_multipage.yaml').open())
    assert agent_config.agent_name == 'myagent'
    assert agent_config.author == 'fetchai'
    assert len(agent_config.component_configurations) == 1

    # Round-trip through an in-memory stream and re-parse all documents.
    buffer = io.StringIO()
    loader.dump(agent_config, buffer)
    buffer.seek(0)
    documents = yaml_load_all(buffer)
    assert documents[0]['agent_name'] == 'myagent'
    assert documents[1]['public_id'] == 'dummy_author/dummy:0.1.0'
    assert documents[1]['type'] == 'skill'
# NOTE(review): the decorator below lost its '@pytest.mark.' prefix in the
# checked-in file (extraction artifact); restored here.
@pytest.mark.parallel(nprocs=3)
def test_poisson_mixed_parallel_fieldsplit():
    """Mixed Poisson solve on 3 ranks with a Schur-complement fieldsplit
    preconditioner converges below the error tolerance."""
    x = poisson_mixed(3, parameters={'ksp_type': 'fgmres', 'pc_type': 'fieldsplit', 'pc_fieldsplit_type': 'schur', 'fieldsplit_schur_fact_type': 'diag', 'fieldsplit_0_ksp_type': 'preonly', 'fieldsplit_1_ksp_type': 'cg', 'fieldsplit_0_pc_type': 'bjacobi', 'fieldsplit_0_sub_pc_type': 'icc', 'fieldsplit_1_pc_type': 'none'})
    assert x < 2e-05
def is_azure_chat(kwargs):
    """Return True when the call targets an Azure chat deployment.

    Looks at kwargs['api_config']['api_type']; when api_config carries no
    api_type, falls back to the OPENAI_API_TYPE environment variable
    (defaulting to 'azure').
    """
    if 'api_config' not in kwargs:
        return False
    api_config = kwargs['api_config']
    if 'api_type' not in api_config:
        # No explicit api_type: defer to the environment.
        return os.environ.get('OPENAI_API_TYPE', 'azure') == 'azure-chat'
    return 'azure-chat' in api_config.get('api_type', '')
class OptionPlotoptionsNetworkgraphSonificationDefaultspeechoptionsMappingPlaydelay(Options):
    """Generated Highcharts option wrapper for sonification play-delay mapping.

    NOTE(review): each duplicated method-name pair below only makes sense as a
    ``@property`` getter plus ``@<name>.setter`` pair; the decorators were
    stripped in the checked-in text and are restored here.
    """

    @property
    def mapFunction(self):
        """Mapping function for the play delay (None when unset)."""
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        """Point property to map the play delay to (None when unset)."""
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        """Upper bound of the mapped value (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the mapped value (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        """Data scope the mapping applies within (None when unset)."""
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class OptionSeriesPyramid3dAccessibility(Options):
    """Generated Highcharts accessibility options for pyramid3d series.

    NOTE(review): the duplicated method-name pairs below only make sense as
    ``@property`` getter/``@<name>.setter`` pairs; the decorators were stripped
    in the checked-in text and are restored here (sub-option accessors are
    read-only properties).
    """

    @property
    def description(self):
        """Accessibility description text (None when unset)."""
        return self._config_get(None)

    @description.setter
    def description(self, text: str):
        self._config(text, js_type=False)

    @property
    def descriptionFormat(self):
        """Format string for the generated description (None when unset)."""
        return self._config_get(None)

    @descriptionFormat.setter
    def descriptionFormat(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        """Whether accessibility is enabled for the series (None when unset)."""
        return self._config_get(None)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def exposeAsGroupOnly(self):
        """Expose the series as a single group to screen readers (None when unset)."""
        return self._config_get(None)

    @exposeAsGroupOnly.setter
    def exposeAsGroupOnly(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def keyboardNavigation(self) -> 'OptionSeriesPyramid3dAccessibilityKeyboardnavigation':
        """Keyboard-navigation sub-options."""
        return self._config_sub_data('keyboardNavigation', OptionSeriesPyramid3dAccessibilityKeyboardnavigation)

    @property
    def point(self) -> 'OptionSeriesPyramid3dAccessibilityPoint':
        """Per-point accessibility sub-options."""
        return self._config_sub_data('point', OptionSeriesPyramid3dAccessibilityPoint)
def extract_tar_file(path: PathIn, dest: PathIn, *, autodelete: bool=False, content_paths: (Iterable[tarfile.TarInfo] | None)=None) -> None:
    """Extract an (uncompressed) tar archive into *dest*.

    :param path: archive file; must exist and be a file
    :param dest: destination directory (created if missing; must not be a file)
    :param autodelete: remove the archive after successful extraction
    :param content_paths: optional subset of members to extract
    """
    path = _get_path(path)
    dest = _get_path(dest)
    assert_file(path)
    assert_not_file(dest)
    make_dirs(dest)
    # NOTE(review): extractall without a `filter=` argument is vulnerable to
    # path traversal if the archive is untrusted (see tarfile docs); confirm
    # archives here are trusted or pass filter='data' on Python >= 3.12.
    with tarfile.TarFile(path, 'r') as archive:
        archive.extractall(dest, members=content_paths)
    if not autodelete:
        return
    remove_file(path)
class FileEntry():
    """Immutable-ish record of a regular file's size and mtime.

    BUGFIX: ``stat_regular_file`` is defined with two parameters but was
    called as ``self.stat_regular_file(path, stat_function)`` — i.e. three
    arguments including the implicit ``self`` — which raised ``TypeError`` on
    every construction.  The missing ``@staticmethod`` decorator (evidently
    stripped from the checked-in text) is restored.  The local variable that
    shadowed the ``stat`` module is also renamed.
    """
    __slots__ = ('path', 'size', 'mtime')

    def __init__(self, path, stat_cache=None):
        """Record size/mtime of *path*.

        :param path: file path to stat
        :param stat_cache: optional mapping path -> stat result; when given,
            lookups go through it instead of a live ``os.stat`` call (a
            missing entry raises ``KeyError``, mapped to MissingFileError).
        """
        self.path = path
        stat_function = os.stat if stat_cache is None else stat_cache.__getitem__
        st = self.stat_regular_file(path, stat_function)
        self.size = st.st_size
        self.mtime = st.st_mtime

    @staticmethod
    def stat_regular_file(path, stat_function):
        """Stat *path* via *stat_function* and verify it is a regular file.

        :raises MissingFileError: path absent (ENOENT/ENAMETOOLONG or a cache miss)
        :raises IsDirectoryError: path is a directory
        :raises NotARegularFileError: path exists but is not a regular file
        """
        try:
            stat_result = stat_function(path)
        except KeyError:
            # Stat caches signal a missing entry with KeyError.
            raise MissingFileError(path)
        except OSError as e:
            if e.errno in (errno.ENOENT, errno.ENAMETOOLONG):
                raise MissingFileError(path)
            else:
                raise
        if not stat.S_ISREG(stat_result.st_mode):
            if stat.S_ISDIR(stat_result.st_mode):
                raise IsDirectoryError(f'Path is a directory: {path}')
            else:
                raise NotARegularFileError(f'Not a regular file: {path}')
        return stat_result
class ButtonMenu(Html.Html):
    """Button with an attached dropdown menu container.

    NOTE(review): ``options`` and ``style`` below lost their ``@property``
    decorators in the checked-in text (their bodies — ``return super().options``
    and the cached ``_styleObj`` — follow this framework's property accessor
    pattern); restored here.
    """
    name = 'Button Menu'
    _option_cls = OptButton.OptionsButtonMenu
    tag = 'div'

    def __init__(self, page: primitives.PageModel, record, text: str, icon: Optional[str], width: Optional[tuple], height: Optional[tuple], html_code: Optional[str], tooltip: Optional[str], profile: Optional[Union[(bool, dict)]], options: Optional[dict], verbose: bool=False):
        super(ButtonMenu, self).__init__(page, record, html_code=html_code, profile=profile, css_attrs={'width': width, 'height': height}, verbose=verbose)
        # Inner button component, rendered manually in __str__.
        self.button = page.ui.button(text, icon, width, height, html_code, tooltip, profile, options)
        self.button.options.managed = False
        self.set_attrs(name='data-count', value=0)
        self.style.css.position = 'relative'
        self.style.css.display = 'inline-block'
        # Dropdown container holding the menu entries.
        self.container = page.ui.div()
        self.container.attr['class'].add('dropdown-content')
        self.container.options.managed = False

    def __getitem__(self, i: int):
        """Return (lazily creating) the i-th menu item, bound to the i-th
        anchor inside this component in the DOM."""
        if (i not in self.components):
            self.components[i] = ButtonMenuItem(self.page, ("document.getElementById('%s').querySelectorAll('a')[%s]" % (self.htmlCode, i)), self)
        return self.components[i]

    @property
    def options(self) -> OptButton.OptionsButtonMenu:
        """Component options (typed view over the base options)."""
        return super().options

    @property
    def style(self) -> GrpClsButton.ClassButtonMenu:
        """CSS style helper, created on first access."""
        if (self._styleObj is None):
            self._styleObj = GrpClsButton.ClassButtonMenu(self)
        return self._styleObj

    def __str__(self):
        self.page.properties.js.add_builders(self.refresh())
        # Collect the menu items' pending events and register them on ready.
        events = []
        for comp in self.components.values():
            events.extend(comp._events)
        self.onReady(events)
        return ('<%s %s>%s%s</%s>' % (self.tag, self.get_attrs(css_class_names=self.style.get_classes()), self.button.html(), self.container.html(), self.tag))
class SimulationConfigPanel(QWidget):
    """Base Qt panel for configuring a simulation run.

    Subclasses override validity/argument hooks; this base is always valid
    and contributes no extra arguments.
    """
    # Emitted by subclasses when the user edits the configuration.
    simulationConfigurationChanged = Signal()

    def __init__(self, simulation_model):
        QWidget.__init__(self)
        self.setContentsMargins(10, 10, 10, 10)
        self.__simulation_model = simulation_model

    def getSimulationModel(self):
        """Return the simulation model this panel configures."""
        return self.__simulation_model

    def isConfigurationValid(self):
        """Whether the panel's current inputs are valid (base: always True)."""
        return True

    def getSimulationArguments(self) -> Dict[(str, Any)]:
        """Extra keyword arguments for the simulation (base: none)."""
        return {}
def get_model(model_type: str) -> BaseEstimator:
    """Return a small fitted estimator for testing.

    :param model_type: 'lr' (LinearRegression) or 'svc' (linear-kernel SVC)
    """
    factories = {'lr': LinearRegression, 'svc': partial(SVC, kernel='linear')}
    features = np.random.normal(size=(10, 2))
    labels = np.random.randint(2, size=(10,))
    # Re-draw until both classes appear so classifiers can fit.
    while len(set(labels)) < 2:
        labels = np.random.randint(2, size=(10,))
    estimator = factories[model_type]()
    estimator.fit(features, labels)
    return estimator
class OptionPlotoptionsSplineSonificationDefaultinstrumentoptionsPointgrouping(Options):
    """Generated Highcharts option wrapper for sonification point grouping.

    NOTE(review): the duplicated method-name pairs below only make sense as
    ``@property`` getter/``@<name>.setter`` pairs; the decorators were stripped
    in the checked-in text and are restored here.
    """

    @property
    def algorithm(self):
        """Grouping algorithm (default 'minmax')."""
        return self._config_get('minmax')

    @algorithm.setter
    def algorithm(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        """Whether point grouping is enabled (default True)."""
        return self._config_get(True)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def groupTimespan(self):
        """Timespan of each group in milliseconds (default 15)."""
        return self._config_get(15)

    @groupTimespan.setter
    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        """Point property used to pick the representative point (default 'y')."""
        return self._config_get('y')

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
class TestDailyAggQuarterFeatures():
    """Tests for DailyAggQuarterFeatures.calculate.

    NOTE(review): the parametrize decorators below lost their '@pytest.mark.'
    prefix in the checked-in file (extraction artifact); restored here.
    """

    @pytest.mark.parametrize('data', datas)
    @pytest.mark.parametrize(['tickers', 'columns', 'agg_day_counts', 'max_back_quarter', 'min_back_quarter'], [(['AAPL', 'TSLA'], ['marketcap'], [100], 10, 0), (['NVDA', 'TSLA'], ['marketcap'], [100, 200], 5, 2), (['AAPL', 'NVDA', 'TSLA', 'WORK'], ['marketcap', 'pe'], [50, 200], 10, 0), (['AAPL', 'ZLG'], ['marketcap', 'pe'], [50, 200], 5, 0)])
    def test_calculate(self, data, tickers, columns, agg_day_counts, max_back_quarter, min_back_quarter):
        """Shape, index and per-column min/mean/median/max ordering of the
        aggregated daily features."""
        fc = DailyAggQuarterFeatures(daily_data_key='daily', quarterly_data_key='quarterly', columns=columns, agg_day_counts=agg_day_counts, max_back_quarter=max_back_quarter, min_back_quarter=min_back_quarter)
        X = fc.calculate(data, tickers)
        assert (type(X) == pd.DataFrame)
        assert ('ticker' in X.index.names)
        assert ('date' in X.index.names)
        # At most one row per (ticker, back-quarter) in the requested window.
        assert (X.shape[0] <= ((max_back_quarter - min_back_quarter) * len(tickers)))
        assert (X.shape[1] == ((len(calc_series_stats([])) * len(columns)) * len(agg_day_counts)))
        for col in columns:
            for count in agg_day_counts:
                min_col = '_days{}_{}_min'.format(count, col)
                max_col = '_days{}_{}_max'.format(count, col)
                mean_col = '_days{}_{}_mean'.format(count, col)
                median_col = '_days{}_{}_median'.format(count, col)
                # Ordering invariants (NaNs tolerated on either side).
                assert ((X[max_col] >= X[min_col]) | (X[max_col].isnull() | X[min_col].isnull())).min()
                assert ((X[max_col] >= X[mean_col]) | (X[max_col].isnull() | X[mean_col].isnull())).min()
                assert ((X[max_col] >= X[median_col]) | (X[max_col].isnull() | X[median_col].isnull())).min()
                # NOTE(review): duplicate of the first max>=min check above;
                # possibly meant to be mean >= min.
                assert ((X[max_col] >= X[min_col]) | (X[max_col].isnull() | X[min_col].isnull())).min()
                assert ((X[median_col] >= X[min_col]) | (X[median_col].isnull() | X[min_col].isnull())).min()

    @pytest.mark.parametrize('data', datas)
    @pytest.mark.parametrize(['tickers', 'columns', 'agg_day_counts', 'max_back_quarter', 'min_back_quarter'], [(['AAPL', 'TSLA'], ['marketcap'], [100], 10, 0), (['NVDA', 'TSLA'], ['marketcap'], [100, 200], 5, 2), (['AAPL', 'NVDA', 'TSLA', 'WORK'], ['marketcap', 'pe'], [50, 200], 10, 0), (['AAPL', 'ZLG'], ['marketcap', 'pe'], [50, 200], 5, 0)])
    def test_calculate_dayly_index(self, data, tickers, columns, agg_day_counts, max_back_quarter, min_back_quarter):
        """Same invariants when aggregating over an external daily index
        (per-code column prefixes, width multiplied by the number of codes)."""
        commodities_codes = ['AAPL', 'MSFT']
        fc = DailyAggQuarterFeatures(daily_data_key='daily', quarterly_data_key='quarterly', columns=columns, agg_day_counts=agg_day_counts, max_back_quarter=max_back_quarter, min_back_quarter=min_back_quarter, daily_index=commodities_codes)
        X = fc.calculate(data, tickers)
        assert (type(X) == pd.DataFrame)
        assert ('ticker' in X.index.names)
        assert ('date' in X.index.names)
        assert (X.shape[0] <= ((max_back_quarter - min_back_quarter) * len(tickers)))
        assert (X.shape[1] == (((len(calc_series_stats([])) * len(columns)) * len(agg_day_counts)) * len(commodities_codes)))
        for code in commodities_codes:
            for col in columns:
                for count in agg_day_counts:
                    min_col = '{}_days{}_{}_min'.format(code, count, col)
                    max_col = '{}_days{}_{}_max'.format(code, count, col)
                    mean_col = '{}_days{}_{}_mean'.format(code, count, col)
                    median_col = '{}_days{}_{}_median'.format(code, count, col)
                    assert ((X[max_col] >= X[min_col]) | (X[max_col].isnull() | X[min_col].isnull())).min()
                    assert ((X[max_col] >= X[mean_col]) | (X[max_col].isnull() | X[mean_col].isnull())).min()
                    assert ((X[max_col] >= X[median_col]) | (X[max_col].isnull() | X[median_col].isnull())).min()
                    # NOTE(review): duplicate of the first max>=min check above.
                    assert ((X[max_col] >= X[min_col]) | (X[max_col].isnull() | X[min_col].isnull())).min()
                    assert ((X[median_col] >= X[min_col]) | (X[median_col].isnull() | X[min_col].isnull())).min()
class TestPostHooks():
    """Exercise spock ``__post_hook__`` validation helpers.

    Every test clears ``sys.argv`` (so the builder ignores pytest's own
    command line) and, except for ``test_eq_len_none``, expects
    ``generate()`` to raise ``_SpockInstantiationError`` for a config whose
    post-hook validation fails.
    """

    def test_return_raise(self, monkeypatch, tmp_path):
        # A __post_hook__ returning a value (instead of None) must raise.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                # NOTE(review): this class likely lost its @spock decorator in
                # extraction (other decorators in this file were stripped the
                # same way) -- confirm against version control.
                class FailReturnConfig():
                    val_1: float = 0.5

                    def __post_hook__(self):
                        return self.val_1
                config = SpockBuilder(FailReturnConfig, desc='Test Builder')
                config.generate()

    def test_sum_none_fail_config(self, monkeypatch, tmp_path):
        # sum() validator with a None operand.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(SumNoneFailConfig, desc='Test Builder')
                config.generate()

    def test_sum_not_equal_config(self, monkeypatch, tmp_path):
        # sum() validator where the operands do not add up.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(SumNoneNotEqualConfig, desc='Test Builder')
                config.generate()

    def test_eq_len_two_len_fail(self, monkeypatch, tmp_path):
        # eq_len validator with two mismatched lengths.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(EqLenNoneTwoLenConfig, desc='Test Builder')
                config.generate()

    def test_eq_len_none_fail(self, monkeypatch, tmp_path):
        # eq_len validator failing on a None operand.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(EqLenNoneFailConfig, desc='Test Builder')
                config.generate()

    def test_eq_len_none(self, monkeypatch, tmp_path):
        # Success case: eq_len with permitted None operands does not raise.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            config = SpockBuilder(EqLenNoneConfig, desc='Test Builder')
            config.generate()

    def test_within_low(self, monkeypatch, tmp_path):
        # within() validator: value below the lower bound.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(WithinLowFailConfig, desc='Test Builder')
                config.generate()

    def test_within_high(self, monkeypatch, tmp_path):
        # within() validator: value above the upper bound.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(WithinHighFailConfig, desc='Test Builder')
                config.generate()

    def test_within_none(self, monkeypatch, tmp_path):
        # within() validator failing on a None operand.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(WithinNoneFailConfig, desc='Test Builder')
                config.generate()

    def test_gt(self, monkeypatch, tmp_path):
        # gt() validator failure.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(GTFailConfig, desc='Test Builder')
                config.generate()

    def test_gt_none(self, monkeypatch, tmp_path):
        # gt() validator failing on a None operand.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(GTFailNoneConfig, desc='Test Builder')
                config.generate()

    def test_ge(self, monkeypatch, tmp_path):
        # ge() validator failure.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(GEFailConfig, desc='Test Builder')
                config.generate()

    def test_ge_none(self, monkeypatch, tmp_path):
        # ge() validator failing on a None operand.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(GEFailNoneConfig, desc='Test Builder')
                config.generate()

    def test_lt(self, monkeypatch, tmp_path):
        # lt() validator failure.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(LTFailConfig, desc='Test Builder')
                config.generate()

    def test_lt_none(self, monkeypatch, tmp_path):
        # lt() validator failing on a None operand.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(LTFailNoneConfig, desc='Test Builder')
                config.generate()

    def test_le(self, monkeypatch, tmp_path):
        # le() validator failure.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(LEFailConfig, desc='Test Builder')
                config.generate()

    def test_le_none(self, monkeypatch, tmp_path):
        # le() validator failing on a None operand.
        with monkeypatch.context() as m:
            m.setattr(sys, 'argv', [''])
            with pytest.raises(_SpockInstantiationError):
                config = SpockBuilder(LEFailNoneConfig, desc='Test Builder')
                config.generate()
class GetBlockBodiesV65Exchange(BaseGetBlockBodiesV65Exchange):
    """ETH/65 GetBlockBodies request/response exchange."""
    # Converts raw response payloads into block-body bundles.
    _normalizer = GetBlockBodiesNormalizer()
    tracker_class = GetBlockBodiesTracker
    _request_command_type = GetBlockBodiesV65
    _response_command_type = BlockBodiesV65

    async def __call__(self, headers: Sequence[BlockHeaderAPI], timeout: float=None) -> BlockBodyBundles:
        """Request the bodies for *headers* and await the normalized bundles.

        The validator checks that the returned bodies correspond to the
        requested headers; payload validation itself is a no-op here.
        """
        validator = GetBlockBodiesValidator(headers)
        block_hashes = tuple((header.hash for header in headers))
        request = GetBlockBodiesV65(block_hashes)
        return (await self.get_result(request, self._normalizer, validator, noop_payload_validator, timeout))
def test_get_elasticsearch_awards_csv_sources(db):
    """elasticsearch_awards download requests expand into two CSV sources
    (d1 and d2), with the filter function stubbed out."""
    saved_filter = VALUE_MAPPINGS['elasticsearch_awards']['filter_function']
    # NOTE(review): 'returned_value' looks like a typo for MagicMock's
    # 'return_value'; kept as-is to preserve behaviour.
    VALUE_MAPPINGS['elasticsearch_awards']['filter_function'] = MagicMock(returned_value='')
    sources = download_generation.get_download_sources({'download_types': ['elasticsearch_awards'], 'filters': {'award_type_codes': list(award_type_mapping.keys())}})
    assert len(sources) == 2
    VALUE_MAPPINGS['elasticsearch_awards']['filter_function'] = saved_filter
    assert sources[0].file_type == 'd1'
    assert sources[0].source_type == 'elasticsearch_awards'
    assert sources[1].file_type == 'd2'
    assert sources[1].source_type == 'elasticsearch_awards'
class OptionPlotoptionsOrganizationSonificationDefaultspeechoptionsActivewhen(Options):
    """Generated Highcharts option wrapper for sonification activation rules.

    NOTE(review): the duplicated method-name pairs below only make sense as
    ``@property`` getter/``@<name>.setter`` pairs; the decorators were stripped
    in the checked-in text and are restored here.
    """

    @property
    def crossingDown(self):
        """Activate when the property crosses this value downward (None when unset)."""
        return self._config_get(None)

    @crossingDown.setter
    def crossingDown(self, num: float):
        self._config(num, js_type=False)

    @property
    def crossingUp(self):
        """Activate when the property crosses this value upward (None when unset)."""
        return self._config_get(None)

    @crossingUp.setter
    def crossingUp(self, num: float):
        self._config(num, js_type=False)

    @property
    def max(self):
        """Upper bound of the active range (None when unset)."""
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        """Lower bound of the active range (None when unset)."""
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def prop(self):
        """Point property the activation rule applies to (None when unset)."""
        return self._config_get(None)

    @prop.setter
    def prop(self, text: str):
        self._config(text, js_type=False)
def echo_body(environ: Environ, start_response: StartResponse) -> List[bytes]:
    """WSGI application that echoes the request body back as UTF-8 plain text."""
    body = environ['wsgi.input'].read()
    response_headers = [
        ('Content-Type', 'text/plain; charset=utf-8'),
        ('Content-Length', str(len(body))),
    ]
    start_response('200 OK', response_headers, None)
    return [body]
def test_xcom_push():
    """BashSensor.xcom_push records its key/value into the Flyte user-space
    xcom data when GET_ORIGINAL_TASK is set on the execution state."""
    ctx = FlyteContextManager.current_context()
    ctx.user_space_params._attrs = {}
    # Flag the context to capture the original task and start with an empty
    # xcom store.
    execution_state = ctx.execution_state.with_params(user_space_params=ctx.user_space_params.new_builder().add_attr('GET_ORIGINAL_TASK', True).add_attr('XCOM_DATA', {}).build())
    with FlyteContextManager.with_context(ctx.with_execution_state(execution_state)) as child_ctx:
        print(child_ctx.user_space_params.get_original_task)
        op = BashSensor(task_id='Sensor_succeeds', bash_command='exit 0')
        op.xcom_push(Context(), 'key', 'value')
        # xcom_data holds the pushed key at index 1 and value at index 2.
        # NOTE(review): confirm this indexing against the xcom_push
        # implementation -- it is not visible from this file.
        assert (child_ctx.user_space_params.xcom_data[1] == 'key')
        assert (child_ctx.user_space_params.xcom_data[2] == 'value')
def gripper_test(mc):
    """Exercise the gripper API of *mc*: encoder moves, value moves and
    open/close states, pausing between commands, then print the final value."""
    print('Start check IO part of api\n')
    moving = mc.is_gripper_moving()
    print('Is gripper moving: {}'.format(moving))
    time.sleep(1)

    # Drive the gripper joint (7) directly via encoder values.
    for encoder_target in (2048, 1300):
        mc.set_encoder(7, encoder_target)
        time.sleep(3)

    # Move via gripper value at speed 70.
    for gripper_target in (2048, 1500):
        mc.set_gripper_value(gripper_target, 70)
        time.sleep(5)

    # Toggle open (0) / close (1) states at speed 70.
    for state in (0, 1):
        mc.set_gripper_state(state, 70)
        time.sleep(5)

    print('')
    print(mc.get_gripper_value())
# NOTE(review): the decorators below were mangled in the checked-in file --
# "('pyscf')" is most plausibly "@using('pyscf')" and ".parametrize" lost its
# "@pytest.mark." prefix; restored here, confirm against version control.
@using('pyscf')
@pytest.mark.parametrize('fn, geom, charge, mult, ref_energy', BakerTSBm.geom_iter)
def test_baker_ts_dimer(fn, geom, charge, mult, ref_energy, results_bag):
    """Dimer-method transition-state search on the Baker TS benchmark set:
    each geometry must converge to the reference energy."""
    N_init_dict = make_N_init_dict()
    calc_kwargs = {'charge': charge, 'mult': mult, 'pal': 2, 'base_name': Path(fn).stem}

    def calc_getter():
        return PySCF(basis='321g', verbose=0, **calc_kwargs)
    geom.set_calculator(calc_getter())
    dimer_kwargs = {'max_step': 0.25, 'dR_base': 0.0189, 'rot_opt': 'lbfgs', 'trans_opt': 'lbfgs', 'angle_tol': 5, 'f_thresh': 0.001, 'max_cycles': 50, 'f_tran_mod': True, 'multiple_translations': True}
    # Benchmark-provided initial dimer orientation for this geometry.
    dimer_kwargs['N_init'] = N_init_dict[fn]
    geoms = (geom,)
    results = dimer_method(geoms, calc_getter, **dimer_kwargs)
    same_energy = (geom.energy == pytest.approx(ref_energy))
    print(f' energy: {str(same_energy): >5}, {fn}, {results.force_evals} force evaluations')
    if (not same_energy):
        # Dump a final Hessian to help diagnose the failed convergence.
        do_final_hessian(geom)
    assert (geom.energy == pytest.approx(ref_energy))
class Command(BaseCommand):
    """Import practice dispensing status from an Excel sheet.

    Reads addresses from 'Sheet1' of the given workbook, matches each row
    against Practice records (by name+postcode, then address1+postcode, then
    postcode-only / name-only as fallbacks) and records a PID entry for each
    matched practice at the supplied date.
    """

    args = ''
    help = 'Imports practice dispensing status.'

    def add_arguments(self, parser):
        # Both options are required; checked explicitly in handle().
        parser.add_argument('--filename')
        parser.add_argument('--date')

    def handle(self, *args, **options):
        self.IS_VERBOSE = False
        if (options['verbosity'] > 1):
            self.IS_VERBOSE = True
        if (not options['filename']):
            raise CommandError('Please supply a filename')
        if (not options['date']):
            raise CommandError('Please supply a date')
        workbook = xlrd.open_workbook(options['filename'])
        worksheet = workbook.sheet_by_name('Sheet1')
        num_rows = (worksheet.nrows - 1)
        curr_row = (- 1)
        # Match-quality counters (diagnostics only; not persisted).
        name_and_postcode_matches = 0
        useful_rows = 0
        address1_and_postcode_matches = 0
        name_only_matches = 0
        postcode_only_matches = 0
        multiple_matches_found = 0
        while (curr_row < num_rows):
            curr_row += 1
            address = worksheet.cell_value(curr_row, 1).strip()
            # Skip the spreadsheet's header/banner rows.
            if ((address == 'Dispensing Practices Address Details') or (address == 'Primary Care Trust:') or (address == 'Report For:') or (address == 'Practice Name and Address')):
                continue
            useful_rows += 1
            addresses = address.split(',')
            name = addresses[0].strip().upper()
            postcode = addresses[(- 1)].strip().replace('\n', ' ')
            # One known row lacks a postcode entirely; patch it by hand.
            addr_with_no_postcode = 'Old School Surgery, Church Street, '
            addr_with_no_postcode += 'Seaford, East Sussex'
            if (address == addr_with_no_postcode):
                postcode = 'BN25 1HH'
            if ((' ' not in postcode) or (len(postcode) > 8)):
                print('POSTCODE ISSUE', address)
            p = None
            # Matching cascade: exact name+postcode, then address1+postcode,
            # then postcode-only, finally name-only (last two only counted,
            # not used for the PID record since p stays None).
            try:
                p = Practice.objects.get(name=name, postcode=postcode)
                name_and_postcode_matches += 1
            except Practice.DoesNotExist:
                try:
                    p = Practice.objects.get(address1=name, postcode=postcode)
                    address1_and_postcode_matches += 1
                except Practice.DoesNotExist:
                    ps = Practice.objects.filter(postcode=postcode)
                    if ps:
                        name_only_matches += 1
                    else:
                        ps = Practice.objects.filter(name=name)
                        if ps:
                            postcode_only_matches += 1
                except Practice.MultipleObjectsReturned:
                    multiple_matches_found += 1
            except Practice.MultipleObjectsReturned:
                multiple_matches_found += 1
            if p:
                # Idempotent insert of the dispensing record for this date.
                (pds, created) = PID.objects.get_or_create(practice=p, date=options['date'])
def test_flags():
    """_get_flags should emit 1 for each named bit seen in the column, else 0."""
    host = HostBase()
    names = {0: 'foo', 1: 'baz', 2: 'blah'}
    # Values 1, 2 and 4 set bits 0, 1 and 2 respectively -> every flag fires.
    frame = pd.DataFrame.from_dict({'test_col': [1, 2, 4]})
    expected = {'tshark_test_col_foo': 1, 'tshark_test_col_baz': 1, 'tshark_test_col_blah': 1}
    assert host._get_flags(frame, 'test_col', names, suffix=None, field_name=None) == expected
    # Replacing 2 with 0 clears bit 1, so the 'baz' flag reports 0.
    frame = pd.DataFrame.from_dict({'test_col': [1, 0, 4]})
    expected = {'tshark_test_col_foo': 1, 'tshark_test_col_baz': 0, 'tshark_test_col_blah': 1}
    assert host._get_flags(frame, 'test_col', names, suffix=None, field_name=None) == expected
class FailingAuthAccessedInRenderer(TestCase):
    """A renderer that inspects request.user must not blow up when auth fails."""

    def setUp(self):
        class UserAwareRenderer(renderers.BaseRenderer):
            media_type = 'text/plain'
            format = 'txt'

            def render(self, data, media_type=None, renderer_context=None):
                # Touching .user triggers lazy authentication inside rendering.
                request = renderer_context['request']
                if request.user.is_authenticated:
                    return b'authenticated'
                return b'not authenticated'

        class AlwaysFailingAuth(BaseAuthentication):
            def authenticate(self, request):
                raise exceptions.AuthenticationFailed('authentication failed')

        class ExampleView(APIView):
            authentication_classes = (AlwaysFailingAuth,)
            renderer_classes = (UserAwareRenderer,)

            def get(self, request):
                return Response({'foo': 'bar'})

        self.view = ExampleView.as_view()

    def test_failing_auth_accessed_in_renderer(self):
        response = self.view(factory.get('/'))
        # The failed authentication surfaces as an anonymous user, not a crash.
        assert response.render().content == b'not authenticated'
# NOTE(review): the line below appears to be a click command decorator
# (e.g. '@cli.command(name=...)') whose prefix was lost during extraction —
# restore before running.
(name='generate-bundles')
def generate_bundles():
    """Generate an EDM dependency bundle file for each supported platform."""
    for platform in PLATFORMS:
        bundle_dir = os.path.join('bundle', platform)
        # Create the per-platform output directory on demand.
        if (not os.path.exists(bundle_dir)):
            os.makedirs(bundle_dir)
        bundle_file = os.path.join(bundle_dir, BUNDLE_NAME)
        click.echo('Generating bundle {}'.format(bundle_file))
        edm_command = (['edm', 'bundle', 'generate', '--bundle-format', '2.0', '--platform', platform, '--version', DEFAULT_RUNTIME, '--output-file', bundle_file] + sorted(full_app_dependencies))
        try:
            subprocess.check_call(edm_command)
        except subprocess.CalledProcessError:
            # Report the failure and continue with the remaining platforms.
            click.echo('Failed to generate bundle {}'.format(bundle_file))
class Systray(widget.Systray):
    """qtile Systray variant that paints tray icon backgrounds to match
    RectDecoration fills (or falls back to the widget/bar background)."""

    _qte_compatibility = True

    def draw(self):
        offset = self.padding
        self.drawer.clear((self.background or self.bar.background))
        self.drawer.draw(offsetx=self.offset, offsety=self.offsety, width=self.length)
        for (pos, icon) in enumerate(self.tray_icons):
            # If rect decorations are present, paint the icon window with the
            # topmost decoration's fill colour; otherwise reuse our pixmap.
            rect_decs = [d for d in self.decorations if isinstance(d, RectDecoration)]
            if rect_decs:
                top = rect_decs[(- 1)]
                if top.filled:
                    fill_colour = top.fill_colour
                else:
                    fill_colour = (self.background or self.bar.background)
                # X expects a raw integer pixel value, so strip '#' and parse hex.
                if fill_colour.startswith('#'):
                    fill_colour = fill_colour[1:]
                icon.window.set_attribute(backpixel=int(fill_colour, 16))
            else:
                icon.window.set_attribute(backpixmap=self.drawer.pixmap)
            # Position depends on bar orientation: icons advance along the
            # bar's axis and are centred on the other axis.
            if self.bar.horizontal:
                xoffset = (self.offsetx + offset)
                yoffset = (((self.bar.height // 2) - (self.icon_size // 2)) + self.offsety)
                step = icon.width
            else:
                xoffset = (((self.bar.width // 2) - (self.icon_size // 2)) + self.offsetx)
                yoffset = (self.offsety + offset)
                step = icon.height
            icon.place(xoffset, yoffset, icon.width, self.icon_size, 0, None)
            if icon.hidden:
                icon.unhide()
                # Notify the newly-unhidden icon it is embedded (XEMBED protocol).
                data = [self.conn.atoms['_XEMBED_EMBEDDED_NOTIFY'], xcffib.xproto.Time.CurrentTime, 0, self.bar.window.wid, XEMBED_PROTOCOL_VERSION]
                u = xcffib.xproto.ClientMessageData.synthetic(data, ('I' * 5))
                event = xcffib.xproto.ClientMessageEvent.synthetic(format=32, window=icon.wid, type=self.conn.atoms['_XEMBED'], data=u)
                self.window.send_event(event)
            offset += (step + self.padding)
# NOTE(review): the line below appears to be a '@pytest.mark.parametrize'
# decorator whose prefix was lost during extraction — restore before running.
.parametrize('points_shape', [(3,), (3, 10)])
def test_rotate_points(points_shape):
    """A full 2*pi rotation about the z axis must leave the points unchanged."""
    points = np.random.random(points_shape)
    points_rotated = td.Geometry.rotate_points(points=points, axis=(0, 0, 1), angle=(2 * np.pi))
    assert np.allclose(points, points_rotated)
    # NOTE(review): this half-turn result is never asserted — looks like a
    # truncated test; confirm the intended check against the original.
    points_rotated = td.Geometry.rotate_points(points=points, axis=(0, 0, 1), angle=np.pi)
class OptionSeriesSolidgaugeSonificationTracksMappingTremoloDepth(Options):
    """Generated accessors for series.solidgauge.sonification.tracks.mapping.tremoloDepth.

    NOTE(review): each option below is declared twice — a getter and a setter
    with the SAME name and no decorators — so the later definition shadows
    the earlier one. This strongly resembles stripped ``@property`` /
    ``@<name>.setter`` decorators; confirm against the generated original.
    """

    def mapFunction(self):
        # Getter; default is None (unset).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter; stored as a plain (non-JavaScript) value.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def _check_is_parent_folders_are_aea_projects_recursively() -> None:
    """Walk from the CWD up towards home/root, raising ValueError if any
    ancestor directory contains an AEA project config file."""
    folder = Path('.').resolve()
    stop_at = (Path.home(), Path('/').resolve())
    while folder not in stop_at:
        entry_names = {entry.name for entry in folder.iterdir()}
        if DEFAULT_AEA_CONFIG_FILE in entry_names:
            raise ValueError('Folder {} has file named {}'.format(folder, DEFAULT_AEA_CONFIG_FILE))
        folder = folder.parent.resolve()
class OptionSeriesScatter3dSonificationContexttracksMappingGapbetweennotes(Options):
    """Generated accessors for series.scatter3d.sonification.contextTracks.mapping.gapBetweenNotes.

    NOTE(review): each option below is declared twice — a getter and a setter
    with the SAME name and no decorators — so the later definition shadows
    the earlier one. This strongly resembles stripped ``@property`` /
    ``@<name>.setter`` decorators; confirm against the generated original.
    """

    def mapFunction(self):
        # Getter; default is None (unset).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter; stored as a plain (non-JavaScript) value.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class SysTrayApp(object):
    """Win32 system-tray application: registers a hidden window class whose
    message map drives a notify icon, popup menu and balloon tips.

    NOTE(review): every method below whose first parameter is ``klass`` reads
    like a classmethod whose ``@classmethod`` decorator was lost during
    extraction (``initialize`` through ``_command``) — confirm against the
    original before running.
    """

    # Filled in by initialize(): custom notify message id, window class and atom.
    WM_NOTIFY = None
    WNDCLASS = None
    CLASS_ATOM = None
    # hwnd -> SysTrayApp instance map, shared across the class.
    _instance = None

    def initialize(klass):
        """Register the window class and message handler map (call once)."""
        # Explorer broadcasts 'TaskbarCreated' when it (re)starts.
        WM_RESTART = win32gui.RegisterWindowMessage('TaskbarCreated')
        klass.WM_NOTIFY = (win32con.WM_USER + 1)
        klass.WNDCLASS = win32gui.WNDCLASS()
        klass.WNDCLASS.hInstance = win32gui.GetModuleHandle(None)
        klass.WNDCLASS.lpszClassName = ('Py_' + klass.__name__)
        klass.WNDCLASS.style = (win32con.CS_VREDRAW | win32con.CS_HREDRAW)
        klass.WNDCLASS.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
        klass.WNDCLASS.hIcon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
        klass.WNDCLASS.hbrBackground = win32con.COLOR_WINDOW
        # Message id -> handler dispatch table used as the window procedure.
        klass.WNDCLASS.lpfnWndProc = {WM_RESTART: klass._restart, klass.WM_NOTIFY: klass._notify, win32con.WM_CLOSE: klass._close, win32con.WM_DESTROY: klass._destroy, win32con.WM_COMMAND: klass._command}
        klass.CLASS_ATOM = win32gui.RegisterClass(klass.WNDCLASS)
        klass._instance = {}
        return

    def _create(klass, hwnd, instance):
        """Register *instance* for *hwnd* and add its notify icon."""
        klass._instance[hwnd] = instance
        win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, (hwnd, 0, (win32gui.NIF_ICON | win32gui.NIF_MESSAGE), klass.WM_NOTIFY, klass.WNDCLASS.hIcon))
        instance.open()
        return

    def _restart(klass, hwnd, msg, wparam, lparam):
        """Re-add the tray icon after Explorer restarts (TaskbarCreated)."""
        win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, (hwnd, 0, (win32gui.NIF_ICON | win32gui.NIF_MESSAGE), klass.WM_NOTIFY, klass.WNDCLASS.hIcon))
        self = klass._instance[hwnd]
        self.open()
        return

    def _notify(klass, hwnd, msg, wparam, lparam):
        """Handle tray icon mouse events: double-click runs the default menu
        item, right-click shows the popup menu."""
        self = klass._instance[hwnd]
        if (lparam == win32con.WM_LBUTTONDBLCLK):
            menu = self.get_popup()
            wid = win32gui.GetMenuDefaultItem(menu, 0, 0)
            if (0 < wid):
                win32gui.PostMessage(hwnd, win32con.WM_COMMAND, wid, 0)
        elif (lparam == win32con.WM_RBUTTONUP):
            menu = self.get_popup()
            pos = win32gui.GetCursorPos()
            # Required so the menu closes when the user clicks elsewhere.
            win32gui.SetForegroundWindow(hwnd)
            win32gui.TrackPopupMenu(menu, win32con.TPM_LEFTALIGN, pos[0], pos[1], 0, hwnd, None)
            win32gui.PostMessage(hwnd, win32con.WM_NULL, 0, 0)
        elif (lparam == win32con.WM_LBUTTONUP):
            pass
        return True

    def _close(klass, hwnd, msg, wparam, lparam):
        win32gui.DestroyWindow(hwnd)
        return

    def _destroy(klass, hwnd, msg, wparam, lparam):
        """Tear down: drop the instance, remove the icon, quit the loop."""
        del klass._instance[hwnd]
        win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, (hwnd, 0))
        win32gui.PostQuitMessage(0)
        return

    def _command(klass, hwnd, msg, wparam, lparam):
        """Dispatch a menu command to the owning instance."""
        wid = win32gui.LOWORD(wparam)
        self = klass._instance[hwnd]
        self.choose(wid)
        return

    def __init__(self, name):
        self.logger = logging.getLogger(name)
        # Hidden top-level window that receives the tray messages.
        self.hwnd = win32gui.CreateWindow(self.CLASS_ATOM, name, (win32con.WS_OVERLAPPED | win32con.WS_SYSMENU), 0, 0, win32con.CW_USEDEFAULT, win32con.CW_USEDEFAULT, 0, 0, self.WNDCLASS.hInstance, None)
        self._create(self.hwnd, self)
        self.logger.info(f'create: name={name!r}')
        return

    def open(self):
        self.logger.info('open')
        win32gui.UpdateWindow(self.hwnd)
        return

    def run(self):
        """Blocking message loop."""
        self.logger.info('run')
        win32gui.PumpMessages()
        return

    def idle(self):
        # False once a WM_QUIT has been processed.
        return (not win32gui.PumpWaitingMessages())

    def close(self):
        self.logger.info('close')
        win32gui.PostMessage(self.hwnd, win32con.WM_CLOSE, 0, 0)
        return

    def set_icon(self, icon):
        self.logger.info(f'set_icon: {icon!r}')
        win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, (self.hwnd, 0, win32gui.NIF_ICON, 0, icon))
        return

    def set_text(self, text):
        """Set the icon's hover tooltip."""
        self.logger.info(f'set_text: {text!r}')
        win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, (self.hwnd, 0, win32gui.NIF_TIP, 0, 0, text))
        return

    def show_balloon(self, title, text, timeout=1):
        self.logger.info(f'show_balloon: {title!r}, {text!r}')
        win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, (self.hwnd, 0, win32gui.NIF_INFO, 0, 0, '', text, timeout, title, win32gui.NIIF_INFO))
        return

    # Menu id for the built-in Quit entry.
    IDI_QUIT = 100

    def get_popup(self):
        """Build the right-click menu (Quit is the default item)."""
        menu = win32gui.CreatePopupMenu()
        (item, _) = win32gui_struct.PackMENUITEMINFO(text='Quit', wID=self.IDI_QUIT)
        win32gui.InsertMenuItem(menu, 0, 1, item)
        win32gui.SetMenuDefaultItem(menu, 0, self.IDI_QUIT)
        (item, _) = win32gui_struct.PackMENUITEMINFO(text='Test', wID=123)
        win32gui.InsertMenuItem(menu, 0, 1, item)
        return menu

    def choose(self, wid):
        """React to a menu selection by widget id."""
        self.logger.info(f'choose: wid={wid!r}')
        if (wid == self.IDI_QUIT):
            self.close()
        return
def setCbText(txt):
    """Copy *txt* to the system clipboard, retrying every 10 ms until the
    clipboard can be acquired."""
    data = wx.TextDataObject(txt)
    while True:
        try:
            # Only try to open when nobody else currently holds the clipboard.
            if not wx.TheClipboard.IsOpened() and wx.TheClipboard.Open():
                wx.TheClipboard.SetData(data)
                wx.TheClipboard.Close()
                return
        except Exception:
            # Clipboard contention is transient; keep retrying.
            pass
        time.sleep(0.01)
def _setup_ensemble_experiment(config: ErtConfig, storage: StorageAccessor, args: Namespace, experiment_id: UUID) -> EnsembleExperiment:
    """Build an EnsembleExperiment run model, lowering MIN_REALIZATIONS to the
    active realization count (with a warning) when it would otherwise exceed it."""
    min_required = config.analysis_config.minimum_required_realizations
    active_realizations = _realizations(args, config.model_config.num_realizations)
    n_active = int(np.sum(active_realizations))
    if n_active < min_required:
        # Never require more realizations than are actually active.
        config.analysis_config.minimum_required_realizations = n_active
        ConfigWarning.ert_context_warn(
            f'Due to active_realizations {n_active} is lower than '
            f'MIN_REALIZATIONS {min_required}, MIN_REALIZATIONS has been '
            'set to match active_realizations.'
        )
    run_args = EnsembleExperimentRunArguments(
        random_seed=config.random_seed,
        active_realizations=active_realizations.tolist(),
        current_case=args.current_case,
        iter_num=int(args.iter_num),
        minimum_required_realizations=config.analysis_config.minimum_required_realizations,
        ensemble_size=config.model_config.num_realizations,
        stop_long_running=config.analysis_config.stop_long_running,
    )
    return EnsembleExperiment(run_args, config, storage, config.queue_config, experiment_id)
class CrossAttention(nn.Module):
    """Multi-head cross-attention block built from AITemplate-style ops.

    Projects the query (and context, defaulting to the query for
    self-attention) into per-head Q/K/V, runs a memory-efficient attention op
    and projects back to the query dimension, optionally adding a residual.
    """

    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype='float16'):
        super().__init__()
        inner_dim = (dim_head * heads)
        # Self-attention when no context dimension is supplied.
        context_dim = default(context_dim, query_dim)
        # NOTE(review): `scale` is computed but never used in forward() — the
        # attention op may apply scaling internally; confirm.
        self.scale = (dim_head ** (- 0.5))
        self.heads = heads
        self.dim_head = dim_head
        self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
        self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
        self.to_out = nn.Sequential(nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))

    def forward(self, x, context=None, mask=None, residual=None):
        # NOTE(review): `mask` is accepted but unused.
        nheads = self.heads
        d = self.dim_head
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        v = self.to_v(context)
        bs = q.shape()[0]
        # Split heads: [b, s, h*d] -> [b, h, s, d].
        q = ops.reshape()(q, [bs, (- 1), self.heads, self.dim_head])
        k = ops.reshape()(k, [bs, (- 1), self.heads, self.dim_head])
        v = ops.reshape()(v, [bs, (- 1), self.heads, self.dim_head])
        q = ops.permute()(q, [0, 2, 1, 3])
        k = ops.permute()(k, [0, 2, 1, 3])
        v = ops.permute()(v, [0, 2, 1, 3])
        attn_op = ops.mem_eff_attention(causal=False)
        out = attn_op(ops.reshape()(q, [bs, nheads, (- 1), d]), ops.reshape()(k, [bs, nheads, (- 1), d]), ops.reshape()(v, [bs, nheads, (- 1), d]))
        # Merge heads back: [b, h, s, d] -> [b, s, h*d].
        out = ops.reshape()(out, [bs, (- 1), (nheads * d)])
        proj = self.to_out(out)
        proj = ops.reshape()(proj, [bs, (- 1), (nheads * d)])
        if (residual is not None):
            return (proj + residual)
        else:
            return proj
class OptionPlotoptionsErrorbarSonificationPointgrouping(Options):
    """Generated accessors for plotOptions.errorbar.sonification.pointGrouping.

    NOTE(review): each option below is declared twice — a getter and a setter
    with the SAME name and no decorators — so the later definition shadows
    the earlier one. This strongly resembles stripped ``@property`` /
    ``@<name>.setter`` decorators; confirm against the generated original.
    """

    def algorithm(self):
        # Getter; default grouping algorithm is 'minmax'.
        return self._config_get('minmax')

    def algorithm(self, text: str):
        self._config(text, js_type=False)

    def enabled(self):
        # Grouping is enabled by default.
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def groupTimespan(self):
        # Default timespan (ms) covered by each group.
        return self._config_get(15)

    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    def prop(self):
        # Default point property used for grouping.
        return self._config_get('y')

    def prop(self, text: str):
        self._config(text, js_type=False)
class OptionPlotoptionsOrganizationSonificationContexttracksMappingTremoloDepth(Options):
    """Generated accessors for plotOptions.organization.sonification.contextTracks.mapping.tremoloDepth.

    NOTE(review): each option below is declared twice — a getter and a setter
    with the SAME name and no decorators — so the later definition shadows
    the earlier one. This strongly resembles stripped ``@property`` /
    ``@<name>.setter`` decorators; confirm against the generated original.
    """

    def mapFunction(self):
        # Getter; default is None (unset).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter; stored as a plain (non-JavaScript) value.
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class CharactersHandler():
    """Convenience handler for an account's playable-character roster.

    The roster is persisted in ``owner.db._playable_characters``; this class
    keeps the attribute initialised, purges stale (falsy) entries and fires
    the account's post-add/post-remove hooks.
    """

    def __init__(self, owner: 'DefaultAccount'):
        self.owner = owner
        self._ensure_playable_characters()
        self._clean()

    def _ensure_playable_characters(self):
        # First access: materialise the persistent list.
        if self.owner.db._playable_characters is None:
            self.owner.db._playable_characters = []

    def _clean(self):
        # Purge falsy entries (e.g. deleted characters) from the roster.
        self.owner.db._playable_characters = [
            entry for entry in self.owner.db._playable_characters if entry
        ]

    def add(self, character: 'DefaultCharacter'):
        """Add *character* to the roster (idempotent) and fire the post-add hook."""
        self._clean()
        if character not in self.owner.db._playable_characters:
            self.owner.db._playable_characters.append(character)
            self.owner.at_post_add_character(character)

    def remove(self, character: 'DefaultCharacter'):
        """Remove *character* from the roster and fire the post-remove hook."""
        self._clean()
        if character in self.owner.db._playable_characters:
            self.owner.db._playable_characters.remove(character)
            self.owner.at_post_remove_character(character)

    def all(self) -> list['DefaultCharacter']:
        """Return a fresh list holding the cleaned roster."""
        self._clean()
        return list(self.owner.db._playable_characters)

    def count(self) -> int:
        """Number of playable characters."""
        return len(self.all())

    __len__ = count

    def __iter__(self):
        return iter(self.all())
class OptionSeriesSunburstSonificationDefaultspeechoptions(Options):
    """Generated accessors for series.sunburst.sonification.defaultSpeechOptions.

    NOTE(review): the scalar options below are declared twice — a getter and
    a setter with the SAME name and no decorators — so the later definition
    shadows the earlier one. This strongly resembles stripped ``@property`` /
    ``@<name>.setter`` decorators; confirm against the generated original.
    """

    def activeWhen(self) -> 'OptionSeriesSunburstSonificationDefaultspeechoptionsActivewhen':
        # Nested sub-options object.
        return self._config_sub_data('activeWhen', OptionSeriesSunburstSonificationDefaultspeechoptionsActivewhen)

    def language(self):
        # Getter; default speech language.
        return self._config_get('en-US')

    def language(self, text: str):
        self._config(text, js_type=False)

    def mapping(self) -> 'OptionSeriesSunburstSonificationDefaultspeechoptionsMapping':
        return self._config_sub_data('mapping', OptionSeriesSunburstSonificationDefaultspeechoptionsMapping)

    def pointGrouping(self) -> 'OptionSeriesSunburstSonificationDefaultspeechoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesSunburstSonificationDefaultspeechoptionsPointgrouping)

    def preferredVoice(self):
        return self._config_get(None)

    def preferredVoice(self, text: str):
        self._config(text, js_type=False)

    def showPlayMarker(self):
        # Play marker shown by default.
        return self._config_get(True)

    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)

    def type(self):
        # Track type is fixed to 'speech' by default.
        return self._config_get('speech')

    def type(self, text: str):
        self._config(text, js_type=False)
class AppManager(event.Component):
    """Manage the list of registered apps and their sessions.

    Per app name, keeps ``(app, pending, connected)`` where *pending* holds
    sessions created but not yet attached to a websocket and *connected*
    holds live ones. Also maintains a weak id -> session map.
    """

    # Global counter of sessions that ever reached the connected state.
    total_sessions = 0

    def __init__(self):
        super().__init__()
        self._appinfo = {}
        # Weak values: sessions disappear from the map once garbage-collected.
        self._session_map = weakref.WeakValueDictionary()
        self._last_check_time = time.time()

    def register_app(self, app):
        """Register *app* under its name, keeping existing session lists."""
        assert isinstance(app, App)
        name = app.name
        if (not valid_app_name(name)):
            raise ValueError(('Given app does not have a valid name %r' % name))
        (pending, connected) = ([], [])
        if (name in self._appinfo):
            # Re-registration: preserve the existing session lists.
            (old_app, pending, connected) = self._appinfo[name]
            if (app.cls is not old_app.cls):
                logger.warning(('Re-defining app class %r' % name))
        self._appinfo[name] = (app, pending, connected)

    def create_default_session(self, cls=None):
        """Create the singleton '__default__' session (e.g. for notebooks)."""
        if ('__default__' in self._appinfo):
            raise RuntimeError('The default session can only be created once.')
        if (cls is None):
            cls = JsComponent
        # NOTE(review): as written this condition can never raise for a valid
        # subclass, and a non-type `cls` makes issubclass() itself raise an
        # uncaught TypeError. It looks like it should read
        # `if not (isinstance(cls, type) and issubclass(...)):` — confirm.
        if ((not isinstance(cls, type)) and issubclass(cls, (PyComponent, JsComponent))):
            raise TypeError('create_default_session() needs a JsComponent subclass.')
        app = App(cls)
        app.serve('__default__')
        session = Session('__default__')
        self._session_map[session.id] = session
        (_, pending, connected) = self._appinfo['__default__']
        pending.append(session)
        # Instantiate the app's component tree inside the session.
        app(flx_session=session, flx_is_app=True)
        return session

    def remove_default_session(self):
        """Close and forget the default session, if any."""
        s = self.get_default_session()
        if (s is not None):
            s.close()
        self._appinfo.pop('__default__', None)

    def get_default_session(self):
        """Return the most recent default session, or None."""
        x = self._appinfo.get('__default__', None)
        if (x is None):
            return None
        else:
            (_, pending, connected) = x
            sessions = (pending + connected)
            if sessions:
                return sessions[(- 1)]

    def _clear_old_pending_sessions(self, max_age=30):
        """Drop pending sessions older than *max_age* seconds (best effort)."""
        try:
            count = 0
            for name in self._appinfo:
                if (name == '__default__'):
                    continue
                (_, pending, _) = self._appinfo[name]
                to_remove = [s for s in pending if ((time.time() - s._creation_time) > max_age)]
                for s in to_remove:
                    self._session_map.pop(s.id, None)
                    pending.remove(s)
                count += len(to_remove)
            if count:
                logger.warning(('Cleared %i old pending sessions' % count))
        except Exception as err:
            # Housekeeping must never take the server down.
            logger.error(('Error when clearing old pending sessions: %s' % str(err)))

    def create_session(self, name, id=None, request=None):
        """Create a new pending session for the app registered as *name*."""
        # Opportunistic housekeeping, at most every 5 seconds.
        if ((time.time() - self._last_check_time) > 5):
            self._last_check_time = time.time()
            self._clear_old_pending_sessions()
        if (name == '__default__'):
            raise RuntimeError('There can be only one __default__ session.')
        elif (name not in self._appinfo):
            raise ValueError('Can only instantiate a session with a valid app name.')
        (app, pending, connected) = self._appinfo[name]
        session = Session(name, request=request)
        if (id is not None):
            session._id = id
        self._session_map[session.id] = session
        app(flx_session=session, flx_is_app=True)
        pending.append(session)
        logger.debug(('Instantiate app client %s' % session.app_name))
        return session

    def connect_client(self, ws, name, session_id, cookies=None):
        """Promote the pending session *session_id* to connected on *ws*."""
        (_, pending, connected) = self._appinfo[name]
        for session in pending:
            if (session.id == session_id):
                pending.remove(session)
                break
        else:
            raise RuntimeError(('Asked for session id %r, but could not find it' % session_id))
        assert (session.id == session_id)
        assert (session.status == Session.STATUS.PENDING)
        logger.info(('New session %s %s' % (name, session_id)))
        session._set_cookies(cookies)
        session._set_ws(ws)
        connected.append(session)
        AppManager.total_sessions += 1
        self.connections_changed(session.app_name)
        return session

    def disconnect_client(self, session):
        """Close *session* after its client disconnected."""
        if (session.app_name == '__default__'):
            logger.info('Default session lost connection to client.')
            return
        (_, pending, connected) = self._appinfo[session.app_name]
        try:
            connected.remove(session)
        except ValueError:
            # Session was never (or no longer) in the connected list.
            pass
        logger.info(('Session closed %s %s' % (session.app_name, session.id)))
        session.close()
        self.connections_changed(session.app_name)

    def has_app_name(self, name):
        """Case-insensitive lookup; returns the registered name or None."""
        name = name.lower()
        for key in self._appinfo.keys():
            if (key.lower() == name):
                return key
        else:
            return None

    def get_app_names(self):
        """Sorted list of registered app names."""
        return [name for name in sorted(self._appinfo.keys())]

    def get_session_by_id(self, id):
        """Return the session for *id*, or None if gone."""
        return self._session_map.get(id, None)

    def get_connections(self, name):
        """Copy of the connected-session list for app *name*."""
        (_, pending, connected) = self._appinfo[name]
        return list(connected)

    def connections_changed(self, name):
        """Event payload emitted when the connection set of *name* changes."""
        return dict(name=str(name))
class FreeCompoundChapterFragmentToken(CompoundToken):
    """Compound token whose two sub-tokens encode a chapter and a fragment number.

    Each accessor delegates to the inherited sub-token helpers:
    ``is_valid_1``/``to_number_1`` for the chapter part and
    ``is_valid_2``/``to_number_2`` for the fragment part.
    """

    def valid_chapter(self, parse_ascii):
        # True when the first sub-token parses as a chapter number.
        return self.is_valid_1(parse_ascii)

    def valid_fragment(self, parse_ascii):
        # True when the second sub-token parses as a fragment number.
        return self.is_valid_2(parse_ascii)

    def get_chapter(self, parse_ascii):
        return self.to_number_1(parse_ascii)

    def get_fragment(self, parse_ascii):
        return self.to_number_2(parse_ascii)
class RichStatus():
    """Truthy/falsy status object carrying arbitrary metadata in ``info``.

    Always records the current hostname and software version; unknown
    attribute access falls through to the info dict.
    """

    def __init__(self, ok, **kwargs):
        self.ok = ok
        self.info = kwargs
        self.info['hostname'] = SystemInfo.MyHostName
        self.info['version'] = Version

    def __getattr__(self, key):
        # Missing attributes resolve from info (None when absent).
        return self.info.get(key)

    def __bool__(self):
        return self.ok

    def __nonzero__(self):
        # Python 2 compatibility alias for __bool__.
        return bool(self)

    def __contains__(self, key):
        return key in self.info

    def __str__(self):
        pairs = ['%s=%s' % (key, repr(self.info[key])) for key in sorted(self.info.keys())]
        suffix = ' '.join(pairs)
        if suffix:
            suffix = ' ' + suffix
        return '<RichStatus %s%s>' % ('OK' if self else 'BAD', suffix)

    def as_dict(self):
        """Flatten ok + info into one plain dict."""
        result = {'ok': self.ok}
        result.update(self.info)
        return result

    def fromError(self, error, **kwargs):
        # NOTE(review): takes self but builds a fresh instance — looks like a
        # stripped @classmethod; confirm against the original source.
        kwargs['error'] = error
        return RichStatus(False, **kwargs)

    def OK(self, **kwargs):
        # NOTE(review): same stripped-@classmethod suspicion as fromError.
        return RichStatus(True, **kwargs)
def test_hdiv_area(RT2):
    """Integrals of a constant unit HDiv field equal the measure of each facet set."""
    f = project(as_vector([0.8, 0.6]), RT2)
    tol = 1e-07
    # Exterior facets: top, bottom, top+bottom, vertical sides.
    for measure, expected in ((ds_t, 1.0), (ds_b, 1.0), (ds_tb, 2.0), (ds_v, 2.0)):
        assert abs(assemble(dot(f, f) * measure) - expected) < tol
    # Interior facets, probing all restriction combinations on both measures.
    for measure in (dS_h, dS_v):
        for a, b in (('+', '+'), ('-', '-'), ('+', '-')):
            assert abs(assemble(dot(f(a), f(b)) * measure) - 3.0) < tol
def insert_observation(selected, sample_collection, component_observations=None, child_name=None):
    """Create Observation documents for the selected rows of a Sample Collection,
    mark the corresponding Observation Sample Collection rows as Collected and
    update the parent Sample Collection's overall status.

    NOTE(review): this body was reconstructed from whitespace-mangled source;
    confirm the nesting of the later blocks against the original.

    :param selected: JSON string with the selected observation rows.
    :param sample_collection: name of the Sample Collection document.
    :param component_observations: optional JSON string with component rows.
    :param child_name: optional Observation Sample Collection child row name.
    """
    try:
        sample_col_doc = frappe.db.get_value('Sample Collection', sample_collection, ['reference_name', 'patient', 'referring_practitioner'], as_dict=1)
        selected = json.loads(selected)
        if (component_observations and (len(component_observations) > 0)):
            component_observations = json.loads(component_observations)
        # Map of observation row -> created specimen.
        comp_obs_ref = create_specimen(sample_col_doc.get('patient'), selected, component_observations)
        for (i, obs) in enumerate(selected):
            parent_observation = obs.get('component_observation_parent')
            if child_name:
                # Child flow: resolve the parent from the stored child row.
                parent_observation = frappe.db.get_value('Observation Sample Collection', child_name, 'component_observation_parent')
            if (obs.get('status') == 'Open'):
                if ((not obs.get('has_component')) or (obs.get('has_component') == 0)):
                    # Simple (non-component) observation.
                    observation = add_observation(patient=sample_col_doc.get('patient'), template=obs.get('observation_template'), doc='Sample Collection', docname=sample_collection, parent=parent_observation, specimen=(comp_obs_ref.get(obs.get('name')) or comp_obs_ref.get((i + 1)) or comp_obs_ref.get(obs.get('idx'))), invoice=sample_col_doc.get('reference_name'), practitioner=sample_col_doc.get('referring_practitioner'), child=(obs.get('reference_child') if obs.get('reference_child') else ''), service_request=obs.get('service_request'))
                    if observation:
                        frappe.db.set_value('Observation Sample Collection', obs.get('name'), {'status': 'Collected', 'collection_date_time': now_datetime(), 'specimen': comp_obs_ref.get(obs.get('name'))})
                elif obs.get('component_observations'):
                    # Component observation: create one Observation per component row.
                    component_observations = json.loads(obs.get('component_observations'))
                    for (j, comp) in enumerate(component_observations):
                        observation = add_observation(patient=sample_col_doc.get('patient'), template=comp.get('observation_template'), doc='Sample Collection', docname=sample_collection, parent=obs.get('component_observation_parent'), specimen=(comp_obs_ref.get((j + 1)) or comp_obs_ref.get(obs.get('name'))), invoice=sample_col_doc.get('reference_name'), practitioner=sample_col_doc.get('referring_practitioner'), child=(obs.get('reference_child') if obs.get('reference_child') else ''), service_request=obs.get('service_request'))
                        if observation:
                            comp['status'] = 'Collected'
                            comp['collection_date_time'] = now_datetime()
                            comp['specimen'] = (comp_obs_ref.get((j + 1)) or comp_obs_ref.get(obs.get('name')))
                    # NOTE(review): uses the leaked loop variable `j` from the
                    # loop above — verify this matches the original intent.
                    frappe.db.set_value('Observation Sample Collection', obs.get('name'), {'collection_date_time': now_datetime(), 'component_observations': json.dumps(component_observations, default=str), 'status': 'Collected', 'specimen': (comp_obs_ref.get((j + 1)) or comp_obs_ref.get(obs.get('name')))})
            if component_observations:
                # Mirror the collection state into the passed-in component rows.
                for (j, comp) in enumerate(component_observations):
                    if (comp.get('observation_template') == obs.get('observation_template')):
                        comp['status'] = 'Collected'
                        comp['collection_date_time'] = now_datetime()
                        comp['specimen'] = comp_obs_ref.get((j + 1))
            child_db_set_dict = {'component_observations': json.dumps(component_observations, default=str)}
            # Only mark the child row Collected when no component remains Open.
            if (component_observations and (not any(((comp['status'] == 'Open') for comp in component_observations)))):
                child_db_set_dict['status'] = 'Collected'
            if child_name:
                frappe.db.set_value('Observation Sample Collection', child_name, child_db_set_dict)
        if sample_collection:
            # Roll up the overall collection status onto the parent document.
            non_collected_samples = frappe.db.get_all('Observation Sample Collection', {'parent': sample_collection, 'status': ['!=', 'Collected']})
            if (non_collected_samples and (len(non_collected_samples) > 0)):
                set_status = 'Partly Collected'
            else:
                set_status = 'Collected'
            frappe.db.set_value('Sample Collection', sample_collection, 'status', set_status)
    except Exception as e:
        frappe.log_error(message=e, title='Failed to mark Collected!')
    # Always signal completion to the UI, even after an error.
    frappe.publish_realtime(event='observation_creation_progress', message='Completed', doctype='Sample Collection', docname=sample_collection)
class LiteSATABISTUnitCSR(Module, AutoCSR):
    """CSR front-end for a SATA BIST unit.

    Exposes the unit's sector/count/random parameters plus a loop count via
    CSRs and drives the unit through a small FSM that restarts it ``loops``
    times while counting elapsed clock cycles.
    """

    def __init__(self, bist_unit):
        # Control CSRs.
        self._start = CSR()
        self._sector = CSRStorage(48)
        self._count = CSRStorage(16)
        self._loops = CSRStorage(8)
        self._random = CSRStorage()
        # Status CSRs.
        self._done = CSRStatus()
        self._aborted = CSRStatus()
        self._errors = CSRStatus(32)
        self._cycles = CSRStatus(32)
        self.submodules += bist_unit
        # Start pulse: a write access to the start CSR.
        start = (self._start.r & self._start.re)
        # NOTE(review): `done` is bound but not referenced below.
        done = self._done.status
        loops = self._loops.storage
        self.comb += [bist_unit.sector.eq(self._sector.storage), bist_unit.count.eq(self._count.storage), bist_unit.random.eq(self._random.storage), self._aborted.status.eq(bist_unit.aborted), self._errors.status.eq(bist_unit.errors)]
        loop = Signal(8)
        cycles = Signal(32)
        self.comb += self._cycles.status.eq(cycles)
        # FSM: IDLE waits for start, then CHECK/START/WAIT-DONE iterate the
        # unit `loops` times; `cycles` increments on every non-idle state.
        self.submodules.fsm = fsm = FSM(reset_state='IDLE')
        fsm.act('IDLE', self._done.status.eq(1), NextValue(loop, 0), If(start, NextValue(cycles, 0), NextState('CHECK')))
        fsm.act('CHECK', If((loop < loops), NextState('START')).Else(NextState('IDLE')), NextValue(cycles, (cycles + 1)))
        fsm.act('START', bist_unit.start.eq(1), NextState('WAIT-DONE'), NextValue(cycles, (cycles + 1)))
        fsm.act('WAIT-DONE', If(bist_unit.done, NextValue(loop, (loop + 1)), NextState('CHECK')), NextValue(cycles, (cycles + 1)))
def split_channels(color: str):
    """Split an 8-hex-digit colour string into a channel triple and alpha.

    The input is zero-padded to 8 digits; the leading byte is treated as
    inverted alpha and the remaining three bytes are returned last-byte-first.
    # NOTE(review): confirm the intended channel order against the callers.
    """
    padded = color.zfill(8)
    if len(padded) != 8:
        # Inputs longer than 8 digits cannot be a valid colour here.
        raise RuntimeError('Something is wrong in code logics.')
    channels = (
        parse.norm_hex_channel(padded[6:]),
        parse.norm_hex_channel(padded[4:6]),
        parse.norm_hex_channel(padded[2:4]),
    )
    alpha = 1 - parse.norm_hex_channel(padded[:2])
    return (channels, alpha)
def exec_wait(_cmd, _output_capture=False, _timeout=0):
    """Run *_cmd* through the shell and wait for it to finish.

    :param _cmd: shell command line to execute.
    :param _output_capture: when True, capture and return combined
        stdout/stderr decoded as UTF-8.
    :param _timeout: when non-zero, prefix the command with the external
        timeout tool (_CMD_TIMEOUT) so it is killed after that many seconds.
    :return: tuple ``(exit_code, output)`` — output is '' unless captured.
    """
    _output = ''
    if (_timeout != 0):
        _cmd = (((_CMD_TIMEOUT + str(_timeout)) + ' ') + _cmd)
        _logger.debug('{func} - Executing command using the timeout |{timeout}| '.format(func='exec_wait', timeout=_timeout))
    _logger.debug('{func} - cmd |{cmd}| '.format(func='exec_wait', cmd=_cmd))
    # NOTE: shell=True runs through the shell — _cmd must come from a trusted
    # source, never from unsanitized user input.
    if _output_capture:
        process = subprocess.Popen(_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # Bug fix: the previous wait()-then-read() pattern deadlocks once the
        # child fills the pipe buffer; communicate() drains while waiting.
        (stdout_data, _unused) = process.communicate()
        _exit_code = process.returncode
        _output = stdout_data.decode('utf-8')
    else:
        process = subprocess.Popen(_cmd, shell=True)
        _exit_code = process.wait()
    _logger.debug('{func} - Executed command - cmd |{cmd}| - exit_code |{exit_code}| - output |{output}| '.format(func='exec_wait', cmd=_cmd, exit_code=_exit_code, output=_output))
    return (_exit_code, _output)
class TAInstance():
    # Technical-analysis indicator library over K-line records (dicts with
    # 'High'/'Low'/'Close'/'Volume' keys). Math helpers live in Std
    # (defined elsewhere). Uses xrange, so this is Python-2 code.
    def __init__(self, logPtr=None):
        # logPtr: optional callback(name, args_csv) used to trace indicator calls.
        self._logPtr = logPtr

    def _log(self, name, *args):
        # Forward the indicator name and comma-joined args to the log callback, if any.
        if (self._logPtr is not None):
            self._logPtr(name, safe_str(','.join(map(str, args))))

    # NOTE(review): Highest/Lowest take no `self` and have no @staticmethod
    # decorator visible here, so calling them on an instance would pass the
    # instance as `records` — confirm intended usage against callers.
    def Highest(records, n, attr=None):
        # 5e-324 is the smallest positive float: acts as -inf seed for max().
        return Std._filt(records, n, attr, 5e-324, max)

    def Lowest(records, n, attr=None):
        # 1e308 acts as a +inf seed for min().
        return Std._filt(records, n, attr, 1.e+308, min)

    def MA(self, records, period=9):
        # Simple moving average of closing ticks.
        self._log('MA', period)
        return Std._sma(Std._ticks(records), period)

    def SMA(self, records, period=9):
        # Alias of MA: same simple moving average.
        self._log('SMA', period)
        return Std._sma(Std._ticks(records), period)

    def EMA(self, records, period=9):
        # Exponential moving average of closing ticks.
        self._log('EMA', period)
        return Std._ema(Std._ticks(records), period)

    def MACD(self, records, fastEMA=12, slowEMA=26, signalEMA=9):
        # Returns [DIF, signal, histogram]: DIF = fastEMA - slowEMA,
        # signal = EMA(DIF), histogram = DIF - signal.
        self._log('MACD', fastEMA, slowEMA, signalEMA)
        ticks = Std._ticks(records)
        slow = Std._ema(ticks, slowEMA)
        fast = Std._ema(ticks, fastEMA)
        dif = Std._diff(fast, slow)
        sig = Std._ema(dif, signalEMA)
        histogram = Std._diff(dif, sig)
        return [dif, sig, histogram]

    def BOLL(self, records, period=20, multiplier=2):
        # Bollinger bands: returns [upper, middle(SMA), lower].
        # The first j entries are None (warm-up / leading-None ticks).
        self._log('BOLL', period, multiplier)
        S = Std._ticks(records)
        j = (period - 1)
        # Skip past any leading None ticks beyond the warm-up window.
        while ((j < len(S)) and (S[j] is None)):
            j += 1
        UP = Std._zeros(len(S))
        MB = Std._zeros(len(S))
        DN = Std._zeros(len(S))
        Std._set(UP, 0, j, None)
        Std._set(MB, 0, j, None)
        Std._set(DN, 0, j, None)
        n = 0.0  # running sum over the current window
        for i in xrange(j, len(S)):
            if (i == j):
                # Seed the window sum once.
                for k in xrange(0, period):
                    n += S[k]
            else:
                # Slide the window: add newest, drop oldest.
                n = ((n + S[i]) - S[(i - period)])
            ma = (n / period)
            # Population standard deviation over the window.
            d = 0
            for k in xrange(((i + 1) - period), (i + 1)):
                d += ((S[k] - ma) * (S[k] - ma))
            stdev = math.sqrt((d / period))
            up = (ma + (multiplier * stdev))
            dn = (ma - (multiplier * stdev))
            UP[i] = up
            MB[i] = ma
            DN[i] = dn
        return [UP, MB, DN]

    def KDJ(self, records, n=9, k=3, d=3):
        # Stochastic oscillator: returns [K, D, J]; first n-1 entries are None.
        self._log('KDJ', n, k, d)
        RSV = Std._zeros(len(records))
        Std._set(RSV, 0, (n - 1), None)
        K = Std._zeros(len(records))
        D = Std._zeros(len(records))
        J = Std._zeros(len(records))
        hs = Std._zeros(len(records))
        ls = Std._zeros(len(records))
        for i in xrange(0, len(records)):
            hs[i] = records[i]['High']
            ls[i] = records[i]['Low']
        for i in xrange(0, len(records)):
            if (i >= (n - 1)):
                c = records[i]['Close']
                # Highest high / lowest low over the trailing n-bar window.
                h = Std._cmp(hs, (i - (n - 1)), (i + 1), max)
                l = Std._cmp(ls, (i - (n - 1)), (i + 1), min)
                # RSV pinned to 100 when the window is flat (h == l).
                RSV[i] = ((100 * ((c - l) / (h - l))) if (h != l) else 100)
                # Smoothed K and D (weighted with their previous values).
                K[i] = (float(((1 * RSV[i]) + ((k - 1) * K[(i - 1)]))) / k)
                D[i] = (float(((1 * K[i]) + ((d - 1) * D[(i - 1)]))) / d)
            else:
                # Warm-up seed values.
                K[i] = D[i] = 50.0
                RSV[i] = 0.0
            J[i] = ((3 * K[i]) - (2 * D[i]))
        # Blank out the warm-up region after the recursion has been seeded.
        for i in xrange(0, (n - 1)):
            K[i] = D[i] = J[i] = None
        return [K, D, J]

    def RSI(self, records, period=14):
        # Wilder-smoothed Relative Strength Index; first `period` entries None.
        self._log('RSI', period)
        n = period
        rsi = Std._zeros(len(records))
        Std._set(rsi, 0, len(rsi), None)
        if (len(records) < n):
            return rsi
        ticks = Std._ticks(records)
        deltas = Std._move_diff(ticks)
        seed = deltas[:n]
        up = 0.0
        down = 0.0
        # Seed average gain/loss from the first n deltas.
        for i in xrange(0, len(seed)):
            if (seed[i] >= 0):
                up += seed[i]
            else:
                down += seed[i]
        up /= n
        down /= n
        down = (- down)  # losses accumulated as negatives; flip to positive
        if (down != 0):
            rs = (up / down)
        else:
            rs = 0
        rsi[n] = (100 - (100 / (1 + rs)))
        delta = 0.0
        upval = 0.0
        downval = 0.0
        # Wilder smoothing for the remaining bars.
        for i in xrange((n + 1), len(ticks)):
            delta = deltas[(i - 1)]
            if (delta > 0):
                upval = delta
                downval = 0.0
            else:
                upval = 0.0
                downval = (- delta)
            up = (((up * (n - 1)) + upval) / n)
            down = (((down * (n - 1)) + downval) / n)
            rs = (0 if (down == 0) else (up / down))
            rsi[i] = (100 - (100 / (1 + rs)))
        return rsi

    def OBV(self, records):
        # On-Balance Volume: cumulative volume, added on up-closes,
        # subtracted on down-closes.
        self._log('OBV')
        if (len(records) == 0):
            return []
        if ('Close' not in records[0]):
            # NOTE(review): raising a string is invalid in Python 3 (and a
            # TypeError in modern Python 2) — should be a proper exception.
            raise 'self.OBV argument must KLine'
        R = Std._zeros(len(records))
        for i in xrange(0, len(records)):
            if (i == 0):
                R[i] = records[i]['Volume']
            elif (records[i]['Close'] >= records[(i - 1)]['Close']):
                R[i] = (R[(i - 1)] + records[i]['Volume'])
            else:
                R[i] = (R[(i - 1)] - records[i]['Volume'])
        return R

    def ATR(self, records, period=14):
        # Average True Range with Wilder smoothing; simple average during warm-up.
        self._log('ATR', period)
        if (len(records) == 0):
            return []
        if ('Close' not in records[0]):
            # NOTE(review): string raise — same issue as in OBV above.
            raise 'self.ATR argument must KLine'
        R = Std._zeros(len(records))
        m = 0.0  # running sum of TR (for the warm-up average)
        n = 0.0  # current smoothed ATR
        for i in xrange(0, len(records)):
            TR = 0
            if (i == 0):
                TR = (records[i]['High'] - records[i]['Low'])
            else:
                # True range: max of H-L, |H - prevClose|, |prevClose - L|.
                TR = max((records[i]['High'] - records[i]['Low']), abs((records[i]['High'] - records[(i - 1)]['Close'])), abs((records[(i - 1)]['Close'] - records[i]['Low'])))
            m += TR
            if (i < period):
                n = (m / (i + 1))
            else:
                n = ((((period - 1) * n) + TR) / period)
            R[i] = n
        return R

    def Alligator(self, records, jawLength=13, teethLength=8, lipsLength=5):
        # Williams Alligator: three SMMAs of the median price (H+L)/2,
        # shifted forward by the conventional 8/5/3 offsets via None padding.
        self._log('Alligator', jawLength, teethLength, lipsLength)
        ticks = []
        for i in xrange(0, len(records)):
            ticks.append(((records[i]['High'] + records[i]['Low']) / 2))
        return [(([None] * 8) + Std._smma(ticks, jawLength)), (([None] * 5) + Std._smma(ticks, teethLength)), (([None] * 3) + Std._smma(ticks, lipsLength))]

    def CMF(self, records, period=20):
        # Chaikin Money Flow over a sliding `period` window:
        # sum(money-flow volume) / sum(volume).
        self._log('CMF', period)
        ret = []
        sumD = 0.0
        sumV = 0.0
        arrD = []
        arrV = []
        for i in xrange(0, len(records)):
            d = 0.0
            if (records[i]['High'] != records[i]['Low']):
                # Money-flow multiplier * volume; 0 when the bar is flat.
                d = (((((2 * records[i]['Close']) - records[i]['Low']) - records[i]['High']) / (records[i]['High'] - records[i]['Low'])) * records[i]['Volume'])
            arrD.append(d)
            arrV.append(records[i]['Volume'])
            sumD += d
            sumV += records[i]['Volume']
            if (i >= period):
                # Drop the value that just left the window.
                sumD -= arrD.pop(0)
                sumV -= arrV.pop(0)
            ret.append((sumD / sumV))
        return ret
class OptionPlotoptionsVennSonificationDefaultinstrumentoptionsActivewhen(Options):
    # Config accessors for the venn-series sonification `activeWhen` options
    # (crossingDown, crossingUp, max, min, prop), delegating to the Options
    # base class (_config_get / _config).
    #
    # NOTE(review): each name is defined twice (getter-shaped then
    # setter-shaped) with no @property/@x.setter decorators visible here, so
    # as written the second def shadows the first. Presumably the decorators
    # were lost when this file was flattened — confirm against the original.
    def crossingDown(self):
        # Get the crossingDown threshold (None when unset).
        return self._config_get(None)

    def crossingDown(self, num: float):
        # Set the crossingDown threshold.
        self._config(num, js_type=False)

    def crossingUp(self):
        # Get the crossingUp threshold (None when unset).
        return self._config_get(None)

    def crossingUp(self, num: float):
        # Set the crossingUp threshold.
        self._config(num, js_type=False)

    def max(self):
        # Get the max bound (None when unset).
        return self._config_get(None)

    def max(self, num: float):
        # Set the max bound.
        self._config(num, js_type=False)

    def min(self):
        # Get the min bound (None when unset).
        return self._config_get(None)

    def min(self, num: float):
        # Set the min bound.
        self._config(num, js_type=False)

    def prop(self):
        # Get the point property name the condition applies to (None when unset).
        return self._config_get(None)

    def prop(self, text: str):
        # Set the point property name.
        self._config(text, js_type=False)
# Fix: the two leading `.parametrize(...)` lines were bare attribute accesses
# (a SyntaxError) — the `@pytest.mark` decorator prefix was evidently stripped;
# restore it so the test is collected and parametrized again.
@pytest.mark.parametrize('ops', XP_OPS)
@pytest.mark.parametrize('depth,dirs,nO,batch_size,nI', [(1, 1, 1, 1, 1), (1, 1, 2, 1, 1), (1, 1, 2, 1, 2), (2, 1, 1, 1, 1), (2, 1, 2, 2, 2), (1, 2, 2, 1, 1), (2, 2, 2, 2, 2)])
def test_lstm_forward_training(ops, depth, dirs, nO, batch_size, nI):
    """The backend `ops` LSTM forward pass matches the reference Ops implementation."""
    reference_ops = Ops()
    (params, H0, C0, X, size_at_t) = get_lstm_args(depth, dirs, nO, batch_size, nI)
    reference = reference_ops.lstm_forward_training(params, H0, C0, X, size_at_t)
    (Y, fwd_state) = ops.lstm_forward_training(params, H0, C0, X, size_at_t)
    # Compare intermediate state buffers and the output against the reference.
    assert_allclose(fwd_state[2], reference[1][2], atol=0.0001, rtol=0.001)
    assert_allclose(fwd_state[1], reference[1][1], atol=0.0001, rtol=0.001)
    assert_allclose(Y, reference[0], atol=0.0001, rtol=0.001)
def test_invalid_toml(tmp_path, capsys):
    """Running against a dir with a syntactically broken .mdformat.toml fails with a TOML error."""
    (tmp_path / '.mdformat.toml').write_text(']invalid TOML[')
    md_file = tmp_path / 'test_markdown.md'
    md_file.write_text('some markdown\n')
    exit_code = run((str(md_file),))
    assert exit_code == 1
    assert 'Invalid TOML syntax' in capsys.readouterr().err
# Fix: the leading `.compilertest` line was a bare attribute access (SyntaxError);
# the `@pytest.mark` prefix was evidently stripped — restore the marker so the
# test compiles and keeps its `compilertest` selection marker.
@pytest.mark.compilertest
def test_set_max_request_header_v3():
    """A v3alpha1 Module's max_request_headers_kb is propagated to every HTTP filter in the Envoy config."""
    yaml = '\n---\napiVersion: getambassador.io/v3alpha1\nkind: Module\nmetadata:\n name: ambassador\n namespace: default\nspec:\n config:\n max_request_headers_kb: 96\n---\napiVersion: getambassador.io/v3alpha1\nkind: Mapping\nmetadata:\n name: ambassador\n namespace: default\nspec:\n hostname: "*"\n prefix: /test/\n service: test:9999\n'
    econf = _get_envoy_config(yaml)
    expected = 96
    key_found = False
    conf = econf.as_dict()
    # Walk every listener -> filter chain -> filter and check the limit.
    for listener in conf['static_resources']['listeners']:
        for filter_chain in listener['filter_chains']:
            for f in filter_chain['filters']:
                max_req_headers = f['typed_config'].get('max_request_headers_kb', None)
                assert (max_req_headers is not None), f"max_request_headers_kb not found on typed_config: {f['typed_config']}"
                print(f'Found max_req_headers = {max_req_headers}')
                key_found = True
                assert (expected == int(max_req_headers)), 'max_request_headers_kb must equal the value set on the ambassador Module'
    assert key_found, 'max_request_headers_kb must be found in the envoy config'