code
stringlengths
281
23.7M
def check_keylayout(filename):
    """Validate the structure of a generated macOS .keylayout file.

    Checks that each of the five keyMaps exists exactly once, that every
    non-excluded key is unique and either references an existing action or
    emits at most one character, and that every dead key has a unique
    'none' -> state transition and a one-character terminator.
    """
    path = os.path.join('.', 'dist', filename + '.keylayout')
    # recover=True: .keylayout files are not always strictly well-formed XML.
    tree = etree.parse(path, etree.XMLParser(recover=True))
    dead_keys = []
    for keymap_index in range(5):
        # FIX: the xpath predicates were missing their attribute names
        # ('[="{x}"]' is not valid xpath). Per the keylayout format, keyMap
        # is selected by @index, key by @code, action by @id and the
        # terminator/when by @state.
        keymap_query = f'//keyMap[@index="{keymap_index}"]'
        keymap = tree.xpath(keymap_query)
        assert len(keymap) == 1, f'{keymap_query} should be unique'
        # Modifier / function key codes that carry no output.
        excluded_keys = [54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
                         68, 73, 74, 90, 93, 94, 95]
        for key_index in range(126):
            if key_index in excluded_keys:
                continue
            key_query = f'{keymap_query}/key[@code="{key_index}"]'
            key = tree.xpath(key_query)
            assert len(key) == 1, f'{key_query} should be unique'
            action_id = key[0].get('action')
            if action_id:
                if action_id.startswith('dead_'):
                    dead_keys.append(action_id[5:])
                action_query = f'//actions/action[@id="{action_id}"]'
                action = tree.xpath(action_query)
                assert len(action) == 1, f'{action_query} should be unique'
            else:
                assert len(key[0].get('output')) <= 1, f'{key_query} should have a one-char output'
    # Every dead key must transition from the 'none' state to its own state
    # and have a single one-character terminator.
    for dk in dead_keys:
        when_query = f'//actions/action[@id="dead_{dk}"]/when'
        when = tree.xpath(when_query)
        assert len(when) == 1, f'{when_query} should be unique'
        assert when[0].get('state') == 'none'
        assert when[0].get('next') == dk
        terminator_query = f'//terminators/when[@state="{dk}"]'
        terminator = tree.xpath(terminator_query)
        assert len(terminator) == 1, f'{terminator_query} should be unique'
        assert len(terminator[0].get('output')) == 1
def gem_to_loopy(gem_expr, var2terminal, scalar_type):
    """Lower a GEM expression into a loopy kernel via Impero.

    Returns a 2-tuple of the generated loopy kernel and the
    OutputKernelArg wrapping the kernel's 'output' argument.
    """
    # Scalar expressions still get a one-element output buffer.
    shape = gem_expr.shape if gem_expr.shape else (1,)
    indices = make_indices(len(shape))
    indexed_expr = gem.Indexed(gem_expr, indices)

    out_arg = loopy.GlobalArg('output', shape=shape, dtype=scalar_type,
                              is_input=True, is_output=True)
    input_args = [loopy.GlobalArg(var.name, shape=var.shape, dtype=scalar_type)
                  for var in var2terminal]
    args = [out_arg] + input_args

    # Assign the (preprocessed) expression into the output variable.
    ret_vars = [gem.Indexed(gem.Variable('output', shape), indices)]
    preprocessed = impero_utils.preprocess_gem([indexed_expr])
    assignments = list(zip(ret_vars, preprocessed))
    impero_c = impero_utils.compile_gem(assignments, (), remove_zeros=False)

    kernel = generate_loopy(impero_c, args, scalar_type, 'slate_loopy', [],
                            log=PETSc.Log.isActive())
    return kernel, OutputKernelArg(out_arg)
# FIX: the decorator owner was lost during extraction -- a bare
# ('args,expected', [...]) call is not valid syntax. Restored as
# pytest.mark.parametrize, which matches the payload shape.
@pytest.mark.parametrize(
    'args,expected',
    [
        (
            [],
            {
                'db': {'driver': 'mysql', 'pass': 'secret', 'user': '${oc.env:USER}'},
                'ui': {'windows': {'create_db': True, 'view': True}},
                'schema': {
                    'database': 'school',
                    'tables': [
                        {
                            'name': 'students',
                            'fields': [{'name': 'string'}, {'class': 'int'}],
                        },
                        {
                            'name': 'exams',
                            'fields': [
                                {'profession': 'string'},
                                {'time': 'data'},
                                {'class': 'int'},
                            ],
                        },
                    ],
                },
            },
        )
    ],
)
def test_advanced_ad_hoc_composition(monkeypatch: Any, tmpdir: Path, args: List[str], expected: Any) -> None:
    """Run the ad-hoc composition example and compare its printed config."""
    monkeypatch.setenv('USER', 'test_user')
    cmd = [
        'examples/advanced/ad_hoc_composition/hydra_compose_example.py',
        f'hydra.run.dir={tmpdir}',
        'hydra.job.chdir=True',
    ]
    result, _err = run_python_script(cmd)
    assert OmegaConf.create(result) == OmegaConf.create(expected)
def execution(execution: Dict[str, Any], filler: Dict[str, Any]) -> Dict[str, Any]:
    """Merge a normalized 'exec' section into the given test filler.

    Defaults 'origin' to 'caller' when absent, compiles any Vyper LLL
    source into bytecode (verifying it against explicit 'code' if both are
    present), applies DEFAULT_EXECUTION defaults, and deep-merges the
    result under the filler's test name.
    """
    execution = normalize_execution(execution or {})

    # The transaction origin defaults to the caller.
    if 'caller' in execution and 'origin' not in execution:
        execution = assoc(execution, 'origin', execution['caller'])

    if 'vyperLLLCode' in execution:
        compiled = compile_vyper_lll(execution['vyperLLLCode'])
        if 'code' in execution and compiled != execution['code']:
            raise ValueError('Compiled Vyper LLL code does not match')
        execution = assoc(execution, 'code', compiled)

    execution = merge(DEFAULT_EXECUTION, execution)
    test_name = get_test_name(filler)
    return deep_merge(filler, {test_name: {'exec': execution}})
class HCI_Cmd_LE_Set_Extended_Scan_Enable(HCI_Command):
    """LE Set Extended Scan Enable HCI command."""

    def __init__(self, enable=True, filter_dups=1, duration=0, period=0):
        super().__init__(b'\x08', b'B')
        # Encode duration in 10-unit steps and period in 1280-unit steps,
        # both capped so the encoded value fits in 16 bits; the period is
        # never allowed to be shorter than the duration.
        # (assumes caller passes raw time values -- TODO confirm units)
        duration_units = int(round(min(65535 * 10, duration) / 10))
        period_units = int(round(min(65535 * 1280, max(period, duration)) / 1280))
        self.payload.append(Bool('enable', enable))
        self.payload.append(EnumByte('filter', filter_dups,
                                     {0: 'Disable', 1: 'Enable', 2: 'Eanble with reset'}))
        self.payload.append(UShortInt('Duration', duration_units, endian='little'))
        self.payload.append(UShortInt('Period', period_units, endian='little'))
def filter_firewall_decrypted_traffic_mirror_data(json):
    """Project the payload onto the known option fields.

    Invalid fields are stripped first; only recognised options with
    non-None values are kept.
    """
    option_list = ['dstmac', 'interface', 'name', 'traffic_source', 'traffic_type']
    json = remove_invalid_fields(json)
    return {
        attribute: json[attribute]
        for attribute in option_list
        if attribute in json and json[attribute] is not None
    }
# NOTE(review): the decorator here lost its callable name during extraction;
# `(name='worker', cls=build_lazy_click_command(...))` matches a click
# command registration, so it is restored as `@click.command(...)`.
# Confirm against the surrounding CLI group definition.
@click.command(name='worker', cls=build_lazy_click_command(_dynamic_factory=_model_dynamic_factory))
def start_model_worker(**kwargs):
    """Start a model worker, optionally daemonized with uvicorn logging to a file."""
    if kwargs['daemon']:
        port = kwargs['port']
        # Worker type defaults to 'llm' when not supplied.
        model_type = kwargs.get('worker_type') or 'llm'
        log_file = os.path.join(LOGDIR, f'model_worker_{model_type}_{port}_uvicorn.log')
        _run_current_with_daemon('ModelWorker', log_file)
    else:
        # Imported lazily to avoid pulling in the cluster stack at CLI load time.
        from dbgpt.model.cluster import run_worker_manager
        run_worker_manager()
class Xkblayout(IntervalModule):
    """i3pystatus module showing (and cycling) the current XKB keyboard layout."""

    interval = 1
    color = '#FFFFFF'
    format = ' {symbol}'
    # NOTE: mutable class-level default; i3pystatus settings override it
    # per-instance, so it is never mutated in place here.
    layouts = []
    uppercase = True
    # FIX: corrected the typo 'specifuer' -> 'specifier' in the user-facing
    # setting description.
    settings = (
        ('color', 'RGB hexadecimal color code specifier, defaults to #FFFFFF'),
        ('format', 'Format string'),
        ('layouts', 'List of layouts'),
        ('uppercase', 'Flag for uppercase output'),
    )
    on_leftclick = ['change_layout', 1]
    on_upscroll = ['change_layout', 1]
    on_downscroll = ['change_layout', (- 1)]

    def init(self):
        """Apply the configured layouts (if any) and open the XKB display."""
        if self.layouts:
            self.set_layouts(self.layouts)
        self._xkb = XKeyboard(auto_open=True)

    def set_layouts(self, layouts):
        """Configure X with the given layouts via setxkbmap.

        Each entry is 'symbol [variant]'; symbols (and variants, when any
        entry has one) are joined into comma-separated setxkbmap arguments.
        """
        self.layouts = layouts
        layouts_parts = [x.split() for x in self.layouts]
        # Transpose to ([symbols...], [variants...]); missing variants
        # become empty strings so the comma-separated lists stay aligned.
        symbols_variants_zipped = zip_longest(*layouts_parts, fillvalue='')
        symbols_variants_str = [','.join(x) for x in symbols_variants_zipped]
        assert len(symbols_variants_str) > 0
        if len(symbols_variants_str) == 1:
            symbols = symbols_variants_str[0]
            args = 'setxkbmap -layout {}'.format(symbols)
        elif len(symbols_variants_str) == 2:
            symbols, variants = symbols_variants_str
            args = 'setxkbmap -layout {} -variant {}'.format(symbols, variants)
        else:
            # More than two columns means an entry had extra tokens.
            raise ValueError('Wrong layouts value: {}'.format(self.layouts))
        subprocess.check_call(args.split())

    def change_layout(self, increment=1):
        """Cycle the active layout group by *increment*."""
        self._xkb.group_num += increment

    def run(self):
        """Render the current layout state into the status bar output."""
        cdict = {
            'num': self._xkb.group_num,
            'name': self._xkb.group_name,
            'symbol': self._xkb.group_symbol,
            'variant': self._xkb.group_variant,
            'count': self._xkb.groups_count,
            'names': self._xkb.groups_names,
            'symbols': self._xkb.groups_symbols,
            'variants': self._xkb.groups_variants,
        }
        full_text = self.format.format(**cdict)
        full_text = full_text.upper() if self.uppercase else full_text
        self.data = cdict
        self.output = {'full_text': full_text, 'color': self.color}
class TestRegisterWithMalformedEntryPoint():
    """Registering a crypto with a malformed entry point must raise AEAException."""

    MESSAGE_REGEX = "Malformed .*: '.*'. It must be of the form '.*'."

    def test_wrong_spaces(self):
        """Whitespace anywhere in the entry point string is rejected."""
        with pytest.raises(AEAException, match=self.MESSAGE_REGEX):
            aea.crypto.registries.register_crypto('crypto_id', ' path.to.module:CryptoClass')
        with pytest.raises(AEAException, match=self.MESSAGE_REGEX):
            aea.crypto.registries.register_crypto('crypto_id', 'path.to.module :CryptoClass')
        with pytest.raises(AEAException, match=self.MESSAGE_REGEX):
            aea.crypto.registries.register_crypto('crypto_id', 'path.to .module:CryptoClass')

    # FIX: restored the decorator owner lost during extraction -- a bare
    # `.parametrize(...)` is not valid syntax.
    @pytest.mark.parametrize('special_character', forbidden_special_characters)
    def test_special_characters(self, special_character):
        """Forbidden special characters in the module path are rejected."""
        with pytest.raises(AEAException, match=self.MESSAGE_REGEX):
            aea.crypto.registries.register_crypto(
                'crypto_id', 'path' + special_character + '.to.module:CryptoClass'
            )
def extractMoemclendonWordpressCom(item):
    """Map a moemclendon.wordpress.com feed item to a release message.

    Returns None for previews/untagged chapters, False when no known tag
    matches, otherwise the built release message.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if not (chp or vol) or 'preview' in title.lower():
        return None
    tag_table = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tagname, name, tl_type in tag_table:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)
    return False
class PayeeBadgeDelegate(QStyledItemDelegate):
    # Item delegate that paints a PayeeBadge widget inside a view cell.
    margin_x = 0
    margin_y = 0

    def __init__(self, form_context: DetailFormContext, parent: Optional[Any]=None) -> None:
        super().__init__(parent)
        self._form_context = form_context

    def paint(self, painter: QPainter, option: QStyleOptionViewItem, model_index: QModelIndex) -> None:
        # Build a fresh badge widget for the row's contact/identity and
        # render it at the cell's top-left corner.
        point = option.rect.topLeft()
        source_index = get_source_index(model_index)
        (contact, identity) = source_index.model()._get_identity(source_index.row())
        widget = self._create_payee_badge(self.parent(), self._form_context, contact, identity)
        if (option.state & QStyle.State_Selected):
            # Tint the badge with the active highlight colour when selected.
            p = option.palette
            p.setColor(QPalette.Background, p.color(QPalette.Active, QPalette.Highlight))
            widget.setPalette(p)
        widget.render(painter, point)
        # Reparent the throwaway badge to a dummy widget so it does not
        # remain a child of the view; both go out of scope together here.
        dummyWidget = QWidget()
        widget.setParent(dummyWidget)

    def sizeHint(self, option: QStyleOptionViewItem, model_index: QModelIndex):
        # Fixed cell size for every badge.
        return QSize(150, 25)

    def _create_payee_badge(self, parent: Any, form_context: DetailFormContext, contact: ContactEntry, identity: ContactIdentity):
        # Factory hook for the rendered badge widget.
        badge = PayeeBadge(form_context, contact, identity, parent)
        return badge
# FIX: restored the decorator owners lost during extraction
# (`.slow .parametrize ...` on its own is not valid syntax).
@pytest.mark.slow
@pytest.mark.parametrize('block_file_name', ['block_1.json', 'block_1234567.json', 'block_.json'])
def test_pow_validation_block_headers(block_file_name: str) -> None:
    """Proof-of-work validation passes for stored block header fixtures."""
    block_str_data = cast(
        bytes, pkgutil.get_data('ethereum', f'assets/blocks/{block_file_name}')
    ).decode()
    block_json_data = json.loads(block_str_data)
    load = Load('ConstantinopleFix', 'constantinople')
    header: Header = load.json_to_header(block_json_data)
    validate_proof_of_work(header)
def extractDefectivetlsWordpressCom(item):
    """Map a defectivetls.wordpress.com feed item to a release message.

    Previews and items without a chapter or volume yield None; unmatched
    tags yield False.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    for tagname, name, tl_type in (('PRC', 'PRC', 'translated'),
                                   ('Loiterous', 'Loiterous', 'oel')):
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)
    return False
class TestProtFuncs(BaseEvenniaTest):
    """Tests for the $protfunc parser."""

    # NOTE(review): the decorator was truncated to `_settings(...)` during
    # extraction; restored as Django's `override_settings`, which matches
    # the keyword settings payload -- confirm against the original file.
    @override_settings(PROT_FUNC_MODULES=['evennia.prototypes.protfuncs'])
    def test_protkey_protfunc(self):
        """$protkey(...) resolves keys from the prototype being parsed."""
        test_prot = {'key1': 'value1', 'key2': 2}
        self.assertEqual(
            protlib.protfunc_parser('$protkey(key1)', testing=True, prototype=test_prot),
            'value1',
        )
        self.assertEqual(
            protlib.protfunc_parser('$protkey(key2)', testing=True, prototype=test_prot),
            2,
        )
class ObsSymbolic(ObsBase):
    # Observation backend that mirrors concrete EVM execution traces into a
    # symbolic VM (svm), replaying SSTORE/SHA3 events to keep symbolic
    # storage in sync with the fuzzed contracts.

    def __init__(self, contract_manager, account_manager, dataset_dump_path, backend_loggers):
        super().__init__(contract_manager, account_manager, dataset_dump_path)
        self.sym_stat = Stat(contract_manager, account_manager)
        self.contract_to_storage = {}
        self.tx_count = 0
        address_to_contract = self.contract_manager.address_to_contract
        fuzz_contract_names = self.contract_manager.fuzz_contract_names
        # Addresses are '0x...' hex strings; strip the prefix and parse.
        fuzz_addresses = [int(contract_manager.contract_dict[n].addresses[0][2:], 16) for n in fuzz_contract_names]
        # Map function selector -> human-readable name for all ABIs.
        self.hash_to_func_name = {}
        for contract in self.contract_manager.contract_dict.values():
            for method in contract.abi.methods:
                self.hash_to_func_name[method.idd] = method.name
        # Load truffle-style build artifacts from build/contracts/*.json.
        proj_path = self.contract_manager.proj_path
        build_dir = os.path.join(proj_path, 'build', 'contracts')
        build_json_files = glob.glob(os.path.join(build_dir, '*.json'))
        contract_to_build_data = {}
        for build_json_file in build_json_files:
            contract_name = os.path.splitext(os.path.basename(build_json_file))[0]
            with open(build_json_file) as f:
                build_data = json.loads(f.read())
            contract_to_build_data[contract_name] = build_data
        self.svm = svm.SVM(address_to_contract, contract_to_build_data, self.hash_to_func_name, self.account_manager, fuzz_addresses)
        contract_to_address = {v.name: k for (k, v) in address_to_contract.items()}
        # Replay each creation-time trace so symbolic storage starts from
        # the deployed state; contracts are identified by the swarm hashes
        # embedded in the returned runtime bytecode.
        for logger in backend_loggers:
            if (logger.logs is not None):
                bytecode_bytes = self.get_create_bytecode(logger)
                if (bytecode_bytes is None):
                    continue
                found_swarmhashes = asm.find_swarmhashes(bytecode_bytes)
                create_contract = self.svm.swarm_hash_tuple_to_contract[tuple(found_swarmhashes)]
                if (create_contract.name == 'Migrations'):
                    continue
                if (create_contract.name not in contract_to_address):
                    continue
                address = contract_to_address[create_contract.name]
                (sstore_data, sha_data) = self.get_logger_info(logger, address)
                self.svm.update_sha(sha_data)
                self.svm.update_storages(sstore_data)

    def init(self):
        super().init()

    def update(self, logger, is_init_explore):
        # Feed one new transaction trace into stats and symbolic storage.
        super().update(logger, is_init_explore)
        self.sym_stat.update(logger)
        self.tx_count += 1
        (sstore_data, sha_data) = self.get_logger_info(logger)
        self.svm.update_sha(sha_data)
        self.svm.update_storages(sstore_data)

    def get_create_bytecode(self, logger):
        # Return the runtime bytecode produced by a contract-creation trace,
        # i.e. the memory slice RETURNed by the final log entry, or None
        # when the trace did not end in RETURN.
        log = logger.logs[(- 1)]
        stack = log.stack
        if (log.op_name != 'RETURN'):
            return None
        offset = int(stack[(- 1)][2:], 16)
        length = int(stack[(- 2)][2:], 16)
        memory = ('0' if (log.memory == '0x') else log.memory[2:])
        memory_bytes = ethereum.utils.int_to_bytes(int(memory, 16))
        return_bytes = memory_bytes[offset:(offset + length)]
        return return_bytes

    def get_logger_info(self, logger, start_address=None):
        # Walk an execution trace while tracking the (DELEGATECALL-aware)
        # call stack, collecting:
        #   sstore_data: (storage-owner address, slot index, value) triples
        #   sha_data:    (preimage int, hash value, preimage length) triples
        sha_data = []
        sstore_data = []
        assert ((logger.tx.call_address != '0x0') or (start_address is not None))
        start_address = (logger.tx.call_address if (start_address is None) else start_address)
        call_stack = [start_address]
        for (i, log) in enumerate(logger.logs):
            depth = log.depth
            stack = log.stack
            assert (depth == len(call_stack))
            if (log.op_name == 'SSTORE'):
                index = int(stack[(- 1)][2:], 16)
                value = int(stack[(- 2)][2:], 16)
                sstore_data.append((call_stack[(- 1)], index, value))
            elif (log.op_name == 'SHA3'):
                offset = int(stack[(- 1)][2:], 16)
                length = int(stack[(- 2)][2:], 16)
                memory = ethereum.utils.remove_0x_head(log.memory)
                memory_bytes = bytearray.fromhex(memory)
                arg_bytes = memory_bytes[offset:(offset + length)]
                arg = ethereum.utils.bytes_to_int(arg_bytes)
                # The hash result shows up on top of the stack at the next
                # log entry.
                next_log = logger.logs[(i + 1)]
                value = int(next_log.stack[(- 1)], 16)
                sha_data.append((arg, value, length))
            elif ((depth == 1) and ((log.op_name == 'RETURN') or (log.op_name == 'STOP'))):
                return (sstore_data, sha_data)
            # Track call-depth changes to know which address owns storage;
            # DELEGATECALL executes in the caller's storage context.
            if (((i + 1) < len(logger.logs)) and (logger.logs[(i + 1)].depth == (depth + 1))):
                call_stack.append((call_stack[(- 1)] if (log.op_name == 'DELEGATECALL') else stack[(- 2)]))
            elif (((i + 1) < len(logger.logs)) and (logger.logs[(i + 1)].depth == (depth - 1))):
                call_stack.pop()
        # Trace never hit a depth-1 RETURN/STOP (e.g. reverted/incomplete).
        return ([], [])
def get_es_kdata_index(security_type='stock', exchange='sh', level='day'):
    """Build the elasticsearch kdata index name for a market.

    Chinese exchanges (sh/sz) map to region 'china', US exchanges
    (nasdaq/amex/nyse) to 'usa'; any other exchange keeps its own name.
    """
    if exchange in ('sh', 'sz'):
        region = 'china'
    elif exchange in ('nasdaq', 'amex', 'nyse'):
        region = 'usa'
    else:
        region = exchange
    return '{}_{}_{}_kdata'.format(security_type, region, level)
def test_standard_ignore():
    """StandardIgnore matches gitignore-style patterns against paths."""
    ignore = StandardIgnore(
        root='.',
        patterns=['*.pyc', '.cache', '.cache/*', '__pycache__', '**/__pycache__', '*.foo'],
    )
    # A plain source file is kept; everything else below matches a pattern.
    assert not ignore.is_ignored('foo.py')
    assert ignore.is_ignored('foo.pyc')
    assert ignore.is_ignored('.cache/foo')
    assert ignore.is_ignored('__pycache__')
    assert ignore.is_ignored('foo/__pycache__')
    assert ignore.is_ignored('spam/ham/some.foo')
def test_verify_from_public_key_obj(key_api, private_key):
    """Both recoverable and non-recoverable signatures verify against the
    matching public key, for the raw message and for its hash."""
    non_recoverable_signature = key_api.ecdsa_sign_non_recoverable(MSGHASH, private_key)
    # FIX: this previously called ecdsa_sign_non_recoverable as well, so
    # the recoverable-signature path was never actually exercised.
    recoverable_signature = key_api.ecdsa_sign(MSGHASH, private_key)
    public_key = private_key.public_key
    for signature in (recoverable_signature, non_recoverable_signature):
        assert public_key.verify_msg_hash(MSGHASH, signature)
        assert public_key.verify_msg(MSG, signature)
def test_hydrate_workflow_template__branch_node():
    # _hydrate_workflow_template_nodes must stamp project/domain/version
    # onto every task reference inside a branch node: the if case, each
    # "other" (elif) case, and the else node.
    workflow_template = _core_workflow_pb2.WorkflowTemplate()
    branch_node = _core_workflow_pb2.Node(
        id='branch_node',
        branch_node=_core_workflow_pb2.BranchNode(
            if_else=_core_workflow_pb2.IfElseBlock(
                case=_core_workflow_pb2.IfBlock(
                    then_node=_core_workflow_pb2.Node(
                        task_node=_core_workflow_pb2.TaskNode(
                            reference_id=_identifier_pb2.Identifier(
                                resource_type=_identifier_pb2.TASK, name='if_case'
                            )
                        )
                    )
                ),
                else_node=_core_workflow_pb2.Node(
                    task_node=_core_workflow_pb2.TaskNode(
                        reference_id=_identifier_pb2.Identifier(
                            resource_type=_identifier_pb2.TASK, name='else_node'
                        )
                    )
                ),
            )
        ),
    )
    # Two additional elif branches.
    branch_node.branch_node.if_else.other.extend(
        [
            _core_workflow_pb2.IfBlock(
                then_node=_core_workflow_pb2.Node(
                    task_node=_core_workflow_pb2.TaskNode(
                        reference_id=_identifier_pb2.Identifier(
                            resource_type=_identifier_pb2.TASK, name='other_1'
                        )
                    )
                )
            ),
            _core_workflow_pb2.IfBlock(
                then_node=_core_workflow_pb2.Node(
                    task_node=_core_workflow_pb2.TaskNode(
                        reference_id=_identifier_pb2.Identifier(
                            resource_type=_identifier_pb2.TASK, name='other_2'
                        )
                    )
                )
            ),
        ]
    )
    workflow_template.nodes.append(branch_node)
    hydrated_workflow_template = _hydrate_workflow_template_nodes(
        'project', 'domain', '12345', workflow_template
    )
    # The if case gets the full identifier.
    if_case_id = hydrated_workflow_template.nodes[0].branch_node.if_else.case.then_node.task_node.reference_id
    assert (if_case_id.project == 'project')
    assert (if_case_id.domain == 'domain')
    assert (if_case_id.name == 'if_case')
    assert (if_case_id.version == '12345')
    # Each elif branch gets the full identifier.
    other_1_id = hydrated_workflow_template.nodes[0].branch_node.if_else.other[0].then_node.task_node.reference_id
    assert (other_1_id.project == 'project')
    assert (other_1_id.domain == 'domain')
    assert (other_1_id.name == 'other_1')
    assert (other_1_id.version == '12345')
    other_2_id = hydrated_workflow_template.nodes[0].branch_node.if_else.other[1].then_node.task_node.reference_id
    assert (other_2_id.project == 'project')
    assert (other_2_id.domain == 'domain')
    assert (other_2_id.name == 'other_2')
    assert (other_2_id.version == '12345')
    # The else node gets the full identifier.
    else_id = hydrated_workflow_template.nodes[0].branch_node.if_else.else_node.task_node.reference_id
    assert (else_id.project == 'project')
    assert (else_id.domain == 'domain')
    assert (else_id.name == 'else_node')
    assert (else_id.version == '12345')
class OptionSeriesBarSonificationTracksMappingLowpassFrequency(Options):
    """Lowpass-frequency mapping options for bar series sonification tracks.

    FIX: each getter/setter pair below shared one name with no decorators,
    so every getter was silently shadowed by its setter; restored the
    @property / @<name>.setter pattern used by these Options wrappers.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
# FIX: restored the decorator owners lost during extraction
# (`.skipif ... .parametrize ...` is not valid on its own).
@pytest.mark.skipif('pandas' not in sys.modules, reason='Pandas is not installed.')
@pytest.mark.parametrize('map_task_fn', [map_task, array_node_map_task])
def test_everything(map_task_fn):
    """End-to-end exercise of map tasks over partials, dataframes and lists."""
    import pandas as pd

    def get_static_list() -> typing.List[float]:
        return [3.14, 2.718]

    def get_list_of_pd(s: int) -> typing.List[pd.DataFrame]:
        df1 = pd.DataFrame({'Name': ['Tom', 'Joseph'], 'Age': [20, 22]})
        df2 = pd.DataFrame({'Name': ['Rachel', 'Eve', 'Mary'], 'Age': [22, 23, 24]})
        if s == 2:
            return [df1, df2]
        else:
            return [df1, df2, df1]

    def t3(a: int, b: str, c: typing.List[float], d: typing.List[float], a2: pd.DataFrame) -> str:
        return str(a) + f'pdsize{len(a2)}' + b + str(c) + '&&' + str(d)

    t3_bind_b2 = partial(t3, b='world')
    mt1 = map_task_fn(t3_bind_b2)

    if map_task_fn == array_node_map_task:
        mr = ArrayNodeMapTaskResolver()
    elif map_task_fn == map_task:
        mr = MapTaskResolver()
    else:
        raise ValueError('Unexpected map task fn')

    # The resolver should record only the partially bound argument names.
    aa = mr.loader_args(serialization_settings, mt1)
    aa = aa[1].split(',')
    aa.sort()
    assert aa == ['b']

    def print_lists(i: typing.List[str], j: typing.List[str], k: typing.List[str]) -> str:
        print(f'First: {i}')
        print(f'Second: {j}')
        print(f'Third: {k}')
        return f'{i}-{j}-{k}'

    def dt1(a: typing.List[int], a2: typing.List[pd.DataFrame], sl: typing.List[float]) -> str:
        i = mt1(a=a, a2=a2, c=[[1.1, 2.0, 3.0], [1.1, 2.0, 3.0]], d=[sl, sl])
        mt_in2 = map_task_fn(t3_bind_b2)
        dfs = get_list_of_pd(s=3)
        j = mt_in2(a=[3, 4, 5], a2=dfs, c=[[1.0], [2.0], [3.0]], d=[sl, sl, sl])
        # Binding a dataframe fixes 'a2' as well as 'b' in the resolver args.
        t3_bind_a2 = partial(t3_bind_b2, a2=a2[0])
        mt_in3 = map_task_fn(t3_bind_a2)
        aa = mr.loader_args(serialization_settings, mt_in3)
        aa = aa[1].split(',')
        aa.sort()
        assert aa == ['a2', 'b']
        k = mt_in3(a=[3, 4, 5], c=[[1.0], [2.0], [3.0]], d=[sl, sl, sl])
        return print_lists(i=i, j=j, k=k)

    def wf_dt(a: typing.List[int]) -> str:
        sl = get_static_list()
        dfs = get_list_of_pd(s=2)
        return dt1(a=a, a2=dfs, sl=sl)

    print(wf_dt(a=[1, 2]))
    assert wf_dt(a=[1, 2]) == (
        "['1pdsize2world[1.1, 2.0, 3.0]&&[3.14, 2.718]', "
        "'2pdsize3world[1.1, 2.0, 3.0]&&[3.14, 2.718]']-"
        "['3pdsize2world[1.0]&&[3.14, 2.718]', "
        "'4pdsize3world[2.0]&&[3.14, 2.718]', "
        "'5pdsize2world[3.0]&&[3.14, 2.718]']-"
        "['3pdsize2world[1.0]&&[3.14, 2.718]', "
        "'4pdsize2world[2.0]&&[3.14, 2.718]', "
        "'5pdsize2world[3.0]&&[3.14, 2.718]']"
    )
def set_other_config(manager, system_id, key, val, fn):
    """Set one other_config key on the OVSDB row selected by *fn*.

    The value is stringified before storage; *fn* receives the tables and
    returns the row to modify (or a falsy value to skip).
    """
    str_val = str(val)

    def _set_iface_other_config(tables, *_):
        row = fn(tables)
        if not row:
            return None
        # Re-assign the whole map so the change is registered as a
        # modification of the column.
        other_config = row.other_config
        other_config[key] = str_val
        row.other_config = other_config

    request = ovsdb_event.EventModifyRequest(system_id, _set_iface_other_config)
    return manager.send_request(request)
class OptionSeriesBarTooltipDatetimelabelformats(Options):
    """Date/time label format strings for bar series tooltips.

    FIX: each getter/setter pair below shared one name with no decorators,
    so every getter was silently shadowed by its setter; restored the
    @property / @<name>.setter pattern used by these Options wrappers.
    """

    @property
    def day(self):
        return self._config_get('%A, %e %b %Y')

    @day.setter
    def day(self, text: str):
        self._config(text, js_type=False)

    @property
    def hour(self):
        return self._config_get('%A, %e %b, %H:%M')

    @hour.setter
    def hour(self, text: str):
        self._config(text, js_type=False)

    @property
    def millisecond(self):
        return self._config_get('%A, %e %b, %H:%M:%S.%L')

    @millisecond.setter
    def millisecond(self, text: str):
        self._config(text, js_type=False)

    @property
    def minute(self):
        return self._config_get('%A, %e %b, %H:%M')

    @minute.setter
    def minute(self, text: str):
        self._config(text, js_type=False)

    @property
    def month(self):
        return self._config_get('%B %Y')

    @month.setter
    def month(self, text: str):
        self._config(text, js_type=False)

    @property
    def second(self):
        return self._config_get('%A, %e %b, %H:%M:%S')

    @second.setter
    def second(self, text: str):
        self._config(text, js_type=False)

    @property
    def week(self):
        return self._config_get('Week from %A, %e %b %Y')

    @week.setter
    def week(self, text: str):
        self._config(text, js_type=False)

    @property
    def year(self):
        return self._config_get('%Y')

    @year.setter
    def year(self, text: str):
        self._config(text, js_type=False)
def register(registry):
    """Register interactions and nested-UI solvers for the enum editors."""
    _IndexedCustomEditor.register(registry)

    # Simple editor: a mouse click targets the editor's button widget.
    def _click_simple_button(wrapper, _):
        mouse_click_qwidget(wrapper._target._button, delay=wrapper.delay)

    registry.register_interaction(
        target_class=SimpleEditor,
        interaction_class=MouseClick,
        handler=_click_simple_button,
    )
    register_traitsui_ui_solvers(registry, SimpleEditor, _get_nested_ui_simple)

    # Custom editor: expose the combobox text and the nested UI.
    registry.register_interaction(
        target_class=CustomEditor,
        interaction_class=SelectedText,
        handler=_get_combobox_text,
    )
    register_traitsui_ui_solvers(registry, CustomEditor, _get_nested_ui_custom)
def main():
    # Generate the flexx.ui widget reference docs: one .rst page per ui
    # submodule plus an api.rst index, written into OUTPUT_DIR.
    pages = {}
    class_names = []
    layouts = set()
    # Collect every public PyComponent/JsComponent subclass defined in the
    # ui (sub)modules, grouped by defining module.
    namespace = {}
    namespace.update(ui.__dict__)
    namespace.update(ui.layouts.__dict__)
    namespace.update(ui.widgets.__dict__)
    namespace.update(ui.pywidgets.__dict__)
    for mod in namespace.values():
        if isinstance(mod, ModuleType):
            classes = []
            for w in mod.__dict__.values():
                if (isinstance(w, type) and issubclass(w, (app.PyComponent, app.JsComponent))):
                    if ((w.__module__ == mod.__name__) and (not w.__name__.startswith('_'))):
                        classes.append(w)
                        if issubclass(w, ui.Layout):
                            layouts.add(w.__name__)
            if classes:
                # Sort by name, then (stably) by inheritance depth so base
                # classes come first on each page.
                classes.sort(key=(lambda x: x.__name__))
                classes.sort(key=(lambda x: len(x.mro())))
                class_names.extend([w.__name__ for w in classes])
                pages[mod.__name__] = classes
    # Write one reference page per module.
    for (module_name, classes) in sorted(pages.items()):
        page_name = page_title = module_name.split('.')[(- 1)].strip('_').capitalize()
        # A short first docstring line (<= 24 chars) becomes the page
        # title and is removed from the module docstring.
        mdoc = sys.modules[module_name].__doc__
        if (mdoc and (0 < len(mdoc.split('\n', 1)[0].strip()) <= 24)):
            page_title = mdoc.split('\n', 1)[0].strip()
            sys.modules[module_name].__doc__ = sys.modules[module_name].__doc__.split('\n', 1)[(- 1)]
        docs = ('%s\n%s\n\n' % (page_title, ('-' * len(page_title))))
        docs += ('.. automodule:: %s\n\n' % module_name)
        docs += '----\n\n'
        if module_name.endswith('_widget'):
            docs += '.. autofunction:: flexx.ui.create_element\n\n'
        for cls in classes:
            assert issubclass(cls, (ui.Widget, ui.PyWidget)), (cls.__name__ + ' is not a Widget or PyWidget')
            name = cls.__name__
            # Prepend an "Inherits from" line (with proper cross-refs) once.
            if ('Inherits from' not in cls.__doc__):
                bases = []
                for bcls in cls.__bases__:
                    if getattr(ui, bcls.__name__, None):
                        bases.append((':class:`%s <flexx.ui.%s>`' % (bcls.__name__, bcls.__name__)))
                    elif getattr(app, bcls.__name__, None):
                        bases.append((':class:`%s <flexx.app.%s>`' % (bcls.__name__, bcls.__name__)))
                    else:
                        bases.append((':class:`%s <%s.%s>`' % (bcls.__name__, bcls.__module__, bcls.__name__)))
                line = (' *Inherits from:* ' + ', '.join(bases))
                cls.__doc__ = ((line + '\n\n ') + (cls.__doc__ or '').lstrip())
            # Bucket documented JS members by descriptor kind.
            members = {}
            include = ('_create_dom', '_render_dom')
            exclude = ('CODE', 'CSS', 'DEFAULT_MIN_SIZE')
            for n in list(cls.JS.__dict__):
                val = getattr(cls.JS, n)
                if ((n in exclude) or (not val.__doc__)):
                    pass
                elif (n.startswith('_') and (n not in include)):
                    pass
                elif isinstance(val, event._action.BaseDescriptor):
                    for (tname, tclass) in (('attributes', event._attribute.Attribute), ('properties', event._property.Property), ('actions', event._action.ActionDescriptor), ('reactions', event._reaction.ReactionDescriptor), ('emitters', event._emitter.EmitterDescriptor)):
                        if isinstance(val, tclass):
                            members.setdefault(tname, []).append(n)
                            break
                    else:
                        assert False
                elif getattr(val, '__doc__', None):
                    members.setdefault('methods', []).append(n)
            full_name = ('%s.%s' % (module_name, name))
            if getattr(ui, name, None):
                full_name = ('flexx.ui.%s' % name)
            # Build the :members: directive plus a linked per-kind TOC.
            order = ('attributes', 'properties', 'emitters', 'actions', 'reactions', 'methods')
            member_str = ' :members:'
            toc_str = '\n'
            for key in members:
                members[key].sort()
            assert (not set(members).difference(order))
            for key in order:
                if (key in members):
                    toc_str = (toc_str.rstrip(',') + '\n\n *{}*:'.format(key))
                    for n in members[key]:
                        member_str += ' {},'.format(n)
                        toc_str += ' `{} <#{}.{}>`__,'.format(n, full_name, n)
                    for n in members[key]:
                        # Expose JS-only members on the Python class so
                        # autodoc can find them.
                        if (n not in cls.__dict__):
                            setattr(cls, n, cls.JS.__dict__[n])
            cls.__doc__ += (toc_str.rstrip(',') + '\n\n')
            docs += ('.. autoclass:: %s\n' % full_name)
            docs += (member_str.rstrip(',') + '\n :member-order: alphabetical\n\n')
        filename = os.path.join(OUTPUT_DIR, (page_name.lower() + '.rst'))
        created_files.append(filename)
        open(filename, 'wt', encoding='utf-8').write(docs)
    # Write the api.rst index listing base widget, layouts and widgets.
    docs = 'Widgets reference'
    docs += (('\n' + ('=' * len(docs))) + '\n\n')
    docs += 'This is a list of all widget classes provided by ``flexx.ui``. '
    docs += 'The :class:`Widget <flexx.ui.Widget>` class is the base class of all widgets. '
    docs += '\n\n'
    docs += '\nBase widget:\n\n'
    if True:
        docs += ('* :class:`%s <flexx.ui.%s>`\n' % ('Widget', 'Widget'))
    docs += '\nLayouts:\n\n'
    for name in [n for n in sorted(class_names) if (n in layouts) if getattr(ui, n, None)]:
        docs += ('* :class:`%s <flexx.ui.%s>`\n' % (name, name))
    docs += '\nWidgets:\n\n'
    for name in [n for n in sorted(class_names) if (n not in layouts) if getattr(ui, n, None)]:
        docs += ('* :class:`%s <flexx.ui.%s>`\n' % (name, name))
    docs += '\n.. toctree::\n :maxdepth: 1\n :hidden:\n\n'
    for module_name in sorted(pages.keys()):
        docs += (' %s\n' % module_name.split('.')[(- 1)].strip('_').lower())
    filename = os.path.join(OUTPUT_DIR, 'api.rst')
    created_files.append(filename)
    open(filename, 'wt', encoding='utf-8').write(docs)
    print((' generated widget docs with %i pages and %i widgets' % (len(pages), len(class_names))))
def test_channels(audio, multichannel_format):
    """Mono audio written to a multichannel container keeps its shape on read."""
    # Only mono input is exercised here; other fixtures are skipped.
    if audio.ndim != 1:
        return
    suffix = '.' + multichannel_format
    with tmp.NamedTemporaryFile(delete=False, suffix=suffix) as handle:
        stempeg.write_audio(handle.name, audio, sample_rate=44100)
        loaded_audio, _rate = stempeg.read_stems(handle.name)
        assert audio.shape == loaded_audio.shape
# NOTE(review): the bare `_test` below looks like the remnant of a decorator
# whose `@`/owner was lost during extraction -- confirm against the original
# source; as written it is only an expression statement.
_test
def test_local_run() -> None:
    # Run the local graph end-to-end and check that every message number
    # 0..NUM_MESSAGES-1 appears exactly once in the output file.
    runner = LocalRunner(module=MyLocalGraph(config=MySinkConfig(output_filename=LOCAL_OUTPUT_FILENAME)))
    runner.run()
    remaining_numbers = {str(i) for i in range(NUM_MESSAGES)}
    with open(LOCAL_OUTPUT_FILENAME, 'r') as output_file:
        lines = output_file.readlines()
        assert (len(lines) == NUM_MESSAGES)
        for line in lines:
            message = MyMessage2.fromdict(json.loads(line))
            # Each str_field must be unique, so remove as we go.
            assert (message.str_field in remaining_numbers)
            remaining_numbers.remove(message.str_field)
    assert (len(remaining_numbers) == 0)
    # Clean up the output file so reruns start fresh.
    os.remove(LOCAL_OUTPUT_FILENAME)
class Test_Fixup():
    """Tests for the Django fixup's enable/setup behaviour."""

    # FIX: the decorator was reduced to a bare `()` during extraction;
    # restored as @pytest.fixture(), matching the fixture usage below.
    @pytest.fixture()
    def fixup(self, *, app):
        return Fixup(app)

    def test_fixup_env_enabled(self, *, app, fixup, monkeypatch):
        """DJANGO_SETTINGS_MODULE + importable django enables the fixup."""
        monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'settings')
        with patch_module('django'):
            assert fixup.enabled()
            assert any(isinstance(f, Fixup) for f in fixups(app))

    def test_fixup_env_enabled_no_django(self, *, app, fixup, monkeypatch):
        """Env var set but django missing warns and stays disabled."""
        monkeypatch.setenv('DJANGO_SETTINGS_MODULE', 'settings')
        with mask_module('django'):
            with pytest.warns(UserWarning):
                assert not fixup.enabled()
            assert not any(isinstance(f, Fixup) for f in fixups(app))

    def test_fixup_env_disabled(self, *, app, fixup, monkeypatch):
        """Without DJANGO_SETTINGS_MODULE the fixup is disabled."""
        monkeypatch.delenv('DJANGO_SETTINGS_MODULE', raising=False)
        assert not fixup.enabled()
        assert not any(isinstance(f, Fixup) for f in fixups(app))

    def test_wait_for_django_calls_django_setup(self, *, fixup, monkeypatch):
        """wait_for_django runs django.setup and warns when DEBUG is on."""
        with patch_module('django', 'django.conf', 'django.conf.settings'):
            import django
            from django.conf import settings
            settings.configure()
            settings.DEBUG = False
            fixup.wait_for_django()
            django.setup.assert_called_once_with()
            settings.DEBUG = True
            with pytest.warns(UserWarning):
                fixup.wait_for_django()

    def test_autodiscover_modules(self, *, fixup):
        """autodiscover_modules returns the names of installed app configs."""
        with patch_module('django', 'django.conf', 'django.core', 'django.apps'):
            from django.apps import apps
            config1 = Mock()
            config1.name = 'config1'
            config2 = Mock()
            config2.name = 'config2'
            apps.get_app_configs.return_value = [config1, config2]
            assert fixup.autodiscover_modules() == ['config1', 'config2']

    def test_on_worker_init(self, *, fixup):
        """on_worker_init runs setup + checks and warns when DEBUG is on."""
        with patch_module('django', 'django.conf', 'django.core', 'django.core.checks'):
            import django
            from django.conf import settings
            from django.core.checks import run_checks
            settings.DEBUG = False
            fixup.on_worker_init()
            django.setup.assert_called_once_with()
            run_checks.assert_called_once_with()
            settings.DEBUG = True
            with pytest.warns(UserWarning):
                fixup.on_worker_init()

    def test_apps(self, *, fixup):
        with patch_module('django', 'django.apps'):
            from django.apps import apps
            assert fixup.apps is apps

    def test_settings(self, *, fixup):
        with patch_module('django', 'django.conf'):
            from django.conf import settings
            assert fixup.settings is settings
class Etherscan():
    """Thin client for Etherscan's JSON-RPC proxy module."""

    def __init__(self, api_key: str) -> None:
        if not api_key:
            raise ValueError('Must provide an API key for Etherscan API access')
        self.api_key = api_key

    def get_proxy_api_url(self, network: Network) -> str:
        """Base URL for proxy-module calls on the given network."""
        return f'{API_URLS[network]}?module=proxy&apikey={self.api_key}'

    def post(self, action: str, network: Network) -> Any:
        """POST a proxy action and return the decoded 'result' field.

        Raises EtherscanAPIError on bad status codes, undecodable bodies,
        or API-level error responses.
        """
        url = f'{self.get_proxy_api_url(network)}&action={action}'
        response = requests.post(url, headers=COMMON_REQUEST_HEADERS)
        if response.status_code not in [200, 201]:
            raise EtherscanAPIError(f'Invalid status code: {response.status_code}, {response.reason}')
        try:
            value = response.json()
        except ValueError as err:
            raise EtherscanAPIError(f'Invalid response: {response.text}') from err
        message = value.get('message', '')
        result = value['result']
        # Etherscan signals failure either via message or via the result body.
        if message == 'NOTOK' or result == 'Error!':
            raise EtherscanAPIError(f'API error: {message}, result: {result}')
        return value['result']

    def get_latest_block(self, network: Network) -> int:
        """Current head block number as an int."""
        return to_int(hexstr=self.post('eth_blockNumber', network))

    def get_block_by_number(self, block_number: int, network: Network) -> Dict[str, Any]:
        """Fetch a block by number (transaction hashes only, not bodies)."""
        tag = to_hex(primitive=block_number)
        return self.post(f'eth_getBlockByNumber&tag={tag}&boolean=false', network)
class experimenter_stats_reply(stats_reply):
    # Auto-generated OpenFlow (wire version 2) experimenter stats reply.
    # Registry mapping experimenter id -> concrete subclass, used by unpack().
    subtypes = {}

    version = 2
    type = 19
    stats_type = 65535

    def __init__(self, xid=None, flags=None, experimenter=None, data=None):
        # Each field falls back to a neutral default when not supplied.
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (flags != None):
            self.flags = flags
        else:
            self.flags = 0
        if (experimenter != None):
            self.experimenter = experimenter
        else:
            self.experimenter = 0
        if (data != None):
            self.data = data
        else:
            self.data = ''
        return

    def pack(self):
        # Serialize to the wire format; the length field (offset 2) is
        # back-patched once the total size is known.
        # NOTE(review): mixing struct-packed values with '\x00' * 4 string
        # padding and ''.join only works on Python 2 — confirm this module
        # is py2-only generated code.
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # placeholder for length
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        packed.append(('\x00' * 4))  # pad
        packed.append(struct.pack('!L', self.experimenter))
        packed.append(('\x00' * 4))  # pad
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        # Peek the experimenter id (offset 16) to dispatch to a registered
        # subclass before consuming any bytes.
        (subtype,) = reader.peek('!L', 16)
        subclass = experimenter_stats_reply.subtypes.get(subtype)
        if subclass:
            return subclass.unpack(reader)
        obj = experimenter_stats_reply()
        _version = reader.read('!B')[0]
        assert (_version == 2)
        _type = reader.read('!B')[0]
        assert (_type == 19)
        _length = reader.read('!H')[0]
        # Restrict further reads to this message's declared length.
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 65535)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)
        obj.experimenter = reader.read('!L')[0]
        reader.skip(4)
        obj.data = str(reader.read_all())
        return obj

    def __eq__(self, other):
        # Field-wise equality; type must match exactly (no subclass mixing).
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.flags != other.flags):
            return False
        if (self.experimenter != other.experimenter):
            return False
        if (self.data != other.data):
            return False
        return True

    def pretty_print(self, q):
        # Structured dump for the loxi pretty-printer protocol.
        q.text('experimenter_stats_reply {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {1: 'OFPSF_REPLY_MORE'}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
                q.text(',')
                q.breakable()
                q.text('data = ')
                q.pp(self.data)
            q.breakable()
        q.text('}')
class Scatter(GraphPlotly.Chart):
    # JS assets this component requires on the page.
    # NOTE(review): chart()/layout()/data() below read like "@property"
    # getters with the decorator stripped in this copy — confirm upstream.
    requirements = ('plotly.js',)
    __reqJs = ['plotly.js']

    def chart(self) -> JsPlotly.Pie:
        # Lazily-created JS-side chart wrapper bound to this component.
        # NOTE(review): returns JsPlotly.Pie even though this is a scatter
        # chart — presumably a shared wrapper class; confirm upstream.
        if (self._chart is None):
            self._chart = JsPlotly.Pie(page=self.page, js_code=self.js_code, component=self)
        return self._chart

    def layout(self) -> LayoutGeo:
        # Lazily-created geo layout options object.
        if (self._layout is None):
            self._layout = LayoutGeo(page=self.page, component=self)
        return self._layout

    def data(self):
        # Most recently added trace.
        return self._traces[(- 1)]

    def add_trace(self, data, type='scattermapbox', mode=None):
        # Append a scatter-mapbox trace built from `data`; `type` and `mode`
        # are only written into the trace when not None.
        c_data = dict(data)
        if (type is not None):
            c_data['type'] = type
        if (mode is not None):
            c_data['mode'] = mode
        self._traces.append(DataScatterMapBox(page=self.page, component=self, attrs=c_data))
        return self
# NOTE(review): the leading ".xfail(...)" looks like a stripped
# "@pytest.mark.xfail(...)" decorator — confirm against the original file.
.xfail(reason='modification to initial allocation made the block fixture invalid')
def test_import_block_validation(valid_chain, funded_address, funded_address_initial_balance):
    """Import a known-good RLP-encoded Frontier block and check its tx effects."""
    block = rlp.decode(valid_block_rlp, sedes=FrontierBlock)
    (imported_block, _, _) = valid_chain.import_block(block)
    assert (len(imported_block.transactions) == 1)
    tx = imported_block.transactions[0]
    assert (tx.value == 10)
    vm = valid_chain.get_vm()
    state = vm.state
    # The recipient received exactly the transferred value.
    assert (state.get_balance(decode_hex('095e7baea6a6c7c4c2dfeb977efac326af552d87')) == tx.value)
    # The sender paid the value plus the base transaction gas cost.
    tx_gas = (tx.gas_price * constants.GAS_TX)
    assert (state.get_balance(funded_address) == ((funded_address_initial_balance - tx.value) - tx_gas))
def ensure_no_static(opcode_fn: Callable[(..., Any)]) -> Callable[(..., Any)]:
    """Wrap an opcode so it refuses to run inside a STATICCALL context.

    State-modifying opcodes must not execute while ``computation.msg.is_static``
    is set; the wrapper raises ``WriteProtection`` in that case and otherwise
    delegates to the wrapped opcode unchanged.

    Fix: the original had a bare ``(opcode_fn)`` expression where the
    ``@functools.wraps(opcode_fn)`` decorator belongs (matching the upstream
    py-evm implementation); restored so the wrapper keeps the opcode's
    name/docstring for tracing and debugging.
    """
    import functools

    @functools.wraps(opcode_fn)
    def inner(computation: ComputationAPI) -> Callable[(..., Any)]:
        if computation.msg.is_static:
            raise WriteProtection('Cannot modify state while inside of a STATICCALL context')
        return opcode_fn(computation)
    return inner
def extractSubudai11(item):
    """Map a Subudai11 feed item to a release message.

    Returns None for previews / items without a chapter or volume, a release
    message for known series, and False for unrecognised titles.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    title = item['title']
    if not (chp or vol) or ('preview' in title.lower()):
        return None
    # Title marker -> canonical series name; first match wins.
    series_markers = [
        ('Mai Kitsune Waifu Chapter', 'My Fox Immortal Wife'),
        ('My Beautiful Teacher Chapter', 'My Beautiful Teacher'),
        ('Awakening ', 'Awakening '),
        ('Awakening', 'Awakening '),
    ]
    for marker, series_name in series_markers:
        if marker in title:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix)
    return False
class OptionPlotoptionsPyramidSonificationTracksMappingLowpass(Options):
    # NOTE(review): these look like "@property" getters with the decorator
    # stripped in this copy — confirm against the original file.

    def frequency(self) -> 'OptionPlotoptionsPyramidSonificationTracksMappingLowpassFrequency':
        # Sub-options object for the lowpass filter frequency mapping.
        return self._config_sub_data('frequency', OptionPlotoptionsPyramidSonificationTracksMappingLowpassFrequency)

    def resonance(self) -> 'OptionPlotoptionsPyramidSonificationTracksMappingLowpassResonance':
        # Sub-options object for the lowpass filter resonance mapping.
        return self._config_sub_data('resonance', OptionPlotoptionsPyramidSonificationTracksMappingLowpassResonance)
def get_twitter_client():
    """Build a tweepy Client from the [twitter] section of the app config."""
    twitter_conf = config['twitter']
    return tweepy.Client(
        consumer_key=twitter_conf['api_key'],
        consumer_secret=twitter_conf['api_secret'],
        access_token=twitter_conf['oauth_token'],
        access_token_secret=twitter_conf['oauth_secret'],
    )
class LoggingGcsCommon(ModelNormal):
    """Auto-generated OpenAPI model for common GCS logging settings.

    NOTE(review): the bare "_property" / "_js_args_to_python_args" lines look
    like stripped decorators (likely "@cached_property" and
    "@convert_js_args_to_python_args", with "@classmethod" on
    _from_openapi_data) — confirm against the generated original.
    """

    # Enum constraints per property (none for this model).
    allowed_values = {}
    # Value validations (regex/min/max) per property (none for this model).
    validations = {}

    _property
    def additional_properties_type():
        # Types accepted for properties not declared in attribute_map.
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    _property
    def openapi_types():
        # Declared property name -> tuple of accepted types.
        return {'user': (str,), 'secret_key': (str,), 'account_name': (str,)}

    _property
    def discriminator():
        # No polymorphic discriminator for this model.
        return None

    # Python attribute name -> JSON key name.
    attribute_map = {'user': 'user', 'secret_key': 'secret_key', 'account_name': 'account_name'}
    read_only_vars = {}
    _composed_schemas = {}

    _js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        # Deserialization constructor used when building the model from API
        # data; accepts internal _check_type/_configuration/etc. knobs.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            # Positional arguments are never valid for generated models.
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            # Optionally drop unknown keys when the configuration asks for it.
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal attributes that must never be treated as model properties.
    required_properties = set(['_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes'])

    _js_args_to_python_args
    def __init__(self, *args, **kwargs):
        # Keyword-only public constructor; positional args are rejected and
        # read-only attributes may only be set via _from_openapi_data.
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(('Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.' % (args, self.__class__.__name__)), path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = (_visited_composed_classes + (self.__class__,))
        for (var_name, var_value) in kwargs.items():
            if ((var_name not in self.attribute_map) and (self._configuration is not None) and self._configuration.discard_unknown_keys and (self.additional_properties_type is None)):
                continue
            setattr(self, var_name, var_value)
            if (var_name in self.read_only_vars):
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
class OptionPlotoptionsItemSonificationContexttracksMappingPan(Options):
    # NOTE(review): each same-named pair below reads as a getter/setter pair;
    # the "@property" / "@<name>.setter" decorators appear stripped in this
    # copy — confirm against the original file.

    def mapFunction(self):
        # Getter: configured mapping function (None when unset).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: stored as data, not emitted as raw JS (js_type=False).
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: data property this audio parameter maps to.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Getter: upper bound of the mapped output range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter: lower bound of the mapped output range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        # Getter: scope the min/max normalisation applies within.
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def run_distributed(scheduler=None):
    """Run a 7-image H2 bond-stretch NEB with ORCA TD-DFT calculators.

    `scheduler` is an optional dask scheduler address used to distribute the
    per-image force calculations.
    """
    init_logging(THIS_DIR, scheduler)
    atoms = ('H', 'H')
    geoms = list()
    for i in range(7):
        # H-H bond lengths from 0.8 to 2.0 Angstrom in 0.2 steps.
        bond_length = (0.8 + (i * 0.2))
        print(f'{i}: {bond_length:.02f}')
        coords = np.array((0.0, 0.0, 0.0, 0.0, 0.0, bond_length))
        geom = Geometry(atoms, coords)
        # TD-DFT calculator settings actually used below.
        td_kwargs = {'keywords': 'BP86 def2-TZVP', 'charge': 0, 'mult': 1, 'calc_number': i, 'blocks': '%tddft nroots 2 iroot 1 end', 'out_dir': THIS_DIR}
        # NOTE(review): this ground-state variant is built but never used —
        # presumably left over from an earlier version; confirm.
        kwargs = {'keywords': 'BP86 def2-SV(P)', 'charge': 0, 'mult': 1, 'calc_number': i, 'out_dir': THIS_DIR}
        orca = ORCA(**td_kwargs)
        geom.set_calculator(orca)
        geoms.append(geom)
    neb_kwargs = {'dask_cluster': scheduler}
    neb = NEB(geoms, **neb_kwargs)
    # Accessing .forces triggers the (possibly distributed) calculations.
    forces = neb.forces
    for f in forces.reshape((- 1), 6):
        print(f, f'{np.linalg.norm(f):.2f}')
    for geom in neb.images:
        print(geom.calculator.wfow)
class OptionSeriesScatterSonificationTracksMappingGapbetweennotes(Options):
    # NOTE(review): each same-named pair below reads as a getter/setter pair;
    # the "@property" / "@<name>.setter" decorators appear stripped in this
    # copy — confirm against the original file.

    def mapFunction(self):
        # Getter: configured mapping function (None when unset).
        return self._config_get(None)

    def mapFunction(self, value: Any):
        # Setter: stored as data, not emitted as raw JS (js_type=False).
        self._config(value, js_type=False)

    def mapTo(self):
        # Getter: data property this audio parameter maps to.
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Getter: upper bound of the mapped output range.
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        # Getter: lower bound of the mapped output range.
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        # Getter: scope the min/max normalisation applies within.
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
def test_integration_surfaces():
    """Check the integration surfaces generated by FieldProjectionAngleMonitor.

    Covers: planar monitor (single surface, +/- normal), full box (6 faces),
    box with excluded faces, and boxes with infinite dimensions (faces at
    infinity are dropped).
    """
    # Planar (zero-size-y) monitor: one surface, default normal is '+'.
    surfaces = td.FieldProjectionAngleMonitor(size=(2, 0, 2), theta=[1, 2], phi=[0], name='f', freqs=[.0]).integration_surfaces
    assert (len(surfaces) == 1)
    assert (surfaces[0].normal_dir == '+')
    # Same plane with an explicit '-' normal.
    surfaces = td.FieldProjectionAngleMonitor(size=(2, 0, 2), theta=[1, 2], phi=[0], name='f', freqs=[.0], normal_dir='-').integration_surfaces
    assert (len(surfaces) == 1)
    assert (surfaces[0].normal_dir == '-')
    # Full box: six faces, alternating '-'/'+' with matching name suffixes.
    surfaces = td.FieldProjectionAngleMonitor(size=(2, 2, 2), theta=[1, 2], phi=[0], name='f', freqs=[.0]).integration_surfaces
    assert (len(surfaces) == 6)
    for (idx, surface) in enumerate(surfaces):
        if (np.mod(idx, 2) == 0):
            assert (surface.normal_dir == '-')
            assert (surface.name[(- 1)] == '-')
        else:
            assert (surface.normal_dir == '+')
            assert (surface.name[(- 1)] == '+')
    # Excluded faces are removed from the surface list.
    surfaces = td.FieldProjectionAngleMonitor(size=(2, 2, 2), theta=[1], phi=[0], name='f', freqs=[.0], exclude_surfaces=['x-', 'y+']).integration_surfaces
    assert (len(surfaces) == 4)
    expected_surfs = ['x+', 'y-', 'z-', 'z+']
    for (idx, surface) in enumerate(surfaces):
        assert (surface.normal_dir == expected_surfs[idx][(- 1)])
        assert (surface.name[(- 2):] == expected_surfs[idx])
    # An infinite x-dimension drops the x-faces.
    surfaces = td.FieldProjectionAngleMonitor(size=(td.inf, 2, 2), theta=[1], phi=[0], name='f', freqs=[.0]).integration_surfaces
    assert (len(surfaces) == 4)
    expected_surfs = ['y-', 'y+', 'z-', 'z+']
    for (idx, surface) in enumerate(surfaces):
        assert (surface.normal_dir == expected_surfs[idx][(- 1)])
        assert (surface.name[(- 2):] == expected_surfs[idx])
    # Fully infinite box: no finite faces to integrate over.
    surfaces = td.FieldProjectionAngleMonitor(size=(td.inf, td.inf, td.inf), theta=[1], phi=[0], name='f', freqs=[.0]).integration_surfaces
    assert (len(surfaces) == 0)
def test_replay_decider_respond_query():
    """respond_query must send exactly one RespondQueryTaskCompletedRequest
    carrying the task token and the query result."""
    service: WorkflowService = Mock()
    service.respond_query_task_completed = Mock(return_value=(None, None))
    task_loop = DecisionTaskLoop(worker=Mock(), service=service)
    task_loop.respond_query(task_token=b'the-task-token', result=b'the-result', error_message=None)
    service.respond_query_task_completed.assert_called_once()
    call_args, _call_kwargs = service.respond_query_task_completed.call_args_list[0]
    sent_request = call_args[0]
    assert isinstance(sent_request, RespondQueryTaskCompletedRequest)
    assert sent_request.task_token == b'the-task-token'
    assert sent_request.query_result == b'the-result'
class DeleteMixinTest(QuickbooksUnitTestCase):
    def test_delete_unsaved_exception(self):
        # Deleting an object that has no Id must raise QuickbooksException.
        from quickbooks.exceptions import QuickbooksException
        bill = Bill()
        self.assertRaises(QuickbooksException, bill.delete, qb=self.qb_client)

    # NOTE(review): the bare string below looks like a stripped
    # "@patch('quickbooks.mixins.QuickBooks.delete_object')" decorator —
    # confirm against the original file.
    ('quickbooks.mixins.QuickBooks.delete_object')
    def test_delete(self, delete_object):
        # A saved object (Id set) delegates deletion to QuickBooks.delete_object.
        bill = Bill()
        bill.Id = 1
        bill.delete(qb=self.qb_client)
        self.assertTrue(delete_object.called)
def test_override_ledger_configurations_negative():
    """With no component overrides configured, _override_ledger_configurations
    must leave LedgerApis.ledger_api_configs untouched."""
    agent_config = MagicMock()
    agent_config.component_configurations = {}
    configs_before = deepcopy(LedgerApis.ledger_api_configs)
    _override_ledger_configurations(agent_config)
    configs_after = LedgerApis.ledger_api_configs
    assert configs_before == configs_after
class ContourModuleFactory(DataModuleFactory):
    """Applies the `contours` option to a module exposing a `contour` child.

    `contours` may be an int (number of automatic contours) or a sequence of
    explicit contour values.
    """

    contours = Any(5, help='Integer/list specifying number/list of\n contours. Specifying a list of values will only\n give the requested contours asked for.')

    def _contours_changed(self):
        # Distinguish "list of levels" from "number of levels" by whether the
        # value has a length (EAFP: ints raise TypeError on len()).
        contour_list = True
        try:
            len(self.contours)
        except TypeError:
            contour_list = False
        if contour_list:
            # Explicit levels: disable auto contours, pass the list through.
            self._target.contour.auto_contours = False
            self._target.contour.contours = self.contours
        else:
            # Fix: use isinstance() instead of the `type(...) == int` anti-idiom.
            assert isinstance(self.contours, int), 'The contours argument must be an integer'
            assert (self.contours > 0), 'The contours argument must be positive'
            self._target.contour.trait_set(auto_contours=True, number_of_contours=self.contours)
        if hasattr(self._target, 'enable_contours'):
            # Some modules gate contour display behind this flag.
            self._target.enable_contours = True
class CleanAfterTestCase(TestCase):
    # NOTE(review): the three bare tuples/strings below look like stripped
    # "@mock.patch(...)" decorators, and "_after" like a stripped
    # "@clean_after" decorator — confirm against the original file.
    ('aea.cli.utils.decorators.os.path.exists', return_value=True)
    ('aea.cli.utils.decorators._cast_ctx', (lambda x: x))
    ('aea.cli.utils.decorators.shutil.rmtree')
    def test_clean_after_positive(self, rmtree_mock, *mocks):
        # clean_after must rmtree every registered clean path when the
        # wrapped command raises ClickException.
        _after
        def func(click_context):
            ctx = cast(Context, click_context.obj)
            ctx.clean_paths.append('clean/path')
            raise ClickException('Message')
        with self.assertRaises(ClickException):
            func(ContextMock())
        rmtree_mock.assert_called_once_with('clean/path')
def predictRecallMonteCarlo(prior, tnow, N=(1000 * 1000)):
    """Monte Carlo estimate of recall-probability statistics at time `tnow`.

    `prior` is an Ebisu-style `(alpha, beta, t)` model: recall probability at
    elapsed time `t` is Beta(alpha, beta)-distributed. Returns a dict with
    the mean, median, histogram-mode, and variance of the recall probability
    at `tnow`, estimated from `N` samples.
    """
    import scipy.stats as stats
    alpha, beta, t = prior
    # Sample recall at the prior's horizon, then exponentiate to move the
    # samples from elapsed time t to elapsed time tnow.
    samples = stats.beta.rvs(alpha, beta, size=N) ** (tnow / t)
    # Histogram-based mode: center of the fullest auto-sized bin.
    counts, edges = np.histogram(samples, 'auto')
    centers = edges[:(- 1)] + np.diff(edges) / 2
    return dict(
        mean=np.mean(samples),
        median=np.median(samples),
        mode=centers[counts.argmax()],
        var=np.var(samples),
    )
def main(args=None):
    """CLI entry point: convert OTF (cubic/CFF) fonts to TTF (quadratic) outlines."""
    configLogger(logger=log)
    parser = argparse.ArgumentParser()
    parser.add_argument('input', nargs='+', metavar='INPUT')
    parser.add_argument('-o', '--output')
    parser.add_argument('-e', '--max-error', type=float, default=MAX_ERR)
    parser.add_argument('--post-format', type=float, default=POST_FORMAT)
    # store_false: passing --keep-direction disables contour reversal.
    parser.add_argument('--keep-direction', dest='reverse_direction', action='store_false')
    parser.add_argument('--face-index', type=int, default=0)
    parser.add_argument('--overwrite', action='store_true')
    options = parser.parse_args(args)
    if (options.output and (len(options.input) > 1)):
        # Multiple inputs can only be written into an output *directory*.
        if (not os.path.isdir(options.output)):
            parser.error('-o/--output option must be a directory when processing multiple fonts')
    for path in options.input:
        if (options.output and (not os.path.isdir(options.output))):
            # Single input with an explicit output file path.
            output = options.output
        else:
            # Derive <name>.ttf next to the input (or inside the output dir).
            output = makeOutputFileName(path, outputDir=options.output, extension='.ttf', overWrite=options.overwrite)
        font = TTFont(path, fontNumber=options.face_index)
        otf_to_ttf(font, post_format=options.post_format, max_err=options.max_error, reverse_direction=options.reverse_direction)
        font.save(output)
def pull_reddit(args, op):
    """Run the Reddit pull operation once and return its result dict.

    `args` supplies `data_folder` and `run_id`; `op` is the pull operator.
    Returns the operator's result, or {} when it produced nothing.
    """
    print('')
    print('# Pull from Reddit')
    print('')
    print(f'environment: {os.environ}')

    def run():
        # Fix: os.getenv returns a *string* when the variable is set, while
        # the default was the int 25 — cast so pulling_count is always an int.
        pulling_count = int(os.getenv('REDDIT_PULLING_COUNT', 25))
        return op.pull(pulling_count=pulling_count, pulling_interval=0, data_folder=args.data_folder, run_id=args.run_id)

    # utils.prun presumably wraps the call with retries/metrics — confirm;
    # fall back to an empty dict when it returns nothing.
    return (utils.prun(run) or {})
def test_ner_silver_to_gold(dataset, spacy_model):
    """ner.silver-to-gold merges accept/reject silver annotations into gold tasks."""
    silver_dataset = '__test_ner_silver_to_gold__'
    # Two conflicting answers for the same input (hash 1) plus one rejected
    # answer for a second input (hash 2).
    silver_examples = [{INPUT_HASH_ATTR: 1, TASK_HASH_ATTR: 11, 'text': 'Hello world', 'answer': 'accept', 'spans': [{'start': 0, 'end': 5, 'label': 'PERSON'}]}, {INPUT_HASH_ATTR: 1, TASK_HASH_ATTR: 12, 'text': 'Hello world', 'answer': 'reject', 'spans': [{'start': 6, 'end': 11, 'label': 'PERSON'}]}, {INPUT_HASH_ATTR: 2, TASK_HASH_ATTR: 21, 'text': 'This is a test', 'answer': 'reject', 'spans': [{'start': 5, 'end': 7, 'label': 'ORG'}]}]
    with tmp_dataset(silver_dataset, silver_examples):
        recipe = ner_silver_to_gold(silver_dataset, dataset, spacy_model)
        stream = list(recipe['stream'])
        assert (recipe['view_id'] == 'ner_manual')
        assert (recipe['dataset'] == dataset)
        # Duplicate inputs collapse to one task each, with tokenization added.
        assert (len(stream) == 2)
        assert (stream[0]['text'] == 'Hello world')
        assert ('tokens' in stream[0])
        assert (stream[1]['text'] == 'This is a test')
        assert ('tokens' in stream[1])
# NOTE(review): "_tag(takes_context=True)" looks like a stripped
# "@register.simple_tag(takes_context=True)"-style decorator — confirm
# against the original file.
_tag(takes_context=True)
def search_results(context, search_results):
    """Render the watson search-results include for the given results."""
    # Push a scope so the variables added here don't leak into the caller's
    # template context; the finally guarantees the pop even on render errors.
    context.push()
    try:
        context.update({'search_results': search_results, 'query': context['query']})
        return template.loader.render_to_string('watson/includes/search_results.html', context.flatten())
    finally:
        context.pop()
class GammaNormalModel():
    """Gamma-Normal model: precision ~ Gamma(shape, rate), data ~ Normal(mu, 1/sqrt(precision)).

    NOTE(review): the bare "_variable" lines look like stripped decorators
    (e.g. "@bm.random_variable") — confirm against the original file.
    """

    def __init__(self, shape: Tensor, rate: Tensor, mu: Tensor) -> None:
        # Hyperparameters of the Gamma prior and the Normal mean.
        self.shape_ = shape
        self.rate_ = rate
        self.mu_ = mu

    _variable
    def gamma(self) -> dist.Distribution:
        # Prior over the precision of the Normal likelihood.
        return dist.Gamma(self.shape_, self.rate_)

    _variable
    def normal(self) -> dist.Distribution:
        # Likelihood: standard deviation is 1/sqrt(precision).
        return dist.Normal(self.mu_, (1 / torch.sqrt(self.gamma())))
def add_version_to_css(app: Sphinx, pagename, templatename, context, doctree):
    """Append a content hash to local.css so browsers re-fetch it when it changes."""
    # Only the HTML builder has css_files to rewrite.
    if app.builder.name != 'html':
        return
    if '_static/local.css' not in context.get('css_files', {}):
        return
    # Hash the current stylesheet contents for cache busting.
    css_source = Path(app.srcdir, '_static/local.css').read_text('utf8')
    digest = hashlib.sha256(css_source.encode('utf-8')).hexdigest()
    position = context['css_files'].index('_static/local.css')
    context['css_files'][position] = f'_static/local.css?hash={digest}'
# NOTE(review): "_required(login_url='/login')" looks like a stripped
# "@login_required(login_url='/login')" decorator — confirm.
_required(login_url='/login')
def ArticleMe(request):
    """List visible articles from authors the current user follows, paginated (10/page)."""
    article = Article.objects.filter(authors__follow__fan_id=request.user.id, is_show=True)
    category = Category_Article.objects.all()
    # Optional category filter from the query string.
    type = request.GET.get('type', '')
    try:
        page = request.GET.get('page', 1)
        if type:
            # Re-query narrowed to the requested category.
            article = Article.objects.filter(authors__follow__fan_id=request.user.id, category_id=type, is_show=True)
        if (page == ''):
            page = 1
    except PageNotAnInteger:
        page = 1
    p = Paginator(article, 10, request=request)
    people = p.page(page)
    headlines = Headlines.objects.all()[:20]
    banners = Banners.objects.first()
    return render(request, 'pc/article_me.html', {'article': people, 'category': category, 'Headlines': headlines, 'banners': banners})
def _weights_init(m: nn.Module) -> None:
    """DCGAN-style weight init: N(0, 0.02) for Conv layers; N(1, 0.02) weights
    and zero bias for BatchNorm layers. Other modules are left untouched."""
    layer_name = type(m).__name__
    if 'Conv' in layer_name:
        torch.nn.init.normal_(m.weight, 0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        torch.nn.init.normal_(m.weight, 1.0, 0.02)
        torch.nn.init.zeros_(m.bias)
class BaseTestMissingValuesRenderer(TestRenderer):
    """Base renderer with shared table-building helpers for missing-values tests."""

    # Maps special missing-value markers to human-readable labels for display.
    MISSING_VALUES_NAMING_MAPPING = {None: 'Pandas nulls (None, NAN, etc.)', '': '"" (empty string)', np.inf: 'Numpy "inf" value', (- np.inf): 'Numpy "-inf" value'}

    @staticmethod
    def _get_number_and_percents_of_missing_values(missing_values_info: DatasetMissingValues) -> pd.DataFrame:
        """Per-column DataFrame with columns `value` (raw count) and `display` ("count (pct%)").

        Fixes: restored `@staticmethod` (the function takes no `self` yet is
        invoked as `self._get_number_and_percents_of_missing_values(...)`),
        and removed a dead loop that built an unused `result` dict.
        """
        counts = missing_values_info.number_of_missing_values_by_column
        shares = missing_values_info.share_of_missing_values_by_column
        return pd.DataFrame.from_dict({name: dict(value=counts[name], display=f'{counts[name]} ({(shares[name] * 100):.2f}%)') for name in counts.keys()}, orient='index', columns=['value', 'display'])

    def get_table_with_missing_values_and_percents_by_column(self, info: TestHtmlInfo, metric_result: DatasetMissingValuesMetricResult, name: str) -> TestHtmlInfo:
        """Attach a per-column current-vs-reference missing-values table to `info` and return it."""
        columns = ['column name', 'current number of missing values']
        dict_curr = self._get_number_and_percents_of_missing_values(metric_result.current)
        dict_ref = None
        reference_stats = metric_result.reference
        if (reference_stats is not None):
            # Reference data present: add a third comparison column.
            columns.append('reference number of missing values')
            dict_ref = self._get_number_and_percents_of_missing_values(reference_stats)
        additional_plots = dataframes_to_table(dict_curr, dict_ref, columns, name)
        info.details = additional_plots
        return info

    def _replace_missing_values_to_description(self, values: dict) -> dict:
        """Replace special marker keys (None, '', +/-inf) with readable labels; others pass through."""
        return {self.MISSING_VALUES_NAMING_MAPPING.get(k, k): v for (k, v) in values.items()}

    def get_table_with_number_of_missing_values_by_one_missing_value(self, info: TestHtmlInfo, current_missing_values: dict, reference_missing_values: Optional[dict], name: str) -> TestHtmlInfo:
        """Attach a per-missing-value-kind current-vs-reference table to `info` and return it."""
        columns = ['missing value', 'current number of missing values']
        dict_curr = self._replace_missing_values_to_description(current_missing_values)
        dict_ref: Optional[dict] = None
        if (reference_missing_values is not None):
            columns.append('reference number of missing values')
            dict_ref = self._replace_missing_values_to_description(reference_missing_values)
        additional_plots = plot_dicts_to_table(dict_curr, dict_ref, columns, name)
        info.details = additional_plots
        return info
def load(libmagick):
    """Declare ctypes argtypes/restype for the MagickCore functions wand uses.

    Symbols that may be absent in some ImageMagick builds are wrapped in
    try/except AttributeError and replaced with None when missing.

    Fix: ParseChannelOption previously assigned ``restypes`` (a typo), which
    silently left its restype at the ctypes default (c_int); it now sets
    ``restype`` to c_ssize_t as intended.
    """
    libmagick.AcquireExceptionInfo.argtypes = []
    libmagick.AcquireExceptionInfo.restype = c_void_p
    libmagick.AcquireImageInfo.argtypes = []
    libmagick.AcquireImageInfo.restype = c_void_p
    libmagick.CloneImageInfo.argtypes = [c_void_p]
    libmagick.CloneImageInfo.restype = c_void_p
    libmagick.CloneImages.argtypes = [c_void_p, c_char_p, c_void_p]
    libmagick.CloneImages.restype = c_void_p
    libmagick.DestroyExceptionInfo.argtypes = [c_void_p]
    libmagick.DestroyExceptionInfo.restype = c_void_p
    libmagick.DestroyImage.argtypes = [c_void_p]
    libmagick.DestroyImage.restype = c_void_p
    libmagick.DestroyImageInfo.argtypes = [c_void_p]
    libmagick.DestroyImageInfo.restype = c_void_p
    libmagick.DestroyString.argtypes = [c_void_p]
    libmagick.DestroyString.restype = c_void_p
    try:
        libmagick.GetGeometry.argtypes = [c_char_p, POINTER(c_ssize_t), POINTER(c_ssize_t), POINTER(c_size_t), POINTER(c_size_t)]
        libmagick.GetGeometry.restype = c_int
    except AttributeError:
        # Symbol not exported by this ImageMagick build.
        libmagick.GetGeometry = None
    libmagick.GetMagickCopyright.argtypes = []
    libmagick.GetMagickCopyright.restype = c_char_p
    try:
        libmagick.GetMagickDelegates.argtypes = []
        libmagick.GetMagickDelegates.restype = c_char_p
    except AttributeError:
        libmagick.GetMagickDelegates = None
    libmagick.GetMagickFeatures.argtypes = []
    libmagick.GetMagickFeatures.restype = c_char_p
    try:
        libmagick.GetMagickLicense.argtypes = []
        libmagick.GetMagickLicense.restype = c_char_p
    except AttributeError:
        pass
    libmagick.GetMagickPackageName.argtypes = []
    libmagick.GetMagickPackageName.restype = c_char_p
    libmagick.GetMagickQuantumDepth.argtypes = [POINTER(c_size_t)]
    libmagick.GetMagickQuantumDepth.restype = c_char_p
    libmagick.GetMagickQuantumRange.argtypes = [POINTER(c_size_t)]
    libmagick.GetMagickQuantumRange.restype = c_char_p
    libmagick.GetMagickReleaseDate.argtypes = []
    libmagick.GetMagickReleaseDate.restype = c_char_p
    libmagick.GetMagickResource.argtypes = [c_int]
    libmagick.GetMagickResource.restype = c_ulonglong
    libmagick.GetMagickResourceLimit.argtypes = [c_int]
    libmagick.GetMagickResourceLimit.restype = c_ulonglong
    libmagick.GetMagickVersion.argtypes = [POINTER(c_size_t)]
    libmagick.GetMagickVersion.restype = c_char_p
    try:
        libmagick.GetPageGeometry.argtypes = [c_char_p]
        libmagick.GetPageGeometry.restype = c_void_p
    except AttributeError:
        libmagick.GetPageGeometry = None
    libmagick.GetNextImageInList.argtypes = [c_void_p]
    libmagick.GetNextImageInList.restype = c_void_p
    libmagick.MagickToMime.argtypes = [c_char_p]
    libmagick.MagickToMime.restype = c_void_p
    try:
        libmagick.ParseAbsoluteGeometry.argtypes = [c_char_p, c_void_p]
        libmagick.ParseAbsoluteGeometry.restype = c_int
    except AttributeError:
        libmagick.ParseAbsoluteGeometry = None
    try:
        libmagick.ParseChannelOption.argtypes = [c_char_p]
        # Fixed: was `restypes` (typo), leaving restype at the c_int default.
        libmagick.ParseChannelOption.restype = c_ssize_t
    except AttributeError:
        libmagick.ParseChannelOption = None
    try:
        libmagick.ParseGeometry.argtypes = [c_char_p, c_void_p]
        libmagick.ParseGeometry.restype = c_int
        libmagick.ParseMetaGeometry.argtypes = [c_char_p, POINTER(c_ssize_t), POINTER(c_ssize_t), POINTER(c_size_t), POINTER(c_size_t)]
        libmagick.ParseMetaGeometry.restype = c_int
    except AttributeError:
        libmagick.ParseGeometry = None
        libmagick.ParseMetaGeometry = None
    libmagick.SetImageOption.argtypes = [c_void_p, c_char_p, c_char_p]
    libmagick.SetImageOption.restype = c_bool
    libmagick.SetMagickResourceLimit.argtypes = [c_int, c_ulonglong]
    libmagick.SetMagickResourceLimit.restype = c_int
    libmagick.SyncImageSettings.argtypes = [c_void_p, c_void_p, c_void_p]
    libmagick.SyncImageSettings.restype = c_bool
def extractTodstlBlogspotCom(item):
    """Map a todstl.blogspot.com feed item to a release message.

    Returns None for previews / items without a chapter or volume, a release
    message for recognised tags, and False otherwise.
    """
    (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or ('preview' in item['title'].lower()):
        return None
    # tag -> (series name, translation type); insertion order preserved.
    tag_to_series = {'PRC': ('PRC', 'translated'), 'Loiterous': ('Loiterous', 'oel')}
    for tagname, (name, tl_type) in tag_to_series.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def validate_header(header: Header, parent_header: Header) -> None:
    """Validate a block header against its parent per the consensus rules.

    Checks timestamp ordering, consecutive block numbers, gas-limit bounds,
    extra-data size, the canonical difficulty formula, the parent-hash link,
    and finally proof of work. Each failed check raises InvalidBlock (via
    `ensure`); validate_proof_of_work raises on an invalid seal.
    """
    parent_has_ommers = (parent_header.ommers_hash != EMPTY_OMMER_HASH)
    # Timestamps strictly increase and block numbers are consecutive.
    ensure((header.timestamp > parent_header.timestamp), InvalidBlock)
    ensure((header.number == (parent_header.number + 1)), InvalidBlock)
    # Gas limit may only drift within protocol bounds of the parent's.
    ensure(check_gas_limit(header.gas_limit, parent_header.gas_limit), InvalidBlock)
    ensure((len(header.extra_data) <= 32), InvalidBlock)
    # Difficulty must match the canonical adjustment formula exactly.
    block_difficulty = calculate_block_difficulty(header.number, header.timestamp, parent_header.timestamp, parent_header.difficulty, parent_has_ommers)
    ensure((header.difficulty == block_difficulty), InvalidBlock)
    # Parent hash is the keccak256 of the RLP-encoded parent header.
    block_parent_hash = keccak256(rlp.encode(parent_header))
    ensure((header.parent_hash == block_parent_hash), InvalidBlock)
    validate_proof_of_work(header)
def url_unparse(components):
    """Reassemble a (scheme, netloc, path, query, fragment) 5-tuple into a URL.

    The inverse of url_parse: the netloc is prefixed with '//' when present
    (or when an absolute path follows a scheme), and scheme/query/fragment
    are attached with ':', '?' and '#' respectively.
    """
    (scheme, netloc, path, query, fragment) = components
    # Authority part: emit '//netloc' when there is a netloc, or when the
    # scheme is followed by an absolute path (keeps 'scheme:///p' forms).
    if netloc or (scheme and path.startswith('/')):
        if path and not path.startswith('/'):
            path = '/' + path
        url = '//' + (netloc or '') + path
    elif path:
        url = path
    else:
        url = ''
    if scheme:
        url = scheme + ':' + url
    if query:
        url = url + '?' + query
    if fragment:
        url = url + '#' + fragment
    return url
class SubcomponentList(PaginationMixin, AgencyBase):
    """Agency sub-components (bureaus): File B obligations/outlays merged with
    File A budgetary resources for a toptier agency and fiscal year."""

    endpoint_doc = 'usaspending_api/api_contracts/contracts/v2/agency/toptier_code/sub_components.md'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Request params validated by the AgencyBase machinery.
        self.params_to_validate = ['toptier_code', 'fiscal_year']

    # NOTE(review): "_response()" looks like a stripped decorator (e.g. a
    # cache/response decorator) — confirm against the original file.
    _response()
    def get(self, request: Request, *args: Any, **kwargs: Any) -> Response:
        # Allowed sort columns for this endpoint; default is obligations.
        self.sortable_columns = ['name', 'total_obligations', 'total_outlays', 'total_budgetary_resources']
        self.default_sort_column = 'total_obligations'
        results = self.format_results(self.get_file_a_queryset(), self.get_file_b_queryset())
        page_metadata = get_pagination_metadata(len(results), self.pagination.limit, self.pagination.page)
        return Response({'toptier_code': self.toptier_code, 'fiscal_year': self.fiscal_year, 'results': results[self.pagination.lower_limit:self.pagination.upper_limit], 'messages': self.standard_response_messages, 'page_metadata': page_metadata})

    def format_results(self, file_a_response, file_b_response):
        # Merge the two querysets on their shared "bureau_info" key, which is
        # a "<title>;<slug>" string; File B values extend/overwrite File A's.
        combined_list_dict = {}
        for row in file_a_response:
            combined_list_dict[row['bureau_info']] = row
        for row in file_b_response:
            if (row['bureau_info'] not in combined_list_dict):
                combined_list_dict[row['bureau_info']] = row
            else:
                combined_list_dict[row['bureau_info']].update(row)
        combined_response = [value for (key, value) in combined_list_dict.items()]
        # NOTE(review): rows present in only one file lack the other file's
        # amount keys; the direct x['total_...'] lookups below assume every
        # merged row carries all three keys — verify against live data.
        results = sort_with_null_last(to_sort=[{'name': (x['bureau_info'].split(';')[0] if (x.get('bureau_info') is not None) else None), 'id': (x['bureau_info'].split(';')[1] if (x.get('bureau_info') is not None) else None), 'total_obligations': (x['total_obligations'] if x['total_obligations'] else None), 'total_outlays': (x['total_outlays'] if x['total_outlays'] else None), 'total_budgetary_resources': (x['total_budgetary_resources'] if x['total_budgetary_resources'] else None)} for x in combined_response], sort_key=self.pagination.sort_key, sort_order=self.pagination.sort_order)
        return results

    def get_file_a_queryset(self):
        # File A: total budgetary resources per bureau, from account balances.
        (filters, bureau_info_subquery) = self.get_common_query_objects('treasury_account_identifier')
        results = AppropriationAccountBalances.objects.filter(*filters).annotate(bureau_info=bureau_info_subquery).values('bureau_info').annotate(total_budgetary_resources=Sum('total_budgetary_resources_amount_cpe')).exclude(bureau_info__isnull=True).values('bureau_info', 'total_budgetary_resources')
        return results

    def get_file_b_queryset(self):
        # File B: obligations and outlays per bureau, from program activity /
        # object class financial accounts.
        (filters, bureau_info_subquery) = self.get_common_query_objects('treasury_account')
        results = FinancialAccountsByProgramActivityObjectClass.objects.filter(*filters).annotate(bureau_info=bureau_info_subquery).values('bureau_info').annotate(total_obligations=Sum('obligations_incurred_by_program_object_class_cpe'), total_outlays=Sum('gross_outlay_amount_by_program_object_class_cpe')).exclude(bureau_info__isnull=True).values('bureau_info', 'total_obligations', 'total_outlays')
        return results

    def get_common_query_objects(self, treasury_account_keyword):
        # Latest *revealed* submission period for the requested fiscal year.
        latest = SubmissionAttributes.objects.filter(submission_window__submission_reveal_date__lte=now(), reporting_fiscal_year=self.fiscal_year).values('reporting_fiscal_year').annotate(max_fiscal_period=Max(F('reporting_fiscal_period'))).values('max_fiscal_period')
        # Restrict to this agency, fiscal year, and that latest period.
        filters = [Q(**{f'{treasury_account_keyword}__federal_account__parent_toptier_agency': self.toptier_agency}), Q(submission__reporting_fiscal_year=self.fiscal_year), Q(submission__reporting_fiscal_period=latest[0]['max_fiscal_period'])]
        # Subquery producing "<title>;<slug>", with special-cased military
        # branches keyed off the federal account code prefix.
        bureau_info_subquery = Subquery(BureauTitleLookup.objects.filter(federal_account_code=OuterRef(f'{treasury_account_keyword}__federal_account__federal_account_code')).exclude(federal_account_code__isnull=True).annotate(bureau_info=Case(When(federal_account_code__startswith='057', then=ConcatAll(Value('Air Force'), Value(';'), Value('air-force'))), When(federal_account_code__startswith='021', then=ConcatAll(Value('Army'), Value(';'), Value('army'))), When(federal_account_code__startswith='017', then=ConcatAll(Value('Navy, Marine Corps'), Value(';'), Value('navy-marine-corps'))), When(federal_account_code__startswith='097', then=ConcatAll(Value('Defense-wide'), Value(';'), Value('defense-wide'))), default=ConcatAll(F('bureau_title'), Value(';'), F('bureau_slug')), output_field=TextField())).values('bureau_info'))
        return (filters, bureau_info_subquery)
class OptionSeriesHeatmapSonificationTracksMappingNoteduration(Options):
    """Note-duration mapping options for heatmap sonification tracks.

    NOTE(review): in the file as found, each getter/setter pair shared a name,
    so the setter definition silently shadowed the getter (the @property /
    @x.setter decorators were evidently stripped). Restored here -- confirm
    against VCS history.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
# NOTE(review): the decorator arrived garbled as "_os(*metadata.platforms)";
# reconstructed as the standard RTA platform guard -- confirm against VCS.
@common.requires_os(*metadata.platforms)
def main():
    """Emulate suspicious scheduled-task activity: copy the test binary to a
    public path, execute it with schedule/powershell-looking arguments, then
    clean up the dropped file."""
    svchost = 'C:\\Users\\Public\\svchost.exe'
    powershell = 'C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe'
    common.copy_file(EXE_FILE, svchost)
    # timeout/kill keep the spawned process from outliving the test
    common.execute([svchost, '/c', 'echo', 'schedule', f';{powershell}', 'C:\\Users\\A'], timeout=5, kill=True)
    common.remove_files(svchost)
def _add_tests():
    """Scan the packet_data directories and dynamically register one parser
    test method per (.packet, .json) fixture pair on Test_Parser.

    Fix: the original leaked file handles by calling open(...).read() without
    closing; reads now use context managers.
    """
    import os
    import os.path
    import functools
    this_dir = os.path.dirname(sys.modules[__name__].__file__)
    packet_data_dir = os.path.join(this_dir, '../../packet_data')
    json_dir = os.path.join(this_dir, 'json')
    ofvers = ['of10', 'of12', 'of13', 'of14', 'of15']
    cases = set()
    for ver in ofvers:
        pdir = packet_data_dir + '/' + ver
        jdir = json_dir + '/' + ver
        n_added = 0
        for file in os.listdir(pdir):
            if file.endswith('.packet'):
                truncated = None
            elif '.truncated' in file:
                # e.g. "foo.packet.truncated40" -> base fixture + byte count
                (s1, s2) = file.split('.truncated')
                try:
                    truncated = int(s2)
                except ValueError:
                    continue
                file = s1 + '.packet'
            else:
                continue
            with open(pdir + '/' + file, 'rb') as wire_file:
                wire_msg = wire_file.read()
            if not truncated:
                json_path = jdir + '/' + file + '.json'
            else:
                json_path = jdir + '/' + file + ('.truncated%d.json' % truncated)
                # truncated fixtures exercise short-read handling
                wire_msg = wire_msg[:truncated]
            with open(json_path, 'r') as json_file:
                json_str = json_file.read()
            method_name = ('test_' + file).replace('-', '_').replace('.', '_')
            if truncated:
                method_name += ('_truncated%d' % truncated)

            def _run(self, name, wire_msg, json_str):
                print('processing %s ...' % name)
                if six.PY3:
                    self._test_msg(self, name, wire_msg, json_str)
                else:
                    self._test_msg(name, wire_msg, json_str)
            print('adding %s ...' % method_name)
            # functools.partial binds the per-fixture values now, avoiding the
            # late-binding-closure pitfall inside this loop
            f = functools.partial(_run, name=method_name, wire_msg=wire_msg, json_str=json_str)
            test_lib.add_method(Test_Parser, method_name, f)
            cases.add(method_name)
            n_added += 1
        assert n_added > 0
    assert cases == set(unittest.defaultTestLoader.getTestCaseNames(Test_Parser))
class queue_op_failed_error_msg(error_msg):
    """OpenFlow 1.3 OFPET_QUEUE_OP_FAILED error message (generated wire codec).

    NOTE(review): unpack() takes only `reader` -- its @staticmethod decorator
    was evidently stripped from this file; restored (loxi-generated classes
    declare unpack as a static method). Confirm against VCS.
    """
    # wire-format constants
    version = 4     # OpenFlow 1.3
    type = 1        # OFPT_ERROR
    err_type = 9    # OFPET_QUEUE_OP_FAILED

    def __init__(self, xid=None, code=None, data=None):
        if xid != None:
            self.xid = xid
        else:
            self.xid = None
        if code != None:
            self.code = code
        else:
            self.code = 0
        if data != None:
            self.data = data
        else:
            self.data = ''
        return

    def pack(self):
        """Serialize to wire format; the length field (offset 2) is patched
        in after the total size is known."""
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))  # placeholder for length
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.err_type))
        packed.append(struct.pack('!H', self.code))
        packed.append(self.data)
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)

    @staticmethod
    def unpack(reader):
        """Deserialize from a reader positioned at the message start."""
        obj = queue_op_failed_error_msg()
        _version = reader.read('!B')[0]
        assert _version == 4
        _type = reader.read('!B')[0]
        assert _type == 1
        _length = reader.read('!H')[0]
        orig_reader = reader
        # bound the reader to this message's declared length
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _err_type = reader.read('!H')[0]
        assert _err_type == 9
        obj.code = reader.read('!H')[0]
        obj.data = str(reader.read_all())
        return obj

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        if self.xid != other.xid:
            return False
        if self.code != other.code:
            return False
        if self.data != other.data:
            return False
        return True

    def pretty_print(self, q):
        """Write a human-readable rendering to pretty-printer *q*."""
        q.text('queue_op_failed_error_msg {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if self.xid != None:
                    q.text('%#x' % self.xid)
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('code = ')
                value_name_map = {0: 'OFPQOFC_BAD_PORT', 1: 'OFPQOFC_BAD_QUEUE', 2: 'OFPQOFC_EPERM'}
                if self.code in value_name_map:
                    q.text('%s(%d)' % (value_name_map[self.code], self.code))
                else:
                    q.text('%#x' % self.code)
                q.text(',')
                q.breakable()
                q.text('data = ')
                q.pp(self.data)
            q.breakable()
        q.text('}')
class TestDeleteIlmPolicyRunner:
    """Tests for runner.DeleteIlmPolicy with and without request-params.

    NOTE(review): the method decorators arrived garbled in this file as
    "('elasticsearch.Elasticsearch') .asyncio"; reconstructed below as the
    conventional mock.patch + pytest.mark.asyncio pair -- confirm against VCS.
    """
    params = {'policy-name': 'my-ilm-policy', 'request-params': {'master_timeout': '30s', 'timeout': '30s'}}

    @mock.patch('elasticsearch.Elasticsearch')
    @pytest.mark.asyncio
    async def test_delete_ilm_policy_with_request_params(self, es):
        es.ilm.delete_lifecycle = mock.AsyncMock(return_value={})
        delete_ilm_policy = runner.DeleteIlmPolicy()
        result = await delete_ilm_policy(es, params=self.params)
        assert result == {'weight': 1, 'unit': 'ops', 'success': True}
        # request-params must be forwarded to the client call
        es.ilm.delete_lifecycle.assert_awaited_once_with(name=self.params['policy-name'], master_timeout=self.params['request-params'].get('master_timeout'), timeout=self.params['request-params'].get('timeout'), error_trace=None, filter_path=None, ignore=[404])

    @mock.patch('elasticsearch.Elasticsearch')
    @pytest.mark.asyncio
    async def test_delete_ilm_policy_without_request_params(self, es):
        es.ilm.delete_lifecycle = mock.AsyncMock(return_value={})
        delete_ilm_policy = runner.DeleteIlmPolicy()
        params = copy.deepcopy(self.params)
        del params['request-params']
        result = await delete_ilm_policy(es, params=params)
        assert result == {'weight': 1, 'unit': 'ops', 'success': True}
        # with no request-params the optional timeouts default to None
        es.ilm.delete_lifecycle.assert_awaited_once_with(name=self.params['policy-name'], master_timeout=None, timeout=None, error_trace=None, filter_path=None, ignore=[404])
class _RichComparison(Matcher):
    """Matcher accepting any instance of *klass* that satisfies every supplied
    rich-comparison bound (lt/le/eq/ne/ge/gt); bounds left as None are skipped."""

    def __init__(self, klass: type, lt: Optional[AnyType] = None, le: Optional[AnyType] = None, eq: Optional[AnyType] = None, ne: Optional[AnyType] = None, ge: Optional[AnyType] = None, gt: Optional[AnyType] = None) -> None:
        self.klass = klass
        self.lt = lt
        self.le = le
        self.eq = eq
        self.ne = ne
        self.ge = ge
        self.gt = gt

    def __eq__(self, other: AnyType) -> bool:
        # Type gate first; then every configured bound must hold.
        if not isinstance(other, self.klass):
            return False
        checks = (
            (self.lt, lambda bound: other < bound),
            (self.le, lambda bound: other <= bound),
            (self.eq, lambda bound: other == bound),
            (self.ne, lambda bound: other != bound),
            (self.ge, lambda bound: other >= bound),
            (self.gt, lambda bound: other > bound),
        )
        return all(bound is None or holds(bound) for bound, holds in checks)

    def __repr__(self) -> str:
        bounds = ''.join(
            f' {label}={bound}'
            for label, bound in (('lt', self.lt), ('le', self.le), ('eq', self.eq), ('ne', self.ne), ('ge', self.ge), ('gt', self.gt))
            if bound is not None
        )
        return '<{} 0x{:02X}{}>'.format(type(self).__name__, id(self), bounds)
def load_mnemonic_arguments_decorator(function: Callable[..., Any]) -> Callable[..., Any]:
    """Attach the CLI options shared by commands that load an existing mnemonic.

    Adds two click-style options to *function*:
      --mnemonic           validated via validate_mnemonic, prompted when absent
      --mnemonic-password  hidden, confirmed twice, defaults to '' (no prompt)

    Help/prompt strings are deferred via lambdas so load_text resolves them in
    the active language at invocation time, not at import time.
    """
    decorators = [
        jit_option(
            callback=validate_mnemonic,
            help=(lambda: load_text(['arg_mnemonic', 'help'], func='existing_mnemonic')),
            param_decls='--mnemonic',
            prompt=(lambda: load_text(['arg_mnemonic', 'prompt'], func='existing_mnemonic')),
            type=str,
        ),
        jit_option(
            # captive_prompt_callback(processor, prompt, confirm, mismatch, hide)
            callback=captive_prompt_callback(
                (lambda x: x),
                (lambda: load_text(['arg_mnemonic_password', 'prompt'], func='existing_mnemonic')),
                (lambda: load_text(['arg_mnemonic_password', 'confirm'], func='existing_mnemonic')),
                (lambda: load_text(['arg_mnemonic_password', 'mismatch'], func='existing_mnemonic')),
                True,
            ),
            default='',
            help=(lambda: load_text(['arg_mnemonic_password', 'help'], func='existing_mnemonic')),
            hidden=True,
            param_decls='--mnemonic-password',
            prompt=False,
        ),
    ]
    # Apply in reverse so the options appear in declaration order on the CLI.
    for decorator in reversed(decorators):
        function = decorator(function)
    return function
class OptionSeriesPieSonificationDefaultinstrumentoptionsMappingPlaydelay(Options):
    """Play-delay mapping options for pie-series sonification instruments.

    NOTE(review): getter/setter pairs shared a name in the file as found, so
    the setters shadowed the getters (@property / @x.setter decorators were
    evidently stripped). Restored -- confirm against VCS.
    """

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class Event(object):
    """Minimal multicast event: registered callbacks are invoked in
    subscription order when the event object is called."""

    def __init__(self) -> None:
        # callbacks, in subscription order
        self._event_handler = []

    def subscribe(self, fn):
        """Register *fn*; returns self so calls can be chained."""
        self._event_handler.append(fn)
        return self

    def unsubscribe(self, fn):
        """Remove a previously registered *fn*; returns self."""
        self._event_handler.remove(fn)
        return self

    def __iadd__(self, fn):
        # `event += fn` is an alias for subscribe()
        return self.subscribe(fn)

    def __isub__(self, fn):
        # `event -= fn` is an alias for unsubscribe()
        return self.unsubscribe(fn)

    def __call__(self, *args, **keywargs):
        # Iterate the live list deliberately: handlers added/removed during
        # dispatch affect this invocation, matching original behavior.
        for handler in self._event_handler:
            handler(*args, **keywargs)
class OptionPlotoptionsHistogramAccessibility(Options):
    """Accessibility options for histogram plot series.

    NOTE(review): getter/setter pairs shared a name in the file as found, so
    the setters shadowed the getters (@property / @x.setter decorators were
    evidently stripped). Restored -- confirm against VCS.
    """

    @property
    def description(self):
        return self._config_get(None)

    @description.setter
    def description(self, text: str):
        self._config(text, js_type=False)

    @property
    def descriptionFormat(self):
        return self._config_get(None)

    @descriptionFormat.setter
    def descriptionFormat(self, text: str):
        self._config(text, js_type=False)

    @property
    def enabled(self):
        return self._config_get(None)

    @enabled.setter
    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def exposeAsGroupOnly(self):
        return self._config_get(None)

    @exposeAsGroupOnly.setter
    def exposeAsGroupOnly(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def keyboardNavigation(self) -> 'OptionPlotoptionsHistogramAccessibilityKeyboardnavigation':
        # read-only sub-options node
        return self._config_sub_data('keyboardNavigation', OptionPlotoptionsHistogramAccessibilityKeyboardnavigation)

    @property
    def point(self) -> 'OptionPlotoptionsHistogramAccessibilityPoint':
        # read-only sub-options node
        return self._config_sub_data('point', OptionPlotoptionsHistogramAccessibilityPoint)
class RPCSocket(object):
    """Socket wrapper owning a stream socket, a frame reader and an RPC parser.

    Fix: `socket` and `parser` are accessors written without arguments and
    consumed attribute-style (`self.socket.close()` in release()), which only
    works if they are properties -- the @property decorators were evidently
    stripped from this file. Restored.
    """
    __s = None        # underlying socket.socket
    __reader = None   # frame reader
    __parser = None   # RPC message parser

    def __init__(self, *args, is_unix_socket=False, is_ipv6=False, **kwargs):
        self.__as_service = False
        self.__reader = reader.reader()
        self.__parser = RPCParser()
        # pick the address family from the flags (unix > ipv6 > ipv4)
        if is_unix_socket:
            fa = socket.AF_UNIX
        elif is_ipv6:
            fa = socket.AF_INET6
        else:
            fa = socket.AF_INET
        self.__s = socket.socket(fa, socket.SOCK_STREAM)
        self.myinit(*args, **kwargs)

    def myinit(self, *args, **kwargs):
        """Subclass hook; receives the extra constructor arguments."""
        pass

    @property
    def socket(self):
        """The underlying stream socket."""
        return self.__s

    @property
    def parser(self):
        """The RPC message parser."""
        return self.__parser

    def release(self):
        """Close the underlying socket."""
        self.socket.close()
def hash_matches(fname, known_hash, strict=False, source=None):
    """Check the file at *fname* against *known_hash*.

    A None known_hash disables verification (returns True). With strict=True a
    mismatch raises ValueError, naming *source* (or the file path) in the
    message; otherwise the boolean result is returned.
    """
    if known_hash is None:
        # Nothing to verify against.
        return True
    algorithm = hash_algorithm(known_hash)
    actual_hash = file_hash(fname, alg=algorithm)
    # known_hash may carry an "algo:" prefix; compare case-insensitively.
    expected_hash = known_hash.split(':')[-1]
    matches = actual_hash.lower() == expected_hash.lower()
    if strict and not matches:
        if source is None:
            source = str(fname)
        raise ValueError(f'{algorithm.upper()} hash of downloaded file ({source}) does not match the known hash: expected {known_hash} but got {actual_hash}. Deleted download for safety. The downloaded file may have been corrupted or the known hash may be outdated.')
    return matches
class JsonConverter(DataConverterBase):
    """Parses benchmark output rows containing embedded JSON objects and
    aggregates them into per-metric details.

    NOTE(review): getName() was defined without `self` in the file as found --
    its @staticmethod decorator was evidently stripped; restored. Confirm
    against VCS.
    """

    def __init__(self):
        super(JsonConverter, self).__init__()

    @staticmethod
    def getName():
        """Registry name of this converter."""
        return 'json_converter'

    def collect(self, data, args=None):
        """Extract one JSON object per row; returns (results, valid_run_idxs).

        Rows that fail to parse are skipped with a log message. Results before
        the first "valid run" marker (NET/custom_output entries) are dropped.
        """
        rows = self._prepareData(data)
        results = []
        valid_run_idxs = []
        for row in rows:
            try:
                # greedy match: grabs from the first '{' to the last '}'
                pattern = '\\{.*\\}'
                match = re.findall(pattern, row)
                if (match is None) or (len(match) == 0):
                    getLogger().debug('Format not correct, skip one row %s \n' % row)
                else:
                    result = json.loads(match[0])
                    if (('type' in result) and (result['type'] == 'NET') and ('value' in result)) or ('NET' in result) or ('custom_output' in result):
                        valid_run_idxs.append(len(results))
                    results.append(result)
            except Exception as e:
                getLogger().info('Skip one row %s \n Exception: %s' % (row, str(e)))
                pass
        if len(valid_run_idxs) > 0:
            # discard warm-up entries that precede the first valid run
            results = results[valid_run_idxs[0]:]
        return (results, valid_run_idxs)

    def convert(self, data):
        """Aggregate collected entries into {metric-key: detail-dict} form."""
        details = collections.defaultdict(lambda: collections.defaultdict(list))
        for d in data:
            if 'custom_output' in d:
                table_name = d['table_name'] if 'table_name' in d else 'Custom Output'
                details['custom_output'][table_name].append(d['custom_output'])
            elif ('type' in d) and ('metric' in d) and ('unit' in d):
                # flat entry form: one metric per dict
                key = d['type'] + ' ' + d['metric']
                if 'info_string' in d:
                    if 'info_string' in details[key]:
                        old_string = details[key]['info_string']
                        new_string = d['info_string']
                        if old_string != new_string:
                            getLogger().warning(('info_string values for {} '.format(key) + 'do not match.\n' + 'Current info_string: ' + '{}\n '.format(old_string) + 'does not match new ' + 'info_string: ' + '{}'.format(new_string)))
                    else:
                        details[key]['info_string'] = d['info_string']
                if 'value' in d:
                    details[key]['values'].append(float(d['value']))
                if 'num_runs' in d:
                    details[key]['num_runs'] = d['num_runs']
                if 'summary' in d:
                    details[key]['summary'] = d['summary']
                self._updateOneEntry(details[key], d, 'type')
                self._updateOneEntry(details[key], d, 'metric')
                self._updateOneEntry(details[key], d, 'unit')
            else:
                # nested form: {type: {metric: {value/unit/info_string}}}
                for (k, v) in d.items():
                    if not isinstance(v, dict):
                        continue
                    for (kk, vv) in v.items():
                        key = k + ' ' + kk
                        if 'info_string' in vv:
                            if 'info_string' in details[key]:
                                assert (details[key]['info_string'] == vv['info_string']), (('info_string values for {} '.format(key) + 'do not match.\n') + 'Current info_string:\n{}\n '.format(details[key]['info_string']) + 'does not match new info_string:\n{}'.format(vv['info_string']))
                            else:
                                details[key]['info_string'] = vv['info_string']
                        else:
                            details[key]['values'].append(float(vv['value']))
                        details[key]['type'] = k
                        details[key]['metric'] = kk
                        details[key]['unit'] = str(vv['unit'])
        return details

    def _updateOneEntry(self, detail, d, k):
        """Copy field *k* from *d* into *detail*, asserting consistency if the
        field was already set by an earlier entry."""
        if k in detail:
            assert (detail[k] == d[k]), 'Field {} does not match in different entries'.format(k)
        else:
            detail[k] = d[k]
# NOTE(review): the callback decorator arrived garbled (bare
# "(Output(...), [Input(...), Input(...)])"); reconstructed as a standard Dash
# callback registration -- confirm the registrar name (`app`) against VCS.
@app.callback(Output('configs-output', 'children'), [Input('dash-uploader', 'disabled'), Input('dash-uploader', 'disableDragAndDrop')])
def update_config_states(is_disabled, is_disableDragAndDrop):
    """Reflect the uploader's disabled/drag-and-drop flags as a JSON list:
    0 present when disabled, 1 present when drag-and-drop is disabled."""
    val_configs = []
    if is_disabled:
        val_configs.append(0)
    if is_disableDragAndDrop:
        val_configs.append(1)
    return json.dumps(val_configs)
class ModelsAPI(APIClient):
    """API over ModelsFetcher results: model runs, models, sources, exposures
    and test coverage, normalized into response schemas.

    Fixes: restored decorators that were evidently stripped from this file --
    @staticmethod on _get_model_runs_totals (it takes no self and is called via
    self.*, which would otherwise raise TypeError) and @classmethod on
    _normalize_artifact_path/_fqn (their first parameter is `cls`). The three
    `...` stubs of _normalize_dbt_artifact_dict look like typing @overload
    declarations whose decorators were also stripped; left undecorated, they
    are harmlessly shadowed by the implementation -- confirm against VCS.
    """

    def __init__(self, dbt_runner: BaseDbtRunner):
        super().__init__(dbt_runner)
        self.models_fetcher = ModelsFetcher(dbt_runner=self.dbt_runner)

    def get_models_runs(self, days_back: Optional[int] = 7, exclude_elementary_models: bool = False) -> ModelRunsWithTotalsSchema:
        """Aggregate per-model run history plus error/success totals."""
        model_runs_results = self.models_fetcher.get_models_runs(days_back=days_back, exclude_elementary_models=exclude_elementary_models)
        model_id_to_runs_map = defaultdict(list)
        for model_run in model_runs_results:
            model_id_to_runs_map[model_run.unique_id].append(model_run)
        aggregated_models_runs = []
        for (model_unique_id, model_runs) in model_id_to_runs_map.items():
            totals = self._get_model_runs_totals(model_runs)
            runs = [ModelRunSchema(id=model_run.invocation_id, time_utc=model_run.generated_at, status=model_run.status, full_refresh=model_run.full_refresh, materialization=model_run.materialization, execution_time=model_run.execution_time) for model_run in model_runs]
            # change rate is computed against the median of successful runs only
            successful_execution_times = [model_run.execution_time for model_run in model_runs if model_run.status.lower() == 'success']
            median_execution_time = statistics.median(successful_execution_times) if len(successful_execution_times) else 0
            last_model_run = sorted(model_runs, key=lambda run: run.generated_at)[-1]
            execution_time_change_rate = (((last_model_run.execution_time / median_execution_time) - 1) * 100) if median_execution_time != 0 else 0
            aggregated_models_runs.append(ModelRunsSchema(unique_id=model_unique_id, schema=last_model_run.schema_name, name=last_model_run.name, status=last_model_run.status, last_exec_time=last_model_run.execution_time, last_generated_at=last_model_run.generated_at, compiled_code=last_model_run.compiled_code, median_exec_time=median_execution_time, exec_time_change_rate=execution_time_change_rate, totals=totals, runs=runs))
        model_runs_totals = {}
        for aggregated_model_run in aggregated_models_runs:
            model_runs_totals[aggregated_model_run.unique_id] = TotalsSchema(errors=aggregated_model_run.totals.errors, warnings=0, failures=0, passed=aggregated_model_run.totals.success)
        return ModelRunsWithTotalsSchema(runs=aggregated_models_runs, totals=model_runs_totals)

    @staticmethod
    def _get_model_runs_totals(runs: List[FetcherModelRunSchema]) -> TotalsModelRunsSchema:
        """Count error/fail vs success runs."""
        error_runs = len([run for run in runs if run.status in ['error', 'fail']])
        success_runs = len([run for run in runs if run.status == 'success'])
        return TotalsModelRunsSchema(errors=error_runs, success=success_runs)

    def get_models(self, exclude_elementary_models: bool = False) -> Dict[str, NormalizedModelSchema]:
        """Return normalized models keyed by unique_id."""
        models_results = self.models_fetcher.get_models(exclude_elementary_models=exclude_elementary_models)
        models = dict()
        if models_results:
            for model_result in models_results:
                normalized_model = self._normalize_dbt_artifact_dict(model_result)
                model_unique_id = normalized_model.unique_id
                if model_unique_id is None:
                    continue
                models[model_unique_id] = normalized_model
        return models

    def get_sources(self) -> Dict[str, NormalizedSourceSchema]:
        """Return normalized sources keyed by unique_id."""
        sources_results = self.models_fetcher.get_sources()
        sources = dict()
        if sources_results:
            for source_result in sources_results:
                normalized_source = self._normalize_dbt_artifact_dict(source_result)
                source_unique_id = normalized_source.unique_id
                if source_unique_id is None:
                    continue
                sources[source_unique_id] = normalized_source
        return sources

    def get_exposures(self) -> Dict[str, NormalizedExposureSchema]:
        """Return normalized exposures keyed by unique_id."""
        exposures_results = self.models_fetcher.get_exposures()
        exposures = dict()
        if exposures_results:
            for exposure_result in exposures_results:
                normalized_exposure = self._normalize_dbt_artifact_dict(exposure_result)
                exposure_unique_id = normalized_exposure.unique_id
                if exposure_unique_id is None:
                    continue
                exposures[exposure_unique_id] = normalized_exposure
        return exposures

    def get_test_coverages(self) -> Dict[str, ModelCoverageSchema]:
        """Return per-model table/column test counts keyed by model unique_id."""
        coverage_results = self.models_fetcher.get_test_coverages()
        coverages = dict()
        if coverage_results:
            for coverage_result in coverage_results:
                if coverage_result.model_unique_id is None:
                    continue
                coverages[coverage_result.model_unique_id] = ModelCoverageSchema(table_tests=coverage_result.table_tests, column_tests=coverage_result.column_tests)
        return coverages

    # NOTE(review): likely @overload stubs -- see class docstring.
    def _normalize_dbt_artifact_dict(self, artifact: ModelSchema) -> NormalizedModelSchema:
        ...

    def _normalize_dbt_artifact_dict(self, artifact: ExposureSchema) -> NormalizedExposureSchema:
        ...

    def _normalize_dbt_artifact_dict(self, artifact: SourceSchema) -> NormalizedSourceSchema:
        ...

    def _normalize_dbt_artifact_dict(self, artifact: Union[ModelSchema, ExposureSchema, SourceSchema]) -> Union[NormalizedModelSchema, NormalizedExposureSchema, NormalizedSourceSchema]:
        """Round-trip the artifact through JSON and enrich it with model_name,
        fqn and normalized_full_path, returning the matching normalized schema."""
        schema_to_normalized_schema_map = {ExposureSchema: NormalizedExposureSchema, ModelSchema: NormalizedModelSchema, SourceSchema: NormalizedSourceSchema}
        artifact_name = artifact.name
        normalized_artifact = json.loads(artifact.json())
        normalized_artifact['model_name'] = artifact_name
        fqn = self._fqn(artifact)
        normalized_artifact['fqn'] = fqn
        normalized_artifact['normalized_full_path'] = self._normalize_artifact_path(artifact, fqn)
        return schema_to_normalized_schema_map[type(artifact)](**normalized_artifact)

    @classmethod
    def _normalize_artifact_path(cls, artifact: ArtifactSchemaType, fqn: str) -> str:
        """Build a display path: exposures live under 'exposures/<fqn>', sources
        are remapped from 'models' to 'sources', and the package name prefixes
        everything when present."""
        if artifact.full_path is None:
            raise Exception("Artifact full path can't be null")
        if isinstance(artifact, ExposureSchema):
            split_artifact_path = ['exposures'] + fqn.split('/')
        else:
            split_artifact_path = artifact.full_path.split(os.path.sep)
        if isinstance(artifact, SourceSchema):
            if split_artifact_path[0] == 'models':
                split_artifact_path[0] = 'sources'
        if artifact.package_name:
            split_artifact_path.insert(0, artifact.package_name)
        return os.path.sep.join(split_artifact_path)

    @classmethod
    def _fqn(cls, artifact: Union[ModelSchema, ExposureSchema, SourceSchema]) -> str:
        """Fully-qualified name: meta-path/label for exposures, otherwise
        db.schema.table (lower-cased), degrading to table_name alone."""
        if isinstance(artifact, ExposureSchema):
            path = (artifact.meta or {}).get('path')
            name = artifact.label or artifact.name or 'N/A'
            fqn = f'{path}/{name}' if path else name
            return fqn
        fqn = (f'{artifact.database_name}.{artifact.schema_name}.{artifact.table_name}' if ((artifact.database_name is not None) and (artifact.schema_name is not None)) else artifact.table_name)
        return fqn.lower()
class Channel(BaseChannel):
    """AMQP channel: frame dispatch, message assembly and consume loop.

    Fix: the `basic`/`exchange`/`tx`/`queue` accessors lost their @property
    decorators in this file (stop_consuming's `self.basic.cancel(tag)` only
    works attribute-style); restored. Indentation of a few statements was
    ambiguous in the flattened source and was resolved to the conventional
    layout -- NOTE(review): confirm against VCS.
    """
    __slots__ = ['_confirming_deliveries', 'consumer_callback', 'rpc', '_basic', '_connection', '_exchange', '_inbound', '_queue', '_tx', '_die', 'message_build_timeout']

    def __init__(self, channel_id, connection, rpc_timeout):
        super(Channel, self).__init__(channel_id)
        self.consumer_callback = None
        self.rpc = Rpc(self, timeout=rpc_timeout)
        self.message_build_timeout = rpc_timeout
        self._confirming_deliveries = False
        self._connection = connection
        self._inbound = []
        self._basic = Basic(self)
        self._exchange = Exchange(self)
        self._tx = Tx(self)
        self._queue = Queue(self)
        # cross-process kill switch checked by the consume/build loops
        self._die = multiprocessing.Value('b', 0)

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, _):
        if exception_type:
            LOGGER.warning('Closing channel due to an unhandled exception: %s', exception_value)
        if not self.is_open:
            return
        self.close()

    def __int__(self):
        return self._channel_id

    @property
    def basic(self):
        """Basic operations handler."""
        return self._basic

    @property
    def exchange(self):
        """Exchange operations handler."""
        return self._exchange

    @property
    def tx(self):
        """Transaction operations handler."""
        return self._tx

    @property
    def queue(self):
        """Queue operations handler."""
        return self._queue

    def build_inbound_messages(self, break_on_empty=False, to_tuple=False, auto_decode=False):
        """Generator yielding messages assembled from inbound frames; raises
        AMQPConnectionError if no message is built within the timeout."""
        last_message_built_at = time()
        self.check_for_errors()
        while not self.is_closed:
            if self._die.value != 0:
                return
            if self.is_closed:
                return
            message = self._build_message(auto_decode=auto_decode)
            if not message:
                if (time() - last_message_built_at) > self.message_build_timeout:
                    raise AMQPConnectionError('Timeout while attempting to build message!')
                if break_on_empty:
                    break
                self.check_for_errors()
                sleep(IDLE_WAIT)
                continue
            last_message_built_at = time()
            if to_tuple:
                yield message.to_tuple()
                continue
            yield message

    def kill(self):
        """Hard-stop the channel without an AMQP close handshake."""
        self._die.value = 1
        self.set_state(self.CLOSED)

    def close(self, reply_code=200, reply_text=''):
        """Close the channel, cancelling consumers and notifying the broker."""
        if not compatibility.is_integer(reply_code):
            raise AMQPInvalidArgument('reply_code should be an integer')
        elif not compatibility.is_string(reply_text):
            raise AMQPInvalidArgument('reply_text should be a string')
        try:
            if self._connection.is_closed or self.is_closed:
                self.stop_consuming()
                LOGGER.debug('Channel #%d forcefully Closed', self.channel_id)
                return
            self.set_state(self.CLOSING)
            LOGGER.debug('Channel #%d Closing', self.channel_id)
            try:
                self.stop_consuming()
            except AMQPChannelError:
                self.remove_consumer_tag()
            self.rpc_request(specification.Channel.Close(reply_code=reply_code, reply_text=reply_text), adapter=self._connection)
        finally:
            # drop any partially-assembled frames and mark closed regardless
            if self._inbound:
                del self._inbound[:]
            self.set_state(self.CLOSED)
        LOGGER.debug('Channel #%d Closed', self.channel_id)

    def check_for_errors(self):
        """Raise the first pending connection/channel exception, if any."""
        try:
            self._connection.check_for_errors()
        except AMQPConnectionError:
            self.set_state(self.CLOSED)
            raise
        if self.exceptions:
            exception = self.exceptions[0]
            if self.is_open:
                # only consume the exception while the channel is still usable
                self.exceptions.pop(0)
            raise exception
        if self.is_closed:
            raise AMQPChannelError('channel was closed')

    def confirm_deliveries(self):
        """Enable publisher confirms on this channel."""
        self._confirming_deliveries = True
        confirm_frame = specification.Confirm.Select()
        return self.rpc_request(confirm_frame)

    def confirming_deliveries(self):
        """Whether publisher confirms are enabled."""
        return self._confirming_deliveries

    def on_frame(self, frame_in):
        """Dispatch an incoming frame to RPC, inbound buffer or a handler."""
        if self.rpc.on_frame(frame_in):
            return
        if frame_in.name in CONTENT_FRAME:
            self._inbound.append(frame_in)
        elif frame_in.name == 'Basic.Cancel':
            self._basic_cancel(frame_in)
        elif frame_in.name == 'Basic.CancelOk':
            self.remove_consumer_tag(frame_in.consumer_tag)
        elif frame_in.name == 'Basic.ConsumeOk':
            self.add_consumer_tag(frame_in['consumer_tag'])
        elif frame_in.name == 'Basic.Return':
            self._basic_return(frame_in)
        elif frame_in.name == 'Channel.Close':
            self._close_channel(frame_in)
        elif frame_in.name == 'Channel.Flow':
            self.write_frame(specification.Channel.FlowOk(frame_in.active))
        else:
            LOGGER.error('[Channel%d] Unhandled Frame: %s -- %s', self.channel_id, frame_in.name, dict(frame_in))

    def open(self):
        """Open the channel with the broker."""
        self._inbound = []
        self._exceptions = []
        self.set_state(self.OPENING)
        self.rpc_request(specification.Channel.Open())
        self.set_state(self.OPEN)

    def process_data_events(self, to_tuple=False, auto_decode=False):
        """Deliver currently buffered messages to consumer_callback."""
        if not self.consumer_callback:
            raise AMQPChannelError('no consumer_callback defined')
        for message in self.build_inbound_messages(break_on_empty=True, to_tuple=to_tuple, auto_decode=auto_decode):
            if self._die.value != 0:
                return
            if to_tuple:
                self.consumer_callback(*message)
                continue
            self.consumer_callback(message)
        sleep(IDLE_WAIT)

    def rpc_request(self, frame_out, adapter=None):
        """Send a frame and block for its matching response."""
        with self.rpc.lock:
            uuid = self.rpc.register_request(frame_out.valid_responses)
            self._connection.write_frame(self.channel_id, frame_out)
            return self.rpc.get_request(uuid, adapter=adapter)

    def start_consuming(self, to_tuple=False, auto_decode=False):
        """Consume until the channel closes, all tags are cancelled, or kill()."""
        while not self.is_closed:
            self.process_data_events(to_tuple=to_tuple, auto_decode=auto_decode)
            if not self.consumer_tags:
                break
            if self._die.value != 0:
                break

    def stop_consuming(self):
        """Cancel all active consumers."""
        if not self.consumer_tags:
            return
        if not self.is_closed:
            for tag in self.consumer_tags:
                self.basic.cancel(tag)
        self.remove_consumer_tag()

    def write_frame(self, frame_out):
        """Send a single frame after checking for pending errors."""
        self.check_for_errors()
        self._connection.write_frame(self.channel_id, frame_out)

    def write_frames(self, frames_out):
        """Send multiple frames after checking for pending errors."""
        self.check_for_errors()
        self._connection.write_frames(self.channel_id, frames_out)

    def _basic_cancel(self, frame_in):
        LOGGER.warning('Received Basic.Cancel on consumer_tag: %s', try_utf8_decode(frame_in.consumer_tag))
        self.remove_consumer_tag(frame_in.consumer_tag)

    def _basic_return(self, frame_in):
        # broker returned an unroutable message; surface it as an exception
        reply_text = try_utf8_decode(frame_in.reply_text)
        message = ("Message not delivered: %s (%s) to queue '%s' from exchange '%s'" % (reply_text, frame_in.reply_code, frame_in.routing_key, frame_in.exchange))
        exception = AMQPMessageError(message, reply_code=frame_in.reply_code)
        self.exceptions.append(exception)

    def _build_message(self, auto_decode):
        """Assemble one message from buffered frames, or None if incomplete."""
        with self.lock:
            # need at least Basic.Deliver + ContentHeader before a body exists
            if len(self._inbound) < 2:
                return None
            headers = self._build_message_headers()
            if not headers:
                return None
            (basic_deliver, content_header) = headers
            body = self._build_message_body(content_header.body_size)
        message = Message(channel=self, body=body, method=dict(basic_deliver), properties=dict(content_header.properties), auto_decode=auto_decode)
        return message

    def _build_message_headers(self):
        """Pop (Basic.Deliver, ContentHeader) or None on out-of-order frames."""
        basic_deliver = self._inbound.pop(0)
        if not isinstance(basic_deliver, specification.Basic.Deliver):
            LOGGER.warning('Received an out-of-order frame: %s was expecting a Basic.Deliver frame', type(basic_deliver))
            return None
        content_header = self._inbound.pop(0)
        if not isinstance(content_header, ContentHeader):
            LOGGER.warning('Received an out-of-order frame: %s was expecting a ContentHeader frame', type(content_header))
            return None
        return (basic_deliver, content_header)

    def _build_message_body(self, body_size):
        """Concatenate body frames until body_size bytes are collected."""
        body = bytes()
        while len(body) < body_size:
            if not self._inbound:
                self.check_for_errors()
                sleep(IDLE_WAIT)
                continue
            body_piece = self._inbound.pop(0)
            if not body_piece.value:
                break
            body += body_piece.value
        return body

    def _close_channel(self, frame_in):
        """Handle a server-initiated Channel.Close."""
        if frame_in.reply_code != 200:
            reply_text = try_utf8_decode(frame_in.reply_text)
            message = ('Channel %d was closed by remote server: %s' % (self._channel_id, reply_text))
            exception = AMQPChannelError(message, reply_code=frame_in.reply_code)
            self.exceptions.append(exception)
        self.set_state(self.CLOSED)
        if self._connection.is_open:
            try:
                self._connection.write_frame(self.channel_id, specification.Channel.CloseOk())
            except AMQPConnectionError:
                pass
        self.close()
# NOTE(review): decorator arrived garbled as ".parametrize(...)"; reconstructed
# as pytest.mark.parametrize -- confirm against VCS.
@pytest.mark.parametrize('elasticapm_client', [{'span_compression_enabled': True, 'span_compression_same_kind_max_duration': '5ms', 'span_compression_exact_match_max_duration': '5ms'}], indirect=True)
def test_same_kind(elasticapm_client):
    """Two sibling exit spans with equal type/subtype/action and destination
    resource (but different names) must compress into one same_kind composite."""
    transaction = elasticapm_client.begin_transaction('test')
    with elasticapm.capture_span('test1', span_type='a', span_subtype='b', span_action='c', leaf=True, duration=0.002, extra={'destination': {'service': {'resource': 'x'}}}) as span1:
        assert span1.is_compression_eligible()
    with elasticapm.capture_span('test2', span_type='a', span_subtype='b', span_action='c', leaf=True, duration=0.003, extra={'destination': {'service': {'resource': 'x'}}}) as span2:
        pass
    assert span2.is_compression_eligible()
    # different names => not exact match, but same kind
    assert not span1.is_exact_match(span2)
    assert span1.is_same_kind(span2)
    elasticapm_client.end_transaction('test')
    spans = elasticapm_client.events[SPAN]
    assert len(spans) == 1
    span = spans[0]
    assert span['name'] == 'Calls to x'
    assert 'composite' in span
    assert span['composite']['count'] == 2
    assert span['composite']['sum'] == 5
    assert span['composite']['compression_strategy'] == 'same_kind'
def extractBlackmaskedphantomWordpressCom(item):
    """Map a blackmaskedphantom.wordpress.com feed item to a release message.

    Returns None for non-releases/previews, False when no known series tag
    matches, otherwise the built release message.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    # (feed tag, canonical series name, translation type)
    tagmap = [
        ('Aloof King', 'Aloof King and Cold (Acting) Queen', 'translated'),
        ('The Adventures of the Idiot Hero, the Logical Mage, and Their Friends', 'The Adventures of the Idiot Hero, the Logical Mage, and Their Friends', 'oel'),
    ]
    matched = next(((series, kind) for tag, series, kind in tagmap if tag in item['tags']), None)
    if matched is None:
        return False
    series_name, release_kind = matched
    return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=release_kind)
class OptionSeriesTreegraphMarker(Options):
    """Marker options for treegraph series.

    NOTE(review): getter/setter pairs shared a name in the file as found, so
    the setters shadowed the getters (@property / @x.setter decorators were
    evidently stripped). Restored -- confirm against VCS.
    """

    @property
    def fillColor(self):
        return self._config_get(None)

    @fillColor.setter
    def fillColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def fillOpacity(self):
        return self._config_get(1)

    @fillOpacity.setter
    def fillOpacity(self, num: float):
        self._config(num, js_type=False)

    @property
    def height(self):
        return self._config_get(None)

    @height.setter
    def height(self, num: float):
        self._config(num, js_type=False)

    @property
    def lineColor(self):
        return self._config_get('#ffffff')

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        return self._config_get(0)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def radius(self):
        return self._config_get(10)

    @radius.setter
    def radius(self, num: float):
        self._config(num, js_type=False)

    @property
    def states(self) -> 'OptionSeriesTreegraphMarkerStates':
        # read-only sub-options node
        return self._config_sub_data('states', OptionSeriesTreegraphMarkerStates)

    @property
    def symbol(self):
        return self._config_get('circle')

    @symbol.setter
    def symbol(self, text: str):
        self._config(text, js_type=False)

    @property
    def width(self):
        return self._config_get(None)

    @width.setter
    def width(self, num: float):
        self._config(num, js_type=False)
def test_stringify_env_args():
    """StringifyObject.process_arg must encode every env value as a string:
    booleans to JSON literals, numbers via str, containers via JSON."""
    raw_env = {
        'ENV_VAR_STRING': 'some_string',
        'ENV_VAR_BOOLEAN1': True,
        'ENV_VAR_BOOLEAN2': False,
        'ENV_VAR_INT': 23,
        'ENV_VAR_DICT': {'testing': 'is_cool'},
        'ENV_VAR_LIST': ['abc', 'def'],
        'ENV_VAR_LIST_OF_DICTS': [{'a': 'b'}],
    }
    stringified_env = {
        'ENV_VAR_STRING': 'some_string',
        'ENV_VAR_BOOLEAN1': 'true',
        'ENV_VAR_BOOLEAN2': 'false',
        'ENV_VAR_INT': '23',
        'ENV_VAR_DICT': '{"testing": "is_cool"}',
        'ENV_VAR_LIST': '["abc", "def"]',
        'ENV_VAR_LIST_OF_DICTS': '[{"a": "b"}]',
    }
    assert StringifyObject({}).process_arg(raw_env, None, {}) == stringified_env
# NOTE(review): decorator arrived garbled as ".django_db"; reconstructed as
# pytest.mark.django_db -- confirm against VCS.
@pytest.mark.django_db
def test_object_class_groups_by_object_classes(client, elasticsearch_account_index, faba_with_two_object_classes_and_two_awards, monkeypatch, helpers):
    """Award spending grouped by object class: a fixture with two object
    classes must yield exactly two result groups."""
    setup_elasticsearch_test(monkeypatch, elasticsearch_account_index)
    helpers.patch_datetime_now(monkeypatch, 2022, 12, 31)
    helpers.reset_dabs_cache()
    resp = helpers.post_for_spending_endpoint(client, url, def_codes=['M'], spending_type='award')
    assert resp.status_code == status.HTTP_200_OK
    assert len(resp.json()['results']) == 2
def _upload_firmware_get(test_client, intercom):
    """GET /upload must render the form with mandatory plugins hidden, default
    plugins pre-checked, and optional plugins present but unchecked."""
    response = test_client.get('/upload')
    assert b'<h3 class="mb-3">Upload Firmware</h3>' in response.data, 'upload page not displayed correctly'
    plugins = intercom.get_available_analysis_plugins()
    # plugin tuple layout: [1] -> mandatory flag, [2] -> config dict
    mandatory_plugins = [name for name, info in plugins.items() if info[1]]
    default_plugins = [name for name, info in plugins.items() if name != 'unpacker' and info[2]['default']]
    optional_plugins = [name for name, info in plugins.items() if not (info[1] or info[2])]
    for mandatory_plugin in mandatory_plugins:
        assert f'id="{mandatory_plugin}"'.encode() not in response.data, f'mandatory plugin {mandatory_plugin} found erroneously'
    for default_plugin in default_plugins:
        assert f'value="{default_plugin}" checked'.encode() in response.data, f'default plugin {default_plugin} erroneously unchecked or not found'
    for optional_plugin in optional_plugins:
        assert f'value="{optional_plugin}" unchecked'.encode() in response.data, f'optional plugin {optional_plugin} erroneously checked or not found'
class RewardAggregator(RewardAggregatorInterface):
    """Aggregates DummyEnvEvents recorded during a step into a scalar reward."""

    def get_interfaces(self) -> List[Type[ABC]]:
        """Return the event interfaces this aggregator subscribes to."""
        return [DummyEnvEvents]

    def summarize_reward(self, maze_state: Optional[MazeStateType]=None) -> float:
        """Sum the `value` of all `twice_per_step` events seen this step."""
        return sum((e.value for e in self.query_events(DummyEnvEvents.twice_per_step)))

    def to_scalar_reward(cls, reward: float) -> float:
        # Identity mapping: the summarized reward is already scalar.
        # NOTE(review): first parameter is `cls` but no @classmethod decorator
        # is visible — possibly lost in extraction; confirm against original.
        return reward
def normal_normal_conjugate_fixer(bmg: BMGraphBuilder) -> NodeFixer:
    """Build a NodeFixer that collapses a Normal-Normal conjugate pair.

    When a Normal likelihood's mean is itself a sampled Normal with constant
    real parameters, the prior's (mu, std) are replaced in place with the
    closed-form posterior parameters computed from the attached observations,
    and the consumed observations (plus any now-dangling sample nodes) are
    removed from the graph.
    """

    def _transform_mu(mu: bn.ConstantNode, std: bn.ConstantNode, sigma: bn.ConstantNode, obs: List[bn.Observation]) -> bn.BMGNode:
        # Posterior mean: precision-weighted combination of the prior mean
        # and the sum of the observed data.
        precision_prior = pow(std.value, (- 2.0))
        precision_data = (len(obs) * pow(sigma.value, (- 2.0)))
        precision_inv = pow((precision_prior + precision_data), (- 1.0))
        data_sum = sum((o.value for o in obs))
        transformed_mu = (precision_inv * ((mu.value * pow(std.value, (- 2.0))) + (data_sum * pow(sigma.value, (- 2.0)))))
        return bmg.add_constant(transformed_mu)

    def _transform_std(std: bn.ConstantNode, sigma: bn.ConstantNode, obs: List[bn.Observation]) -> bn.BMGNode:
        # Posterior std: sqrt of the inverse of the summed precisions.
        precision_prior = (1 / pow(std.value, 2))
        precision_data = (len(obs) / pow(sigma.value, 2))
        transformed_std = math.sqrt((1 / (precision_prior + precision_data)))
        return bmg.add_constant(transformed_std)

    def fixer(n: bn.BMGNode) -> NodeFixerResult:
        # Applicable only to Normal likelihood nodes whose mean is a sampled
        # Normal (with real params) that is queried, and that have
        # observations attached.
        if (not isinstance(n, bn.NormalNode)):
            return Inapplicable
        mu_normal_sample = n.inputs[0]
        if (not (isinstance(mu_normal_sample, bn.SampleNode) and _mu_is_normal_with_real_params(mu_normal_sample) and _mu_is_queried(mu_normal_sample) and _normal_is_observed(n))):
            return Inapplicable
        sigma = n.inputs[1]
        assert isinstance(sigma, bn.UntypedConstantNode)
        mu_normal_node = mu_normal_sample.inputs[0]
        assert isinstance(mu_normal_node, bn.NormalNode)
        obs = []
        samples_to_remove = []
        # Collect the observation node attached to each observed sample of n.
        for o in n.outputs.items:
            if (isinstance(o, bn.SampleNode) and _sample_contains_obs(o)):
                obs.append(next(iter(o.outputs.items.keys())))
                samples_to_remove.append(o)
        mu = mu_normal_node.inputs[0]
        std = mu_normal_node.inputs[1]
        assert isinstance(mu, bn.ConstantNode)
        assert isinstance(std, bn.ConstantNode)
        transformed_mu = _transform_mu(mu, std, sigma, obs)
        transformed_std = _transform_std(std, sigma, obs)
        # Rewrite the prior in place with the posterior parameters.
        mu_normal_node.inputs[0] = transformed_mu
        mu_normal_node.inputs[1] = transformed_std
        # Remove the consumed observation leaves first, then any sample node
        # left with no remaining outputs.
        for o in obs:
            bmg.remove_leaf(o)
        for s in samples_to_remove:
            if (len(s.outputs.items) == 0):
                bmg.remove_node(s)
        return n
    return fixer
class InitCommand(Command):
    """Interactive `init` command.

    Asks for a project name (defaulting to the folder name) and writes it to
    a JSON project file, prompting before overwriting an existing one.
    """

    def does_already_exist(self):
        """If a project file already exists, ask whether to erase it.

        Exits the process when the user answers "n"; a Ctrl-C during the
        prompt is delegated to `cancel_command`.
        """
        if os.path.isfile(self.PROJECT_FILE):
            sys.stdout.write('{warning}A project called {green}{name}{warning} already exists, erase it?{reset} (y/n) '.format(name=self.get_project_name(), green=Fore.GREEN, warning=Fore.WARNING, reset=Style.RESET_ALL))
            try:
                answer = input().lower()
            except KeyboardInterrupt:
                self.cancel_command()
            if answer.startswith('n'):
                sys.exit()

    def get_new_project_name(self):
        """Prompt for a project name; empty input falls back to FOLDER_NAME."""
        sys.stdout.write('{blue}Project name{reset}: ({green}{name}{reset}) '.format(name=FOLDER_NAME, blue=Fore.BLUE, green=Fore.GREEN, reset=Style.RESET_ALL))
        try:
            name = input()
        except KeyboardInterrupt:
            self.cancel_command()
        return (name if name else FOLDER_NAME)

    def create_project(self, name):
        """Write the project JSON file; print an error and exit(1) on failure."""
        try:
            with open(self.PROJECT_FILE, 'w', encoding='utf-8') as project_file:
                json.dump({'name': name}, project_file, sort_keys=True, indent=4, ensure_ascii=False)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; also fixed the "occured" typo below.
            print('{fail}An error has occurred while initializing the project{reset}'.format(fail=Fore.FAIL, reset=Style.RESET_ALL))
            sys.exit(1)

    def run(self):
        """Entry point: confirm overwrite, ask for a name, create the project."""
        self.does_already_exist()
        name = self.get_new_project_name()
        self.create_project(name)
        print('The project {green}{name}{reset} has been created.'.format(name=name, green=Fore.GREEN, reset=Style.RESET_ALL))
def test_training_job_resource_config():
    """Round-trip through the flyte IDL and check field-wise inequality."""
    base = training_job.TrainingJobResourceConfig(
        instance_count=1,
        instance_type='random.instance',
        volume_size_in_gb=25,
        distributed_protocol=training_job.DistributedProtocol.MPI,
    )
    # Serializing and deserializing must yield an equal object.
    round_tripped = training_job.TrainingJobResourceConfig.from_flyte_idl(base.to_flyte_idl())
    assert round_tripped == base
    assert round_tripped.distributed_protocol == training_job.DistributedProtocol.MPI
    # Differing in protocol only -> unequal.
    different_protocol = training_job.TrainingJobResourceConfig(
        instance_count=1,
        instance_type='random.instance',
        volume_size_in_gb=25,
        distributed_protocol=training_job.DistributedProtocol.UNSPECIFIED,
    )
    assert base != different_protocol
    # Differing in instance type only -> unequal.
    different_instance = training_job.TrainingJobResourceConfig(
        instance_count=1,
        instance_type='oops',
        volume_size_in_gb=25,
        distributed_protocol=training_job.DistributedProtocol.MPI,
    )
    assert base != different_instance
# NOTE(review): the decorator below appears truncated by extraction —
# presumably `@pytest.mark.parametrize('with_commandline', (True, False))`;
# confirm against the original file.
('with_commandline', (True, False))
def test_optuna_example(with_commandline: bool, tmpdir: Path) -> None:
    """End-to-end run of the Optuna sweeper sphere example."""
    # Persist the study in a throwaway sqlite DB under tmpdir.
    storage = ('sqlite:///' + os.path.join(str(tmpdir), 'test.db'))
    study_name = 'test-optuna-example'
    cmd = ['example/sphere.py', '--multirun', ('hydra.sweep.dir=' + str(tmpdir)), 'hydra.job.chdir=True', 'hydra.sweeper.n_trials=20', 'hydra.sweeper.n_jobs=1', f'hydra.sweeper.storage={storage}', f'hydra.sweeper.study_name={study_name}', 'hydra/sweeper/sampler=tpe', 'hydra.sweeper.sampler.seed=123', '~z']
    if with_commandline:
        # Override the search space from the command line; y is fixed to 0.
        cmd += ['x=choice(0, 1, 2)', 'y=0']
    run_python_script(cmd)
    returns = OmegaConf.load(f'{tmpdir}/optimization_results.yaml')
    # Reload the study to compare the YAML report against Optuna's own state.
    study = optuna.load_study(storage=storage, study_name=study_name)
    best_trial = study.best_trial
    assert isinstance(returns, DictConfig)
    assert (returns.name == 'optuna')
    assert (returns['best_params']['x'] == best_trial.params['x'])
    if with_commandline:
        # y was pinned on the command line, so it is not a tuned parameter.
        assert ('y' not in returns['best_params'])
        assert ('y' not in best_trial.params)
    else:
        assert (returns['best_params']['y'] == best_trial.params['y'])
    assert (returns['best_value'] == best_trial.value)
    # Loose sanity bound on the achieved objective value.
    assert (returns['best_value'] <= 2.27)
def split_dofs(elem):
    """Partition the DoFs of `elem` into two index arrays.

    Returns a pair of sorted PETSc integer arrays: entry 0 holds DoFs on
    entities of full spatial dimension, entry 1 holds DoFs on entities of
    lower dimension.
    """
    entity_dofs = elem.entity_dofs()
    cell_dim = elem.cell.get_spatial_dimension()
    full_dim_dofs = []
    lower_dim_dofs = []
    for key in sorted(entity_dofs.keys()):
        entities = entity_dofs[key]
        dim = key
        try:
            # Tensor-product cells key entity_dofs by a tuple of dimensions;
            # their effective dimension is the sum of the components.
            dim = sum(dim)
        except TypeError:
            pass
        bucket = lower_dim_dofs if dim < cell_dim else full_dim_dofs
        for entity in sorted(entities.keys()):
            bucket.extend(sorted(entities[entity]))
    return tuple(numpy.array(group, dtype=PETSc.IntType)
                 for group in (full_dim_dofs, lower_dim_dofs))
def compute_gradient_norm(params: Iterable[torch.Tensor]) -> float:
    """Return the global L2 norm of the gradients of `params`.

    Parameters that do not require grad, are empty, or have no accumulated
    gradient yet (``p.grad is None``) are skipped.

    :param params: iterable of tensors (typically ``model.parameters()``).
    :return: sqrt of the sum of squared per-parameter gradient L2 norms.
    """
    total_sq = 0.0
    for p in params:
        # Guard against p.grad being None: a parameter that requires grad
        # but has not been through backward() yet would otherwise raise
        # AttributeError in the original implementation.
        if p.requires_grad and p.grad is not None and p.numel() > 0:
            param_norm = p.grad.data.norm(2)
            total_sq += param_norm.item() ** 2
    return total_sq ** 0.5
# NOTE(review): the decorator below appears truncated by extraction —
# presumably `@pytest.mark.parametrize(...)`; confirm against the original.
.parametrize(('provider', 'feature', 'subfeature', 'phase'), global_features(return_phase=True)['ungrouped_providers'])
class TestComputeOutput():
    """Checks compute_output in fake mode for every provider/feature combo."""

    def test_output_fake(self, mocker: MockerFixture, provider, feature, subfeature, phase):
        """Fake-mode output must carry the provider name and success status."""
        # create_project has no fake-mode support; skip it outright.
        if (phase == 'create_project'):
            pytest.skip('create_project is not supported in fake mode')
        # Bypass real provider constraint validation (credentials/network).
        mocker.patch('edenai_apis.interface.validate_all_provider_constraints', return_value={})
        final_result = compute_output(provider, feature, subfeature, {}, fake=True, phase=phase)
        assert (final_result['provider'] == provider)
        assert (final_result['status'] == 'success')
class ValveChangePortTestCase(ValveTestBases.ValveTestNetwork):
    """Warm-reconfigure a port from permanent_learn to normal learning."""

    # Base config: p2 has permanent_learn enabled.
    CONFIG = ('\ndps:\n s1:\n%s\n interfaces:\n p1:\n number: 1\n native_vlan: 0x100\n p2:\n number: 2\n native_vlan: 0x200\n permanent_learn: True\n' % DP1_CONFIG)
    # Identical topology with permanent_learn disabled on p2.
    LESS_CONFIG = ('\ndps:\n s1:\n%s\n interfaces:\n p1:\n number: 1\n native_vlan: 0x100\n p2:\n number: 2\n native_vlan: 0x200\n permanent_learn: False\n' % DP1_CONFIG)

    def setUp(self):
        """Build the valve network from the base (permanent_learn) config."""
        self.setup_valves(self.CONFIG)

    def test_delete_permanent_learn(self):
        """Learn a host on p2, then warm-apply LESS_CONFIG and revert."""
        table = self.network.tables[self.DP_ID]
        # Snapshot the table state before learning so the revert can be
        # validated against it.
        before_table_state = table.table_state()
        # Trigger learning of P2_V200_MAC on port 2 / VLAN 0x200 (512).
        self.rcv_packet(2, 512, {'eth_src': self.P2_V200_MAC, 'eth_dst': self.P3_V200_MAC, 'ipv4_src': '10.0.0.2', 'ipv4_dst': '10.0.0.3', 'vid': 512})
        self.update_and_revert_config(self.CONFIG, self.LESS_CONFIG, 'warm', before_table_states={self.DP_ID: before_table_state})
def test_custom_mapping(worker):
    """A signal method registered under a custom name maps to that name."""
    class CustomNameWorkflow():
        # NOTE(review): `_method(name='blah')` looks like a truncated
        # decorator (missing `@` and prefix, e.g. `@signal.method(...)`);
        # confirm against the original file.
        _method(name='blah')
        def the_signal_method(self):
            pass
    worker.register_workflow_implementation_type(CustomNameWorkflow)
    # The registry should map the custom name, not the method name.
    assert (CustomNameWorkflow._signal_methods['blah'] == CustomNameWorkflow.the_signal_method)
# NOTE(review): decorator appears truncated by extraction — presumably
# `@pytest.mark.parametrize('transfer', [prolong, inject, restrict])`; confirm.
.parametrize('transfer', [prolong, inject, restrict])
def test_transfer_invalid_level_combo(transfer):
    """Grid transfers with an invalid source/target level combination raise."""
    m = UnitIntervalMesh(10)
    mh = MeshHierarchy(m, 2)
    Vcoarse = FunctionSpace(mh[0], 'DG', 0)
    Vfine = FunctionSpace(mh[(- 1)], 'DG', 0)
    # restrict operates on dual spaces (cofunctions).
    if (transfer == restrict):
        (Vcoarse, Vfine) = (Vcoarse.dual(), Vfine.dual())
    # Deliberately construct an invalid level pairing for each transfer kind.
    if (transfer == prolong):
        (source, target) = (Function(Vfine), Function(Vcoarse))
    else:
        (source, target) = (Function(Vcoarse), Function(Vfine))
    with pytest.raises(ValueError):
        transfer(source, target)
# NOTE(review): the two decorators below look truncated by extraction —
# presumably cornice service-view decorators (e.g. `@release.get(...)`);
# confirm against the original file.
(accept=('application/json', 'text/json'), renderer='json', error_handler=bodhi.server.services.errors.json_handler)
(accept='application/javascript', renderer='jsonp', error_handler=bodhi.server.services.errors.jsonp_handler)
def get_release_json(request):
    """Return the Release matching the `name` route param, or a 404 error body."""
    id = request.matchdict.get('name')
    release = Release.get(id)
    if (not release):
        # Record a cornice error and force a 404; the (None) release is
        # still returned and the error handler renders the response.
        request.errors.add('body', 'name', 'No such release')
        request.errors.status = HTTPNotFound.code
    return release
class OptionSeriesFunnelSonificationDefaultinstrumentoptionsPointgrouping(Options):
    """Config accessors for sonification default-instrument point grouping.

    NOTE(review): every option appears twice (getter then setter) with no
    @property / @<name>.setter decorators visible — presumably stripped
    during extraction; confirm against the original file.
    """

    def algorithm(self):
        # Grouping algorithm; configured default is 'minmax'.
        return self._config_get('minmax')

    def algorithm(self, text: str):
        self._config(text, js_type=False)

    def enabled(self):
        # Whether point grouping is enabled; defaults to True.
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def groupTimespan(self):
        # Timespan (default 15) over which points are grouped.
        return self._config_get(15)

    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    def prop(self):
        # Point property used for grouping; defaults to 'y'.
        return self._config_get('y')

    def prop(self, text: str):
        self._config(text, js_type=False)
class TestDecimalFieldMappings(TestCase):
    """ModelSerializer must propagate DecimalField validator settings."""

    @staticmethod
    def _build_serializer():
        """Create a fresh ModelSerializer instance over DecimalFieldModel."""
        class DecimalSerializer(serializers.ModelSerializer):
            class Meta():
                model = DecimalFieldModel
                fields = '__all__'
        return DecimalSerializer()

    def test_decimal_field_has_decimal_validator(self):
        serializer = self._build_serializer()
        # DecimalField carries two generated validators.
        assert len(serializer.fields['decimal_field'].validators) == 2

    def test_min_value_is_passed(self):
        serializer = self._build_serializer()
        assert serializer.fields['decimal_field'].min_value == 1

    def test_max_value_is_passed(self):
        serializer = self._build_serializer()
        assert serializer.fields['decimal_field'].max_value == 3
class QdrantDB(BaseVectorDB):
    """Vector database adapter backed by a Qdrant server.

    Connection parameters are read from the QDRANT_URL and QDRANT_API_KEY
    environment variables.
    """

    # Number of points sent/fetched per Qdrant request.
    BATCH_SIZE = 10

    def __init__(self, config: QdrantDBConfig=None):
        """Create the client; a missing config falls back to QdrantDBConfig()."""
        if (config is None):
            config = QdrantDBConfig()
        elif (not isinstance(config, QdrantDBConfig)):
            raise TypeError('config is not a `QdrantDBConfig` instance. Please make sure the type is right and that you are passing an instance.')
        self.config = config
        self.client = QdrantClient(url=os.getenv('QDRANT_URL'), api_key=os.getenv('QDRANT_API_KEY'))
        super().__init__(config=self.config)

    def _initialize(self):
        """Ensure the collection exists; requires an embedder to be set first."""
        if (not self.embedder):
            raise ValueError('Embedder not set. Please set an embedder with `set_embedder` before initialization.')
        self.collection_name = self._get_or_create_collection()
        # Metadata payload keys honoured by `where` filters in get()/query().
        self.metadata_keys = {'data_type', 'doc_id', 'url', 'hash', 'app_id', 'text'}
        all_collections = self.client.get_collections()
        collection_names = [collection.name for collection in all_collections.collections]
        if (self.collection_name not in collection_names):
            # Create the collection with the embedder's vector size and the
            # HNSW/quantization/on-disk settings from the config.
            self.client.recreate_collection(collection_name=self.collection_name, vectors_config=VectorParams(size=self.embedder.vector_dimension, distance=Distance.COSINE, hnsw_config=self.config.hnsw_config, quantization_config=self.config.quantization_config, on_disk=self.config.on_disk))

    def _get_or_create_db(self):
        """Return the underlying Qdrant client."""
        return self.client

    def _get_or_create_collection(self):
        """Derive the collection name from the configured name and vector size."""
        return f'{self.config.collection_name}-{self.embedder.vector_dimension}'.lower().replace('_', '-')

    def get(self, ids: Optional[List[str]]=None, where: Optional[Dict[(str, any)]]=None, limit: Optional[int]=None):
        """Return which of `ids` already exist, optionally filtered by `where`.

        NOTE(review): the `limit` parameter is accepted but never used —
        paging always uses BATCH_SIZE; confirm whether that is intended.
        """
        if ((ids is None) or (len(ids) == 0)):
            return {'ids': []}
        keys = set((where.keys() if (where is not None) else set()))
        qdrant_must_filters = [models.FieldCondition(key='identifier', match=models.MatchAny(any=ids))]
        # Add an equality condition for every recognised metadata key.
        if (len(keys.intersection(self.metadata_keys)) != 0):
            for key in keys.intersection(self.metadata_keys):
                qdrant_must_filters.append(models.FieldCondition(key='metadata.{}'.format(key), match=models.MatchValue(value=where.get(key))))
        offset = 0
        existing_ids = []
        # Page through matching points until Qdrant returns no next offset.
        while (offset is not None):
            response = self.client.scroll(collection_name=self.collection_name, scroll_filter=models.Filter(must=qdrant_must_filters), offset=offset, limit=self.BATCH_SIZE)
            offset = response[1]
            for doc in response[0]:
                existing_ids.append(doc.payload['identifier'])
        return {'ids': existing_ids}

    def add(self, embeddings: List[List[float]], documents: List[str], metadatas: List[object], ids: List[str], **kwargs: Optional[Dict[(str, any)]]):
        """Embed `documents` and upsert them in BATCH_SIZE chunks.

        NOTE(review): the `embeddings` argument is immediately overwritten by
        re-embedding `documents` — confirm whether that is intentional.
        """
        embeddings = self.embedder.embedding_fn(documents)
        payloads = []
        qdrant_ids = []
        # Qdrant point ids are fresh UUIDs; the caller's id is stored in the
        # payload under 'identifier'.
        for (id, document, metadata) in zip(ids, documents, metadatas):
            metadata['text'] = document
            qdrant_ids.append(str(uuid.uuid4()))
            payloads.append({'identifier': id, 'text': document, 'metadata': copy.deepcopy(metadata)})
        for i in range(0, len(qdrant_ids), self.BATCH_SIZE):
            self.client.upsert(collection_name=self.collection_name, points=Batch(ids=qdrant_ids[i:(i + self.BATCH_SIZE)], payloads=payloads[i:(i + self.BATCH_SIZE)], vectors=embeddings[i:(i + self.BATCH_SIZE)]), **kwargs)

    def query(self, input_query: List[str], n_results: int, where: Dict[(str, any)], citations: bool=False, **kwargs: Optional[Dict[(str, Any)]]) -> Union[(List[Tuple[(str, Dict)]], List[str])]:
        """Similarity-search for `input_query`.

        Returns a list of context strings, or (context, metadata) tuples when
        `citations` is True; the result score is merged into the metadata.
        """
        query_vector = self.embedder.embedding_fn([input_query])[0]
        keys = set((where.keys() if (where is not None) else set()))
        qdrant_must_filters = []
        if (len(keys.intersection(self.metadata_keys)) != 0):
            for key in keys.intersection(self.metadata_keys):
                # NOTE(review): the filter key here is 'payload.metadata.{}'
                # while get() uses 'metadata.{}' — one of the two looks
                # inconsistent; verify against the Qdrant payload schema.
                qdrant_must_filters.append(models.FieldCondition(key='payload.metadata.{}'.format(key), match=models.MatchValue(value=where.get(key))))
        results = self.client.search(collection_name=self.collection_name, query_filter=models.Filter(must=qdrant_must_filters), query_vector=query_vector, limit=n_results, **kwargs)
        contexts = []
        for result in results:
            context = result.payload['text']
            if citations:
                metadata = result.payload['metadata']
                metadata['score'] = result.score
                contexts.append(tuple((context, metadata)))
            else:
                contexts.append(context)
        return contexts

    def count(self) -> int:
        """Return the number of points in the collection."""
        response = self.client.get_collection(collection_name=self.collection_name)
        return response.points_count

    def reset(self):
        """Drop the collection and re-create it from scratch."""
        self.client.delete_collection(collection_name=self.collection_name)
        self._initialize()

    def set_collection_name(self, name: str):
        """Change the configured collection name and recompute the full name."""
        if (not isinstance(name, str)):
            raise TypeError('Collection name must be a string')
        self.config.collection_name = name
        self.collection_name = self._get_or_create_collection()
class PubAcc(BasicAuthWithTopicTestCase):
    """MQTT publish-ACL checks against the auth testing endpoint."""

    def setUp(self):
        BasicAuthWithTopicTestCase.setUp(self)
        # Exercise the publish access level for all tests in this case.
        self.acc = models.PROTO_MQTT_ACC_PUB

    def test_login_with_pub_acl_public(self):
        """A public publish ACL grants access (HTTP 200)."""
        response = self._test_login_with_pub_acl_public()
        self.assertEqual(response.status_code, 200)

    def test_login_with_pub_acl(self):
        """A user-scoped ACL grants its owner and denies other users."""
        response = self._test_login_with_pub_acl()
        self.assertEqual(response.status_code, 200)
        username = 'new_user'
        User.objects.create_user(username, password=self.password)
        # A freshly created user is not covered by the ACL -> 403.
        response = self.client.post(self.url_testing, {'username': username, 'password': self.password, 'topic': self.topic, 'acc': self.acc})
        self.assertEqual(response.status_code, 403)

    def test_login_with_pub_acl_group(self):
        """A group-scoped ACL grants members and denies non-members."""
        response = self._test_login_with_pub_acl_group()
        self.assertEqual(response.status_code, 200)
        username = 'new_user'
        User.objects.create_user(username, password=self.password)
        # The new user is not in the ACL's group -> 403.
        response = self.client.post(self.url_testing, {'username': username, 'password': self.password, 'topic': self.topic, 'acc': self.acc})
        self.assertEqual(response.status_code, 403)
class PairwiseDistance(Metric[PairwiseDistanceResult]):
    """Cosine pairwise-distance matrix over item features.

    Collects the items appearing in the current/reference recommendation
    data (restricted to each user's top recommendations when ranking by
    score) plus optional train sets, deduplicates by item id, and computes
    an all-pairs cosine distance matrix over `item_features`.
    """

    # Number of top recommendations per user to consider.
    k: int
    # Feature columns used to compute the distances.
    item_features: List[str]

    def __init__(self, k: int, item_features: List[str], options: AnyOptions=None) -> None:
        self.k = k
        self.item_features = item_features
        super().__init__(options=options)

    def calculate(self, data: InputData) -> PairwiseDistanceResult:
        """Build the distance matrix and an item-id -> row-index mapping."""
        curr = data.current_data
        ref = data.reference_data
        prediction_name = get_prediciton_name(data)
        recommendations_type = data.column_mapping.recom_type
        user_id = data.data_definition.get_user_id_column()
        item_id = data.data_definition.get_item_id_column()
        current_train_data = data.additional_data.get('current_train_data')
        reference_train_data = data.additional_data.get('reference_train_data')
        if ((recommendations_type is None) or (user_id is None) or (item_id is None)):
            raise ValueError('recommendations_type, user_id, item_id must be provided in the column mapping.')
        # Pool current and (if present) reference recommendation rows.
        all_items = curr.copy()
        if (ref is not None):
            all_items = pd.concat([curr, ref])
        if (recommendations_type == RecomType.SCORE):
            # Convert raw scores to per-user ranks (1 = best) before cutting.
            all_items[prediction_name] = all_items.groupby(user_id.column_name)[prediction_name].transform('rank', ascending=False)
        # NOTE(review): keeps ranks up to k + 1, one more than self.k —
        # presumably intentional; confirm against the metric's definition.
        all_items = all_items[(all_items[prediction_name] <= (self.k + 1))]
        all_items = all_items[([item_id.column_name] + self.item_features)]
        if (current_train_data is not None):
            if (not np.in1d(self.item_features, current_train_data.columns).all()):
                raise ValueError('current_train_data must contain item_features.')
            all_items = pd.concat([all_items, current_train_data[([item_id.column_name] + self.item_features)]])
        if (reference_train_data is not None):
            if (not np.in1d(self.item_features, reference_train_data.columns).all()):
                raise ValueError('reference_train_data must contain item_features.')
            all_items = pd.concat([all_items, reference_train_data[([item_id.column_name] + self.item_features)]])
        # One row per distinct item; map each item id to its matrix row.
        all_items.drop_duplicates(subset=[item_id.column_name], inplace=True)
        name_dict = {i: j for (i, j) in zip(all_items[item_id.column_name], range(all_items.shape[0]))}
        return PairwiseDistanceResult(dist_matrix=pairwise_distances(all_items[self.item_features], metric='cosine'), name_dict=name_dict)