def lazy_import():
    from fastly.model.relationship_waf_rule import RelationshipWafRule
    from fastly.model.type_waf_rule_revision import TypeWafRuleRevision
    from fastly.model.waf_rule_revision import WafRuleRevision
    from fastly.model.waf_rule_revision_attributes import WafRuleRevisionAttributes
    from fastly.model.waf_rule_revision_response_data_all_of import WafRuleRevisionResponseDataAllOf
    globals()['RelationshipWafRule'] = RelationshipWafRule
    globals()['TypeWafRuleRevision'] = TypeWafRuleRevision
    globals()['WafRuleRevision'] = WafRuleRevision
    globals()['WafRuleRevisionAttributes'] = WafRuleRevisionAttributes
    globals()['WafRuleRevisionResponseDataAllOf'] = WafRuleRevisionResponseDataAllOf
class TestRemovePrefix:
    def test_remove_matching_prefix(self):
        suffix = runner.remove_prefix('index-', 'index')
        assert suffix == '-'

    def test_prefix_doesnt_exist(self):
        index_name = 'index-'
        suffix = runner.remove_prefix(index_name, 'unrelatedprefix')
        assert index_name == suffix
class TestTraitDict(unittest.TestCase): def setUp(self): self.added = None self.changed = None self.removed = None self.trait_dict = None def notification_handler(self, trait_dict, removed, added, changed): self.trait_list = trait_dict self.removed = removed self.added = added self.changed = changed def test_init(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator) self.assertEqual(td, {'a': 1, 'b': 2}) self.assertEqual(td.notifiers, []) def test_init_iterable(self): td = TraitDict([('a', 1), ('b', 2)], key_validator=str_validator, value_validator=int_validator) self.assertEqual(td, {'a': 1, 'b': 2}) self.assertEqual(td.notifiers, []) with self.assertRaises(ValueError): TraitDict(['a', 'b'], key_validator=str_validator, value_validator=int_validator) def test_notification(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) td['c'] = 5 self.assertEqual(self.added, {'c': 5}) self.assertEqual(self.changed, {}) self.assertEqual(self.removed, {}) def test_deepcopy(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) td_copy = copy.deepcopy(td) self.assertEqual(td, td_copy) self.assertEqual(td_copy.notifiers, []) self.assertEqual(td_copy.value_validator, td.value_validator) self.assertEqual(td_copy.key_validator, td.key_validator) def test_setitem(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) td['a'] = 5 self.assertEqual(self.added, {}) self.assertEqual(self.changed, {'a': 1}) self.assertEqual(self.removed, {}) with self.assertRaises(TraitError): td[5] = 'a' def test_delitem(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) del td['a'] self.assertEqual(self.added, {}) self.assertEqual(self.changed, {}) self.assertEqual(self.removed, {'a': 1}) def test_delitem_not_found(self): python_dict = dict() with self.assertRaises(KeyError) as python_e: del python_dict['x'] td = TraitDict() with self.assertRaises(KeyError) as trait_e: del td['x'] self.assertEqual(str(trait_e.exception), str(python_e.exception)) if (sys.version_info >= (3, 9)): def test_ior(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) td |= {'a': 3, 'd': 5} self.assertEqual(td, {'a': 3, 'b': 2, 'd': 5}) self.assertEqual(self.added, {'d': 5}) self.assertEqual(self.changed, {'a': 1}) self.assertEqual(self.removed, {}) def test_ior_is_quiet_if_no_change(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) td |= [] self.assertEqual(td, {'a': 1, 'b': 2}) self.assertIsNone(self.added) self.assertIsNone(self.removed) self.assertIsNone(self.changed) else: def test_ior(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) with self.assertRaises(TypeError): td |= {'a': 3, 'd': 5} def test_update(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) td.update({'a': 2, 'b': 4, 'c': 5}) self.assertEqual(self.added, {'c': 5}) self.assertEqual(self.changed, {'a': 1, 'b': 2}) self.assertEqual(self.removed, {}) def 
test_update_iterable(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) td.update([('a', 2), ('b', 4), ('c', 5)]) self.assertEqual(self.added, {'c': 5}) self.assertEqual(self.changed, {'a': 1, 'b': 2}) self.assertEqual(self.removed, {}) def test_update_with_transformation(self): td = TraitDict({'1': 1, '2': 2}, key_validator=str, notifiers=[self.notification_handler]) td.update({1: 2}) self.assertEqual(td, {'1': 2, '2': 2}) self.assertEqual(self.added, {}) self.assertEqual(self.changed, {'1': 1}) self.assertEqual(self.removed, {}) def test_update_with_empty_argument(self): td = TraitDict({'1': 1, '2': 2}, key_validator=str, notifiers=[self.notification_handler]) td.update([]) td.update({}) self.assertEqual(td, {'1': 1, '2': 2}) self.assertIsNone(self.added) self.assertIsNone(self.changed) self.assertIsNone(self.removed) def test_update_notifies_with_nonempty_argument(self): td = TraitDict({'1': 1, '2': 2}, key_validator=str, notifiers=[self.notification_handler]) td.update({'1': 1}) self.assertEqual(td, {'1': 1, '2': 2}) self.assertEqual(self.added, {}) self.assertEqual(self.changed, {'1': 1}) self.assertEqual(self.removed, {}) def test_clear(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) td.clear() self.assertEqual(self.added, {}) self.assertEqual(self.changed, {}) self.assertEqual(self.removed, {'a': 1, 'b': 2}) def test_clear_empty_dictionary(self): td = TraitDict({}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) td.clear() self.assertIsNone(self.added) self.assertIsNone(self.changed) self.assertIsNone(self.removed) def test_invalid_key(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) with self.assertRaises(TraitError): td[3] = '3' def test_invalid_value(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) with self.assertRaises(TraitError): td['3'] = True def test_setdefault(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) result = td.setdefault('c', 3) self.assertEqual(result, 3) self.assertEqual(td.setdefault('a', 5), 1) def test_setdefault_with_casting(self): notifier = mock.Mock() td = TraitDict(key_validator=str, value_validator=str, notifiers=[notifier, self.notification_handler]) td.setdefault(1, 2) self.assertEqual(td, {'1': '2'}) self.assertEqual(notifier.call_count, 1) self.assertEqual(self.removed, {}) self.assertEqual(self.added, {'1': '2'}) self.assertEqual(self.changed, {}) notifier.reset_mock() td.setdefault(1, 4) self.assertEqual(td, {'1': '4'}) self.assertEqual(notifier.call_count, 1) self.assertEqual(self.removed, {}) self.assertEqual(self.added, {}) self.assertEqual(self.changed, {'1': '2'}) def test_pop(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) td.pop('b', 'X') self.assertEqual(self.removed, {'b': 2}) self.removed = None res = td.pop('x', 'X') self.assertIsNone(self.removed) self.assertEqual(res, 'X') def test_pop_key_error(self): python_dict = {} with self.assertRaises(KeyError) as python_e: python_dict.pop('a') td = TraitDict() with self.assertRaises(KeyError) as trait_e: 
td.pop('a') self.assertEqual(str(trait_e.exception), str(python_e.exception)) def test_popitem(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) items_cpy = td.copy().items() itm = td.popitem() self.assertIn(itm, items_cpy) self.assertNotIn(itm, td.items()) td = TraitDict({}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) with self.assertRaises(KeyError): td.popitem() def test_pickle(self): td = TraitDict({'a': 1, 'b': 2}, key_validator=str_validator, value_validator=int_validator, notifiers=[self.notification_handler]) for protocol in range((pickle.HIGHEST_PROTOCOL + 1)): td_unpickled = pickle.loads(pickle.dumps(td, protocol=protocol)) self.assertIs(td_unpickled.key_validator, str_validator) self.assertIs(td_unpickled.value_validator, int_validator) self.assertEqual(td_unpickled.notifiers, [])
class EnumProp(Property):
    _default = ''

    def _consume_args(self, options, *args):
        if not isinstance(options, (list, tuple)):
            raise TypeError('EnumProp needs list of options')
        if not all(isinstance(i, str) for i in options):
            raise TypeError('EnumProp options must be str')
        if not args:
            args = (options[0],)
        self._set_data([option.upper() for option in options])
        super()._consume_args(*args)

    def _validate(self, value, name, data):
        if not isinstance(value, str):
            raise TypeError('EnumProp %r value must be str.' % name)
        value = value.upper()
        if value not in data:
            raise ValueError('Invalid value for enum %r: %s' % (name, value))
        return value
def run(): tiles = list(gen_sites()) print(('// Tile count: %d' % len(tiles))) print(("// Seed: '%s'" % os.getenv('SEED'))) ninputs = 0 do_idx = [] for (i, sites) in enumerate(tiles): if random.randint(0, 1): do_idx.append(ninputs) ninputs += 1 else: do_idx.append(None) print('\nmodule top (\n (* CLOCK_BUFFER_TYPE = "NONE" *)\n input wire clk,\n output wire [{N}:0] do\n);\n\nwire clk_buf = clk;\n\nwire [{N}:0] do_buf;\n '.format(N=(ninputs - 1))) data = [] for (i, (sites, obuf_idx)) in enumerate(zip(tiles, do_idx)): if random.randint(0, 1): iob_inuse = sites[0] iob_other = sites[2] odelay_inuse = sites[1] odelay_other = sites[3] else: iob_inuse = sites[2] iob_other = sites[0] odelay_inuse = sites[3] odelay_other = sites[1] use_obuf = (obuf_idx is not None) if (not use_obuf): continue params = {'LOC': (('"' + odelay_inuse) + '"'), 'ODELAY_TYPE': (('"' + random.choice(['FIXED', 'VARIABLE', 'VAR_LOAD'])) + '"'), 'ODELAY_VALUE': random.randint(0, 31), 'HIGH_PERFORMANCE_MODE': (('"' + random.choice(['TRUE', 'FALSE'])) + '"'), 'CINVCTRL_SEL': (('"' + random.choice(['TRUE', 'FALSE'])) + '"'), 'PIPE_SEL': (('"' + random.choice(['TRUE', 'FALSE'])) + '"'), 'IS_C_INVERTED': random.randint(0, 1), 'IS_ODATAIN_INVERTED': random.randint(0, 1)} if (params['ODELAY_TYPE'] != '"VAR_LOAD_PIPE"'): params['PIPE_SEL'] = '"FALSE"' if (params['ODELAY_TYPE'] == '"VAR_LOAD"'): params['ODELAY_VALUE'] = 0 if (params['ODELAY_TYPE'] == '"VAR_LOAD_PIPE"'): params['ODELAY_VALUE'] = 0 if (params['ODELAY_TYPE'] == '"FIXED"'): params['IS_C_INVERTED'] = 0 param_str = ','.join((('.%s(%s)' % (k, v)) for (k, v) in params.items())) if (random.randint(0, 5) == 0): print('') print(('(* LOC="%s", KEEP, DONT_TOUCH *)' % iob_inuse)) print(('OBUF obuf_%03d (.I(%d), .O(do[%3d]));' % (obuf_idx, random.randint(0, 1), obuf_idx))) params['ODELAY_BYPASS'] = True params['ODELAY_NOT_IN_USE'] = ((odelay_inuse + ' ') + odelay_other) else: print('') print(('(* LOC="%s", KEEP, DONT_TOUCH *)' % iob_inuse)) print(('OBUF obuf_%03d (.I(do_buf[%3d]), .O(do[%3d]));' % (obuf_idx, obuf_idx, obuf_idx))) print(('mod #(%s) mod_%03d (.clk(clk_buf), .O(do_buf[%3d]));' % (param_str, i, obuf_idx))) params['ODELAY_BYPASS'] = False params['ODELAY_IN_USE'] = odelay_inuse params['ODELAY_NOT_IN_USE'] = odelay_other data.append(params) with open('params.json', 'w') as fp: json.dump(data, fp, sort_keys=True, indent=1) print('\n// IDELAYCTRL\n(* KEEP, DONT_TOUCH *)\nIDELAYCTRL idelayctrl();\n\nendmodule\n\n(* KEEP, DONT_TOUCH *)\nmodule mod(\n input wire clk,\n output wire O\n);\n\nparameter LOC = "";\nparameter ODELAY_TYPE = "FIXED";\nparameter ODELAY_VALUE = 0;\nparameter DELAY_SRC = "ODATAIN";\nparameter HIGH_PERFORMANCE_MODE = "TRUE";\nparameter SIGNAL_PATTERN = "DATA";\nparameter CINVCTRL_SEL = "FALSE";\nparameter PIPE_SEL = "FALSE";\nparameter IS_C_INVERTED = 0;\nparameter IS_ODATAIN_INVERTED = 0;\n\nwire x;\nwire lut;\n\n(* KEEP, DONT_TOUCH *)\nLUT2 l( .O(lut) );\n\n// ODELAY\n(* LOC=LOC, KEEP, DONT_TOUCH *)\nODELAYE2 #(\n .ODELAY_TYPE(ODELAY_TYPE),\n .ODELAY_VALUE(ODELAY_VALUE),\n .DELAY_SRC(DELAY_SRC),\n .HIGH_PERFORMANCE_MODE(HIGH_PERFORMANCE_MODE),\n .SIGNAL_PATTERN(SIGNAL_PATTERN),\n .CINVCTRL_SEL(CINVCTRL_SEL),\n .PIPE_SEL(PIPE_SEL),\n .IS_C_INVERTED(IS_C_INVERTED),\n .IS_ODATAIN_INVERTED(IS_ODATAIN_INVERTED)\n)\nodelay\n(\n .C(clk),\n .REGRST(),\n .LD(),\n .CE(),\n .INC(),\n .CINVCTRL(),\n .CNTVALUEIN(),\n .ODATAIN(lut),\n .LDPIPEEN(),\n .DATAOUT(O),\n .CNTVALUEOUT()\n);\n\nendmodule\n ')
class OptionSeriesLollipopSonificationDefaultinstrumentoptions(Options):

    @property
    def activeWhen(self) -> 'OptionSeriesLollipopSonificationDefaultinstrumentoptionsActivewhen':
        return self._config_sub_data('activeWhen', OptionSeriesLollipopSonificationDefaultinstrumentoptionsActivewhen)

    @property
    def instrument(self):
        return self._config_get('piano')

    @instrument.setter
    def instrument(self, text: str):
        self._config(text, js_type=False)

    @property
    def mapping(self) -> 'OptionSeriesLollipopSonificationDefaultinstrumentoptionsMapping':
        return self._config_sub_data('mapping', OptionSeriesLollipopSonificationDefaultinstrumentoptionsMapping)

    @property
    def midiName(self):
        return self._config_get(None)

    @midiName.setter
    def midiName(self, text: str):
        self._config(text, js_type=False)

    @property
    def pointGrouping(self) -> 'OptionSeriesLollipopSonificationDefaultinstrumentoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesLollipopSonificationDefaultinstrumentoptionsPointgrouping)

    @property
    def roundToMusicalNotes(self):
        return self._config_get(True)

    @roundToMusicalNotes.setter
    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def showPlayMarker(self):
        return self._config_get(True)

    @showPlayMarker.setter
    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)

    @property
    def type(self):
        return self._config_get('instrument')

    @type.setter
    def type(self, text: str):
        self._config(text, js_type=False)
def server(sock, site, log=None, environ=None, max_size=None,
           max_http_version=DEFAULT_MAX_HTTP_VERSION, protocol=HttpProtocol,
           server_event=None, minimum_chunk_size=None, log_x_forwarded_for=True,
           custom_pool=None, keepalive=True, log_output=True,
           log_format=DEFAULT_LOG_FORMAT, url_length_limit=MAX_REQUEST_LINE,
           debug=True, socket_timeout=None, capitalize_response_headers=True):
    serv = Server(sock, sock.getsockname(), site, log, environ=environ,
                  max_http_version=max_http_version, protocol=protocol,
                  minimum_chunk_size=minimum_chunk_size,
                  log_x_forwarded_for=log_x_forwarded_for, keepalive=keepalive,
                  log_output=log_output, log_format=log_format,
                  url_length_limit=url_length_limit, debug=debug,
                  socket_timeout=socket_timeout,
                  capitalize_response_headers=capitalize_response_headers)
    if server_event is not None:
        warnings.warn('eventlet.wsgi.Server() server_event kwarg is deprecated and will be removed soon', DeprecationWarning, stacklevel=2)
        server_event.send(serv)
    if max_size is None:
        max_size = DEFAULT_MAX_SIMULTANEOUS_REQUESTS
    if custom_pool is not None:
        pool = custom_pool
    else:
        pool = eventlet.GreenPool(max_size)
    if not (hasattr(pool, 'spawn') and hasattr(pool, 'waitall')):
        raise AttributeError('eventlet.wsgi.Server pool must provide methods: `spawn`, `waitall`.\nIf unsure, use eventlet.GreenPool.')
    connections = {}

    def _clean_connection(_, conn):
        connections.pop(conn[0], None)
        conn[2] = STATE_CLOSE
        greenio.shutdown_safe(conn[1])
        conn[1].close()

    try:
        serv.log.info('({}) wsgi starting up on {}'.format(serv.pid, socket_repr(sock)))
        while is_accepting:
            try:
                client_socket, client_addr = sock.accept()
                client_socket.settimeout(serv.socket_timeout)
                serv.log.debug('({}) accepted {!r}'.format(serv.pid, client_addr))
                connections[client_addr] = connection = [client_addr, client_socket, STATE_IDLE]
                pool.spawn(serv.process_request, connection).link(_clean_connection, connection)
            except ACCEPT_EXCEPTIONS as e:
                if support.get_errno(e) not in ACCEPT_ERRNO:
                    raise
            except (KeyboardInterrupt, SystemExit):
                serv.log.info('wsgi exiting')
                break
    finally:
        for cs in connections.values():
            prev_state = cs[2]
            cs[2] = STATE_CLOSE
            if prev_state == STATE_IDLE:
                greenio.shutdown_safe(cs[1])
        pool.waitall()
        serv.log.info('({}) wsgi exited, is_accepting={}'.format(serv.pid, is_accepting))
        try:
            sock.close()
        except OSError as e:
            if support.get_errno(e) not in BROKEN_SOCK:
                traceback.print_exc()
def mouse_click_combobox_or_choice(control, index, delay):
    if isinstance(control, wx.ComboBox):
        click_event = _create_event(wx.wxEVT_COMMAND_COMBOBOX_SELECTED, control)
    elif isinstance(control, wx.Choice):
        click_event = _create_event(wx.wxEVT_COMMAND_CHOICE_SELECTED, control)
    else:
        raise TypeError('Only supported controls are wxComboBox or wxChoice')
    click_event.SetString(control.GetString(index))
    control.SetSelection(index)
    control.ProcessWindowEvent(click_event)
def test_load_submission(tmp_path: Path) -> None:
    executor = local.LocalExecutor(tmp_path)
    job = executor.submit(f66, 67, y=68)
    submission = local.LocalJob(tmp_path, job.job_id).submission()
    assert submission.function is f66
    assert submission.args == (67,)
    assert submission.kwargs == {'y': 68}
    assert submission._result is None
class MockSock:
    def __init__(self, *, chunk=None, exception=None):
        self._recver = io.BytesIO(ZEN)
        self._sender = io.BytesIO()
        self.__closed = False
        self.__chunk = chunk
        self.__exception = exception
        self.flags = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._recver.close()
        self._sender.close()
        self.__closed = True
        if (exc_type is not None) and ('suppress' in exc_val.args[0]):
            return True
        return False

    def recv(self, bufsize, flags=0):
        if self.__closed:
            raise OSError(errno.EBADF, os.strerror(errno.EBADF))
        if bufsize is None:
            raise TypeError("'NoneType' object cannot be interpreted as an integer")
        if not isinstance(flags, int):
            raise TypeError('an integer is required (got type {})'.format(type(flags).__name__))
        self.flags = flags
        if self.__exception is not None:
            raise self.__exception
        if self.__chunk is None:
            return self._recver.read(bufsize)
        return self._recver.read(min(self.__chunk, bufsize))

    def send(self, data, flags=0):
        if self.__closed:
            raise OSError(errno.EBADF, os.strerror(errno.EBADF))
        if not isinstance(flags, int):
            raise TypeError('an integer is required (got type {})'.format(type(flags).__name__))
        self.flags = flags
        if self.__chunk is None:
            return self._sender.write(data)
        return self._sender.write(data[:self.__chunk])
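# Illustrative use of MockSock above; ZEN is assumed to be the module-level
# bytes payload the class serves (its exact value is not shown here).
with MockSock(chunk=8) as sock:
    first = sock.recv(1024)          # read is capped at 8 bytes by `chunk`
    assert first == ZEN[:8]
    written = sock.send(b'hello world')
    assert written == 8              # only the first `chunk` bytes are written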
class AmazonLootScraper(AmazonBaseScraper): def get_type() -> OfferType: return OfferType.LOOT def get_offer_handlers(self, page: Page) -> list[OfferHandler]: return [OfferHandler(page.locator('[data-a-target="offer-list-IN_GAME_LOOT"] .item-card__action > a:first-child'), self.read_raw_offer, self.normalize_offer)] async def page_loaded_hook(self, page: Page) -> None: (await Scraper.scroll_element_to_bottom(page, 'root')) async def read_raw_offer(self, element: Locator) -> AmazonLootRawOffer: base_raw_offer = (await self.read_base_raw_offer(element)) game_title = (await element.locator('.item-card-details__body p').text_content()) if (game_title is None): raise ValueError("Couldn't find game title.") return AmazonLootRawOffer(title=base_raw_offer.title, valid_to=base_raw_offer.valid_to, url=base_raw_offer.url, img_url=base_raw_offer.img_url, game_title=game_title) def normalize_offer(self, raw_offer: RawOffer) -> Offer: if (not isinstance(raw_offer, AmazonLootRawOffer)): raise TypeError('Wrong type of raw offer.') rawtext = {'title': raw_offer.title, 'gametitle': raw_offer.game_title} if (raw_offer.game_title is None): raise ValueError('No game title found.') title = f'{raw_offer.game_title}: {raw_offer.title}' end_date = None if raw_offer.valid_to: logger.debug(f'Found date: {raw_offer.valid_to} for {raw_offer.title}') try: raw_date = raw_offer.valid_to.removeprefix('Ends ') if (raw_date.lower() == 'today'): parsed_date = datetime.now(tz=timezone.utc).replace(hour=0, minute=0, second=0) elif (raw_date.lower() == 'tomorrow'): parsed_date = (datetime.now(tz=timezone.utc).replace(hour=0, minute=0, second=0) + timedelta(days=1)) else: parsed_date = datetime.strptime(raw_date, '%b %d, %Y').replace(tzinfo=timezone.utc, hour=0, minute=0, second=0) end_date = parsed_date except (ValueError, IndexError): logger.warning(f'Date parsing failed for {raw_offer.title}') return Offer(source=AmazonLootScraper.get_source(), duration=AmazonLootScraper.get_duration(), type=AmazonLootScraper.get_type(), title=title, probable_game_name=raw_offer.game_title, seen_last=datetime.now(timezone.utc), valid_to=end_date, rawtext=rawtext, url=raw_offer.url, img_url=raw_offer.img_url)
def test_far_to_near(mesh, DGDPC0, W):
    velocity = as_vector((0.0, -1.0, 0.0))
    u0 = project(velocity, W)
    xs = SpatialCoordinate(mesh)
    inflowexpr = conditional(And(real(xs[2]) > 0.25, real(xs[2]) < 0.75), 1.0, 0.5)
    inflow = Function(DGDPC0)
    inflow.interpolate(inflowexpr)
    n = FacetNormal(mesh)
    un = 0.5 * (dot(u0, n) + abs(dot(u0, n)))
    D = TrialFunction(DGDPC0)
    phi = TestFunction(DGDPC0)
    a1 = -inner(D, dot(u0, grad(phi))) * dx
    a2 = inner(un('+') * D('+') - un('-') * D('-'), jump(phi)) * dS_v
    a3 = inner(un * D, phi) * ds_v(3)
    a = a1 + a2 + a3
    L = -inner(inflow * dot(u0, n), phi) * ds_v(4)
    out = Function(DGDPC0)
    solve(a == L, out)
    assert max(abs(out.dat.data - inflow.dat.data)) < 1.4e-07
def introduce_data_drops(data, drop_config, set_to_none=False):
    assert isinstance(drop_config, DataDropConfig)
    assert type(data) is list
    assert drop_config.max_percentage_for_single_drop < drop_config.overall_drop_percentage
    num_datapoints = len(data)
    # cast to int so the randint() bounds below are valid and the final
    # equality check against a set size is well defined
    max_datapoints_per_drop = int((drop_config.max_percentage_for_single_drop / 100.0) * num_datapoints)
    overall_num_datapoints_to_drop = int((drop_config.overall_drop_percentage / 100.0) * num_datapoints)
    print('Introducing data drops for a dataset of length {}, overall number of datapoints to drop: {}, max number of datapoints per drop: {}.'.format(num_datapoints, overall_num_datapoints_to_drop, max_datapoints_per_drop))
    current_num_datapoints_dropped = 0
    dropped_datapoints_set = set()
    while current_num_datapoints_dropped < overall_num_datapoints_to_drop:
        remaining_datapoints_to_drop = overall_num_datapoints_to_drop - current_num_datapoints_dropped
        drop_start = randint(0, num_datapoints - 1)
        drop_size = randint(0, min(remaining_datapoints_to_drop, max_datapoints_per_drop))
        drop_end = min(drop_start + drop_size, num_datapoints - 1)
        dropped_datapoints_set.update(range(drop_start, drop_end))
        current_num_datapoints_dropped = len(dropped_datapoints_set)
    assert len(dropped_datapoints_set) == overall_num_datapoints_to_drop
    # delete from the back so earlier indices stay valid
    for i in sorted(dropped_datapoints_set, reverse=True):
        if set_to_none:
            data[i] = None
        else:
            del data[i]
    print('Dropped {} datapoints.'.format(len(dropped_datapoints_set)))
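# Hedged usage sketch for introduce_data_drops(). Only the two attribute
# names are taken from the function above; DataDropConfig's constructor
# signature is an assumption, as is randint coming from the random module.
import random

random.seed(0)  # make the randint() calls reproducible
config = DataDropConfig(max_percentage_for_single_drop=2.0,
                        overall_drop_percentage=10.0)
data = list(range(1000))
introduce_data_drops(data, config, set_to_none=True)
print(sum(1 for x in data if x is None))  # 100 datapoints replaced by None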
class BertLayer(nn.Module):
    def __init__(self, hidden_size, batch_size, seq_len, num_attention_heads,
                 intermediate_size, hidden_act, layer_norm_eps,
                 attention_probs_dropout_prob, hidden_dropout_prob):
        super().__init__()
        self.attention = BertAttention(batch_size=batch_size, seq_len=seq_len,
                                       hidden_size=hidden_size,
                                       num_attention_heads=num_attention_heads,
                                       layer_norm_eps=layer_norm_eps,
                                       attention_probs_dropout_prob=attention_probs_dropout_prob,
                                       hidden_dropout_prob=hidden_dropout_prob)
        self.intermediate = BertIntermediate(hidden_size, intermediate_size, hidden_act)
        self.output = BertOutput(hidden_size, intermediate_size, layer_norm_eps, hidden_dropout_prob)

    def feed_forward(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output

    def forward(self, hidden_states: Tensor):
        shape = hidden_states.shape()
        self_attention_outputs = self.attention(hidden_states)
        layer_output = self.feed_forward(self_attention_outputs[0])
        layer_output = (layer_output if layer_output._rank() == 3
                        else ops.reshape()(layer_output, shape))
        return (layer_output,)
def extractMayangel7WordpressCom(item):
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    tagmap = [('PRC', 'PRC', 'translated'),
              ('Loiterous', 'Loiterous', 'oel')]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
@dataclass(frozen=True, **SLOTS)
class Field:
    number: int
    if version_info >= (3, 10):
        _: ClassVar[KW_ONLY]
    packed: Union[bool, Sentinel] = DEFAULT
    one_of: Optional[OneOf] = None

    @classmethod
    def _from_annotated_args(cls, *args: Any) -> Optional[Field]:
        for arg in args:
            if isinstance(arg, Field):
                arg._validate()
                return arg
        return None

    def _validate(self) -> None:
        # protobuf field numbers must fit in 29 bits (1 .. 536_870_911)
        if not (1 <= self.number <= 536_870_911):
            raise IncorrectAnnotationError(f'field number {self.number} is outside the allowed range')
        if 19000 <= self.number <= 19999:
            raise IncorrectAnnotationError(f'field number {self.number} is reserved')

    def _packed_or(self, default: bool) -> bool:
        return self.packed if isinstance(self.packed, bool) else default
class IRCBot(Bot): factory_path = 'evennia.server.portal.irc.IRCBotFactory' def start(self, ev_channel=None, irc_botname=None, irc_channel=None, irc_network=None, irc_port=None, irc_ssl=None): if (not _IRC_ENABLED): self.delete() return if irc_botname: self.db.irc_botname = irc_botname elif (not self.db.irc_botname): self.db.irc_botname = self.key if ev_channel: channel = search.channel_search(ev_channel) if (not channel): raise RuntimeError(f"Evennia Channel '{ev_channel}' not found.") channel = channel[0] channel.connect(self) self.db.ev_channel = channel if irc_channel: self.db.irc_channel = irc_channel if irc_network: self.db.irc_network = irc_network if irc_port: self.db.irc_port = irc_port if irc_ssl: self.db.irc_ssl = irc_ssl configdict = {'uid': self.dbid, 'botname': self.db.irc_botname, 'channel': self.db.irc_channel, 'network': self.db.irc_network, 'port': self.db.irc_port, 'ssl': self.db.irc_ssl} evennia.SESSION_HANDLER.start_bot_session(self.factory_path, configdict) def at_msg_send(self, **kwargs): pass def get_nicklist(self, caller): if (not hasattr(self, '_nicklist_callers')): self._nicklist_callers = [] self._nicklist_callers.append(caller) super().msg(request_nicklist='') return def ping(self, caller): if (not hasattr(self, '_ping_callers')): self._ping_callers = [] self._ping_callers.append(caller) super().msg(ping='') def reconnect(self): super().msg(reconnect='') def msg(self, text=None, **kwargs): from_obj = kwargs.get('from_obj', None) options = (kwargs.get('options', None) or {}) if ((not self.ndb.ev_channel) and self.db.ev_channel): self.ndb.ev_channel = self.db.ev_channel if (('from_channel' in options) and text and (self.ndb.ev_channel.dbid == options['from_channel'])): if ((not from_obj) or (from_obj != [self])): super().msg(channel=text) def execute_cmd(self, session=None, txt=None, **kwargs): if (kwargs['type'] == 'nicklist'): if (hasattr(self, '_nicklist_callers') and self._nicklist_callers): chstr = f'{self.db.irc_channel} ({self.db.irc_network}:{self.db.irc_port})' nicklist = ', '.join(sorted(kwargs['nicklist'], key=(lambda n: n.lower()))) for obj in self._nicklist_callers: obj.msg('Nicks at {chstr}:\n {nicklist}'.format(chstr=chstr, nicklist=nicklist)) self._nicklist_callers = [] return elif (kwargs['type'] == 'ping'): if (hasattr(self, '_ping_callers') and self._ping_callers): chstr = f'{self.db.irc_channel} ({self.db.irc_network}:{self.db.irc_port})' for obj in self._ping_callers: obj.msg('IRC ping return from {chstr} took {time}s.'.format(chstr=chstr, time=kwargs['timing'])) self._ping_callers = [] return elif (kwargs['type'] == 'privmsg'): user = kwargs['user'] if txt.lower().startswith('who'): whos = [] t0 = time.time() for sess in evennia.SESSION_HANDLER.get_sessions(): delta_cmd = (t0 - sess.cmd_last_visible) delta_conn = (t0 - session.conn_time) account = sess.get_account() whos.append(('%s (%s/%s)' % (utils.crop(('|w%s|n' % account.name), width=25), utils.time_format(delta_conn, 0), utils.time_format(delta_cmd, 1)))) text = f"Who list (online/idle): {', '.join(sorted(whos, key=(lambda w: w.lower())))}" elif txt.lower().startswith('about'): text = f"This is an Evennia IRC bot connecting from '{settings.SERVERNAME}'." else: text = "I understand 'who' and 'about'." 
super().msg(privmsg=((text,), {'user': user})) else: if (kwargs['type'] == 'action'): text = f"{kwargs['user']}{kwargs['channel']} {txt}" else: text = f"{kwargs['user']}{kwargs['channel']}: {txt}" if ((not self.ndb.ev_channel) and self.db.ev_channel): self.ndb.ev_channel = self.db.ev_channel if self.ndb.ev_channel: self.ndb.ev_channel.msg(text, senders=self)
def flash_id(esp, args):
    flash_id = esp.flash_id()
    print('Manufacturer: %02x' % (flash_id & 255))
    flid_lowbyte = (flash_id >> 16) & 255
    print('Device: %02x%02x' % ((flash_id >> 8) & 255, flid_lowbyte))
    print('Detected flash size: %s' % DETECTED_FLASH_SIZES.get(flid_lowbyte, 'Unknown'))
    flash_type = esp.flash_type()
    flash_type_dict = {0: 'quad (4 data lines)', 1: 'octal (8 data lines)'}
    flash_type_str = flash_type_dict.get(flash_type)
    if flash_type_str:
        print(f'Flash type set in eFuse: {flash_type_str}')
def neval(expr, digits=20, **kwargs):
    from flint import ctx, acb
    assert digits >= 1
    orig = ctx.prec
    target = (digits * 3.33) + 5
    wp = (digits * 3.33) + 30
    maxprec = (wp * 10) + 4000
    evaluator = ArbNumericalEvaluation()
    try:
        while 1:
            ctx.prec = wp
            try:
                v = evaluator.eval(expr)
            except ArbFiniteError:
                v = acb('nan')
            if v.rel_accuracy_bits() >= target:
                break
            wp *= 2
            if wp > maxprec:
                break
    finally:
        ctx.prec = orig
    if not v.is_finite():
        raise ValueError('failed to converge to a finite value')
    if isinstance(v, acb) and (v.imag == 0):
        v = v.real
    if kwargs.get('as_arb'):
        return v
    return arb_as_fungrim(v, digits)
def name_of_process(hProcess):
    hModule = c_ulong()
    count = c_ulong()
    modname = c_buffer(30)
    windll.psapi.EnumProcessModules(hProcess, byref(hModule), sizeof(hModule), byref(count))
    windll.psapi.GetModuleBaseNameA(hProcess, hModule.value, modname, sizeof(modname))
    return b''.join([i for i in modname if i != b'\x00'])
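# Hedged usage sketch for name_of_process(): open the current process with
# PROCESS_QUERY_INFORMATION | PROCESS_VM_READ (0x0400 | 0x0010). The flags
# and OpenProcess/CloseHandle calls are standard Win32, not from the snippet.
import os
from ctypes import windll

hProcess = windll.kernel32.OpenProcess(0x0410, False, os.getpid())
try:
    print(name_of_process(hProcess))   # e.g. b'python.exe'
finally:
    windll.kernel32.CloseHandle(hProcess)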
def test_byte_strobes():
    bf_a = BitField('bf_a', lsb=0, width=16)
    bf_b = BitField('bf_b', lsb=0, width=13)
    bf_c = BitField('bf_c', lsb=8, width=16)
    bf_d = BitField('bf_d', lsb=14, width=14)
    assert bf_a.byte_strobes == {0: {'bf_lsb': 0, 'bf_msb': 7, 'wdata_lsb': 0, 'wdata_msb': 7},
                                 1: {'bf_lsb': 8, 'bf_msb': 15, 'wdata_lsb': 8, 'wdata_msb': 15}}
    assert bf_b.byte_strobes == {0: {'bf_lsb': 0, 'bf_msb': 7, 'wdata_lsb': 0, 'wdata_msb': 7},
                                 1: {'bf_lsb': 8, 'bf_msb': 12, 'wdata_lsb': 8, 'wdata_msb': 12}}
    assert bf_c.byte_strobes == {1: {'bf_lsb': 0, 'bf_msb': 7, 'wdata_lsb': 8, 'wdata_msb': 15},
                                 2: {'bf_lsb': 8, 'bf_msb': 15, 'wdata_lsb': 16, 'wdata_msb': 23}}
    assert bf_d.byte_strobes == {1: {'bf_lsb': 0, 'bf_msb': 1, 'wdata_lsb': 14, 'wdata_msb': 15},
                                 2: {'bf_lsb': 2, 'bf_msb': 9, 'wdata_lsb': 16, 'wdata_msb': 23},
                                 3: {'bf_lsb': 10, 'bf_msb': 13, 'wdata_lsb': 24, 'wdata_msb': 27}}
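# The expected values in the test above follow a simple byte-lane slicing
# rule. A minimal sketch of that mapping, written as an assumption about how
# BitField.byte_strobes is derived (the real implementation is not shown);
# it reproduces all four expected dictionaries.
def byte_strobes(lsb: int, width: int) -> dict:
    msb = lsb + width - 1
    strobes = {}
    for byte in range(lsb // 8, msb // 8 + 1):
        wdata_lsb = max(lsb, 8 * byte)       # slice of the write-data bus
        wdata_msb = min(msb, 8 * byte + 7)
        strobes[byte] = {'bf_lsb': wdata_lsb - lsb,   # slice of the bitfield
                         'bf_msb': wdata_msb - lsb,
                         'wdata_lsb': wdata_lsb,
                         'wdata_msb': wdata_msb}
    return strobes

assert byte_strobes(14, 14)[3] == {'bf_lsb': 10, 'bf_msb': 13,
                                   'wdata_lsb': 24, 'wdata_msb': 27}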
def _model_stats_str(model: nn.Module, statistics: Dict[str, Dict[str, str]]) -> str:
    def _addindent(s_: str, numSpaces: int) -> str:
        s = s_.split('\n')
        if len(s) == 1:
            return s_
        first = s.pop(0)
        s = [(numSpaces * ' ') + line for line in s]
        s = '\n'.join(s)
        s = first + '\n' + s
        return s

    def print_statistics(name: str) -> str:
        if name not in statistics:
            return ''
        printed_stats = ['{}: {}'.format(k, v) for (k, v) in statistics[name].items()]
        return ', '.join(printed_stats)

    def repr_with_statistics(module: nn.Module, name: str) -> str:
        extra_lines = []
        extra_repr = module.extra_repr()
        printed_stats = print_statistics(name)
        if extra_repr:
            extra_lines.extend(extra_repr.split('\n'))
        if printed_stats:
            extra_lines.extend(printed_stats.split('\n'))
        child_lines = []
        for (key, submod) in module._modules.items():
            submod_name = name + ('.' if name else '') + key
            submod_str = repr_with_statistics(submod, submod_name)
            submod_str = _addindent(submod_str, 2)
            child_lines.append('(' + key + '): ' + submod_str)
        lines = extra_lines + child_lines
        main_str = module._get_name() + '('
        if lines:
            if (len(extra_lines) == 1) and (not child_lines):
                main_str += extra_lines[0]
            else:
                main_str += '\n  ' + '\n  '.join(lines) + '\n'
        main_str += ')'
        return main_str

    return repr_with_statistics(model, '')
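# Hedged usage sketch for _model_stats_str() with a toy torch model. The
# statistics keys are the dotted submodule paths that repr_with_statistics()
# builds ('0' names the first child of a Sequential); the stats themselves
# are made up for illustration.
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU())
stats = {'0': {'params': '40'}}        # annotate only the Linear layer
print(_model_stats_str(model, stats))  # Linear's line gains "params: 40"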
class OptionSeriesStreamgraphOnpoint(Options):

    @property
    def connectorOptions(self) -> 'OptionSeriesStreamgraphOnpointConnectoroptions':
        return self._config_sub_data('connectorOptions', OptionSeriesStreamgraphOnpointConnectoroptions)

    @property
    def id(self):
        return self._config_get(None)

    @id.setter
    def id(self, text: str):
        self._config(text, js_type=False)

    @property
    def position(self) -> 'OptionSeriesStreamgraphOnpointPosition':
        return self._config_sub_data('position', OptionSeriesStreamgraphOnpointPosition)
class OptionSeriesBubbleSonificationTracksMappingLowpassFrequency(Options):

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
class ActionDescriptor(BaseDescriptor):
    def __init__(self, func, name, doc):
        self._func = func
        self._name = name
        self.__doc__ = self._format_doc('action', name, doc, func)

    def __get__(self, instance, owner):
        if instance is None:
            return self
        private_name = '_' + self._name + '_action'
        try:
            action = getattr(instance, private_name)
        except AttributeError:
            action = Action(instance, self._func, self._name, self.__doc__)
            setattr(instance, private_name, action)
        action._use_once(self._func)
        return action
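# The descriptor above caches one Action per instance under a private
# attribute name. The same lazy per-instance caching pattern in isolation,
# as an illustrative sketch independent of Action/BaseDescriptor:
class CachedAttr:
    def __init__(self, factory, name):
        self._factory = factory
        self._private = '_' + name + '_cached'

    def __get__(self, instance, owner):
        if instance is None:
            return self
        try:
            return getattr(instance, self._private)   # cache hit
        except AttributeError:
            value = self._factory(instance)           # build once per instance
            setattr(instance, self._private, value)
            return value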
class RoundTripConstructor(SafeConstructor): def construct_scalar(self, node): if (not isinstance(node, ScalarNode)): raise ConstructorError(None, None, ('expected a scalar node, but found %s' % node.id), node.start_mark) if ((node.style == '|') and isinstance(node.value, text_type)): lss = LiteralScalarString(node.value, anchor=node.anchor) if (node.comment and node.comment[1]): lss.comment = node.comment[1][0] return lss if ((node.style == '>') and isinstance(node.value, text_type)): fold_positions = [] idx = (- 1) while True: idx = node.value.find('\x07', (idx + 1)) if (idx < 0): break fold_positions.append((idx - len(fold_positions))) fss = FoldedScalarString(node.value.replace('\x07', ''), anchor=node.anchor) if (node.comment and node.comment[1]): fss.comment = node.comment[1][0] if fold_positions: fss.fold_pos = fold_positions return fss elif (bool(self._preserve_quotes) and isinstance(node.value, text_type)): if (node.style == "'"): return SingleQuotedScalarString(node.value, anchor=node.anchor) if (node.style == '"'): return DoubleQuotedScalarString(node.value, anchor=node.anchor) if node.anchor: return PlainScalarString(node.value, anchor=node.anchor) return node.value def construct_yaml_int(self, node): width = None value_su = to_str(self.construct_scalar(node)) try: sx = value_su.rstrip('_') underscore = [((len(sx) - sx.rindex('_')) - 1), False, False] except ValueError: underscore = None except IndexError: underscore = None value_s = value_su.replace('_', '') sign = (+ 1) if (value_s[0] == '-'): sign = (- 1) if (value_s[0] in '+-'): value_s = value_s[1:] if (value_s == '0'): return 0 elif value_s.startswith('0b'): if ((self.resolver.processing_version > (1, 1)) and (value_s[2] == '0')): width = len(value_s[2:]) if (underscore is not None): underscore[1] = (value_su[2] == '_') underscore[2] = ((len(value_su[2:]) > 1) and (value_su[(- 1)] == '_')) return BinaryInt((sign * int(value_s[2:], 2)), width=width, underscore=underscore, anchor=node.anchor) elif value_s.startswith('0x'): if ((self.resolver.processing_version > (1, 1)) and (value_s[2] == '0')): width = len(value_s[2:]) hex_fun = HexInt for ch in value_s[2:]: if (ch in 'ABCDEF'): hex_fun = HexCapsInt break if (ch in 'abcdef'): break if (underscore is not None): underscore[1] = (value_su[2] == '_') underscore[2] = ((len(value_su[2:]) > 1) and (value_su[(- 1)] == '_')) return hex_fun((sign * int(value_s[2:], 16)), width=width, underscore=underscore, anchor=node.anchor) elif value_s.startswith('0o'): if ((self.resolver.processing_version > (1, 1)) and (value_s[2] == '0')): width = len(value_s[2:]) if (underscore is not None): underscore[1] = (value_su[2] == '_') underscore[2] = ((len(value_su[2:]) > 1) and (value_su[(- 1)] == '_')) return OctalInt((sign * int(value_s[2:], 8)), width=width, underscore=underscore, anchor=node.anchor) elif ((self.resolver.processing_version != (1, 2)) and (value_s[0] == '0')): return (sign * int(value_s, 8)) elif ((self.resolver.processing_version != (1, 2)) and (':' in value_s)): digits = [int(part) for part in value_s.split(':')] digits.reverse() base = 1 value = 0 for digit in digits: value += (digit * base) base *= 60 return (sign * value) elif ((self.resolver.processing_version > (1, 1)) and (value_s[0] == '0')): if (underscore is not None): underscore[2] = ((len(value_su) > 1) and (value_su[(- 1)] == '_')) return ScalarInt((sign * int(value_s)), width=len(value_s), underscore=underscore) elif underscore: underscore[2] = ((len(value_su) > 1) and (value_su[(- 1)] == '_')) return 
ScalarInt((sign * int(value_s)), width=None, underscore=underscore, anchor=node.anchor) elif node.anchor: return ScalarInt((sign * int(value_s)), width=None, anchor=node.anchor) else: return (sign * int(value_s)) def construct_yaml_float(self, node): def leading_zeros(v): lead0 = 0 idx = 0 while ((idx < len(v)) and (v[idx] in '0.')): if (v[idx] == '0'): lead0 += 1 idx += 1 return lead0 m_sign = False value_so = to_str(self.construct_scalar(node)) value_s = value_so.replace('_', '').lower() sign = (+ 1) if (value_s[0] == '-'): sign = (- 1) if (value_s[0] in '+-'): m_sign = value_s[0] value_s = value_s[1:] if (value_s == '.inf'): return (sign * self.inf_value) if (value_s == '.nan'): return self.nan_value if ((self.resolver.processing_version != (1, 2)) and (':' in value_s)): digits = [float(part) for part in value_s.split(':')] digits.reverse() base = 1 value = 0.0 for digit in digits: value += (digit * base) base *= 60 return (sign * value) if ('e' in value_s): try: (mantissa, exponent) = value_so.split('e') exp = 'e' except ValueError: (mantissa, exponent) = value_so.split('E') exp = 'E' if (self.resolver.processing_version != (1, 2)): if ('.' not in mantissa): warnings.warn(MantissaNoDotYAML1_1Warning(node, value_so)) lead0 = leading_zeros(mantissa) width = len(mantissa) prec = mantissa.find('.') if m_sign: width -= 1 e_width = len(exponent) e_sign = (exponent[0] in '+-') return ScalarFloat((sign * float(value_s)), width=width, prec=prec, m_sign=m_sign, m_lead0=lead0, exp=exp, e_width=e_width, e_sign=e_sign, anchor=node.anchor) width = len(value_so) prec = value_so.index('.') lead0 = leading_zeros(value_so) return ScalarFloat((sign * float(value_s)), width=width, prec=prec, m_sign=m_sign, m_lead0=lead0, anchor=node.anchor) def construct_yaml_str(self, node): value = self.construct_scalar(node) if isinstance(value, ScalarString): return value if PY3: return value try: return value.encode('ascii') except AttributeError: return value except UnicodeEncodeError: return value def construct_rt_sequence(self, node, seqtyp, deep=False): if (not isinstance(node, SequenceNode)): raise ConstructorError(None, None, ('expected a sequence node, but found %s' % node.id), node.start_mark) ret_val = [] if node.comment: seqtyp._yaml_add_comment(node.comment[:2]) if (len(node.comment) > 2): seqtyp.yaml_end_comment_extend(node.comment[2], clear=True) if node.anchor: from .serializer import templated_id if (not templated_id(node.anchor)): seqtyp.yaml_set_anchor(node.anchor) for (idx, child) in enumerate(node.value): if child.comment: seqtyp._yaml_add_comment(child.comment, key=idx) child.comment = None ret_val.append(self.construct_object(child, deep=deep)) seqtyp._yaml_set_idx_line_col(idx, [child.start_mark.line, child.start_mark.column]) return ret_val def flatten_mapping(self, node): def constructed(value_node): if (value_node in self.constructed_objects): value = self.constructed_objects[value_node] else: value = self.construct_object(value_node, deep=False) return value merge_map_list = [] index = 0 while (index < len(node.value)): (key_node, value_node) = node.value[index] if (key_node.tag == u'tag:yaml.org,2002:merge'): if merge_map_list: if self.allow_duplicate_keys: del node.value[index] index += 1 continue args = ['while constructing a mapping', node.start_mark, 'found duplicate key "{}"'.format(key_node.value), key_node.start_mark, '\n To suppress this check see:\n ', ' Duplicate keys will become an error in future releases, and are errors\n by default when using the new API.\n '] if 
(self.allow_duplicate_keys is None): warnings.warn(DuplicateKeyFutureWarning(*args)) else: raise DuplicateKeyError(*args) del node.value[index] if isinstance(value_node, MappingNode): merge_map_list.append((index, constructed(value_node))) elif isinstance(value_node, SequenceNode): for subnode in value_node.value: if (not isinstance(subnode, MappingNode)): raise ConstructorError('while constructing a mapping', node.start_mark, ('expected a mapping for merging, but found %s' % subnode.id), subnode.start_mark) merge_map_list.append((index, constructed(subnode))) else: raise ConstructorError('while constructing a mapping', node.start_mark, ('expected a mapping or list of mappings for merging, but found %s' % value_node.id), value_node.start_mark) elif (key_node.tag == u'tag:yaml.org,2002:value'): key_node.tag = u'tag:yaml.org,2002:str' index += 1 else: index += 1 return merge_map_list def _sentinel(self): pass def construct_mapping(self, node, maptyp, deep=False): if (not isinstance(node, MappingNode)): raise ConstructorError(None, None, ('expected a mapping node, but found %s' % node.id), node.start_mark) merge_map = self.flatten_mapping(node) if node.comment: maptyp._yaml_add_comment(node.comment[:2]) if (len(node.comment) > 2): maptyp.yaml_end_comment_extend(node.comment[2], clear=True) if node.anchor: from .serializer import templated_id if (not templated_id(node.anchor)): maptyp.yaml_set_anchor(node.anchor) (last_key, last_value) = (None, self._sentinel) for (key_node, value_node) in node.value: key = self.construct_object(key_node, deep=True) if (not isinstance(key, Hashable)): if isinstance(key, MutableSequence): key_s = CommentedKeySeq(key) if (key_node.flow_style is True): key_s.fa.set_flow_style() elif (key_node.flow_style is False): key_s.fa.set_block_style() key = key_s elif isinstance(key, MutableMapping): key_m = CommentedKeyMap(key) if (key_node.flow_style is True): key_m.fa.set_flow_style() elif (key_node.flow_style is False): key_m.fa.set_block_style() key = key_m if PY2: try: hash(key) except TypeError as exc: raise ConstructorError('while constructing a mapping', node.start_mark, ('found unacceptable key (%s)' % exc), key_node.start_mark) elif (not isinstance(key, Hashable)): raise ConstructorError('while constructing a mapping', node.start_mark, 'found unhashable key', key_node.start_mark) value = self.construct_object(value_node, deep=deep) if self.check_mapping_key(node, key_node, maptyp, key, value): if (key_node.comment and (len(key_node.comment) > 4) and key_node.comment[4]): if (last_value is None): key_node.comment[0] = key_node.comment.pop(4) maptyp._yaml_add_comment(key_node.comment, value=last_key) else: key_node.comment[2] = key_node.comment.pop(4) maptyp._yaml_add_comment(key_node.comment, key=key) key_node.comment = None if key_node.comment: maptyp._yaml_add_comment(key_node.comment, key=key) if value_node.comment: maptyp._yaml_add_comment(value_node.comment, value=key) maptyp._yaml_set_kv_line_col(key, [key_node.start_mark.line, key_node.start_mark.column, value_node.start_mark.line, value_node.start_mark.column]) maptyp[key] = value (last_key, last_value) = (key, value) if merge_map: maptyp.add_yaml_merge(merge_map) def construct_setting(self, node, typ, deep=False): if (not isinstance(node, MappingNode)): raise ConstructorError(None, None, ('expected a mapping node, but found %s' % node.id), node.start_mark) if node.comment: typ._yaml_add_comment(node.comment[:2]) if (len(node.comment) > 2): typ.yaml_end_comment_extend(node.comment[2], clear=True) if 
node.anchor: from .serializer import templated_id if (not templated_id(node.anchor)): typ.yaml_set_anchor(node.anchor) for (key_node, value_node) in node.value: key = self.construct_object(key_node, deep=True) if (not isinstance(key, Hashable)): if isinstance(key, list): key = tuple(key) if PY2: try: hash(key) except TypeError as exc: raise ConstructorError('while constructing a mapping', node.start_mark, ('found unacceptable key (%s)' % exc), key_node.start_mark) elif (not isinstance(key, Hashable)): raise ConstructorError('while constructing a mapping', node.start_mark, 'found unhashable key', key_node.start_mark) value = self.construct_object(value_node, deep=deep) self.check_set_key(node, key_node, typ, key) if key_node.comment: typ._yaml_add_comment(key_node.comment, key=key) if value_node.comment: typ._yaml_add_comment(value_node.comment, value=key) typ.add(key) def construct_yaml_seq(self, node): data = CommentedSeq() data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) if node.comment: data._yaml_add_comment(node.comment) (yield data) data.extend(self.construct_rt_sequence(node, data)) self.set_collection_style(data, node) def construct_yaml_map(self, node): data = CommentedMap() data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) (yield data) self.construct_mapping(node, data, deep=True) self.set_collection_style(data, node) def set_collection_style(self, data, node): if (len(data) == 0): return if (node.flow_style is True): data.fa.set_flow_style() elif (node.flow_style is False): data.fa.set_block_style() def construct_yaml_object(self, node, cls): data = cls.__new__(cls) (yield data) if hasattr(data, '__setstate__'): state = SafeConstructor.construct_mapping(self, node, deep=True) data.__setstate__(state) else: state = SafeConstructor.construct_mapping(self, node) data.__dict__.update(state) def construct_yaml_omap(self, node): omap = CommentedOrderedMap() omap._yaml_set_line_col(node.start_mark.line, node.start_mark.column) if (node.flow_style is True): omap.fa.set_flow_style() elif (node.flow_style is False): omap.fa.set_block_style() (yield omap) if node.comment: omap._yaml_add_comment(node.comment[:2]) if (len(node.comment) > 2): omap.yaml_end_comment_extend(node.comment[2], clear=True) if (not isinstance(node, SequenceNode)): raise ConstructorError('while constructing an ordered map', node.start_mark, ('expected a sequence, but found %s' % node.id), node.start_mark) for subnode in node.value: if (not isinstance(subnode, MappingNode)): raise ConstructorError('while constructing an ordered map', node.start_mark, ('expected a mapping of length 1, but found %s' % subnode.id), subnode.start_mark) if (len(subnode.value) != 1): raise ConstructorError('while constructing an ordered map', node.start_mark, ('expected a single mapping item, but found %d items' % len(subnode.value)), subnode.start_mark) (key_node, value_node) = subnode.value[0] key = self.construct_object(key_node) assert (key not in omap) value = self.construct_object(value_node) if key_node.comment: omap._yaml_add_comment(key_node.comment, key=key) if subnode.comment: omap._yaml_add_comment(subnode.comment, key=key) if value_node.comment: omap._yaml_add_comment(value_node.comment, value=key) omap[key] = value def construct_yaml_set(self, node): data = CommentedSet() data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) (yield data) self.construct_setting(node, data) def construct_undefined(self, node): try: if isinstance(node, MappingNode): data = CommentedMap() 
data._yaml_set_line_col(node.start_mark.line, node.start_mark.column) if (node.flow_style is True): data.fa.set_flow_style() elif (node.flow_style is False): data.fa.set_block_style() data.yaml_set_tag(node.tag) (yield data) if node.anchor: data.yaml_set_anchor(node.anchor) self.construct_mapping(node, data) return elif isinstance(node, ScalarNode): data2 = TaggedScalar() data2.value = self.construct_scalar(node) data2.style = node.style data2.yaml_set_tag(node.tag) (yield data2) if node.anchor: data2.yaml_set_anchor(node.anchor, always_dump=True) return elif isinstance(node, SequenceNode): data3 = CommentedSeq() data3._yaml_set_line_col(node.start_mark.line, node.start_mark.column) if (node.flow_style is True): data3.fa.set_flow_style() elif (node.flow_style is False): data3.fa.set_block_style() data3.yaml_set_tag(node.tag) (yield data3) if node.anchor: data3.yaml_set_anchor(node.anchor) data3.extend(self.construct_sequence(node)) return except: pass raise ConstructorError(None, None, ('could not determine a constructor for the tag %r' % utf8(node.tag)), node.start_mark) def construct_yaml_timestamp(self, node, values=None): try: match = self.timestamp_regexp.match(node.value) except TypeError: match = None if (match is None): raise ConstructorError(None, None, 'failed to construct timestamp from "{}"'.format(node.value), node.start_mark) values = match.groupdict() if (not values['hour']): return SafeConstructor.construct_yaml_timestamp(self, node, values) for part in ['t', 'tz_sign', 'tz_hour', 'tz_minute']: if values[part]: break else: return SafeConstructor.construct_yaml_timestamp(self, node, values) year = int(values['year']) month = int(values['month']) day = int(values['day']) hour = int(values['hour']) minute = int(values['minute']) second = int(values['second']) fraction = 0 if values['fraction']: fraction_s = values['fraction'][:6] while (len(fraction_s) < 6): fraction_s += '0' fraction = int(fraction_s) if ((len(values['fraction']) > 6) and (int(values['fraction'][6]) > 4)): fraction += 1 delta = None if values['tz_sign']: tz_hour = int(values['tz_hour']) minutes = values['tz_minute'] tz_minute = (int(minutes) if minutes else 0) delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute) if (values['tz_sign'] == '-'): delta = (- delta) if delta: dt = datetime.datetime(year, month, day, hour, minute) dt -= delta data = TimeStamp(dt.year, dt.month, dt.day, dt.hour, dt.minute, second, fraction) data._yaml['delta'] = delta tz = (values['tz_sign'] + values['tz_hour']) if values['tz_minute']: tz += (':' + values['tz_minute']) data._yaml['tz'] = tz else: data = TimeStamp(year, month, day, hour, minute, second, fraction) if values['tz']: data._yaml['tz'] = values['tz'] if values['t']: data._yaml['t'] = True return data def construct_yaml_bool(self, node): b = SafeConstructor.construct_yaml_bool(self, node) if node.anchor: return ScalarBoolean(b, anchor=node.anchor) return b
class DefaultRenderer(PyGridCellRenderer):
    selected_cells = wx.Brush(wx.Colour(255, 255, 200), wx.SOLID)
    normal_cells = wx.Brush('white', wx.SOLID)
    odd_cells = wx.Brush(wx.Colour(240, 240, 240), wx.SOLID)
    error_cells = wx.Brush(wx.Colour(255, 122, 122), wx.SOLID)
    warn_cells = wx.Brush(wx.Colour(255, 242, 0), wx.SOLID)

    def __init__(self, color='black', font='ARIAL', fontsize=8):
        PyGridCellRenderer.__init__(self)
        self.color = color
        self.foundary = font
        self.fontsize = fontsize
        self.font = wx.Font(fontsize, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, 0, font)

    def Clone(self):
        return DefaultRenderer(self.color, self.foundary, self.fontsize)

    def Draw(self, grid, attr, dc, rect, row, col, isSelected):
        self.DrawBackground(grid, attr, dc, rect, row, col, isSelected)
        self.DrawForeground(grid, attr, dc, rect, row, col, isSelected)
        dc.DestroyClippingRegion()

    def DrawBackground(self, grid, attr, dc, rect, row, col, isSelected):
        dc.SetClippingRegion(rect)
        dc.SetBackgroundMode(wx.SOLID)
        dc.SetPen(wx.Pen(wx.WHITE, 1, wx.SOLID))
        if isSelected:
            dc.SetBrush(DefaultRenderer.selected_cells)
        elif row % 2:
            dc.SetBrush(DefaultRenderer.normal_cells)
        else:
            dc.SetBrush(DefaultRenderer.odd_cells)
        dc.DrawRectangle(rect.x, rect.y, rect.width, rect.height)

    def DrawForeground(self, grid, attr, dc, rect, row, col, isSelected):
        dc.SetBackgroundMode(wx.TRANSPARENT)
        text = grid.model.GetValue(row, col)
        dc.SetTextForeground(self.color)
        dc.SetFont(self.font)
        dc.DrawText(self.FormatText(text), rect.x + 1, rect.y + 1)
        self.DrawEllipses(grid, attr, dc, rect, row, col, isSelected)

    def FormatText(self, text):
        try:
            text = '%0.3f' % atof(text)
        except (ValueError, TypeError):
            pass
        return text

    def DrawEllipses(self, grid, attr, dc, rect, row, col, isSelected):
        text = grid.model.GetValue(row, col)
        if not isinstance(text, str):
            msg = 'Problem appending "..." to cell: %d %d' % (row, col)
            raise TypeError(msg)
        width, height = dc.GetTextExtent(text)
        if width > (rect.width - 2):
            # text overflows the cell: overwrite its tail with "..."
            width, height = dc.GetTextExtent('...')
            x = rect.x + 1 + rect.width - 2 - width
            dc.DrawRectangle(x, rect.y + 1, width + 1, height)
            dc.DrawText('...', x, rect.y + 1)

    def GetBestSize88(self, grid, attr, dc, row, col):
        size = PyGridCellRenderer.GetBestSize(self, grid, attr, dc, row, col)
        print('', size)
        return size
def babai(B, t):
    A = IntegerMatrix(B.nrows + 1, B.ncols + 1)
    for i in range(B.nrows):
        for j in range(B.ncols):
            A[i, j] = B[i, j]
    LLL.reduction(A)
    A.swap_rows(0, B.nrows)
    for j in range(B.ncols):
        A[-1, j] = t[j]
    A[-1, -1] = ceil(A[-2].norm())
    LLL.reduction(A)
    v = [0] * len(t)
    if A[-1, -1] > 0:
        for i in range(len(t)):
            v[i] = t[i] - A[-1][i]
    else:
        for i in range(len(t)):
            v[i] = t[i] + A[-1][i]
    return tuple(v)
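# Hedged usage sketch for babai() above (Babai-style CVP via the embedding
# trick). It assumes the fpylll bindings the helper appears to target
# (IntegerMatrix, LLL) and math.ceil; the lattice and target are made up.
from math import ceil
from fpylll import IntegerMatrix, LLL

B = IntegerMatrix.from_matrix([[101, 0, 0],
                               [0, 107, 0],
                               [3, 7, 1]])
t = (23, 41, 2)
print(babai(B, t))   # a lattice vector close to t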
def test_correct_number_of_rows_are_generated():
    df = gen.generate(props={'region': gen.choice(data=['EMEA', 'LATAM', 'NAM', 'APAC'], weights=[0.1, 0.1, 0.3, 0.5]),
                             'country': gen.country_codes(region_field='region'),
                             'client_type': gen.choice(data=data.client_types()),
                             'client_name': gen.company_namer(field='client_type', field_type='client_type', countrycode_field='country')},
                      count=50,
                      randomstate=np.random.RandomState()).to_dataframe()
    # the test name implies a row-count check; 50 matches count= above
    assert len(df) == 50
def get_tx_status(account: AbstractAccount, tx_hash: bytes, height: int, conf: int, timestamp: Union[bool, int]) -> TxStatus:
    if not account.have_transaction_data(tx_hash):
        return TxStatus.MISSING
    metadata = account.get_transaction_metadata(tx_hash)
    if metadata.position == 0:
        # position 0 in a block means a coinbase transaction, which must mature
        if height + COINBASE_MATURITY > account._wallet.get_local_height():
            return TxStatus.UNMATURED
    elif conf == 0:
        if height > 0:
            return TxStatus.UNVERIFIED
        return TxStatus.UNCONFIRMED
    return TxStatus.FINAL
class TestTachoMotorCountPerRotValue(ptc.ParameterizedTestCase):
    def test_count_per_rot_value(self):
        self.assertEqual(self._param['motor'].count_per_rot,
                         motor_info[self._param['motor'].driver_name]['count_per_rot'])

    def test_count_per_rot_value_is_read_only(self):
        with self.assertRaises(AttributeError):
            self._param['motor'].count_per_rot = 'ThisShouldNotWork'
class OptionSeriesFunnelSonificationContexttracksMappingNoteduration(Options):

    @property
    def mapFunction(self):
        return self._config_get(None)

    @mapFunction.setter
    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    @property
    def mapTo(self):
        return self._config_get(None)

    @mapTo.setter
    def mapTo(self, text: str):
        self._config(text, js_type=False)

    @property
    def max(self):
        return self._config_get(None)

    @max.setter
    def max(self, num: float):
        self._config(num, js_type=False)

    @property
    def min(self):
        return self._config_get(None)

    @min.setter
    def min(self, num: float):
        self._config(num, js_type=False)

    @property
    def within(self):
        return self._config_get(None)

    @within.setter
    def within(self, value: Any):
        self._config(value, js_type=False)
def show_request_result(request_result):
    rr = request_result
    parts = []
    log = parts.append

    def _indent(s):
        indent_str = '  '
        return ('\n' + indent_str).join(s.split('\n'))

    if rr.query:
        query_string = '?' + '&'.join('%s=%s' % (k, v) for (k, v) in sorted(rr.query.items()))
    else:
        query_string = ''
    log('Fauna %s /%s%s\n' % (rr.method, rr.path, query_string))
    if rr.request_content is not None:
        log('  Request JSON: %s\n' % _indent(to_json(rr.request_content, pretty=True)))
    log('  Response headers: %s\n' % _indent(to_json(dict(rr.response_headers), pretty=True)))
    log('  Response JSON: %s\n' % _indent(to_json(rr.response_content, pretty=True)))
    log('  Response (%i): Network latency %ims\n' % (rr.status_code, int(rr.time_taken * 1000)))
    return u''.join(parts)
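# Hedged demo of show_request_result() with a stand-in object; real
# request_result instances come from the Fauna client, and the attribute
# names below are exactly the ones the function reads.
from types import SimpleNamespace

rr = SimpleNamespace(method='GET', path='ping', query={},
                     request_content=None,
                     response_headers={'x-txn-time': '1'},
                     response_content={'resource': 'Scope write is OK'},
                     status_code=200, time_taken=0.012)
print(show_request_result(rr))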
class IBCApplicationsTransferRestClientTestCase(TestCase): REST_CLIENT = IBCApplicationsTransferRestClient def make_clients(self, response_content: Dict) -> Tuple[(MockRestClient, IBCApplicationsTransferRestClient)]: mock_client = MockRestClient(json_encode(response_content).encode('utf-8')) rest_client = self.REST_CLIENT(mock_client) return (mock_client, rest_client) def test_DenomTrace(self): content = {'denom_trace': {'path': 'string', 'base_denom': 'string'}} (mock_client, rest_client) = self.make_clients(content) expected_response = ParseDict(content, QueryDenomTraceResponse()) assert (rest_client.DenomTrace(QueryDenomTraceRequest(hash='hash')) == expected_response) assert (mock_client.last_base_url == '/ibc/applications/transfer/v1beta1/denom_traces/hash') def test_DenomTraces(self): content = {'denom_traces': [{'path': 'string', 'base_denom': 'string'}], 'pagination': {'next_key': 'string', 'total': '1'}} (mock_client, rest_client) = self.make_clients(content) expected_response = ParseDict(content, QueryDenomTracesResponse()) assert (rest_client.DenomTraces(QueryDenomTracesRequest()) == expected_response) assert (mock_client.last_base_url == '/ibc/applications/transfer/v1beta1/denom_traces') def test_Params(self): content = {} (mock_client, rest_client) = self.make_clients(content) expected_response = ParseDict(content, QueryParamsResponse()) assert (rest_client.Params(QueryParamsRequest()) == expected_response) assert (mock_client.last_base_url == '/ibc/applications/transfer/v1beta1/params')
def train(config, train_dataloader, valid_dataloader, device, model, loss_class, optimizer, lr_scheduler, post_process_class, eval_class, pre_best_model_dict, logger, vdl_writer=None):
    cal_metric_during_train = config['Global'].get('cal_metric_during_train', False)
    log_smooth_window = config['Global']['log_smooth_window']
    epoch_num = config['Global']['epoch_num']
    print_batch_step = config['Global']['print_batch_step']
    eval_batch_step = config['Global']['eval_batch_step']
    global_step = 0
    if ('global_step' in pre_best_model_dict):
        global_step = pre_best_model_dict['global_step']
    start_eval_step = 0
    if ((type(eval_batch_step) == list) and (len(eval_batch_step) >= 2)):
        start_eval_step = eval_batch_step[0]
        eval_batch_step = eval_batch_step[1]
        if (len(valid_dataloader) == 0):
            logger.info('No Images in eval dataset, evaluation during training will be disabled')
            start_eval_step = 1e+111
        logger.info('During the training process, after the {}th iteration, an evaluation is run every {} iterations'.format(start_eval_step, eval_batch_step))
    save_epoch_step = config['Global']['save_epoch_step']
    save_model_dir = config['Global']['save_model_dir']
    if (not os.path.exists(save_model_dir)):
        os.makedirs(save_model_dir)
    main_indicator = eval_class.main_indicator
    best_model_dict = {main_indicator: 0}
    best_model_dict.update(pre_best_model_dict)
    train_stats = TrainingStats(log_smooth_window, ['lr'])
    model_average = False
    model.train()
    use_srn = (config['Architecture']['algorithm'] == 'SRN')
    use_nrtr = (config['Architecture']['algorithm'] == 'NRTR')
    try:
        model_type = config['Architecture']['model_type']
    except Exception:
        model_type = None
    if ('start_epoch' in best_model_dict):
        start_epoch = best_model_dict['start_epoch']
    else:
        start_epoch = 1
    for epoch in range(start_epoch, (epoch_num + 1)):
        train_dataloader = build_dataloader(config, 'Train', device, logger, seed=epoch)
        train_batch_cost = 0.0
        train_reader_cost = 0.0
        batch_sum = 0
        batch_start = time.time()
        max_iter = ((len(train_dataloader) - 1) if (platform.system() == 'Windows') else len(train_dataloader))
        for (idx, batch) in enumerate(train_dataloader):
            train_reader_cost += (time.time() - batch_start)
            if (idx >= max_iter):
                break
            lr = optimizer.get_lr()
            images = batch[0]
            if use_srn:
                model_average = True
            if (use_srn or (model_type == 'table') or use_nrtr):
                preds = model(images, data=batch[1:])
            else:
                preds = model(images)
            loss = loss_class(preds, batch)
            avg_loss = loss['loss']
            avg_loss.backward()
            optimizer.step()
            optimizer.clear_grad()
            train_batch_cost += (time.time() - batch_start)
            batch_sum += len(images)
            if (not isinstance(lr_scheduler, float)):
                lr_scheduler.step()
            stats = {k: v.numpy().mean() for (k, v) in loss.items()}
            stats['lr'] = lr
            train_stats.update(stats)
            if cal_metric_during_train:
                batch = [item.numpy() for item in batch]
                if (model_type == 'table'):
                    eval_class(preds, batch)
                else:
                    post_result = post_process_class(preds, batch[1])
                    eval_class(post_result, batch)
                metric = eval_class.get_metric()
                train_stats.update(metric)
            if ((vdl_writer is not None) and (dist.get_rank() == 0)):
                for (k, v) in train_stats.get().items():
                    vdl_writer.add_scalar('TRAIN/{}'.format(k), v, global_step)
                vdl_writer.add_scalar('TRAIN/lr', lr, global_step)
            if ((dist.get_rank() == 0) and (((global_step > 0) and ((global_step % print_batch_step) == 0)) or (idx >= (len(train_dataloader) - 1)))):
                logs = train_stats.log()
                strs = 'epoch: [{}/{}], iter: {}, {}, reader_cost: {:.5f} s, batch_cost: {:.5f} s, samples: {}, ips: {:.5f}'.format(epoch, epoch_num, global_step, logs, (train_reader_cost / print_batch_step), (train_batch_cost / print_batch_step), batch_sum, (batch_sum / train_batch_cost))
                logger.info(strs)
                train_batch_cost = 0.0
                train_reader_cost = 0.0
                batch_sum = 0
            if ((global_step > start_eval_step) and (((global_step - start_eval_step) % eval_batch_step) == 0) and (dist.get_rank() == 0)):
                if model_average:
                    Model_Average = paddle.incubate.optimizer.ModelAverage(0.15, parameters=model.parameters(), min_average_window=10000, max_average_window=15625)
                    Model_Average.apply()
                cur_metric = eval(model, valid_dataloader, post_process_class, eval_class, model_type, use_srn=use_srn)
                cur_metric_str = 'cur metric, {}'.format(', '.join(['{}: {}'.format(k, v) for (k, v) in cur_metric.items()]))
                logger.info(cur_metric_str)
                if (vdl_writer is not None):
                    for (k, v) in cur_metric.items():
                        if isinstance(v, (float, int)):
                            vdl_writer.add_scalar('EVAL/{}'.format(k), cur_metric[k], global_step)
                if (cur_metric[main_indicator] >= best_model_dict[main_indicator]):
                    best_model_dict.update(cur_metric)
                    best_model_dict['best_epoch'] = epoch
                    save_model(model, optimizer, save_model_dir, logger, is_best=True, prefix='best_accuracy', best_model_dict=best_model_dict, epoch=epoch, global_step=global_step)
                best_str = 'best metric, {}'.format(', '.join(['{}: {}'.format(k, v) for (k, v) in best_model_dict.items()]))
                logger.info(best_str)
                if (vdl_writer is not None):
                    vdl_writer.add_scalar('EVAL/best_{}'.format(main_indicator), best_model_dict[main_indicator], global_step)
            global_step += 1
            optimizer.clear_grad()
            batch_start = time.time()
        if (dist.get_rank() == 0):
            save_model(model, optimizer, save_model_dir, logger, is_best=False, prefix='latest', best_model_dict=best_model_dict, epoch=epoch, global_step=global_step)
        if ((dist.get_rank() == 0) and (epoch > 0) and ((epoch % save_epoch_step) == 0)):
            save_model(model, optimizer, save_model_dir, logger, is_best=False, prefix='iter_epoch_{}'.format(epoch), best_model_dict=best_model_dict, epoch=epoch, global_step=global_step)
    best_str = 'best metric, {}'.format(', '.join(['{}: {}'.format(k, v) for (k, v) in best_model_dict.items()]))
    logger.info(best_str)
    if ((dist.get_rank() == 0) and (vdl_writer is not None)):
        vdl_writer.close()
    return
class petsc_ASM(KSP_Preconditioner): def __init__(self, L, prefix=None): self.PCType = 'asm' self.L = L self._initializePC(prefix) self.pc.setFromOptions() def _initializePC(self, prefix=None): self.pc = p4pyPETSc.PC().create() self.pc.setOptionsPrefix(prefix) self.pc.setType('asm') def setUp(self, global_ksp=None, newton_its=None): self.pc.setUp()
def get_archdir_freebytes(arch_cfg: configuration.Archiving) -> typing.Tuple[(typing.Dict[(str, int)], typing.List[str])]: log_messages = [] target = arch_cfg.target_definition() archdir_freebytes = {} timeout = 5 try: completed_process = subprocess.run([target.disk_space_path], env={**os.environ, **arch_cfg.environment()}, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=timeout) except subprocess.TimeoutExpired as e: log_messages.append(f'Disk space check timed out in {timeout} seconds') if (e.stdout is None): stdout = '' else: stdout = e.stdout.decode('utf-8', errors='ignore').strip() if (e.stderr is None): stderr = '' else: stderr = e.stderr.decode('utf-8', errors='ignore').strip() else: stdout = completed_process.stdout.decode('utf-8', errors='ignore').strip() stderr = completed_process.stderr.decode('utf-8', errors='ignore').strip() for line in stdout.splitlines(): line = line.strip() split = line.split(':') if (len(split) != 2): log_messages.append(f'Unable to parse disk script line: {line!r}') continue (archdir, space) = split freebytes = int(space) archdir_freebytes[archdir.strip()] = freebytes for line in log_messages: disk_space_logger.info(line) disk_space_logger.info('stdout from disk space script:') for line in stdout.splitlines(): disk_space_logger.info(f' {line}') disk_space_logger.info('stderr from disk space script:') for line in stderr.splitlines(): disk_space_logger.info(f' {line}') return (archdir_freebytes, log_messages)
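# For reference, an assumption inferred from the parser above (not part of the
# source): the configured disk_space_path script prints one
# "archdir:freebytes" pair per line, e.g.
#
#   /mnt/farm/a:123456789
#   /mnt/farm/b:987654321
#
# so a stand-in script for local testing could be:
#
#   #!/bin/sh
#   echo "/mnt/farm/a:$(df --output=avail -B1 /mnt/farm/a | tail -1)"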
class TestHidden(testgui.TestCase): def wait(self): self.loop = GLib.MainLoop() self.loop.run() def quitloop(self, f, status): if (status == 0): self.loop.quit() def testCreate(self): f = gtkfractal.Hidden(TestHidden.g_comp, 64, 40) f.connect('status-changed', self.quitloop) f.draw_image(0, 1) self.wait() def testCopy(self): f = gtkfractal.Hidden(TestHidden.g_comp, 64, 40) copy = f.copy_f() mag = f.get_param(f.MAGNITUDE) copy.set_param(copy.MAGNITUDE, 176.3) self.assertEqual(mag, f.get_param(f.MAGNITUDE)) self.assertNotEqual(mag, copy.get_param(copy.MAGNITUDE)) def testSignals(self): f = gtkfractal.Hidden(TestHidden.g_comp, 64, 40) cc = CallCounter() f.connect('parameters-changed', cc.cb) self.assertEqual(cc.count, 0) f.set_param(f.MAGNITUDE, 0.7) self.assertEqual(cc.count, 1) f.set_param(f.MAGNITUDE, 0.7) self.assertEqual(cc.count, 1) f.set_maxiter(778) f.set_maxiter(778) self.assertEqual(cc.count, 2) f.set_size(57, 211) f.set_size(57, 211) def testLoad(self): f = gtkfractal.Hidden(TestHidden.g_comp, 64, 40) with open('testdata/test_bail.fct') as fh: f.loadFctFile(fh) self.assertEqual(f.saved, True) f.connect('status-changed', self.quitloop) f.draw_image(0, 1) self.wait() def testBigImage(self): f = gtkfractal.HighResolution(TestHidden.g_comp, 640, 400) f.connect('status-changed', self.quitloop) hires_image = os.path.join(TestHidden.tmpdir.name, 'hires.png') f.draw_image(hires_image) self.wait() self.assertEqual(True, os.path.exists(hires_image))
class TestPrivacyRequestsManualWebhooks():

    @mock.patch('fides.api.service.privacy_request.request_runner_service.upload')
    def test_privacy_request_needs_manual_input_key_in_cache(self, mock_upload, integration_manual_webhook_config, access_manual_webhook, policy, run_privacy_request_task, db):
        customer_email = 'customer-'
        data = {'requested_at': '2021-08-30T16:09:37.359Z', 'policy_key': policy.key, 'identity': {'email': customer_email}}
        pr = get_privacy_request_results(db, policy, run_privacy_request_task, data)
        db.refresh(pr)
        assert (pr.status == PrivacyRequestStatus.requires_input)
        assert (not mock_upload.called)

    @mock.patch('fides.api.service.privacy_request.request_runner_service.upload')
    @mock.patch('fides.api.service.privacy_request.request_runner_service.run_erasure')
    def test_manual_input_required_for_erasure_only_policies(self, mock_erasure, mock_upload, integration_manual_webhook_config, access_manual_webhook, erasure_policy, run_privacy_request_task, db):
        customer_email = 'customer-'
        data = {'requested_at': '2021-08-30T16:09:37.359Z', 'policy_key': erasure_policy.key, 'identity': {'email': customer_email}}
        pr = get_privacy_request_results(db, erasure_policy, run_privacy_request_task, data)
        db.refresh(pr)
        assert (pr.status == PrivacyRequestStatus.requires_input)
        assert (not mock_upload.called)
        assert (not mock_erasure.called)

    @mock.patch('fides.api.service.privacy_request.request_runner_service.upload')
    def test_pass_on_manually_added_input(self, mock_upload, integration_manual_webhook_config, access_manual_webhook, policy, run_privacy_request_task, privacy_request_requires_input: PrivacyRequest, db, cached_access_input):
        run_privacy_request_task.delay(privacy_request_requires_input.id).get(timeout=PRIVACY_REQUEST_TASK_TIMEOUT)
        db.refresh(privacy_request_requires_input)
        assert (privacy_request_requires_input.status == PrivacyRequestStatus.complete)
        assert mock_upload.called
        assert (mock_upload.call_args.kwargs['data'] == {'manual_webhook_example': [{'email': 'customer-', 'last_name': 'McCustomer'}]})

    @mock.patch('fides.api.service.privacy_request.request_runner_service.upload')
    def test_pass_on_partial_manually_added_input(self, mock_upload, integration_manual_webhook_config, access_manual_webhook, policy, run_privacy_request_task, privacy_request_requires_input: PrivacyRequest, db):
        privacy_request_requires_input.cache_manual_webhook_access_input(access_manual_webhook, {'email': 'customer-'})
        run_privacy_request_task.delay(privacy_request_requires_input.id).get(timeout=PRIVACY_REQUEST_TASK_TIMEOUT)
        db.refresh(privacy_request_requires_input)
        assert (privacy_request_requires_input.status == PrivacyRequestStatus.complete)
        assert mock_upload.called
        assert (mock_upload.call_args.kwargs['data'] == {'manual_webhook_example': [{'email': 'customer-', 'last_name': None}]})

    @mock.patch('fides.api.service.privacy_request.request_runner_service.upload')
    def test_pass_on_empty_confirmed_input(self, mock_upload, integration_manual_webhook_config, access_manual_webhook, policy, run_privacy_request_task, privacy_request_requires_input: PrivacyRequest, db):
        privacy_request_requires_input.cache_manual_webhook_access_input(access_manual_webhook, {})
        run_privacy_request_task.delay(privacy_request_requires_input.id).get(timeout=PRIVACY_REQUEST_TASK_TIMEOUT)
        db.refresh(privacy_request_requires_input)
        assert (privacy_request_requires_input.status == PrivacyRequestStatus.complete)
        assert mock_upload.called
        assert (mock_upload.call_args.kwargs['data'] == {'manual_webhook_example': [{'email': None, 'last_name': None}]})
def test_that_invalid_time_map_file_raises_config_validation_error(tmpdir): with tmpdir.as_cwd(): with open('time_map.txt', 'w', encoding='utf-8') as fo: fo.writelines('invalid') with pytest.raises(ConfigValidationError, match='Could not read timemap file'): _ = ModelConfig.from_dict({ConfigKeys.TIME_MAP: 'time_map.txt'})
class OptionSeriesScatter3dDragdropDraghandle(Options):

    @property
    def className(self):
        return self._config_get('highcharts-drag-handle')

    @className.setter
    def className(self, text: str):
        self._config(text, js_type=False)

    @property
    def color(self):
        return self._config_get('#fff')

    @color.setter
    def color(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineColor(self):
        return self._config_get('rgba(0, 0, 0, 0.6)')

    @lineColor.setter
    def lineColor(self, text: str):
        self._config(text, js_type=False)

    @property
    def lineWidth(self):
        return self._config_get(1)

    @lineWidth.setter
    def lineWidth(self, num: float):
        self._config(num, js_type=False)

    @property
    def zIndex(self):
        return self._config_get(901)

    @zIndex.setter
    def zIndex(self, num: float):
        self._config(num, js_type=False)
def delete_service_archive(archive_name): response = get_session().delete((base_url + 'service-archives/{}'.format(archive_name))) if (response.status_code != 200): raise get_exception(response) else: return response.json().get('message', 'service archive deleted successfully')
def test_encode_complete_keywords(): df = pd.DataFrame({'var_A': (((((((['A'] * 5) + (['B'] * 11)) + (['C'] * 4)) + (['D'] * 9)) + (['E'] * 2)) + (['F'] * 2)) + (['G'] * 7)), 'var_B': (((((((['A'] * 11) + (['B'] * 7)) + (['C'] * 4)) + (['D'] * 9)) + (['E'] * 2)) + (['F'] * 2)) + (['G'] * 5)), 'var_C': (((((((['A'] * 4) + (['B'] * 5)) + (['C'] * 11)) + (['D'] * 9)) + (['E'] * 2)) + (['F'] * 2)) + (['G'] * 7))}) encoder = StringSimilarityEncoder(keywords={'var_A': ['X'], 'var_B': ['Y'], 'var_C': ['Z']}) X = encoder.fit_transform(df) transf = {'var_A_X': 0, 'var_B_Y': 0, 'var_C_Z': 0} assert (encoder.variables_ == ['var_A', 'var_B', 'var_C']) assert (encoder.n_features_in_ == 3) assert (encoder.encoder_dict_ == {'var_A': ['X'], 'var_B': ['Y'], 'var_C': ['Z']}) for col in transf.keys(): assert (X[col].sum() == transf[col]) assert ('var_B' not in X.columns) assert ('var_B_F' not in X.columns)
def test_break_contained_in_switch_add_case_default(task): (arg1_1, vertices) = __graph_loop_break_in_switch(task) task.options = Options() task.options.update({'pattern-independent-restructuring.loop_break_switch': 'None'}) PatternIndependentRestructuring().run(task) assert (isinstance((seq_node := task.syntax_tree.root), SeqNode) and (len(seq_node.children) == 3)) assert (isinstance(seq_node.children[0], CodeNode) and (seq_node.children[0].instructions == vertices[0].instructions)) assert isinstance((loop_node := seq_node.children[1]), WhileLoopNode) assert (isinstance(seq_node.children[2], CodeNode) and (seq_node.children[2].instructions == vertices[4].instructions)) assert (isinstance((body := loop_node.body), SeqNode) and (len(body.children) == 3)) assert loop_node.condition.is_literal if loop_node.condition.is_symbol: assert (task.syntax_tree.condition_map[loop_node.condition] == vertices[1].instructions[0].condition) else: assert (task.syntax_tree.condition_map[(~ loop_node.condition)] == vertices[1].instructions[0].condition.negate()) assert (isinstance((case_5 := body.children[0]), ConditionNode) and (case_5.false_branch is None) and case_5.condition.is_literal) assert isinstance((switch := body.children[1]), SwitchNode) assert (isinstance(body.children[2], CodeNode) and (body.children[2].instructions == vertices[10].instructions)) if case_5.condition.is_symbol: assert (task.syntax_tree.condition_map[case_5.condition] == Condition(OperationType.equal, [arg1_1, Constant(5, arg1_1.type)])) else: assert (task.syntax_tree.condition_map[(~ case_5.condition)] == Condition(OperationType.not_equal, [arg1_1, Constant(5, arg1_1.type)])) assert (isinstance((case_seq := case_5.true_branch_child), SeqNode) and (len(case_seq.children) == 2)) assert (isinstance((break_cond := case_seq.children[0]), ConditionNode) and (break_cond.false_branch is None)) assert (isinstance((cn_5 := case_seq.children[1]), CodeNode) and (cn_5.instructions == vertices[11].instructions)) assert (task.syntax_tree.condition_map[break_cond.condition] == vertices[8].instructions[0].condition) assert (isinstance((break_node := break_cond.true_branch_child), CodeNode) and (break_node.instructions == [Break()])) assert ((switch.expression == arg1_1) and (len(switch.children) == 4)) assert (isinstance((case1 := switch.cases[0]), CaseNode) and (case1.constant == Constant(1, Integer.int32_t())) and (case1.break_case is True)) assert (isinstance((case2 := switch.cases[1]), CaseNode) and (case2.constant == Constant(2, Integer.int32_t())) and (case2.break_case is False)) assert (isinstance((case3 := switch.cases[2]), CaseNode) and (case3.constant == Constant(4, Integer.int32_t())) and (case3.break_case is True)) assert (isinstance((case4 := switch.cases[3]), CaseNode) and (case4.constant == Constant(3, Integer.int32_t())) and (case4.break_case is True)) assert (isinstance(case1.child, CodeNode) and (case1.child.instructions == vertices[5].instructions)) assert (isinstance(case2.child, CodeNode) and (case2.child.instructions == vertices[6].instructions)) assert (isinstance(case3.child, CodeNode) and (case3.child.instructions == vertices[7].instructions)) assert (isinstance(case4.child, CodeNode) and (case4.child.instructions == vertices[9].instructions))
def botcmd(*args, hidden: bool=None, name: str=None, split_args_with: str='', admin_only: bool=False, historize: bool=True, template: str=None, flow_only: bool=False, syntax: str=None) -> Callable[([BotPlugin, Message, Any], Any)]: def decorator(func): return _tag_botcmd(func, _re=False, _arg=False, hidden=hidden, name=(name or func.__name__), split_args_with=split_args_with, admin_only=admin_only, historize=historize, template=template, syntax=syntax, flow_only=flow_only) return (decorator(args[0]) if args else decorator)
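# Usage sketch for the dual-form decorator above (the plugin is a
# hypothetical example, not from the source): because botcmd returns either
# the decorated function or a decorator, both spellings below work.
class ExamplePlugin(BotPlugin):

    @botcmd                                      # bare form: args == (func,)
    def hello(self, msg, args):
        return 'Hello, world'

    @botcmd(admin_only=True, syntax='<name>')    # parameterized form: args == ()
    def greet(self, msg, args):
        return 'Greetings, {}'.format(args)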
class ReverseRegisterLookup():
    def __init__(self, d):
        self._dict = d

    def __getitem__(self, spec_lookup):
        # scan every registered entity class for an entry whose spec matches,
        # returning the id under which that entry was registered
        for (_entity_cls, entries) in self._dict.items():
            for (entity_id, entry) in entries.items():
                if (entry['spec'] == spec_lookup):
                    return entity_id
class IndexValidator(OptionValidator): def __init__(self, model, extra=None, exclude=None): self.model = model self.extra = (extra or []) self.exclude = (exclude or []) def values(self): inspector = sa.inspect(db.engine) column_map = {column.key: label for (label, column) in self.model.__mapper__.columns.items()} return ([column_map[column['column_names'][0]] for column in inspector.get_indexes(self.model.__tablename__, self.model.__table__.schema) if (not self._is_excluded(column_map.get(column['column_names'][0])))] + self.extra) def _is_excluded(self, value): return ((not value) or (value in self.exclude))
def test_show_with_content(tmp_path, capsys): args = helpers.setup_temp_env(tmp_path) reqid = 'req-compile-bench--nobody-mac' reqdir = (((tmp_path / 'BENCH') / 'REQUESTS') / reqid) reqdir.mkdir() shutil.copy((helpers.DATA_ROOT / 'request.json'), (reqdir / 'request.json')) shutil.copy((helpers.DATA_ROOT / 'results.json'), (reqdir / 'results.json')) (reqdir / 'job.log').write_text(textwrap.dedent('\n LINE 1 OF LOG\n LINE 2 OF LOG\n LINE 3 OF LOG\n ')) __main__._parse_and_main([*args, 'show', reqid, '--lines', '2'], __file__) captured = capsys.readouterr() assert re.fullmatch(textwrap.dedent("\n Request req-compile-bench--nobody-mac:\n kind: compile-bench\n user: nobody\n status: pending\n is staged: False\n\n Details:\n ref: main\n pyperformance_ref: dd53b79de0ea98af6aa961daef4e9774\n remote: origin\n revision: main\n branch: main\n benchmarks: \\['deepcopy'\\]\n optimize: True\n debug: False\n ssh okay: \\?\\?\\?\n\n History:\n created: 2022-09-27 15:15:28\n pending: 2022-09-27 15:15:29\n\n Request files:\n data root: \\(/home/benchmarking/BENCH/REQUESTS/req-compile-bench--nobody-mac\\)\n metadata: .*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/request.json\n job_script: \\(.*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/run.sh\\)\n portal_script: \\(.*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/send.sh\\)\n ssh_okay: \\(.*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/ssh.ok\\)\n pyperformance_manifest: \\(.*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/benchmarks.manifest\\)\n pyperformance_config: \\(.*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/pyperformance.ini\\)\n\n Result files:\n data root: .*?/BENCH/REQUESTS/req-compile-bench--nobody-mac\n metadata: .*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/results.json\n pidfile: \\(.*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/send.pid\\)\n logfile: .*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/job.log\n pyperformance_log: \\(.*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/pyperformance.log\\)\n pyperformance_results: \\(.*?/BENCH/REQUESTS/req-compile-bench--nobody-mac/pyperformance-results.json.gz\\)\n LINE 2 OF LOG\n LINE 3 OF LOG\n ").strip(), captured.out.strip())
def _build_request(self, func, path, method, *args, **kwargs): return_type = get_type_hints(func).get('return') if (return_type is None): raise TypeError('Return type must be annotated in the decorated function.') actual_dataclass = _extract_dataclass_from_generic(return_type) logger.debug(f'return_type: {return_type}, actual_dataclass: {actual_dataclass}') if (not actual_dataclass): actual_dataclass = return_type sig = signature(func) base_url = self.base_url bound = sig.bind(self, *args, **kwargs) bound.apply_defaults() formatted_url = (base_url + path.format(**bound.arguments)) arg_names = list(sig.parameters.keys())[1:] combined_args = dict(zip(arg_names, args)) combined_args.update(kwargs) request_data = {} for (key, value) in combined_args.items(): if is_dataclass(value): request_data = asdict(value) else: request_data[key] = value request_params = {'method': method, 'url': formatted_url} if (method in ['POST', 'PUT', 'PATCH']): request_params['json'] = request_data else: request_params['params'] = request_data logger.debug(f'request_params: {request_params}, args: {args}, kwargs: {kwargs}') return (return_type, actual_dataclass, request_params)
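# Hedged usage sketch for _build_request() above; the client method and URL
# are illustrative only. With self.base_url == 'https://api.example.com' and
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class User:
#         id: int
#         name: str
#
#     def fetch_user(self, user_id: int) -> User: ...
#
# the call self._build_request(fetch_user, '/users/{user_id}', 'GET', 42)
# returns (User, User, {'method': 'GET',
#                       'url': 'https://api.example.com/users/42',
#                       'params': {'user_id': 42}}).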
def ths_1(): shape = (2000, 1000) sample = np.random.randint(0, 255, (1000,), dtype='uint8') plain = np.random.randint(0, 255, 16, dtype='uint8') samples = np.array([sample for i in range(shape[0])], dtype='uint8') plaintext = np.array([plain for i in range(shape[0])], dtype='uint8') return scared.traces.formats.read_ths_from_ram(samples=samples, plaintext=plaintext)
class OptionPlotoptionsAreasplineSonificationTracksMapping(Options):

    @property
    def frequency(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingFrequency':
        return self._config_sub_data('frequency', OptionPlotoptionsAreasplineSonificationTracksMappingFrequency)

    @property
    def gapBetweenNotes(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingGapbetweennotes':
        return self._config_sub_data('gapBetweenNotes', OptionPlotoptionsAreasplineSonificationTracksMappingGapbetweennotes)

    @property
    def highpass(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingHighpass':
        return self._config_sub_data('highpass', OptionPlotoptionsAreasplineSonificationTracksMappingHighpass)

    @property
    def lowpass(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingLowpass':
        return self._config_sub_data('lowpass', OptionPlotoptionsAreasplineSonificationTracksMappingLowpass)

    @property
    def noteDuration(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingNoteduration':
        return self._config_sub_data('noteDuration', OptionPlotoptionsAreasplineSonificationTracksMappingNoteduration)

    @property
    def pan(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingPan':
        return self._config_sub_data('pan', OptionPlotoptionsAreasplineSonificationTracksMappingPan)

    @property
    def pitch(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingPitch':
        return self._config_sub_data('pitch', OptionPlotoptionsAreasplineSonificationTracksMappingPitch)

    @property
    def playDelay(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingPlaydelay':
        return self._config_sub_data('playDelay', OptionPlotoptionsAreasplineSonificationTracksMappingPlaydelay)

    @property
    def rate(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingRate':
        return self._config_sub_data('rate', OptionPlotoptionsAreasplineSonificationTracksMappingRate)

    @property
    def text(self):
        return self._config_get(None)

    @text.setter
    def text(self, text: str):
        self._config(text, js_type=False)

    @property
    def time(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingTime':
        return self._config_sub_data('time', OptionPlotoptionsAreasplineSonificationTracksMappingTime)

    @property
    def tremolo(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingTremolo':
        return self._config_sub_data('tremolo', OptionPlotoptionsAreasplineSonificationTracksMappingTremolo)

    @property
    def volume(self) -> 'OptionPlotoptionsAreasplineSonificationTracksMappingVolume':
        return self._config_sub_data('volume', OptionPlotoptionsAreasplineSonificationTracksMappingVolume)
class OptionPlotoptionsWaterfallSonificationTracksMappingLowpass(Options):

    @property
    def frequency(self) -> 'OptionPlotoptionsWaterfallSonificationTracksMappingLowpassFrequency':
        return self._config_sub_data('frequency', OptionPlotoptionsWaterfallSonificationTracksMappingLowpassFrequency)

    @property
    def resonance(self) -> 'OptionPlotoptionsWaterfallSonificationTracksMappingLowpassResonance':
        return self._config_sub_data('resonance', OptionPlotoptionsWaterfallSonificationTracksMappingLowpassResonance)
def run_pre_publishers(): from anima.dcc.mayaEnv.publish import PublishError from anima.dcc import mayaEnv m_env = mayaEnv.Maya() version = m_env.get_current_version() if (not version): return from anima.representation import Representation if (Representation.repr_separator in version.take_name): return if version.is_published: from anima.publish import run_publishers, staging, POST_PUBLISHER_TYPE type_name = '' if version.task.type: type_name = version.task.type.name staging['version'] = version try: run_publishers(type_name) except (PublishError, RuntimeError) as e: staging.clear() pm.confirmDialog(title='SaveError', icon='critical', message=('<b>%s</b><br/><br/>%s' % ('SCENE NOT SAVED!!!', e)), button=['Ok']) raise e staging.clear() else: from anima.dcc.mayaEnv import publish as publish_scripts try: publish_scripts.check_node_names_with_bad_characters() except (PublishError, RuntimeError) as e: pm.confirmDialog(title='SaveError', icon='critical', message=('<b>%s</b><br/><br/>%s' % ('SCENE NOT SAVED!!!', e)), button=['Ok']) raise e from stalker import LocalSession ls = LocalSession() logged_in_user = ls.logged_in_user if logged_in_user: version.updated_by = logged_in_user from stalker.db.session import DBSession DBSession.commit()
class TestGlobalAdaptationManager(unittest.TestCase): examples = traits.adaptation.tests.abc_examples def setUp(self): reset_global_adaptation_manager() def test_reset_adaptation_manager(self): ex = self.examples adaptation_manager = get_global_adaptation_manager() adaptation_manager.register_factory(factory=ex.UKStandardToEUStandard, from_protocol=ex.UKStandard, to_protocol=ex.EUStandard) uk_plug = ex.UKPlug() reset_global_adaptation_manager() adaptation_manager = get_global_adaptation_manager() with self.assertRaises(AdaptationError): adaptation_manager.adapt(uk_plug, ex.EUStandard) def test_set_adaptation_manager(self): ex = self.examples adaptation_manager = AdaptationManager() adaptation_manager.register_factory(factory=ex.UKStandardToEUStandard, from_protocol=ex.UKStandard, to_protocol=ex.EUStandard) uk_plug = ex.UKPlug() set_global_adaptation_manager(adaptation_manager) global_adaptation_manager = get_global_adaptation_manager() eu_plug = global_adaptation_manager.adapt(uk_plug, ex.EUStandard) self.assertIsNotNone(eu_plug) self.assertIsInstance(eu_plug, ex.UKStandardToEUStandard) def test_global_convenience_functions(self): ex = self.examples register_factory(factory=ex.UKStandardToEUStandard, from_protocol=ex.UKStandard, to_protocol=ex.EUStandard) uk_plug = ex.UKPlug() eu_plug = adapt(uk_plug, ex.EUStandard) self.assertIsNotNone(eu_plug) self.assertIsInstance(eu_plug, ex.UKStandardToEUStandard) self.assertTrue(provides_protocol(ex.UKPlug, ex.UKStandard)) self.assertTrue(supports_protocol(uk_plug, ex.EUStandard)) def test_global_register_provides(self): from traits.api import Interface class IFoo(Interface): pass obj = {} register_provides(dict, IFoo) self.assertEqual(obj, adapt(obj, IFoo)) def test_global_register_offer(self): ex = self.examples offer = AdaptationOffer(factory=ex.UKStandardToEUStandard, from_protocol=ex.UKStandard, to_protocol=ex.EUStandard) register_offer(offer) uk_plug = ex.UKPlug() eu_plug = adapt(uk_plug, ex.EUStandard) self.assertIsNotNone(eu_plug) self.assertIsInstance(eu_plug, ex.UKStandardToEUStandard)
def extractHiganbanaloveschrysanthemumHomeBlog(item): (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title']) if ((not (chp or vol)) or ('preview' in item['title'].lower())): return None tagmap = [('Powerful Skull in The Last Days', 'Powerful Skull in The Last Days', 'translated'), ('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')] for (tagname, name, tl_type) in tagmap: if (tagname in item['tags']): return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
class TestCallableModuleCallableRaise(): def test_callable_module(self, monkeypatch): with monkeypatch.context() as m: m.setattr(sys, 'argv', ['', '--config', './tests/conf/yaml/test_fail_callable.yaml']) with pytest.raises(_SpockFieldHandlerError): config = ConfigArgBuilder(*all_configs) return config.generate()
class Close(): def __init__(self, ilo, delete_aliases=False, skip_flush=False): verify_index_list(ilo) self.index_list = ilo self.delete_aliases = delete_aliases self.skip_flush = skip_flush self.client = ilo.client self.loggit = logging.getLogger('curator.actions.close') def do_dry_run(self): show_dry_run(self.index_list, 'close', **{'delete_aliases': self.delete_aliases}) def do_action(self): self.index_list.filter_closed() self.index_list.empty_list_check() self.loggit.info('Closing %s selected indices: %s', len(self.index_list.indices), self.index_list.indices) try: index_lists = chunk_index_list(self.index_list.indices) for lst in index_lists: lst_as_csv = to_csv(lst) self.loggit.debug('CSV list of indices to close: %s', lst_as_csv) if self.delete_aliases: self.loggit.info('Deleting aliases from indices before closing.') self.loggit.debug('Deleting aliases from: %s', lst) try: self.client.indices.delete_alias(index=lst_as_csv, name='_all') self.loggit.debug('Deleted aliases from: %s', lst) except Exception as err: self.loggit.warning('Some indices may not have had aliases. Exception: %s', err) if (not self.skip_flush): self.client.indices.flush(index=lst_as_csv, ignore_unavailable=True, force=True) self.client.indices.close(index=lst_as_csv, ignore_unavailable=True) except Exception as err: report_failure(err)
class BarrierRequestReply(base_tests.SimpleProtocol): def runTest(self): logging.info('Running Barrier_Request_Reply test') logging.info('Sending Barrier Request') logging.info('Expecting a Barrier Reply with same xid') request = ofp.message.barrier_request() (response, pkt) = self.controller.transact(request) self.assertEqual(response.type, ofp.OFPT_BARRIER_REPLY, 'response is not barrier_reply') self.assertEqual(request.xid, response.xid, 'response xid != request xid')
def clear_orphan_webdriver(): process = all_webdriver() killed = [] for item in process: if (not item.parent()): kill_process_tree_by_id(item.pid) killed.append(item) elif (item.parent().name().lower() == 'systemd'): kill_process_tree_by_id(item.pid) killed.append(item) return killed
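# A minimal sketch (assumed, not the project's actual helpers) of the
# all_webdriver()/kill_process_tree_by_id() functions used above, via psutil.
import psutil

def all_webdriver():
    # processes whose executable name looks like a webdriver binary
    names = ('chromedriver', 'geckodriver', 'msedgedriver')
    return [p for p in psutil.process_iter(['name'])
            if (p.info['name'] or '').lower() in names]

def kill_process_tree_by_id(pid):
    parent = psutil.Process(pid)
    for child in parent.children(recursive=True):
        child.kill()
    parent.kill()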
class Neon(Memory):

    @classmethod
    def global_(cls):
        return '#include <arm_neon.h>'

    @classmethod
    def can_read(cls):
        return False

    @classmethod
    def alloc(cls, new_name, prim_type, shape, srcinfo):
        if (not shape):
            raise MemGenError(f'{srcinfo}: Neon vectors are not scalar values')
        vec_types = {'float': (4, 'float32x4_t'), 'double': (2, 'float64x2_t'), '_Float16': (8, 'float16x8_t')}
        if (not (prim_type in vec_types.keys())):
            raise MemGenError(f'{srcinfo}: Neon vectors must be f32/f64 (for now)')
        (reg_width, C_reg_type_name) = vec_types[prim_type]
        if (not _is_const_size(shape[(- 1)], reg_width)):
            raise MemGenError(f'{srcinfo}: Neon vectors of type {prim_type} must be {reg_width}-wide, got {shape}')
        shape = shape[:(- 1)]
        if shape:
            if (not all((_is_some_const_size(s) for s in shape))):
                raise MemGenError(f'{srcinfo}: Cannot allocate variable numbers of Neon vectors')
            result = f"{C_reg_type_name} {new_name}[{']['.join(map(str, shape))}];"
        else:
            result = f'{C_reg_type_name} {new_name};'
        return result

    @classmethod
    def free(cls, new_name, prim_type, shape, srcinfo):
        return ''

    @classmethod
    def window(cls, basetyp, baseptr, indices, strides, srcinfo):
        assert (strides[(- 1)] == '1')
        idxs = (indices[:(- 1)] or '')
        if idxs:
            idxs = (('[' + ']['.join(idxs)) + ']')
        return f'{baseptr}{idxs}'
def esrep_sync_cb(result, task_prefix): result['task_prefix'] = task_prefix task_id = generate_internal_task_id() task = cq.send_task(ESREP_SYNC_CB, args=(result, task_id), queue=Q_MGMT, expires=120, task_id=task_id) if (not task): raise CallbackError(('Failed to created task "%s"' % ESREP_SYNC_CB)) return task
def test_performative_string_value(): assert (str(FipaMessage.Performative.CFP) == 'cfp'), 'The str value must be cfp' assert (str(FipaMessage.Performative.PROPOSE) == 'propose'), 'The str value must be propose' assert (str(FipaMessage.Performative.DECLINE) == 'decline'), 'The str value must be decline' assert (str(FipaMessage.Performative.ACCEPT) == 'accept'), 'The str value must be accept' assert (str(FipaMessage.Performative.MATCH_ACCEPT) == 'match_accept'), 'The str value must be match_accept' assert (str(FipaMessage.Performative.ACCEPT_W_INFORM) == 'accept_w_inform'), 'The str value must be accept_w_inform' assert (str(FipaMessage.Performative.MATCH_ACCEPT_W_INFORM) == 'match_accept_w_inform'), 'The str value must be match_accept_w_inform' assert (str(FipaMessage.Performative.INFORM) == 'inform'), 'The str value must be inform'
@given(st.lists(st.integers(min_value=0, max_value=6), min_size=3))
def test_prune_reinsert_root_tracking_binary_tree(element_flipping):
    tracker = RootTracker()
    present = set()
    for node_id in element_flipping:
        node = FULL_BINARY_TREE[node_id]
        if (node in present):
            (prune_root_id, _) = tracker.get_root(node)
            tracker.prune(prune_root_id)
            present.remove(prune_root_id)
        else:
            tracker.add(node, binary_parent(node))
            present.add(node)
    for test_node in present:
        (root_node, depth) = tracker.get_root(test_node)
        assert (binary_parent(root_node) not in present)
        assert (depth == (test_node[0] - root_node[0]))
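# Hedged sketch of the fixtures the property test above relies on (the real
# definitions are not shown): nodes as (depth, index) tuples are consistent
# with the final depth assertion, and FULL_BINARY_TREE must hold at least
# seven nodes so that indices 0..6 are valid.
def binary_parent(node):
    depth, index = node
    if depth == 0:
        return None  # the tree root has no parent
    return (depth - 1, index // 2)

FULL_BINARY_TREE = [(d, i) for d in range(3) for i in range(2 ** d)]  # 7 nodes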
@pytest.mark.xfail(raises=ImageComparisonFailure, reason='Matplotlib plots for reasons a different image size.')
@pytest.mark.skipif((MID_MEMORY > memory), reason='Travis has too less memory to run it.')
def test_hicAggregateContacts_chromosome_not_given():
    outfile_aggregate_plots = NamedTemporaryFile(suffix='.png', prefix='hicaggregate_test_', delete=False)
    args = '--matrix {root}/Li_et_al_2015.h5 --BED {root}/hicAggregateContacts/test_regions_region_not_given.bed --outFileName {out_agg} --numberOfBins 30 --range 50000:900000 --disable_bbox_tight --dpi 100 --mode intra-chr --perChr '.format(root=ROOT, out_agg=outfile_aggregate_plots.name)
    compute(hicexplorer.hicAggregateContacts.main, args.split(), 5)
class GraphicalWifiMixin(_BaseMixin): defaults = [('wifi_arc', 75, 'Angle of arc in degrees.'), ('wifi_rectangle_width', 5, 'Width of rectangle in pixels.'), ('wifi_shape', 'arc', "'arc' or 'rectangle'")] def __init__(self): self.wifi_width = 0 def set_wifi_sizes(self): self.wifi_padding_x = getattr(self, 'padding_x', getattr(self, 'padding', 0)) self.wifi_padding_y = getattr(self, 'padding_y', getattr(self, 'padding', 0)) self.wifi_height = (self.bar.height - (self.wifi_padding_y * 2)) width_ratio = math.sin(to_rads((self.wifi_arc / 2))) if (self.wifi_shape == 'arc'): self.wifi_width = ((self.wifi_height * width_ratio) * 2) self.wifi_width = math.ceil(self.wifi_width) else: self.wifi_width = self.wifi_rectangle_width self.icon_size = self.wifi_height def draw_wifi(self, percentage, foreground='ffffff', background='777777'): if (self.wifi_shape == 'arc'): func = self._draw_wifi_arc else: func = self._draw_wifi_rectangle func(percentage, foreground, background) def _draw_wifi_arc(self, percentage, foreground, background): offset = self.wifi_padding_x half_arc = (self.wifi_arc / 2) x_offset = int((self.wifi_height * math.sin(to_rads(half_arc)))) self.drawer.ctx.new_sub_path() self.drawer.ctx.move_to((self.wifi_padding_x + x_offset), (self.wifi_padding_y + self.wifi_height)) self.drawer.ctx.arc((offset + x_offset), (self.wifi_padding_y + self.wifi_height), self.wifi_height, to_rads((270 - half_arc)), to_rads((270 + half_arc))) self.drawer.set_source_rgb(background) self.drawer.ctx.fill() self.drawer.ctx.new_sub_path() self.drawer.ctx.move_to((offset + x_offset), (self.wifi_padding_y + self.wifi_height)) self.drawer.ctx.arc((offset + x_offset), (self.wifi_padding_y + self.wifi_height), (self.wifi_height * percentage), to_rads((270 - half_arc)), to_rads((270 + half_arc))) self.drawer.set_source_rgb(foreground) self.drawer.ctx.fill() def _draw_wifi_rectangle(self, percentage, foreground, background): ctx = self.drawer.ctx ctx.save() ctx.translate(self.wifi_padding_x, self.wifi_padding_y) ctx.rectangle(0, 0, self.wifi_width, self.wifi_height) self.drawer.set_source_rgb(background) ctx.fill() ctx.rectangle(0, (self.wifi_height * (1 - percentage)), self.wifi_width, (self.wifi_height * percentage)) self.drawer.set_source_rgb(foreground) ctx.fill() ctx.restore()
def test_epoch(): expected = pytest.approx(0) assert (_rfc3339.parse_to_epoch('1970-01-01T00:00:00Z') == expected) assert (_rfc3339.parse_to_epoch('1970-01-01T00:00:00z') == expected) assert (_rfc3339.parse_to_epoch('1970-01-01T00:00:00+00:00') == expected) assert (_rfc3339.parse_to_epoch('1970-01-01T00:00:00-00:00') == expected) assert (_rfc3339.parse_to_epoch('1970-01-01T01:00:00+01:00') == expected) assert (_rfc3339.parse_to_epoch('1969-12-31T23:00:00-01:00') == expected)
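# Not the library's actual implementation -- a minimal sketch of a
# parse_to_epoch() that satisfies the test above, using only the stdlib.
from datetime import datetime

def parse_to_epoch(value: str) -> float:
    # normalize a trailing Zulu marker to an explicit UTC offset, since
    # datetime.fromisoformat() only accepts 'Z' from Python 3.11 on
    if value.endswith(('Z', 'z')):
        value = value[:-1] + '+00:00'
    return datetime.fromisoformat(value).timestamp()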
class Trips(list):
    def __init__(self, *args):
        list.__init__(self, *args)
        self.trip_num = 1

    def to_geo_json(self):
        feature_collection = FeatureCollection(self)
        return feature_collection

    def get_trips_as_dict(self):
        return [trip.get_info() for trip in self]

    def get_distance(self):
        return (self[(- 1)].mileage - self[0].mileage)

    def check_and_append(self, trip: Trip):
        if ((trip.consumption_km <= trip.car.max_elec_consumption) and (trip.consumption_fuel_km <= trip.car.max_fuel_consumption)):
            trip.id = self.trip_num
            self.trip_num += 1
            self.append(trip)
            return True
        logger.debugv('trip discarded')
        return False


def get_trips(vehicles_list: Cars) -> Dict[(str, 'Trips')]:
    conn = Database.get_db()
    vehicles = conn.execute('SELECT DISTINCT vin FROM position;').fetchall()
    trips_by_vin = {}
    for vin in vehicles:
        trips = Trips()
        vin = vin[0]
        res = conn.execute('SELECT Timestamp, VIN, longitude, latitude, mileage, level, moving, temperature, level_fuel, altitude FROM position WHERE VIN=? AND mileage IS NOT NULL ORDER BY Timestamp', (vin,)).fetchall()
        if (len(res) > 1):
            car = vehicles_list.get_car_by_vin(vin)
            assert (car is not None)
            trip_parser = TripParser(car)
            start = res[0]
            end = res[1]
            trip = Trip()
            for x in range(0, (len(res) - 2)):
                if logger.isEnabledFor(logging.DEBUG):
                    logger.debugv('%s mileage:%.1f level:%s level_fuel:%s', res[x]['Timestamp'], res[x]['mileage'], res[x]['level'], res[x]['level_fuel'])
                next_point = res[(x + 2)]
                distance = 0
                try:
                    distance = (end['mileage'] - start['mileage'])
                except TypeError:
                    logger.debug('Bad mileage value in DB')
                duration = ((end['Timestamp'] - start['Timestamp']).total_seconds() / 3600)
                try:
                    speed_average = (distance / duration)
                except ZeroDivisionError:
                    speed_average = 0
                if (TripParser.is_low_speed(speed_average, duration) or trip_parser.is_refuel(start, end, distance)):
                    start = end
                    trip = Trip()
                    logger.debugv('restart trip at {0[Timestamp]} mileage:{0[mileage]:.1f} level:{0[level]} level_fuel:{0[level_fuel]}', start, style='{')
                else:
                    distance = (next_point['mileage'] - end['mileage'])
                    duration = ((next_point['Timestamp'] - end['Timestamp']).total_seconds() / 3600)
                    try:
                        speed_average = (distance / duration)
                    except ZeroDivisionError:
                        speed_average = 0
                    end_trip = False
                    if (trip_parser.is_refuel(end, next_point, distance) or TripParser.is_low_speed(speed_average, duration)):
                        end_trip = True
                    elif (duration > 2):
                        end_trip = True
                        logger.debugv('too much time detected')
                    elif (x == (len(res) - 3)):
                        end = next_point
                        end_trip = True
                        logger.debugv('last position found')
                    if end_trip:
                        logger.debugv('stop trip at {0[Timestamp]} mileage:{0[mileage]:.1f} level:{0[level]} level_fuel:{0[level_fuel]}', end, style='{')
                        trip.distance = (end['mileage'] - start['mileage'])
                        if (trip.distance > 0):
                            trip.start_at = start['Timestamp']
                            trip.end_at = end['Timestamp']
                            trip.add_points(end['latitude'], end['longitude'])
                            if ((end['temperature'] is not None) and (start['temperature'] is not None)):
                                trip.add_temperature(end['temperature'])
                            trip.duration = ((end['Timestamp'] - start['Timestamp']).total_seconds() / 3600)
                            trip.speed_average = (trip.distance / trip.duration)
                            (diff_level, diff_level_fuel) = trip_parser.get_level_consumption(start, end)
                            trip.set_altitude_diff(start['altitude'], end['altitude'])
                            trip.car = car
                            if (diff_level != 0):
                                trip.set_consumption(diff_level)
                            if (diff_level_fuel != 0):
                                trip.set_fuel_consumption(diff_level_fuel)
                            trip.mileage = end['mileage']
                            logger.debugv('Trip: {0.start_at} -> {0.end_at} {0.distance:.1f}km {0.duration:.2f}h {0.speed_average:.0f}km/h {0.consumption:.2f}kWh {0.consumption_km:.2f}kWh/100km {0.consumption_fuel:.2f}L {0.consumption_fuel_km:.2f}L/100km {0.mileage:.1f}km', trip, style='{')
                            trips.check_and_append(trip)
                        start = next_point
                        trip = Trip()
                    else:
                        trip.add_points(end['latitude'], end['longitude'])
                end = next_point
        trips_by_vin[vin] = trips
    conn.close()
    return trips_by_vin
def gen_function(func_attrs, reduction_op, reduction_identity='ElementCompute()'): backend_spec = CUDASpec() elem_input_type = backend_spec.dtype_to_lib_type(func_attrs['inputs'][0]._attrs['dtype']) output_type = func_attrs['outputs'][0]._attrs['dtype'] elem_output_type = backend_spec.dtype_to_lib_type(output_type) vector_lens_config = [32, 16, 8, 4, 1] exec_paths = '' for vlen in vector_lens_config: exec_program = EXEC_COND_TEMPLATE.render(func_name=func_attrs['name'], elem_input_type=elem_input_type, elem_output_type=elem_output_type, vector_length=vlen, indent=' ') exec_paths += exec_program if (func_attrs.get('workspace', 0) > 0): workspace_ptr = 'workspace' else: workspace_ptr = 'nullptr' accumulation_type = 'float' if (Target.current()._kwargs.get('use_fp16_acc', False) and (output_type == 'float16')): accumulation_type = elem_output_type special_reduction_code = '' if meets_special_kernel_conditions(func_attrs, elem_input_type, elem_output_type): (exec_paths, special_reduction_code) = get_special_exec_cond_and_kernel(func_attrs, elem_input_type, elem_output_type, accumulation_type, func_attrs['output_accessors'], reduction_op, reduction_identity) return SRC_TEMPLATE.render(func_name=func_attrs['name'], reduction_op=reduction_op, reduction_identity=reduction_identity, exec_paths=exec_paths, workspace_ptr=workspace_ptr, accumulation_type=accumulation_type, special_reduction_code=special_reduction_code)
def prepare_db(user_engine=None, user_session=None, print_sql=False): global engine global Session if ((user_engine is not None) and (user_session is not None)): engine = user_engine Session = user_session if ((engine is None) or (Session is None)): engine_args = {'encoding': 'utf-8'} if print_sql: engine_args['echo'] = True engine = create_engine(SQL_ALCHEMY_CONN, **engine_args) Session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=engine, expire_on_commit=False))
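# Usage sketch (assumed, not from the source): initialize the module-level
# engine/Session once, then hand out scoped sessions. Note the 'encoding'
# engine argument used above exists in SQLAlchemy 1.x but was removed in 2.0.
prepare_db(print_sql=True)   # builds the engine from SQL_ALCHEMY_CONN
session = Session()
try:
    # ... session.query(...) / session.add(...) ...
    session.commit()
finally:
    Session.remove()         # scoped_session cleanup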
def validate_bls_to_execution_change(btec_dict: Dict[(str, Any)], credential: Credential, *, input_validator_index: int, input_execution_address: str, chain_setting: BaseChainSetting) -> bool: validator_index = int(btec_dict['message']['validator_index']) from_bls_pubkey = BLSPubkey(decode_hex(btec_dict['message']['from_bls_pubkey'])) to_execution_address = decode_hex(btec_dict['message']['to_execution_address']) signature = BLSSignature(decode_hex(btec_dict['signature'])) genesis_validators_root = decode_hex(btec_dict['metadata']['genesis_validators_root']) if (validator_index != input_validator_index): return False if (from_bls_pubkey != credential.withdrawal_pk): return False if ((to_execution_address != credential.eth1_withdrawal_address) or (to_execution_address != decode_hex(input_execution_address))): return False if (genesis_validators_root != chain_setting.GENESIS_VALIDATORS_ROOT): return False message = BLSToExecutionChange(validator_index=validator_index, from_bls_pubkey=from_bls_pubkey, to_execution_address=to_execution_address) domain = compute_bls_to_execution_change_domain(fork_version=chain_setting.GENESIS_FORK_VERSION, genesis_validators_root=genesis_validators_root) signing_root = compute_signing_root(message, domain) if (not bls.Verify(BLSPubkey(credential.withdrawal_pk), signing_root, signature)): return False return True
@register.simple_tag(takes_context=True)
def feincms_nav(context, feincms_page, level=1, depth=1, group=None):
    page_class = _get_page_model()
    if (not feincms_page):
        return []
    if isinstance(feincms_page, HttpRequest):
        try:
            feincms_page = page_class.objects.for_request(feincms_page, best_match=True)
        except page_class.DoesNotExist:
            return []
    mptt_opts = feincms_page._mptt_meta
    mptt_level_range = [(level - 1), ((level + depth) - 1)]
    queryset = feincms_page.__class__._default_manager.in_navigation().filter(**{('%s__gte' % mptt_opts.level_attr): mptt_level_range[0], ('%s__lt' % mptt_opts.level_attr): mptt_level_range[1]})
    page_level = getattr(feincms_page, mptt_opts.level_attr)
    parent = None
    if (level > 1):
        if ((level - 2) == page_level):
            parent = feincms_page
        elif ((level - 2) < page_level):
            parent = feincms_page.get_ancestors()[(level - 2)]
        elif ((level - 1) > page_level):
            queryset = page_class.objects.none()
    if parent:
        if getattr(parent, 'navigation_extension', None):
            return list(parent.extended_navigation(depth=depth, request=context.get('request')))
        queryset &= parent.get_descendants()
    if (depth > 1):
        parents = {None}
        if parent:
            parents.add(parent.id)

        def _parentactive_filter(iterable):
            for elem in iterable:
                if (elem.parent_id in parents):
                    (yield elem)
                    parents.add(elem.id)
        queryset = _parentactive_filter(queryset)
    if (group is not None):
        def _navigationgroup_filter(iterable):
            for elem in iterable:
                if (getattr(elem, 'navigation_group', None) == group):
                    (yield elem)
        queryset = _navigationgroup_filter(queryset)
    if hasattr(feincms_page, 'navigation_extension'):
        def _navext_filter(iterable):
            current_navextension_node = None
            for elem in iterable:
                if ((current_navextension_node is not None) and current_navextension_node.is_ancestor_of(elem)):
                    continue
                (yield elem)
                if getattr(elem, 'navigation_extension', None):
                    current_navextension_node = elem
                    try:
                        for extended in elem.extended_navigation(depth=depth, request=context.get('request')):
                            this_level = getattr(extended, mptt_opts.level_attr, 0)
                            if (this_level < ((level + depth) - 1)):
                                (yield extended)
                    except Exception as e:
                        logger.warn('feincms_nav caught exception in navigation extension for page %d: %s', current_navextension_node.id, format_exception(e))
                else:
                    current_navextension_node = None
        queryset = _navext_filter(queryset)
    return list(queryset)
class Query(object):

    @staticmethod
    def Channel(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/Channel', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryChannelRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryChannelResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def Channels(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/Channels', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryChannelsRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryChannelsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ConnectionChannels(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/ConnectionChannels', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryConnectionChannelsRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryConnectionChannelsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ChannelClientState(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/ChannelClientState', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryChannelClientStateRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryChannelClientStateResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def ChannelConsensusState(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/ChannelConsensusState', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryChannelConsensusStateRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryChannelConsensusStateResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def PacketCommitment(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/PacketCommitment', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryPacketCommitmentRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryPacketCommitmentResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def PacketCommitments(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/PacketCommitments', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryPacketCommitmentsRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryPacketCommitmentsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def PacketReceipt(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/PacketReceipt', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryPacketReceiptRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryPacketReceiptResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def PacketAcknowledgement(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/PacketAcknowledgement', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryPacketAcknowledgementRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryPacketAcknowledgementResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def PacketAcknowledgements(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/PacketAcknowledgements', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryPacketAcknowledgementsRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryPacketAcknowledgementsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def UnreceivedPackets(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/UnreceivedPackets', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryUnreceivedPacketsRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryUnreceivedPacketsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def UnreceivedAcks(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/UnreceivedAcks', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryUnreceivedAcksRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryUnreceivedAcksResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)

    @staticmethod
    def NextSequenceReceive(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None):
        return grpc.experimental.unary_unary(request, target, '/ibc.core.channel.v1.Query/NextSequenceReceive', ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryNextSequenceReceiveRequest.SerializeToString, ibc_dot_core_dot_channel_dot_v1_dot_query__pb2.QueryNextSequenceReceiveResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
class OptionSeriesNetworkgraphDataEvents(Options):
    # The @property / @<name>.setter decorators were stripped in extraction;
    # without them every second definition would silently shadow the first.

    @property
    def click(self): return self._config_get(None)
    @click.setter
    def click(self, value: Any): self._config(value, js_type=False)

    @property
    def drag(self): return self._config_get(None)
    @drag.setter
    def drag(self, value: Any): self._config(value, js_type=False)

    @property
    def dragStart(self): return self._config_get(None)
    @dragStart.setter
    def dragStart(self, value: Any): self._config(value, js_type=False)

    @property
    def drop(self): return self._config_get(None)
    @drop.setter
    def drop(self, value: Any): self._config(value, js_type=False)

    @property
    def mouseOut(self): return self._config_get(None)
    @mouseOut.setter
    def mouseOut(self, value: Any): self._config(value, js_type=False)

    @property
    def mouseOver(self): return self._config_get(None)
    @mouseOver.setter
    def mouseOver(self, value: Any): self._config(value, js_type=False)

    @property
    def remove(self): return self._config_get(None)
    @remove.setter
    def remove(self, value: Any): self._config(value, js_type=False)

    @property
    def select(self): return self._config_get(None)
    @select.setter
    def select(self, value: Any): self._config(value, js_type=False)

    @property
    def unselect(self): return self._config_get(None)
    @unselect.setter
    def unselect(self, value: Any): self._config(value, js_type=False)

    @property
    def update(self): return self._config_get(None)
    @update.setter
    def update(self, value: Any): self._config(value, js_type=False)
class TestDeleteComposableTemplateParamSource():

    def test_delete_composable_template_by_name(self):
        source = params.DeleteComposableTemplateParamSource(
            track.Track(name='unit-test'), params={'template': 'default'})
        assert source.params() == {
            'template': 'default',
            'templates': [('default', False, None)],
            'only-if-exists': True,
            'request-params': {},
        }

    def test_no_composable_templates(self):
        with pytest.raises(exceptions.InvalidSyntax) as exc:
            params.DeleteComponentTemplateParamSource(
                track.Track(name='unit-test'),
                params={'operation-type': 'delete-composable-template'})
        assert exc.value.args[0] == "Please set the property 'template' for the delete-composable-template operation."

    def test_delete_composable_template_from_track(self):
        # The empty property name below was almost certainly '@timestamp' before
        # extraction stripped the '@'-prefixed token; restored on that assumption.
        tpl1 = track.IndexTemplate(
            name='logs', pattern='logs-*',
            content={'template': {'mappings': {'properties': {'@timestamp': {'type': 'date'}}}}})
        tpl2 = track.IndexTemplate(
            name='metrics', pattern='metrics-*',
            content={'template': {'settings': {'index.number_of_shards': 1, 'index.number_of_replicas': 1}}})
        source = params.DeleteComposableTemplateParamSource(
            track.Track(name='unit-test', composable_templates=[tpl1, tpl2]),
            params={'request-params': {'master_timeout': 20}, 'only-if-exists': False})
        p = source.params()
        assert len(p['templates']) == 2
        assert p['templates'][0][0] == 'logs'
        assert p['templates'][1][0] == 'metrics'
        assert not p['only-if-exists']
        assert p['request-params'] == {'master_timeout': 20}
        source = params.DeleteComposableTemplateParamSource(
            track.Track(name='unit-test', composable_templates=[tpl1, tpl2]),
            params={'template': 'logs'})
        p = source.params()
        assert len(p['templates']) == 1
        assert p['templates'][0][0] == 'logs'
def extractXiongmaotlsWordpressCom(item): (vol, chp, frag, postfix) = extractVolChapterFragmentPostfix(item['title']) if ((not (chp or vol)) or ('preview' in item['title'].lower())): return None tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')] for (tagname, name, tl_type) in tagmap: if (tagname in item['tags']): return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type) return False
def downgrade():
    # The referenced 'level' table must exist before the foreign key is
    # created; the original ordering would fail on create_foreign_key.
    op.create_table('level',
        sa.Column('id', sa.INTEGER(), nullable=False),
        sa.Column('name', sa.VARCHAR(), autoincrement=False, nullable=False),
        sa.Column('label_en', sa.VARCHAR(), autoincrement=False, nullable=True),
        sa.Column('event_id', sa.INTEGER(), autoincrement=False, nullable=False),
        sa.PrimaryKeyConstraint('id', name=u'level_pkey'))
    op.add_column('session', sa.Column('level_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.create_foreign_key(u'session_level_id_fkey', 'session', 'level', ['level_id'], ['id'])
class LtileLayer(): def __init__(self, component, token, url, options, leaflet_map=None): (self.component, self.leaflet_map) = (component, leaflet_map) self.srv_url = ('%s?access_token=%s' % (url, token)) (self.options, self.__is_attached, self._js) = ((options or {}), False, []) def attribution(self, text: str): self.options['attribution'] = text return self def maxBounds(self, latLngBounds): self.options['maxBounds'] = latLngBounds return self def maxZoom(self, num: float): self.options['maxZoom'] = num return self def minZoom(self, num: float): self.options['minZoom'] = num return self def id(self, value): self.options['id'] = value return self def tileSize(self, num: int): self.options['tileSize'] = num return self def zoomOffset(self, num: float): self.options['zoomOffset'] = num return self def accessToken(self, token: str): self.srv_url = ('%s?access_token=%s' % (self.srv_url.split('?')[0], token)) return self def addTo(self, map): self.__is_attached = True self._js.append(('addTo(%s)' % map)) return self def toStr(self): if ((not self.__is_attached) and (self.leaflet_map is not None)): self.addTo(self.leaflet_map.varName) js_fnc = '.'.join(self._js) self._js = [] return ('L.tileLayer(%s, %s).%s' % (JsUtils.jsConvertData(self.srv_url, None), JsUtils.jsConvertData(self.options, None), js_fnc))
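A hedged usage sketch for the wrapper above; the tile URL and token are placeholders, and it assumes epyk's JsUtils (used inside toStr) is importable where the class lives.

layer = LtileLayer(component=None, token='TOKEN_PLACEHOLDER',
                   url='https://tiles.example.com/styles/streets/{z}/{x}/{y}', options=None)
layer.attribution('map data (c) example').maxZoom(18).tileSize(512).zoomOffset(-1)
layer.addTo('map_1')
print(layer.toStr())  # emits: L.tileLayer("https://...?access_token=...", {...}).addTo(map_1)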
def Run(params): base_branch = params.args[1] repos_and_local_branches = GetReposAndLocalBranches(params, patterns=[('*%s*' % base_branch)]) branch_to_repos = ConvertRepoToBranchesToBranchToRepos(repos_and_local_branches) if (len(params.config.repos) == 1): params.config.serial = True if ((base_branch in branch_to_repos) or (not branch_to_repos)): from .action_default import Run return Run(params) if (len(branch_to_repos) == 1): (branch, _repo) = list(branch_to_repos.items()).pop() params.args[1] = branch from .action_default import Run return Run(params) Print(('Found more than one branch that matches ${START_COLOR}%s${RESET_COLOR}:\n' % params.args[1])) PrintBranchToRepos(branch_to_repos, params) Print('\n${START_COLOR}ERROR${RESET_COLOR}: unable to decide branch to work on.', __color__='RED')
# The decorator stack here was mangled in extraction: the first decorator's
# callable name is gone entirely (only its arguments survived) and the other
# two lost a leading fragment, so '@cq.task', '@_lock' and '@_task_log' below
# are reconstructions/placeholders rather than verified names.
@cq.task(name='api.mon.node.tasks.mon_node_sync', base=NodeMonInternalTask)
@_lock(key_kwargs=('node_uuid',), wait_for_release=True)
@_task_log(LOG_MON_NODE_UPDATE)
def mon_node_sync(task_id, sender, node_uuid=None, log=LOG, **kwargs):
    assert node_uuid
    node = log.obj = Node.objects.get(uuid=node_uuid)
    return get_monitoring(DefaultDc()).node_sync(node, task_log=log)
class TestUserPermissions(BaseTestCase): def test_user_permissions(self, db_session): created_user = add_user(db_session) permissions = UserService.permissions(created_user, db_session=db_session) expected = [PermissionTuple(created_user, 'alter_users', 'user', None, None, False, True), PermissionTuple(created_user, 'root', 'user', None, None, False, True)] check_one_in_other(permissions, expected) def test_owned_permissions(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') created_user.resources.append(resource) db_session.flush() resources = UserService.resources_with_perms(created_user, ['test_perm'], db_session=db_session).all() assert (resources[0] == resource) permission = ResourceService.direct_perms_for_user(resource, created_user)[0] assert (permission.owner is True) assert (permission.allowed is True) assert (permission.user.id == created_user.id) def test_resources_with_perm(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') permission = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource.resource_id) resource.user_permissions.append(permission) db_session.flush() resources = UserService.resources_with_perms(created_user, ['test_perm'], db_session=db_session).all() assert (resources[0] == resource) def test_mixed_perms(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') permission = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource.resource_id) resource.user_permissions.append(permission) resource2 = add_resource(db_session, 2, 'test_resource') created_user.resources.append(resource2) add_resource(db_session, 3, 'test_resource') add_resource_b(db_session, 4, 'test_resource') db_session.flush() resources = UserService.resources_with_perms(created_user, ['test_perm'], db_session=db_session).all() found_ids = [r.resource_id for r in resources] assert (sorted(found_ids) == [1, 2]) def test_resources_with_perm_type_found(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') permission = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource.resource_id) resource.user_permissions.append(permission) db_session.flush() resources = UserService.resources_with_perms(created_user, ['test_perm'], resource_types=['test_resource'], db_session=db_session).all() assert (resources[0] == resource) def test_resources_with_perm_type_not_found(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') permission = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource.resource_id) resource.user_permissions.append(permission) db_session.flush() resources = UserService.resources_with_perms(created_user, ['test_perm'], resource_types=['test_resource_b'], db_session=db_session).all() assert (resources == []) def test_resources_with_perm_type_other_found(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') resource2 = add_resource_b(db_session, 2, 'test_resource') resource3 = add_resource(db_session, 3, 'test_resource') resource4 = add_resource_b(db_session, 4, 'test_resource') db_session.flush() permission = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource.resource_id) 
resource.user_permissions.append(permission) permission2 = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource2.resource_id) resource2.user_permissions.append(permission2) permission3 = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource3.resource_id) resource3.user_permissions.append(permission3) permission4 = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource4.resource_id) resource4.user_permissions.append(permission4) db_session.flush() resources = UserService.resources_with_perms(created_user, ['test_perm'], resource_types=['test_resource_b'], db_session=db_session).all() assert (len(resources) == 2) def test_resources_with_wrong_perm(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') permission = UserResourcePermission(perm_name='test_perm_bad', user_id=created_user.id, resource_id=resource.resource_id) with pytest.raises(AssertionError): resource.user_permissions.append(permission) def test_multiple_resources_with_perm(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') permission = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource.resource_id) resource.user_permissions.append(permission) resource2 = add_resource(db_session, 2, 'test_resource2') permission2 = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource2.resource_id) resource2.user_permissions.append(permission2) resources = UserService.resources_with_perms(created_user, ['test_perm'], db_session=db_session).all() assert (resources == [resource, resource2]) def test_resources_ids_with_perm(self, db_session): created_user = add_user(db_session) resource1 = add_resource(db_session, 1, 'test_resource1') resource2 = add_resource(db_session, 2, 'test_resource2') resource3 = add_resource(db_session, 3, 'test_resource3') permission1 = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource1.resource_id) permission2 = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource2.resource_id) permission3 = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource3.resource_id) resource1.user_permissions.append(permission1) resource2.user_permissions.append(permission2) resource3.user_permissions.append(permission3) db_session.flush() resources = UserService.resources_with_perms(created_user, ['test_perm'], resource_ids=[1, 3], db_session=db_session).all() assert (resources == [resource1, resource3]) def test_resources_with_wrong_group_permission(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') group = add_group(db_session) group.users.append(created_user) group_permission = GroupResourcePermission(perm_name='test_perm_bad', group_id=group.id, resource_id=resource.resource_id) with pytest.raises(AssertionError): resource.group_permissions.append(group_permission) def test_resources_with_group_permission(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') resource2 = add_resource(db_session, 2, 'test_resource2') add_resource(db_session, 3, 'test_resource3') group = add_group(db_session) group.users.append(created_user) group_permission = GroupResourcePermission(perm_name='test_perm', group_id=1, 
resource_id=resource.resource_id) group_permission2 = GroupResourcePermission(perm_name='foo_perm', group_id=1, resource_id=resource2.resource_id) resource.group_permissions.append(group_permission) resource2.group_permissions.append(group_permission2) db_session.flush() resources = UserService.resources_with_perms(created_user, ['foo_perm'], db_session=db_session).all() assert (resources[0] == resource2) def test_resources_with_direct_user_perms(self, db_session): self.set_up_user_group_and_perms(db_session) perms = ResourceService.direct_perms_for_user(self.resource, self.user, db_session=db_session) second = [PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True), PermissionTuple(self.user, 'test_perm2', 'user', None, self.resource, False, True)] check_one_in_other(perms, second) def test_resources_with_direct_group_perms(self, db_session): self.set_up_user_group_and_perms(db_session) perms = ResourceService.group_perms_for_user(self.resource, self.user, db_session=db_session) second = [PermissionTuple(self.user, 'group_perm', 'group', self.group, self.resource, False, True)] check_one_in_other(perms, second) def test_resources_with_user_perms(self, db_session): self.maxDiff = 9999 self.set_up_user_group_and_perms(db_session) perms = ResourceService.perms_for_user(self.resource, self.user, db_session=db_session) second = [PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True), PermissionTuple(self.user, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(self.user, 'test_perm2', 'user', None, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_users_for_perm(self, db_session): self.set_up_user_group_and_perms(db_session) perms = ResourceService.users_for_perm(self.resource, 'foo_perm', db_session=db_session) second = [PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_users_for_any_perm(self, db_session): self.maxDiff = 99999 self.set_up_user_group_and_perms(db_session) perms = ResourceService.users_for_perm(self.resource, '__any_permission__', db_session=db_session) second = [PermissionTuple(self.user, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(self.user, 'test_perm2', 'user', None, self.resource, False, True), PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True), PermissionTuple(self.user4, 'group_perm', 'group', self.group2, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_users_for_any_perm_resource_2(self, db_session): self.set_up_user_group_and_perms(db_session) perms = ResourceService.users_for_perm(self.resource2, '__any_permission__', db_session=db_session) second = [PermissionTuple(self.user2, 'foo_perm', 'user', None, self.resource2, False, True), PermissionTuple(self.user3, 'test_perm', 'user', None, self.resource2, False, True)] check_one_in_other(perms, second) def test_resource_users_limited_users(self, db_session): self.maxDiff = 9999 self.set_up_user_group_and_perms(db_session) perms = ResourceService.users_for_perm(self.resource, '__any_permission__', user_ids=[self.user.id], db_session=db_session) second = [PermissionTuple(self.user, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(self.user, 'test_perm2', 'user', None, self.resource, False, True), PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True)] check_one_in_other(perms, 
second) def test_resource_users_limited_group(self, db_session): self.maxDiff = 9999 self.set_up_user_group_and_perms(db_session) perms = ResourceService.users_for_perm(self.resource, '__any_permission__', user_ids=[self.user.id], group_ids=[self.group2.id], db_session=db_session) second = [PermissionTuple(self.user, 'test_perm2', 'user', None, self.resource, False, True), PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_users_limited_group_other_user_3(self, db_session): self.maxDiff = 9999 self.set_up_user_group_and_perms(db_session) perms = ResourceService.users_for_perm(self.resource2, '__any_permission__', user_ids=[self.user3.id], db_session=db_session) second = [PermissionTuple(self.user3, 'test_perm', 'user', None, self.resource2, False, True)] check_one_in_other(perms, second) def test_resource_users_limited_group_other_user_4(self, db_session): self.maxDiff = 9999 self.set_up_user_group_and_perms(db_session) perms = ResourceService.users_for_perm(self.resource, '__any_permission__', user_ids=[self.user4.id], group_ids=[self.group2.id], db_session=db_session) second = [PermissionTuple(self.user4, 'group_perm', 'group', self.group2, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_users_limited_group_ownage(self, db_session): self.maxDiff = 9999 self.set_up_user_group_and_perms(db_session) resource = ResourceTestobjB(resource_id=99, resource_name='other', owner_user_id=self.user2.id) group3 = add_group(db_session, 'group 3') user2_permission = UserResourcePermission(perm_name='foo_perm', user_id=self.user2.id) group3_permission = GroupResourcePermission(perm_name='group_perm', group_id=group3.id) resource.group_permissions.append(group3_permission) resource.user_permissions.append(user2_permission) group3.users.append(self.user3) self.user.resources.append(resource) self.group2.resources.append(resource) db_session.flush() perms = ResourceService.users_for_perm(resource, '__any_permission__', db_session=db_session) second = [PermissionTuple(self.user2, 'foo_perm', 'user', None, resource, False, True), PermissionTuple(self.user, ALL_PERMISSIONS, 'user', None, resource, True, True), PermissionTuple(self.user4, ALL_PERMISSIONS, 'group', self.group2, resource, True, True), PermissionTuple(self.user3, 'group_perm', 'group', group3, resource, False, True)] check_one_in_other(perms, second) def test_users_for_perms(self, db_session): user = User(user_name='aaa', email='aaa', status=0) UserService.set_password(user, 'password') aaa_perm = UserPermission(perm_name='aaa') bbb_perm = UserPermission(perm_name='bbb') bbb2_perm = UserPermission(perm_name='bbb') user.user_permissions.append(aaa_perm) user.user_permissions.append(bbb_perm) user2 = User(user_name='bbb', email='bbb', status=0) UserService.set_password(user2, 'password') user2.user_permissions.append(bbb2_perm) user3 = User(user_name='ccc', email='ccc', status=0) UserService.set_password(user3, 'password') group = add_group(db_session) group.users.append(user3) db_session.add(user) db_session.add(user2) db_session.flush() users = UserService.users_for_perms(['aaa'], db_session=db_session) assert (len(users.all()) == 1) assert (users[0].user_name == 'aaa') users = UserService.users_for_perms(['bbb'], db_session=db_session).all() assert (len(users) == 2) assert (['aaa', 'bbb'] == sorted([u.user_name for u in users])) users = UserService.users_for_perms(['aaa', 'bbb', 'manage_apps'], db_session=db_session) assert (['aaa', 
'bbb', 'ccc'] == sorted([u.user_name for u in users])) def test_resources_with_possible_perms(self, db_session): self.set_up_user_group_and_perms(db_session) resource = ResourceTestobjB(resource_id=3, resource_name='other', owner_user_id=self.user.id) self.user.resources.append(resource) resource_g = ResourceTestobjB(resource_id=4, resource_name='group owned') self.group.resources.append(resource_g) db_session.flush() perms = UserService.resources_with_possible_perms(self.user, db_session=db_session) second = [PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True), PermissionTuple(self.user, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(self.user, 'test_perm2', 'user', None, self.resource, False, True), PermissionTuple(self.user, ALL_PERMISSIONS, 'user', None, resource, True, True), PermissionTuple(self.user, ALL_PERMISSIONS, 'group', self.group, resource_g, True, True)] check_one_in_other(perms, second) def test_resource_users_for_any_perm_additional_users(self, db_session): self.maxDiff = 99999 self.set_up_user_group_and_perms(db_session) user6 = add_user(db_session, 6, 'user 6') user7 = add_user(db_session, 7, 'user 7') perm2 = GroupResourcePermission(perm_name='group_perm2', resource_id=self.resource.resource_id) self.group.resource_permissions.append(perm2) self.group.users.append(user6) self.group.users.append(user7) perms = ResourceService.users_for_perm(self.resource, '__any_permission__', db_session=db_session) second = [PermissionTuple(self.user, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(user6, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(user7, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(self.user, 'group_perm2', 'group', self.group, self.resource, False, True), PermissionTuple(user6, 'group_perm2', 'group', self.group, self.resource, False, True), PermissionTuple(user7, 'group_perm2', 'group', self.group, self.resource, False, True), PermissionTuple(self.user, 'test_perm2', 'user', None, self.resource, False, True), PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True), PermissionTuple(self.user4, 'group_perm', 'group', self.group2, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_users_for_any_perm_limited_group_perms(self, db_session): self.maxDiff = 99999 self.set_up_user_group_and_perms(db_session) user6 = add_user(db_session, 6, 'user 6') user7 = add_user(db_session, 7, 'user 7') perm2 = GroupResourcePermission(perm_name='group_perm2', resource_id=self.resource.resource_id) self.group.resource_permissions.append(perm2) self.group.users.append(user6) self.group.users.append(user7) perms = ResourceService.users_for_perm(self.resource, '__any_permission__', limit_group_permissions=True, db_session=db_session) second = [PermissionTuple(None, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(None, 'group_perm2', 'group', self.group, self.resource, False, True), PermissionTuple(self.user, 'test_perm2', 'user', None, self.resource, False, True), PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True), PermissionTuple(None, 'group_perm', 'group', self.group2, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_groups_for_any_perm_additional_users(self, db_session): self.maxDiff = 99999 self.set_up_user_group_and_perms(db_session) user6 = add_user(db_session, 6, 'user 6') 
user7 = add_user(db_session, 7, 'user 7') perm2 = GroupResourcePermission(perm_name='group_perm2', resource_id=self.resource.resource_id) self.group.resource_permissions.append(perm2) self.group.users.append(user6) self.group.users.append(user7) perms = ResourceService.groups_for_perm(self.resource, '__any_permission__', db_session=db_session) second = [PermissionTuple(self.user, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(user6, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(user7, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(self.user, 'group_perm2', 'group', self.group, self.resource, False, True), PermissionTuple(user6, 'group_perm2', 'group', self.group, self.resource, False, True), PermissionTuple(user7, 'group_perm2', 'group', self.group, self.resource, False, True), PermissionTuple(self.user4, 'group_perm', 'group', self.group2, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_groups_for_any_perm_just_group_perms_limited(self, db_session): self.maxDiff = 99999 self.set_up_user_group_and_perms(db_session) user6 = add_user(db_session, 6, 'user 6') user7 = add_user(db_session, 7, 'user 7') perm2 = GroupResourcePermission(perm_name='group_perm2', resource_id=self.resource.resource_id) self.group.resource_permissions.append(perm2) self.group.users.append(user6) self.group.users.append(user7) perms = ResourceService.groups_for_perm(self.resource, '__any_permission__', limit_group_permissions=True, db_session=db_session) second = [PermissionTuple(None, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(None, 'group_perm2', 'group', self.group, self.resource, False, True), PermissionTuple(None, 'group_perm', 'group', self.group2, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_users_for_any_perm_excluding_group_perms(self, db_session): self.maxDiff = 99999 self.set_up_user_group_and_perms(db_session) user6 = add_user(db_session, 6, 'user 6') user7 = add_user(db_session, 7, 'user 7') perm2 = GroupResourcePermission(perm_name='group_perm2', resource_id=self.resource.resource_id) self.group.resource_permissions.append(perm2) self.group.users.append(user6) self.group.users.append(user7) perms = ResourceService.users_for_perm(self.resource, '__any_permission__', limit_group_permissions=True, skip_group_perms=True, db_session=db_session) second = [PermissionTuple(self.user, 'test_perm2', 'user', None, self.resource, False, True), PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_groups_for_any_perm_just_group_perms_limited_empty_group(self, db_session): self.maxDiff = 99999 self.set_up_user_group_and_perms(db_session) user6 = add_user(db_session, 6, 'user 6') user7 = add_user(db_session, 7, 'user 7') perm2 = GroupResourcePermission(perm_name='group_perm2', resource_id=self.resource.resource_id) self.group.resource_permissions.append(perm2) self.group.users.append(user6) self.group.users.append(user7) group3 = add_group(db_session, 'Empty group') perm3 = GroupResourcePermission(perm_name='group_permx', resource_id=self.resource.resource_id) group3.resource_permissions.append(perm3) perms = ResourceService.groups_for_perm(self.resource, '__any_permission__', limit_group_permissions=True, db_session=db_session) second = [PermissionTuple(None, 'group_perm', 'group', self.group, self.resource, False, True), 
PermissionTuple(None, 'group_perm2', 'group', self.group, self.resource, False, True), PermissionTuple(None, 'group_perm', 'group', self.group2, self.resource, False, True), PermissionTuple(None, 'group_permx', 'group', group3, self.resource, False, True)] check_one_in_other(perms, second) def test_resource_users_for_any_perm_limited_group_perms_empty_group(self, db_session): self.maxDiff = 99999 self.set_up_user_group_and_perms(db_session) user6 = add_user(db_session, 6, 'user 6') user7 = add_user(db_session, 7, 'user 7') perm2 = GroupResourcePermission(perm_name='group_perm2', resource_id=self.resource.resource_id) self.group.resource_permissions.append(perm2) self.group.users.append(user6) self.group.users.append(user7) group3 = add_group(db_session, 'Empty group') perm3 = GroupResourcePermission(perm_name='group_permx', resource_id=self.resource.resource_id) group3.resource_permissions.append(perm3) perms = ResourceService.users_for_perm(self.resource, '__any_permission__', limit_group_permissions=True, db_session=db_session) second = [PermissionTuple(None, 'group_perm', 'group', self.group, self.resource, False, True), PermissionTuple(None, 'group_perm2', 'group', self.group, self.resource, False, True), PermissionTuple(self.user, 'test_perm2', 'user', None, self.resource, False, True), PermissionTuple(self.user, 'foo_perm', 'user', None, self.resource, False, True), PermissionTuple(None, 'group_perm', 'group', self.group2, self.resource, False, True), PermissionTuple(None, 'group_permx', 'group', group3, self.resource, False, True)] check_one_in_other(perms, second) def test_get_resource_permission(self, db_session): created_user = add_user(db_session) resource = add_resource(db_session, 1, 'test_resource') permission = UserResourcePermission(perm_name='test_perm', user_id=created_user.id, resource_id=resource.resource_id) resource.user_permissions.append(permission) db_session.flush() perm = UserResourcePermissionService.get(user_id=created_user.id, resource_id=resource.resource_id, perm_name='test_perm', db_session=db_session) assert (perm.perm_name == 'test_perm') assert (perm.resource_id == resource.resource_id) assert (perm.user_id == created_user.id)
class SnmpContext(object): def __init__(self, snmpEngine, contextEngineId=None): mibBuilder = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder (snmpEngineId,) = mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID') if (contextEngineId is None): self.contextEngineId = snmpEngineId.syntax else: self.contextEngineId = snmpEngineId.syntax.clone(contextEngineId) ((debug.logger & debug.FLAG_INS) and debug.logger(('SnmpContext: contextEngineId %r' % (self.contextEngineId,)))) self.contextNames = {null: snmpEngine.msgAndPduDsp.mibInstrumController} def registerContextName(self, contextName, mibInstrum=None): contextName = univ.OctetString(contextName).asOctets() if (contextName in self.contextNames): raise error.PySnmpError(('Duplicate contextName %s' % contextName)) ((debug.logger & debug.FLAG_INS) and debug.logger(('registerContextName: registered contextName %r, mibInstrum %r' % (contextName, mibInstrum)))) if (mibInstrum is None): self.contextNames[contextName] = self.contextNames[null] else: self.contextNames[contextName] = mibInstrum def unregisterContextName(self, contextName): contextName = univ.OctetString(contextName).asOctets() if (contextName in self.contextNames): ((debug.logger & debug.FLAG_INS) and debug.logger(('unregisterContextName: unregistered contextName %r' % contextName))) del self.contextNames[contextName] def getMibInstrum(self, contextName=null): contextName = univ.OctetString(contextName).asOctets() if (contextName not in self.contextNames): ((debug.logger & debug.FLAG_INS) and debug.logger(('getMibInstrum: contextName %r not registered' % contextName))) raise error.PySnmpError(('Missing contextName %s' % contextName)) else: ((debug.logger & debug.FLAG_INS) and debug.logger(('getMibInstrum: contextName %r, mibInstum %r' % (contextName, self.contextNames[contextName])))) return self.contextNames[contextName]
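As a usage sketch, this is how a context like the one above is typically wired into pysnmp; the engine construction is standard pysnmp API, while the context name is arbitrary.

from pysnmp.entity import engine

snmp_engine = engine.SnmpEngine()
snmp_context = SnmpContext(snmp_engine)

# A named context backed by the default MIB instrumentation (mibInstrum=None).
snmp_context.registerContextName('backup-plane')
mib_instrum = snmp_context.getMibInstrum('backup-plane')
snmp_context.unregisterContextName('backup-plane')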
class OptionSeriesAreasplinerangeSonification(Options):
    # @property (and the enabled setter) decorators restored; they were
    # stripped in extraction.

    @property
    def contextTracks(self) -> 'OptionSeriesAreasplinerangeSonificationContexttracks':
        return self._config_sub_data('contextTracks', OptionSeriesAreasplinerangeSonificationContexttracks)

    @property
    def defaultInstrumentOptions(self) -> 'OptionSeriesAreasplinerangeSonificationDefaultinstrumentoptions':
        return self._config_sub_data('defaultInstrumentOptions', OptionSeriesAreasplinerangeSonificationDefaultinstrumentoptions)

    @property
    def defaultSpeechOptions(self) -> 'OptionSeriesAreasplinerangeSonificationDefaultspeechoptions':
        return self._config_sub_data('defaultSpeechOptions', OptionSeriesAreasplinerangeSonificationDefaultspeechoptions)

    @property
    def enabled(self): return self._config_get(True)
    @enabled.setter
    def enabled(self, flag: bool): self._config(flag, js_type=False)

    @property
    def pointGrouping(self) -> 'OptionSeriesAreasplinerangeSonificationPointgrouping':
        return self._config_sub_data('pointGrouping', OptionSeriesAreasplinerangeSonificationPointgrouping)

    @property
    def tracks(self) -> 'OptionSeriesAreasplinerangeSonificationTracks':
        return self._config_sub_data('tracks', OptionSeriesAreasplinerangeSonificationTracks)
class File(FileSource): def __init__(self, path, expand_user=True, expand_vars=False, unix_glob=True, recursive_glob=True, filter=None, merger=None): if expand_user: path = os.path.expanduser(path) if expand_vars: path = os.path.expandvars(path) if (unix_glob and set(path).intersection(set('[]?*'))): matches = glob.glob(path, recursive=recursive_glob) if (len(matches) == 1): path = matches[0] if (len(matches) > 1): path = sorted(matches) super().__init__(path, filter, merger)
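The interesting part of File.__init__ is the path resolution: a lone glob match collapses to a single path string, several matches become a sorted list. The standalone sketch below mirrors that logic with the stdlib only, so it can be tried without FileSource.

import glob
import os

def resolve(path, expand_user=True, expand_vars=False, recursive_glob=True):
    # Mirrors the path handling in File.__init__ above.
    if expand_user:
        path = os.path.expanduser(path)
    if expand_vars:
        path = os.path.expandvars(path)
    if set(path) & set('[]?*'):
        matches = glob.glob(path, recursive=recursive_glob)
        if len(matches) == 1:
            return matches[0]        # single hit: plain string
        if len(matches) > 1:
            return sorted(matches)   # several hits: deterministic list
    return path

print(resolve('~/projects/**/*.toml'))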
# '(frozen=True)' was the remnant of a stripped decorator; @dataclass is the
# obvious reconstruction given the annotated fields below.
@dataclass(frozen=True)
class ConditionSymbol():
    condition: Condition
    symbol: LogicCondition
    z3_condition: PseudoLogicCondition

    def __eq__(self, other):
        return (isinstance(other, ConditionSymbol)
                and self.condition == other.condition
                and self.symbol == other.symbol
                and self.z3_condition.is_equivalent_to(other.z3_condition))
class ExchangeRateTests(unittest.TestCase): def test_unicode(self): exchange_rate = ExchangeRate() exchange_rate.SourceCurrencyCode = 'EUR' self.assertEqual(str(exchange_rate), 'EUR') def test_valid_object_name(self): obj = ExchangeRate() client = QuickBooks() result = client.isvalid_object_name(obj.qbo_object_name) self.assertTrue(result)
class Billing(ModelNormal):
    allowed_values = {}
    validations = {}

    # The decorators below restore fragments left by extraction ('_property',
    # '_js_args_to_python_args'); '@cached_property' and
    # '@convert_js_args_to_python_args' are the usual names in OpenAPI-generated
    # Python clients and are assumed here.
    @cached_property
    def additional_properties_type():
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type)

    _nullable = False

    @cached_property
    def openapi_types():
        lazy_import()
        return {
            'end_time': (datetime, none_type),
            'start_time': (datetime, none_type),
            'invoice_id': (str,),
            'customer_id': (str,),
            'vendor_state': (str,),
            'status': (BillingStatus,),
            'total': (BillingTotal,),
            'regions': ({str: ({str: ({str: (bool, date, datetime, dict, float, int, list, str, none_type)},)},)},),
        }

    @cached_property
    def discriminator():
        return None

    attribute_map = {
        'end_time': 'end_time',
        'start_time': 'start_time',
        'invoice_id': 'invoice_id',
        'customer_id': 'customer_id',
        'vendor_state': 'vendor_state',
        'status': 'status',
        'total': 'total',
        'regions': 'regions',
    }

    read_only_vars = {'end_time', 'start_time', 'invoice_id', 'customer_id', 'vendor_state'}

    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, *args, **kwargs):
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        self = super(OpenApiModel, cls).__new__(cls)
        if args:
            raise ApiTypeError(
                'Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.'
                % (args, self.__class__.__name__),
                path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if (var_name not in self.attribute_map and self._configuration is not None
                    and self._configuration.discard_unknown_keys
                    and self.additional_properties_type is None):
                continue
            setattr(self, var_name, var_value)
        return self

    required_properties = set(['_data_store', '_check_type', '_spec_property_naming',
                               '_path_to_item', '_configuration', '_visited_composed_classes'])

    @convert_js_args_to_python_args
    def __init__(self, *args, **kwargs):
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
        if args:
            raise ApiTypeError(
                'Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments.'
                % (args, self.__class__.__name__),
                path_to_item=_path_to_item, valid_classes=(self.__class__,))
        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
        for var_name, var_value in kwargs.items():
            if (var_name not in self.attribute_map and self._configuration is not None
                    and self._configuration.discard_unknown_keys
                    and self.additional_properties_type is None):
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f'`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate class with read only attributes.')
class Solution(): def postorderTraversal(self, root: TreeNode) -> List[int]: stk = [(root, False, False)] ret = [] while stk: (curr, left_done, right_done) = stk.pop() if (curr is None): continue if (left_done and right_done): ret.append(curr.val) continue elif (left_done and (not right_done)): stk.append((curr, True, True)) stk.append((curr.right, False, False)) elif (not left_done): stk.append((curr, True, False)) stk.append((curr.left, False, False)) return ret
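A quick check of the traversal above; the TreeNode class is the usual LeetCode scaffold, which must be defined before the Solution class for the annotations to resolve.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

#   1
#    \
#     2
#    /
#   3
root = TreeNode(1, None, TreeNode(2, TreeNode(3)))
print(Solution().postorderTraversal(root))  # [3, 2, 1]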
class Migration(migrations.Migration): dependencies = [('reporting', '0002_auto__2051')] operations = [migrations.CreateModel(name='ReportingAgencyMissingTas', fields=[('reporting_agency_missing_tas_id', models.AutoField(primary_key=True, serialize=False)), ('toptier_code', models.TextField()), ('fiscal_year', models.IntegerField()), ('fiscal_period', models.IntegerField()), ('tas_rendering_label', models.TextField()), ('obligated_amount', models.DecimalField(decimal_places=2, max_digits=23))], options={'db_table': 'reporting_agency_missing_tas'}), migrations.AddIndex(model_name='reportingagencymissingtas', index=models.Index(fields=['fiscal_year', 'fiscal_period', 'toptier_code'], name='rpt_agency_missing_tas_grp_idx'))]
class BlockStateDep(Checker): def __init__(self): super().__init__() def check(self, logger): block_state_op_idx = (- 1) for (i, log) in enumerate(logger.logs): if (log.op in (COINBASE, TIMESTAMP, NUMBER, DIFFICULTY, GASLIMIT)): block_state_op_idx = i break if (block_state_op_idx == (- 1)): return False last_send_ether_idx = (- 1) for i in range(block_state_op_idx, len(logger.logs)): log = logger.logs[i] if (log.op == CREATE): value = int(log.stack[(- 1)], 16) if (value > 0): last_send_ether_idx = max(last_send_ether_idx, i) elif (log.op in (CALL, CALLCODE)): value = int(log.stack[(- 3)], 16) if (value > 0): last_send_ether_idx = max(last_send_ether_idx, i) if (last_send_ether_idx == (- 1)): return False for i in range(block_state_op_idx, (last_send_ether_idx + 1)): log = logger.logs[i] if (log.op == CREATE): value = int(log.stack[(- 1)], 16) if (value == 0): continue try: (_, value_from_block) = logger.trace_log_stack((i - 1), (- 1)) if value_from_block: return True except RecursionError: pass elif (log.op in (CALL, CALLCODE)): value = int(log.stack[(- 3)], 16) if (value == 0): continue try: (_, value_from_block) = logger.trace_log_stack((i - 1), (- 3)) if value_from_block: return True except RecursionError: continue elif (log.op == JUMPI): try: (_, value_from_block) = logger.trace_log_stack((i - 1), (- 2)) if value_from_block: return True except RecursionError: continue return False
def get_context(context): if (frappe.session.user == 'Guest'): frappe.throw(_('You need to be logged in to access this page'), frappe.PermissionError) context.show_sidebar = True if frappe.db.exists('Patient', {'email': frappe.session.user}): patient = frappe.get_doc('Patient', {'email': frappe.session.user}) context.doc = patient frappe.form_dict.new = 0 frappe.form_dict.name = patient.name
class Migration(migrations.Migration): dependencies = [('django_etebase', '0017_auto__0958')] operations = [migrations.AlterField(model_name='collectionitem', name='uid', field=models.CharField(db_index=True, max_length=43, validators=[django.core.validators.RegexValidator(message='Not a valid UID', regex='^[a-zA-Z0-9\\-_]*$')]))]
# The remnant '(no_gui_test_assistant, ...)' was a stripped decorator;
# @unittest.skipIf is the standard form for this skip pattern and is
# restored on that assumption.
@unittest.skipIf(no_gui_test_assistant, 'No GuiTestAssistant')
class TestDoLater(TestCase, GuiTestAssistant):

    def setUp(self):
        GuiTestAssistant.setUp(self)

    def tearDown(self):
        GuiTestAssistant.tearDown(self)

    def test_basic(self):
        handler = ConditionHandler()
        timer = do_later(handler.callback)
        try:
            self.assertTrue(timer.active)
            self.event_loop_helper.event_loop_until_condition(lambda: not timer.active)
            self.assertFalse(timer.active)
        finally:
            timer.stop()
        self.assertEqual(handler.count, 1)
class TestCalcParser(unittest.TestCase): def setUp(self): self.calc_parser = rd_parser.CalcParser() def calc(self, calc_stmt): self.calc_parser.calc(calc_stmt) def assertResult(self, calc_stmt, result): self.assertEqual(self.calc_parser.calc(calc_stmt), result) def assertParseError(self, calc_stmt): self.assertRaises(rd_parser.ParseError, self.calc_parser.calc, calc_stmt) def test_basics(self): self.assertResult('5', 5) self.assertResult('2 * (2 - 2)', 0) self.assertResult('2 + 7', 9) self.assertResult('22 / 11', 2) self.assertResult('2 + -3', (- 1)) self.assertResult('2 ** -3', 0.125) self.assertResult('2 + -6 ** 2', 38) self.assertResult('2 + 7 * -3', (- 19)) self.assertResult('2 ** 3 ** 2 + 7', 519) self.assertResult('9991929 > 881828', True) self.assertResult('9991929 >= 881828', True) self.assertResult('9991929 < 881828', False) self.assertResult('9991929 <= 881828', False) self.assertResult('9991929 != 881828', True) self.assertResult('9991929 == 881828', False) def test_bitwise(self): self.assertResult('2 << 2', 8) self.assertResult('1024 >> 3', 128) self.assertResult('15 & 12', 12) self.assertResult('15 | 12', 15) self.assertResult('5 ^ 2', 7) self.assertResult('5 ^ 16 & 12 + 11 | 13', 29) self.assertResult('2 & 3 ^ (4 | 1) * 11 | 1002 & 4', 53) def test_vars(self): self.calc('set joe = 15') self.calc('set mxa = 993 - 998') self.assertResult('joe + mxa * 2', 5) self.calc('set joe = 10') self.calc('set kkkkkk = joe + -mxa') self.assertResult('kkkkkk', 15) def test_statements(self): self.calc('set x = 4') self.assertResult('if x == 4 then 10 else 15', 10) self.assertResult('if x != 4 then 10 else 15', 15) self.assertResult('if x != 4 then 10', None) self.assertResult('if x == 4 then 10', 10) self.calc('set p = 1') self.calc('if 1 == 0 then set p = 12 else set p = 16') self.assertResult('p', 16) self.calc('if 0 == 0 then set p = 12 else set p = 16') self.assertResult('p', 12) self.calc('if 5 >= 5 then set p = p * 2 else set p = 0') self.assertResult('p', 24) self.calc('if 5 > 5 then set p = p * 2 else set p = 0') self.assertResult('p', 0) def test_long(self): self.calc('set joe = 2 - 1 - 1') self.calc('set mar = joe + 2 ** 4 * -3') self.calc('set pie = 2 ** 3 ** 2') self.calc('if joe != 0 then set pie = 3') self.calc('if 1 == 1 then set k = 10 else set k = 20') self.calc('if k > 20 then set k = 12') self.calc('if k <= 11 then set t = 0 else set t = 2') self.assertResult('pie - (k * -mar) + k + t', 42) def test_errors(self): self.assertParseError('2 + ') self.assertParseError('apchi') self.assertParseError('1 + 2 12') self.assertParseError('(1 + 2') self.assertParseError('p + 1') self.assertParseError('if 1 == 2 the')
class OptionSeriesArearangeStatesSelectMarker(Options):
    # @property / setter decorators restored (stripped in extraction).

    @property
    def enabled(self): return self._config_get(None)
    @enabled.setter
    def enabled(self, flag: bool): self._config(flag, js_type=False)

    @property
    def enabledThreshold(self): return self._config_get(2)
    @enabledThreshold.setter
    def enabledThreshold(self, num: float): self._config(num, js_type=False)

    @property
    def fillColor(self): return self._config_get(None)
    @fillColor.setter
    def fillColor(self, text: str): self._config(text, js_type=False)

    @property
    def height(self): return self._config_get(None)
    @height.setter
    def height(self, num: float): self._config(num, js_type=False)

    @property
    def lineColor(self): return self._config_get('#ffffff')
    @lineColor.setter
    def lineColor(self, text: str): self._config(text, js_type=False)

    @property
    def lineWidth(self): return self._config_get(0)
    @lineWidth.setter
    def lineWidth(self, num: float): self._config(num, js_type=False)

    @property
    def radius(self): return self._config_get(4)
    @radius.setter
    def radius(self, num: float): self._config(num, js_type=False)

    @property
    def width(self): return self._config_get(None)
    @width.setter
    def width(self, num: float): self._config(num, js_type=False)
class DummyEmbeddingProvider(BaseEmbeddingProvider):

    def get(self, text):
        return np.zeros(768)

    def __call__(self, text):
        return self.get(text)

    def config(self):
        return {'class': self.__class__.__name__, 'type': 'embedding_provider'}

    # from_config takes `cls`, so the stripped @classmethod is restored here.
    @classmethod
    def from_config(cls, config):
        return cls()
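A quick usage sketch, assuming numpy is imported as np in the module that defines the provider:

provider = DummyEmbeddingProvider()
vector = provider('any text at all')
assert vector.shape == (768,)

# Round-trip through the config machinery.
clone = DummyEmbeddingProvider.from_config(provider.config())
assert isinstance(clone, DummyEmbeddingProvider)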
def test_strava_widget_popup(manager_nospawn, strava): manager_nospawn.start(strava) data_parsed(manager_nospawn) manager_nospawn.c.bar['top'].fake_button_press(0, 'top', 0, 0, 1) assert (len(manager_nospawn.c.internal_windows()) == 2) (_, text) = manager_nospawn.c.widget['stravawidget'].eval('self.popup.text') assert (text == ' Date Title km time pace \n20 Nov: Test Activity 1 10.0 0:45:00 4:30\n21 Nov: Test Activity 2 21.1 1:45:00 4:58\n\nNov 21: 2 runs 31.1 2:30:00 4:49\nOct 21: 0 runs 0.0 0:00:00 0:00\nSep 21: 0 runs 0.0 0:00:00 0:00\nAug 21: 0 runs 0.0 0:00:00 0:00\nJul 21: 0 runs 0.0 0:00:00 0:00\nJun 21: 0 runs 0.0 0:00:00 0:00\n\n2021 : 2 runs 31.1 2:30:00 4:49\n\nTOTAL : 2 runs 31.1 2:30:00 4:49')
# The bare '.parametrize(...)' lost its '@pytest.mark' prefix in extraction;
# restored here.
@pytest.mark.parametrize('argset', (mixed_test_argsets() + mixed_trial_argsets()))
def test_replace_arg_mixed(mixed_form, argset):
    new_arg = argset.new_arg
    idxs = argset.idxs
    error = argset.error
    replace_function = argset.replace_function
    arg_idx = argset.arg_idx
    mixed_form = mixed_form.label_map((lambda t: t.has_label(subject)),
                                      replace_subject(TrialFunction(W)), drop)
    if error is None:
        new_form = mixed_form.label_map(all_terms, map_if_true=replace_function(new_arg, **idxs))
        if 'new_idx' in idxs:
            split_arg = new_arg if type(new_arg) is tuple else split(new_arg)
            new_arg = split_arg[idxs['new_idx']].ufl_operands[0]
        if isinstance(new_arg, Argument):
            assert new_form.form.arguments()[arg_idx] is new_arg
        elif type(new_arg) is Function:
            assert new_form.form.coefficients()[0] is new_arg
    else:
        with pytest.raises(error):
            new_form = mixed_form.label_map(all_terms, map_if_true=replace_function(new_arg, **idxs))
# `draw` as the first parameter marks this as a hypothesis composite strategy;
# the stripped @st.composite decorator is restored here.
@st.composite
def summary_observations(draw, summary_keys, std_cutoff, names):
    kws = {
        'name': draw(names),
        'key': draw(summary_keys),
        'error': draw(st.floats(min_value=std_cutoff, allow_nan=False, allow_infinity=False)),
        'error_min': draw(positive_floats),
        'error_mode': draw(st.sampled_from(ErrorMode)),
        'value': draw(positive_floats),
    }
    time_type = draw(st.sampled_from(['date', 'days', 'restart', 'hours']))
    if time_type == 'date':
        date = draw(st.datetimes(max_value=datetime.datetime(year=2037, month=1, day=1),
                                 min_value=datetime.datetime(year=1999, month=1, day=2)))
        kws['date'] = date.strftime('%Y-%m-%d')
    if time_type in ['days', 'hours']:
        kws[time_type] = draw(st.floats(min_value=1, max_value=3000))
    if time_type == 'restart':
        kws[time_type] = draw(st.integers(min_value=1, max_value=10))
    return SummaryObservation(**kws)
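With @st.composite restored, calling summary_observations(...) returns a strategy that hypothesis can drive; a hedged consumption sketch follows (the concrete sub-strategies and the bound are invented for illustration).

from hypothesis import given, strategies as st

@given(summary_observations(summary_keys=st.just('FOPR'),
                            std_cutoff=0.1,
                            names=st.text(min_size=1, max_size=8)))
def test_summary_observation_error_cutoff(observation):
    # The strategy draws `error` with min_value=std_cutoff, so this holds.
    assert observation.error >= 0.1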
class OptionSeriesAreasplineDatalabels(Options):
    # @property / setter decorators restored throughout (stripped in extraction).

    @property
    def align(self): return self._config_get('center')
    @align.setter
    def align(self, text: str): self._config(text, js_type=False)

    @property
    def allowOverlap(self): return self._config_get(False)
    @allowOverlap.setter
    def allowOverlap(self, flag: bool): self._config(flag, js_type=False)

    @property
    def animation(self) -> 'OptionSeriesAreasplineDatalabelsAnimation':
        return self._config_sub_data('animation', OptionSeriesAreasplineDatalabelsAnimation)

    @property
    def backgroundColor(self): return self._config_get(None)
    @backgroundColor.setter
    def backgroundColor(self, text: str): self._config(text, js_type=False)

    @property
    def borderColor(self): return self._config_get(None)
    @borderColor.setter
    def borderColor(self, text: str): self._config(text, js_type=False)

    @property
    def borderRadius(self): return self._config_get(0)
    @borderRadius.setter
    def borderRadius(self, num: float): self._config(num, js_type=False)

    @property
    def borderWidth(self): return self._config_get(0)
    @borderWidth.setter
    def borderWidth(self, num: float): self._config(num, js_type=False)

    @property
    def className(self): return self._config_get(None)
    @className.setter
    def className(self, text: str): self._config(text, js_type=False)

    @property
    def color(self): return self._config_get(None)
    @color.setter
    def color(self, text: str): self._config(text, js_type=False)

    @property
    def crop(self): return self._config_get(True)
    @crop.setter
    def crop(self, flag: bool): self._config(flag, js_type=False)

    @property
    def defer(self): return self._config_get(True)
    @defer.setter
    def defer(self, flag: bool): self._config(flag, js_type=False)

    @property
    def enabled(self): return self._config_get(False)
    @enabled.setter
    def enabled(self, flag: bool): self._config(flag, js_type=False)

    @property
    def filter(self) -> 'OptionSeriesAreasplineDatalabelsFilter':
        return self._config_sub_data('filter', OptionSeriesAreasplineDatalabelsFilter)

    @property
    def format(self): return self._config_get('point.value')
    @format.setter
    def format(self, text: str): self._config(text, js_type=False)

    @property
    def formatter(self): return self._config_get(None)
    @formatter.setter
    def formatter(self, value: Any): self._config(value, js_type=False)

    @property
    def inside(self): return self._config_get(None)
    @inside.setter
    def inside(self, flag: bool): self._config(flag, js_type=False)

    @property
    def nullFormat(self): return self._config_get(None)
    @nullFormat.setter
    def nullFormat(self, flag: bool): self._config(flag, js_type=False)

    @property
    def nullFormatter(self): return self._config_get(None)
    @nullFormatter.setter
    def nullFormatter(self, value: Any): self._config(value, js_type=False)

    @property
    def overflow(self): return self._config_get('justify')
    @overflow.setter
    def overflow(self, text: str): self._config(text, js_type=False)

    @property
    def padding(self): return self._config_get(5)
    @padding.setter
    def padding(self, num: float): self._config(num, js_type=False)

    @property
    def position(self): return self._config_get('center')
    @position.setter
    def position(self, text: str): self._config(text, js_type=False)

    @property
    def rotation(self): return self._config_get(0)
    @rotation.setter
    def rotation(self, num: float): self._config(num, js_type=False)

    @property
    def shadow(self): return self._config_get(False)
    @shadow.setter
    def shadow(self, flag: bool): self._config(flag, js_type=False)

    @property
    def shape(self): return self._config_get('square')
    @shape.setter
    def shape(self, text: str): self._config(text, js_type=False)

    @property
    def style(self): return self._config_get(None)
    @style.setter
    def style(self, value: Any): self._config(value, js_type=False)

    @property
    def textPath(self) -> 'OptionSeriesAreasplineDatalabelsTextpath':
        return self._config_sub_data('textPath', OptionSeriesAreasplineDatalabelsTextpath)

    @property
    def useHTML(self): return self._config_get(False)
    @useHTML.setter
    def useHTML(self, flag: bool): self._config(flag, js_type=False)

    @property
    def verticalAlign(self): return self._config_get('bottom')
    @verticalAlign.setter
    def verticalAlign(self, text: str): self._config(text, js_type=False)

    @property
    def x(self): return self._config_get(0)
    @x.setter
    def x(self, num: float): self._config(num, js_type=False)

    @property
    def y(self): return self._config_get(0)
    @y.setter
    def y(self, num: float): self._config(num, js_type=False)

    @property
    def zIndex(self): return self._config_get(6)
    @zIndex.setter
    def zIndex(self, num: float): self._config(num, js_type=False)
# Only a fragment of this decorator survived extraction; the '@' is restored
# here and the name kept as found ('_metrics.timeit' may have lost a prefix).
@_metrics.timeit
def __country_codes(region_field=None, key=None, context=None, randomstate=None, df=None, sampler=None):
    region = None
    if region_field and df:
        region = df[region_field]
        key = (key + '-') + region
    if not context.has_generator(key):
        generator = CountryCodeGenerator(randomstate, region)
        context.add_generator(key, generator)
    generator = context.get_generator(key)
    return generator.make()
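The function above memoises one generator per key/region pair on a shared context object. Below is a self-contained re-creation of that registry pattern, with plain stand-ins for the external Context and CountryCodeGenerator classes.

import random

class Context:
    # Stand-in for the external generator registry.
    def __init__(self):
        self._generators = {}
    def has_generator(self, key):
        return key in self._generators
    def add_generator(self, key, generator):
        self._generators[key] = generator
    def get_generator(self, key):
        return self._generators[key]

ctx = Context()
key = 'country-codes-EU'
if not ctx.has_generator(key):
    ctx.add_generator(key, random.Random(42))  # stand-in generator
print(ctx.get_generator(key).choice(['DE', 'FR', 'NL']))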