code
stringlengths
281
23.7M
def _get_settable_columns(source: ETLObjectBase, destination: ETLWritableObjectBase): settable_columns = [c for c in destination.columns if (((c in source.columns) or (c in destination.update_overrides)) and (c not in destination.key_columns))] if (not settable_columns): raise RuntimeError('No settable columns.') return settable_columns
def msgCheck(message):
    """Return the checkable text of the message being replied to.

    Text messages yield their body; photo/document messages yield their
    caption.  Any other content type now yields None — previously the
    function raised UnboundLocalError because msg_check was never bound.
    """
    replied = message.reply_to_message
    msg_check = None  # fix: was unbound for unsupported content types
    if replied.content_type == 'text':
        msg_check = replied.text
    elif replied.content_type in ('photo', 'document'):
        msg_check = replied.caption
    return msg_check
class Application(BaseLogic):
    """Connection logic that bundles multiple behaviors and applies the
    relevant ones to a connection for its lifetime."""

    name: str
    connection: ConnectionAPI
    # Behaviors are kept in an immutable tuple; add_child_behavior rebinds
    # the tuple rather than mutating it in place.
    _behaviors: Tuple[(BehaviorAPI, ...)] = ()

    def add_child_behavior(self, behavior: BehaviorAPI) -> None:
        # Register an additional behavior before apply() runs.
        self._behaviors += (behavior,)

    async def apply(self, connection: ConnectionAPI) -> AsyncIterator[asyncio.Task[Any]]:
        """Apply each applicable behavior to *connection* and yield a single
        task that completes when the first behavior task finishes."""
        self.connection = connection
        async with contextlib.AsyncExitStack() as stack:
            futures: List[asyncio.Task[Any]] = []
            # Enter each applicable behavior's async context on a shared
            # exit stack so they are all torn down together.
            for behavior in self._behaviors:
                if behavior.should_apply_to(connection):
                    fut = (await stack.enter_async_context(behavior.apply(connection)))
                    futures.append(fut)
            if (not futures):
                # No applicable behaviors: park on a never-ending task so the
                # wait_first call below still has something to await.
                futures.append(create_task(_never_ending_coro(), f'{connection.remote}/Application/{self.name}/no-behaviors-fut'))
            with connection.add_logic(self.name, self):
                name = f'{connection.remote}/Application/{self.name}/apply'
                (yield create_task(wait_first(futures, max_wait_after_cancellation=2), name=name))
def get_duration(duration_idx):
    """Translate a duration menu index into its duration constant.

    Index 0 and 6 map to Config-level values, 1-5 to _constants values,
    and any other index falls back to Config.DURATION_ALWAYS.
    """
    if duration_idx == 0:
        return Config.DURATION_ONCE
    if duration_idx == 1:
        return _constants.DURATION_30s
    if duration_idx == 2:
        return _constants.DURATION_5m
    if duration_idx == 3:
        return _constants.DURATION_15m
    if duration_idx == 4:
        return _constants.DURATION_30m
    if duration_idx == 5:
        return _constants.DURATION_1h
    if duration_idx == 6:
        return Config.DURATION_UNTIL_RESTART
    return Config.DURATION_ALWAYS
def CatchException(f):
    """Decorator: wrap generator *f* so any exception is rendered into the
    chat UI (as a formatted traceback plus proxy status) instead of
    terminating the stream."""
    (f)  # NOTE(review): no-op expression — looks like a stripped @wraps(f) decorator line; confirm upstream
    def decorated(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT=(- 1)):
        try:
            # Delegate to the wrapped generator on the happy path.
            (yield from f(txt, top_p, temperature, chatbot, history, systemPromptTxt, WEB_PORT))
        except Exception as e:
            # Imported lazily so the error path adds no cost to normal calls.
            from check_proxy import check_proxy
            from toolbox import get_conf
            (proxies,) = get_conf('proxies')
            tb_str = (('```\n' + trimmed_format_exc()) + '```')
            if (len(chatbot) == 0):
                # Ensure there is at least one chat entry to attach the error to.
                chatbot.clear()
                chatbot.append(['', ''])
            # Replace the last reply with the traceback and proxy diagnosis.
            chatbot[(- 1)] = (chatbot[(- 1)][0], f'''[Local Message] : {tb_str} : {check_proxy(proxies)}''')
            (yield from update_ui(chatbot=chatbot, history=history, msg=f' {e}'))
    return decorated
def audit_systems(url: str, headers: Dict[(str, str)], include_keys: Optional[List]=None) -> None:
    """Audit System resources fetched from the server for completeness.

    Fetches either the named systems (when include_keys is given) or every
    system, validates each one, and prints a summary of findings.
    """
    system_resources: Optional[Union[(List[FidesModel], List[Dict])]]
    if include_keys:
        system_resources = get_server_resources(url, 'system', include_keys, headers)
    else:
        system_resources = list_server_resources(url, headers, 'system', exclude_keys=[])
    if (not system_resources):
        print('No system resources were found.')
        return
    print(f'Found {len(system_resources)} System resource(s) to audit...')
    audit_findings = 0
    for system in system_resources:
        pretty_echo(f"Auditing System: {(system.name if isinstance(system, FidesModel) else system['name'])}")
        # Narrow the FidesModel/dict union to System before validation.
        assert isinstance(system, System)
        new_findings = validate_system_attributes(system, url, headers)
        audit_findings = (audit_findings + new_findings)
    if (audit_findings > 0):
        print(f'{audit_findings} issue(s) were detected in auditing system completeness.')
    else:
        echo_green('All audited system resource(s) compliant!')
# NOTE(review): the parenthesized line below is syntactically a bare keyword
# expression — it looks like a stripped decorator, e.g.
# @handlers.register(expression=...); confirm against the original source.
(expression='^Starting phase (\\d+)/4: (Forward Propagation into tmp files\\.\\.\\. (.+))?')
def phase_major(match: typing.Match[str], info: SpecificInfo) -> SpecificInfo:
    """Update plot-log parse state with the major phase number, plus the
    phase start time when the timestamp group matched."""
    major = int(match.group(1))
    timestamp = match.group(3)
    new_info = attr.evolve(info, phase=plotman.job.Phase(major=major, minor=0))
    if (timestamp is None):
        # The optional timestamp group did not match; keep the old started_at.
        return new_info
    return attr.evolve(new_info, started_at=parse_chia_plot_time(s=match.group(3)))
class HWB(base.HWB):
    """CSS serialization/parsing hooks for the HWB color space."""

    def to_string(self, parent: 'Color', *, alpha: Optional[bool]=None, precision: Optional[int]=None, fit: Union[(str, bool)]=True, none: bool=False, color: bool=False, **kwargs: Any) -> str:
        """Serialize the color to a CSS ``hwb()`` function string."""
        return serialize.serialize_css(parent, func='hwb', alpha=alpha, precision=precision, fit=fit, none=none, color=color)

    def match(self, string: str, start: int=0, fullmatch: bool=True) -> Optional[Tuple[(Tuple[(Vector, float)], int)]]:
        """Parse a CSS ``hwb()`` string; returns ((coords, alpha), end) or None."""
        return parse.parse_css(self, string, start, fullmatch)
class AMIClientListener(object):
    """Collection of callback hooks for AMI client events.

    Handlers may be supplied as keyword arguments at construction time;
    unknown names raise TypeError.  Every unoverridden handler raises
    NotImplementedError when invoked.
    """

    methods = ['on_action', 'on_response', 'on_event', 'on_connect', 'on_disconnect', 'on_unknown']

    def __init__(self, **kwargs):
        for name, handler in kwargs.items():
            # Only the declared hook names may be overridden.
            if name not in self.methods:
                raise TypeError(("'%s' is an invalid keyword argument for this function" % name))
            setattr(self, name, handler)

    def on_action(self, source, action):
        raise NotImplementedError()

    def on_response(self, source, response):
        raise NotImplementedError()

    def on_event(self, source, event):
        raise NotImplementedError()

    def on_connect(self, source):
        raise NotImplementedError()

    def on_disconnect(self, source, error=None):
        raise NotImplementedError()

    def on_unknown(self, source, pack):
        raise NotImplementedError()
class OptionPlotoptionsWaterfallSonificationTracksMappingNoteduration(Options):
    """Config accessors for waterfall sonification track note-duration mapping.

    NOTE(review): every name below is defined twice (getter then setter) and
    the second def shadows the first — this looks like stripped
    @property / @<name>.setter decorators; confirm against the original.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class DictEventTestCase(unittest.TestCase):
    """Exercise trait-dict change notifications (added/changed/removed) for
    each mutating dict operation on MyClass.d."""

    def test_setitem(self):
        # Overwriting an existing key reports 'changed' with the old value.
        cb = Callback(self, changed={'c': 'cherry'})
        foo = MyClass(cb)
        foo.d['c'] = 'coconut'
        self.assertTrue(cb.called)
        # Inserting a new key reports 'added'.
        cb = Callback(self, added={'g': 'guava'})
        bar = MyClass(cb)
        bar.d['g'] = 'guava'
        self.assertTrue(cb.called)

    def test_delitem(self):
        cb = Callback(self, removed={'b': 'banana'})
        foo = MyClass(cb)
        del foo.d['b']
        self.assertTrue(cb.called)

    def test_clear(self):
        # Clearing reports every current entry as removed.
        removed = MyClass(None).d.copy()
        cb = Callback(self, removed=removed)
        foo = MyClass(cb)
        foo.d.clear()
        self.assertTrue(cb.called)

    def test_update(self):
        # One update can produce both 'changed' and 'added' entries.
        update_dict = {'a': 'artichoke', 'f': 'fig'}
        cb = Callback(self, changed={'a': 'apple'}, added={'f': 'fig'})
        foo = MyClass(cb)
        foo.d.update(update_dict)
        self.assertTrue(cb.called)

    def test_setdefault(self):
        # Existing key: value returned, no event fired.
        cb = Callback(self)
        foo = MyClass(cb)
        self.assertEqual(foo.d.setdefault('a', 'dummy'), 'apple')
        self.assertFalse(cb.called)
        # Missing key: default stored and 'added' fired.
        cb = Callback(self, added={'f': 'fig'})
        bar = MyClass(cb)
        self.assertTrue((bar.d.setdefault('f', 'fig') == 'fig'))
        self.assertTrue(cb.called)

    def test_pop(self):
        # Missing key with default: no event.
        cb = Callback(self)
        foo = MyClass(cb)
        self.assertEqual(foo.d.pop('x', 'dummy'), 'dummy')
        self.assertFalse(cb.called)
        cb = Callback(self, removed={'c': 'cherry'})
        bar = MyClass(cb)
        self.assertEqual(bar.d.pop('c'), 'cherry')
        self.assertTrue(cb.called)

    def test_popitem(self):
        # Reduce the dict to a single known entry so popitem is deterministic.
        foo = MyClass(None)
        foo.d.clear()
        foo.d['x'] = 'xylophone'
        cb = Callback(self, removed={'x': 'xylophone'})
        foo.callback = cb
        self.assertEqual(foo.d.popitem(), ('x', 'xylophone'))
        self.assertTrue(cb.called)

    def test_dynamic_listener(self):
        # Attach/detach an on_trait_change listener around each mutation.
        foo = MyOtherClass()
        func = Callback(self, added={'g': 'guava'})
        foo.on_trait_change(func.__call__, 'd_items')
        foo.d['g'] = 'guava'
        foo.on_trait_change(func.__call__, 'd_items', remove=True)
        self.assertTrue(func.called)
        func2 = Callback(self, removed={'a': 'apple'})
        foo.on_trait_change(func2.__call__, 'd_items')
        del foo.d['a']
        foo.on_trait_change(func2.__call__, 'd_items', remove=True)
        self.assertTrue(func2.called)
        func3 = Callback(self, changed={'b': 'banana'})
        foo.on_trait_change(func3.__call__, 'd_items')
        foo.d['b'] = 'broccoli'
        foo.on_trait_change(func3.__call__, 'd_items', remove=True)
        self.assertTrue(func3.called)
def attention_ref(qkv, attn_mask, dropout_p, upcast=False, causal=False):
    """Dispatch to a reference attention implementation.

    NOTE(review): the ``True or ...`` condition forces the math path
    unconditionally, making the SDPA path below unreachable — presumably a
    deliberate temporary override; confirm before removing it.
    """
    if (True or (causal and (attn_mask is not None))):
        return attention_ref_math(qkv, attn_mask, dropout_p, upcast=upcast, causal=causal)
    return attention_ref_sdpa(qkv, attn_mask, dropout_p, upcast=upcast, causal=causal)
def test_circular_dependency_minimization(graph_circular_dependency, variable, copy_variable):
    """Out-of-SSA (minimization) on a CFG with circularly dependent copies:
    check the exact per-node instruction lists and the edge kinds."""
    (nodes, _, cfg) = graph_circular_dependency
    run_out_of_ssa(cfg, SSAOptions.minimization)
    # These two variables are address-taken below, so mark them aliased.
    variable[0].is_aliased = True
    variable[1].is_aliased = True
    # Expected instructions per node after copy insertion broke the cycle.
    assert ((nodes[0].instructions == [Assignment(ListOperation([]), Call(imp_function_symbol('printf'), [Constant()])), Assignment(variable[2], UnaryOperation(OperationType.address, [variable[0]], Integer.int32_t())), Assignment(ListOperation([]), Call(imp_function_symbol('scanf'), [Constant(), variable[2]])), Assignment(ListOperation([]), Call(imp_function_symbol('printf'), [Constant()])), Assignment(variable[3], UnaryOperation(OperationType.address, [variable[1]], Integer.int32_t())), Assignment(ListOperation([]), Call(imp_function_symbol('scanf'), [Constant(), variable[3]])), Assignment(copy_variable[3], variable[3]), Assignment(variable[4], variable[2]), Assignment(variable[2], Constant(1))]) and (nodes[1].instructions == [Assignment(variable[3], copy_variable[3]), Branch(Condition(OperationType.less_or_equal, [variable[2], Constant(20)]))]) and (nodes[2].instructions == [Assignment(variable[2], BinaryOperation(OperationType.plus, [variable[2], Constant(1)])), Assignment(copy_variable[3], variable[4]), Assignment(variable[4], variable[3])]) and (nodes[3].instructions == [Assignment(ListOperation([]), Call(imp_function_symbol('printf'), [Constant(), variable[4]]))]))
    # Four edges: 0->1 unconditional, 1->2 true, 1->3 false, 2->1 back edge.
    assert ((len(cfg.edges) == 4) and isinstance(cfg.get_edge(nodes[0], nodes[1]), UnconditionalEdge) and isinstance(cfg.get_edge(nodes[1], nodes[2]), TrueCase) and isinstance(cfg.get_edge(nodes[1], nodes[3]), FalseCase) and isinstance(cfg.get_edge(nodes[2], nodes[1]), UnconditionalEdge))
def extractYabaitranslationsBlogspotCom(item):
    """Build a release message for a yabaitranslations blog post.

    Returns None for previews or posts without chapter/volume info, the
    built message when a known tag matches, and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if 'preview' in item['title'].lower() or not (chp or vol):
        return None
    # (tag to look for, release name, translation type)
    tagmap = [('PRC', 'PRC', 'translated'), ('Loiterous', 'Loiterous', 'oel')]
    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class isIP(Validator):
    """Validate that a value is an IP address, with optional per-family
    constraints; delegates the detailed checks to isIPv4 / isIPv6.
    """

    message = 'Invalid IP address'

    def __init__(self, min='0.0.0.0', max='255.255.255.255', invert=False, localhost=None, private=None, auto=None, ipv4=None, link_local=None, reserved=None, multicast=None, routable=None, to4=None, teredo=None, subnets=None, ipv6=None, message=None):
        super().__init__(message=message)
        # NOTE(review): min/max are wrapped in 1-tuples and forwarded as-is
        # to isIPv4 — presumably isIPv4 expects tuples; confirm.
        self.minip = (min,)
        self.maxip = (max,)
        self.invert = invert
        self.is_localhost = localhost
        self.is_private = private  # fix: was assigned twice; duplicate removed
        self.is_automatic = auto
        self.is_ipv4 = ipv4
        self.is_link_local = link_local
        self.is_reserved = reserved
        self.is_multicast = multicast
        self.is_routable = routable
        self.is_6to4 = to4
        self.is_teredo = teredo
        self.subnets = subnets
        self.is_ipv6 = ipv6
        if (ipaddress is None):
            raise RuntimeError("You need 'ipaddress' python module to use isIP validator.")

    def __call__(self, value):
        """Validate *value*; returns the (value, error) pair produced by the
        delegated family validator, or (value, message) on failure."""
        try:
            ip = ipaddress.ip_address(value)
        except ValueError:
            return (value, translate(self.message))
        # An explicit family requirement that the parsed address violates
        # fails immediately.
        if (self.is_ipv4 and isinstance(ip, ipaddress.IPv6Address)):
            rv = (value, translate(self.message))
        elif (self.is_ipv6 and isinstance(ip, ipaddress.IPv4Address)):
            rv = (value, translate(self.message))
        elif (self.is_ipv4 or isinstance(ip, ipaddress.IPv4Address)):
            rv = isIPv4(min=self.minip, max=self.maxip, invert=self.invert, localhost=self.is_localhost, private=self.is_private, auto=self.is_automatic, message=self.message)(value)
        elif (self.is_ipv6 or isinstance(ip, ipaddress.IPv6Address)):
            rv = isIPv6(private=self.is_private, link_local=self.is_link_local, reserved=self.is_reserved, multicast=self.is_multicast, routable=self.is_routable, to4=self.is_6to4, teredo=self.is_teredo, subnets=self.subnets, message=self.message)(value)
        else:
            rv = (value, translate(self.message))
        return rv
class TestGetreadsRegexpFunction(unittest.TestCase):
    """getreads_regex should return only the FASTQ records whose header
    matches the supplied pattern."""

    def setUp(self):
        # Scratch directory holding the example FASTQ file.
        self.wd = tempfile.mkdtemp()
        # NOTE(review): the '\:' sequences look like mangled record headers
        # (likely '@<machine>:43:...' originally) — confirm against upstream.
        self.example_fastq_data = u':43:HL3LWBBXX:8:1101:21440:1121 1:N:0:CNATGT\nGCCNGACAGCAGAAAT\n+\nAAF#FJJJJJJJJJJJ\:43:HL3LWBBXX:8:1101:21460:1121 1:N:0:CNATGT\nGGGNGTCATTGATCAT\n+\nAAF#FJJJJJJJJJJJ\:43:HL3LWBBXX:8:1101:21805:1121 1:N:0:CNATGT\nCCCNACCCTTGCCTAC\n+\nAAF#FJJJJJJJJJJJ\n'

    def tearDown(self):
        shutil.rmtree(self.wd)

    def test_getreads_regexp_fastq(self):
        example_fastq = os.path.join(self.wd, 'example.fastq')
        with io.open(example_fastq, 'wt') as fp:
            fp.write(self.example_fastq_data)
        fastq_reads = getreads_regex(example_fastq, ':1101:21440:1121')
        # Only the first 4-line record matches the pattern.
        reference_reads = [self.example_fastq_data.split('\n')[i:(i + 4)] for i in (0,)]
        for (r1, r2) in zip(reference_reads, fastq_reads):
            self.assertEqual(r1, r2)
def send_email(user, vulnerabilities):
    """Render the vulnerability report template and email it over SMTPS.

    *user* is a comma-separated recipient list; smtp_server, smtp_port and
    from_address are module-level globals.  Exits the process on failure.
    """
    # Template path comes from the second CLI argument; the loader is rooted
    # at '/' so the absolute path resolves.
    template_file = os.path.abspath(sys.argv[2])
    templateLoader = FileSystemLoader(searchpath='/')
    templateEnv = Environment(loader=templateLoader)
    template = templateEnv.get_template(template_file)
    msg = MIMEText(template.render(vulnerabilities=vulnerabilities), _charset='utf-8')
    try:
        receivers = user.split(',')
        # NOTE(review): PROTOCOL_TLSv1_2 pins the TLS version; the modern ssl
        # recommendation is PROTOCOL_TLS_CLIENT — confirm before changing.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
        ctx.verify_mode = ssl.CERT_REQUIRED
        ctx.check_hostname = True
        ctx.load_default_certs()
        s = smtplib.SMTP_SSL(host=smtp_server, port=smtp_port, context=ctx)
        # Credentials are blank placeholders; the process refuses to continue
        # until they are filled in.
        username = ''
        password = ''
        if (len(username) == 0):
            sys.exit("Username can't be empty")
        if (len(password) == 0):
            sys.exit("Password can't be empty")
        s.login(username, password)
        s.ehlo_or_helo_if_needed()
        s.set_debuglevel(1)
        msg['Subject'] = 'Tietoturva-aukollinen sovellus loydetty sivuiltasi'
        msg['From'] = from_address
        msg['To'] = ', '.join(receivers)
        print(msg)
        s.sendmail(from_address, receivers, msg.as_string())
        s.quit()
    except smtplib.SMTPAuthenticationError:
        sys.exit('Authentication error when connecting to SMTP server.')
    except Exception:
        sys.exit(traceback.format_exc())
class Grid(object):
    """Find a monotone (right/down) path from (0, 0) to the bottom-right
    cell of a truthy-cell matrix, memoizing per-cell reachability."""

    def find_path(self, matrix):
        """Return the list of (row, col) cells from origin to the corner,
        or None when the matrix is empty/None or no path exists."""
        if not matrix:
            return None
        memo = {}
        route = []
        last_row = len(matrix) - 1
        last_col = len(matrix[0]) - 1
        found = self._find_path(matrix, last_row, last_col, memo, route)
        return route if found else None

    def _find_path(self, matrix, row, col, cache, path):
        # Out of bounds or blocked cell: dead end.
        if row < 0 or col < 0 or not matrix[row][col]:
            return False
        cell = (row, col)
        if cell in cache:
            return cache[cell]
        at_origin = row == 0 and col == 0
        # A cell is reachable if it is the origin or its left/upper
        # neighbor is reachable (checked left first, like the original).
        reachable = (at_origin
                     or self._find_path(matrix, row, col - 1, cache, path)
                     or self._find_path(matrix, row - 1, col, cache, path))
        cache[cell] = reachable
        if reachable:
            path.append(cell)
        return reachable
def get_serializable_branch_node(entity_mapping: OrderedDict, settings: SerializationSettings, entity: FlyteLocalEntity, options: Optional[Options]=None) -> BranchNodeModel:
    """Convert a local branch (if/elif/else) entity into its IDL BranchNodeModel."""
    first = to_serializable_case(entity_mapping, settings, entity._ifelse_block.case, options)
    other = to_serializable_cases(entity_mapping, settings, entity._ifelse_block.other, options)
    else_node_model = None
    # The else branch is optional; serialize it only when present.
    if entity._ifelse_block.else_node:
        else_node_model = get_serializable(entity_mapping, settings, entity._ifelse_block.else_node, options=options)
    return BranchNodeModel(if_else=_core_wf.IfElseBlock(case=first, other=other, else_node=else_node_model, error=entity._ifelse_block.error))
_user.command()  # NOTE(review): likely a stripped @_user.command() decorator — confirm upstream
_context  # NOTE(review): likely a stripped @_context decorator — confirm upstream
def execute(ctx):
    """Create a new Okta user from MODULE_OPTIONS, prompting for a password.

    Logs and indexes every outcome.  On request failure the command now
    aborts instead of dereferencing a missing response (the previous code
    set response = None and then crashed on response.ok).
    """
    error = MODULE.check_options()
    if error:
        return
    password = click.prompt('[*] Enter a password for the new user. The input for this value is hidden', hide_input=True)
    msg = f"Attempting to create new Okta user {MODULE_OPTIONS['login']['value']}"
    LOGGER.info(msg)
    index_event(ctx.obj.es, module=__name__, event_type='INFO', event=msg)
    click.echo(f'[*] {msg}')
    url = f'{ctx.obj.base_url}/users'
    headers = {'Accept': 'application/json', 'Content-Type': 'application/json', 'Authorization': f'SSWS {ctx.obj.api_token}'}
    # activate=true makes the user active immediately after creation.
    params = {'activate': 'true'}
    payload = {'profile': {'firstName': MODULE_OPTIONS['first_name']['value'], 'lastName': MODULE_OPTIONS['last_name']['value'], 'email': MODULE_OPTIONS['email']['value'], 'login': MODULE_OPTIONS['login']['value']}, 'groupIds': MODULE_OPTIONS['group_ids']['value'], 'credentials': {'password': {'value': password}}}
    try:
        response = ctx.obj.session.post(url, headers=headers, params=params, json=payload, timeout=7)
    except Exception as e:
        LOGGER.error(e, exc_info=True)
        index_event(ctx.obj.es, module=__name__, event_type='ERROR', event=e)
        click.secho(f'[!] {URL_OR_API_TOKEN_ERROR}', fg='red')
        # fix: previously fell through with response = None and raised
        # AttributeError on response.ok below; abort instead.
        return
    if response.ok:
        msg = f"Created new Okta user {MODULE_OPTIONS['login']['value']}"
        LOGGER.info(msg)
        index_event(ctx.obj.es, module=__name__, event_type='INFO', event=msg)
        click.secho(f'[*] {msg}', fg='green')
    else:
        msg = f'''Error creating new Okta user
Response Code: {response.status_code} | Response Reason: {response.reason}
Error Code: {response.json().get('errorCode')} | Error Summary: {response.json().get('errorSummary')}'''
        LOGGER.error(msg)
        index_event(ctx.obj.es, module=__name__, event_type='ERROR', event=msg)
        click.secho(f'[!] {msg}', fg='red')
        click.echo('Did you try and add the new user to a built-in group? E.g. "Everyone"')
    return
class OptionPlotoptionsGaugeSonificationTracksMappingPlaydelay(Options):
    """Config accessors for gauge sonification track play-delay mapping.

    NOTE(review): every name below is defined twice (getter then setter) and
    the second def shadows the first — this looks like stripped
    @property / @<name>.setter decorators; confirm against the original.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class LiteEthUDPRX(LiteXModule):
    """UDP receive datapath: strips the UDP header from the incoming IPv4
    user stream and forwards payload with port metadata; packets whose
    protocol field is not UDP are dropped."""

    def __init__(self, ip_address, dw=8):
        self.sink = sink = stream.Endpoint(eth_ipv4_user_description(dw))
        self.source = source = stream.Endpoint(eth_udp_user_description(dw))
        self.depacketizer = depacketizer = LiteEthUDPDepacketizer(dw)
        self.comb += [
            sink.connect(depacketizer.sink),
            # Forward parsed header fields; payload length excludes the UDP header.
            depacketizer.source.connect(source, keep={'src_port', 'dst_port', 'data', 'error'}),
            source.ip_address.eq(sink.ip_address),
            source.length.eq((depacketizer.source.length - udp_header.length)),
        ]
        # Byte counter used to cut the stream at the advertised UDP length.
        count = Signal(16)
        self.fsm = fsm = FSM(reset_state='IDLE')
        fsm.act('IDLE',
            NextValue(count, (dw // 8)),
            If(depacketizer.source.valid,
                # Default to DROP; overridden to RECEIVE for UDP packets.
                NextState('DROP'),
                If((sink.protocol == udp_protocol),
                    NextState('RECEIVE'))))
        fsm.act('RECEIVE',
            depacketizer.source.connect(source, keep={'valid', 'ready'}),
            source.last.eq((depacketizer.source.last | (count >= source.length))),
            # Propagate the upstream last_be, else derive a one-hot byte-enable
            # for the final (possibly partial) data word.
            If(depacketizer.source.last_be,
                source.last_be.eq(depacketizer.source.last_be)
            ).Elif(source.last,
                Case((source.length & ((dw // 8) - 1)), {
                    1: source.last_be.eq(1),
                    2: source.last_be.eq(2),
                    3: source.last_be.eq(4),
                    4: source.last_be.eq(8),
                    5: source.last_be.eq(16),
                    6: source.last_be.eq(32),
                    7: source.last_be.eq(64),
                    'default': source.last_be.eq((2 ** ((dw // 8) - 1)))})),
            If((source.valid & source.ready),
                NextValue(count, (count + (dw // 8))),
                If(depacketizer.source.last,
                    NextState('IDLE')
                ).Elif(source.last,
                    # Payload done but the packet continues: discard the rest.
                    NextState('DROP'))))
        fsm.act('DROP',
            depacketizer.source.ready.eq(1),
            If(((depacketizer.source.valid & depacketizer.source.last) & depacketizer.source.ready),
                NextState('IDLE')))
def list_file_parser(params):
    """Parse a ``email:password`` list file into credential dicts.

    Each line of the file named by params['list'] becomes a dict with
    'email', 'password' and the shared 'server' from params.  Prints a
    message and returns an empty list when the file does not exist.
    """
    return_val = []
    list_path = params.get('list')
    if isfile(list_path):
        # fix: the original called open(params.get('list', 'r')) — passing
        # 'r' as the dict *default*, not as the file mode.
        with open(list_path, 'r') as f:
            userfile_content = f.read().splitlines()
        server = params.get('server')
        for line in userfile_content:
            parts = line.split(':')
            return_val.append({'email': parts[0], 'password': parts[1], 'server': server})
    else:
        print('File not found!')
    return return_val
def parse_py_file(filepath):
    """Map line numbers to expected error codes parsed from '# E:' comments.

    Returns {line_number: [code, ...]} for each line of *filepath* (a
    pathlib.Path) containing a comment of the form '# E: CODE1, CODE2'.
    """
    pattern = re.compile('#[\\s]*E:[\\s]* (.*)')
    mapping = {}
    with filepath.open(encoding='utf-8') as fp:
        # enumerate from 1 so keys match editor line numbers.
        for lineno, line in enumerate(fp.readlines(), start=1):
            found = pattern.search(line)
            if found:
                codes = found.group(1).replace(' ', '').split(',')
                mapping[lineno] = codes
    return mapping
class OptionPlotoptionsBulletSonificationContexttracksMappingHighpassResonance(Options):
    """Config accessors for bullet sonification highpass-resonance mapping.

    NOTE(review): every name below is defined twice (getter then setter) and
    the second def shadows the first — this looks like stripped
    @property / @<name>.setter decorators; confirm against the original.
    """

    def mapFunction(self):
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        return self._config_get(None)

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        return self._config_get(None)

    def max(self, num: float):
        self._config(num, js_type=False)

    def min(self):
        return self._config_get(None)

    def min(self, num: float):
        self._config(num, js_type=False)

    def within(self):
        return self._config_get(None)

    def within(self, value: Any):
        self._config(value, js_type=False)
class FootnotePostTreeprocessor(Treeprocessor):
    """Amend the rendered footnote div with extra backref links for any
    footnote referenced more than once in the document."""

    def __init__(self, footnotes):
        self.footnotes = footnotes

    def add_duplicates(self, li, duplicates):
        """Clone the backref link in *li* once per extra reference."""
        for link in li.iter('a'):
            # Only operate on the backref anchor inside the list item.
            if (link.attrib.get('class', '') == 'footnote-backref'):
                (ref, rest) = link.attrib['href'].split(self.footnotes.get_separator(), 1)
                links = []
                for index in range(2, (duplicates + 1)):
                    # Duplicate the backref and point it at the numbered ref id.
                    sib_link = copy.deepcopy(link)
                    sib_link.attrib['href'] = ('%s%d%s%s' % (ref, index, self.footnotes.get_separator(), rest))
                    links.append(sib_link)
                    self.offset += 1
                # Append the clones to the li's last child element.
                el = list(li)[(- 1)]
                for l in links:
                    el.append(l)
                break

    def get_num_duplicates(self, li):
        """Return how many times the footnote behind *li* was referenced."""
        (fn, rest) = li.attrib.get('id', '').split(self.footnotes.get_separator(), 1)
        link_id = '{}ref{}{}'.format(fn, self.footnotes.get_separator(), rest)
        return self.footnotes.found_refs.get(link_id, 0)

    def handle_duplicates(self, parent):
        """Add duplicate backrefs for every multiply-referenced footnote."""
        for li in list(parent):
            count = self.get_num_duplicates(li)
            if (count > 1):
                self.add_duplicates(li, count)

    def run(self, root):
        """Locate the footnote div's ordered list and process it."""
        self.offset = 0
        for div in root.iter('div'):
            if (div.attrib.get('class', '') == 'footnote'):
                for ol in div.iter('ol'):
                    self.handle_duplicates(ol)
                # Only one footnote div exists; stop after handling it.
                break
class _AnimatedGIFEditor(Editor):
    """wxPython editor that displays an animated GIF named by the editor value."""

    # Whether the animation is currently playing (synced from the factory).
    playing = Bool(True)

    def init(self, parent):
        """Create the animation control for the current GIF file."""
        self._animate = Animation(self.value)
        self.control = GenericAnimationCtrl(parent, (- 1), self._animate)
        self.control.SetUseWindowBackgroundColour()
        self.sync_value(self.factory.playing, 'playing', 'from')
        self.set_tooltip()

    def update_editor(self):
        """Reload the GIF when the value changes, preserving play state."""
        if (not self.playing):
            self.control.Stop()
        self.control.LoadFile(self.value)
        self._file_loaded = True
        if self.playing:
            self.control.Play()

    def _playing_changed(self):
        # React to play-state changes only after a file has been loaded once.
        if self._file_loaded:
            if self.playing:
                self.control.Play()
            else:
                self.control.Stop()
def generate_op_class(entity, draw=draw_stalker_entity_menu_item, idpostfix='', label=None):
    """Dynamically build a bpy Menu subclass for one stalker entity.

    The class carries the entity's id/type/name as attributes and uses
    *draw* as its draw method; *idpostfix* disambiguates repeated menus.
    """
    base_idname = idname_template % (entity.entity_type, entity.id)
    idname = '%s%s' % (base_idname, idpostfix)
    attrs = {
        'bl_idname': idname,
        'bl_label': label if label else entity.name,
        'stalker_entity_id': entity.id,
        'stalker_entity_type': entity.entity_type,
        'stalker_entity_name': entity.name,
        'draw': draw,
    }
    return type(idname, (bpy.types.Menu,), attrs)
class AzureStorage(object):
    """File-manager storage backend on Azure Blob Storage.

    Blob storage has no real folders, so directories are emulated with
    zero-byte '.dir' marker blobs and '/'-separated blob names.
    """

    _fakedir = '.dir'  # marker blob name used to emulate empty directories
    _copy_poll_interval_seconds = 1  # poll period while a server-side copy completes
    _send_file_lookback = timedelta(minutes=15)  # SAS start skew tolerance
    _send_file_validity = timedelta(hours=1)  # SAS lifetime
    separator = '/'

    def __init__(self, container_name, connection_string):
        # BlockBlobService is None when the azure SDK failed to import.
        if (not BlockBlobService):
            raise ValueError('Could not import Azure Blob Storage SDK. You can install the SDK using pip install azure-storage-blob')
        self._container_name = container_name
        self._connection_string = connection_string
        self.__client = None

    # NOTE(review): used throughout as `self._client.<method>` without a call,
    # so this was almost certainly decorated with @property originally — confirm.
    def _client(self):
        # Lazily build the client and make sure the container exists.
        if (not self.__client):
            self.__client = BlockBlobService(connection_string=self._connection_string)
            self.__client.create_container(self._container_name, fail_on_exist=False)
        return self.__client

    # NOTE(review): first parameter is `cls` — likely a stripped @classmethod.
    def _get_blob_last_modified(cls, blob):
        # Convert the blob's tz-aware last-modified into Unix epoch seconds.
        last_modified = blob.properties.last_modified
        tzinfo = last_modified.tzinfo
        epoch = (last_modified - datetime(1970, 1, 1, tzinfo=tzinfo))
        return epoch.total_seconds()

    # NOTE(review): first parameter is `cls` — likely a stripped @classmethod.
    def _ensure_blob_path(cls, path):
        # Normalize an OS path into a '/'-separated blob name with no leading
        # separator; None passes through unchanged.
        if (path is None):
            return None
        path_parts = path.split(op.sep)
        return cls.separator.join(path_parts).lstrip(cls.separator)

    def get_files(self, path, directory):
        """List (name, rel_path, is_dir, size, last_modified) entries under *path*.

        Blobs directly at this level become files; deeper blobs contribute
        their next-level folder name to the folder set.
        """
        if (directory and (path != directory)):
            path = op.join(path, directory)
        path = self._ensure_blob_path(path)
        directory = self._ensure_blob_path(directory)
        path_parts = (path.split(self.separator) if path else [])
        num_path_parts = len(path_parts)
        folders = set()
        files = []
        for blob in self._client.list_blobs(self._container_name, path):
            blob_path_parts = blob.name.split(self.separator)
            name = blob_path_parts.pop()
            blob_is_file_at_current_level = (blob_path_parts == path_parts)
            blob_is_directory_file = (name == self._fakedir)
            if (blob_is_file_at_current_level and (not blob_is_directory_file)):
                rel_path = blob.name
                is_dir = False
                size = blob.properties.content_length
                last_modified = self._get_blob_last_modified(blob)
                files.append((name, rel_path, is_dir, size, last_modified))
            else:
                # Deeper blob (or '.dir' marker): record the next-level folder.
                next_level_folder = blob_path_parts[:(num_path_parts + 1)]
                folder_name = self.separator.join(next_level_folder)
                folders.add(folder_name)
        # Don't list the directory itself as its own child.
        folders.discard(directory)
        for folder in folders:
            name = folder.split(self.separator)[(- 1)]
            rel_path = folder
            is_dir = True
            size = 0
            last_modified = 0
            files.append((name, rel_path, is_dir, size, last_modified))
        return files

    def is_dir(self, path):
        # A path is a directory when it holds a '.dir' marker, or at least
        # two blobs share its prefix (so it cannot be a single leaf blob).
        path = self._ensure_blob_path(path)
        num_blobs = 0
        for blob in self._client.list_blobs(self._container_name, path):
            blob_path_parts = blob.name.split(self.separator)
            is_explicit_directory = (blob_path_parts[(- 1)] == self._fakedir)
            if is_explicit_directory:
                return True
            num_blobs += 1
            path_cannot_be_leaf = (num_blobs >= 2)
            if path_cannot_be_leaf:
                return True
        return False

    def path_exists(self, path):
        """True when *path* is the base path or at least one blob matches it."""
        path = self._ensure_blob_path(path)
        if (path == self.get_base_path()):
            return True
        try:
            next(iter(self._client.list_blobs(self._container_name, path)))
        except StopIteration:
            return False
        else:
            return True

    def get_base_path(self):
        # The container root has an empty blob-name prefix.
        return ''

    def get_breadcrumbs(self, path):
        """Return (folder_name, cumulative_path) pairs for each path segment."""
        path = self._ensure_blob_path(path)
        accumulator = []
        breadcrumbs = []
        for folder in path.split(self.separator):
            accumulator.append(folder)
            breadcrumbs.append((folder, self.separator.join(accumulator)))
        return breadcrumbs

    def send_file(self, file_path):
        """Redirect the client to a time-limited SAS URL for the blob."""
        file_path = self._ensure_blob_path(file_path)
        if (not self._client.exists(self._container_name, file_path)):
            raise ValueError()
        now = datetime.utcnow()
        url = self._client.make_blob_url(self._container_name, file_path)
        # Start in the past to tolerate clock skew between client and service.
        sas = self._client.generate_blob_shared_access_signature(self._container_name, file_path, BlobPermissions.READ, expiry=(now + self._send_file_validity), start=(now - self._send_file_lookback))
        return redirect(('%s?%s' % (url, sas)))

    def read_file(self, path):
        path = self._ensure_blob_path(path)
        blob = self._client.get_blob_to_bytes(self._container_name, path)
        return blob.content

    def write_file(self, path, content):
        path = self._ensure_blob_path(path)
        self._client.create_blob_from_text(self._container_name, path, content)

    def save_file(self, path, file_data):
        # file_data is a werkzeug-style upload object exposing .stream.
        path = self._ensure_blob_path(path)
        self._client.create_blob_from_stream(self._container_name, path, file_data.stream)

    def delete_tree(self, directory):
        # Blob storage has no recursive delete; remove each blob by prefix.
        directory = self._ensure_blob_path(directory)
        for blob in self._client.list_blobs(self._container_name, directory):
            self._client.delete_blob(self._container_name, blob.name)

    def delete_file(self, file_path):
        file_path = self._ensure_blob_path(file_path)
        self._client.delete_blob(self._container_name, file_path)

    def make_dir(self, path, directory):
        # Create the '.dir' marker blob that stands in for the directory.
        path = self._ensure_blob_path(path)
        directory = self._ensure_blob_path(directory)
        blob = self.separator.join([path, directory, self._fakedir])
        blob = blob.lstrip(self.separator)
        self._client.create_blob_from_text(self._container_name, blob, '')

    def _copy_blob(self, src, dst):
        # Server-side copy is asynchronous; poll until it reports success.
        src_url = self._client.make_blob_url(self._container_name, src)
        copy = self._client.copy_blob(self._container_name, dst, src_url)
        while (copy.status != 'success'):
            sleep(self._copy_poll_interval_seconds)
            copy = self._client.get_blob_properties(self._container_name, dst).properties.copy

    def _rename_file(self, src, dst):
        # Blob storage has no rename: copy then delete the original.
        self._copy_blob(src, dst)
        self.delete_file(src)

    def _rename_directory(self, src, dst):
        # Rename each blob under the prefix, replacing only the first occurrence.
        for blob in self._client.list_blobs(self._container_name, src):
            self._rename_file(blob.name, blob.name.replace(src, dst, 1))

    def rename_path(self, src, dst):
        """Rename a file or an entire emulated directory."""
        src = self._ensure_blob_path(src)
        dst = self._ensure_blob_path(dst)
        if self.is_dir(src):
            self._rename_directory(src, dst)
        else:
            self._rename_file(src, dst)
# NOTE(review): the leading line looks like a stripped decorator, e.g.
# @pytest.mark.parallel(nprocs=2) — confirm against the original source.
.parallel(nprocs=2)
def test_input_ordering_missing_point():
    """VertexOnlyMesh input-ordering round trip when one point lies outside
    the mesh: found points' data doubles, the missing point's data stays."""
    m = UnitIntervalMesh(4)
    # The last point (5.0) is outside the unit interval, hence "missing".
    points = np.asarray([[0.125], [0.375], [0.625], [5.0]])
    data = np.asarray([1.0, 2.0, 3.0, 4.0])
    vm = VertexOnlyMesh(m, points, missing_points_behaviour=None, redundant=True)
    P0DG_input_ordering = FunctionSpace(vm.input_ordering, 'DG', 0)
    data_input_ordering = Function(P0DG_input_ordering)
    # With redundant=True only rank 0 supplies the point data.
    if (vm.comm.rank == 0):
        data_input_ordering.dat.data_wo[:] = data
    else:
        data_input_ordering.dat.data_wo[:] = []
        assert (not len(data_input_ordering.dat.data_ro))
    assert np.array_equal(data_input_ordering.dat.data_ro_with_halos, data_input_ordering.dat.data_ro)
    P0DG = FunctionSpace(vm, 'DG', 0)
    data_on_vm = Function(P0DG).interpolate(data_input_ordering)
    # Each located vertex must carry the datum of its matching input point.
    for (data_at_point, point) in zip(data_on_vm.dat.data_ro_with_halos, vm.coordinates.dat.data_ro_with_halos):
        assert (data_at_point == data[(points.flatten() == point)])
    data_on_vm.assign((2 * data_on_vm))
    data_input_ordering.interpolate(data_on_vm)
    if (vm.comm.rank == 0):
        # Found points doubled; the missing point keeps its original value.
        assert np.allclose(data_input_ordering.dat.data_ro[0:3], (2 * data[0:3]))
        assert np.allclose(data_input_ordering.dat.data_ro[3], data[3])
    else:
        assert (not len(data_input_ordering.dat.data_ro))
class OptionChartAreaBorder(Options):
    """Chart-area border config accessors.

    NOTE(review): each name is defined twice (getter then setter) and the
    later def shadows the earlier one — likely stripped @property /
    @<name>.setter decorators; confirm against the original source.
    """

    def borderColor(self):
        return self._config_get()

    def borderColor(self, text: str):
        self._config(text)

    def borderWidth(self):
        return self._config_get()

    def borderWidth(self, num: int):
        self._config(num)

    def borderDash(self):
        return self._config_get()

    def borderDash(self, values: List[int]):
        self._config(values)

    def borderDashOffset(self):
        return self._config_get()

    def borderDashOffset(self, num: int):
        self._config(num)
class Instance(object):
    """Lightweight record tied to a parsed type definition.

    Field access is proxied to the internal _values dict and restricted to
    names in the type's lookup table; the type object handles (de)serialization.
    """

    __slots__ = ('_type', '_values', '_sizes')

    def __init__(self, type_, values, sizes=None):
        # object.__setattr__ bypasses our own __setattr__ validation below.
        object.__setattr__(self, '_type', type_)
        object.__setattr__(self, '_values', values)
        object.__setattr__(self, '_sizes', sizes)

    def __getattr__(self, attr):
        try:
            return self._values[attr]
        except KeyError:
            raise AttributeError(('Invalid attribute: %r' % attr))

    def __setattr__(self, attr, value):
        # Only fields declared by the type may be assigned.
        if (attr not in self._type.lookup):
            raise AttributeError(('Invalid attribute: %r' % attr))
        self._values[attr] = value

    def __getitem__(self, item):
        return self._values[item]

    def __contains__(self, attr):
        return (attr in self._values)

    def __repr__(self):
        # fix: was isinstance(v, (int, int)) — a leftover py2 (int, long) pair.
        return ('<%s %s>' % (self._type.name, ', '.join([('%s=%s' % (k, (hex(v) if isinstance(v, int) else repr(v)))) for (k, v) in self._values.items()])))

    def __len__(self):
        # Length of the serialized form.
        return len(self.dumps())

    def _size(self, field):
        return self._sizes[field]

    def write(self, fh):
        # Delegate serialization to the owning type definition.
        return self._type.write(fh, self)

    def dumps(self):
        """Return the serialized bytes of this instance."""
        s = BytesIO()
        self.write(s)
        return s.getvalue()
class BotPlugin(BotPluginBase):
    """Base class for bot plugins.

    Provides overridable lifecycle/configuration hooks and event callbacks
    (all no-ops by default), plus convenience methods that delegate to the
    underlying bot backend via ``self._bot``.
    """

    def get_configuration_template(self) -> Mapping:
        # Override to return a template dict describing expected config.
        return None

    def check_configuration(self, configuration: Mapping) -> None:
        # Validates the given configuration against the template's structure.
        recurse_check_structure(self.get_configuration_template(), configuration)

    def configure(self, configuration: Mapping) -> None:
        # Store the (already validated) configuration on the plugin.
        self.config = configuration

    def activate(self) -> None:
        super().activate()

    def deactivate(self) -> None:
        super().deactivate()

    # --- event callbacks: no-op defaults, override in subclasses ---

    def callback_connect(self) -> None:
        pass

    def callback_message(self, message: Message) -> None:
        pass

    def callback_mention(self, message: Message, mentioned_people: Sequence[Identifier]) -> None:
        pass

    def callback_presence(self, presence: Presence) -> None:
        pass

    def callback_reaction(self, reaction: Reaction) -> None:
        pass

    def callback_stream(self, stream: Stream) -> None:
        # File transfers are rejected unless a subclass opts in.
        stream.reject()

    def callback_botmessage(self, message: Message) -> None:
        pass

    def callback_room_joined(self, room: Room, identifier: Identifier, invited_by: Optional[Identifier]=None) -> None:
        pass

    def callback_room_left(self, room: Room, identifier: Identifier, kicked_by: Optional[Identifier]=None) -> None:
        pass

    def callback_room_topic(self, room: Room) -> None:
        pass

    # --- helpers delegating to the bot backend ---

    def warn_admins(self, warning: str) -> None:
        self._bot.warn_admins(warning)

    def send(self, identifier: Identifier, text: str, in_reply_to: Message=None, groupchat_nick_reply: bool=False) -> None:
        """Send *text* to *identifier*; rejects plain-string destinations."""
        if (not isinstance(identifier, Identifier)):
            raise ValueError('identifier needs to be of type Identifier, the old string behavior is not supported')
        return self._bot.send(identifier, text, in_reply_to, groupchat_nick_reply)

    def send_card(self, body: str='', to: Identifier=None, in_reply_to: Message=None, summary: str=None, title: str='', link: str=None, image: str=None, thumbnail: str=None, color: str='green', fields: Tuple[(Tuple[(str, str)], ...)]=()) -> None:
        """Send a rich 'card' message; destination comes from `to` or,
        failing that, from the sender of `in_reply_to`."""
        frm = (in_reply_to.to if in_reply_to else self.bot_identifier)
        if (to is None):
            if (in_reply_to is None):
                raise ValueError('Either to or in_reply_to needs to be set.')
            to = in_reply_to.frm
        self._bot.send_card(Card(body, frm, to, in_reply_to, summary, title, link, image, thumbnail, color, fields))

    def change_presence(self, status: str=ONLINE, message: str='') -> None:
        self._bot.change_presence(status, message)

    def send_templated(self, identifier: Identifier, template_name: str, template_parameters: Mapping, in_reply_to: Message=None, groupchat_nick_reply: bool=False) -> None:
        # Render a message from a named template and send it.
        return self._bot.send_templated(identifier=identifier, template_name=template_name, template_parameters=template_parameters, in_reply_to=in_reply_to, groupchat_nick_reply=groupchat_nick_reply)

    def build_identifier(self, txtrep: str) -> Identifier:
        # Parse a textual representation into a backend Identifier.
        return self._bot.build_identifier(txtrep)

    def send_stream_request(self, user: Identifier, fsource: IOBase, name: str=None, size: int=None, stream_type: str=None) -> Callable:
        return self._bot.send_stream_request(user, fsource, name, size, stream_type)

    def rooms(self) -> Sequence[Room]:
        return self._bot.rooms()

    def query_room(self, room: str) -> Room:
        return self._bot.query_room(room)

    def start_poller(self, interval: float, method: Callable[(..., None)], times: int=None, args: Tuple=None, kwargs: Mapping=None):
        # Schedule `method` to run every `interval` seconds (optionally `times` times).
        super().start_poller(interval, method, times, args, kwargs)

    def stop_poller(self, method: Callable[(..., None)], args: Tuple=None, kwargs: Mapping=None):
        super().stop_poller(method, args, kwargs)
class Migration(migrations.Migration):
    """Add db-indexed vote counters and aggregate score to the Activity model."""

    dependencies = [('manager', '0017_auto__1610')]

    operations = [
        migrations.AddField(model_name='activity', name='num_vote_down', field=models.PositiveIntegerField(db_index=True, default=0)),
        migrations.AddField(model_name='activity', name='num_vote_up', field=models.PositiveIntegerField(db_index=True, default=0)),
        # Score can go negative, hence a plain IntegerField.
        migrations.AddField(model_name='activity', name='vote_score', field=models.IntegerField(db_index=True, default=0)),
    ]
class TestCheckListEditorSimpleDemo(unittest.TestCase):
    """UI smoke test for the checklist-editor demo: clicking an item toggles
    its membership in ``demo.checklist``."""

    def test_checklist_editor_simple_demo(self):
        # Execute the demo script and grab its `demo` model object.
        demo = runpy.run_path(DEMO_PATH)['demo']
        tester = UITester()
        with tester.create_ui(demo) as ui:
            checklist = tester.find_by_id(ui, 'custom')
            item3 = checklist.locate(Index(2))
            # First click selects the third item ...
            item3.perform(MouseClick())
            self.assertEqual(demo.checklist, ['three'])
            # ... a second click deselects it again.
            item3.perform(MouseClick())
            self.assertEqual(demo.checklist, [])
class OptionSeriesSunburstSonificationDefaultspeechoptionsMappingPitch(Options):
    """Sonification pitch-mapping options for sunburst series speech tracks.

    NOTE(review): every option is defined twice as a same-named getter/setter
    pair — presumably @property / @<name>.setter decorators were lost in
    extraction; as written each setter shadows its getter. Confirm against
    the original source.
    """

    def mapFunction(self):
        # Mapping function applied to the data value.
        return self._config_get(None)

    def mapFunction(self, value: Any):
        self._config(value, js_type=False)

    def mapTo(self):
        # Data property the pitch is mapped to.
        return self._config_get('undefined')

    def mapTo(self, text: str):
        self._config(text, js_type=False)

    def max(self):
        # Upper bound of the mapped range.
        return self._config_get('undefined')

    def max(self, text: str):
        self._config(text, js_type=False)

    def min(self):
        # Lower bound of the mapped range.
        return self._config_get('undefined')

    def min(self, text: str):
        self._config(text, js_type=False)

    def within(self):
        # Range context within which min/max apply.
        return self._config_get('undefined')

    def within(self, text: str):
        self._config(text, js_type=False)
def _get_cls_name(config: Any, pop: bool=True) -> str: if ('_target_' not in config): raise InstantiationException('Input config does not have a `_target_` field') if pop: classname = config.pop('_target_') else: classname = config['_target_'] if (not isinstance(classname, str)): raise InstantiationException('_target_ field type must be a string') return classname
class CategoryWithStringPk(TreeNodeModel):
    """Tree category whose primary key is a random 64-char alphanumeric string."""

    treenode_display_field = 'name'

    def get_random_string():
        # BUGFIX: `string.letters` does not exist on Python 3 (removed from
        # the Python 2 API); `string.ascii_letters` is the correct name, so
        # default-key generation no longer raises AttributeError.
        return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(64))

    id = models.CharField(primary_key=True, max_length=100, default=get_random_string, editable=False)
    name = models.CharField(max_length=50, unique=True)

    class Meta(TreeNodeModel.Meta):
        app_label = 'tests'
        verbose_name = 'Category'
        verbose_name_plural = 'Categories'
class VenvEnvironmentService(_BentoMLService):
    """BentoML service wrapper that runs its commands inside a per-model virtualenv."""

    def __init__(self, model_id, config_json=None, preferred_port=None, url=None):
        # NOTE: `url` is accepted for signature compatibility but unused here.
        _BentoMLService.__init__(self, model_id=model_id, config_json=config_json, preferred_port=preferred_port)
        self.venv = SimpleVenv(self._model_path(model_id))

    def __enter__(self):
        self.serve()
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.close()

    def _model_path(self, model_id):
        # Models live in per-id subdirectories of the destination dir.
        return os.path.join(self._dest_dir, model_id)

    def _run_command(self, cmd):
        # Execute `cmd` inside the default virtualenv.
        return self.venv.run_commandlines(DEFAULT_VENV, cmd)

    def is_available(self):
        """Return whether the default virtualenv for this model exists.

        BUGFIX: previously only the non-existing branch returned a value,
        so an existing venv yielded None (falsy) and the service always
        appeared unavailable.
        """
        return self.venv.exists(DEFAULT_VENV)

    def serve(self):
        self._bentoml_serve(self._run_command)

    def close(self):
        self._close()

    def api(self, api_name, input):
        return self._api_with_url(api_name, input)
class DjangoModelPermissions(BasePermission):
    """Grant access based on the user's Django model permissions for the
    model of the view's queryset.

    Differences from stock DRF worth noting (see has_permission):
    GET/HEAD require the `view` permission, and GET is additionally allowed
    when the user holds only the `change` permission.
    """

    # HTTP method -> required permission codenames (format placeholders are
    # filled from the queryset model's meta).
    perms_map = {'GET': ['%(app_label)s.view_%(model_name)s'], 'OPTIONS': [], 'HEAD': ['%(app_label)s.view_%(model_name)s'], 'POST': ['%(app_label)s.add_%(model_name)s'], 'PUT': ['%(app_label)s.change_%(model_name)s'], 'PATCH': ['%(app_label)s.change_%(model_name)s'], 'DELETE': ['%(app_label)s.delete_%(model_name)s']}
    authenticated_users_only = True

    def get_required_permissions(self, method, model_cls):
        # Expand the perms_map templates for the given model.
        kwargs = {'app_label': model_cls._meta.app_label, 'model_name': model_cls._meta.model_name}
        if (method not in self.perms_map):
            raise exceptions.MethodNotAllowed(method)
        return [(perm % kwargs) for perm in self.perms_map[method]]

    def _queryset(self, view):
        # Resolve the view's queryset, preferring get_queryset().
        assert (hasattr(view, 'get_queryset') or (getattr(view, 'queryset', None) is not None)), 'Cannot apply {} on a view that does not set `.queryset` or have a `.get_queryset()` method.'.format(self.__class__.__name__)
        if hasattr(view, 'get_queryset'):
            queryset = view.get_queryset()
            assert (queryset is not None), '{}.get_queryset() returned None'.format(view.__class__.__name__)
            return queryset
        return view.queryset

    def has_permission(self, request, view):
        if ((not request.user) or ((not request.user.is_authenticated) and self.authenticated_users_only)):
            return False
        # Views can opt out of model permission checks entirely.
        if getattr(view, '_ignore_model_permissions', False):
            return True
        queryset = self._queryset(view)
        perms = self.get_required_permissions(request.method, queryset.model)
        change_perm = self.get_required_permissions('PUT', queryset.model)
        user = request.user
        if (request.method == 'GET'):
            # Holding the change permission implies read access.
            return (user.has_perms(perms) or user.has_perms(change_perm))
        return user.has_perms(perms)
class PublicKeyAccount():
    """An Ethereum account known only by its address (no private key held).

    NOTE(review): `balance`, `gas_used` and `nonce` take no arguments and
    `get_deployment_address` reads `self.nonce` without calling it —
    presumably @property decorators were lost in extraction; confirm
    against the original source.
    """

    def __init__(self, addr: str) -> None:
        self.address = _resolve_address(addr)

    def __repr__(self) -> str:
        return f"<{type(self).__name__} '{self.address}'>"

    def __hash__(self) -> int:
        return hash(self.address)

    def __str__(self) -> str:
        return self.address

    def __eq__(self, other: Union[(object, str)]) -> bool:
        # Accounts compare equal to other accounts and to address strings;
        # an unparseable string is simply unequal.
        if isinstance(other, str):
            try:
                address = _resolve_address(other)
                return (address == self.address)
            except ValueError:
                return False
        if isinstance(other, PublicKeyAccount):
            return (other.address == self.address)
        return super().__eq__(other)

    def balance(self) -> Wei:
        # Current on-chain balance in wei.
        balance = web3.eth.get_balance(self.address)
        return Wei(balance)

    def gas_used(self) -> int:
        # Total gas consumed by transactions this account has sent
        # (per the locally recorded tx history).
        return sum((i.gas_used for i in history.from_sender(self.address)))

    def nonce(self) -> int:
        return web3.eth.get_transaction_count(self.address)

    def get_deployment_address(self, nonce: Optional[int]=None) -> EthAddress:
        """CREATE deployment address for this sender at `nonce`:
        last 20 bytes of keccak(rlp([sender, nonce]))."""
        if (nonce is None):
            nonce = self.nonce
        address = HexBytes(self.address)
        raw = rlp.encode([address, nonce])
        deployment_address = keccak(raw)[12:]
        return EthAddress(deployment_address)
class UseNote(models.Model):
    """Free-form note attached to a Use (and, historically, a Booking)."""

    created = models.DateTimeField(auto_now_add=True)
    created_by = models.ForeignKey(User, null=True)
    # Kept for legacy rows; new notes should reference `use` instead.
    booking_deprecated = models.ForeignKey(Booking, blank=True, null=True, related_name='booking_notes')
    use = models.ForeignKey(Use, blank=False, null=False, related_name='use_notes')
    note = models.TextField(blank=True, null=True)

    def __str__(self):
        # NOTE(review): `self.booking` does not match any field declared here
        # (the field is `booking_deprecated`); unless a `booking` property is
        # provided elsewhere this raises AttributeError — confirm and fix.
        return ('%s - %d: %s' % (self.created.date(), self.booking.id, self.note))
class OptionPlotoptionsStreamgraphSonificationDefaultinstrumentoptions(Options):
    """Default instrument-track options for streamgraph sonification.

    NOTE(review): scalar options are defined twice as same-named
    getter/setter pairs — presumably @property / @<name>.setter decorators
    were lost in extraction; as written each setter shadows its getter.
    Confirm against the original source.
    """

    def activeWhen(self) -> 'OptionPlotoptionsStreamgraphSonificationDefaultinstrumentoptionsActivewhen':
        # Sub-options controlling when the track is active.
        return self._config_sub_data('activeWhen', OptionPlotoptionsStreamgraphSonificationDefaultinstrumentoptionsActivewhen)

    def instrument(self):
        # Instrument preset name.
        return self._config_get('piano')

    def instrument(self, text: str):
        self._config(text, js_type=False)

    def mapping(self) -> 'OptionPlotoptionsStreamgraphSonificationDefaultinstrumentoptionsMapping':
        # Sub-options mapping data properties to sound parameters.
        return self._config_sub_data('mapping', OptionPlotoptionsStreamgraphSonificationDefaultinstrumentoptionsMapping)

    def midiName(self):
        # MIDI instrument name override.
        return self._config_get(None)

    def midiName(self, text: str):
        self._config(text, js_type=False)

    def pointGrouping(self) -> 'OptionPlotoptionsStreamgraphSonificationDefaultinstrumentoptionsPointgrouping':
        return self._config_sub_data('pointGrouping', OptionPlotoptionsStreamgraphSonificationDefaultinstrumentoptionsPointgrouping)

    def roundToMusicalNotes(self):
        # Whether pitches snap to musical notes.
        return self._config_get(True)

    def roundToMusicalNotes(self, flag: bool):
        self._config(flag, js_type=False)

    def showPlayMarker(self):
        return self._config_get(True)

    def showPlayMarker(self, flag: bool):
        self._config(flag, js_type=False)

    def type(self):
        # Track type; fixed default is 'instrument'.
        return self._config_get('instrument')

    def type(self, text: str):
        self._config(text, js_type=False)
class ReadWriteDbInterface(ReadOnlyDbInterface):
    """Read/write variant of the DB interface: adds a commit/rollback session scope."""

    def __init__(self, connection: (DbConnection | None)=None):
        super().__init__(connection=(connection or ReadWriteConnection()))

    def get_read_write_session(self) -> Session:
        # Generator-style session scope: commit on success, roll back and
        # translate the error on failure, always invalidate the session.
        # NOTE(review): written as a generator — presumably decorated with
        # @contextmanager in the original source; confirm.
        session = self.connection.session_maker()
        try:
            (yield session)
            session.commit()
        except (SQLAlchemyError, DbInterfaceError) as err:
            session.rollback()
            # Serialization problems get a dedicated error type so callers
            # can distinguish them from generic write failures.
            if ('not JSON serializable' in str(err)):
                raise DbSerializationError() from err
            message = 'Database error when trying to write to the database'
            logging.exception(f'{message}: {err}')
            raise DbInterfaceError(message) from err
        finally:
            session.invalidate()
.flaky(reruns=1, condition=(arg_preload_port is not False))  # NOTE(review): truncated decorator — likely `@pytest.mark.flaky(...)`; restore from the original source.
class EsptoolTestCase():
    """Base class for esptool integration tests: helpers for running
    esptool/espsecure subprocesses and verifying flash contents."""

    def run_espsecure(self, args):
        # Run `python -m espsecure <args>` and return combined stdout/stderr.
        cmd = ([sys.executable, '-m', 'espsecure'] + args.split(' '))
        print('\nExecuting {}...'.format(' '.join(cmd)))
        try:
            output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
            output = output.decode('utf-8')
            print(output)
            return output
        except subprocess.CalledProcessError as e:
            print(e.output)
            raise e

    def run_esptool(self, args, baud=None, chip=None, port=None, preload=True):
        """Run esptool with chip/port/baud from the arguments or session
        defaults; on USB-JTAG chips optionally preload a dummy binary first
        to disable the RTC watchdog."""

        def run_esptool_process(cmd):
            print('Executing {}...'.format(' '.join(cmd)))
            try:
                output = subprocess.check_output([str(s) for s in cmd], cwd=TEST_DIR, stderr=subprocess.STDOUT)
                return output.decode('utf-8')
            except subprocess.CalledProcessError as e:
                print(e.output.decode('utf-8'))
                raise e

        # Prefer an explicitly provided esptool entry point over `-m esptool`.
        try:
            esptool = [os.environ['ESPTOOL_PY']]
        except KeyError:
            esptool = ['-m', 'esptool']
        trace_arg = (['--trace'] if arg_trace else [])
        base_cmd = (([sys.executable] + esptool) + trace_arg)
        if (chip or ((arg_chip is not None) and (chip != 'auto'))):
            base_cmd += ['--chip', (chip or arg_chip)]
        if (port or (arg_port is not None)):
            base_cmd += ['--port', (port or arg_port)]
        if (baud or (arg_baud is not None)):
            base_cmd += ['--baud', str((baud or arg_baud))]
        usb_jtag_serial_reset = (['--before', 'usb_reset'] if arg_preload_port else [])
        full_cmd = ((base_cmd + usb_jtag_serial_reset) + args.split(' '))
        if (preload and arg_preload_port and (arg_chip in ['esp32c3', 'esp32s3', 'esp32c6', 'esp32h2'])):
            # Point the preload command at the preload port instead.
            port_index = (base_cmd.index('--port') + 1)
            base_cmd[port_index] = arg_preload_port
            preload_cmd = (base_cmd + ['--no-stub', 'load_ram', f'{TEST_DIR}/images/ram_helloworld/helloworld-{arg_chip}.bin'])
            print('\nPreloading dummy binary to disable RTC watchdog...')
            run_esptool_process(preload_cmd)
            print('Dummy binary preloaded successfully.')
            time.sleep(0.3)
        print(f'''
Running the "{args}" command...''')
        output = run_esptool_process(full_cmd)
        print(output)
        return output

    def run_esptool_error(self, args, baud=None):
        # Run esptool expecting failure (exit code 2); return its output.
        with pytest.raises(subprocess.CalledProcessError) as fail:
            self.run_esptool(args, baud)
        failure = fail.value
        assert (failure.returncode == 2)
        return failure.output.decode('utf-8')

    def setup_class(self):
        print()
        print((50 * '*'))
        # Run everything from the test directory; restored in teardown_class.
        self.stored_dir = os.getcwd()
        os.chdir(TEST_DIR)

    def teardown_class(self):
        os.chdir(self.stored_dir)

    def readback(self, offset, length, spi_connection=None):
        """Read `length` bytes of flash at `offset` via read_flash; return them."""
        dump_file = tempfile.NamedTemporaryFile(delete=False)
        try:
            cmd = f'--before default_reset read_flash {offset} {length} {dump_file.name}'
            if spi_connection:
                cmd += f' --spi-connection {spi_connection}'
            self.run_esptool(cmd)
            with open(dump_file.name, 'rb') as f:
                rb = f.read()
            assert (length == len(rb)), f'read_flash length {length} offset {offset:#x} yielded {len(rb)} bytes!'
            return rb
        finally:
            dump_file.close()
            os.unlink(dump_file.name)

    def verify_readback(self, offset, length, compare_to, is_bootloader=False, spi_connection=None):
        """Read back flash and compare byte-for-byte against file `compare_to`.
        For bootloader images the header (bytes 1..7) may be rewritten by
        esptool, so only byte 0 is compared and the rest is skipped."""
        rb = self.readback(offset, length, spi_connection)
        with open(compare_to, 'rb') as f:
            ct = f.read()
        if (len(rb) != len(ct)):
            print(f"WARNING: Expected length {len(ct)} doesn't match comparison {len(rb)}")
        print(f'Readback {len(rb)} bytes')
        if is_bootloader:
            assert (ct[0] == rb[0]), 'First bytes should be identical'
            rb = rb[8:]
            ct = ct[8:]
        for (rb_b, ct_b, offs) in zip(rb, ct, range(len(rb))):
            assert (rb_b == ct_b), f'First difference at offset {offs:#x} Expected {ct_b} got {rb_b}'
class ServerSettings(SOASettings):
    """Settings schema and defaults for a PySOA server: transport, middleware,
    client routing, logging, harakiri, request logging, heartbeat file, and
    field redaction. `extra_schema` / `extra_defaults` allow extension."""

    # Validation schema for the settings dict.
    schema = dict({
        'transport': fields.ClassConfigurationSchema(base_class=BaseServerTransport),
        'middleware': fields.List(
            fields.ClassConfigurationSchema(base_class=ServerMiddleware),
            description='The list of all `ServerMiddleware` objects that should be applied to requests processed by this server',
        ),
        'client_routing': fields.SchemalessDictionary(
            key_type=fields.UnicodeString(),
            value_type=fields.SchemalessDictionary(),
            description='Client settings for sending requests to other services; keys should be service names, and values should be the corresponding configuration dicts, which will be validated using the ClientSettings schema.',
        ),
        'logging': PYTHON_LOGGING_CONFIG_SCHEMA,
        'harakiri': fields.Dictionary(
            {
                'timeout': fields.Integer(gte=0, description='Seconds of inactivity before harakiri is triggered; 0 to disable, defaults to 300'),
                'shutdown_grace': fields.Integer(gt=0, description='Seconds to forcefully shutdown after harakiri is triggered if shutdown does not occur'),
            },
            description='Instructions for automatically terminating a server process when request processing takes longer than expected.',
        ),
        'request_log_success_level': PythonLogLevel(description='The logging level at which full request and response contents will be logged for successful requests'),
        'request_log_error_level': PythonLogLevel(description='The logging level at which full request and response contents will be logged for requests whose responses contain errors (setting this to a more severe level than `request_log_success_level` will allow you to easily filter for unsuccessful requests)'),
        'heartbeat_file': fields.Nullable(fields.UnicodeString(description='If specified, the server will create a heartbeat file at the specified path on startup, update the timestamp in that file after the processing of every request or every time idle operations are processed, and delete the file when the server shuts down. The file name can optionally contain the specifier {{pid}}, which will be replaced with the server process PID. Finally, the file name can optionally contain the specifier {{fid}}, which will be replaced with the unique-and-deterministic forked process ID whenever the server is started with the --fork option (the minimum value is always 1 and the maximum value is always equal to the value of the --fork option).')),
        'extra_fields_to_redact': fields.Set(
            fields.UnicodeString(),
            description='Use this field to supplement the set of fields that are automatically redacted/censored in request and response fields with additional fields that your service needs redacted.',
        ),
    }, **extra_schema)

    # Values used when a setting is not explicitly configured.
    defaults = dict({
        'client_routing': {},
        'logging': {
            'version': 1,
            'formatters': {
                'console': {'format': '%(asctime)s %(levelname)7s %(correlation_id)s %(request_id)s: %(message)s'},
                'syslog': {'format': '%(service_name)s_service: %(name)s %(levelname)s %(module)s %(process)d correlation_id %(correlation_id)s request_id %(request_id)s %(message)s'},
            },
            'filters': {'pysoa_logging_context_filter': {'()': 'pysoa.common.logging.PySOALogContextFilter'}},
            'handlers': {
                'console': {'level': 'INFO', 'class': 'logging.StreamHandler', 'formatter': 'console', 'filters': ['pysoa_logging_context_filter']},
                'syslog': {'level': 'INFO', 'class': 'pysoa.common.logging.SyslogHandler', 'facility': SyslogHandler.LOG_LOCAL7, 'address': ('localhost', 514), 'formatter': 'syslog', 'filters': ['pysoa_logging_context_filter']},
            },
            'loggers': {},
            'root': {'handlers': ['console'], 'level': 'INFO'},
            'disable_existing_loggers': False,
        },
        'harakiri': {'timeout': 300, 'shutdown_grace': 30},
        'request_log_success_level': 'INFO',
        'request_log_error_level': 'INFO',
        'heartbeat_file': None,
        'extra_fields_to_redact': set(),
        'transport': {'path': 'pysoa.common.transport.redis_gateway.server:RedisServerTransport'},
    }, **extra_defaults)
def extractClaudeyoungladyWordpressCom(item):
    """Parse a release-feed *item* for claudeyounglady.wordpress.com.

    Returns None for previews or items without a chapter/volume number,
    a built release message when a known tag matches, and False otherwise.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if 'preview' in title.lower() or not (chp or vol):
        return None
    # (tag to look for, series name to report, translation type)
    known_tags = (
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series, tl_type in known_tags:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
def fortios_firewall(data, fos, check_mode):
    """Dispatch the firewall/policy64 task to FortiOS and normalize the result.

    Returns the raw check-mode result when `check_mode` is set; otherwise a
    4-tuple of (failed?, changed?, raw response, {}).  `changed` honours the
    response's `revision_changed` flag when present, else defaults to True.
    """
    fos.do_member_operation('firewall', 'policy64')
    if data['firewall_policy64']:
        resp = firewall_policy64(data, fos, check_mode)
    else:
        # NOTE: fail_json is expected to terminate the task; if it ever
        # returned, `resp` below would be unbound.
        fos._module.fail_json(msg=('missing task body: %s' % 'firewall_policy64'))
    if check_mode:
        return resp
    return ((not is_successful_status(resp)), (is_successful_status(resp) and (resp['revision_changed'] if ('revision_changed' in resp) else True)), resp, {})
def build_python(build, *, datadir=None, optlevel=0, prefix=None, force=False, verbose=False):
    """Invoke CPYTHON_SCRIPT to build CPython; return True on a zero exit code.

    Options left unset are passed to the script as the '-' placeholder.
    """
    def or_dash(value):
        # The helper script expects '-' to mean "not specified".
        return value if value else '-'

    argv = [
        CPYTHON_SCRIPT,
        '--datadir', or_dash(datadir),
        '--build', or_dash(build),
        '--optlevel', str(optlevel) if optlevel is not None else '-',
        '--prefix', or_dash(prefix),
    ]
    for flag, enabled in (('--force', force), ('--verbose', verbose)):
        if enabled:
            argv.append(flag)
    proc = subprocess.run(argv)
    return proc.returncode == 0
class TestCase(unittest.TestCase):
    """unittest.TestCase wired for TestSlide: registers mock assertions as
    cleanups and unpatches all TestSlide mocks after each test."""

    def setUp(self) -> None:
        # Assertions declared on mocks get verified at test teardown.
        testslide.mock_callable.register_assertion = (lambda assertion: self.addCleanup(assertion))
        self.addCleanup(testslide.mock_callable.unpatch_all_callable_mocks)
        self.addCleanup(testslide.mock_constructor.unpatch_all_constructor_mocks)
        self.addCleanup(testslide.patch_attribute.unpatch_all_mocked_attributes)
        super(TestCase, self).setUp()

    # NOTE(review): the helpers below declare no `self` — presumably they
    # carried @staticmethod decorators lost in extraction; otherwise the
    # instance would be forwarded as the first positional argument. Confirm
    # against the original source.
    def mock_callable(*args: Any, **kwargs: Any) -> testslide.mock_callable._MockCallableDSL:
        return testslide.mock_callable.mock_callable(*args, **kwargs)

    def mock_async_callable(*args: Any, **kwargs: Any) -> testslide.mock_callable._MockCallableDSL:
        return testslide.mock_callable.mock_async_callable(*args, **kwargs)

    def mock_constructor(*args: Any, **kwargs: Any) -> testslide.mock_constructor._MockConstructorDSL:
        return testslide.mock_constructor.mock_constructor(*args, **kwargs)

    def patch_attribute(*args: Any, **kwargs: Any) -> None:
        return testslide.patch_attribute.patch_attribute(*args, **kwargs)
def pql_temptable(expr: T.table, const: T.bool.as_nullable()=objects.null):
    """Create a temporary table in the active database from *expr*.

    `const` arrives as a Preql value (possibly null) and is lowered to a
    Python value before use; its exact effect is applied by
    new_table_from_expr.
    """
    const = cast_to_python(const)
    assert_type(expr.type, T.table, expr, 'temptable')
    # Unique, DB-qualified name so repeated calls never collide.
    name = get_db().qualified_name(Id(unique_name('temp')))
    # Fresh `__unwind__` scope: table creation must not consume or leak any
    # pending unwind state from the caller.
    with use_scope({'__unwind__': []}):
        return new_table_from_expr(name, expr, const, True)
class TestDate(unittest.TestCase):
    """Unit tests for the Date trait: defaults, assignment validation, and
    the allow_none / allow_datetime options."""

    def test_default(self):
        obj = HasDateTraits()
        self.assertEqual(obj.simple_date, None)
        self.assertEqual(obj.epoch, UNIX_EPOCH)
        self.assertEqual(obj.alternative_epoch, NT_EPOCH)

    def test_assign_date(self):
        test_date = datetime.date(1975, 2, 13)
        obj = HasDateTraits()
        obj.simple_date = test_date
        self.assertEqual(obj.simple_date, test_date)

    def test_assign_non_date(self):
        # Strings are rejected even when they look like dates.
        obj = HasDateTraits()
        with self.assertRaises(TraitError) as exception_context:
            obj.simple_date = '1975-2-13'
        message = str(exception_context.exception)
        self.assertIn('must be a non-datetime date, but', message)

    def test_assign_none_with_allow_none_not_given(self):
        # Default behaviour: None is rejected and the old value kept.
        obj = HasDateTraits(simple_date=UNIX_EPOCH)
        with self.assertRaises(TraitError) as exception_context:
            obj.simple_date = None
        self.assertEqual(obj.simple_date, UNIX_EPOCH)
        message = str(exception_context.exception)
        self.assertIn('must be a non-datetime date, but', message)

    def test_assign_none_with_allow_none_false(self):
        obj = HasDateTraits(none_prohibited=UNIX_EPOCH)
        with self.assertRaises(TraitError) as exception_context:
            obj.none_prohibited = None
        message = str(exception_context.exception)
        self.assertIn('must be a non-datetime date, but', message)

    def test_assign_none_with_allow_none_true(self):
        obj = HasDateTraits(none_allowed=UNIX_EPOCH)
        self.assertIsNotNone(obj.none_allowed)
        obj.none_allowed = None
        self.assertIsNone(obj.none_allowed)

    def test_assign_datetime_with_allow_datetime_false(self):
        test_datetime = datetime.datetime(1975, 2, 13)
        obj = HasDateTraits()
        with self.assertRaises(TraitError) as exception_context:
            obj.datetime_prohibited = test_datetime
        message = str(exception_context.exception)
        self.assertIn('must be a non-datetime date, but', message)

    def test_assign_datetime_with_allow_datetime_true(self):
        test_datetime = datetime.datetime(1975, 2, 13)
        obj = HasDateTraits()
        obj.datetime_allowed = test_datetime
        self.assertEqual(obj.datetime_allowed, test_datetime)

    def test_assign_datetime_with_allow_datetime_not_given(self):
        # Default behaviour: datetimes are rejected and the old value kept.
        test_date = datetime.date(2023, 1, 11)
        test_datetime = datetime.datetime(1975, 2, 13)
        obj = HasDateTraits(simple_date=test_date)
        with self.assertRaises(TraitError) as exception_context:
            obj.simple_date = test_datetime
        self.assertEqual(obj.simple_date, test_date)
        message = str(exception_context.exception)
        self.assertIn('must be a non-datetime date, but', message)

    def test_allow_none_false_allow_datetime_false(self):
        obj = HasDateTraits(strict=UNIX_EPOCH)
        with self.assertRaises(TraitError) as exception_context:
            obj.strict = None
        message = str(exception_context.exception)
        self.assertIn('must be a non-datetime date, but', message)

    _traitsui  # NOTE(review): extraction artifact — likely a mangled `@requires_traitsui`-style decorator; restore from the original source.
    def test_get_editor(self):
        obj = HasDateTraits()
        trait = obj.base_trait('epoch')
        editor_factory = trait.get_editor()
        self.assertIsInstance(editor_factory, traitsui.api.DateEditor)
class aggregate_stats_request(stats_request):
    """OpenFlow version-3 (1.2) aggregate-flow-statistics request message.

    NOTE: machine-generated protocol-marshalling code (loxigen style); the
    str-based padding and `''.join` indicate Python-2-era bytes handling —
    prefer fixing the generator over hand-editing this class.
    """

    version = 3      # OF wire protocol version (1.2)
    type = 18        # message type: stats request
    stats_type = 2   # stats subtype: aggregate

    def __init__(self, xid=None, flags=None, table_id=None, out_port=None, out_group=None, cookie=None, cookie_mask=None, match=None):
        # Each field falls back to a zero/empty default when not provided.
        if (xid != None):
            self.xid = xid
        else:
            self.xid = None
        if (flags != None):
            self.flags = flags
        else:
            self.flags = 0
        if (table_id != None):
            self.table_id = table_id
        else:
            self.table_id = 0
        if (out_port != None):
            self.out_port = out_port
        else:
            self.out_port = 0
        if (out_group != None):
            self.out_group = out_group
        else:
            self.out_group = 0
        if (cookie != None):
            self.cookie = cookie
        else:
            self.cookie = 0
        if (cookie_mask != None):
            self.cookie_mask = cookie_mask
        else:
            self.cookie_mask = 0
        if (match != None):
            self.match = match
        else:
            self.match = ofp.match()
        return

    def pack(self):
        # Serialize header + body; the length field (slot 2) is patched in
        # last, once the total size is known.
        packed = []
        packed.append(struct.pack('!B', self.version))
        packed.append(struct.pack('!B', self.type))
        packed.append(struct.pack('!H', 0))
        packed.append(struct.pack('!L', self.xid))
        packed.append(struct.pack('!H', self.stats_type))
        packed.append(struct.pack('!H', self.flags))
        packed.append(('\x00' * 4))
        packed.append(struct.pack('!B', self.table_id))
        packed.append(('\x00' * 3))
        packed.append(util.pack_port_no(self.out_port))
        packed.append(struct.pack('!L', self.out_group))
        packed.append(('\x00' * 4))
        packed.append(struct.pack('!Q', self.cookie))
        packed.append(struct.pack('!Q', self.cookie_mask))
        packed.append(self.match.pack())
        length = sum([len(x) for x in packed])
        packed[2] = struct.pack('!H', length)
        return ''.join(packed)

    def unpack(reader):
        # NOTE(review): no `self` parameter — presumably decorated with
        # @staticmethod in the original source; confirm.
        obj = aggregate_stats_request()
        _version = reader.read('!B')[0]
        assert (_version == 3)
        _type = reader.read('!B')[0]
        assert (_type == 18)
        _length = reader.read('!H')[0]
        # Constrain further reads to this message's declared length.
        orig_reader = reader
        reader = orig_reader.slice(_length, 4)
        obj.xid = reader.read('!L')[0]
        _stats_type = reader.read('!H')[0]
        assert (_stats_type == 2)
        obj.flags = reader.read('!H')[0]
        reader.skip(4)
        obj.table_id = reader.read('!B')[0]
        reader.skip(3)
        obj.out_port = util.unpack_port_no(reader)
        obj.out_group = reader.read('!L')[0]
        reader.skip(4)
        obj.cookie = reader.read('!Q')[0]
        obj.cookie_mask = reader.read('!Q')[0]
        obj.match = ofp.match.unpack(reader)
        return obj

    def __eq__(self, other):
        # Field-by-field structural equality.
        if (type(self) != type(other)):
            return False
        if (self.xid != other.xid):
            return False
        if (self.flags != other.flags):
            return False
        if (self.table_id != other.table_id):
            return False
        if (self.out_port != other.out_port):
            return False
        if (self.out_group != other.out_group):
            return False
        if (self.cookie != other.cookie):
            return False
        if (self.cookie_mask != other.cookie_mask):
            return False
        if (self.match != other.match):
            return False
        return True

    def pretty_print(self, q):
        # Debug pretty-printer over a pretty-printing queue object.
        q.text('aggregate_stats_request {')
        with q.group():
            with q.indent(2):
                q.breakable()
                q.text('xid = ')
                if (self.xid != None):
                    q.text(('%#x' % self.xid))
                else:
                    q.text('None')
                q.text(',')
                q.breakable()
                q.text('flags = ')
                value_name_map = {}
                q.text(util.pretty_flags(self.flags, value_name_map.values()))
                q.text(',')
                q.breakable()
                q.text('table_id = ')
                q.text(('%#x' % self.table_id))
                q.text(',')
                q.breakable()
                q.text('out_port = ')
                q.text(util.pretty_port(self.out_port))
                q.text(',')
                q.breakable()
                q.text('out_group = ')
                q.text(('%#x' % self.out_group))
                q.text(',')
                q.breakable()
                q.text('cookie = ')
                q.text(('%#x' % self.cookie))
                q.text(',')
                q.breakable()
                q.text('cookie_mask = ')
                q.text(('%#x' % self.cookie_mask))
                q.text(',')
                q.breakable()
                q.text('match = ')
                q.pp(self.match)
            q.breakable()
        q.text('}')
def copy_extension_dir(extension: str, work_dir: str) -> None:
    """Copy one built lab-extension directory from the configured source tree
    into *work_dir*, preserving the build/labextensions/<extension> layout.
    """
    # `global config` was removed: the module-level config is only read here,
    # so the declaration was unnecessary.
    extension_package_source_dir = os.path.join(config.source_dir, 'build/labextensions/', extension)
    extension_package_dest_dir = os.path.join(work_dir, 'build/labextensions/', extension)
    # Ensure the destination's parent exists; copytree creates the final dir.
    os.makedirs(os.path.dirname(extension_package_dest_dir), exist_ok=True)
    shutil.copytree(extension_package_source_dir, extension_package_dest_dir)
def pattern_match(patterns, out):
    """Find all non-overlapping occurrences of each pattern in *out*.

    Parameters
    ----------
    patterns : dict
        Maps a label to the byte pattern to search for.
    out : bytearray
        The buffer to scan.

    Returns
    -------
    dict
        Maps each label to the (possibly empty) list of offsets at which its
        pattern occurs. Each search resumes after the end of the previous
        match, so overlapping occurrences are not reported.
    """
    # isinstance instead of exact-type comparison (accepts subclasses);
    # incompatible inputs still raise AssertionError as before.
    assert isinstance(patterns, dict)
    assert isinstance(out, bytearray)
    matches = {}
    for label, pattern in patterns.items():
        offsets = []
        pos = out.find(pattern)
        while pos != -1:
            offsets.append(pos)
            pos = out.find(pattern, pos + len(pattern))
        matches[label] = offsets
    return matches
() def raw_config_full(): return binascii.unhexlify('af006c0075006d006eccf006c0075006d006e0043006f0075006ebf006c0075006d006e004de9c0000759c0000779c0000879c0000799c0000749c00008c9c00008d9c0000e49c0000929c00007a9c0000849c0000839c0000939c0000889c0000949c0000959c0000969c0000979c0000989c0000769c0000789c0000809c0000819c0000919c0000859c0000829ceca005ce0064006fcdcc0070002e0064006c006cc006fcccccffff007cc0000004c006ff006edc006ccc0000005cf006f006f006b006db0046006f006ebcdc006cceaed006faff006c006ceafcfc0069006ecccf006e0054006ff006cfd0062006f006caa002f002f006dc002e006df0073006fe0063006f006d002f0064006f0077006e006c006ffd0062006f006cdcccf006e002ecdcf0063006d006f006e002ecfecafe0073002eccf0063006d006f006eeccfecedcf004d004a005fcf005fcfcac0065002ecadcddcc006fccf006c0075006dcccf006fcdccf006fcccccecfc0069006eecc')
def generate_download(download_job: DownloadJob, origination: Optional[str]=None):
    """Generate a download zip for *download_job*: write each data source,
    optionally add a data dictionary and file description, then (when not
    running locally) upload the zip to S3.  The working directory and the
    lookup rows are always cleaned up; failures are recorded on the job
    before being re-raised.
    """
    json_request = json.loads(download_job.json_request)
    columns = json_request.get('columns', None)
    limit = json_request.get('limit', None)
    piid = json_request.get('piid', None)
    award_id = json_request.get('award_id')
    assistance_id = json_request.get('assistance_id')
    file_format = json_request.get('file_format')
    request_type = json_request.get('request_type')
    # Tag the current tracing span with the request type, when available.
    span = tracer.current_span()
    if (span and request_type):
        span.resource = request_type
    file_name = start_download(download_job)
    working_dir = None
    try:
        if ((limit is not None) and (limit > MAX_DOWNLOAD_LIMIT)):
            raise Exception(f'Unable to process this download because it includes more than the current limit of {MAX_DOWNLOAD_LIMIT} records')
        zip_file_path = (settings.CSV_LOCAL_PATH + file_name)
        # Remote runs always start from a clean zip; local runs may reuse it.
        if ((not settings.IS_LOCAL) and os.path.exists(zip_file_path)):
            os.remove(zip_file_path)
        working_dir = os.path.splitext(zip_file_path)[0]
        if (not os.path.exists(working_dir)):
            os.mkdir(working_dir)
        write_to_log(message=f'Generating {file_name}', download_job=download_job)
        sources = get_download_sources(json_request, download_job, origination)
        for source in sources:
            # Sources with no requested columns still get an empty file so
            # the zip's structure stays predictable.
            source_column_count = len(source.columns(columns))
            if (source_column_count == 0):
                create_empty_data_file(source, download_job, working_dir, piid, assistance_id, zip_file_path, file_format)
            else:
                download_job.number_of_columns += source_column_count
                parse_source(source, columns, download_job, working_dir, piid, assistance_id, zip_file_path, limit, file_format)
        include_data_dictionary = json_request.get('include_data_dictionary')
        if include_data_dictionary:
            add_data_dictionary_to_zip(working_dir, zip_file_path)
        include_file_description = json_request.get('include_file_description')
        if include_file_description:
            write_to_log(message='Adding file description to zip file')
            file_description = build_file_description(include_file_description['source'], sources)
            file_description = file_description.replace('[AWARD_ID]', str(award_id))
            file_description_path = save_file_description(working_dir, include_file_description['destination'], file_description)
            append_files_to_zip_file([file_description_path], zip_file_path)
        download_job.file_size = os.stat(zip_file_path).st_size
    except InvalidParameterException as e:
        exc_msg = 'InvalidParameterException was raised while attempting to process the DownloadJob'
        fail_download(download_job, e, exc_msg)
        raise InvalidParameterException(e)
    except Exception as e:
        exc_msg = 'An exception was raised while attempting to process the DownloadJob'
        fail_download(download_job, e, exc_msg)
        raise Exception(download_job.error_message) from e
    finally:
        # Always clean the scratch dir, spawned workers and lookup rows.
        if (working_dir and os.path.exists(working_dir)):
            shutil.rmtree(working_dir)
        _kill_spawned_processes(download_job)
        DownloadJobLookup.objects.filter(download_job_id=download_job.download_job_id).delete()
    if (not settings.IS_LOCAL):
        # Upload the finished zip to S3, with tracing spans around the call.
        with tracer.trace(name=f'job.{JOB_TYPE}.download.s3', service='bulk-download', resource=f's3://{settings.BULK_DOWNLOAD_S3_BUCKET_NAME}', span_type=SpanTypes.WORKER) as span, tracer.trace(name='s3.command', service='aws.s3', resource='.'.join([multipart_upload.__module__, (multipart_upload.__qualname__ or multipart_upload.__name__)]), span_type=SpanTypes.WEB) as s3_span:
            span.set_tag('file_name', file_name)
            try:
                bucket = settings.BULK_DOWNLOAD_S3_BUCKET_NAME
                region = settings.USASPENDING_AWS_REGION
                s3_span.set_tags({'bucket': bucket, 'region': region, 'file': zip_file_path})
                start_uploading = time.perf_counter()
                multipart_upload(bucket, region, zip_file_path, os.path.basename(zip_file_path))
                write_to_log(message=f'Uploading took {(time.perf_counter() - start_uploading):.2f}s', download_job=download_job)
            except Exception as e:
                exc_msg = 'An exception was raised while attempting to upload the file'
                fail_download(download_job, e, exc_msg)
                if isinstance(e, InvalidParameterException):
                    raise InvalidParameterException(e)
                else:
                    raise Exception(download_job.error_message) from e
            finally:
                # The local zip is no longer needed once the upload attempt ends.
                if os.path.exists(zip_file_path):
                    os.remove(zip_file_path)
                _kill_spawned_processes(download_job)
    return finish_download(download_job)
def multi_unscramble(coins, addrs):
    """Return a copy of *coins* in which every entry that appears in *addrs*
    is rewritten so the addresses come out grouped in *addrs* order.

    Each address in *addrs* is emitted as many times as it occurs in *coins*
    before the cursor advances to the next address; entries not in *addrs*
    are left untouched at their positions.
    """
    result = list(coins)
    # How many slots each address must fill, in addrs order.
    remaining = [result.count(addr) for addr in addrs]
    cursor = 0
    for idx, coin in enumerate(result):
        if coin in addrs:
            result[idx] = addrs[cursor]
            remaining[cursor] -= 1
            # Move to the next address once its quota is exhausted.
            if remaining[cursor] == 0:
                cursor += 1
    return result
def test_custom_render_body():
    """A Response subclass overriding render_body() can post-process the payload."""

    class CustomResponse(falcon.asgi.Response):
        # Appends a trailing newline, but only to text/plain responses.
        async def render_body(self):
            body = (await super().render_body())
            if (not self.content_type.startswith('text/plain')):
                return body
            if (not body.endswith(b'\n')):
                return (body + b'\n')
            return body

    class HelloResource():
        async def on_get(self, req, resp):
            resp.content_type = falcon.MEDIA_TEXT
            resp.text = 'Hello, World!'

    # Install the custom response type app-wide via response_type.
    app = falcon.asgi.App(response_type=CustomResponse)
    app.add_route('/', HelloResource())
    resp = testing.simulate_get(app, '/')
    assert (resp.headers['Content-Type'] == 'text/plain; charset=utf-8')
    # The newline proves the overridden render_body() ran.
    assert (resp.text == 'Hello, World!\n')
# NOTE(review): the leading '.skipif(...)' and the bare '(sql)' before the inner
# 'def test_user_demo_test' look like decorators whose '@pytest.mark' / patch
# prefixes were lost during extraction — restore them before running.
.skipif(('pandas' not in sys.modules), reason='Pandas is not installed.')
def test_wf1_with_sql_with_patch():
    """A workflow wrapping a SQLTask returns the mocked DataFrame when patched."""
    import pandas as pd
    sql = SQLTask('my-query', query_template="SELECT * FROM hive.city.fact_airport_sessions WHERE ds = '{{ .Inputs.ds }}' LIMIT 10", inputs=kwtypes(ds=datetime.datetime), outputs=kwtypes(results=FlyteSchema), metadata=TaskMetadata(retries=2))

    # NOTE(review): presumably decorated with @task originally — confirm.
    def t1() -> datetime.datetime:
        return datetime.datetime.now()

    # NOTE(review): presumably decorated with @workflow originally — confirm.
    def my_wf() -> FlyteSchema:
        dt = t1()
        return sql(ds=dt)

    (sql)
    def test_user_demo_test(mock_sql):
        # mock_sql is presumably injected by a patch decorator on this function.
        mock_sql.return_value = pd.DataFrame(data={'x': [1, 2], 'y': ['3', '4']})
        assert (my_wf().open().all() == pd.DataFrame(data={'x': [1, 2], 'y': ['3', '4']})).all().all()
    test_user_demo_test()
    # The context stack must be back to its baseline depth after execution.
    assert (context_manager.FlyteContextManager.size() == 1)
def test_foreign_city(client, awards_and_transactions):
    """Award 13's recipient location serializes with foreign (Canadian) fields populated."""
    resp = client.get('/api/v2/awards/13/')
    assert (resp.status_code == status.HTTP_200_OK)
    # Full expected shape of the recipient.location sub-document.
    assert (json.loads(resp.content.decode('utf-8'))['recipient']['location'] == {'address_line1': '123 main st', 'address_line2': None, 'address_line3': None, 'foreign_province': None, 'city_name': 'Ontario', 'county_code': '019', 'county_name': None, 'state_code': 'ONT', 'state_name': None, 'zip5': '12204', 'zip4': '5312', 'foreign_postal_code': None, 'country_name': 'CANADA', 'location_country_code': 'CAN', 'congressional_code': '03'})
def main():
    """Parse CLI options and serve the API with bjoern on all interfaces."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--port', '-p', help='Port to run the API webserver on', type=int, default=8000)
    arg_parser.add_argument('--prom_addr', '-a', help='Prometheus address connected to Poseidon, i.e. "prometheus:9090"', default='prometheus:9090')
    options = arg_parser.parse_args()
    # Expose the Prometheus address to the rest of the app via the environment.
    os.environ['PROM_ADDR'] = options.prom_addr
    bjoern.run(api, '0.0.0.0', options.port)
class TradeEnv(object):
    """Configuration/environment holder with lazily-built DEBUG/NOTE/WARN printers.

    Attributes default from `defaults` (several honor TD_* environment
    variables) and can be overridden by a properties object or keyword args.
    """

    # Default attribute values; data/csv/tmp dirs fall back to TD_* env vars.
    defaults = {'debug': 0, 'detail': 0, 'quiet': 0, 'color': False, 'dataDir': (os.environ.get('TD_DATA') or os.path.join(os.getcwd(), 'data')), 'csvDir': (os.environ.get('TD_CSV') or os.environ.get('TD_DATA') or os.path.join(os.getcwd(), 'data')), 'tmpDir': (os.environ.get('TD_TMP') or os.path.join(os.getcwd(), 'tmp')), 'templateDir': os.path.join(_ROOT, 'templates'), 'cwDir': os.getcwd()}
    encoding = sys.stdout.encoding
    # On non-UTF-8 terminals install a print wrapper that survives encode errors.
    if (str(sys.stdout.encoding).upper() != 'UTF-8'):

        def uprint(self, *args, **kwargs):
            """print() wrapper: on UnicodeEncodeError, warn once (unless quiet)
            and retry with unencodable characters replaced."""
            try:
                print(*args, **kwargs)
            except UnicodeEncodeError as e:
                if (not self.quiet):
                    print("CAUTION: Your terminal/console couldn't handle some text I tried to print.")
                    # EXCEPTIONS env var opts into a full traceback for debugging.
                    if ('EXCEPTIONS' in os.environ):
                        traceback.print_exc()
                    else:
                        print(str(e))
                # Re-encode with replacement so the output still appears.
                strs = [str(arg).encode(TradeEnv.encoding, errors='replace').decode(TradeEnv.encoding) for arg in args]
                print(*strs, **kwargs)
    else:
        uprint = print

    def __init__(self, properties=None, **kwargs):
        # Precedence: kwargs > properties.__dict__ > class defaults.
        properties = (properties or dict())
        self.__dict__.update(self.defaults)
        if properties:
            self.__dict__.update(properties.__dict__)
        if kwargs:
            self.__dict__.update(kwargs)

    def __getattr__(self, key):
        """Lazily build and cache DEBUG<n>/NOTE/WARN printer callables.

        NOTE(review): unknown keys return None instead of raising
        AttributeError — confirm callers rely on this before changing.
        """
        if key.startswith('DEBUG'):

            def __DEBUG_ENABLED(outText, *args, **kwargs):
                print('#', outText.format(*args, **kwargs))

            def __DEBUG_DISABLED(*args, **kwargs):
                pass
            # DEBUG3 -> level 3; enabled only when self.debug exceeds it.
            debugLevel = int(key[5:])
            if (self.debug > debugLevel):
                debugFn = __DEBUG_ENABLED
            else:
                debugFn = __DEBUG_DISABLED
            # Cache on the instance so __getattr__ is not hit again for this key.
            setattr(self, key, debugFn)
            return debugFn
        if (key == 'NOTE'):

            def __NOTE_ENABLED(outText, *args, file=None, **kwargs):
                self.uprint('NOTE:', str(outText).format(*args, **kwargs), file=file)

            def __NOTE_DISABLED(*args, **kwargs):
                pass
            if (not self.quiet):
                noteFn = __NOTE_ENABLED
            else:
                noteFn = __NOTE_DISABLED
            setattr(self, key, noteFn)
            return noteFn
        if (key == 'WARN'):

            def _WARN_ENABLED(outText, *args, file=None, **kwargs):
                self.uprint('WARNING:', str(outText).format(*args, **kwargs), file=file)

            def _WARN_DISABLED(*args, **kwargs):
                pass
            # Warnings are suppressed only at quiet level 2 and above.
            noteFn = (_WARN_DISABLED if (self.quiet > 1) else _WARN_ENABLED)
            setattr(self, key, noteFn)
            return noteFn
        return None
# NOTE(review): this fixture appears corrupted by extraction: the leading
# "(scope='function')" is a stripped "@pytest.fixture" decorator, and the
# 'base_url' f-string literal is unterminated (the Jira base URL was lost).
# Restore both against the original source before running. Intent (from the
# visible code): create a throwaway Jira user for erasure tests, wait for it
# to propagate, and yield it to the test.
(scope='function') def jira_create_erasure_data(jira_connection_config: ConnectionConfig, jira_erasure_identity_email: str) -> None: jira_secrets = jira_connection_config.secrets base_url = f" body = {'name': 'Ethyca Test Erasure', 'emailAddress': jira_erasure_identity_email} users_response = requests.post(url=f'{base_url}/rest/api/3/user', json=body, auth=(jira_secrets['username'], jira_secrets['api_key'])) user = users_response.json() sleep(30) (yield user)
class TestIgPgTgSerialize(util.ColorAssertsPyTest):
    """Serialization cases for the custom --igpgtg color space."""

    # (input color string, to_string() options, expected serialization)
    COLORS = [('color(--igpgtg 0.75 0.1 -0.1 / 0.5)', {}, 'color(--igpgtg 0.75 0.1 -0.1 / 0.5)'), ('color(--igpgtg 0.75 0.1 -0.1)', {'alpha': True}, 'color(--igpgtg 0.75 0.1 -0.1 / 1)'), ('color(--igpgtg 0.75 0.1 -0.1 / 0.5)', {'alpha': False}, 'color(--igpgtg 0.75 0.1 -0.1)'), ('color(--igpgtg none 0.1 -0.1)', {}, 'color(--igpgtg 0 0.1 -0.1)'), ('color(--igpgtg none 0.1 -0.1)', {'none': True}, 'color(--igpgtg none 0.1 -0.1)'), ('color(--igpgtg 0.75 1.2 -0.1)', {}, 'color(--igpgtg 0.75 1.2 -0.1)'), ('color(--igpgtg 0.75 1.2 -0.1)', {'fit': False}, 'color(--igpgtg 0.75 1.2 -0.1)')]

    # NOTE(review): '.parametrize' is a stripped '@pytest.mark.parametrize'
    # decorator — restore before running.
    .parametrize('color1,options,color2', COLORS)
    def test_colors(self, color1, options, color2):
        """Each color serializes to the expected string under the given options."""
        self.assertEqual(Color(color1).to_string(**options), color2)
# NOTE(review): '.slow' and '.skipif' are stripped '@pytest.mark.*' decorators —
# restore before running.
.slow
.skipif((not GPU_TESTS_ENABLED), reason='requires GPU')
def test_generate_sample(falcon_generator):
    """Sampling-based generation is reproducible under a fixed torch seed."""
    prompts = ['What is spaCy?\n', 'What is spaCy?\n']
    # Pin the RNG so sampled continuations are deterministic.
    torch.manual_seed(0)
    assert (falcon_generator(prompts, config=SampleGeneratorConfig(top_k=10)) == ["spaCy is a Python package for natural language processing and text analysis. It is specifically designed for text classification tasks and can be used in a variety of fields, including healthcare, finance, and marketing. spaCy's main feature is its ability to extract text and its associated entities, which are then used to perform various analyses on the text, such as sentiment analysis, named entity recognition, and entity extraction.", "spaCy is a library for natural language processing in Python. It's built on top of NLTK's WordNet, and uses it to create a set of spaCy-compatible data structures for representing words and phrases in text. It's also a grammar-based approach to word and phrase similarity matching, and provides an API for building custom grammars."])
    # Same seed, different sampling params (top_k/temperature) -> different text.
    torch.manual_seed(0)
    assert (falcon_generator(prompts, config=SampleGeneratorConfig(top_k=5, temperature=2)) == ['spacy is a Natural Language Processing tool that can be used with many programming language to build NLP-based applications such as machine learning, sentiment analysis and chatbots, and to extract text from documents and other media. spacy uses the Stanford NLP model to learn from and generate text. This makes it one of the most popular and versatile NLP libraries available. The main features of spacy include text generation and manipulation, entity extraction, part-of-speech tagging and more.', "spaCy is a library for natural language processing in Scala. It's designed to be easy to use and provides a range of features including text analysis tools and pre-built models to analyze various text datasets.</s> \nCan spaCy be used to analyze text from different sources or does it only work with a specific type of text or data?</s> \nspaCy can analyze text from different sources and can be used to work with text in different languages such as German and Chinese."])
def get_audio_player(duration: float, filename: str, pkg: typing.Optional[str]=None, start_at: float=BEGINNING) -> Stimulus:
    """Build an audio-playback stimulus backed by a Qt media player.

    The media content is resolved lazily via a Deferred so the file path is
    looked up when the player is configured, not when this function runs.
    """
    media_source = Deferred('player.getMediaContent', get_filepath(filename, pkg))
    return QtStimulus(
        start_at=start_at,
        duration=duration,
        qt_type='QMediaPlayerWithMediaContent',
        callbacks=[('setMedia', media_source)],
    )
# NOTE(review): '.skip', '.gpu' and '.skipif' are stripped '@pytest.mark.*'
# decorators — restore before running.
.skip(reason='CI runner needs more GPU memory')
.gpu
.skipif((not has_torch_cuda_gpu), reason='needs GPU & CUDA')
def test_init_from_config():
    """A pipeline auto-filled from _NLP_CONFIG contains exactly the 'llm' component."""
    orig_config = Config().from_str(_NLP_CONFIG)
    nlp = spacy.util.load_model_from_config(orig_config, auto_fill=True)
    assert (nlp.pipe_names == ['llm'])
    # Free GPU memory so subsequent tests on the shared runner do not OOM.
    torch.cuda.empty_cache()
def _click_to_tree(ctx: click.Context, node: Union[(click.Command, click.MultiCommand)], ancestors: Union[(list, None)]=None):
    """Recursively convert a click command/group into a JSON-serializable tree.

    Parameters:
        ctx: active click context, used to resolve group subcommands.
        node: the command or group to describe.
        ancestors: parent commands from the root down to (excluding) *node*;
            None (the default) marks *node* as the root.

    Returns:
        OrderedDict with keys 'is_group', 'name', 'short_help', 'help',
        'path' and, for non-empty groups, 'childs'.
    """
    # Fix: annotation now matches the None default (was 'ancestors: list=None').
    if (ancestors is None):
        ancestors = []
    res_childs = []
    res = OrderedDict()
    res['is_group'] = isinstance(node, click.core.MultiCommand)
    if res['is_group']:
        # Resolve subcommands, listing plain commands before nested groups.
        children = [node.get_command(ctx, key) for key in node.list_commands(ctx)]
        children = sorted(children, key=(lambda c: isinstance(c, click.core.MultiCommand)))
        for child in children:
            # 'ancestors + [node]' already builds a fresh list; the former
            # 'ancestors[:] + [node]' copied twice for no benefit.
            res_childs.append(_click_to_tree(ctx, child, (ancestors + [node])))
    res['name'] = node.name
    # click marks no-rewrap text with \x08; keep only the part before it.
    res['short_help'] = node.get_short_help_str().split('\x08')[0]
    res['help'] = node.help
    path_parts = (ancestors + [node])
    root = click_web._flask_app.config['APPLICATION_ROOT'].rstrip('/')
    res['path'] = ((root + '/') + '/'.join((p.name for p in path_parts)))
    if res_childs:
        res['childs'] = res_childs
    return res
def panel(ui):
    """Build the top-level Qt widget for *ui*: a single group panel, a tab
    widget for multiple groups, or None for no groups; optionally wrapped in
    a scroll area when the UI is scrollable."""
    ui.info.bind_context()
    groups = ui._groups
    group_count = len(groups)
    if group_count == 0:
        widget = None
    elif group_count == 1:
        widget = _GroupPanel(groups[0], ui).control
    else:
        widget = QtGui.QTabWidget()
        _fill_panel(widget, groups, ui)
        widget.ui = ui
    if ui.scrollable and widget is not None:
        # A bare layout cannot go into a scroll area; host it in a QWidget first.
        if isinstance(widget, QtGui.QLayout):
            holder = QtGui.QWidget()
            holder.setLayout(widget)
            widget = holder
        scroller = QtGui.QScrollArea()
        scroller.setWidget(widget)
        scroller.setWidgetResizable(True)
        widget = scroller
    return widget
class PhoneActivationProfileForm(forms.Form):
    """Validates a user-entered phone activation code against an expected token."""
    phone_token = forms.CharField(label=_('Phone activation code'), max_length=24, required=True, widget=forms.TextInput(attrs={'required': 'required', 'class': 'input-transparent', 'maxlength': '16', 'placeholder': _('Phone activation code')}))

    def __init__(self, token, *args, **kwargs):
        # token: the expected activation code the submitted value must equal.
        self.token = token
        super(PhoneActivationProfileForm, self).__init__(*args, **kwargs)

    def clean_phone_token(self):
        """Reject the submitted code unless it matches the expected token exactly."""
        data = self.cleaned_data['phone_token']
        if (data != self.token):
            raise forms.ValidationError(_('Phone activation code is invalid.'))
        return data
def extractYametteTranslations(item):
    """Map a feed *item* to a release message for the 'WATTT' tag.

    Returns None for previews or items without chapter/volume info,
    a release message when tagged 'WATTT', and False otherwise.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    title_lower = item['title'].lower()
    # Skip previews and items lacking any chapter/volume information.
    if 'preview' in title_lower or not (chp or vol):
        return None
    if 'WATTT' in item['tags']:
        return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
    return False
# NOTE(review): '.parametrize' is a stripped '@pytest.mark.parametrize'
# decorator — restore before running.
.parametrize('url', [FIGSHAREURL, ZENODOURL, DATAVERSEURL], ids=['figshare', 'zenodo', 'dataverse'])
def test_doi_downloader(url):
    """DOIDownloader fetches 'tiny-data.txt' from each supported DOI repository."""
    with TemporaryDirectory() as local_store:
        downloader = DOIDownloader()
        outfile = os.path.join(local_store, 'tiny-data.txt')
        # Third argument (the Pooch instance) is unused here, hence None.
        downloader((url + 'tiny-data.txt'), outfile, None)
        check_tiny_data(outfile)
def extractWatonbarBlogspotCom(item):
    """Map a feed *item* to a release message using the tag table below.

    Returns None for previews or items without chapter/volume info, a release
    message for the first matching tag, and False when no tag matches.
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    lowered_title = item['title'].lower()
    if 'preview' in lowered_title or not (chp or vol):
        return None
    # (feed tag, series name, translation type)
    tag_table = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    for feed_tag, series_name, tl_type in tag_table:
        if feed_tag in item['tags']:
            return buildReleaseMessageWithType(item, series_name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
class Behavior(BehaviorAPI):
    """Pairs a qualifier predicate with a Logic and applies the logic at most once."""

    # Connection this behavior has been applied to; guards against re-use.
    _applied_to: ConnectionAPI = None

    def __init__(self, qualifier: QualifierFn, logic: LogicAPI) -> None:
        self.qualifier = qualifier
        self.logic = logic

    def should_apply_to(self, connection: 'ConnectionAPI') -> bool:
        """Ask the qualifier whether this behavior targets *connection*."""
        return self.qualifier(connection, self.logic)

    def post_apply(self) -> None:
        self.logic.post_apply()

    async def apply(self, connection: ConnectionAPI) -> AsyncIterator[asyncio.Task[Any]]:
        """Bind to *connection* exactly once and yield the logic's background task.

        Raises ValidationError on any reentrant use: applying this behavior a
        second time, or applying it to a logic already bound to a behavior.
        """
        if (self._applied_to is not None):
            raise ValidationError(f'Reentrance: Behavior has already been applied to a connection: {self._applied_to}')
        else:
            self._applied_to = connection
        # A Logic may be bound to only one Behavior; record the back-reference.
        if hasattr(self.logic, '_behavior'):
            raise ValidationError(f'Reentrance: Logic already bound to a behavior: {self.logic._behavior}')
        else:
            self.logic._behavior = self
        # Delegate to the logic; its context manager owns the task lifetime.
        async with self.logic.apply(connection) as task:
            (yield task)
def test_usm_pretty_sec():
    """pretty() renders every USMSecurityParameters field into its string output."""
    params = usm.USMSecurityParameters(b'engine-id', 123, 234, b'username', b'auth', b'priv')
    rendered = params.pretty()
    assert isinstance(rendered, str)
    # Every constructor argument must show up in the pretty representation.
    for fragment in ('engine-id', '123', '234', 'username', 'auth', 'priv'):
        assert fragment in rendered
class ActionExpectsNotPresentDirective(ActionDirective):
    """Directive asserting that a given attribute is absent from the action response."""

    # NOTE(review): name/get_full_grammar take 'cls' — presumably @classmethod
    # decorators were lost during extraction; confirm against the original.
    def name(cls):
        return 'expect_not_present'

    def get_full_grammar(cls):
        # Extends the base grammar with:
        #   expect not present: attribute value: <variable_name> [:]
        return ((((((super(ActionExpectsNotPresentDirective, cls).get_full_grammar() + Literal('expect not present')) + ':') + Literal('attribute value')) + ':') + VarNameGrammar) + Optional(((~ Suppress(LineEnd())) + ':')))

    def ingest_from_parsed_test_fixture(self, action_case, test_case, parse_results, file_name, line_number):
        """Record the parsed variable under 'expects_not_present' on the action case."""
        path_put(action_case, 'expects_not_present.{}'.format(parse_results.variable_name), get_parsed_data_type_value(parse_results, parse_results.value))

    def assert_test_case_action_results(self, action_name, action_case, test_case, test_fixture, action_response, job_response, msg=None, **kwargs):
        """Assert the recorded attributes are absent from the action response body."""
        if ('expects_not_present' in action_case):
            assert_not_present(action_case['expects_not_present'], action_response.body, msg)
def main() -> None:
    """CLI entry point: parse kernel/device options and dispatch to the
    gemm / emb / linear micro-benchmarks."""
    import argparse
    parser = argparse.ArgumentParser(description='Measuring the Compute Kernel Performance Using PyTorch')
    parser.add_argument('--warmups', type=int, default=10, help='warmup times')
    parser.add_argument('--steps', type=int, default=100, help='repeat times')
    parser.add_argument('--device', type=str, choices=['cpu', 'gpu', 'tpu'], required=True, help='valid devices')
    # One subcommand per kernel family; a kernel must be chosen.
    subparsers = parser.add_subparsers(title='kernels', dest='kernel')
    subparsers.required = True
    parser_gemm = subparsers.add_parser('gemm', help='measure mm performance (m,k)*(k,n)=(m,n)')
    parser_gemm.add_argument('-t', '--dtype', type=str, default='float32')
    parser_gemm.add_argument('-d', '--dataset', choices=['A', 'B', 'C'], default='A')
    parser_emb = subparsers.add_parser('emb', help='measure EmbeddingBag performance')
    parser_emb.add_argument('-d', '--dataset', choices=['A', 'B'], default='A')
    parser_emb.add_argument('--randomseed', type=int, default=0)
    parser_emb.add_argument('--usexlabag', action='store_true', help='use xlabad instead of embeddingbag')
    parser_emb.add_argument('--alpha', default=0.0, help='Zipf param. Use uniform if == 0.0')
    parser_linear = subparsers.add_parser('linear', help='measure mlp performance')
    parser_linear.add_argument('--optimizer', action='store_true')
    parser_linear.add_argument('-t', '--dtype', default='float', help='data type', choices=['float', 'float16', 'bfloat16'])
    parser_linear.add_argument('-d', '--dataset', choices=['A'], default='A')
    # NOTE(review): action='store_false' with default=False means passing the
    # flag can never change the value — 'store_true' was likely intended; confirm.
    parser_linear.add_argument('--debug', action='store_false', default=False)
    parser_linear.add_argument('--fw-only', action='store_false', default=False)
    # NOTE(review): these two are added to the top-level parser, unlike the
    # surrounding 'linear' options — verify this placement is intentional.
    parser.add_argument('--set-to-none', action='store_false', default=False)
    parser.add_argument('--explicit-cast', action='store_true', default=True)
    parser_linear.add_argument('--optimizer-type', default='sgd', help='Optimizer: SGD', choices=['sgd', 'adagrad'])
    args = parser.parse_args()
    print('Measuring the performance of ', args.kernel, ' on device = ', args.device)
    print('Steps = ', args.steps, ' warmups = ', args.warmups)
    if (args.kernel == 'gemm'):
        print('with matrix dataset ', args.dataset, ', Data type: ', args.dtype)
        print(' ')
        if (args.dataset == 'A'):
            kgemm.run(args, dataset.gemm_A)
        elif (args.dataset == 'B'):
            kgemm.run(args, dataset.gemm_B)
        else:
            kgemm.run(args, dataset.gemm_C)
    elif (args.kernel == 'emb'):
        print('with emb dataset ', args.dataset)
        if (args.dataset == 'A'):
            kemb.run(args, dataset.emb_A)
        elif (args.dataset == 'B'):
            kemb.run(args, dataset.emb_B)
    else:
        # 'linear' kernel: expand the dataset rows into explicit layer-size lists.
        print('with linear dataset ', args.dataset, ', Data type: ', args.dtype)
        if (args.dataset == 'A'):
            ds = []
            for i in range(len(dataset.mlp_A)):
                layers_size = []
                (layer_num, input_size, hidden_size, output_size, batch_size) = dataset.mlp_A[i]
                layers_size.append(input_size)
                for _ in range(layer_num):
                    layers_size.append(hidden_size)
                layers_size.append(output_size)
                ds.append((layers_size, batch_size))
            klinear.run(args, ds)
# NOTE(review): the bare "((detect_target().name() == 'rocm'), 'Not supported by
# ROCM.')" tuples in this block are stripped '@unittest.skipIf' decorators —
# restore before running.
((detect_target().name() == 'rocm'), 'Not supported by ROCM.')
class SizeGetItemTestCase(unittest.TestCase):
    """Compile/run tests for ops.size()/ops.getitem() driving dynamic reshapes."""

    def __init__(self, *args, **kwargs):
        super(SizeGetItemTestCase, self).__init__(*args, **kwargs)
        # Unique suffix per compiled module so build artifacts do not collide.
        self._test_id = 0

    def _test_size_op(self, batch_size=(1, 3), X_shape=(16, 32, 64), Y_shape=((- 1), 16, 16, 128), test_name='size_op', dtype='float16'):
        """Compile a graph that reads dim 0 via size()+getitem() and reshapes with
        it, then compare the module output against a PyTorch reference reshape."""
        target = detect_target()
        b_dim = shape_utils.gen_int_var_min_max(batch_size, name='input_batch')
        X = Tensor(shape=[b_dim, *X_shape], dtype=dtype, name='input_0', is_input=True)
        Y1 = ops.size()(X)
        Y2 = ops.getitem()(Y1, 0)
        Y = ops.reshape()(X, [Y2, (- 1), X_shape[(- 1)]])
        Y._attrs['name'] = 'output_0'
        Y._attrs['is_output'] = True
        module = compile_model(Y, target, './tmp', f'{test_name}_{self._test_id}')
        self._test_id += 1
        # Exercise every batch size the dynamic dim was declared over.
        for b in batch_size:
            X_shape_pt = (b, *X_shape)
            X_pt = get_random_torch_tensor(X_shape_pt, dtype=dtype)
            Y_pt = X_pt.reshape(b, (- 1), X_shape_pt[(- 1)])
            y = torch.empty_like(Y_pt)
            module.run_with_tensors([X_pt], [y])
            self.assertTrue(torch.allclose(Y_pt, y, atol=0.01, rtol=0.01))

    def test_size_op_fp16(self):
        self._test_size_op(test_name='size_op_fp16', dtype='float16')
        self._test_size_op([1], (4, 8, 8), ((- 1),), test_name='size_op_fp16', dtype='float16')
        self._test_size_op([4, 2], (4, 8, 8), ((- 1),), test_name='size_op_fp16', dtype='float16')
        self._test_size_op([3, 1], (5, 4, 16), ((- 1), 8), test_name='size_op_fp16', dtype='float16')

    ((detect_target().name() == 'rocm'), 'Not supported by ROCM.')
    def test_size_op_fp32(self):
        self._test_size_op(test_name='size_op_fp32', dtype='float32')

    def _test_tensor_size_op(self, batch_size=(1, 3), X_shape=(16, 32, 64), Y_shape=((- 1), 16, 16, 128), test_name='tensor_size_op', copy_op=False, dtype='float16'):
        """Same idea but the size comes from a concatenated intermediate tensor;
        also checks the optimized graph collapses to exactly 6 nodes."""
        target = detect_target()
        X1 = Tensor(shape=[IntVar(values=batch_size, name='input_batch'), *X_shape], dtype=dtype, name='input_0', is_input=True)
        Y1_op = ops.flatten(1, (- 1))
        Y2_op = ops.flatten(1, (- 1))
        if copy_op:
            # Rebuild the ops from their own attributes to test op copying.
            Y1_op = ops.flatten(**Y1_op._get_op_attributes())
            Y2_op = ops.flatten(**Y2_op._get_op_attributes())
        Y1 = Y1_op(ops.elementwise(FuncEnum.ADD)(X1, X1))
        Y2 = Y2_op(ops.elementwise(FuncEnum.MUL)(X1, X1))
        Y3 = ops.concatenate()([Y1, Y2], 0)
        # NOTE(review): axis -4 relies on AIT size() indexing semantics here;
        # the PyTorch reference below expects the value to be 2*batch — confirm.
        dim = ops.size()(Y3, (- 4))
        Y = ops.reshape()(Y2, [dim, (- 1)])
        Y._attrs['name'] = 'output_0'
        Y._attrs['is_output'] = True
        module = compile_model(Y, target, './tmp', f'{test_name}_{self._test_id}')
        self._test_id += 1
        self.assertEqual(len(module.debug_sorted_graph), 6)
        for b in batch_size:
            X_shape_pt = (b, *X_shape)
            X_pt = get_random_torch_tensor(X_shape_pt, dtype=dtype)
            Y2_pt = (X_pt * X_pt)
            Y_pt = Y2_pt.reshape((2 * b), (- 1))
            y = torch.empty_like(Y_pt)
            module.run_with_tensors([X_pt], [y])
            self.assertTrue(torch.allclose(Y_pt, y, atol=0.01, rtol=0.01))

    def test_tensor_size_op_fp16(self):
        self._test_tensor_size_op(test_name='tensor_size_op_fp16', dtype='float16')
        self._test_tensor_size_op(copy_op=True, test_name='tensor_size_op_fp16_copy_op', dtype='float16')

    ((detect_target().name() == 'rocm'), 'Not supported by ROCM.')
    def test_tensor_size_op_fp32(self):
        self._test_tensor_size_op(test_name='tensor_size_op_fp32', dtype='float32')
class TestBoundingBoxDistanceGraphicMatcher():
    """Behavioral tests for BoundingBoxDistanceGraphicMatcher.get_graphic_matches."""

    def test_should_return_empty_list_with_empty_list_of_graphics(self):
        result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(semantic_graphic_list=[], candidate_semantic_content_list=[SemanticMixedContentWrapper()])
        assert (not result)

    def test_should_match_graphic_above_semantic_content(self):
        semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=GRAPHIC_ABOVE_FIGURE_COORDINATES_1, page_meta=PAGE_META_1))
        candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(coordinates=FIGURE_BELOW_GRAPHIC_COORDINATES_1)
        # The far-away candidates act as distractors; only the close one matches.
        result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(semantic_graphic_list=[semantic_graphic_1], candidate_semantic_content_list=[_get_semantic_content_for_page_coordinates(coordinates=FAR_AWAY_COORDINATES_1), candidate_semantic_content_1, _get_semantic_content_for_page_coordinates(coordinates=FAR_AWAY_COORDINATES_2)])
        LOGGER.debug('result: %r', result)
        assert (len(result) == 1)
        first_match = result.graphic_matches[0]
        assert (first_match.semantic_graphic == semantic_graphic_1)
        assert (first_match.candidate_semantic_content == candidate_semantic_content_1)

    def test_should_not_match_further_away_graphic_to_same_semantic_content(self):
        semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=GRAPHIC_ABOVE_FIGURE_COORDINATES_1, page_meta=PAGE_META_1))
        candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(coordinates=FIGURE_BELOW_GRAPHIC_COORDINATES_1)
        # Same page, progressively further away — neither should win.
        further_away_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=FIGURE_BELOW_GRAPHIC_COORDINATES_1.move_by(dy=500), page_meta=PAGE_META_1))
        further_away_graphic_2 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=FIGURE_BELOW_GRAPHIC_COORDINATES_1.move_by(dy=1000), page_meta=PAGE_META_1))
        result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(semantic_graphic_list=[further_away_graphic_1, semantic_graphic_1, further_away_graphic_2], candidate_semantic_content_list=[candidate_semantic_content_1])
        LOGGER.debug('result: %r', result)
        assert (len(result) == 1)
        first_match = result.graphic_matches[0]
        assert (first_match.semantic_graphic == semantic_graphic_1)
        assert (first_match.candidate_semantic_content == candidate_semantic_content_1)

    def test_should_not_match_empty_graphic(self):
        # Zero-area graphics must be ignored entirely.
        empty_semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=COORDINATES_1._replace(width=0, height=0)))
        candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(coordinates=COORDINATES_1)
        result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(semantic_graphic_list=[empty_semantic_graphic_1], candidate_semantic_content_list=[candidate_semantic_content_1])
        LOGGER.debug('result: %r', result)
        assert (not result)

    def test_should_not_match_graphic_on_another_page(self):
        semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=COORDINATES_1._replace(page_number=(COORDINATES_1.page_number + 1)), page_meta=PAGE_META_1._replace(page_number=(PAGE_META_1.page_number + 1))))
        candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(coordinates=COORDINATES_1)
        result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(semantic_graphic_list=[semantic_graphic_1], candidate_semantic_content_list=[candidate_semantic_content_1])
        LOGGER.debug('result: %r', result)
        assert (not result.graphic_matches)
        assert (result.unmatched_graphics == [semantic_graphic_1])

    def test_should_match_graphic_at_the_top_of_the_next_page(self):
        # Candidate near the bottom of page 1, graphic at the top of page 2.
        page_meta_1 = LayoutPageMeta.for_coordinates(LayoutPageCoordinates(x=0, y=0, width=100, height=200, page_number=1))
        page_meta_2 = LayoutPageMeta.for_coordinates(page_meta_1.coordinates._replace(page_number=2))
        candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(coordinates=LayoutPageCoordinates(x=20, y=180, width=60, height=20, page_number=1), line_meta=LayoutLineMeta(page_meta=page_meta_1))
        semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=LayoutPageCoordinates(x=20, y=10, width=60, height=50, page_number=2), page_meta=page_meta_2))
        result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(semantic_graphic_list=[semantic_graphic_1], candidate_semantic_content_list=[candidate_semantic_content_1])
        LOGGER.debug('result: %r', result)
        assert (len(result) == 1)
        first_match = result.graphic_matches[0]
        assert (first_match.semantic_graphic == semantic_graphic_1)
        assert (first_match.candidate_semantic_content == candidate_semantic_content_1)

    def test_should_match_continuation_graphic_at_the_top_of_the_next_page(self):
        # Both the page-1 graphic and its page-2 continuation map to one candidate.
        page_meta_1 = LayoutPageMeta.for_coordinates(LayoutPageCoordinates(x=0, y=0, width=100, height=200, page_number=1))
        page_meta_2 = LayoutPageMeta.for_coordinates(page_meta_1.coordinates._replace(page_number=2))
        candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(coordinates=LayoutPageCoordinates(x=20, y=110, width=60, height=20, page_number=1), line_meta=LayoutLineMeta(page_meta=page_meta_1))
        semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=LayoutPageCoordinates(x=20, y=140, width=60, height=50, page_number=1), page_meta=page_meta_1, local_file_path='test-graphic1.png'))
        semantic_graphic_2 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=LayoutPageCoordinates(x=20, y=10, width=60, height=50, page_number=2), page_meta=page_meta_2, local_file_path='test-graphic2.png'))
        result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(semantic_graphic_list=[semantic_graphic_1, semantic_graphic_2], candidate_semantic_content_list=[candidate_semantic_content_1])
        LOGGER.debug('result: %r', result)
        LOGGER.debug('result.graphic_matches[].local_file_path: %r', [graphic_match.semantic_graphic.layout_graphic.local_file_path for graphic_match in result.graphic_matches])
        assert (len(result) == 2)
        first_match = result.graphic_matches[0]
        assert (first_match.semantic_graphic == semantic_graphic_1)
        assert (first_match.candidate_semantic_content == candidate_semantic_content_1)
        second_match = result.graphic_matches[1]
        assert (second_match.semantic_graphic == semantic_graphic_2)
        assert (second_match.candidate_semantic_content == candidate_semantic_content_1)
        assert (not result.unmatched_graphics)

    def test_should_match_continuation_graphic_to_closer_graphic(self):
        # With candidates on both pages, each graphic pairs with its nearest one.
        page_meta_1 = LayoutPageMeta.for_coordinates(LayoutPageCoordinates(x=0, y=0, width=100, height=200, page_number=1))
        page_meta_2 = LayoutPageMeta.for_coordinates(page_meta_1.coordinates._replace(page_number=2))
        candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(coordinates=LayoutPageCoordinates(x=20, y=110, width=60, height=20, page_number=1), text='candidate_semantic_content_1', line_meta=LayoutLineMeta(page_meta=page_meta_1))
        candidate_semantic_content_2 = _get_semantic_content_for_page_coordinates(coordinates=LayoutPageCoordinates(x=20, y=100, width=60, height=20, page_number=2), text='candidate_semantic_content_2', line_meta=LayoutLineMeta(page_meta=page_meta_2))
        semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=LayoutPageCoordinates(x=20, y=140, width=60, height=50, page_number=1), page_meta=page_meta_1, local_file_path='test-graphic1.png'))
        semantic_graphic_2 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=LayoutPageCoordinates(x=20, y=10, width=60, height=20, page_number=2), page_meta=page_meta_2, local_file_path='test-graphic2.png'))
        semantic_graphic_3 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=LayoutPageCoordinates(x=20, y=130, width=60, height=20, page_number=2), page_meta=page_meta_2, local_file_path='test-graphic3.png'))
        result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(semantic_graphic_list=[semantic_graphic_1, semantic_graphic_2, semantic_graphic_3], candidate_semantic_content_list=[candidate_semantic_content_1, candidate_semantic_content_2])
        LOGGER.debug('result: %r', result)
        LOGGER.debug('result.graphic_matches[].local_file_path: %r', [graphic_match.semantic_graphic.layout_graphic.local_file_path for graphic_match in result.graphic_matches])
        assert ([(graphic_match.candidate_semantic_content.get_text(), graphic_match.semantic_graphic) for graphic_match in result.graphic_matches] == [('candidate_semantic_content_1', semantic_graphic_1), ('candidate_semantic_content_1', semantic_graphic_2), ('candidate_semantic_content_2', semantic_graphic_3)])
        assert (not result.unmatched_graphics)

    # NOTE(review): '.parametrize' is a stripped '@pytest.mark.parametrize'
    # decorator — restore before running.
    .parametrize('graphic_type,should_match', [('svg', False), ('bitmap', True)])
    def test_should_match_graphic_of_specific_type(self, graphic_type: str, should_match: bool):
        semantic_graphic_1 = SemanticGraphic(layout_graphic=LayoutGraphic(coordinates=GRAPHIC_ABOVE_FIGURE_COORDINATES_1, graphic_type=graphic_type, page_meta=PAGE_META_1))
        candidate_semantic_content_1 = _get_semantic_content_for_page_coordinates(coordinates=FIGURE_BELOW_GRAPHIC_COORDINATES_1)
        result = BoundingBoxDistanceGraphicMatcher().get_graphic_matches(semantic_graphic_list=[semantic_graphic_1], candidate_semantic_content_list=[candidate_semantic_content_1])
        LOGGER.debug('result: %r', result)
        if should_match:
            assert (len(result) == 1)
            first_match = result.graphic_matches[0]
            assert (first_match.semantic_graphic == semantic_graphic_1)
        else:
            assert (not result.graphic_matches)
            assert (result.unmatched_graphics == [semantic_graphic_1])
# NOTE(review): '_parameters()' and the bare '(name=..., ...)' tuples below are
# stripped parameterized-test decorators — restore their '@' prefixes before
# running.
_parameters()
(name='None', selector_filter=FiltersSchema(), should_raise=False)
(name='report filter1', selector_filter=FiltersSchema(selector='invocation_id:mock_invocation_id'), should_raise=False)
(name='report filter2', selector_filter=FiltersSchema(selector='invocation_time:mock_invocation_time'), should_raise=False)
(name='report filter3', selector_filter=FiltersSchema(selector='last_invocation'), should_raise=False)
(name='alerts filter1', selector_filter=FiltersSchema(selector='model=blabla'), should_raise=True)
(name='alerts filter2', selector_filter=FiltersSchema(selector='tag=blabla'), should_raise=True)
(name='alerts filter3', selector_filter=FiltersSchema(selector='statuses=blabla'), should_raise=True)
def test_validate_report_selector(selector_filter, should_raise):
    """Report-style selectors validate; alert-style selectors raise InvalidSelectorError."""
    if should_raise:
        with pytest.raises(InvalidSelectorError):
            selector_filter.validate_report_selector()
    else:
        selector_filter.validate_report_selector()
# NOTE(review): the leading '((not HAS_PY_INOTIFY), ...)' tuple is a stripped
# '@unittest.skipIf' decorator — restore before running.
((not HAS_PY_INOTIFY), 'This can only run if PyInotify is installed')
class TestPyInotifyReloader(unittest.TestCase):
    """Verify _PyInotifyReloader.code_changed blocks until a watched file changes."""

    def test_code_changed(self):
        from multiprocessing.pool import ThreadPool
        codec = codecs.lookup('utf8')
        # Two temp files wrapped in UTF-8 stream writers act as watched sources.
        with tempfile.NamedTemporaryFile('wb') as tmp_file1, tempfile.NamedTemporaryFile('wb') as tmp_file2, codecs.StreamReaderWriter(tmp_file1, codec.streamreader, codec.streamwriter, 'strict') as file1, codecs.StreamReaderWriter(tmp_file2, codec.streamreader, codec.streamwriter, 'strict') as file2:
            reloader = _PyInotifyReloader('example_service.standalone', ['pysoa'])
            reloader.watching = True
            # code_changed() blocks, so run it on a worker thread.
            pool = ThreadPool(processes=1)
            file1.write('test 1')
            file1.flush()
            file2.write('test 2')
            file2.flush()
            with mock.patch.object(target=reloader, attribute='get_watch_file_names') as mock_get_watch_file_names:
                mock_get_watch_file_names.return_value = [file1.name, file2.name]
                # Round 1: changing file1 unblocks code_changed.
                result = pool.apply_async(reloader.code_changed)
                self.assertFalse(result.ready())
                time.sleep(0.2)
                self.assertFalse(result.ready())
                file1.write('test changed 1')
                file1.flush()
                time.sleep(0.2)
                self.assertTrue(result.ready())
                self.assertTrue(result.get())
                self.assertTrue(result.successful())
                # Round 2: changing file2 unblocks a fresh call.
                result = pool.apply_async(reloader.code_changed)
                self.assertFalse(result.ready())
                time.sleep(0.2)
                self.assertFalse(result.ready())
                file2.write('test changed 2')
                file2.flush()
                time.sleep(0.2)
                self.assertTrue(result.ready())
                self.assertTrue(result.get())
                self.assertTrue(result.successful())
                # Round 3: a second change to the same file is detected again.
                result = pool.apply_async(reloader.code_changed)
                self.assertFalse(result.ready())
                time.sleep(0.2)
                self.assertFalse(result.ready())
                file2.write('test changed 2 again')
                file2.flush()
                time.sleep(0.2)
                self.assertTrue(result.ready())
                self.assertTrue(result.get())
                self.assertTrue(result.successful())
# NOTE(review): the '.parametrize' lines are stripped '@pytest.mark.parametrize'
# decorators — restore before running.
.parametrize('data', CLASSIFICATION_DATASETS.keys())
.parametrize('model', CLASSIFICATION_MODELS.keys())
.parametrize('method', METHODS)
def test_trees_sklearn_classifier_predict(data, model, method):
    """The emlearn-converted model must reproduce sklearn's predictions exactly."""
    (X, y) = CLASSIFICATION_DATASETS[data]
    estimator = CLASSIFICATION_MODELS[model]
    estimator.fit(X, y)
    cmodel = emlearn.convert(estimator, method=method)
    # Compare on a small prefix — exact agreement is required, not approximate.
    pred_original = estimator.predict(X[:5])
    pred_c = cmodel.predict(X[:5])
    numpy.testing.assert_equal(pred_c, pred_original)
def main():
    """Run the offline TUM RGB-D odometry demo in the Open3D GUI."""
    torch.manual_seed(0)

    frame_size = [192, 256]
    sequence_dir = '/path_to/tum/rgbd_dataset_freiburg3_long_office_household/'
    sequence = TumOdometryDataset(sequence_dir, frame_size)

    def _load_yaml(path):
        # Both config files are read the same way; keep it in one place.
        with open(path, 'r') as fh:
            return yaml.safe_load(fh)

    viz_cfg = _load_yaml('./config/open3d_viz.yml')
    slam_cfg = _load_yaml('./config/tum.yml')

    gui_app = gui.Application.instance
    gui_app.initialize()
    is_live = False  # offline playback, not a live sensor
    viz_window = OdomWindow(is_live, viz_cfg, slam_cfg, sequence)
    gui_app.run()
def get_selected_camera():
    """Return the camera shape for the current Maya selection, or None.

    Handles two cases: a selected camera *transform* (returns its camera
    shape child) and a directly selected camera *shape* node.

    Fix: the original listed only Transform nodes
    (``pm.ls(sl=1, type=pm.nt.Transform)``), which made the
    ``isinstance(obj, pm.nt.Camera)`` branch unreachable — a directly
    selected camera shape was never returned. The type filter is removed
    so both branches can fire.
    """
    for obj in pm.ls(sl=1):
        if isinstance(obj, pm.nt.Transform):
            for shape in obj.listRelatives(s=True):
                if isinstance(shape, pm.nt.Camera):
                    return shape
        elif isinstance(obj, pm.nt.Camera):
            return obj
    return None
def parse_json(web_json: dict, logger: Logger, zone_key: ZoneKey) -> dict[str, Any]:
    """Validate the fetched payload and remap source keys to query keys.

    Keys containing ``time`` are parsed as ISO timestamps localized to TZ;
    all other values are converted to float. A key mismatch is logged; a
    null value aborts with ParserException.
    """
    fetched_keys = set(web_json.keys())
    if set(JSON_QUERY_TO_SRC.values()) != fetched_keys:
        logger.error(
            msg=f'Fetched keys from source {web_json.keys()} do not match expected keys {JSON_QUERY_TO_SRC.values()}.',
            extra={'zone_key': zone_key, 'parser': 'MN.py'},
        )
    if None in web_json.values():
        raise ParserException(
            parser='MN.py',
            message=f'Fetched values contain null. Fetched data: {web_json}.',
        )
    query_data = {}
    for query_key, src_key in JSON_QUERY_TO_SRC.items():
        raw_value = web_json[src_key]
        if 'time' in query_key:
            query_data[query_key] = datetime.fromisoformat(raw_value).replace(tzinfo=TZ)
        else:
            query_data[query_key] = float(raw_value)
    return query_data
def setup_to_pass():
    """Fixture generator: install the GDM banner dconf configuration,
    yield to the test, then remove both files again."""
    profile_path = '/etc/dconf/profile/gdm'
    banner_path = '/etc/dconf/db/gdm.d/01-banner-message'
    profile_lines = [
        'user-db:user\n',
        'system-db:gdm\n',
        'file-db:/usr/share/gdm/greeter-dconf-defaults\n',
    ]
    banner_lines = [
        '[org/gnome/login-screen]\n',
        'banner-message-enable=true\n',
        'banner-message-text="<banner message>"\n',
    ]
    with open(profile_path, 'w') as f:
        f.writelines(profile_lines)
    with open(banner_path, 'w') as f:
        f.writelines(banner_lines)
    yield None
    # Teardown: clean up exactly what was created above.
    os.remove(profile_path)
    os.remove(banner_path)
def test_pyscf_bas():
    """A two-shell helium basis in PySCF text format parses into exactly
    two electron shells (comments and blank lines ignored)."""
    basis_str = '\n # Comment1\n He S\n 13.6267000 0.1752300\n 1.9993500 0.8934830\n 0.3829930 0.0000000\n He S\n 13.6267000 0.0000000\n 1.9993500 0.0000000\n 0.3829930 1.0000000\n '
    parsed = basis_from_pyscf_str(basis_str)
    electron_shells = parsed[2]['electron_shells']
    assert len(electron_shells) == 2
class Xero:
    """Entry point for the Xero API: exposes one manager per endpoint as a
    lowercase attribute (e.g. ``self.invoices``, ``self.contacts``), plus
    the files, payroll and projects sub-APIs."""

    OBJECT_LIST = ('Attachments', 'Accounts', 'BankTransactions', 'BankTransfers', 'BrandingThemes', 'BatchPayments', 'ContactGroups', 'Contacts', 'CreditNotes', 'Currencies', 'Employees', 'ExpenseClaims', 'Invoices', 'Items', 'Journals', 'ManualJournals', 'Organisations', 'Overpayments', 'PaymentServices', 'Payments', 'Prepayments', 'PurchaseOrders', 'Receipts', 'RepeatingInvoices', 'Reports', 'TaxRates', 'TrackingCategories', 'Users', 'Quotes')

    def __init__(self, credentials, unit_price_4dps=False, user_agent=None):
        """Build endpoint managers from OBJECT_LIST; attribute names are
        dynamic, so setattr is required there.

        Payments gets its own manager subclass for payment-specific
        behavior.
        """
        for name in self.OBJECT_LIST:
            manager_class = PaymentManager if name == 'Payments' else Manager
            setattr(self, name.lower(), manager_class(name, credentials, unit_price_4dps, user_agent))
        # Fix: these attribute names are constants, so plain assignment is
        # used instead of setattr with a literal string (flake8-bugbear B010).
        self.filesAPI = Files(credentials)
        self.payrollAPI = Payroll(credentials, unit_price_4dps, user_agent)
        self.projectsAPI = Project(credentials)
(scope='function') def friendbuy_nextgen_connection_config(db: session, friendbuy_nextgen_config, friendbuy_nextgen_secrets) -> Generator: fides_key = friendbuy_nextgen_config['fides_key'] connection_config = ConnectionConfig.create(db=db, data={'key': fides_key, 'name': fides_key, 'connection_type': ConnectionType.saas, 'access': AccessLevel.write, 'secrets': friendbuy_nextgen_secrets, 'saas_config': friendbuy_nextgen_config}) (yield connection_config) connection_config.delete(db)
class TestColumnValueMax(BaseFeatureDataQualityMetricsTest):
    """Data-quality test: the column maximum must not exceed the reference
    maximum (condition ``lte``)."""

    name: ClassVar = 'Max Value'

    def get_stat(self, current: NumericCharacteristics):
        # The statistic under test is the column max.
        return current.max

    def get_condition_from_reference(self, reference: Optional[ColumnCharacteristics]) -> TestValueCondition:
        # Guard clauses instead of nested ifs: no reference -> error,
        # non-numeric reference -> error, otherwise bound by its max.
        if reference is None:
            raise ValueError('Neither required test parameters nor reference data has been provided.')
        if not isinstance(reference, NumericCharacteristics):
            raise ValueError(f'{self.column_name} should be numerical or bool')
        return TestValueCondition(lte=reference.max)

    def calculate_value_for_test(self) -> Optional[Union[Numeric, bool]]:
        stats = self.metric.get_result().current_characteristics
        if not isinstance(stats, NumericCharacteristics):
            raise ValueError(f'{self.column_name} should be numerical or bool')
        current_max = stats.max
        if isinstance(current_max, str):
            raise ValueError(f'{self.column_name} should be numerical or bool')
        return current_max

    def get_description(self, value: Numeric) -> str:
        return f'The maximum value of the column **{self.column_name}** is {value}. The test threshold is {self.get_condition()}.'
def train(context):
    """Worker entry point invoked by Flink: configure TF distribution from
    the Flink cluster info, then train on the streamed CSV records."""
    from dl_on_flink_tensorflow.tensorflow_context import TFContext
    tf_context = TFContext(context)

    # Advertise this worker's cluster layout, role and index to TensorFlow.
    tf_config = {
        'cluster': tf_context.get_tf_cluster_config(),
        'task': {'type': tf_context.get_node_type(), 'index': tf_context.get_index()},
    }
    os.environ['TF_CONFIG'] = json.dumps(tf_config)
    logger.info(os.environ['TF_CONFIG'])

    model_save_path = tf_context.get_property('model_save_path')

    def stream_dataset() -> tf.data.Dataset:
        # Build the per-worker dataset lazily so _train controls when the
        # Flink stream is opened.
        def parse_csv(value):
            x, y = tf.io.decode_csv(value, record_defaults=[[0.0], [0.0]])
            return x, y

        dataset = tf_context.get_tfdataset_from_flink().map(parse_csv).repeat(1).batch(32)
        # The Flink stream cannot be re-sharded by TF; turn auto-sharding off.
        option = tf.data.Options()
        option.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        return dataset.with_options(option)

    _train(stream_dataset, model_save_path)
def test_data_too_large(client):
    """An oversized multipart body part must be rejected with HTTP 400 and
    a descriptive error document."""
    headers = {'Content-Type': 'multipart/form-data; boundary=BOUNDARY'}
    resp = client.simulate_post('/submit', headers=headers, body=EXAMPLE3)

    expected_error = {
        'description': 'body part is too large',
        'title': 'Malformed multipart/form-data request media',
    }
    assert resp.status_code == 400
    assert resp.json == expected_error
class VisDataOptions(DataAttrs):
    """Option builder for a vis.js component.

    Each method records one vis.js configuration attribute via
    ``self.attr`` after converting the Python value to its JavaScript
    form with ``JsUtils.jsConvertData``.  Methods that raise
    ``NotImplementedError`` are declared placeholders for options not
    wired up yet.
    """

    def align(self, position: Union[str, primitives.JsDataModel]):
        # vis.js 'align' option.
        return self.attr('align', JsUtils.jsConvertData(position, None))

    def queue_delay(self, n: Union[int, primitives.JsDataModel] = None):
        # vis.js queue 'delay'; None maps to the JS null literal.
        if (n is None):
            n = self.page.js.data.null
        return self.attr('delay', JsUtils.jsConvertData(n, None))

    def queue_max(self, n: Union[int, primitives.JsDataModel] = None):
        # vis.js queue 'max'; None maps to JS Number.POSITIVE_INFINITY.
        if (n is None):
            n = self.page.js.number.POSITIVE_INFINITY
        return self.attr('max', JsUtils.jsConvertData(n, None))

    def autoResize(self, flag: Union[bool, primitives.JsDataModel]):
        return self.attr('autoResize', JsUtils.jsConvertData(flag, None))

    def clickToUse(self, flag: Union[bool, primitives.JsDataModel]):
        return self.attr('clickToUse', JsUtils.jsConvertData(flag, None))

    def configure(self, flag: Union[bool, primitives.JsDataModel]):
        return self.attr('configure', JsUtils.jsConvertData(flag, None))

    def dataAttributes(self, strings: Union[str, primitives.JsDataModel]):
        return self.attr('dataAttributes', JsUtils.jsConvertData(strings, None))

    def editable(self):
        # Placeholder: option not implemented.
        raise NotImplementedError()

    def end(self, value: Union[Any, primitives.JsDataModel]):
        return self.attr('end', JsUtils.jsConvertData(value, None))

    def format(self, value: Union[Any, primitives.JsDataModel]):
        return self.attr('format', JsUtils.jsConvertData(value, None))

    def groupEditable(self):
        # Placeholder: option not implemented.
        raise NotImplementedError()

    def groupHeightMode(self, text: Union[str, primitives.JsDataModel]):
        return self.attr('groupHeightMode', JsUtils.jsConvertData(text, None))

    def groupOrder(self, text: Union[str, primitives.JsDataModel]):
        return self.attr('groupOrder', JsUtils.jsConvertData(text, None))

    def groupOrderSwap(self, fnc):
        # Placeholder: option not implemented.
        raise NotImplementedError()

    def groupTemplate(self, fnc):
        # Placeholder: option not implemented.
        raise NotImplementedError()

    def height(self, n: Union[int, primitives.JsDataModel]):
        return self.attr('height', JsUtils.jsConvertData(n, None))

    def hiddenDates(self, value: Union[Any, primitives.JsDataModel]):
        # Placeholder: option not implemented.
        raise NotImplementedError()

    def horizontalScroll(self, flag: Union[bool, primitives.JsDataModel]):
        return self.attr('horizontalScroll', JsUtils.jsConvertData(flag, None))
def dataset_config(db: Session, connection_config: ConnectionConfig, dataset: Dict[str, Any]) -> DatasetConfig:
    """Create a DatasetConfig (with its backing CtlDataset) bound to the
    given connection config.

    Side effect: the connection config's name and key are overwritten with
    the dataset's fides_key and saved.

    Fix: the original rebound the ``dataset`` parameter to the created
    DatasetConfig, shadowing the input dict; a distinct local name is used
    instead.
    """
    fides_key = dataset['fides_key']
    connection_config.name = fides_key
    connection_config.key = fides_key
    connection_config.save(db=db)
    ctl_dataset = CtlDataset.create_from_dataset_dict(db, dataset)
    created_config = DatasetConfig.create(
        db=db,
        data={
            'connection_config_id': connection_config.id,
            'fides_key': fides_key,
            'ctl_dataset_id': ctl_dataset.id,
        },
    )
    return created_config
class PDSResponseDecryptor(BaseStantinkoDecryptor):
    """Parse and decrypt a base64-wrapped C&C response of the form
    ``g=<time>&{p|a}=<2-byte tag><length><crc32><name>***<ciphertext>``.

    Problems are recorded in ``self.errors`` rather than raised.
    """

    def __init__(self):
        super(self.__class__, self).__init__()
        # Which payload marker matched: 'p' or 'a'.
        self.param = None

    def parse_response(self, response):
        """Decode *response*, locate the payload section, decrypt it with
        the embedded time value, verify CRC/length, and zlib-decompress
        into ``self.payload``."""
        try:
            data = base64.b64decode(response)
        except TypeError as e:
            self.errors.append(e)
            return
        param1 = b'g='
        sep = b'&'
        param2 = b'p='
        param3 = b'a='
        marker = b'***'
        i1 = data.find(param1, 0)
        i2 = data.find(sep, i1)
        if (i2 == (- 1)):
            self.payload = ''
            return
        # Value of the 'g=' field; used as key material for _decrypt.
        time = data[(i1 + len(param1)):i2]
        # Try 'p=' first, then fall back to 'a='; search only up to the
        # '***' marker that separates headers from ciphertext.
        i3 = data.find(param2, (i2 + len(sep)), data.find(marker))
        self.param = 'p'
        if (i3 == (- 1)):
            i3 = data.find(param3, (i2 + len(sep)), data.find(marker))
            self.param = 'a'
        if (i3 == (- 1)):
            self.payload = ''
            return
        # Header after the 'X=' marker: 2 bytes tag, 4 bytes length (LE),
        # 4 bytes CRC32 (LE), then the payload name up to '***'.
        checksum = struct.unpack('<I', data[(i3 + 6):(i3 + 10)])[0]
        length = struct.unpack('<I', data[(i3 + 2):(i3 + 6)])[0]
        i4 = data.find(marker, ((i3 + len(self.param)) + 1))
        cipher = data[(i4 + len(marker)):]
        headers_len = 8
        self.name = data[(((i3 + len(self.param)) + 1) + headers_len):i4]
        data = self._decrypt(cipher, time)
        # Fix: the mask operand was missing ('& ' with no right-hand side,
        # a syntax error). zlib.crc32 results are masked with 0xFFFFFFFF to
        # force an unsigned 32-bit value before comparing to the header CRC.
        if (not ((zlib.crc32(data) & 0xFFFFFFFF) == checksum)):
            self.errors.append('Invalid checksum')
        if (not (len(data) == length)):
            self.errors.append('Invalid length')
        try:
            self.payload = zlib.decompress(data)
        except zlib.error as e:
            self.errors.append(e)
class OptionSeriesBulletSonificationDefaultspeechoptionsPointgrouping(Options):
    """Highcharts ``pointGrouping`` options for bullet-series sonification
    speech tracks.

    NOTE(review): every option appears twice — a zero-argument getter
    returning the default via ``_config_get`` and a one-argument setter
    writing via ``_config``.  These pairs look like they should carry
    ``@property`` / ``@<name>.setter`` decorators; without them the second
    ``def`` silently overwrites the first.  Confirm against the original
    file — the decorators may have been lost in transcription.
    """

    def algorithm(self):
        # Default grouping algorithm.
        return self._config_get('last')

    def algorithm(self, text: str):
        self._config(text, js_type=False)

    def enabled(self):
        # Grouping is on by default.
        return self._config_get(True)

    def enabled(self, flag: bool):
        self._config(flag, js_type=False)

    def groupTimespan(self):
        # Default grouping window (units not shown here — presumably
        # milliseconds per Highcharts conventions; verify).
        return self._config_get(15)

    def groupTimespan(self, num: float):
        self._config(num, js_type=False)

    def prop(self):
        # Default point property used for grouping.
        return self._config_get('y')

    def prop(self, text: str):
        self._config(text, js_type=False)
def path_put(out, path, value):
    """Set *value* at *path* inside the nested container *out*, creating
    intermediate dicts/lists as needed (auto-vivification).

    *path* is consumed one element at a time via the sibling helper
    ``_path_get_next_path_element``; integer elements index lists, other
    elements key mappings.  ``_vivify_array`` (sibling helper) grows a
    list so the target index exists before assignment.
    """
    (slot, path_rest) = _path_get_next_path_element(path)
    if (path_rest is None):
        # Final path element: write the value in place.
        if (slot is None):
            return
        if isinstance(out, list):
            assert isinstance(slot, int), path
            _vivify_array(out, slot, dict)
        out[slot] = value
    else:
        # Peek at the next element to decide whether the intermediate
        # container should be a list (integer index) or a dict.
        (next_slot, x) = _path_get_next_path_element(path_rest)
        if ((next_slot is not None) and isinstance(next_slot, int)):
            prototype = []
        else:
            prototype = {}
        assert (slot is not None), path
        # Create the missing intermediate container, then recurse into it.
        if (isinstance(out, MutableMapping) and (slot not in out)):
            out[slot] = prototype
        if isinstance(out, list):
            assert isinstance(slot, int), path
            _vivify_array(out, slot, type(prototype))
        path_put(out[slot], path_rest, value)
class AITInterpreter(torch.fx.Interpreter):
    """torch.fx Interpreter that lowers a GraphModule into an AITemplate
    graph and compiles it into a runnable engine.

    Each fx node kind (placeholder / get_attr / call_function /
    call_method / call_module / output) is handled by the corresponding
    override below; conversion is dispatched through AIT_CONVERTERS.
    """

    def __init__(self, module: torch.fx.GraphModule, input_specs: List[Union[TensorSpec, List[TensorSpec]]], workdir: str, name: str, dll_name: str='test.so', dynamic_profile_strategy=DynamicProfileStrategy.MAX, profile_devs=None, use_fp16_acc=True, dump_ait_dir: Optional[str]=None, keep_constants: Optional[bool]=None, load_ait_dir: Optional[str]=None, remote_cache_file_path: Optional[str]=None, save_remote_cache: Optional[bool]=False, do_optimize_graph: bool=True, use_fast_math: bool=True, use_tanh_for_sigmoid: bool=False, profile_timeout: int=500, optimize_for_compilation_time: bool=False, allow_cutlass_sm90: bool=False, force_cutlass_sm90: bool=False):
        super().__init__(module)
        # Warn up front about graph nodes with no registered converter;
        # run() would otherwise fail mid-interpretation.
        missing_ops = self.validate_conversion()
        if missing_ops:
            warnings.warn(('Interpretation will fail due to missing operations \n' + '\n'.join((f'{i}' for i in missing_ops))))
        self.remote_cache_file_path = remote_cache_file_path
        # Only save the cache when both the flag and a path are provided.
        self.save_remote_cache: bool = (True if (save_remote_cache and self.remote_cache_file_path) else False)
        self.remote_cache_bytes = self._load_profile_cache()
        if self.save_remote_cache:
            # Redirect AIT's profile cache into a temp dir we control, so
            # it can be uploaded after compilation.
            self.cache_dir = os.path.join(tempfile.mkdtemp(prefix='aitemplate_'), '.aitemplate')
            os.environ['CACHE_DIR'] = self.cache_dir
            _LOGGER.info(f'Set CACHE_DIR to {self.cache_dir}')
        self.use_fp16_acc = use_fp16_acc
        self.use_fast_math = use_fast_math
        self.use_tanh_for_sigmoid = use_tanh_for_sigmoid
        self.allow_cutlass_sm90 = allow_cutlass_sm90
        self.force_cutlass_sm90 = force_cutlass_sm90
        self.optimize_for_compilation_time = optimize_for_compilation_time
        self.hardware_target = self._create_target()
        self.input_specs = input_specs
        # Cursor into input_specs; advanced once per placeholder node.
        self.input_specs_iter = 0
        self.workdir = workdir
        self.name = name
        self.dll_name = dll_name
        self.dynamic_profile_strategy = dynamic_profile_strategy
        self.profile_devs = profile_devs
        # Names/dtypes of inputs the compiled engine actually kept.
        self._input_names: List[str] = []
        self._input_dtypes: List[str] = []
        self._output_names: List[str] = []
        self._output_dtypes: List[str] = []
        # All fx-level input names, in placeholder order.
        self._fx_input_names: List[str] = []
        # Cache of constant tensors already converted, keyed by AIT name.
        self._loaded_params: Dict[str, AITTensor] = {}
        self.dump_ait_dir = dump_ait_dir
        self.keep_constants = keep_constants
        self.load_ait_dir = load_ait_dir
        self.do_optimize_graph = do_optimize_graph
        self.profile_timeout = profile_timeout

    def _create_target(self):
        """Detect and configure the AIT hardware target from the options
        captured in __init__."""
        return detect_target(use_fp16_acc=self.use_fp16_acc, remote_cache_bytes=self.remote_cache_bytes, use_fast_math=self.use_fast_math, use_tanh_for_sigmoid=self.use_tanh_for_sigmoid, allow_cutlass_sm90=self.allow_cutlass_sm90, force_cutlass_sm90=self.force_cutlass_sm90, optimize_for_compilation_time=self.optimize_for_compilation_time)

    def _load_profile_cache(self) -> bytes:
        """Fetch the remote profile cache as bytes, or None-equivalent
        (implicit None return) when no remote path is configured."""
        if (not self.remote_cache_file_path):
            return
        cache_bytes = io.BytesIO()
        cache.load_profile_cache(self.remote_cache_file_path, cache_bytes)
        remote_cache_bytes = cache_bytes.getvalue()
        _LOGGER.info(f'Loaded profile cache from remote: {self.remote_cache_file_path} with length {len(remote_cache_bytes)}')
        return remote_cache_bytes

    def _upload_profile_cache(self, hardware_target) -> None:
        """Push the locally produced profile cache back to the configured
        remote path (no-op when saving is disabled)."""
        cache_path = os.path.join(self.cache_dir, hardware_target._get_cache_file_name())
        if ((not self.save_remote_cache) or (not cache_path)):
            return
        _LOGGER.info(f'Uploading profile cache to remote: {self.remote_cache_file_path}')
        cache.save_profile_cache(self.remote_cache_file_path, cache_path)
        _LOGGER.info(f'Upload AIT cache file to path {self.remote_cache_file_path} completed.')

    def validate_conversion(self):
        """Return the set of human-readable descriptions of graph nodes
        that have no registered AIT converter (empty set when fully
        convertible)."""
        missing_converter = set()
        for node in self.module.graph.nodes:
            if ((node.op == 'call_function') and (not AIT_CONVERTERS.get(node.target))):
                missing_converter.add(f'{node.op} {_get_qualified_name(node.target)}')
            elif ((node.op == 'call_method') and (not AIT_CONVERTERS.get(node.target))):
                missing_converter.add(f'{node.op} torch.Tensor.{node.target}')
            elif (node.op == 'call_module'):
                submod = self.fetch_attr(node.target)
                # Modules lowered from another class keep their origin in
                # _base_class_origin; fall back to the concrete type.
                submod_type = getattr(submod, '_base_class_origin', type(submod))
                if (not AIT_CONVERTERS.get(submod_type)):
                    missing_converter.add(f'{node.op} {torch.typename(submod_type)}')
        return missing_converter

    def run(self) -> AITInterpreterResult:
        """Interpret the fx graph into AIT tensors, compile the engine, and
        return an AITInterpreterResult bundling engine + input/output
        names and dtypes.

        Optionally dumps the AIT program to dump_ait_dir, or replaces the
        traced program with one loaded from load_ait_dir.
        """
        run_module_start_time = datetime.now()
        output_tensors = super().run()
        _LOGGER.info(f'Run Module elapsed time: {(datetime.now() - run_module_start_time)}')
        # Profiling artifacts live next to the (possibly nested) test name.
        profile_dir = (os.path.join(self.workdir, self.name[0:self.name.rindex('/')]) if (self.name.find('/') != (- 1)) else self.workdir)
        args = {'tensor': output_tensors, 'target': self.hardware_target, 'workdir': self.workdir, 'test_name': self.name, 'profile_devs': self.profile_devs, 'dynamic_profiling_strategy': self.dynamic_profile_strategy, 'dll_name': self.dll_name, 'profile_dir': profile_dir, 'do_optimize_graph': self.do_optimize_graph, 'profile_timeout': self.profile_timeout}
        if self.dump_ait_dir:
            dump_ait_path = os.path.join(self.dump_ait_dir, (self.name + '.py'))
            # Unless constants are kept, the dump replaces them with
            # random data (smaller / shareable artifact).
            random_constants = (not self.keep_constants)
            dump_program(output_tensors, dump_ait_path, random_constants=random_constants)
            _LOGGER.info(f'Dumped AIT model to {dump_ait_path}')
        if self.load_ait_dir:
            load_ait_path = os.path.join(self.load_ait_dir, (self.name + '.py'))
            _LOGGER.info(f'Loaded AIT model from {load_ait_path}')
            (output_tensors, _) = get_program(load_ait_path)
            if isinstance(output_tensors, AITTensor):
                output_tensors = (output_tensors,)
            args['tensor'] = output_tensors
        self.engine = compile_model(**args)
        # Compilation may prune unused inputs; keep only the survivors,
        # preserving the original fx placeholder order.
        ait_input_names = [n._attrs['name'] for n in self.engine.debug_sorted_graph if n._attrs['is_input']]
        ait_input_dtypes = {n._attrs['name']: n._attrs['dtype'] for n in self.engine.debug_sorted_graph if n._attrs['is_input']}
        for name in ait_input_names:
            assert (self._fx_input_names.count(name) == 1), f"Cannot find AIT's compiled input: {name} in fx graph!"
        for name in self._fx_input_names:
            if (name in ait_input_names):
                self._input_names.append(name)
                self._input_dtypes.append(ait_input_dtypes[name])
        for (i, input_name) in enumerate(self._fx_input_names):
            _LOGGER.info('Set input{}: {}'.format(i, input_name))
        if (self.engine is None):
            raise RuntimeError('Engine is missing!')
        if self.save_remote_cache:
            self._upload_profile_cache(self.hardware_target)
        return AITInterpreterResult(self.engine, self._input_names, self._output_names, self._fx_input_names, self._input_dtypes, self._output_dtypes)

    def run_node(self, n):
        # Remember the node being executed so converters can name their
        # output tensors after it.
        self._cur_node_name = str(n)
        return super().run_node(n)

    def placeholder(self, target, args, kwargs):
        """Map an fx placeholder to one AITTensor input (or a list of
        them when the corresponding spec is a list), consuming the next
        entry of input_specs."""
        input_spec = self.input_specs[self.input_specs_iter]
        self.input_specs_iter += 1
        if isinstance(input_spec, List):
            # A list spec expands to one input per element, with names
            # derived via ARG_SPLITTER_KEYWORD.
            ait_tensors = []
            for (i, inp_spec) in enumerate(input_spec):
                target_name = f'{target}{ARG_SPLITTER_KEYWORD}{i}'
                self._fx_input_names.append(target_name)
                ait_tensors.append(AITTensor(shape=inp_spec.shape, dtype=dtype_to_str(inp_spec.dtype), name=target_name, is_input=True))
            return ait_tensors
        elif (isinstance(input_spec, TensorSpec) or isinstance(input_spec, torch.Tensor)):
            self._fx_input_names.append(target)
            return AITTensor(shape=input_spec.shape, dtype=dtype_to_str(input_spec.dtype), name=target, is_input=True)
        else:
            raise AssertionError('Input spec must be a Tensor(Spec) or List of Tensor(Spec).')

    def get_attr(self, target, args, kwargs):
        """Convert a module attribute (parameter/buffer tensor) into a
        constant AITTensor, de-duplicating repeated references through
        _loaded_params."""
        attr_val = getattr_recursive(self.module, target)
        if (not isinstance(attr_val, (torch.Tensor, torch.nn.Parameter))):
            raise RuntimeError(f'Unexpected get_attr value for {target}: {attr_val}')
        ait_friendly_name = make_str_ait_friendly(target)
        ait_dtype = dtype_to_str(attr_val.dtype)
        ait_val = attr_val.contiguous()
        if (ait_friendly_name in self._loaded_params):
            # Same attribute seen before: sanity-check and reuse it.
            existing_tensor = self._loaded_params[ait_friendly_name]
            assert (existing_tensor._attrs['dtype'] == ait_dtype)
            assert (existing_tensor._attrs['data'].tensor == ait_val)
            return existing_tensor
        data = _TorchConstantTensorData(ait_val)
        tensor = AITTensor(shape=attr_val.shape, dtype=ait_dtype, name=ait_friendly_name, original_name=target)
        tensor._bind_data(data)
        self._loaded_params[ait_friendly_name] = tensor
        return tensor

    def call_function(self, target, args, kwargs):
        """Dispatch an fx call_function node through AIT_CONVERTERS."""
        converter = AIT_CONVERTERS.get(target)
        if (not converter):
            raise RuntimeError(f'Conversion of function {torch.typename(target)} not currently supported!')
        assert (self._cur_node_name is not None)
        return converter(target, args, kwargs, self._cur_node_name)

    def call_method(self, target, args, kwargs):
        """Dispatch an fx call_method node (a torch.Tensor method name)
        through AIT_CONVERTERS."""
        assert isinstance(target, str)
        converter = AIT_CONVERTERS.get(target)
        if (not converter):
            raise RuntimeError(f'Conversion of method {target} not currently supported!')
        assert (self._cur_node_name is not None)
        return converter(target, args, kwargs, self._cur_node_name)

    def call_module(self, target, args, kwargs):
        """Dispatch an fx call_module node, keyed by the submodule's
        (origin) class, through AIT_CONVERTERS."""
        assert isinstance(target, str)
        submod = self.fetch_attr(target)
        submod_type = getattr(submod, '_base_class_origin', type(submod))
        converter = AIT_CONVERTERS.get(submod_type)
        if (not converter):
            raise RuntimeError(f'Conversion of module of type {submod_type} not currently supported!')
        assert (self._cur_node_name is not None)
        return converter(target, submod, args, kwargs, self._cur_node_name)

    def output(self, target, args, kwargs):
        """Mark the graph outputs: normalize args[0] to a tuple, name each
        tensor output_<i>, flag it as an output, and record its
        name/dtype for the interpreter result."""
        assert (len(args) == 1)
        if isinstance(args[0], tuple):
            outputs = args[0]
        elif isinstance(args[0], list):
            outputs = tuple(args[0])
        else:
            outputs = (args[0],)
        for (i, output) in enumerate(outputs):
            name = f'output_{i}'
            output._attrs['name'] = name
            output._attrs['is_output'] = True
            self._output_names.append(name)
            self._output_dtypes.append(output._attrs['dtype'])
        return outputs
class WebServerValidator(metaclass=Singleton):
    """Singleton that probes a target host to confirm a web server answers."""

    def __init__(self):
        self.request_handler = RequestHandler()

    def validate_target_webserver(self, host):
        """Return True if the host answers a GET within 20 seconds; raise
        WebServerValidatorException when the request fails."""
        url = '{}://{}:{}'.format(host.protocol, host.target, host.port)
        try:
            self.request_handler.send('GET', timeout=20, url=url)
        except RequestHandlerException:
            raise WebServerValidatorException
        return True