def test_while_exec_iteration_stop_evals_true():
    wd = WhileDecorator({'stop': '{stop}'})
    context = Context({'stop': True})
    mock = MagicMock()
    assert wd.exec_iteration(2, context, mock)
    assert context['whileCounter'] == 2
    assert wd.while_counter == 2
    assert len(context) == 2
    mock.assert_called_once_with({'stop': True, 'whileCounter': 2})
def run_step(context):
    logger.debug('started')
    context.assert_key_has_value(key='pathCheck', caller=__name__)
    paths_to_check = context['pathCheck']
    if not paths_to_check:
        raise KeyInContextHasNoValueError(
            f"context['pathCheck'] must have a value for {__name__}.")
    if isinstance(paths_to_check, list):
        check_me = paths_to_check
    else:
        check_me = [paths_to_check]
    out = {}
    total_found = 0
    for path in check_me:
        logger.debug('checking path: %s', path)
        formatted_path = context.get_formatted_value(path)
        found_paths = pypyr.utils.filesystem.get_glob(formatted_path)
        no_of_paths = len(found_paths)
        out[path] = {'exists': no_of_paths > 0,
                     'count': no_of_paths,
                     'found': found_paths}
        total_found += no_of_paths
    context['pathCheckOut'] = out
    logger.info(f'checked {len(out)} path(s) and found {total_found}')
    logger.debug('done')
class DecodedCorpus:
    def __init__(self, path_decoded_doc2sents):
        self.path_decoded_doc2sents = Path(path_decoded_doc2sents)
        self.decoded_doc2sents = utils.Json.load(path_decoded_doc2sents)

    def dump_html(self):
        path_output = self.path_decoded_doc2sents.with_name(
            self.path_decoded_doc2sents.name.replace('doc2sents', 'html')).with_suffix('.html')
        html_lines = []
        for doc, sents in self.decoded_doc2sents.items():
            html_lines.append(f'DOC {doc}')
            for sent in sents:
                tokens = sent['tokens']
                for l, r, _ in sent['spans']:
                    tokens[l] = consts.HTML_BP + tokens[l]
                    tokens[r] = tokens[r] + consts.HTML_EP + ' |'
                html_lines.append(consts.roberta_tokens_to_str(tokens))
        html_lines = [f'<p>{line}</p>' for line in html_lines]  # closing tag fixed (was '<p>')
        utils.TextFile.dumplist(html_lines, path_output)
def test():
    # The baudrate value was elided in the source ('baudrate=' had no value);
    # 40 MHz is a typical SPI rate for this kind of display, assumed here.
    spi = SPI(1, baudrate=40000000, sck=Pin(14), mosi=Pin(13))
    display = Display(spi, dc=Pin(4), cs=Pin(16), rst=Pin(17))
    print('Loading fonts...')
    print('Loading arcadepix')
    arcadepix = XglcdFont('fonts/ArcadePix9x11.c', 9, 11)
    print('Loading bally')
    bally = XglcdFont('fonts/Bally7x9.c', 7, 9)
    print('Loading broadway')
    broadway = XglcdFont('fonts/Broadway17x15.c', 17, 15)
    print('Loading espresso_dolce')
    espresso_dolce = XglcdFont('fonts/EspressoDolce18x24.c', 18, 24)
    print('Loading fixed_font')
    fixed_font = XglcdFont('fonts/FixedFont5x8.c', 5, 8)
    print('Loading neato')
    neato = XglcdFont('fonts/Neato5x7.c', 5, 7, letter_count=223)
    print('Loading robotron')
    robotron = XglcdFont('fonts/Robotron13x21.c', 13, 21)
    print('Loading unispace')
    unispace = XglcdFont('fonts/Unispace12x24.c', 12, 24)
    print('Loading wendy')
    wendy = XglcdFont('fonts/Wendy7x8.c', 7, 8)
    print('Fonts loaded.')
    display.draw_text(0, 0, 'Arcade Pix 9x11', arcadepix, color565(255, 0, 0))
    display.draw_text(0, 22, 'Bally 7x9', bally, color565(0, 255, 0))
    display.draw_text(0, 43, 'Broadway 17x15', broadway, color565(0, 0, 255))
    display.draw_text(0, 66, 'Espresso Dolce 18x24', espresso_dolce, color565(0, 255, 255))
    display.draw_text(0, 104, 'Fixed Font 5x8', fixed_font, color565(255, 0, 255))
    display.draw_text(0, 125, 'Neato 5x7', neato, color565(255, 255, 0))
    display.draw_text(0, 155, 'ROBOTRON 13X21', robotron, color565(255, 255, 255))
    display.draw_text(0, 190, 'Unispace 12x24', unispace, color565(255, 128, 0))
    display.draw_text(0, 220, 'Wendy 7x8', wendy, color565(255, 0, 128))
    sleep(9)
    display.clear()
    display.draw_text(0, 255, 'Arcade Pix 9x11', arcadepix, color565(255, 0, 0), landscape=True)
    display.draw_text(22, 255, 'Bally 7x9', bally, color565(0, 255, 0), landscape=True)
    display.draw_text(43, 255, 'Broadway 17x15', broadway, color565(0, 0, 255), landscape=True)
    display.draw_text(66, 255, 'Espresso Dolce 18x24', espresso_dolce, color565(0, 255, 255), landscape=True)
    display.draw_text(104, 255, 'Fixed Font 5x8', fixed_font, color565(255, 0, 255), landscape=True)
    display.draw_text(125, 255, 'Neato 5x7', neato, color565(255, 255, 0), landscape=True)
    display.draw_text(155, 255, 'ROBOTRON 13X21', robotron, color565(255, 255, 255), landscape=True)
    display.draw_text(190, 255, 'Unispace 12x24', unispace, color565(255, 128, 0), landscape=True)
    display.draw_text(220, 255, 'Wendy 7x8', wendy, color565(255, 0, 128), landscape=True)
    sleep(9)
    display.clear()
    display.draw_text(0, 0, 'Arcade Pix 9x11', arcadepix, color565(255, 0, 0), background=color565(0, 255, 255))
    display.draw_text(0, 22, 'Bally 7x9', bally, color565(0, 255, 0), background=color565(0, 0, 128))
    display.draw_text(0, 43, 'Broadway', broadway, color565(0, 0, 255), background=color565(255, 255, 0))
    display.draw_text(0, 66, 'Espresso', espresso_dolce, color565(0, 255, 255), background=color565(255, 0, 0))
    display.draw_text(0, 104, 'Fixed Font 5x8', fixed_font, color565(255, 0, 255), background=color565(0, 128, 0))
    display.draw_text(0, 125, 'Neato 5x7', neato, color565(255, 255, 0), background=color565(0, 0, 255))
    display.draw_text(0, 155, 'ROBOTRON 13X21', robotron, color565(255, 255, 255), background=color565(128, 128, 128))
    display.draw_text(0, 190, 'Unispace', unispace, color565(255, 128, 0), background=color565(0, 128, 255))
    display.draw_text(0, 220, 'Wendy 7x8', wendy, color565(255, 0, 128), background=color565(255, 255, 255))
    sleep(9)
    display.cleanup()
class Multitask_Iterator_Wrapper:
    def __init__(self, multitask_dataloader):
        self.multitask_dataloader = multitask_dataloader
        self.dataloaders = OrderedDict([(k, iter(x)) for k, x in self.multitask_dataloader.items()])
        self._index = 0
        self.max_n_iters = min([len(x) for k, x in self.dataloaders.items()])

    def __iter__(self):
        return self

    def reset(self):
        self.dataloaders = OrderedDict([(k, iter(x)) for k, x in self.multitask_dataloader.items()])
        self._index = 0
        self.max_n_iters = min([len(x) for k, x in self.dataloaders.items()])

    def __next__(self):
        if self._index < self.max_n_iters:
            data_batch = dict()
            for task, dataloader_iter in self.dataloaders.items():
                batch_per_task = next(dataloader_iter)
                data_batch[task] = batch_per_task
            self._index += 1
            return data_batch
        else:
            raise StopIteration

    def __len__(self):
        return self.max_n_iters
@pytest.mark.parametrize('enabled_extra', ['one', 'two', None])  # '@pytest.mark' prefix restored (stripped in source)
def test_solver_returns_extras_when_multiple_extras_use_same_dependency(
        solver: Solver, repo: Repository, package: ProjectPackage,
        enabled_extra: str | None) -> None:  # annotation corrected: the parametrized values are strings, not bools
    package.add_dependency(Factory.create_dependency('A', '*'))
    package_a = get_package('A', '1.0')
    package_b = get_package('B', '1.0')
    package_c = get_package('C', '1.0')
    dep = get_dependency('C', '*', optional=True)
    dep._in_extras.append(canonicalize_name('one'))
    dep._in_extras.append(canonicalize_name('two'))
    package_b.extras = {canonicalize_name('one'): [dep], canonicalize_name('two'): [dep]}
    package_b.add_dependency(dep)
    extras = [enabled_extra] if enabled_extra is not None else []
    package_a.add_dependency(Factory.create_dependency('B', {'version': '*', 'extras': extras}))
    repo.add_package(package_a)
    repo.add_package(package_b)
    repo.add_package(package_c)
    transaction = solver.solve()
    expected = [{'job': 'install', 'package': package_b},
                {'job': 'install', 'package': package_a}]
    if enabled_extra is not None:
        expected.insert(0, {'job': 'install', 'package': package_c})
    ops = check_solver_result(transaction, expected)
    assert ops[-1].package.marker.is_any()
    assert ops[0].package.marker.is_any()
class TestNearestUnequalElements(ZiplineTestCase):
    @parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)  # '@parameter' decorator prefix restored (stripped in source)
    def test_nearest_unequal_elements(self, tz):
        dts = pd.to_datetime(['2014-01-01', '2014-01-05', '2014-01-06', '2014-01-09']).tz_localize(tz)

        def t(s):
            return None if s is None else pd.Timestamp(s, tz=tz)

        for dt, before, after in (('2013-12-30', None, '2014-01-01'),
                                  ('2013-12-31', None, '2014-01-01'),
                                  ('2014-01-01', None, '2014-01-05'),
                                  ('2014-01-02', '2014-01-01', '2014-01-05'),
                                  ('2014-01-03', '2014-01-01', '2014-01-05'),
                                  ('2014-01-04', '2014-01-01', '2014-01-05'),
                                  ('2014-01-05', '2014-01-01', '2014-01-06'),
                                  ('2014-01-06', '2014-01-05', '2014-01-09'),
                                  ('2014-01-07', '2014-01-06', '2014-01-09'),
                                  ('2014-01-08', '2014-01-06', '2014-01-09'),
                                  ('2014-01-09', '2014-01-06', None),
                                  ('2014-01-10', '2014-01-09', None),
                                  ('2014-01-11', '2014-01-09', None)):
            computed = nearest_unequal_elements(dts, t(dt))
            expected = (t(before), t(after))
            self.assertEqual(computed, expected)

    @parameter_space(tz=['UTC', 'US/Eastern'], __fail_fast=True)
    def test_nearest_unequal_elements_short_dts(self, tz):
        dts = pd.to_datetime(['2014-01-01']).tz_localize(tz)

        def t(s):
            return None if s is None else pd.Timestamp(s, tz=tz)

        for dt, before, after in (('2013-12-31', None, '2014-01-01'),
                                  ('2014-01-01', None, None),
                                  ('2014-01-02', '2014-01-01', None)):
            computed = nearest_unequal_elements(dts, t(dt))
            expected = (t(before), t(after))
            self.assertEqual(computed, expected)
        dts = pd.to_datetime([]).tz_localize(tz)
        for dt, before, after in (('2013-12-31', None, None),
                                  ('2014-01-01', None, None),
                                  ('2014-01-02', None, None)):
            computed = nearest_unequal_elements(dts, t(dt))
            expected = (t(before), t(after))
            self.assertEqual(computed, expected)

    def test_nearest_unequal_bad_input(self):
        with self.assertRaises(ValueError) as e:
            nearest_unequal_elements(pd.to_datetime(['2014', '2014']), pd.Timestamp('2014'))
        self.assertEqual(str(e.exception), 'dts must be unique')
        with self.assertRaises(ValueError) as e:
            nearest_unequal_elements(pd.to_datetime(['2014', '2013']), pd.Timestamp('2014'))
        self.assertEqual(str(e.exception), 'dts must be sorted in increasing order')
class UnBan(ScrimsButton):
    def __init__(self):
        super().__init__(label='Unban Users', style=discord.ButtonStyle.green)

    async def callback(self, interaction: discord.Interaction):
        await interaction.response.defer()
        if not (banned_teams := await self.view.record.banned_teams.order_by('id')):
            return await self.view.ctx.error('No banned user found.', 5)
        v = QuotientView(self.view.ctx)
        for chunk in get_chunks(banned_teams, 25):
            v.add_item(BanSelector(self.view.ctx, chunk))
        v.message = await interaction.followup.send('', view=v, ephemeral=True)
        await v.wait()
        if v.custom_id:
            banlog = await BanLog.get_or_none(guild_id=interaction.guild_id)
            for b in v.custom_id:
                slot = await BannedTeam.get_or_none(pk=b)
                if not slot:
                    continue
                await slot.delete()
                if banlog:
                    await banlog.log_unban(slot.user_id, self.view.ctx.author,
                                           [self.view.record], '```No reason given```')
            await self.view.ctx.success(f'Successfully unbanned `{plural(v.custom_id):user|users}`.', 6)
            return await self.view.refresh_view()
def test_set_as_str_things(echoes_resource_database):
    item = echoes_resource_database.get_item_by_name
    req_set = RequirementSet([
        RequirementList([ResourceRequirement.simple(item('Screw Attack')),
                         ResourceRequirement.simple(item('Space Jump Boots'))]),
        RequirementList([ResourceRequirement.simple(item('Power Bomb'))]),
    ])
    assert req_set.as_str == '(Power Bomb 1) or (Screw Attack 1, Space Jump Boots 1)'
def _warn_incompatibility_with_xunit2(request: FixtureRequest, fixture_name: str) -> None:
    from _pytest.warning_types import PytestWarning
    xml = request.config.stash.get(xml_key, None)
    if xml is not None and xml.family not in ('xunit1', 'legacy'):
        request.node.warn(PytestWarning(
            "{fixture_name} is incompatible with junit_family '{family}' (use 'legacy' or 'xunit1')".format(
                fixture_name=fixture_name, family=xml.family)))
def test_is_same_crs():
    crs1 = CRS({'init': 'epsg:4326'})
    crs2 = CRS({'init': 'epsg:3857'})
    assert crs1 == crs1
    assert crs1 != crs2
    wgs84_crs = CRS.from_string('+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs')
    assert crs1 == wgs84_crs
    lcc_crs1 = CRS.from_string('+lon_0=-95 +ellps=GRS80 +y_0=0 +no_defs=True +proj=lcc +x_0=0 +units=m +lat_2=77 +lat_1=49 +lat_0=0')
    lcc_crs2 = CRS.from_string('+lon_0=-95 +ellps=GRS80 +y_0=0 +no_defs=True +proj=lcc +x_0=0 +units=m +lat_2=77 +lat_1=45 +lat_0=0')
    assert lcc_crs1 != lcc_crs2
def target_from_node(module: str,
                     node: FuncDef | MypyFile | OverloadedFuncDef) -> str | None:
    if isinstance(node, MypyFile):
        if module != node.fullname:
            return None
        return module
    elif node.info:
        return f'{node.info.fullname}.{node.name}'
    else:
        return f'{module}.{node.name}'
def init_tokenizer():
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    tokenizer.add_special_tokens({'bos_token': '[DEC]'})
    tokenizer.add_special_tokens({'additional_special_tokens': ['[ENC]']})
    tokenizer.enc_token_id = tokenizer.additional_special_tokens_ids[0]
    return tokenizer
class MqlLexer(CppLexer):
    name = 'MQL'
    aliases = ['mql', 'mq4', 'mq5', 'mql4', 'mql5']
    filenames = ['*.mq4', '*.mq5', '*.mqh']
    mimetypes = ['text/x-mql']
    version_added = '2.0'
    tokens = {
        'statements': [
            (words(_mql_builtins.keywords, suffix='\\b'), Keyword),
            (words(_mql_builtins.c_types, suffix='\\b'), Keyword.Type),
            (words(_mql_builtins.types, suffix='\\b'), Name.Function),
            (words(_mql_builtins.constants, suffix='\\b'), Name.Constant),
            (words(_mql_builtins.colors, prefix='(clr)?', suffix='\\b'), Name.Constant),
            inherit,
        ],
    }
def get_string(prompt, default=None, none_ok=False):
    full_prompt = prompt
    if default:
        full_prompt += ' [{}]'.format(default)
    if none_ok:
        full_prompt += ' [enter "none" to clear]'
    full_prompt += ' '
    answer = input(full_prompt)
    if answer == '':
        answer = default
    if answer == 'none':
        answer = ''
    if answer is None:
        return get_string(prompt, default)
    return answer
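A quick illustration of how get_string resolves the default and the "none" sentinel; this is a hypothetical session, not part of the source:

# >>> get_string('Favourite colour', default='blue', none_ok=True)
# Favourite colour [blue] [enter "none" to clear]   <user presses Enter>
# 'blue'
# >>> get_string('Favourite colour', default='blue', none_ok=True)
# Favourite colour [blue] [enter "none" to clear]   <user types none>
# ''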
class SARIce(GenericCompositor):
    def __call__(self, projectables, *args, **kwargs):
        mhh, mhv = projectables
        ch1attrs = mhh.attrs
        ch2attrs = mhv.attrs
        mhh = np.sqrt(mhh + 0.002) - 0.04
        mhv = np.sqrt(mhv + 0.002) - 0.04
        mhh.attrs = ch1attrs
        mhv.attrs = ch2attrs
        green = overlay(mhh, mhv, 30) * 1000
        green.attrs = combine_metadata(mhh, mhv)
        return super(SARIce, self).__call__((mhv, green, mhh), *args, **kwargs)
@task(name='i18n-compile')  # assumed: the '@task' decorator prefix was stripped in the source (the body uses invoke)
def i18n_compile(ctx):
    env = create_env('build', requirements=True)
    dest_dir = os.path.join(BASE, PROJECT, 'i18n')
    orig_dir = os.path.join(BASE, 'i18n', 'langs')
    os.makedirs(dest_dir, exist_ok=True)
    for lang in i18n_available():
        invoke.run('%s/bin/pybabel compile -i %s/%s.po -o %s/%s.mo -l %s'
                   % (env, orig_dir, lang, dest_dir, lang, lang))
def login_with_token(client: GMatrixClient, user_id: str, access_token: str) -> Optional[User]:
    client.set_access_token(user_id=user_id, token=access_token)
    try:
        client.api.get_devices()
    except MatrixRequestError as ex:
        log.debug("Couldn't use previous login credentials",
                  node=node_address_from_userid(client.user_id),
                  prev_user_id=user_id, _exception=ex)
        return None
    log.debug('Success. Valid previous credentials',
              node=node_address_from_userid(client.user_id), user_id=user_id)
    return client.get_user(client.user_id)
def run_on_leader(pg: dist.ProcessGroup, rank: int):
    def callable(func: Callable[..., T]) -> T:
        @wraps(func)  # assumed: the '@'-prefixed wraps decorator was stripped in the source, leaving bare '(func)'
        def wrapped(*args: Any, **kwargs: Any) -> T:
            return invoke_on_rank_and_broadcast_result(pg, rank, func, *args, **kwargs)
        return wrapped
    return callable
class PaneConfig:
    def __init__(self, row_pattern):
        parts = [p.replace('\\:', ':') for p in re.split('(?<!\\\\):', row_pattern)]

        def is_numeric(s):
            return s[:2] == '~#' and '~' not in s[2:]

        def is_pattern(s):
            return '<' in s

        def f_round(s):
            return (isinstance(s, float) and ('%.2f' % s)) or s

        def is_date(s):
            return s in TIME_TAGS

        disp = (parts[1] if len(parts) >= 2
                else "[i][span alpha='40%']<~#tracks>[/span][/i]")
        cat = parts[0]
        if is_pattern(cat):
            title = util.pattern(cat, esc=True, markup=True)
            try:
                pc = XMLFromPattern(cat)
            except ValueError:
                pc = XMLFromPattern('')
            tags = pc.tags
            format = pc.format_list
            has_markup = True
        else:
            title = util.tag(cat)
            tags = util.tagsplit(cat)
            has_markup = False
            if is_date(cat):
                def format(song: AudioFile) -> list[tuple[str, str]]:
                    fmt = config.gettext('settings', 'datecolumn_timestamp_format')
                    date_str = format_date(song(cat), fmt)
                    return [(date_str, date_str)]
            elif is_numeric(cat):
                def format(song: AudioFile) -> list[tuple[str, str]]:
                    v = str(f_round(song(cat)))
                    return [(v, v)]
            else:
                def format(song: AudioFile) -> list[tuple[str, str]]:
                    return song.list_separate(cat)
        if is_pattern(disp):
            try:
                pd = XMLFromPattern(disp)
            except ValueError:
                pd = XMLFromPattern('')
            format_display = pd.format
        elif is_numeric(disp):
            def format_display(coll):
                return str(f_round(coll(disp)))
        else:
            def format_display(coll):
                return util.escape(coll.comma(disp))
        self.title = title
        self.tags = set(tags)
        self.format = format
        self.format_display = format_display
        self.has_markup = has_markup

    def __repr__(self):
        return f'<{self.__class__.__name__} title={self.title!r} tags={self.tags!r}>'
class Effect1264(BaseEffect):
    type = 'passive'

    def handler(fit, ship, context, projectionRange, **kwargs):
        fit.modules.filteredItemBoost(
            lambda mod: mod.item.requiresSkill('Small Hybrid Turret'),
            'trackingSpeed',
            ship.getModifiedItemAttr('eliteBonusInterceptor2'),
            skill='Interceptors', **kwargs)
class TestWaitForRequest(BaseTestCase):
    async def test_wait_for_request(self):
        await self.page.goto(self.url + 'empty')
        results = await asyncio.gather(
            self.page.waitForRequest(self.url + 'static/digits/2.png'),
            self.page.evaluate("() => {\n fetch('/static/digits/1.png');\n fetch('/static/digits/2.png');\n fetch('/static/digits/3.png');\n }"),
        )
        request = results[0]
        self.assertEqual(request.url, self.url + 'static/digits/2.png')

    async def test_predicate(self):
        await self.page.goto(self.url + 'empty')

        def predicate(req):
            return req.url == self.url + 'static/digits/2.png'

        results = await asyncio.gather(
            self.page.waitForRequest(predicate),
            self.page.evaluate("() => {\n fetch('/static/digits/1.png');\n fetch('/static/digits/2.png');\n fetch('/static/digits/3.png');\n }"),
        )
        request = results[0]
        self.assertEqual(request.url, self.url + 'static/digits/2.png')

    async def test_no_timeout(self):
        await self.page.goto(self.url + 'empty')
        results = await asyncio.gather(
            self.page.waitForRequest(self.url + 'static/digits/2.png', timeout=0),
            self.page.evaluate("() => setTimeout(() => {\n fetch('/static/digits/1.png');\n fetch('/static/digits/2.png');\n fetch('/static/digits/3.png');\n }, 50)"),
        )
        request = results[0]
        self.assertEqual(request.url, self.url + 'static/digits/2.png')
def tokenize_caption(input_json: str, keep_punctuation: bool = False,
                     host_address: str = None, character_level: bool = False,
                     zh: bool = False, output_json: str = None):
    data = json.load(open(input_json, 'r'))['audios']
    if zh:
        from nltk.parse.corenlp import CoreNLPParser
        from zhon.hanzi import punctuation
        parser = CoreNLPParser(host_address)
        for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
            for cap_idx in range(len(data[audio_idx]['captions'])):
                caption = data[audio_idx]['captions'][cap_idx]['caption']
                if not keep_punctuation:
                    caption = re.sub('[{}]'.format(punctuation), '', caption)
                if character_level:
                    tokens = list(caption)
                else:
                    tokens = list(parser.tokenize(caption))
                data[audio_idx]['captions'][cap_idx]['tokens'] = ' '.join(tokens)
    else:
        from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
        captions = {}
        for audio_idx in range(len(data)):
            audio_id = data[audio_idx]['audio_id']
            captions[audio_id] = []
            for cap_idx in range(len(data[audio_idx]['captions'])):
                caption = data[audio_idx]['captions'][cap_idx]['caption']
                captions[audio_id].append({'audio_id': audio_id, 'id': cap_idx, 'caption': caption})
        tokenizer = PTBTokenizer()
        captions = tokenizer.tokenize(captions)
        for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
            audio_id = data[audio_idx]['audio_id']
            for cap_idx in range(len(data[audio_idx]['captions'])):
                tokens = captions[audio_id][cap_idx]
                data[audio_idx]['captions'][cap_idx]['tokens'] = tokens
    if output_json:
        json.dump({'audios': data}, open(output_json, 'w'), indent=4, ensure_ascii=not zh)
    else:
        json.dump({'audios': data}, open(input_json, 'w'), indent=4, ensure_ascii=not zh)
def dsystem_dt(request):
    sys = rss(3, 1, 1)
    A = [[-3.0, 4.0, 2.0], [-1.0, -3.0, 0.0], [2.0, 5.0, 3.0]]
    B = [[1.0, 4.0], [-3.0, -3.0], [-2.0, 1.0]]
    C = [[4.0, 2.0, -3.0], [1.0, 4.0, 3.0]]
    D = [[-2.0, 4.0], [0.0, 1.0]]
    dt = request.param
    systems = {'sssiso': StateSpace(sys.A, sys.B, sys.C, sys.D, dt),
               'ssmimo': StateSpace(A, B, C, D, dt),
               'tf': TransferFunction([2, 1], [2, 1, 1], dt)}
    return systems
def colorForName(name):
    colors = [('c1', '#ec9999'), ('c2', '#ffc1a6'), ('c3', '#fff0a6'),
              ('c4', '#adf199'), ('c5', '#9fadea'), ('c6', '#a699c1'),
              ('c7', '#ad99b4'), ('c8', '#eaffea'), ('c9', '#dcecfb'),
              ('c10', '#ffffea')]
    total = sum(ord(ch) for ch in name)
    return colors[total % len(colors)]
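Since the color index is just the character-code sum of the name modulo ten, the mapping is deterministic per name; a minimal check (illustrative, not from the source):

assert colorForName('alice') == colorForName('alice')  # stable for a given name
name_tag, hex_value = colorForName('alice')            # some ('cN', '#rrggbb') pair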
def filed_based_convert_examples_to_features(examples, label_list, max_seq_length,
                                             tokenizer, output_file):
    writer = tf.python_io.TFRecordWriter(output_file)
    for ex_index, example in enumerate(examples):
        if ex_index % 10000 == 0:
            tf.logging.info('Writing example %d of %d' % (ex_index, len(examples)))
        feature = convert_single_example(ex_index, example, label_list,
                                         max_seq_length, tokenizer)

        def create_int_feature(values):
            f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
            return f

        features = collections.OrderedDict()
        features['input_ids'] = create_int_feature(feature.input_ids)
        features['input_mask'] = create_int_feature(feature.input_mask)
        features['segment_ids'] = create_int_feature(feature.segment_ids)
        features['label_ids'] = create_int_feature([feature.label_id])
        tf_example = tf.train.Example(features=tf.train.Features(feature=features))
        writer.write(tf_example.SerializeToString())
    writer.close()  # assumed: the writer should be closed after the loop; the close call is missing in the source
class Scope:
    def __init__(self, ctx):
        self.ctx = ctx
        self._locals = []
        self._parent_locals = []

    def add_reference(self, ref):
        self._locals.append(ref)

    def add_parent_reference(self, ref):
        new_ref = self.ctx.cache.process_pool.save(ref)
        if self.ctx.cache.named_pool is not None:
            self.ctx.cache.named_pool.save(new_ref)
        self._parent_locals.append(new_ref)
        self._parent_locals.append(ref)
        return new_ref

    def destroy(self, local_references_only=False):
        ctx = self.ctx
        local_refs = self._locals
        parent_refs = self._parent_locals
        del self.ctx
        del self._locals
        del self._parent_locals
        for ref in local_refs:
            ref._destructor()
        if local_references_only:
            return parent_refs
        for ref in parent_refs:
            ref._destructor()
        ctx.cache.garbage_collection()
        return []
def read_nyt(id_json):
    # NOTE: `label_f`, `source`, `labels` and `sentence_ids` are not defined in
    # this function; in the original script they appear to be module-level globals.
    f = open(id_json, 'r')
    ids = f.readlines()
    f.close()
    print(ids[:2])
    f = open(label_f, 'r')
    label_vocab_s = f.readlines()
    f.close()
    label_vocab = []
    for label in label_vocab_s:
        label = label.strip()
        label_vocab.append(label)
    id_list = []
    for i in ids:
        id_list.append(int(i[13:-5]))
    print(id_list[:2])
    corpus = []
    for file_name in tqdm(ids):
        xml_path = file_name.strip()
        try:
            sample = {}
            dom = xml.dom.minidom.parse(xml_path)
            root = dom.documentElement
            tags = root.getElementsByTagName('p')
            text = ''
            for tag in tags[1:]:
                text += tag.firstChild.data
            if text == '':
                continue
            source.append(text)
            sample_label = []
            tags = root.getElementsByTagName('classifier')
            for tag in tags:
                type = tag.getAttribute('type')
                if type != 'taxonomic_classifier':
                    continue
                hier_path = tag.firstChild.data
                hier_list = hier_path.split('/')
                if len(hier_list) < 3:
                    continue
                for l in range(1, len(hier_list) + 1):
                    label = '/'.join(hier_list[:l])
                    if label == 'Top':
                        continue
                    if label not in sample_label and label in label_vocab:
                        sample_label.append(label)
            labels.append(sample_label)
            sentence_ids.append(file_name)
            sample['doc_topic'] = []
            sample['doc_keyword'] = []
            corpus.append(sample)
        except AssertionError:
            print(xml_path)
            print('Something went wrong...')
            continue
@pytest.mark.parametrize('dynamic', [False, True])  # '@pytest.mark' prefix restored (stripped in source)
def test_control_dict_str_map(dynamic):
    class Fake(FakeBase):
        x = CommonBase.control('', '%d', '', validator=strict_discrete_set,
                               values={'X': 1, 'Y': 2, 'Z': 3}, map_values=True,
                               dynamic=dynamic)

    fake = Fake()
    fake.x = 'X'
    assert fake.read() == '1'
    fake.x = 'Y'
    assert fake.x == 'Y'
    fake.x = 'Z'
    assert fake.read() == '3'
class GetPass(object):
    def _unix_getch(self):
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(sys.stdin.fileno())
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch

    def _getuser(self, prompt='Username : '):
        if sys.version_info[:2] >= (3, 0):
            sys.stdout.write('{}'.format(prompt))
            sys.stdout.flush()
            username = input()
        else:
            sys.stdout.write('{}'.format(prompt))
            sys.stdout.flush()
            username = raw_input()
        return username

    def _getpass(self, prompt='Password : '):
        sys.stdout.write('{}'.format(prompt))
        sys.stdout.flush()
        pw = ''
        while True:
            c = _win_getch() if os.name == 'nt' else self._unix_getch()
            if os.name == 'nt':
                if ord(c) == 13:
                    break
                if ord(c) == 3:
                    raise KeyboardInterrupt
                if ord(c) == 8:
                    if len(pw) > 0:
                        pw = pw[:-1]
                        s = '*' * len(pw)
                        sys.stdout.write('\x1b[2K\x1b[1G')
                        sys.stdout.flush()
                        sys.stdout.write('\r\r\r{}{}'.format(prompt, s))
                        sys.stdout.flush()
                    else:
                        pass
                elif ord(c) == 27:
                    pass
                elif ord(c) == 224:
                    c = _win_getch()
                    pass
                else:
                    if sys.version_info[:2] >= (3, 0):
                        pw = pw + c.decode('utf-8')
                    else:
                        pw = pw + c
                    sys.stdout.write('*')
                    sys.stdout.flush()
            else:
                if ord(c) == 13:
                    break
                if ord(c) == 3:
                    raise KeyboardInterrupt
                if ord(c) == 127:
                    if len(pw) > 0:
                        pw = pw[:-1]
                        s = '*' * len(pw)
                        sys.stdout.write('\x1b[2K\x1b[1G')
                        sys.stdout.flush()
                        sys.stdout.write('\r\r\r{}{}'.format(prompt, s))
                        sys.stdout.flush()
                    else:
                        pass
                elif ord(c) == 27:
                    pass
                elif ord(c) == 91 or ord(c) == 27:
                    c = self._unix_getch()
                    pass
                else:
                    pw = pw + c
                    sys.stdout.write('*')
                    sys.stdout.flush()
        return pw
def macos_kernel_api(params: Mapping[str, Any] = {}, passthru: bool = False):
    def decorator(func):
        def wrapper(ql: Qiling, pc: int, api_name: str):
            onenter = ql.os.user_defined_api[QL_INTERCEPT.ENTER].get(api_name)
            onexit = ql.os.user_defined_api[QL_INTERCEPT.EXIT].get(api_name)
            return ql.os.call(pc, func, params, onenter, onexit, passthru=passthru)
        return wrapper
    return decorator
def ConvertPatchMatrix(matrix, patch_number):
    h, w = matrix.shape[0], matrix.shape[1]
    sub_h, sub_w = h / patch_number, w / patch_number
    new_matrix = np.zeros((patch_number, patch_number), dtype=float)
    for i in range(patch_number):
        for j in range(patch_number):
            # The original chained [rows][rows] slicing selected rows twice;
            # a comma index is needed to take the 2-D block.
            new_matrix[i][j] = np.sum(matrix[int(i * sub_h):int((i + 1) * sub_h),
                                             int(j * sub_w):int((j + 1) * sub_w)])
    return new_matrix
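A small worked example of the fixed block summation, assuming only numpy; each output entry is the sum of one 2x2 block:

import numpy as np
m = np.arange(16, dtype=float).reshape(4, 4)
print(ConvertPatchMatrix(m, 2))
# [[10. 18.]
#  [42. 50.]]   e.g. 0 + 1 + 4 + 5 = 10 for the top-left block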
def test_file_remote(testdir):
    key = 'goog:chromeOptions'
    capabilities = {'browserName': 'chrome', key: {'args': ['foo']}}
    variables = testdir.makefile('.json', '{{"capabilities": {}}}'.format(json.dumps(capabilities)))
    # the '@pytest.mark' prefix inside the embedded test was stripped in the source; restored below
    file_test = testdir.makepyfile("""
        import pytest
        @pytest.mark.nondestructive
        def test_capabilities(session_capabilities, capabilities):
            assert session_capabilities['{0}']['args'] == ['foo']
            assert capabilities['{0}']['args'] == ['foo']
        """.format(key))
    testdir.quick_qa('--driver', 'Remote', '--variables', variables, file_test, passed=1)
def parse_date_or_none(date_string, delta=None, dayfirst=True, **timedelta_kwargs):
    if not isinstance(date_string, str) or not date_string:
        return None
    if delta and delta not in ('positive', 'negative'):
        raise ValueError("Invalid delta option. Options are 'positive' or 'negative'")
    try:
        parsed_date = parse(date_string, dayfirst=dayfirst)
    except (ValueError, OverflowError):
        return None
    if delta == 'negative':
        parsed_date = parsed_date - datetime.timedelta(**timedelta_kwargs)
    elif delta == 'positive':
        parsed_date = parsed_date + datetime.timedelta(**timedelta_kwargs)
    return timezone.make_aware(parsed_date)
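Assuming `parse` is dateutil's parser and `timezone` is Django's (as the calls suggest), the behaviour works out roughly as follows; these sample calls are illustrative, not from the source:

# parse_date_or_none('01/02/2020')                           -> aware datetime for 1 Feb 2020 (dayfirst=True)
# parse_date_or_none('01/02/2020', delta='positive', days=7) -> same moment shifted one week later
# parse_date_or_none('not a date')                           -> None
# parse_date_or_none('01/02/2020', delta='sideways')         -> raises ValueError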
def test_mkdm_simple_repr():
    dm = data.mkdm(matrix=[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
                   objectives=[min, max, min],
                   weights=[0.1, 0.2, 0.3])
    expected = (' C0[ 0.1] C1[ 0.2] C2[ 0.3]\n'
                'A0 1 2 3\n'
                'A1 4 5 6\n'
                'A2 7 8 9\n'
                '[3 Alternatives x 3 Criteria]')
    result = repr(dm)
    assert result == expected
def _get_layer_output_shape(layer: tf.keras.layers.Layer) -> Tuple:
    output_activation_shape = list(layer.output_shape)
    if len(output_activation_shape) == 4:
        reorder = [0, 3, 1, 2]  # NHWC -> NCHW
        output_activation_shape = [output_activation_shape[idx] for idx in reorder]
    if isinstance(layer, tf.keras.layers.Dense):
        output_activation_shape.extend([1, 1])
    return tuple(output_activation_shape)
class TestPollBase:
    id_ = 'id'
    question = 'Test?'
    options = [PollOption('test', 10), PollOption('test2', 11)]
    total_voter_count = 0
    is_closed = True
    is_anonymous = False
    type = Poll.REGULAR
    allows_multiple_answers = True
    # The string literal was unterminated in the source; closing quote and the
    # usual unicode-escape decode are restored here.
    explanation = b'\\U0001f469\\u200d\\U0001f469\\u200d\\U0001f467\\u200d\\U0001f467\\U0001f431'.decode('unicode-escape')
    explanation_entities = [MessageEntity(13, 17, MessageEntity.URL)]
    open_period = 42
    close_date = datetime.now(timezone.utc)
class TestSlabInfoCollector(CollectorTestCase):
    def setUp(self):
        config = get_collector_config('SlabInfoCollector', {'interval': 1})
        self.collector = SlabInfoCollector(config, None)

    def test_import(self):
        self.assertTrue(SlabInfoCollector)

    # the '@patch' / '@patch.object' decorator prefixes were stripped in the source; restored below
    @patch('__builtin__.open')
    @patch('os.access', Mock(return_value=True))
    @patch.object(Collector, 'publish')
    def test_should_open_proc_stat(self, publish_mock, open_mock):
        open_mock.return_value = StringIO('')
        self.collector.collect()
        open_mock.assert_called_once_with('/proc/slabinfo', 'r')

    @patch.object(Collector, 'publish')
    def test_should_work_with_real_data(self, publish_mock):
        SlabInfoCollector.PROC = self.getFixturePath('slabinfo')
        self.collector.collect()
        metrics = self.getPickledResults('expected.pkl')
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
class PdbInvoke:
    def pytest_exception_interact(self, node: Node, call: 'CallInfo[Any]',
                                  report: BaseReport) -> None:
        capman = node.config.pluginmanager.getplugin('capturemanager')
        if capman:
            capman.suspend_global_capture(in_=True)
            out, err = capman.read_global_capture()
            sys.stdout.write(out)
            sys.stdout.write(err)
        assert call.excinfo is not None
        if not isinstance(call.excinfo.value, unittest.SkipTest):
            _enter_pdb(node, call.excinfo, report)

    def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None:
        tb = _postmortem_traceback(excinfo)
        post_mortem(tb)
def coriolis_sideinfo_simple(qp: QP, env_sys: System):
    dp_j = env_sys.joint_revolute.apply(qp)
    dp_j += env_sys.joint_universal.apply(qp)
    dp_j += env_sys.joint_spherical.apply(qp)
    dp_vel = (env_sys.config.velocity_damping * qp.vel
              + dp_j.vel + vec_to_np(env_sys.config.gravity)) * env_sys.active_pos
    dp_ang = (env_sys.config.angular_damping * qp.ang + dp_j.ang) * env_sys.active_rot
    return dp_vel, dp_ang
class CmdCopy(ObjManipCommand):
    key = 'copy'
    switch_options = ('reset',)
    locks = 'cmd:perm(copy) or perm(Builder)'
    help_category = 'Building'

    def func(self):
        caller = self.caller
        args = self.args
        if not args:
            caller.msg('Usage: copy <obj> [=<new_name>[;alias;alias..]][:<new_location>] [, <new_name2>...]')
            return
        if not self.rhs:
            from_obj_name = self.args
            from_obj = caller.search(from_obj_name)
            if not from_obj:
                return
            to_obj_name = '%s_copy' % from_obj_name
            to_obj_aliases = ['%s_copy' % alias for alias in from_obj.aliases.all()]
            copiedobj = ObjectDB.objects.copy_object(from_obj, new_key=to_obj_name,
                                                     new_aliases=to_obj_aliases)
            if copiedobj:
                string = "Identical copy of %s, named '%s' was created." % (from_obj_name, to_obj_name)
            else:
                # interpolation added; the source left the '%s' unsubstituted
                string = 'There was an error copying %s.' % from_obj_name
        else:
            from_obj_name = self.lhs_objs[0]['name']
            from_obj = caller.search(from_obj_name)
            if not from_obj:
                return
            for objdef in self.rhs_objs:
                to_obj_name = objdef['name']
                to_obj_aliases = objdef['aliases']
                to_obj_location = objdef['option']
                if to_obj_location:
                    to_obj_location = caller.search(to_obj_location, global_search=True)
                    if not to_obj_location:
                        return
                copiedobj = ObjectDB.objects.copy_object(from_obj, new_key=to_obj_name,
                                                         new_location=to_obj_location,
                                                         new_aliases=to_obj_aliases)
                if copiedobj:
                    string = "Copied %s to '%s' (aliases: %s)." % (from_obj_name, to_obj_name, to_obj_aliases)
                else:
                    string = "There was an error copying %s to '%s'." % (from_obj_name, to_obj_name)
        caller.msg(string)
class ThriftClient(config.Parser):
    def __init__(self, client_cls: Any, **kwargs: Any):
        self.client_cls = client_cls
        self.kwargs = kwargs

    def parse(self, key_path: str, raw_config: config.RawConfig) -> ContextFactory:
        pool = thrift_pool_from_config(raw_config, prefix=f'{key_path}.', **self.kwargs)
        return ThriftContextFactory(pool, self.client_cls)
class Plane(Shape):
    _p3js_geometry_type = 'Plane'
    _p3js_attribute_map = {'width': 'width', 'height': 'length'}
    _p3js_material_attributes = {'side': 'DoubleSide'}

    def __init__(self, length=10.0, width=5.0, **kwargs):
        super(Plane, self).__init__(**kwargs)
        self.geometry_attrs += ['length', 'width']
        self.length = length
        self.width = width
class ParaphraseMiningEvaluator(SentenceEvaluator):
    def __init__(self, sentences_map: Dict[str, str],
                 duplicates_list: List[Tuple[str, str]] = None,
                 duplicates_dict: Dict[str, Dict[str, bool]] = None,
                 add_transitive_closure: bool = False,
                 query_chunk_size: int = 5000,
                 corpus_chunk_size: int = 100000,
                 max_pairs: int = 500000,
                 top_k: int = 100,
                 show_progress_bar: bool = False,
                 batch_size: int = 16,
                 name: str = '',
                 write_csv: bool = True):
        self.sentences = []
        self.ids = []
        for id, sentence in sentences_map.items():
            self.sentences.append(sentence)
            self.ids.append(id)
        self.name = name
        self.show_progress_bar = show_progress_bar
        self.batch_size = batch_size
        self.query_chunk_size = query_chunk_size
        self.corpus_chunk_size = corpus_chunk_size
        self.max_pairs = max_pairs
        self.top_k = top_k
        self.duplicates = (duplicates_dict if duplicates_dict is not None
                           else defaultdict(lambda: defaultdict(bool)))
        if duplicates_list is not None:
            for id1, id2 in duplicates_list:
                if id1 in sentences_map and id2 in sentences_map:
                    self.duplicates[id1][id2] = True
                    self.duplicates[id2][id1] = True
        if add_transitive_closure:
            self.duplicates = self.add_transitive_closure(self.duplicates)
        positive_key_pairs = set()
        for key1 in self.duplicates:
            for key2 in self.duplicates[key1]:
                if (key1 in sentences_map and key2 in sentences_map
                        and (self.duplicates[key1][key2] or self.duplicates[key2][key1])):
                    positive_key_pairs.add(tuple(sorted([key1, key2])))
        self.total_num_duplicates = len(positive_key_pairs)
        if name:
            name = '_' + name
        self.csv_file: str = 'paraphrase_mining_evaluation' + name + '_results.csv'
        self.csv_headers = ['epoch', 'steps', 'precision', 'recall', 'f1', 'threshold', 'average_precision']
        self.write_csv = write_csv

    def __call__(self, model, output_path: str = None, epoch: int = -1, steps: int = -1) -> float:
        if epoch != -1:
            out_txt = (f' after epoch {epoch}:' if steps == -1
                       else f' in epoch {epoch} after {steps} steps:')
        else:
            out_txt = ':'
        logger.info('Paraphrase Mining Evaluation on ' + self.name + ' dataset' + out_txt)
        pairs_list = paraphrase_mining(model, self.sentences, self.show_progress_bar,
                                       self.batch_size, self.query_chunk_size,
                                       self.corpus_chunk_size, self.max_pairs, self.top_k)
        logger.info('Number of candidate pairs: ' + str(len(pairs_list)))
        n_extract = n_correct = 0
        threshold = 0
        best_f1 = best_recall = best_precision = 0
        average_precision = 0
        for idx in range(len(pairs_list)):
            score, i, j = pairs_list[idx]
            id1 = self.ids[i]
            id2 = self.ids[j]
            n_extract += 1
            if self.duplicates[id1][id2] or self.duplicates[id2][id1]:
                n_correct += 1
                precision = n_correct / n_extract
                recall = n_correct / self.total_num_duplicates
                f1 = 2 * precision * recall / (precision + recall)
                average_precision += precision
                if f1 > best_f1:
                    best_f1 = f1
                    best_precision = precision
                    best_recall = recall
                    threshold = (pairs_list[idx][0] + pairs_list[min(idx + 1, len(pairs_list) - 1)][0]) / 2
        average_precision = average_precision / self.total_num_duplicates
        logger.info('Average Precision: {:.2f}'.format(average_precision * 100))
        logger.info('Optimal threshold: {:.4f}'.format(threshold))
        logger.info('Precision: {:.2f}'.format(best_precision * 100))
        logger.info('Recall: {:.2f}'.format(best_recall * 100))
        logger.info('F1: {:.2f}\n'.format(best_f1 * 100))
        if output_path is not None and self.write_csv:
            csv_path = os.path.join(output_path, self.csv_file)
            if not os.path.isfile(csv_path):
                with open(csv_path, mode='w', encoding='utf-8') as f:
                    writer = csv.writer(f)
                    writer.writerow(self.csv_headers)
                    writer.writerow([epoch, steps, best_precision, best_recall,
                                     best_f1, threshold, average_precision])
            else:
                with open(csv_path, mode='a', encoding='utf-8') as f:
                    writer = csv.writer(f)
                    writer.writerow([epoch, steps, best_precision, best_recall,
                                     best_f1, threshold, average_precision])
        return average_precision

    @staticmethod  # decorator restored: this helper takes the graph, not self
    def add_transitive_closure(graph):
        nodes_visited = set()
        for a in list(graph.keys()):
            if a not in nodes_visited:
                connected_subgraph_nodes = set()
                connected_subgraph_nodes.add(a)
                neighbor_nodes_queue = list(graph[a])
                while len(neighbor_nodes_queue) > 0:
                    node = neighbor_nodes_queue.pop(0)
                    if node not in connected_subgraph_nodes:
                        connected_subgraph_nodes.add(node)
                        neighbor_nodes_queue.extend(graph[node])
                connected_subgraph_nodes = list(connected_subgraph_nodes)
                for i in range(len(connected_subgraph_nodes) - 1):
                    for j in range(i + 1, len(connected_subgraph_nodes)):
                        graph[connected_subgraph_nodes[i]][connected_subgraph_nodes[j]] = True
                        graph[connected_subgraph_nodes[j]][connected_subgraph_nodes[i]] = True
                        nodes_visited.add(connected_subgraph_nodes[i])
                        nodes_visited.add(connected_subgraph_nodes[j])
        return graph
def call_api(input_json: Dict[str, Any]) -> Dict[str, Any]:
    url = ''  # the endpoint URL was elided in the source; left blank here
    headers = {'Content-Type': 'application/json'}
    response = requests.get(url, headers=headers, params=input_json)
    if response.status_code == 200:
        return response.json()
    else:
        return {'status_code': response.status_code, 'text': response.text}
class XupPl(BaseDecrypter):
    __name__ = 'XupPl'
    __type__ = 'decrypter'
    __version__ = '0.16'
    __status__ = 'testing'
    __pattern__ = ''  # the URL regex was elided in the source; left blank here
    __config__ = [('enabled', 'bool', 'Activated', True),
                  ('use_premium', 'bool', 'Use premium account if available', True),
                  ('folder_per_package', 'Default;Yes;No', 'Create folder for each package', 'Default')]
    __description__ = 'Xup.pl decrypter plugin'
    __license__ = 'GPLv3'
    __authors__ = [('z00nx', '')]

    def decrypt(self, pyfile):
        header = self.load(pyfile.url, just_header=True)
        if 'location' in header:
            self.links = [header.get('location')]
        else:
            self.fail(self._('Unable to find link'))
def to_np(item, use_copy=False, dtype=None):
    use_copy = use_copy or np.isscalar(item) or not equal(get_dtype(item), dtype)
    if isinstance(item, (str, bytes)):
        return np.array([item], dtype=object)
    elif is_seq_of(item, Number):
        return np.array(item, dtype=get_dtype(item[0]) if dtype is None else dtype)
    if is_np(item):
        kwargs = {} if dtype is None else {'dtype': dtype}
        return np.array(item, **kwargs) if use_copy else item
    elif is_torch(item):
        item = item.detach().cpu().numpy()
        return to_np(item, False, dtype)
    else:
        return item
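Assuming the module's helper predicates (is_np, is_torch, is_seq_of, get_dtype, equal) behave as their names suggest, the dispatch works out roughly as follows (illustrative, not from the source):

# to_np([1, 2, 3])       -> np.array([1, 2, 3])             (sequence of numbers)
# to_np('abc')           -> np.array(['abc'], dtype=object)
# to_np(np.zeros(3))     -> the same array, unless a copy is forced
# to_np(torch.zeros(3))  -> detached CPU numpy conversion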
@pytest.mark.filterwarnings('default::pytest.PytestUnhandledThreadExceptionWarning')  # '@pytest.mark' prefix restored
def test_unhandled_thread_exception(pytester: Pytester) -> None:
    pytester.makepyfile(test_it='\n import threading\n\n def test_it():\n def oops():\n raise ValueError("Oops")\n\n t = threading.Thread(target=oops, name="MyThread")\n t.start()\n t.join()\n\n def test_2(): pass\n ')
    result = pytester.runpytest()
    assert result.ret == 0
    assert result.parseoutcomes() == {'passed': 2, 'warnings': 1}
    result.stdout.fnmatch_lines([
        '*= warnings summary =*',
        'test_it.py::test_it',
        ' * PytestUnhandledThreadExceptionWarning: Exception in thread MyThread',
        ' ',
        ' Traceback (most recent call last):',
        ' ValueError: Oops',
        ' ',
        ' warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg))',
    ])
def run_step(context):
    logger.debug('started')
    context.assert_key_has_value(key='fetchJson', caller=__name__)
    fetch_json_input = context.get_formatted('fetchJson')
    if isinstance(fetch_json_input, str):
        file_path = fetch_json_input
        destination_key = None
        encoding = config.default_encoding
    else:
        assert_key_has_value(obj=fetch_json_input, key='path', caller=__name__, parent='fetchJson')
        file_path = fetch_json_input['path']
        destination_key = fetch_json_input.get('key', None)
        encoding = fetch_json_input.get('encoding', config.default_encoding)
    logger.debug('attempting to open file: %s', file_path)
    with open(file_path, encoding=encoding) as json_file:
        payload = json.load(json_file)
    if destination_key:
        logger.debug('json file loaded. Writing to context %s', destination_key)
        context[destination_key] = payload
    else:
        if not isinstance(payload, Mapping):
            raise TypeError(
                'json input should describe an object at the top level when '
                "fetchJson.key isn't specified. You should have something like "
                '{"key1": "value1", "key2": "value2"} in the json top-level, '
                'not ["value1", "value2"]')
        logger.debug('json file loaded. Merging into pypyr context. . .')
        context.update(payload)
    logger.info('json file written into pypyr context. Count: %s', len(payload))
    logger.debug('done')
@NECKS.register_module()  # assumed: the registry-decorator prefix was stripped in the source, leaving '_module()'
class NASFCOS_FPN(nn.Module):
    def __init__(self, in_channels, out_channels, num_outs, start_level=1,
                 end_level=-1, add_extra_convs=False, conv_cfg=None, norm_cfg=None):
        super(NASFCOS_FPN, self).__init__()
        assert isinstance(in_channels, list)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.num_ins = len(in_channels)
        self.num_outs = num_outs
        self.norm_cfg = norm_cfg
        self.conv_cfg = conv_cfg
        if end_level == -1:
            self.backbone_end_level = self.num_ins
            assert num_outs >= self.num_ins - start_level
        else:
            self.backbone_end_level = end_level
            assert end_level <= len(in_channels)
            assert num_outs == end_level - start_level
        self.start_level = start_level
        self.end_level = end_level
        self.add_extra_convs = add_extra_convs
        self.adapt_convs = nn.ModuleList()
        for i in range(self.start_level, self.backbone_end_level):
            adapt_conv = ConvModule(in_channels[i], out_channels, 1, stride=1,
                                    padding=0, bias=False, norm_cfg=dict(type='BN'),
                                    act_cfg=dict(type='ReLU', inplace=False))
            self.adapt_convs.append(adapt_conv)
        extra_levels = num_outs - self.backbone_end_level + self.start_level

        def build_concat_cell(with_input1_conv, with_input2_conv):
            cell_conv_cfg = dict(kernel_size=1, padding=0, bias=False, groups=out_channels)
            return ConcatCell(in_channels=out_channels, out_channels=out_channels,
                              with_out_conv=True, out_conv_cfg=cell_conv_cfg,
                              out_norm_cfg=dict(type='BN'),
                              out_conv_order=('norm', 'act', 'conv'),
                              with_input1_conv=with_input1_conv,
                              with_input2_conv=with_input2_conv,
                              input_conv_cfg=conv_cfg, input_norm_cfg=norm_cfg,
                              upsample_mode='nearest')

        self.fpn = nn.ModuleDict()
        self.fpn['c22_1'] = build_concat_cell(True, True)
        self.fpn['c22_2'] = build_concat_cell(True, True)
        self.fpn['c32'] = build_concat_cell(True, False)
        self.fpn['c02'] = build_concat_cell(True, False)
        self.fpn['c42'] = build_concat_cell(True, True)
        self.fpn['c36'] = build_concat_cell(True, True)
        self.fpn['c61'] = build_concat_cell(True, True)
        self.extra_downsamples = nn.ModuleList()
        for i in range(extra_levels):
            extra_act_cfg = None if i == 0 else dict(type='ReLU', inplace=False)
            self.extra_downsamples.append(
                ConvModule(out_channels, out_channels, 3, stride=2, padding=1,
                           act_cfg=extra_act_cfg, order=('act', 'norm', 'conv')))

    def forward(self, inputs):
        feats = [adapt_conv(inputs[i + self.start_level])
                 for i, adapt_conv in enumerate(self.adapt_convs)]
        for i, module_name in enumerate(self.fpn):
            idx_1, idx_2 = int(module_name[1]), int(module_name[2])
            res = self.fpn[module_name](feats[idx_1], feats[idx_2])
            feats.append(res)
        ret = []
        for idx, input_idx in zip([9, 8, 7], [1, 2, 3]):
            feats1, feats2 = feats[idx], feats[5]
            feats2_resize = F.interpolate(feats2, size=feats1.size()[2:],
                                          mode='bilinear', align_corners=False)
            feats_sum = feats1 + feats2_resize
            ret.append(F.interpolate(feats_sum, size=inputs[input_idx].size()[2:],
                                     mode='bilinear', align_corners=False))
        for submodule in self.extra_downsamples:
            ret.append(submodule(ret[-1]))
        return tuple(ret)

    def init_weights(self):
        for module in self.fpn.values():
            if hasattr(module, 'conv_out'):
                caffe2_xavier_init(module.out_conv.conv)
        for modules in [self.adapt_convs.modules(), self.extra_downsamples.modules()]:
            for module in modules:
                if isinstance(module, nn.Conv2d):
                    caffe2_xavier_init(module)
@given('a hyperlink having address {address} and fragment {fragment}')  # '@given' step decorator restored
def given_a_hyperlink_having_address_and_fragment(context: Context, address: str, fragment: str):
    # NOTE: several URL-valued addresses were stripped from the source; the
    # distinct '<address elided N>' placeholders below only mark where they
    # appeared, and the entry for index 6 is missing entirely.
    paragraph_idxs: Dict[Tuple[str, str], int] = {
        ("''", 'linkedBookmark'): 1,
        ('<address elided 2>', "''"): 2,
        ('<address elided 3>', "''"): 3,
        ('<address elided 4>', 'intro'): 4,
        ('<address elided 5>', "''"): 5,
        ('court-exif.jpg', "''"): 7,
    }
    paragraph_idx = paragraph_idxs[(address, fragment)]
    document = Document(test_docx('par-hlink-frags'))
    paragraph = document.paragraphs[paragraph_idx]
    context.hyperlink = paragraph.hyperlinks[0]
def get_seresnext(blocks, cardinality, bottleneck_width, model_name=None,
                  pretrained=False, root=os.path.join('~', '.torch', 'models'),
                  **kwargs):
    if blocks == 50:
        layers = [3, 4, 6, 3]
    elif blocks == 101:
        layers = [3, 4, 23, 3]
    else:
        raise ValueError('Unsupported SE-ResNeXt with number of blocks: {}'.format(blocks))
    init_block_channels = 64
    channels_per_layers = [256, 512, 1024, 2048]
    channels = [[ci] * li for ci, li in zip(channels_per_layers, layers)]
    net = SEResNeXt(channels=channels, init_block_channels=init_block_channels,
                    cardinality=cardinality, bottleneck_width=bottleneck_width, **kwargs)
    if pretrained:
        if model_name is None or not model_name:
            raise ValueError('Parameter `model_name` should be properly initialized '
                             'for loading pretrained model.')
        from .model_store import download_model
        download_model(net=net, model_name=model_name, local_model_store_dir_path=root)
    return net
@keras_export('keras.experimental.WarmupPiecewise')  # assumed: the exporter-decorator name was stripped in the source, leaving '_export(...)'
class WarmupPiecewise(LearningRateSchedule):
    def __init__(self, boundaries, values, warmup_steps, warmup_factor, gradual=True, name=None):
        super(WarmupPiecewise, self).__init__()
        if len(boundaries) != len(values) - 1:
            raise ValueError('The length of boundaries should be 1 less than the length of values')
        self.boundaries = boundaries
        self.values = values
        self.name = name
        self.warmup_steps = warmup_steps
        self.warmup_factor = warmup_factor
        self.gradual = gradual

    def __call__(self, step):
        with ops.name_scope_v2(self.name or 'WarmupPiecewise'):
            boundaries = ops.convert_n_to_tensor(self.boundaries)
            values = ops.convert_n_to_tensor(self.values)
            x_recomp = ops.convert_to_tensor_v2(step)
            x_recomp = math_ops.cast(x_recomp, values[0].dtype)
            warmup_steps = math_ops.cast(self.warmup_steps, values[0].dtype)
            w_fac = math_ops.cast(self.warmup_factor, values[0].dtype)
            for i, b in enumerate(boundaries):
                if b.dtype.base_dtype != x_recomp.dtype.base_dtype:
                    b = math_ops.cast(b, x_recomp.dtype.base_dtype)
                    boundaries[i] = b

            def compute_piecewise():
                pred_fn_pairs = []
                pred_fn_pairs.append((x_recomp <= boundaries[0], lambda: values[0]))
                pred_fn_pairs.append((x_recomp > boundaries[-1], lambda: values[-1]))
                for low, high, v in zip(boundaries[:-1], boundaries[1:], values[1:-1]):
                    pred = (x_recomp > low) & (x_recomp <= high)
                    pred_fn_pairs.append((pred, lambda v=v: v))
                default = lambda: values[0]
                return control_flow_ops.case(pred_fn_pairs, default, exclusive=True)

            def compute_step(warming_up=False):
                if warming_up:
                    completed_fraction = x_recomp / warmup_steps
                    if self.gradual:
                        gain = w_fac + (1 - w_fac) * completed_fraction
                    else:
                        gain = w_fac
                    return math_ops.multiply(values[0], gain)
                else:
                    return compute_piecewise()

            return control_flow_ops.cond(math_ops.less(x_recomp, warmup_steps),
                                         lambda: compute_step(warming_up=True),
                                         lambda: compute_step(warming_up=False))

    def get_config(self):
        return {'boundaries': self.boundaries, 'values': self.values,
                'warmup_steps': self.warmup_steps,
                'warmup_factor': self.warmup_factor, 'name': self.name}
def test_battery_background(fake_qtile, fake_window, monkeypatch):
    ok = BatteryStatus(state=BatteryState.DISCHARGING, percent=0.5, power=15.0, time=1729)
    low = BatteryStatus(state=BatteryState.DISCHARGING, percent=0.1, power=15.0, time=1729)
    low_background = 'ff0000'
    background = '000000'
    with monkeypatch.context() as manager:
        manager.setattr(battery, 'load_battery', dummy_load_battery(ok))
        batt = Battery(low_percentage=0.2, low_background=low_background, background=background)
    fakebar = FakeBar([batt], window=fake_window)
    batt._configure(fake_qtile, fakebar)
    assert batt.background == background
    batt._battery._status = low
    batt.poll()
    assert batt.background == low_background
    batt._battery._status = ok
    batt.poll()
    assert batt.background == background
class MSELoss(torch.nn.Module):
    def __init__(self):
        super(MSELoss, self).__init__()

    def forward(self, preds, heatmap_gt, weight):
        losses = (0.5 * weight) * ((preds - heatmap_gt) ** 2).mean(dim=3).mean(dim=2)
        back_loss = losses.mean(dim=1).mean(dim=0)
        return back_loss
class DebertaV2Converter(SpmConverter):
    def pre_tokenizer(self, replacement, add_prefix_space):
        list_pretokenizers = []
        if self.original_tokenizer.split_by_punct:
            list_pretokenizers.append(pre_tokenizers.Punctuation(behavior='isolated'))
        list_pretokenizers.append(pre_tokenizers.Metaspace(replacement=replacement,
                                                           add_prefix_space=add_prefix_space))
        return pre_tokenizers.Sequence(list_pretokenizers)

    def normalizer(self, proto):
        list_normalizers = []
        if self.original_tokenizer.do_lower_case:
            list_normalizers.append(normalizers.Lowercase())
        list_normalizers.append(normalizers.Strip())
        precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap
        if precompiled_charsmap:
            list_normalizers.append(normalizers.Precompiled(precompiled_charsmap))
        list_normalizers.append(normalizers.Replace(Regex(' {2,}'), ' '))
        return normalizers.Sequence(list_normalizers)

    def post_processor(self):
        return processors.TemplateProcessing(
            single='[CLS]:0 $A:0 [SEP]:0',
            pair='[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1',
            special_tokens=[
                ('[CLS]', self.original_tokenizer.convert_tokens_to_ids('[CLS]')),
                ('[SEP]', self.original_tokenizer.convert_tokens_to_ids('[SEP]')),
            ])
def combine_to_panoptic_multi_core(img_id2img, inst_by_image, sem_by_image,
                                   segmentations_folder, overlap_thr,
                                   stuff_area_limit, categories):
    cpu_num = multiprocessing.cpu_count()
    img_ids_split = np.array_split(list(img_id2img), cpu_num)
    print('Number of cores: {}, images per core: {}'.format(cpu_num, len(img_ids_split[0])))
    workers = multiprocessing.Pool(processes=cpu_num)
    processes = []
    for proc_id, img_ids in enumerate(img_ids_split):
        p = workers.apply_async(combine_to_panoptic_single_core,
                                (proc_id, img_ids, img_id2img, inst_by_image,
                                 sem_by_image, segmentations_folder, overlap_thr,
                                 stuff_area_limit, categories))
        processes.append(p)
    panoptic_json = []
    for p in processes:
        panoptic_json.extend(p.get())
    return panoptic_json
def get_dec_inp_targ_seqs(sequence, max_len, start_id, stop_id):
    inp = [start_id] + sequence[:]
    target = sequence[:]
    if len(inp) > max_len:
        inp = inp[:max_len]
        target = target[:max_len]
    else:
        target.append(stop_id)
    assert len(inp) == len(target)
    return inp, target
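A worked example of both branches; this is pure Python, so it runs as-is (the values are illustrative, not from the source):

inp, target = get_dec_inp_targ_seqs([7, 8, 9], 4, 0, 1)
# inp    == [0, 7, 8, 9]   starts with start_id and fits within max_len
# target == [7, 8, 9, 1]   gets stop_id appended; both have length 4
inp, target = get_dec_inp_targ_seqs([7, 8, 9, 5, 6], 4, 0, 1)
# inp    == [0, 7, 8, 9]   truncated to max_len
# target == [7, 8, 9, 5]   truncated too; note no stop_id when truncated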
def read_files(div):
    doc_file = '../raw_files/proc_output/' + 'doc_' + div
    keys_file = '../raw_files/proc_output/' + 'keys_' + div
    doc_dict = OrderedDict()
    keys_dict = OrderedDict()
    with open(doc_file) as f_doc:
        for line in f_doc:
            line_json = json.loads(line)
            docid = line_json['docid']
            doc_text = line_json['text']
            doc_dict[docid] = doc_text.lower()
    with open(keys_file) as f_keys:
        contents = f_keys.read()
    contents = contents.split('%%%')
    for content in contents:
        if not content:
            continue
        content = json.loads(content)
        key, docid = content[0][0], content[0][1]
        if key == 'message_id':
            if docid not in keys_dict:
                keys_dict[docid] = []
        template = OrderedDict()
        for tag, role in tag2role.items():
            template[role] = list()
            for keyval in content[1:]:
                key, value = keyval[0], keyval[1]
                if key == tag:
                    if tag == 'incident_type':
                        template[tag] = value.lower()
                        continue
                    if value:
                        candidate = []
                        for value_str in value['strings']:
                            candidate.append(value_str.lower())
                        template[role].append(candidate)
        if template['incident_type'] != '*':
            keys_dict[docid].append(template)
    return doc_dict, keys_dict
class GroupParameterItem(ParameterItem):
    def __init__(self, param, depth):
        ParameterItem.__init__(self, param, depth)
        self._initialFontPointSize = self.font(0).pointSize()
        self.updateDepth(depth)
        self.addItem = None
        if 'addText' in param.opts:
            addText = param.opts['addText']
            if 'addList' in param.opts:
                self.addWidget = QtWidgets.QComboBox()
                self.addWidget.setSizeAdjustPolicy(
                    QtWidgets.QComboBox.SizeAdjustPolicy.AdjustToContents)
                self.updateAddList()
                self.addWidget.currentIndexChanged.connect(self.addChanged)
            else:
                self.addWidget = QtWidgets.QPushButton(addText)
                self.addWidget.clicked.connect(self.addClicked)
            w = QtWidgets.QWidget()
            l = QtWidgets.QHBoxLayout()
            l.setContentsMargins(0, 0, 0, 0)
            w.setLayout(l)
            l.addWidget(self.addWidget)
            l.addStretch()
            self.addWidgetBox = w
            self.addItem = QtWidgets.QTreeWidgetItem([])
            self.addItem.setFlags(QtCore.Qt.ItemFlag.ItemIsEnabled)
            self.addItem.depth = self.depth + 1
            ParameterItem.addChild(self, self.addItem)
            self.addItem.setSizeHint(0, self.addWidgetBox.sizeHint())
        self.optsChanged(self.param, self.param.opts)

    def pointSize(self):
        return self._initialFontPointSize

    def updateDepth(self, depth):
        for c in [0, 1]:
            font = self.font(c)
            font.setBold(True)
            if depth == 0:
                font.setPointSize(self.pointSize() + 1)
            self.setFont(c, font)
        self.titleChanged()

    def addClicked(self):
        self.param.addNew()

    def addChanged(self):
        if self.addWidget.currentIndex() == 0:
            return
        typ = self.addWidget.currentText()
        self.param.addNew(typ)
        self.addWidget.setCurrentIndex(0)

    def treeWidgetChanged(self):
        ParameterItem.treeWidgetChanged(self)
        tw = self.treeWidget()
        if tw is None:
            return
        self.setFirstColumnSpanned(True)
        if self.addItem is not None:
            tw.setItemWidget(self.addItem, 0, self.addWidgetBox)
            self.addItem.setFirstColumnSpanned(True)

    def addChild(self, child):
        if self.addItem is not None:
            ParameterItem.insertChild(self, self.childCount() - 1, child)
        else:
            ParameterItem.addChild(self, child)

    def optsChanged(self, param, opts):
        ParameterItem.optsChanged(self, param, opts)
        if 'addList' in opts:
            self.updateAddList()
        if hasattr(self, 'addWidget'):
            if 'enabled' in opts:
                self.addWidget.setEnabled(opts['enabled'])
            if 'tip' in opts:
                self.addWidget.setToolTip(opts['tip'])

    def updateAddList(self):
        self.addWidget.blockSignals(True)
        try:
            self.addWidget.clear()
            self.addWidget.addItem(self.param.opts['addText'])
            for t in self.param.opts['addList']:
                self.addWidget.addItem(t)
        finally:
            self.addWidget.blockSignals(False)
def dense(name, x, units, dropout_rate=None, relu=True, layer_norm=False):
    with tfv1.variable_scope(name):
        bias = variable_on_cpu('bias', [units], tfv1.zeros_initializer())
        weights = variable_on_cpu(
            'weights', [x.shape[-1], units],
            tfv1.keras.initializers.VarianceScaling(scale=1.0, mode='fan_avg', distribution='uniform'))
    output = tf.nn.bias_add(tf.matmul(x, weights), bias)
    if relu:
        output = tf.minimum(tf.nn.relu(output), FLAGS.relu_clip)
    if layer_norm:
        with tfv1.variable_scope(name):
            output = tf.contrib.layers.layer_norm(output)
    if dropout_rate is not None:
        output = tf.nn.dropout(output, rate=dropout_rate)
    return output
# Decorator names below are reconstructed: the source stripped each '@'-prefixed
# fragment, leaving remnants such as '_bp.route', '_auth' and '_protect'.
@v1_bp.route('/images/<image_id>/ancestry', methods=['GET'])
@process_auth
@extract_namespace_repo_from_session
@ensure_namespace_enabled
@require_completion
@set_cache_headers
@anon_protect
def get_image_ancestry(namespace, repository, image_id, headers):
    logger.debug('Checking repo permissions')
    permission = ReadRepositoryPermission(namespace, repository)
    repository_ref = registry_model.lookup_repository(namespace, repository, kind_filter='image')
    if not permission.can() and not (repository_ref is not None and repository_ref.is_public):
        abort(403)
    logger.debug('Looking up repo image')
    legacy_image = registry_model.get_legacy_image(repository_ref, image_id, store)
    if legacy_image is None:
        abort(404, 'Image %(image_id)s not found', issue='unknown-image', image_id=image_id)
    response = make_response(json.dumps(legacy_image.full_image_id_chain), 200)
    response.headers.extend(headers)
    return response
class OmniDeltaLMDecoderLayer(DeltaLMDecoderLayer):
    def forward(self, x,
                encoder_out: Optional[torch.Tensor] = None,
                encoder_padding_mask: Optional[torch.Tensor] = None,
                self_attn_mask: Optional[torch.Tensor] = None,
                self_attn_padding_mask: Optional[torch.Tensor] = None,
                need_attn: bool = False,
                need_head_weights: bool = False,
                checkpointing_ffn=False,
                checkpointing_self_attn=False,
                checkpointing_cross_attn=False,
                stack=None, **kwargs):
        if need_head_weights:
            need_attn = True
        residual = x
        if self.normalize_before:
            x = self.self_attn_layer_norm(x)
        x, attn, _ = self.self_attn(hidden_states=x, attention_mask=self_attn_mask,
                                    output_attentions=False,
                                    checkpointing=checkpointing_self_attn)
        if not self.normalize_before:
            x = self.self_attn_layer_norm(x)
        residual = x
        if self.normalize_before:
            x = self.ffn_layer_norm(x)
        if self.fused and x.is_cuda:
            dropout_p = self.activation_dropout_module.p if self.training else 0.0
            weights = [self.fc3.weight, self.fc4.weight]
            biases = [self.fc3.bias, self.fc4.bias]
            x = self.fused_function(dropout_p, checkpointing_ffn, x, *weights, *biases)
        else:
            x = self.activation_fn(self.fc3(x))
            x = self.activation_dropout_module(x)
            x = self.fc4(x)
        x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
        if not self.normalize_before:
            x = self.ffn_layer_norm(x)
        if self.encoder_attn is not None and encoder_out is not None:
            residual = x
            if self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
            x, attn, _ = self.encoder_attn(hidden_states=x, key_value_states=encoder_out,
                                           attention_mask=encoder_padding_mask,
                                           output_attentions=False,
                                           checkpointing=checkpointing_cross_attn)
            x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
            if not self.normalize_before:
                x = self.encoder_attn_layer_norm(x)
        residual = x
        if self.normalize_before:
            x = self.final_layer_norm(x)
        if self.fused and x.is_cuda:
            dropout_p = self.activation_dropout_module.p if self.training else 0.0
            weights = [self.fc1.weight, self.fc2.weight]
            biases = [self.fc1.bias, self.fc2.bias]
            x = self.fused_function(dropout_p, checkpointing_ffn, x, *weights, *biases)
        else:
            x = self.activation_fn(self.fc1(x))
            x = self.activation_dropout_module(x)
            x = self.fc2(x)
        x = dropout_residual_connection(x, residual, self.dropout_module, self.training)
        if not self.normalize_before:
            x = self.final_layer_norm(x)
        return x, attn, None
def main():
    from Crypto.Signature import PKCS1_v1_5
    from Crypto.Hash import SHA256
    from Crypto.PublicKey import RSA
    import struct
    args = get_args()
    f = open(args.key, 'rb')
    key = RSA.importKey(f.read())
    f.close()
    f = open(args.inf, 'rb')
    img = f.read()
    f.close()
    signer = PKCS1_v1_5.new(key)
    h = SHA256.new()
    digest_len = h.digest_size
    sig_len = len(signer.sign(h))
    img_size = len(img)
    # The next two constants were garbled in the source ('magic = img_type = 1'
    # and 'algo = shdr = ...'); the values below are assumptions based on the
    # OP-TEE signed-header convention, not confirmed by the original.
    magic = 0x4F545348   # assumed 'HSTO' header magic
    img_type = 1
    algo = 0x70004830    # assumed TEE_ALG_RSASSA_PKCS1_V1_5_SHA256
    shdr = struct.pack('<IIIIHH', magic, img_type, img_size, algo, digest_len, sig_len)
    shdr_uuid = args.uuid.bytes
    shdr_version = struct.pack('<I', args.version)
    h.update(shdr)
    h.update(shdr_uuid)
    h.update(shdr_version)
    h.update(img)
    sig = signer.sign(h)
    f = open(args.out, 'wb')
    f.write(shdr)
    f.write(h.digest())
    f.write(sig)
    f.write(shdr_uuid)
    f.write(shdr_version)
    f.write(img)
    f.close()
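# Hedged companion sketch: unpacking the signed header written by main() above,
# assuming the '<IIIIHH' layout and field order used there.
import struct

def parse_signed_header(blob):
    # Four uint32s followed by two uint16s: magic, img_type, img_size, algo,
    # digest_len, sig_len.
    fields = struct.unpack_from('<IIIIHH', blob, 0)
    names = ('magic', 'img_type', 'img_size', 'algo', 'digest_len', 'sig_len')
    return dict(zip(names, fields))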
def train_triple(model, train_queue, test_queue, optimizer, args, show=True): losses = [] start = time() model.train() for train_epoch in range(args.train_epochs): temp = [] for (ps_train, qs_train, rs_train, labels_train) in train_queue: (inferences, regs) = model(ps_train.cuda(), qs_train.cuda(), rs_train.cuda()) loss = model.compute_loss(inferences, labels_train.cuda(), regs) loss.backward() nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip) optimizer.step() optimizer.zero_grad() model.zero_grad() temp.append(loss.cpu().detach().item()) losses.append(np.mean(temp)) if show: logging.info(('train_epoch: %d, loss: %.4f, rmse: %.4f[%.4f]' % (train_epoch, losses[(- 1)], evaluate_triple(model, test_queue), (time() - start)))) if (train_epoch > 100): if ((((losses[(- 2)] - losses[(- 1)]) / losses[(- 1)]) < (0.0001 / (len(train_queue) * args.batch_size))) or np.isnan(losses[(- 1)])): break
def generate_ann(root_path, split, image_infos, preserve_vertical, format): dst_image_root = osp.join(root_path, 'crops', split) ignore_image_root = osp.join(root_path, 'ignores', split) if (split == 'training'): dst_label_file = osp.join(root_path, f'train_label.{format}') elif (split == 'val'): dst_label_file = osp.join(root_path, f'val_label.{format}') mmcv.mkdir_or_exist(dst_image_root) mmcv.mkdir_or_exist(ignore_image_root) lines = [] for image_info in image_infos: index = 1 src_img_path = osp.join(root_path, 'imgs', image_info['file_name']) image = mmcv.imread(src_img_path) src_img_root = image_info['file_name'].split('.')[0] for anno in image_info['anno_info']: word = anno['word'] dst_img = crop_img(image, anno['bbox'], 0, 0) (h, w, _) = dst_img.shape dst_img_name = f'{src_img_root}_{index}.png' index += 1 if (min(dst_img.shape) == 0): continue if ((not preserve_vertical) and ((h / w) > 2)): dst_img_path = osp.join(ignore_image_root, dst_img_name) else: dst_img_path = osp.join(dst_image_root, dst_img_name) mmcv.imwrite(dst_img, dst_img_path) if (format == 'txt'): lines.append(f'{osp.basename(dst_image_root)}/{dst_img_name} {word}') elif (format == 'jsonl'): lines.append(json.dumps({'filename': f'{osp.basename(dst_image_root)}/{dst_img_name}', 'text': word}, ensure_ascii=False)) else: raise NotImplementedError list_to_file(dst_label_file, lines)
def test_unveiled_blocks(skip_qtbot): cosmetic_patches = AM2RCosmeticPatches(unveiled_blocks=True) dialog = AM2RCosmeticPatchesDialog(None, cosmetic_patches) skip_qtbot.addWidget(dialog) skip_qtbot.mouseClick(dialog.unveiled_blocks_check, QtCore.Qt.MouseButton.LeftButton) assert (dialog.cosmetic_patches == AM2RCosmeticPatches(unveiled_blocks=False))
def test_shape(): shape = Shape(name='shape', color='blue', material='DIRT') assert (shape.name == 'shape') assert (shape.__str__() == 'Shape shape color:blue material:DIRT') assert (shape.__repr__() == 'Shape') assert (shape.color == 'blue') assert (shape.material == 'DIRT') shape.name = 'shape1' assert (shape.name == 'shape1') with assert_raises(TypeError): shape.name = 1 shape.color = 'red' assert (shape.color == 'red') with assert_raises(ValueError): shape.color = 'puke' shape.material = 'water' assert (shape.material == 'water') shape.material = 'WATER' assert (shape.material == 'WATER') with assert_raises(ValueError): shape.material = 'fluffy cloth' assert (shape.generate_dict() == {'color': 'red', 'type': 'Shape', 'name': 'shape1', 'material': 'WATER'}) assert isinstance(shape, Shape) shape_ = Shape(color='blue') assert (shape_.name == 'unnamed') assert (shape_.__str__() == 'Shape unnamed color:blue material:default') assert (shape_.__repr__() == 'Shape')
def _check_relfile(relname, rootdir, kind):
    if os.path.isabs(relname):
        raise ValueError(f'{relname!r} is absolute, expected relative')
    actual = os.path.join(rootdir, relname)
    if kind == 'dir':
        if not os.path.isdir(actual):
            raise ValueError(f'directory {actual!r} does not exist')
    elif kind == 'file':
        if not os.path.isfile(actual):
            raise ValueError(f'file {actual!r} does not exist')
    elif kind == 'any':
        if not os.path.exists(actual):
            raise ValueError(f'{actual!r} does not exist')
    elif kind:
        raise NotImplementedError(kind)
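# Minimal hedged example of _check_relfile(): validating that a relative
# sub-path exists under a root directory. The paths here are hypothetical.
import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, 'configs'))
_check_relfile('configs', root, 'dir')           # passes: directory exists
# _check_relfile('/etc/passwd', root, 'file')    # would raise: absolute path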
def create_user_noverify(username, email, email_required=True, prompts=tuple(), is_possible_abuser=False):
    if email_required:
        if not validate_email(email):
            raise InvalidEmailAddressException('Invalid email address: %s' % email)
    else:
        email = email or str(uuid.uuid4())
    (username_valid, username_issue) = validate_username(username)
    if not username_valid:
        raise InvalidUsernameException('Invalid namespace %s: %s' % (username, username_issue))
    try:
        existing = User.get((User.username == username) | (User.email == email))
        logger.debug('Existing user with same username or email.')
        if existing.username == username:
            assert not existing.robot
            msg = ('Username has already been taken by an organization and cannot be reused: %s' % username)
            if not existing.organization:
                msg = ('Username has already been taken by a user and cannot be reused: %s' % username)
            raise InvalidUsernameException(msg)
        raise InvalidEmailAddressException('Email has already been used: %s' % email)
    except User.DoesNotExist:
        logger.debug('Email and username are unique!')
    try:
        default_expr_s = _convert_to_s(config.app_config['DEFAULT_TAG_EXPIRATION'])
        default_max_builds = config.app_config.get('DEFAULT_NAMESPACE_MAXIMUM_BUILD_COUNT')
        threat_max_builds = config.app_config.get('THREAT_NAMESPACE_MAXIMUM_BUILD_COUNT')
        if is_possible_abuser and (threat_max_builds is not None):
            default_max_builds = threat_max_builds
        new_user = User.create(username=username, email=email, removed_tag_expiration_s=default_expr_s, maximum_queued_builds_count=default_max_builds)
        for prompt in prompts:
            create_user_prompt(new_user, prompt)
        return new_user
    except Exception as ex:
        raise DataModelException(ex)
@_funcify.register(Eigh)
def numba_funcify_Eigh(op, node, **kwargs):
    uplo = op.UPLO
    if uplo != 'L':
        warnings.warn('Numba will use object mode to allow the `UPLO` argument to `numpy.linalg.eigh`.', UserWarning)
        out_dtypes = tuple(o.type.numpy_dtype for o in node.outputs)
        ret_sig = numba.types.Tuple([get_numba_type(node.outputs[0].type), get_numba_type(node.outputs[1].type)])

        @_basic.numba_njit
        def eigh(x):
            with numba.objmode(ret=ret_sig):
                out = np.linalg.eigh(x, UPLO=uplo)
                ret = (out[0].astype(out_dtypes[0]), out[1].astype(out_dtypes[1]))
            return ret
    else:
        @_basic.numba_njit(inline='always')
        def eigh(x):
            return np.linalg.eigh(x)
    return eigh
class LearnerConfig(object): def __init__(self, episodic, train_learner, eval_learner, pretrained_checkpoint, checkpoint_for_eval, embedding_network, learning_rate, decay_learning_rate, decay_every, decay_rate, experiment_name, pretrained_source): if (checkpoint_for_eval and pretrained_checkpoint): raise ValueError('Cannot define both `checkpoint_for_eval` and `pretrained_checkpoint`. The difference between them is that for the former, all variables are restored (including the global step), while the latter is only applicable to the start of training for initializing the model from pre-trained weights. It is also only applicable to episodic models and restores only the embedding weights.') self.episodic = episodic self.train_learner = train_learner self.eval_learner = eval_learner self.pretrained_checkpoint = pretrained_checkpoint self.checkpoint_for_eval = checkpoint_for_eval self.embedding_network = embedding_network self.learning_rate = learning_rate self.decay_learning_rate = decay_learning_rate self.decay_every = decay_every self.decay_rate = decay_rate self.experiment_name = experiment_name self.pretrained_source = pretrained_source
class TagsFromPath(Gtk.VBox): title = _('Tags From Path') FILTERS = [UnderscoresToSpaces, TitleCase, SplitTag] handler = TagsFromPathPluginHandler() def init_plugins(cls): PluginManager.instance.register_handler(cls.handler) def __init__(self, parent, library): super().__init__(spacing=6) self.set_border_width(12) hbox = Gtk.HBox(spacing=6) cbes_defaults = TBP_EXAMPLES.split('\n') self.combo = ComboBoxEntrySave(TBP, cbes_defaults, title=_('Path Patterns'), edit_title=_('Edit saved patterns...')) self.combo.show_all() hbox.pack_start(self.combo, True, True, 0) self.preview = qltk.Button(_('_Preview'), Icons.VIEW_REFRESH) self.preview.show() hbox.pack_start(self.preview, False, True, 0) self.pack_start(hbox, False, True, 0) self.combo.get_child().connect('changed', self._changed) model = ObjectStore() self.view = Gtk.TreeView(model=model) self.view.show() sw = Gtk.ScrolledWindow() sw.set_shadow_type(Gtk.ShadowType.IN) sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC) sw.add(self.view) self.pack_start(sw, True, True, 0) vbox = Gtk.VBox() addreplace = Gtk.ComboBoxText() addreplace.append_text(_('Tags replace existing ones')) addreplace.append_text(_('Tags are added to existing ones')) addreplace.set_active(config.getboolean('tagsfrompath', 'add')) addreplace.connect('changed', self.__add_changed) vbox.pack_start(addreplace, True, True, 0) addreplace.show() self.pack_start(vbox, False, True, 0) filter_box = FilterPluginBox(self.handler, self.FILTERS) filter_box.connect('preview', self.__filter_preview) filter_box.connect('changed', self.__filter_changed) self.filter_box = filter_box self.pack_start(filter_box, False, True, 0) self.save = qltk.Button(_('_Save'), Icons.DOCUMENT_SAVE) self.save.show() bbox = Gtk.HButtonBox() bbox.set_layout(Gtk.ButtonBoxStyle.END) bbox.pack_start(self.save, True, True, 0) self.pack_start(bbox, False, True, 0) connect_obj(self.preview, 'clicked', self.__preview, None) connect_obj(parent, 'changed', self.__class__.__preview, self) connect_obj(self.save, 'clicked', self.__save, addreplace, library) for child in self.get_children(): child.show() def __filter_preview(self, *args): Gtk.Button.clicked(self.preview) def __filter_changed(self, *args): self._changed(self.combo.get_child()) def _changed(self, entry): self.save.set_sensitive(False) self.preview.set_sensitive(bool(entry.get_text())) def __add_changed(self, combo): config.set('tagsfrompath', 'add', str(bool(combo.get_active()))) def __preview(self, songs): if (songs is None): songs = [row[0].song for row in (self.view.get_model() or [])] if songs: pattern_text = self.combo.get_child().get_text() else: pattern_text = '' try: pattern = TagsFromPattern(pattern_text) except re.error: qltk.ErrorMessage(self, _('Invalid pattern'), (_('The pattern\n\t%s\nis invalid. 
Possibly it contains the same tag twice or it has unbalanced brackets (&lt; / &gt;).') % util.bold(pattern_text)), escape_desc=False).run() return else: if pattern_text: self.combo.prepend_text(pattern_text) self.combo.write(TBP) invalid = [] for header in pattern.headers: if (not min([song.can_change(header) for song in songs])): invalid.append(header) total = len(invalid) if (total and songs): title = ngettext('Invalid tag', 'Invalid tags', total) msg = ngettext('Invalid tag %s\n\nThe files currently selected do not support editing this tag.', 'Invalid tags %s\n\nThe files currently selected do not support editing these tags.', total) tags_str = util.bold(', '.join(invalid)) qltk.ErrorMessage(self, title, (msg % tags_str), escape_desc=False).run() pattern = TagsFromPattern('') self.view.set_model(None) model = ObjectStore() for col in self.view.get_columns(): self.view.remove_column(col) render = Gtk.CellRendererText() col = TreeViewColumn(title=_('File')) col.pack_start(render, True) col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE) def cell_data_file(column, cell, model, iter_, data): entry = model.get_value(iter_) cell.set_property('text', entry.name) col.set_cell_data_func(render, cell_data_file) def cell_data_header(column, cell, model, iter_, header): entry = model.get_value(iter_) cell.set_property('text', entry.get_match(header)) self.view.append_column(col) for (_i, header) in enumerate(pattern.headers): render = Gtk.CellRendererText() render.set_property('editable', True) render.connect('edited', self.__row_edited, model, header) escaped_title = header.replace('_', '__') col = Gtk.TreeViewColumn(escaped_title, render) col.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE) col.set_cell_data_func(render, cell_data_header, header) self.view.append_column(col) for song in songs: entry = ListEntry(song) match = pattern.match(song) for h in pattern.headers: text = match.get(h, '') for f in self.filter_box.filters: if f.active: text = f.filter(h, text) if (not song.can_multiple_values(h)): text = ', '.join(text.split('\n')) entry.matches[h] = text model.append([entry]) if songs: self.view.set_model(model) self.preview.set_sensitive(False) self.save.set_sensitive((len(pattern.headers) > 0)) def __save(self, addreplace, library): pattern_text = self.combo.get_child().get_text() pattern = TagsFromPattern(pattern_text) model = self.view.get_model() add = bool(addreplace.get_active()) win = WritingWindow(self, len(model)) win.show() was_changed = set() all_done = False for entry in ((model and model.values()) or []): song = entry.song changed = False if (not song.valid()): win.hide() dialog = OverwriteWarning(self, song) resp = dialog.run() win.show() if (resp != OverwriteWarning.RESPONSE_SAVE): break for (_i, h) in enumerate(pattern.headers): text = entry.get_match(h) if text: can_multiple = song.can_multiple_values(h) if ((not add) or (h not in song) or (not can_multiple)): song[h] = text changed = True else: for val in text.split('\n'): if (val not in song.list(h)): song.add(h, val) changed = True if changed: try: song.write() except AudioFileError: util.print_exc() WriteFailedError(self, song).run() library.reload(song, changed=was_changed) break was_changed.add(song) if win.step(): break else: all_done = True win.destroy() library.changed(was_changed) self.save.set_sensitive((not all_done)) def __row_edited(self, renderer, path, new, model, header): entry = model[path][0] if (entry.get_match(header) != new): entry.replace_match(header, new) self.preview.set_sensitive(True) 
self.save.set_sensitive(True)
def if_api_available(method: Callable) -> Callable: def decorated(self, *args, **kwargs): if (not self.rest_api.available): msg = 'Service unavailable. Try again later.' return api_error(msg, HTTPStatus.SERVICE_UNAVAILABLE) return method(self, *args, **kwargs) return decorated
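# Hedged sketch of if_api_available in use on a REST resource class. The class
# names here are hypothetical; only the decorator contract (self.rest_api.available
# gates the call) comes from the code above.
class NodeResource:
    def __init__(self, rest_api):
        self.rest_api = rest_api

    @if_api_available
    def get(self):
        return {'status': 'ok'}

class _Api:
    available = True

print(NodeResource(_Api()).get())   # {'status': 'ok'} when the API is available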
class _LocalUnboundNameFinder(_UnboundNameFinder): def __init__(self, pyobject, parent): super().__init__(pyobject) self.parent = parent def _get_root(self): return self.parent._get_root() def is_bound(self, primary, propagated=False): name = primary.split('.')[0] if propagated: names = self.pyobject.get_scope().get_propagated_names() else: names = self.pyobject.get_scope().get_names() return ((name in names) or self.parent.is_bound(name, propagated=True)) def add_unbound(self, name): self.parent.add_unbound(name)
@_module()
class DiceLoss(nn.Module):
    def __init__(self, eps=1e-06):
        super().__init__()
        assert isinstance(eps, float)
        self.eps = eps

    def forward(self, pred, target, mask=None):
        pred = pred.contiguous().view(pred.size()[0], -1)
        target = target.contiguous().view(target.size()[0], -1)
        if mask is not None:
            mask = mask.contiguous().view(mask.size()[0], -1)
            pred = pred * mask
            target = target * mask
        a = torch.sum(pred * target)
        b = torch.sum(pred)
        c = torch.sum(target)
        d = (2 * a) / (b + c + self.eps)
        return 1 - d
def pytest_configure(config: Config) -> None: xmlpath = config.option.xmlpath if (xmlpath and (not hasattr(config, 'workerinput'))): junit_family = config.getini('junit_family') config.stash[xml_key] = LogXML(xmlpath, config.option.junitprefix, config.getini('junit_suite_name'), config.getini('junit_logging'), config.getini('junit_duration_report'), junit_family, config.getini('junit_log_passing_tests')) config.pluginmanager.register(config.stash[xml_key])
@pytest.mark.parametrize(['sparse', 'dtype'], [pytest.param(True, 'csr', id='sparse'), pytest.param(False, 'csr', id='sparse2dense'), pytest.param(False, 'dense', id='dense')])
def test_eigen_known_oper(sparse, dtype):
    N = qutip.num(10, dtype=dtype)
    (spvals, spvecs) = N.eigenstates(sparse=sparse)
    expected = np.arange(10)
    is_eigen_set(N, spvals, spvecs)
    np.testing.assert_allclose(spvals, expected, atol=1e-13)
class Effect3513(BaseEffect):
    runTime = 'early'
    type = 'passive'

    @staticmethod
    def handler(fit, implant, context, projectionRange, **kwargs):
        fit.appliedImplants.filteredItemMultiply((lambda mod: mod.item.group.name == 'Cyberimplant'), 'rangeSkillBonus', implant.getModifiedItemAttr('implantSetMordus'), **kwargs)
def run_step(context): logger.debug('started') context.assert_key_has_value('fileWriteJson', __name__) input_context = context.get_formatted('fileWriteJson') assert_key_has_value(obj=input_context, key='path', caller=__name__, parent='fileWriteJson') out_path = Path(input_context['path']) payload = input_context.get('payload', sentinel) encoding = input_context.get('encoding', config.default_encoding) logger.debug('opening destination file for writing: %s', out_path) out_path.parent.mkdir(parents=True, exist_ok=True) if (payload is sentinel): payload = context.get_formatted_value(context) else: payload = input_context['payload'] with open(out_path, 'w', encoding=encoding) as outfile: json.dump(payload, outfile, indent=config.json_indent, ensure_ascii=config.json_ascii) logger.info('formatted context content and wrote to %s', out_path) logger.debug('done')
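# Hedged usage sketch for the fileWriteJson step above, assuming pypyr's
# Context behaves as the step expects (formatting expressions resolve before
# the write). The path and payload are illustrative.
from pypyr.context import Context

context = Context({'fileWriteJson': {'path': 'out/report.json',
                                     'payload': {'status': 'done'}}})
run_step(context)   # writes {"status": "done"} to out/report.json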
class PatientIcomData(): def __init__(self, output_dir): self._data = {} self._usage_start = {} self._current_patient_data = {} self._output_dir = pathlib.Path(output_dir) def update_data(self, ip, data): try: if (self._data[ip][(- 1)][26] == data[26]): logging.warning('Skip this data item, duplicate of previous data item.') if (self._data[ip][(- 1)] != data): raise ValueError('Duplicate ID, but not duplicate data!') return if (((self._data[ip][(- 1)][26] + 1) % 256) != data[26]): raise ValueError('Data stream appears to be arriving out of order') self._data[ip].append(data) except KeyError: self._data[ip] = [data] timestamp = data[8:26].decode() (shrunk_data, patient_id) = extract.extract(data, 'Patient ID') (shrunk_data, patient_name) = extract.extract(shrunk_data, 'Patient Name') (shrunk_data, machine_id) = extract.extract(shrunk_data, 'Machine ID') logging.info(f'IP: {ip} | Timestamp: {timestamp} | Patient ID: {patient_id} | Patient Name: {patient_name} | Machine ID: {machine_id}') try: usage_start = self._usage_start[ip] except KeyError: usage_start = None if (patient_id is not None): if (usage_start is None): self._current_patient_data[ip] = [] timestamp = data[8:26].decode() iso_timestamp = f'{timestamp[0:10]}T{timestamp[10:]}' self._usage_start[ip] = iso_timestamp logging.debug('Starting data collection for patient id %(patient_id)s. Recording started at %(usage_start)s.', {'usage_start': self._usage_start[ip], 'patient_id': patient_id}) self._current_patient_data[ip].append(data) logging.debug('iCOM stream appended to the data being collected for patient id %(patient_id)s.', {'patient_id': patient_id}) elif (not (usage_start is None)): logging.debug('Delivery that started at %(usage_start)s appears to have completed.', {'usage_start': usage_start}) save_patient_data(usage_start, self._current_patient_data[ip], self._output_dir) self._current_patient_data[ip] = None self._usage_start[ip] = None else: logging.debug('No delivery is currently being recorded.')
def decompose_bivector(F): c1 = F F2 = (F * F) if (F2 == 0): return ((+ F), (0 * e1)) c2 = (0.5 * F2(4)) c1_2 = (c1 * c1)[()] c2_2 = (c2 * c2)[()] lambs = np.roots([1, (- c1_2), c2_2]) F1 = (((c1 * c2) - (lambs[0] * c1)) / (lambs[1] - lambs[0])) F2 = (((c1 * c2) - (lambs[1] * c1)) / (lambs[0] - lambs[1])) return (F1, F2)
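# Hedged usage sketch for decompose_bivector(), assuming the clifford package
# and a 4D Euclidean algebra; note the function itself relies on a module-level
# `e1` blade being in scope for its early-return branch.
from clifford import Cl

layout, blades = Cl(4)
e1, e12, e34 = blades['e1'], blades['e12'], blades['e34']
F = (3 * e12) + (5 * e34)          # sum of two commuting simple blades
F1, F2 = decompose_bivector(F)
print(F1 + F2)                      # should reproduce F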
def dice_coefficient(pred, gt, smooth=1e-05): N = gt.shape[0] pred[(pred >= 1)] = 1 gt[(gt >= 1)] = 1 pred_flat = pred.reshape(N, (- 1)) gt_flat = gt.reshape(N, (- 1)) intersection = (pred_flat * gt_flat).sum(1) unionset = (pred_flat.sum(1) + gt_flat.sum(1)) dice = (((2 * intersection) + smooth) / (unionset + smooth)) return (dice.sum() / N)
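# Quick hedged check of dice_coefficient() with NumPy arrays. The function
# binarizes `pred` and `gt` in place, so copies are passed here.
import numpy as np

gt = np.zeros((1, 8, 8))
gt[0, 2:6, 2:6] = 1
pred = gt.copy()
print(dice_coefficient(pred.copy(), gt.copy()))   # ~1.0 for a perfect match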
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @_torch_neuroncore
    def test_trainer(self):
        distributed_args = f'''
            -m torch.distributed.launch
            --nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f'--output_dir {output_dir}'.split()
        cmd = [sys.executable] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
def test_compress(): obj1 = QobjEvo([[qeye(N), 't'], [qeye(N), 't'], [qeye(N), 't']]) assert (obj1.num_elements == 1) obj2 = QobjEvo([[qeye(N), 't'], [qeye(N), 't'], [qeye(N), 't']], compress=False) assert (obj2.num_elements == 3) _assert_qobjevo_equivalent(obj1, obj2) obj3 = obj2.copy() assert (obj3.num_elements == 3) obj3.compress() assert (obj3.num_elements == 1) _assert_qobjevo_equivalent(obj2, obj3)
class ShmemVecEnv(VecEnv): def __init__(self, env_fns, spaces=None): if spaces: (observation_space, action_space) = spaces else: logger.log('Creating dummy env object to get spaces') with logger.scoped_configure(format_strs=[]): dummy = env_fns[0]() (observation_space, action_space) = (dummy.observation_space, dummy.action_space) dummy.close() del dummy VecEnv.__init__(self, len(env_fns), observation_space, action_space) (self.obs_keys, self.obs_shapes, self.obs_dtypes) = obs_space_info(observation_space) self.obs_bufs = [{k: Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys} for _ in env_fns] self.parent_pipes = [] self.procs = [] for (env_fn, obs_buf) in zip(env_fns, self.obs_bufs): wrapped_fn = CloudpickleWrapper(env_fn) (parent_pipe, child_pipe) = Pipe() proc = Process(target=_subproc_worker, args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys)) proc.daemon = True self.procs.append(proc) self.parent_pipes.append(parent_pipe) proc.start() child_pipe.close() self.waiting_step = False self.specs = [f().spec for f in env_fns] self.viewer = None def reset(self): if self.waiting_step: logger.warn('Called reset() while waiting for the step to complete') self.step_wait() for pipe in self.parent_pipes: pipe.send(('reset', None)) return self._decode_obses([pipe.recv() for pipe in self.parent_pipes]) def step_async(self, actions): assert (len(actions) == len(self.parent_pipes)) for (pipe, act) in zip(self.parent_pipes, actions): pipe.send(('step', act)) def step_wait(self): outs = [pipe.recv() for pipe in self.parent_pipes] (obs, rews, dones, infos) = zip(*outs) return (self._decode_obses(obs), np.array(rews), np.array(dones), infos) def close_extras(self): if self.waiting_step: self.step_wait() for pipe in self.parent_pipes: pipe.send(('close', None)) for pipe in self.parent_pipes: pipe.recv() pipe.close() for proc in self.procs: proc.join() def get_images(self, mode='human'): for pipe in self.parent_pipes: pipe.send(('render', None)) return [pipe.recv() for pipe in self.parent_pipes] def _decode_obses(self, obs): result = {} for k in self.obs_keys: bufs = [b[k] for b in self.obs_bufs] o = [np.frombuffer(b.get_obj(), dtype=self.obs_dtypes[k]).reshape(self.obs_shapes[k]) for b in bufs] result[k] = np.array(o) return dict_to_obs(result)
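# Hedged usage sketch for ShmemVecEnv with gym environments; assumes gym and
# the baselines helpers referenced above (CloudpickleWrapper, obs_space_info,
# the VecEnv base class) are importable.
import gym

def make_env():
    return gym.make('CartPole-v1')

venv = ShmemVecEnv([make_env for _ in range(4)])
obs = venv.reset()
actions = [venv.action_space.sample() for _ in range(venv.num_envs)]
obs, rewards, dones, infos = venv.step(actions)
venv.close()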
@pytest.mark.parametrize('kwargs, is_valid', [({'choices_provider': fake_func}, True), ({'completer': fake_func}, True), ({'choices_provider': fake_func, 'completer': fake_func}, False)])
def test_apcustom_choices_callable_count(kwargs, is_valid):
    parser = Cmd2ArgumentParser()
    try:
        parser.add_argument('name', **kwargs)
        assert is_valid
    except ValueError as ex:
        assert not is_valid
        assert 'Only one of the following parameters' in str(ex)
def test_freqresp_warn_infinite(): sys_finite = ctrl.tf([1], [1, 0.01]) sys_infinite = ctrl.tf([1], [1, 0.01, 0]) np.testing.assert_almost_equal(sys_finite(0), 100) np.testing.assert_almost_equal(sys_finite(0, warn_infinite=False), 100) np.testing.assert_almost_equal(sys_finite(0, warn_infinite=True), 100) with pytest.warns(RuntimeWarning, match='divide by zero'): np.testing.assert_almost_equal(sys_infinite(0), complex(np.inf, np.nan)) with pytest.warns(RuntimeWarning, match='divide by zero'): np.testing.assert_almost_equal(sys_infinite(0, warn_infinite=True), complex(np.inf, np.nan)) np.testing.assert_almost_equal(sys_infinite(0, warn_infinite=False), complex(np.inf, np.nan)) sys_finite = ctrl.tf2ss(sys_finite) sys_infinite = ctrl.tf2ss(sys_infinite) np.testing.assert_almost_equal(sys_finite(0), 100) np.testing.assert_almost_equal(sys_finite(0, warn_infinite=False), 100) np.testing.assert_almost_equal(sys_finite(0, warn_infinite=True), 100) with pytest.warns(RuntimeWarning, match='singular matrix'): np.testing.assert_almost_equal(sys_infinite(0), complex(np.inf, np.nan)) with pytest.warns(RuntimeWarning, match='singular matrix'): np.testing.assert_almost_equal(sys_infinite(0, warn_infinite=True), complex(np.inf, np.nan)) np.testing.assert_almost_equal(sys_infinite(0, warn_infinite=False), complex(np.inf, np.nan))
class WeightedDiceLoss(nn.Module): def __init__(self, axis=((- 1), (- 2), (- 3)), smooth=1e-06): super().__init__() self.axis = axis self.smooth = smooth def forward(self, y_pred, y_truth): return (1 - torch.mean((((2 * torch.sum((y_pred * y_truth), dim=self.axis)) + self.smooth) / ((torch.sum(y_pred, dim=self.axis) + torch.sum(y_truth, dim=self.axis)) + self.smooth))))
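# Hedged usage sketch for WeightedDiceLoss on a batch of 3D probability maps:
# axis=(-1, -2, -3) reduces over D, H, W per sample before the batch mean.
import torch

criterion = WeightedDiceLoss()
y_pred = torch.rand(2, 1, 4, 4, 4)       # e.g. sigmoid outputs
y_true = (y_pred > 0.5).float()
loss = criterion(y_pred, y_true)
print(loss.item())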
class DatagramProtocolClient(asyncio.Protocol):
    def __init__(self, server, port, logger, client, retries=3, timeout=30):
        self.transport = None
        self.port = port
        self.server = server
        self.logger = logger
        self.retries = retries
        self.timeout = timeout
        self.client = client
        self.pending_requests = {}
        random_generator = random.SystemRandom()
        self.packet_id = random_generator.randrange(0, 256)
        self.timeout_future = None

    async def __timeout_handler__(self):
        try:
            while True:
                req2delete = []
                now = datetime.now()
                next_wake_up = self.timeout
                for (id, req) in self.pending_requests.items():
                    # Age of the request; the original computed the delta the
                    # wrong way round, which made it always non-positive.
                    secs = (now - req['send_date']).total_seconds()
                    if secs > self.timeout:
                        if req['retries'] == self.retries:
                            self.logger.debug('[%s:%d] For request %d executed all retries', self.server, self.port, id)
                            req['future'].set_exception(TimeoutError('Timeout on Reply'))
                            req2delete.append(id)
                        else:
                            req['send_date'] = now
                            req['retries'] += 1
                            self.logger.debug('[%s:%d] For request %d execute retry %d', self.server, self.port, id, req['retries'])
                            self.transport.sendto(req['packet'].RequestPacket())
                    elif next_wake_up > secs:
                        next_wake_up = secs
                for id in req2delete:
                    del self.pending_requests[id]
                await asyncio.sleep(next_wake_up)
        except asyncio.CancelledError:
            pass

    def send_packet(self, packet, future):
        if packet.id in self.pending_requests:
            raise Exception('Packet with id %d already present' % packet.id)
        self.pending_requests[packet.id] = {'packet': packet, 'creation_date': datetime.now(), 'retries': 0, 'future': future, 'send_date': datetime.now()}
        self.transport.sendto(packet.RequestPacket())

    def connection_made(self, transport):
        self.transport = transport
        socket = transport.get_extra_info('socket')
        self.logger.info('[%s:%d] Transport created with binding in %s:%d', self.server, self.port, socket.getsockname()[0], socket.getsockname()[1])
        pre_loop = asyncio.get_event_loop()
        asyncio.set_event_loop(loop=self.client.loop)
        self.timeout_future = asyncio.ensure_future(self.__timeout_handler__())
        asyncio.set_event_loop(loop=pre_loop)

    def error_received(self, exc):
        self.logger.error('[%s:%d] Error received: %s', self.server, self.port, exc)

    def connection_lost(self, exc):
        if exc:
            self.logger.warning('[%s:%d] Connection lost: %s', self.server, self.port, str(exc))
        else:
            self.logger.info('[%s:%d] Transport closed', self.server, self.port)

    def datagram_received(self, data, addr):
        try:
            reply = Packet(packet=data, dict=self.client.dict)
            if reply.code and (reply.id in self.pending_requests):
                req = self.pending_requests[reply.id]
                packet = req['packet']
                reply.dict = packet.dict
                reply.secret = packet.secret
                if packet.VerifyReply(reply, data):
                    req['future'].set_result(reply)
                    del self.pending_requests[reply.id]
                else:
                    self.logger.warning('[%s:%d] Ignore invalid reply for id %d: %s', self.server, self.port, reply.id, data)
            else:
                self.logger.warning('[%s:%d] Ignore invalid reply: %s', self.server, self.port, data)
        except Exception as exc:
            self.logger.error('[%s:%d] Error on decode packet: %s', self.server, self.port, exc)

    async def close_transport(self):
        if self.transport:
            self.logger.debug('[%s:%d] Closing transport...', self.server, self.port)
            self.transport.close()
            self.transport = None
        if self.timeout_future:
            self.timeout_future.cancel()
            await self.timeout_future
            self.timeout_future = None

    def create_id(self):
        self.packet_id = (self.packet_id + 1) % 256
        return self.packet_id

    def __str__(self):
        return 'DatagramProtocolClient(server=%s, port=%d)' % (self.server, self.port)

    def __call__(self):
        return self
@given('a Settings object {with_or_without} odd and even page headers as settings')
def given_a_Settings_object_with_or_without_odd_and_even_hdrs(context, with_or_without):
    testfile_name = {'with': 'doc-odd-even-hdrs', 'without': 'sct-section-props'}[with_or_without]
    context.settings = Document(test_docx(testfile_name)).settings
def get_network(config): if (config.data.image_size < 96): return functools.partial(NCSNv2, config=config) elif (96 <= config.data.image_size <= 128): return functools.partial(NCSNv2_128, config=config) elif (128 < config.data.image_size <= 256): return functools.partial(NCSNv2_256, config=config) else: raise NotImplementedError(f'No network suitable for {config.data.image_size}px implemented yet.')
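# Hedged sketch of how get_network() dispatches on image size; the config
# object here is a stand-in exposing only the field the function reads, and
# the NCSNv2 constructors are assumed to be in scope.
from types import SimpleNamespace

config = SimpleNamespace(data=SimpleNamespace(image_size=128))
net_ctor = get_network(config)   # -> functools.partial(NCSNv2_128, config=config)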
class OnnxConfig(ABC):
    default_fixed_batch = 2
    default_fixed_sequence = 8
    default_fixed_num_choices = 4
    torch_onnx_minimum_version = version.parse('1.8')
    _tasks_to_common_outputs = {
        'causal-lm': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}),
        'default': OrderedDict({'last_hidden_state': {0: 'batch', 1: 'sequence'}}),
        'image-classification': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}),
        'image-segmentation': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}, 'pred_boxes': {0: 'batch', 1: 'sequence'}, 'pred_masks': {0: 'batch', 1: 'sequence'}}),
        'masked-im': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}),
        'masked-lm': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}),
        'multiple-choice': OrderedDict({'logits': {0: 'batch'}}),
        'object-detection': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}, 'pred_boxes': {0: 'batch', 1: 'sequence'}}),
        'question-answering': OrderedDict({'start_logits': {0: 'batch', 1: 'sequence'}, 'end_logits': {0: 'batch', 1: 'sequence'}}),
        'seq2seq-lm': OrderedDict({'logits': {0: 'batch', 1: 'decoder_sequence'}}),
        'sequence-classification': OrderedDict({'logits': {0: 'batch'}}),
        'token-classification': OrderedDict({'logits': {0: 'batch', 1: 'sequence'}}),
    }

    def __init__(self, config: 'PretrainedConfig', task: str='default', patching_specs: List[PatchingSpec]=None):
        self._config = config
        if task not in self._tasks_to_common_outputs:
            raise ValueError(f'{task} is not a supported task, supported tasks: {self._tasks_to_common_outputs.keys()}')
        self.task = task
        self._patching_specs = []
        for spec in (patching_specs if patching_specs is not None else []):
            final_spec = spec
            if spec.orig_op is None:
                final_spec = dataclasses.replace(spec, orig_op=getattr(spec.o, spec.name))
            self._patching_specs.append(final_spec)

    @classmethod
    def from_model_config(cls, config: 'PretrainedConfig', task: str='default') -> 'OnnxConfig':
        return cls(config, task=task)

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        raise NotImplementedError()

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        common_outputs = self._tasks_to_common_outputs[self.task]
        return copy.deepcopy(common_outputs)

    @property
    def values_override(self) -> Optional[Mapping[str, Any]]:
        if hasattr(self._config, 'use_cache'):
            return {'use_cache': False}
        return None

    @property
    def default_batch_size(self) -> int:
        return OnnxConfig.default_fixed_batch

    @property
    def default_sequence_length(self) -> int:
        return OnnxConfig.default_fixed_sequence

    @property
    def default_num_choices(self) -> int:
        return OnnxConfig.default_fixed_num_choices

    @property
    def default_onnx_opset(self) -> int:
        return DEFAULT_ONNX_OPSET

    @property
    def atol_for_validation(self) -> float:
        return 1e-05

    @property
    def is_torch_support_available(self) -> bool:
        if is_torch_available():
            from transformers.utils import torch_version
            return torch_version >= self.torch_onnx_minimum_version
        else:
            return False

    @staticmethod
    def use_external_data_format(num_parameters: int) -> bool:
        return compute_serialized_parameters_size(num_parameters, ParameterFormat.Float) >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT

    def _generate_dummy_images(self, batch_size: int=2, num_channels: int=3, image_height: int=40, image_width: int=40):
        images = []
        for _ in range(batch_size):
            data = np.random.rand(image_height, image_width, num_channels) * 255
            images.append(Image.fromarray(data.astype('uint8')).convert('RGB'))
        return images

    def generate_dummy_inputs(self, preprocessor: Union['PreTrainedTokenizerBase', 'FeatureExtractionMixin'], batch_size: int=-1, seq_length: int=-1, num_choices: int=-1, is_pair: bool=False, framework: Optional[TensorType]=None, num_channels: int=3, image_width: int=40, image_height: int=40, tokenizer: 'PreTrainedTokenizerBase'=None) -> Mapping[str, Any]:
        from ..feature_extraction_utils import FeatureExtractionMixin
        from ..tokenization_utils_base import PreTrainedTokenizerBase
        if isinstance(preprocessor, PreTrainedTokenizerBase) and (tokenizer is not None):
            raise ValueError('You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.')
        if tokenizer is not None:
            warnings.warn('The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use `preprocessor` instead.', FutureWarning)
            logger.warning('Overwriting the `preprocessor` argument with `tokenizer` to generate dummy inputs.')
            preprocessor = tokenizer
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            dummy_input = [' '.join([preprocessor.unk_token]) * seq_length] * batch_size
            if self.task == 'multiple-choice':
                num_choices = compute_effective_axis_dimension(num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0)
                dummy_input = dummy_input * num_choices
                tokenized_input = preprocessor(dummy_input, text_pair=dummy_input)
                for (k, v) in tokenized_input.items():
                    tokenized_input[k] = [v[i:(i + num_choices)] for i in range(0, len(v), num_choices)]
                return dict(tokenized_input.convert_to_tensors(tensor_type=framework))
            return dict(preprocessor(dummy_input, return_tensors=framework))
        elif isinstance(preprocessor, FeatureExtractionMixin) and (preprocessor.model_input_names[0] == 'pixel_values'):
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            return dict(preprocessor(images=dummy_input, return_tensors=framework))
        else:
            raise ValueError('Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.')

    def patch_ops(self):
        for spec in self._patching_specs:
            custom_op = spec.custom_op if (spec.op_wrapper is None) else spec.op_wrapper(spec.custom_op)
            setattr(spec.o, spec.name, custom_op)

    def restore_ops(self):
        for spec in self._patching_specs:
            orig_op = spec.orig_op if (spec.op_wrapper is None) else spec.op_wrapper(spec.orig_op)
            setattr(spec.o, spec.name, orig_op)

    @classmethod
    def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]:
        from itertools import chain
        return {f'{name}.{idx}': item for (idx, item) in enumerate(chain.from_iterable(field))}
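# Hedged sketch of a minimal OnnxConfig subclass: only `inputs` must be
# supplied, exposed as a property as in the upstream transformers API. The
# model name and axis labels here are illustrative, not a specific upstream
# model config.
from collections import OrderedDict

class MyModelOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        return OrderedDict([
            ('input_ids', {0: 'batch', 1: 'sequence'}),
            ('attention_mask', {0: 'batch', 1: 'sequence'}),
        ])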
def loadIcons(): iconDir = os.path.join(pyzo.pyzoDir, 'resources', 'icons') dummyIcon = IconArtist().finish() pyzo.icons = ssdf.new() for fname in os.listdir(iconDir): if fname.endswith('.png'): try: name = fname.split('.')[0] name = name.replace('pyzo_', '') ffname = os.path.join(iconDir, fname) icon = QtGui.QIcon() icon.addFile(ffname, QtCore.QSize(16, 16)) pyzo.icons[name] = icon except Exception as err: pyzo.icons[name] = dummyIcon print(('Could not load icon %s: %s' % (fname, str(err))))
def test_populate_services_addresses(service_registry_address, private_keys, web3, contract_manager): (c1_service_proxy, _) = deploy_service_registry_and_set_urls(private_keys=private_keys, web3=web3, contract_manager=contract_manager, service_registry_address=service_registry_address) addresses = [privatekey_to_address(key) for key in private_keys] transport = Mock() populate_services_addresses(transport=transport, service_registry=c1_service_proxy, block_identifier=BLOCK_ID_LATEST) registered_services = list(transport.update_services_addresses.call_args[0][0].keys()) assert (len(registered_services) == 3) assert (sorted(addresses) == sorted(registered_services))
def convert_interactive(op):
    import tempfile
    import os
    import subprocess
    import queue
    from threading import Thread
    (fh, out_fn) = tempfile.mkstemp(suffix='.mrc')
    os.close(fh)
    cmd = [str(op['situs_pdb2vol_program']), op['pdb_file'], out_fn]
    print(cmd)
    proc = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False, universal_newlines=True)

    def enqueue_output(out, q):
        # With universal_newlines=True the pipes are text streams, so the
        # readline() sentinel must be '' (the original b'' never matches, which
        # would spin this loop forever).
        for line in iter(out.readline, ''):
            q.put(line)
        out.close()

    def getOutput(outQueue):
        outStr = ''
        try:
            while True:
                outStr += outQueue.get_nowait()
        except queue.Empty:
            return outStr

    outQueue = queue.Queue()
    errQueue = queue.Queue()
    outThread = Thread(target=enqueue_output, args=(proc.stdout, outQueue))
    errThread = Thread(target=enqueue_output, args=(proc.stderr, errQueue))
    outThread.daemon = True
    errThread.daemon = True
    outThread.start()
    errThread.start()
    while True:
        someInput = input('Input: ')   # Python 3: input() replaces raw_input()
        proc.stdin.write(someInput + '\n')
        proc.stdin.flush()
        errors = getOutput(errQueue)
        output = getOutput(outQueue)
        print('output:' + output)
        print('errors:' + errors)
        someInput = input('Input: ')
        proc.stdin.write(someInput + '\n')
        proc.stdin.flush()
    # Note: the loop above has no break in the original, so this cleanup is
    # unreachable as written.
    os.remove(out_fn)
    print('return')
    return op
class Wav2VecFeatureReader(object): def __init__(self, cp_file, layer): state = fairseq.checkpoint_utils.load_checkpoint_to_cpu(cp_file) self.layer = layer if ('cfg' in state): w2v_args = state['cfg'] task = fairseq.tasks.setup_task(w2v_args.task) model = task.build_model(w2v_args.model) else: w2v_args = state['args'] task = fairseq.tasks.setup_task(w2v_args) model = task.build_model(w2v_args) model.load_state_dict(state['model'], strict=True) model.eval() model.cuda() self.model = model def read_audio(self, fname): (wav, sr) = sf.read(fname) assert (sr == 16000.0) return wav def get_feats(self, loc): x = self.read_audio(loc) with torch.no_grad(): source = torch.from_numpy(x).view(1, (- 1)).float().cuda() res = self.model(source=source, mask=False, features_only=True, layer=self.layer) return res['layer_results'][self.layer][0].squeeze(1)
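# Hedged usage sketch for Wav2VecFeatureReader: extract layer-6 features from
# a 16 kHz mono wav. The checkpoint and audio paths are placeholders.
reader = Wav2VecFeatureReader('/path/to/wav2vec2.pt', layer=6)
feats = reader.get_feats('/path/to/utterance.wav')
print(feats.shape)   # (num_frames, feature_dim), resident on the GPU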
class BaseModel(pybamm.BaseSubModel): def __init__(self, param, domain, options, phase='primary'): super().__init__(param, domain, options=options, phase=phase) def _get_standard_active_material_variables(self, eps_solid): param = self.param phase_name = self.phase_name (domain, Domain) = self.domain_Domain if (eps_solid.domain == []): eps_solid = pybamm.PrimaryBroadcast(eps_solid, 'current collector') if (eps_solid.domain == ['current collector']): eps_solid = pybamm.PrimaryBroadcast(eps_solid, f'{domain} electrode') eps_solid_av = pybamm.x_average(eps_solid) variables = {f'{Domain} electrode {phase_name}active material volume fraction': eps_solid, f'X-averaged {domain} electrode {phase_name}active material volume fraction': eps_solid_av} if (self.options['particle shape'] == 'no particles'): a = self.phase_param.a variables.update({f'{Domain} electrode surface area to volume ratio [m-1]': a, f'X-averaged {domain} electrode surface area to volume ratio [m-1]': pybamm.x_average(a)}) return variables else: L = self.domain_param.L c_s_max = self.phase_param.c_max C = (((((pybamm.yz_average(eps_solid_av) * L) * param.A_cc) * c_s_max) * param.F) / 3600) if (phase_name == ''): variables.update({f'{Domain} electrode capacity [A.h]': C}) else: variables.update({f'{Domain} electrode {phase_name}phase capacity [A.h]': C}) domain_options = getattr(self.options, domain) if (domain_options['particle size'] == 'single'): R = self.phase_param.R elif (domain_options['particle size'] == 'distribution'): if (self.domain == 'negative'): R_ = pybamm.standard_spatial_vars.R_n elif (self.domain == 'positive'): R_ = pybamm.standard_spatial_vars.R_p R = pybamm.size_average(R_) R_av = pybamm.x_average(R) if (self.options['particle shape'] == 'spherical'): a = ((3 * eps_solid) / R) a_av = pybamm.x_average(a) a.print_name = f'a_{domain[0]}' a_av.print_name = f'a_av_{domain[0]}' variables.update({f'{Domain} {phase_name}particle radius': (R / self.phase_param.R_typ), f'{Domain} {phase_name}particle radius [m]': R, f'X-averaged {domain} {phase_name}particle radius [m]': R_av, f'{Domain} electrode {phase_name}surface area to volume ratio [m-1]': a, f'X-averaged {domain} electrode {phase_name}surface area to volume ratio [m-1]': a_av}) return variables def _get_standard_active_material_change_variables(self, deps_solid_dt): (domain, Domain) = self.domain_Domain if (deps_solid_dt.domain == ['current collector']): deps_solid_dt_av = deps_solid_dt deps_solid_dt = pybamm.PrimaryBroadcast(deps_solid_dt_av, f'{domain} electrode') else: deps_solid_dt_av = pybamm.x_average(deps_solid_dt) variables = {f'{Domain} electrode {self.phase_name}active material volume fraction change [s-1]': deps_solid_dt, f'X-averaged {domain} electrode {self.phase_name}active material volume fraction change [s-1]': deps_solid_dt_av} return variables
class TestPythonLayer(unittest.TestCase): def setUp(self): net_file = python_net_file() self.net = caffe.Net(net_file, caffe.TRAIN) os.remove(net_file) def test_forward(self): x = 8 self.net.blobs['data'].data[...] = x self.net.forward() for y in self.net.blobs['three'].data.flat: self.assertEqual(y, ((10 ** 3) * x)) def test_backward(self): x = 7 self.net.blobs['three'].diff[...] = x self.net.backward() for y in self.net.blobs['data'].diff.flat: self.assertEqual(y, ((10 ** 3) * x)) def test_reshape(self): s = 4 self.net.blobs['data'].reshape(s, s, s, s) self.net.forward() for blob in six.itervalues(self.net.blobs): for d in blob.data.shape: self.assertEqual(s, d) def test_exception(self): net_file = exception_net_file() self.assertRaises(RuntimeError, caffe.Net, net_file, caffe.TEST) os.remove(net_file) def test_parameter(self): net_file = parameter_net_file() net = caffe.Net(net_file, caffe.TRAIN) net.forward() net.backward() layer = net.layers[list(net._layer_names).index('layer')] self.assertEqual(layer.blobs[0].data[0], 0) self.assertEqual(layer.blobs[0].diff[0], 1) layer.blobs[0].data[0] += layer.blobs[0].diff[0] self.assertEqual(layer.blobs[0].data[0], 1) (h, caffemodel_file) = tempfile.mkstemp() net.save(caffemodel_file) layer.blobs[0].data[0] = (- 1) self.assertEqual(layer.blobs[0].data[0], (- 1)) net.copy_from(caffemodel_file) self.assertEqual(layer.blobs[0].data[0], 1) os.remove(caffemodel_file) net2 = caffe.Net(net_file, caffe.TRAIN) net2.share_with(net) layer = net.layers[list(net2._layer_names).index('layer')] self.assertEqual(layer.blobs[0].data[0], 1) os.remove(net_file)
@winsdkapi(cc=STDCALL, params={'hHandle': HANDLE, 'dwMilliseconds': DWORD})
def hook_WaitForSingleObject(ql: Qiling, address: int, params):
    hHandle = params['hHandle']
    handle = ql.os.handle_manager.get(hHandle)
    if handle:
        target_thread = handle.obj
        ql.os.thread_manager.cur_thread.waitfor(target_thread)
    return 0