code
stringlengths
281
23.7M
def _enumerate_dst_area_chunks(dst_area, dst_chunks):
    """Iterate over the chunks of the destination area.

    For every chunk described by *dst_chunks*, yields a ``(block_info,
    target_geo_def)`` pair where ``block_info`` mirrors dask's ``block_info``
    mapping and ``target_geo_def`` is the sub-area selected by the last two
    (row/column) slices of the chunk.
    """
    for chunk_pos, chunk_slices in _enumerate_chunk_slices(dst_chunks):
        shape = tuple(sizes[idx] for idx, sizes in zip(chunk_pos, dst_chunks))
        sub_area = dst_area[chunk_slices[-2:]]
        info = {
            'num-chunks': [len(sizes) for sizes in dst_chunks],
            'chunk-location': chunk_pos,
            'array-location': chunk_slices,
            'chunk-shape': shape,
            'area': sub_area,
        }
        yield info, sub_area
class Migration(migrations.Migration):
    """Add optional social-media contact fields (facebook_url,
    instagram_handle, linkedin_url) to the Participant model."""

    # Builds on the initial participants table migration.
    dependencies = [('participants', '0001_store_participants')]

    operations = [
        # All three fields are blank-able, so existing rows need no data migration.
        migrations.AddField(model_name='participant', name='facebook_url', field=models.CharField(blank=True, max_length=2048)),
        migrations.AddField(model_name='participant', name='instagram_handle', field=models.CharField(blank=True, max_length=30)),
        migrations.AddField(model_name='participant', name='linkedin_url', field=models.CharField(blank=True, max_length=2048))
    ]
def collect(workflow_prefix: str, force: bool) -> None:
    """Merge workflow data with the started-results CSV and persist the result.

    Does nothing (beyond logging) while workflows are still running, unless
    *force* is set.
    """
    started_path = build_started_results_path(workflow_prefix)
    if not started_path.exists():
        logger.warning('Started results are not found.')
        started = create_empty_dataframe_for_started_results()
    else:
        started = pd.read_csv(started_path)

    workflows = _get_workflows(workflow_prefix)
    if not (_workflows_finished(workflows) or force):
        logger.info('Not collecting. Workflows are still running. Use -f option to force collect.')
        return

    merged = _clean_results(_merge_workflows_and_started_results(workflows, started))
    # Stamp every row with a single collection timestamp.
    stamp = get_utc_now_timestamp()
    merged['collected_date'] = [stamp] * len(merged)
    _save_collected_results(workflow_prefix, merged)
    logger.info(f'Collected {len(merged)} workflows. Finished.')
class MockVirtualEnv(VirtualEnv):
    """Test double for VirtualEnv that allows overriding the reported sys.path."""

    def __init__(self, path: Path, base: (Path | None)=None, sys_path: (list[str] | None)=None) -> None:
        super().__init__(path, base=base)
        self._sys_path = sys_path

    @property
    def sys_path(self) -> list[str]:
        # `super().sys_path` is accessed without a call below, so the base
        # attribute is a property; the override must also be a property or
        # callers reading `env.sys_path` would get a bound method instead of
        # a list. (The decorator was missing on this override.)
        if self._sys_path is not None:
            return self._sys_path
        return super().sys_path
def test_storyboard_story_input():
    """Build a complete OpenSCENARIO storyboard (init actions, event, maneuver,
    maneuver group, act, story) and check element printing, equality,
    XML round-trip parsing and per-version validation."""
    init = OSC.Init()
    TD = OSC.TransitionDynamics(OSC.DynamicsShapes.step, OSC.DynamicsDimension.rate, 1)
    egospeed = OSC.AbsoluteSpeedAction(10, TD)
    # Same speed action reused for all three actors; each also gets a teleport.
    init.add_init_action('Ego', egospeed)
    init.add_init_action('Ego', OSC.TeleportAction(OSC.WorldPosition(1, 2, 3, 0, 0, 0)))
    init.add_init_action('Target_1', egospeed)
    init.add_init_action('Target_1', OSC.TeleportAction(OSC.WorldPosition(1, 5, 3, 0, 0, 0)))
    init.add_init_action('Target_2', egospeed)
    init.add_init_action('Target_2', OSC.TeleportAction(OSC.WorldPosition(10, 2, 3, 0, 0, 0)))
    prettyprint(init.get_element())
    # NOTE(review): `trigger` and `speedaction` are not defined in this
    # function — they presumably come from module-level fixtures; confirm.
    event = OSC.Event('myfirstevent', OSC.Priority.overwrite)
    event.add_trigger(trigger)
    event.add_action('newspeed', speedaction)
    man = OSC.Maneuver('my maneuver')
    man.add_event(event)
    prettyprint(man.get_element())
    mangr = OSC.ManeuverGroup('mangroup')
    mangr.add_actor('Ego')
    mangr.add_maneuver(man)
    prettyprint(mangr.get_element())
    act = OSC.Act('my act', trigger)
    act.add_maneuver_group(mangr)
    prettyprint(act.get_element())
    story = OSC.Story('mystory')
    story.add_act(act)
    prettyprint(story.get_element())
    sb = OSC.StoryBoard(init)
    sb.add_story(story)
    prettyprint(sb.get_element())
    # Equality: identical content compares equal, an extra story does not.
    sb2 = OSC.StoryBoard(init)
    sb2.add_story(story)
    sb3 = OSC.StoryBoard(init)
    sb3.add_story(story)
    sb3.add_story(story)
    assert (sb == sb2)
    assert (sb != sb3)
    # Round-trip through XML parsing preserves equality.
    sb4 = OSC.StoryBoard.parse(sb3.get_element())
    assert (sb3 == sb4)
    # The storyboard is valid for OpenSCENARIO versions 0, 1 and 2.
    assert (version_validation('Storyboard', sb, 0) == ValidationResponse.OK)
    assert (version_validation('Storyboard', sb, 1) == ValidationResponse.OK)
    assert (version_validation('Storyboard', sb, 2) == ValidationResponse.OK)
def test_eval_hmean_ic13():
    """Validate ICDAR-2013 hmean evaluation: argument checking plus the
    one2one, one2many and many2one matching scores."""
    det_boxes = []
    gt_boxes = []
    gt_ignored_boxes = []
    precision_thr = 0.4
    recall_thr = 0.8
    center_dist_thr = 1.0
    one2one_score = 1.0
    one2many_score = 0.8
    many2one_score = 1
    # Each argument is validated inside eval_hmean_ic13: wrong container types
    # and out-of-range thresholds/scores must raise AssertionError.
    with pytest.raises(AssertionError):
        hmean_ic13.eval_hmean_ic13([1], gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, center_dist_thr, one2one_score, one2many_score, many2one_score)
    with pytest.raises(AssertionError):
        hmean_ic13.eval_hmean_ic13(det_boxes, 1, gt_ignored_boxes, precision_thr, recall_thr, center_dist_thr, one2one_score, one2many_score, many2one_score)
    with pytest.raises(AssertionError):
        hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, 1, precision_thr, recall_thr, center_dist_thr, one2one_score, one2many_score, many2one_score)
    with pytest.raises(AssertionError):
        hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, 1.1, recall_thr, center_dist_thr, one2one_score, one2many_score, many2one_score)
    with pytest.raises(AssertionError):
        hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, 1.1, center_dist_thr, one2one_score, one2many_score, many2one_score)
    with pytest.raises(AssertionError):
        hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, (- 1), one2one_score, one2many_score, many2one_score)
    with pytest.raises(AssertionError):
        hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, center_dist_thr, (- 1), one2many_score, many2one_score)
    with pytest.raises(AssertionError):
        hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, center_dist_thr, one2one_score, (- 1), many2one_score)
    with pytest.raises(AssertionError):
        hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, center_dist_thr, one2one_score, one2many_score, (- 1))
    # one2one: first det matches the single gt exactly; second det is a
    # far-away false positive -> recall 1, precision 0.5.
    det_boxes = [[[0, 0, 1, 0, 1, 1, 0, 1], [10, 0, 11, 0, 11, 1, 10, 1]]]
    gt_boxes = [[[0, 0, 1, 0, 1, 1, 0, 1]]]
    gt_ignored_boxes = [[]]
    (dataset_result, img_result) = hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, center_dist_thr, one2one_score, one2many_score, many2one_score)
    assert (img_result[0]['recall'] == 1)
    assert (img_result[0]['precision'] == 0.5)
    assert math.isclose(img_result[0]['hmean'], ((2 * 0.5) / 1.5))
    # one2many: one gt covered by two dets scores one2many_score (0.8) on
    # both sides of the match.
    gt_boxes = [[[0, 0, 2, 0, 2, 1, 0, 1]]]
    det_boxes = [[[0, 0, 1, 0, 1, 1, 0, 1], [1, 0, 2, 0, 2, 1, 1, 1]]]
    (dataset_result, img_result) = hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, center_dist_thr, one2one_score, one2many_score, many2one_score)
    assert (img_result[0]['recall'] == 0.8)
    assert (img_result[0]['precision'] == (1.6 / 2))
    assert math.isclose(img_result[0]['hmean'], ((2 * 0.64) / 1.6))
    # many2one: two gts matched by one det score many2one_score (1) with a
    # relaxed precision threshold.
    precision_thr = 0.6
    recall_thr = 0.8
    det_boxes = [[[0, 0, 2, 0, 2, 1, 0, 1]]]
    gt_boxes = [[[0, 0, 1, 0, 1, 1, 0, 1], [1, 0, 2, 0, 2, 1, 1, 1]]]
    (dataset_result, img_result) = hmean_ic13.eval_hmean_ic13(det_boxes, gt_boxes, gt_ignored_boxes, precision_thr, recall_thr, center_dist_thr, one2one_score, one2many_score, many2one_score)
    assert (img_result[0]['recall'] == 1)
    assert (img_result[0]['precision'] == 1)
    assert math.isclose(img_result[0]['hmean'], 1)
def preformat_Peptides(dataset_dir, name):
    """Load and pre-format a Peptides dataset (LRGB).

    Args:
        dataset_dir: Directory where the dataset should be stored/loaded.
        name: Dataset name of the form 'peptides-functional' or
            'peptides-structural'.

    Returns:
        The dataset object with ``split_idxs`` set to the train/val/test
        index lists.

    Raises:
        ImportError (re-raised): when the dataset classes cannot be imported.
        ValueError: when the variant after the first '-' is unknown.
    """
    try:
        from graphgps.loader.dataset.peptides_functional import PeptidesFunctionalDataset
        from graphgps.loader.dataset.peptides_structural import PeptidesStructuralDataset
    except Exception as e:
        logging.error('ERROR: Failed to import Peptides dataset class, make sure RDKit is installed.')
        raise e
    dataset_type = name.split('-', 1)[1]
    if dataset_type == 'functional':
        dataset = PeptidesFunctionalDataset(dataset_dir)
    elif dataset_type == 'structural':
        dataset = PeptidesStructuralDataset(dataset_dir)
    else:
        # Previously an unknown variant fell through to a NameError on
        # `dataset`; fail with a clear message instead.
        raise ValueError(f"Unexpected Peptides dataset variant: {dataset_type!r}")
    s_dict = dataset.get_idx_split()
    dataset.split_idxs = [s_dict[s] for s in ['train', 'val', 'test']]
    return dataset
class RelationTreeTests(SimpleTestCase):
    """Checks the lazily-populated ``_relation_tree`` cache on model Options."""

    # Models whose Options cache is inspected by these tests.
    all_models = (CassandraThing,)

    def setUp(self):
        # Reset the app registry cache so each test starts without a
        # pre-populated relation tree.
        apps.clear_cache()

    def test_clear_cache_clears_relation_tree(self):
        # After clear_cache, no concrete model should carry a cached
        # '_relation_tree' entry in its Options __dict__.
        all_models_with_cache = (m for m in self.all_models if (not m._meta.abstract))
        for m in all_models_with_cache:
            self.assertNotIn('_relation_tree', m._meta.__dict__)

    def test_first_relation_tree_access_populates_all(self):
        # First access computes the tree; CassandraThing has no relations,
        # so the tree is empty.
        self.assertEqual(len(CassandraThing._meta._relation_tree), 0)
def generate_alias_id(chat):
    """Create a unique random alias id for *chat*.

    The alias has the same number of characters as the decimal chat id,
    drawn from letters and digits. The new alias is recorded in the
    module-level ``alias_ids`` and ``chat_ids`` registries before returning.
    """
    chat_id = chat.id
    title = chat.title
    alphabet = string.ascii_letters + string.digits
    length = len(str(chat_id))
    while True:
        candidate = ''.join(random.choice(alphabet) for _ in range(length))
        # Retry on collision with an already-issued alias.
        if candidate in alias_ids:
            continue
        alias_ids.append(candidate)
        chat_ids.append({'chat_id': chat_id, 'alias_id': candidate, 'title': title})
        return candidate
def dliate_erode(img, kernel):
    """Dilate then erode *img* with rectangular structuring elements of size
    derived from *kernel* (a closing-like morphological cleanup)."""
    dilate_se = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel, kernel))
    # NOTE(review): the erosion element is half-sized (kernel // 2 per side)
    # while the dilation element is full-sized — confirm this asymmetry is
    # intentional and not a typo.
    erode_se = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel // 2, kernel // 2))
    result = cv2.dilate(img, dilate_se)
    result = cv2.erode(result, erode_se)
    return result
def data_pre(dataset, task):
    """Compute a similarity score for every sentence pair in
    ../../data/dataset/<dataset>/<task>.tsv and write a new TSV with a
    'similarity' column appended.

    Scores are computed in parallel with Parallel Python (pp), one job per row.
    """
    test_data = pd.read_csv('../../data/dataset/{}/{}.tsv'.format(dataset, task), sep='\t')
    (text_a, text_b, label, similarity) = (test_data['text_a'], test_data['text_b'], test_data['labels'], [])
    # Local workers only; no remote pp servers configured.
    ppservers = ()
    job_server = pp.Server(ppservers=ppservers)
    # Modules each worker must import; get_synsets is shipped as a dependency
    # of get_similarity.
    modules = ('nltk.corpus', 'string')
    # One job per row: get_similarity receives the full columns plus the row index.
    jobs = [job_server.submit(get_similarity, (text_a, text_b, i), (get_synsets,), modules=modules) for i in range(len(label))]
    # Collect results in submission order so rows stay aligned.
    for job in jobs:
        similarity.append(job())
    data = pd.DataFrame(columns=['text_a', 'text_b', 'labels', 'similarity'])
    data['text_a'] = text_a
    data['text_b'] = text_b
    data['labels'] = label
    data['similarity'] = similarity
    data.to_csv('../../data/dataset/{}/{}_similarity.tsv'.format(dataset, task), sep='\t', encoding='utf-8', index=False)
class Babel():
    """Central Flask-Babel extension object.

    Wires locale/timezone configuration into a Flask app and (optionally)
    registers Jinja formatting filters and gettext callables.
    """

    # Fallback format names; None entries defer to the base
    # 'time'/'date'/'datetime' entry at lookup time.
    default_date_formats = ImmutableDict({'time': 'medium', 'date': 'medium', 'datetime': 'medium', 'time.short': None, 'time.medium': None, 'time.full': None, 'time.long': None, 'date.short': None, 'date.medium': None, 'date.full': None, 'date.long': None, 'datetime.short': None, 'datetime.medium': None, 'datetime.full': None, 'datetime.long': None})

    def __init__(self, app=None, date_formats=None, configure_jinja=True, *args, **kwargs):
        self._configure_jinja = configure_jinja
        self.date_formats = date_formats
        # Support both the factory pattern (init_app later) and direct init.
        if (app is not None):
            self.init_app(app, *args, **kwargs)

    def init_app(self, app, default_locale='en', default_domain='messages', default_translation_directories='translations', default_timezone='UTC', locale_selector=None, timezone_selector=None):
        """Register this extension on *app*, reading BABEL_* config keys and
        falling back to the given defaults."""
        if (not hasattr(app, 'extensions')):
            app.extensions = {}
        # Multiple directories are separated by ';' in config.
        directories = app.config.get('BABEL_TRANSLATION_DIRECTORIES', default_translation_directories).split(';')
        app.extensions['babel'] = BabelConfiguration(default_locale=app.config.get('BABEL_DEFAULT_LOCALE', default_locale), default_timezone=app.config.get('BABEL_DEFAULT_TIMEZONE', default_timezone), default_domain=app.config.get('BABEL_DOMAIN', default_domain), default_directories=directories, translation_directories=list(self._resolve_directories(directories, app)), instance=self, locale_selector=locale_selector, timezone_selector=timezone_selector)
        if (self.date_formats is None):
            self.date_formats = self.default_date_formats.copy()
        if self._configure_jinja:
            # Expose the babel format helpers as Jinja filters and install
            # gettext callables that resolve translations per request.
            app.jinja_env.filters.update(datetimeformat=format_datetime, dateformat=format_date, timeformat=format_time, timedeltaformat=format_timedelta, numberformat=format_number, decimalformat=format_decimal, currencyformat=format_currency, percentformat=format_percent, scientificformat=format_scientific)
            app.jinja_env.add_extension('jinja2.ext.i18n')
            app.jinja_env.install_gettext_callables(gettext=(lambda s: get_translations().ugettext(s)), ngettext=(lambda s, p, n: get_translations().ungettext(s, p, n)), newstyle=True, pgettext=(lambda c, s: get_translations().upgettext(c, s)), npgettext=(lambda c, s, p, n: get_translations().unpgettext(c, s, p, n)))

    def list_translations(self):
        """Return a Locale for every translation directory entry containing at
        least one compiled .mo file; the default locale is always included."""
        result = []
        for dirname in get_babel().translation_directories:
            if (not os.path.isdir(dirname)):
                continue
            for folder in os.listdir(dirname):
                locale_dir = os.path.join(dirname, folder, 'LC_MESSAGES')
                if (not os.path.isdir(locale_dir)):
                    continue
                if any((x.endswith('.mo') for x in os.listdir(locale_dir))):
                    result.append(Locale.parse(folder))
        if (self.default_locale not in result):
            result.append(self.default_locale)
        return result

    # NOTE(review): `self.default_locale` above is used without a call, so the
    # three accessors below were presumably @property-decorated upstream; the
    # decorators appear to have been lost in extraction — confirm.
    def default_locale(self) -> Locale:
        return Locale.parse(get_babel().default_locale)

    def default_timezone(self) -> timezone:
        return timezone(get_babel().default_timezone)

    def domain(self) -> str:
        return get_babel().default_domain

    # NOTE(review): the bare `_property` below looks like a mangled decorator
    # (likely @property or a cached variant) — confirm against upstream.
    _property
    def domain_instance(self):
        return Domain(domain=self.domain)

    # NOTE(review): no `self` parameter — probably a @staticmethod upstream.
    def _resolve_directories(directories: List[str], app=None):
        # Absolute paths pass through; relative paths resolve against the app root.
        for path in directories:
            if os.path.isabs(path):
                (yield path)
            elif (app is not None):
                (yield os.path.join(app.root_path, path))
class TestHTLCManager(ElectrumTestCase):
    """Exercises HTLCManager state transitions for a two-party channel.

    Managers A and B model the two peers; helpers send/recv htlc, ctx
    (commitment) and rev (revocation) messages to drive the protocol.
    """

    def test_adding_htlcs_race(self):
        """Both peers add an HTLC concurrently and the managers must agree on
        which ctx (oldest-unrevoked / latest / next) contains which HTLC."""
        A = HTLCManager(StoredDict({}, None, []))
        B = HTLCManager(StoredDict({}, None, []))
        A.channel_open_finished()
        B.channel_open_finished()
        (ah0, bh0) = (H('A', 0), H('B', 0))
        B.recv_htlc(A.send_htlc(ah0))
        self.assertEqual(B.log[REMOTE]['locked_in'][0][LOCAL], 1)
        A.recv_htlc(B.send_htlc(bh0))
        # Neither HTLC is in the latest (signed) ctx yet, only in the next one.
        self.assertEqual(B.get_htlcs_in_latest_ctx(LOCAL), [])
        self.assertEqual(A.get_htlcs_in_latest_ctx(LOCAL), [])
        self.assertEqual(B.get_htlcs_in_next_ctx(LOCAL), [(RECEIVED, ah0)])
        self.assertEqual(A.get_htlcs_in_next_ctx(LOCAL), [(RECEIVED, bh0)])
        A.send_ctx()
        B.recv_ctx()
        B.send_ctx()
        A.recv_ctx()
        # Signed but not yet revoked: latest ctx has the received HTLC.
        self.assertEqual(B.get_htlcs_in_oldest_unrevoked_ctx(LOCAL), [])
        self.assertEqual(A.get_htlcs_in_oldest_unrevoked_ctx(LOCAL), [])
        self.assertEqual(B.get_htlcs_in_latest_ctx(LOCAL), [(RECEIVED, ah0)])
        self.assertEqual(A.get_htlcs_in_latest_ctx(LOCAL), [(RECEIVED, bh0)])
        B.send_rev()
        A.recv_rev()
        A.send_rev()
        B.recv_rev()
        self.assertEqual(B.get_htlcs_in_oldest_unrevoked_ctx(LOCAL), [(RECEIVED, ah0)])
        self.assertEqual(A.get_htlcs_in_oldest_unrevoked_ctx(LOCAL), [(RECEIVED, bh0)])
        self.assertEqual(B.get_htlcs_in_latest_ctx(LOCAL), [(RECEIVED, ah0)])
        self.assertEqual(A.get_htlcs_in_latest_ctx(LOCAL), [(RECEIVED, bh0)])
        A.send_ctx()
        B.recv_ctx()
        B.send_ctx()
        A.recv_ctx()
        # Second round: both directions now appear in the latest ctx.
        self.assertEqual(B.get_htlcs_in_oldest_unrevoked_ctx(LOCAL), [(RECEIVED, ah0)])
        self.assertEqual(A.get_htlcs_in_oldest_unrevoked_ctx(LOCAL), [(RECEIVED, bh0)])
        self.assertEqual(B.get_htlcs_in_latest_ctx(LOCAL), [(RECEIVED, ah0), (SENT, bh0)][::(- 1)])
        self.assertEqual(A.get_htlcs_in_latest_ctx(LOCAL), [(RECEIVED, bh0), (SENT, ah0)][::(- 1)])
        B.send_rev()
        A.recv_rev()
        A.send_rev()
        B.recv_rev()
        self.assertEqual(B.get_htlcs_in_oldest_unrevoked_ctx(LOCAL), [(RECEIVED, ah0), (SENT, bh0)][::(- 1)])
        self.assertEqual(A.get_htlcs_in_oldest_unrevoked_ctx(LOCAL), [(RECEIVED, bh0), (SENT, ah0)][::(- 1)])
        self.assertEqual(B.get_htlcs_in_latest_ctx(LOCAL), [(RECEIVED, ah0), (SENT, bh0)][::(- 1)])
        self.assertEqual(A.get_htlcs_in_latest_ctx(LOCAL), [(RECEIVED, bh0), (SENT, ah0)][::(- 1)])

    def test_single_htlc_full_lifecycle(self):
        """Drive one HTLC from add through settle/fail to removal, checking
        ctx membership at each protocol step, for both outcomes."""
        def htlc_lifecycle(htlc_success: bool):
            A = HTLCManager(StoredDict({}, None, []))
            B = HTLCManager(StoredDict({}, None, []))
            A.channel_open_finished()
            B.channel_open_finished()
            B.recv_htlc(A.send_htlc(H('A', 0)))
            self.assertEqual(len(B.get_htlcs_in_next_ctx(REMOTE)), 0)
            self.assertEqual(len(A.get_htlcs_in_next_ctx(REMOTE)), 1)
            self.assertEqual(len(B.get_htlcs_in_next_ctx(LOCAL)), 1)
            self.assertEqual(len(A.get_htlcs_in_next_ctx(LOCAL)), 0)
            # Full commit/revoke round locks the HTLC in on both sides.
            A.send_ctx()
            B.recv_ctx()
            B.send_rev()
            A.recv_rev()
            B.send_ctx()
            A.recv_ctx()
            A.send_rev()
            B.recv_rev()
            self.assertEqual(len(A.get_htlcs_in_latest_ctx(LOCAL)), 1)
            self.assertEqual(len(B.get_htlcs_in_latest_ctx(LOCAL)), 1)
            # Settle or fail the HTLC (both remove it; only settle counts as sent/received).
            if htlc_success:
                B.send_settle(0)
                A.recv_settle(0)
            else:
                B.send_fail(0)
                A.recv_fail(0)
            self.assertEqual(list(A.htlcs_by_direction(REMOTE, RECEIVED).values()), [H('A', 0)])
            self.assertNotEqual(A.get_htlcs_in_latest_ctx(LOCAL), [])
            self.assertNotEqual(B.get_htlcs_in_latest_ctx(REMOTE), [])
            self.assertEqual(A.get_htlcs_in_next_ctx(LOCAL), [])
            self.assertNotEqual(A.get_htlcs_in_next_ctx(REMOTE), [])
            self.assertEqual(A.get_htlcs_in_next_ctx(REMOTE), A.get_htlcs_in_latest_ctx(REMOTE))
            self.assertEqual(B.get_htlcs_in_next_ctx(REMOTE), [])
            B.send_ctx()
            A.recv_ctx()
            A.send_rev()
            self.assertEqual(A.get_htlcs_in_next_ctx(REMOTE), [])
            B.recv_rev()
            A.send_ctx()
            B.recv_ctx()
            B.send_rev()
            A.recv_rev()
            # HTLC fully removed from all contexts.
            self.assertEqual(B.get_htlcs_in_latest_ctx(LOCAL), [])
            self.assertEqual(A.get_htlcs_in_latest_ctx(LOCAL), [])
            self.assertEqual(A.get_htlcs_in_latest_ctx(REMOTE), [])
            self.assertEqual(B.get_htlcs_in_latest_ctx(REMOTE), [])
            self.assertEqual(len(A.all_settled_htlcs_ever(LOCAL)), int(htlc_success))
            self.assertEqual(len(A.sent_in_ctn(2)), int(htlc_success))
            self.assertEqual(len(B.received_in_ctn(2)), int(htlc_success))
            # A fresh HTLC from B exercises the next-ctx bookkeeping once more.
            A.recv_htlc(B.send_htlc(H('B', 0)))
            self.assertEqual(A.get_htlcs_in_next_ctx(REMOTE), [])
            self.assertNotEqual(A.get_htlcs_in_next_ctx(LOCAL), [])
            self.assertNotEqual(B.get_htlcs_in_next_ctx(REMOTE), [])
            self.assertEqual(B.get_htlcs_in_next_ctx(LOCAL), [])
            B.send_ctx()
            A.recv_ctx()
            A.send_rev()
            B.recv_rev()
            self.assertNotEqual(A.get_htlcs_in_next_ctx(REMOTE), A.get_htlcs_in_latest_ctx(REMOTE))
            self.assertEqual(A.get_htlcs_in_next_ctx(LOCAL), A.get_htlcs_in_latest_ctx(LOCAL))
            self.assertEqual(B.get_htlcs_in_next_ctx(REMOTE), B.get_htlcs_in_latest_ctx(REMOTE))
            self.assertNotEqual(B.get_htlcs_in_next_ctx(LOCAL), B.get_htlcs_in_next_ctx(REMOTE))
        htlc_lifecycle(htlc_success=True)
        htlc_lifecycle(htlc_success=False)

    def test_remove_htlc_while_owing_commitment(self):
        """Settle/fail an HTLC while the peer still owes a commitment; the
        removal must only reach the next ctx after the pending round completes."""
        def htlc_lifecycle(htlc_success: bool):
            A = HTLCManager(StoredDict({}, None, []))
            B = HTLCManager(StoredDict({}, None, []))
            A.channel_open_finished()
            B.channel_open_finished()
            ah0 = H('A', 0)
            B.recv_htlc(A.send_htlc(ah0))
            A.send_ctx()
            B.recv_ctx()
            B.send_rev()
            A.recv_rev()
            if htlc_success:
                B.send_settle(0)
                A.recv_settle(0)
            else:
                B.send_fail(0)
                A.recv_fail(0)
            self.assertEqual([], A.get_htlcs_in_oldest_unrevoked_ctx(LOCAL))
            self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_oldest_unrevoked_ctx(REMOTE))
            self.assertEqual([], A.get_htlcs_in_latest_ctx(LOCAL))
            self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_latest_ctx(REMOTE))
            self.assertEqual([], A.get_htlcs_in_next_ctx(LOCAL))
            self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_next_ctx(REMOTE))
            B.send_ctx()
            A.recv_ctx()
            A.send_rev()
            B.recv_rev()
            # After the round, the HTLC is gone from A's next remote ctx.
            self.assertEqual([], A.get_htlcs_in_oldest_unrevoked_ctx(LOCAL))
            self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_oldest_unrevoked_ctx(REMOTE))
            self.assertEqual([], A.get_htlcs_in_latest_ctx(LOCAL))
            self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_latest_ctx(REMOTE))
            self.assertEqual([], A.get_htlcs_in_next_ctx(LOCAL))
            self.assertEqual([], A.get_htlcs_in_next_ctx(REMOTE))
        htlc_lifecycle(htlc_success=True)
        htlc_lifecycle(htlc_success=False)

    def test_adding_htlc_between_send_ctx_and_recv_rev(self):
        """An HTLC added while a commitment round is half-finished must wait
        for the pending revocation before entering any ctx."""
        A = HTLCManager(StoredDict({}, None, []))
        B = HTLCManager(StoredDict({}, None, []))
        A.channel_open_finished()
        B.channel_open_finished()
        A.send_ctx()
        B.recv_ctx()
        B.send_rev()
        ah0 = H('A', 0)
        B.recv_htlc(A.send_htlc(ah0))
        self.assertEqual([], A.get_htlcs_in_latest_ctx(LOCAL))
        self.assertEqual([], A.get_htlcs_in_latest_ctx(REMOTE))
        self.assertEqual([], A.get_htlcs_in_next_ctx(LOCAL))
        self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_next_ctx(REMOTE))
        A.recv_rev()
        self.assertEqual([], A.get_htlcs_in_latest_ctx(LOCAL))
        self.assertEqual([], A.get_htlcs_in_latest_ctx(REMOTE))
        self.assertEqual([], A.get_htlcs_in_next_ctx(LOCAL))
        self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_next_ctx(REMOTE))
        A.send_ctx()
        B.recv_ctx()
        self.assertEqual([], A.get_htlcs_in_latest_ctx(LOCAL))
        self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_latest_ctx(REMOTE))
        self.assertEqual([], A.get_htlcs_in_next_ctx(LOCAL))
        self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_next_ctx(REMOTE))
        B.send_rev()
        A.recv_rev()
        self.assertEqual([], A.get_htlcs_in_latest_ctx(LOCAL))
        self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_latest_ctx(REMOTE))
        self.assertEqual([(Direction.SENT, ah0)], A.get_htlcs_in_next_ctx(LOCAL))
        self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_next_ctx(REMOTE))
        B.send_ctx()
        A.recv_ctx()
        self.assertEqual([], A.get_htlcs_in_oldest_unrevoked_ctx(LOCAL))
        self.assertEqual([(Direction.SENT, ah0)], A.get_htlcs_in_latest_ctx(LOCAL))
        self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_latest_ctx(REMOTE))
        self.assertEqual([(Direction.SENT, ah0)], A.get_htlcs_in_next_ctx(LOCAL))
        self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_next_ctx(REMOTE))
        A.send_rev()
        B.recv_rev()
        self.assertEqual([(Direction.SENT, ah0)], A.get_htlcs_in_oldest_unrevoked_ctx(LOCAL))
        self.assertEqual([(Direction.SENT, ah0)], A.get_htlcs_in_latest_ctx(LOCAL))
        self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_latest_ctx(REMOTE))
        self.assertEqual([(Direction.SENT, ah0)], A.get_htlcs_in_next_ctx(LOCAL))
        self.assertEqual([(Direction.RECEIVED, ah0)], A.get_htlcs_in_next_ctx(REMOTE))

    def test_unacked_local_updates(self):
        """Raw update messages queue per pending remote ctn and are dropped
        once the corresponding revocation is received."""
        A = HTLCManager(StoredDict({}, None, []))
        B = HTLCManager(StoredDict({}, None, []))
        A.channel_open_finished()
        B.channel_open_finished()
        self.assertEqual({}, A.get_unacked_local_updates())
        ah0 = H('A', 0)
        B.recv_htlc(A.send_htlc(ah0))
        A.store_local_update_raw_msg(b'upd_msg0', is_commitment_signed=False)
        self.assertEqual({1: [b'upd_msg0']}, A.get_unacked_local_updates())
        ah1 = H('A', 1)
        B.recv_htlc(A.send_htlc(ah1))
        A.store_local_update_raw_msg(b'upd_msg1', is_commitment_signed=False)
        self.assertEqual({1: [b'upd_msg0', b'upd_msg1']}, A.get_unacked_local_updates())
        A.send_ctx()
        B.recv_ctx()
        A.store_local_update_raw_msg(b'ctx1', is_commitment_signed=True)
        self.assertEqual({1: [b'upd_msg0', b'upd_msg1', b'ctx1']}, A.get_unacked_local_updates())
        # Updates after the signed ctx belong to the next ctn (2).
        ah2 = H('A', 2)
        B.recv_htlc(A.send_htlc(ah2))
        A.store_local_update_raw_msg(b'upd_msg2', is_commitment_signed=False)
        self.assertEqual({1: [b'upd_msg0', b'upd_msg1', b'ctx1'], 2: [b'upd_msg2']}, A.get_unacked_local_updates())
        B.send_rev()
        A.recv_rev()
        # Revocation acknowledges everything queued for ctn 1.
        self.assertEqual({2: [b'upd_msg2']}, A.get_unacked_local_updates())
class IndexedDataset(torch.utils.data.Dataset):
    """Reader for the 'TNTIDX' binary index/data file pair.

    The index file carries a fixed header (magic, version, dtype code,
    element size, counts) followed by dim offsets, data offsets and the
    flattened per-item sizes; tensors are read on demand from the data file.
    """

    def __init__(self, path):
        super().__init__()
        # Parse the fixed binary header of the index file.
        with open(index_file_path(path), 'rb') as f:
            magic = f.read(8)
            assert (magic == b'TNTIDX\x00\x00')
            version = f.read(8)
            assert (struct.unpack('<Q', version) == (1,))
            # dtype code (looked up in `dtypes`) and per-element byte size.
            (code, self.element_size) = struct.unpack('<QQ', f.read(16))
            self.dtype = dtypes[code]
            # size: number of items; s: total length of the sizes table.
            (self.size, self.s) = struct.unpack('<QQ', f.read(16))
            self.dim_offsets = read_longs(f, (self.size + 1))
            self.data_offsets = read_longs(f, (self.size + 1))
            self.sizes = read_longs(f, self.s)
        self.read_data(path)

    def read_data(self, path):
        # buffering=0: raw unbuffered handle; reads go straight to readinto().
        self.data_file = open(data_file_path(path), 'rb', buffering=0)

    def check_index(self, i):
        if ((i < 0) or (i >= self.size)):
            raise IndexError('index out of range')

    def __del__(self):
        # NOTE(review): raises AttributeError if __init__ failed before
        # read_data() opened the file — confirm whether that is acceptable.
        self.data_file.close()

    def __getitem__(self, i):
        self.check_index(i)
        # Shape of item i is the slice of `sizes` between its dim offsets.
        tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[(i + 1)]]
        a = np.empty(tensor_size, dtype=self.dtype)
        self.data_file.seek((self.data_offsets[i] * self.element_size))
        self.data_file.readinto(a)
        item = torch.from_numpy(a).long()
        return item

    def __len__(self):
        return self.size

    # NOTE(review): `exists` takes no `self` — probably a @staticmethod whose
    # decorator was lost; confirm against the upstream source.
    def exists(path):
        return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path)))
def create_minibatch_rv(rv: TensorVariable, total_size: Union[(int, None, Sequence[Union[(int, EllipsisType, None)]])]) -> TensorVariable:
    """Return a minibatch-scaled version of *rv*.

    Args:
        rv: The random variable to rescale.
        total_size: Either an int (full size of the leading dimension), or a
            list/tuple of per-dimension sizes where ``None`` means "unscaled"
            and a single ``Ellipsis`` pads the missing middle dimensions
            with ``None``.

    Raises:
        ValueError: for more than one Ellipsis or too many entries.
        TypeError: for any other total_size type.
    """
    if isinstance(total_size, int):
        # Scalar: scale the leading dimension, leave any remaining ones unscaled.
        if rv.ndim <= 1:
            total_size = [total_size]
        else:
            missing_ndims = rv.ndim - 1
            total_size = [total_size] + [None] * missing_ndims
    elif isinstance(total_size, (list, tuple)):
        total_size = list(total_size)
        if Ellipsis in total_size:
            # Replace the single Ellipsis with enough Nones to cover rv.ndim.
            if total_size.count(Ellipsis) > 1:
                raise ValueError('Only one Ellipsis can be present in total_size')
            sep = total_size.index(Ellipsis)
            begin = total_size[:sep]
            end = total_size[(sep + 1):]
            missing_ndims = max((rv.ndim - len(begin)) - len(end), 0)
            total_size = (begin + [None] * missing_ndims) + end
        if len(total_size) > rv.ndim:
            # Fixed message typo: 'langer' -> 'larger'.
            raise ValueError(f'Length of total_size {total_size} is larger than RV ndim {rv.ndim}')
    else:
        raise TypeError(f'Invalid type for total_size: {total_size}')
    return cast(TensorVariable, minibatch_rv(rv, *total_size))
def main():
    """CLI entry point for the Krkn chaos recommender.

    Reads settings from a config file and/or command-line options (options
    win when both are given), fetches utilization data from Prometheus and
    runs the recommendation analysis.
    """
    parser = argparse.ArgumentParser(description='Krkn Chaos Recommender Command-Line tool')
    args = parse_arguments(parser)
    # At least one source of configuration is required.
    if ((args.config_file is None) and (not args.options)):
        logging.error('You have to either specify a config file path or pass recommender options as command line arguments')
        parser.print_help()
        sys.exit(1)
    if (args.config_file is not None):
        (namespace, kubeconfig, prometheus_endpoint, auth_token, scrape_duration, chaos_tests, log_level) = read_configuration(args.config_file)
    # CLI options override the config file when both are provided.
    if args.options:
        namespace = args.namespace
        kubeconfig = args.kubeconfig
        auth_token = args.token
        scrape_duration = args.scrape_duration
        log_level = args.log_level
        prometheus_endpoint = args.prometheus_endpoint
        chaos_tests = {'MEM': args.MEM, 'GENERIC': args.GENERIC, 'CPU': args.CPU, 'NETWORK': args.NETWORK}
    if (log_level not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']):
        logging.error(f'{log_level} not a valid log level')
        sys.exit(1)
    logging.basicConfig(level=log_level)
    # Echo the effective inputs before starting.
    logging.info('INPUTS')
    logging.info(f'Namespace: {namespace}')
    logging.info(f'Kubeconfig: {kubeconfig}')
    logging.info(f'Prometheus endpoint: {prometheus_endpoint}')
    logging.info(f'Scrape duration: {scrape_duration}')
    for test in chaos_tests.keys():
        logging.info(f'Chaos tests {test}: {chaos_tests[test]}')
    logging.info('')
    logging.info('Starting Analysis ...')
    logging.info('Fetching the Telemetry data')
    file_path = prometheus.fetch_utilization_from_prometheus(prometheus_endpoint, auth_token, namespace, scrape_duration)
    analysis(file_path, chaos_tests)
class NetWrapper(nn.Module):
    """Wraps a backbone net, captures a hidden layer's activation through a
    forward hook, and projects it with an MLP (BYOL-style)."""

    def __init__(self, net, projection_size, projection_hidden_size, layer=(- 2)):
        super().__init__()
        self.net = net
        # Layer to tap: a named module (str) or a child index (int); -1 means
        # use the network output directly.
        self.layer = layer
        self.projector = None
        self.projection_size = projection_size
        self.projection_hidden_size = projection_hidden_size
        # Captured activations keyed by device (supports replicated modules).
        self.hidden = {}
        self.hook_registered = False

    def _find_layer(self):
        # Resolve self.layer to an actual module, or None if not found.
        if (type(self.layer) == str):
            modules = dict([*self.net.named_modules()])
            return modules.get(self.layer, None)
        elif (type(self.layer) == int):
            children = [*self.net.children()]
            return children[self.layer]
        return None

    def _hook(self, _, input, output):
        # Store the flattened activation under the input's device.
        device = input[0].device
        self.hidden[device] = flatten(output)

    def _register_hook(self):
        layer = self._find_layer()
        assert (layer is not None), f'hidden layer ({self.layer}) not found'
        handle = layer.register_forward_hook(self._hook)
        self.hook_registered = True

    # NOTE(review): the bare ('projector') expression below looks like a
    # mangled decorator (likely a `@singleton('projector')`-style cache on
    # _get_projector) — confirm against the upstream source; without it a new
    # projector is built on every call.
    ('projector')
    def _get_projector(self, hidden):
        (_, dim) = hidden.shape
        projector = MLP(dim, self.projection_size, self.projection_hidden_size)
        return projector.to(hidden)

    def get_representation(self, x):
        # layer == -1: no hook needed, the net output IS the representation.
        if (self.layer == (- 1)):
            return self.net(x)
        if (not self.hook_registered):
            self._register_hook()
        self.hidden.clear()
        _ = self.net(x)
        hidden = self.hidden[x.device]
        self.hidden.clear()
        assert (hidden is not None), f'hidden layer {self.layer} never emitted an output'
        return hidden

    def forward(self, x, return_projection=True):
        representation = self.get_representation(x)
        if (not return_projection):
            return representation
        projector = self._get_projector(representation)
        projection = projector(representation)
        return (projection, representation)
def get_scheduler(name: Union[(str, SchedulerType)], optimizer: Optimizer, num_warmup_steps: Optional[int]=None, num_training_steps: Optional[int]=None):
    """Build the learning-rate scheduler *name* for *optimizer*.

    Constant schedules need no step counts; warmup schedules require
    ``num_warmup_steps``; everything else also requires
    ``num_training_steps``. Raises ValueError when a required count is
    missing.
    """
    name = SchedulerType(name)
    builder = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return builder(optimizer)
    # Everything past this point needs a warmup length.
    if num_warmup_steps is None:
        raise ValueError(f'{name} requires `num_warmup_steps`, please provide that argument.')
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return builder(optimizer, num_warmup_steps=num_warmup_steps)
    # Remaining schedules decay over the full training run.
    if num_training_steps is None:
        raise ValueError(f'{name} requires `num_training_steps`, please provide that argument.')
    return builder(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps)
def train(base_loader, val_loader, model, optimization, start_epoch, stop_epoch, params): if (optimization == 'Adam'): optimizer = torch.optim.Adam(model.parameters()) else: raise ValueError('Unknown optimization, please define by yourself') max_acc = 0 for epoch in range(start_epoch, stop_epoch): model.train() model.train_loop(epoch, base_loader, optimizer) model.eval() if (not os.path.isdir(params.checkpoint_dir)): os.makedirs(params.checkpoint_dir) acc = model.test_loop(val_loader) if (acc > max_acc): print('best model! save...') max_acc = acc outfile = os.path.join(params.checkpoint_dir, 'best_model.tar') torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile) if (((epoch % params.save_freq) == 0) or (epoch == (stop_epoch - 1))): outfile = os.path.join(params.checkpoint_dir, '{:d}.tar'.format(epoch)) torch.save({'epoch': epoch, 'state': model.state_dict()}, outfile) return model
# NOTE(review): the three leading `.parametrize(...)` lines appear to be
# `@pytest.mark.parametrize(...)` decorators whose `@pytest.mark` prefix was
# lost in extraction — confirm against the upstream test module.
.parametrize('sampler', [sample_blackjax_nuts, sample_numpyro_nuts])
.parametrize('idata_kwargs', [dict(), dict(log_likelihood=True), dict(coords={'x_coord': ['x1', 'x2']}), dict(dims={'x': ['x_coord2']}), dict(coords={'x_coord3': ['A', 'B']}, dims={'x': ['x_coord3']})])
.parametrize('postprocessing_backend', [None, 'cpu'])
def test_idata_kwargs(model_test_idata_kwargs: pm.Model, sampler: Callable[(..., az.InferenceData)], idata_kwargs: Dict[(str, Any)], postprocessing_backend: Optional[str]):
    """Check that idata_kwargs (log_likelihood, coords, dims) are honored by
    the JAX-based NUTS samplers across postprocessing backends."""
    idata: Optional[az.InferenceData] = None
    with model_test_idata_kwargs:
        idata = sampler(tune=50, draws=50, chains=1, idata_kwargs=idata_kwargs, postprocessing_backend=postprocessing_backend)
    assert (idata is not None)
    const_data = idata.get('constant_data')
    assert (const_data is not None)
    assert ('constantdata' in const_data)
    assert ('mutabledata' in const_data)
    # log_likelihood group exists only when explicitly requested.
    if idata_kwargs.get('log_likelihood', False):
        assert ('log_likelihood' in idata)
    else:
        assert ('log_likelihood' not in idata)
    posterior = idata.get('posterior')
    assert (posterior is not None)
    # Dim/coord overrides from idata_kwargs win over the model's own mapping.
    x_dim_expected = idata_kwargs.get('dims', model_test_idata_kwargs.named_vars_to_dims)['x'][0]
    assert (x_dim_expected is not None)
    assert (posterior['x'].dims[(- 1)] == x_dim_expected)
    x_coords_expected = idata_kwargs.get('coords', model_test_idata_kwargs.coords)[x_dim_expected]
    assert (x_coords_expected is not None)
    assert (list(x_coords_expected) == list(posterior['x'].coords[x_dim_expected].values))
    # The model-declared z coordinate must always survive untouched.
    assert (posterior['z'].dims[2] == 'z_coord')
    assert np.all((posterior['z'].coords['z_coord'].values == np.array(['apple', 'banana', 'orange'])))
class _BackendREST(_BackendBase):
    """Backend that talks to Bugzilla over its REST API.

    Each public method maps to one Bugzilla REST endpoint and returns the
    parsed JSON payload as a plain dict.
    """

    def __init__(self, url, bugzillasession):
        _BackendBase.__init__(self, url, bugzillasession)
        # Configure the shared HTTP session for REST/JSON style requests.
        self._bugzillasession.set_rest_defaults()

    ##################
    # Error handling #
    ##################

    def _handle_error(self, e):
        # Only HTTP errors that carry a response body can contain a
        # Bugzilla-formatted error payload; everything else is re-raised as-is.
        response = getattr(e, 'response', None)
        if (response is None):
            raise e
        if (response.status_code in [400, 401, 404]):
            self._handle_error_response(response.text)
        raise e

    def _handle_error_response(self, text):
        # Best effort: if the body is not JSON, return and let the original
        # HTTP error propagate from the caller (_handle_error re-raises).
        try:
            result = json.loads(text)
        except json.JSONDecodeError:
            return
        if result.get('error'):
            raise BugzillaError(result['message'], code=result['code'])

    def _handle_response(self, text):
        # Parse a response body; raise BugzillaError for API-level failures.
        try:
            ret = dict(json.loads(text))
        except Exception:
            log.debug('Failed to parse REST response. Output is:\n%s', text)
            raise
        if ret.get('error', False):
            raise BugzillaError(ret['message'], code=ret['code'])
        return ret

    def _op(self, method, apiurl, paramdict=None):
        """Perform one HTTP request against `apiurl` and return the parsed dict.

        GET parameters travel in the query string (merged with auth params);
        other methods send `paramdict` as a JSON body.
        """
        fullurl = os.path.join(self._url, apiurl.lstrip('/'))
        log.debug('Bugzilla REST %s %s params=%s', method, fullurl, paramdict)
        data = None
        authparams = self._bugzillasession.get_auth_params()
        if (method == 'GET'):
            authparams.update((paramdict or {}))
        else:
            data = json.dumps((paramdict or {}))
        try:
            response = self._bugzillasession.request(method, fullurl, data=data, params=authparams)
        except BugzillaHTTPError as e:
            # May raise BugzillaError for API-level failures; otherwise re-raises.
            self._handle_error(e)
        return self._handle_response(response.text)

    def _get(self, *args, **kwargs):
        return self._op('GET', *args, **kwargs)

    def _put(self, *args, **kwargs):
        return self._op('PUT', *args, **kwargs)

    def _post(self, *args, **kwargs):
        return self._op('POST', *args, **kwargs)

    #######################
    # API implementations #
    #######################

    def get_xmlrpc_proxy(self):
        raise BugzillaError('You are using the bugzilla REST API, so raw XMLRPC access is not provided.')

    def is_rest(self):
        return True

    def bugzilla_version(self):
        return self._get('/version')

    def bug_create(self, paramdict):
        return self._post('/bug', paramdict)

    def bug_fields(self, paramdict):
        return self._get('/field/bug', paramdict)

    def bug_get(self, bug_ids, aliases, paramdict):
        bug_list = listify(bug_ids)
        alias_list = listify(aliases)
        data = paramdict.copy()
        # Exactly one id or alias requested: use /bug/<id-or-alias> directly.
        if ((len((bug_list or [])) + len((alias_list or []))) == 1):
            for id_list in (bug_list, alias_list):
                if id_list:
                    return self._get(('/bug/%s' % id_list[0]), data)
        # Multiple lookups: pass ids/aliases as query parameters to /bug.
        data['id'] = bug_list
        data['alias'] = alias_list
        ret = self._get('/bug', data)
        return ret

    def bug_attachment_get(self, attachment_ids, paramdict):
        # One request per attachment id; merge 'attachments'/'bugs' keys.
        ret = {}
        for attid in listify(attachment_ids):
            out = self._get(('/bug/attachment/%s' % attid), paramdict)
            _update_key(ret, out, 'attachments')
            _update_key(ret, out, 'bugs')
        return ret

    def bug_attachment_get_all(self, bug_ids, paramdict):
        # One request per bug id; merge 'attachments'/'bugs' keys.
        ret = {}
        for bugid in listify(bug_ids):
            out = self._get(('/bug/%s/attachment' % bugid), paramdict)
            _update_key(ret, out, 'attachments')
            _update_key(ret, out, 'bugs')
        return ret

    def bug_attachment_create(self, bug_ids, data, paramdict):
        # Raw attachment bytes must be base64-encoded in the JSON payload.
        if ((data is not None) and ('data' not in paramdict)):
            paramdict['data'] = base64.b64encode(data).decode('utf-8')
        paramdict['ids'] = listify(bug_ids)
        return self._post(('/bug/%s/attachment' % paramdict['ids'][0]), paramdict)

    def bug_attachment_update(self, attachment_ids, paramdict):
        paramdict['ids'] = listify(attachment_ids)
        return self._put(('/bug/attachment/%s' % paramdict['ids'][0]), paramdict)

    def bug_comments(self, bug_ids, paramdict):
        ret = {}
        for bugid in bug_ids:
            out = self._get(('/bug/%s/comment' % bugid), paramdict)
            _update_key(ret, out, 'bugs')
        return ret

    def bug_history(self, bug_ids, paramdict):
        ret = {'bugs': []}
        for bugid in bug_ids:
            out = self._get(('/bug/%s/history' % bugid), paramdict)
            ret['bugs'].extend(out.get('bugs', []))
        return ret

    def bug_search(self, paramdict):
        return self._get('/bug', paramdict)

    def bug_update(self, bug_ids, paramdict):
        # The PUT URL only names the first id; the rest go in the body.
        data = paramdict.copy()
        data['ids'] = listify(bug_ids)
        return self._put(('/bug/%s' % data['ids'][0]), data)

    def bug_update_tags(self, bug_ids, paramdict):
        raise BugzillaError('No REST API available for bug_update_tags')

    def component_create(self, paramdict):
        return self._post('/component', paramdict)

    def component_update(self, paramdict):
        if ('ids' in paramdict):
            apiurl = str(listify(paramdict['ids'])[0])
        if ('names' in paramdict):
            # NOTE(review): the %-format expects a mapping; this assumes each
            # entry of paramdict['names'] is a dict with 'product'/'component'
            # keys -- confirm against callers.
            apiurl = ('%(product)s/%(component)s' % listify(paramdict['names'])[0])
        return self._put(('/component/%s' % apiurl), paramdict)

    def externalbugs_add(self, paramdict):
        raise BugzillaError('No REST API available yet for externalbugs_add')

    def externalbugs_remove(self, paramdict):
        raise BugzillaError('No REST API available yet for externalbugs_remove')

    def externalbugs_update(self, paramdict):
        raise BugzillaError('No REST API available yet for externalbugs_update')

    def group_get(self, paramdict):
        return self._get('/group', paramdict)

    def product_get(self, paramdict):
        return self._get('/product/get', paramdict)

    def product_get_accessible(self):
        return self._get('/product_accessible')

    def product_get_enterable(self):
        return self._get('/product_enterable')

    def product_get_selectable(self):
        return self._get('/product_selectable')

    def user_create(self, paramdict):
        return self._post('/user', paramdict)

    def user_get(self, paramdict):
        return self._get('/user', paramdict)

    def user_login(self, paramdict):
        return self._get('/login', paramdict)

    def user_logout(self):
        return self._get('/logout')

    def user_update(self, paramdict):
        # 'names' takes precedence over 'ids' when both are present.
        urlid = None
        if ('ids' in paramdict):
            urlid = listify(paramdict['ids'])[0]
        if ('names' in paramdict):
            urlid = listify(paramdict['names'])[0]
        return self._put(('/user/%s' % urlid), paramdict)
class LARSOptimizer(tf.train.Optimizer):
    """Layer-wise Adaptive Rate Scaling (LARS) optimizer, TF1 graph style.

    Scales each variable's effective learning rate by a per-layer trust
    ratio ``eeta * ||w|| / ||g||`` before (or after, depending on
    ``classic_momentum``) applying an optionally-Nesterov momentum update.
    """

    def __init__(self, learning_rate, momentum=0.9, use_nesterov=False, weight_decay=0.0, exclude_from_weight_decay=None, exclude_from_layer_adaptation=None, classic_momentum=True, eeta=EETA_DEFAULT, name='LARSOptimizer'):
        """Construct the optimizer.

        Args:
            learning_rate: base learning rate.
            momentum: momentum coefficient.
            use_nesterov: use a Nesterov-style momentum update.
            weight_decay: L2 decay coefficient added to the gradient.
            exclude_from_weight_decay: regexes; matching variable names skip
                weight decay.
            exclude_from_layer_adaptation: regexes; matching variable names
                skip the trust-ratio scaling. Defaults to
                ``exclude_from_weight_decay`` when not given.
            classic_momentum: if True, the trust-ratio-scaled lr is applied
                inside the momentum update; otherwise it scales the final step.
            eeta: trust-ratio coefficient.
            name: optimizer name scope.
        """
        super(LARSOptimizer, self).__init__(False, name)
        self.learning_rate = learning_rate
        self.momentum = momentum
        self.weight_decay = weight_decay
        self.use_nesterov = use_nesterov
        self.classic_momentum = classic_momentum
        self.eeta = eeta
        self.exclude_from_weight_decay = exclude_from_weight_decay
        if exclude_from_layer_adaptation:
            self.exclude_from_layer_adaptation = exclude_from_layer_adaptation
        else:
            # Fall back to the weight-decay exclusion list.
            self.exclude_from_layer_adaptation = exclude_from_weight_decay

    def apply_gradients(self, grads_and_vars, global_step=None, name=None):
        """Build and return the grouped op applying all parameter updates."""
        if (global_step is None):
            global_step = tf.train.get_or_create_global_step()
        new_global_step = (global_step + 1)
        assignments = []
        for (grad, param) in grads_and_vars:
            if ((grad is None) or (param is None)):
                continue
            param_name = param.op.name
            # Per-parameter momentum accumulator variable.
            v = tf.get_variable(name=(param_name + '/Momentum'), shape=param.shape.as_list(), dtype=tf.float32, trainable=False, initializer=tf.zeros_initializer())
            if self._use_weight_decay(param_name):
                grad += (self.weight_decay * param)
            if self.classic_momentum:
                trust_ratio = 1.0
                if self._do_layer_adaptation(param_name):
                    w_norm = tf.norm(param, ord=2)
                    g_norm = tf.norm(grad, ord=2)
                    # Guard against zero norms: fall back to a ratio of 1.
                    trust_ratio = tf.where(tf.greater(w_norm, 0), tf.where(tf.greater(g_norm, 0), ((self.eeta * w_norm) / g_norm), 1.0), 1.0)
                scaled_lr = (self.learning_rate * trust_ratio)
                next_v = (tf.multiply(self.momentum, v) + (scaled_lr * grad))
                if self.use_nesterov:
                    update = (tf.multiply(self.momentum, next_v) + (scaled_lr * grad))
                else:
                    update = next_v
                next_param = (param - update)
            else:
                # Momentum is accumulated on the raw gradient; the trust ratio
                # scales the final applied update instead.
                next_v = (tf.multiply(self.momentum, v) + grad)
                if self.use_nesterov:
                    update = (tf.multiply(self.momentum, next_v) + grad)
                else:
                    update = next_v
                trust_ratio = 1.0
                if self._do_layer_adaptation(param_name):
                    w_norm = tf.norm(param, ord=2)
                    v_norm = tf.norm(update, ord=2)
                    trust_ratio = tf.where(tf.greater(w_norm, 0), tf.where(tf.greater(v_norm, 0), ((self.eeta * w_norm) / v_norm), 1.0), 1.0)
                scaled_lr = (trust_ratio * self.learning_rate)
                next_param = (param - (scaled_lr * update))
            # NOTE(review): global_step.assign is appended once per parameter;
            # presumably redundant-but-harmless duplicate assigns -- confirm.
            assignments.extend([param.assign(next_param), v.assign(next_v), global_step.assign(new_global_step)])
        return tf.group(*assignments, name=name)

    def _use_weight_decay(self, param_name):
        """Whether weight decay applies to the variable named `param_name`."""
        if (not self.weight_decay):
            return False
        if self.exclude_from_weight_decay:
            for r in self.exclude_from_weight_decay:
                if (re.search(r, param_name) is not None):
                    return False
        return True

    def _do_layer_adaptation(self, param_name):
        """Whether the trust-ratio scaling applies to `param_name`."""
        if self.exclude_from_layer_adaptation:
            for r in self.exclude_from_layer_adaptation:
                if (re.search(r, param_name) is not None):
                    return False
        return True
def train(args, generator, discriminator_photo, discriminator_cari, discriminator_feat_p, discriminator_feat_c, g_optim, d_optim_p, d_optim_c, d_optim_fp, d_optim_fc, g_ema, p_cls, c_cls, id_net, device):
    """Main photo<->caricature GAN training loop.

    Each iteration trains, in order: the two feature discriminators, the two
    image discriminators (each with lazy R1 regularization every
    ``args.d_reg_every`` steps), then the generator's deformation blocks with
    adversarial, cycle, attribute and identity losses. Rank 0 handles
    progress display, periodic sampling and checkpointing.

    NOTE(review): `mean_latent` is not defined in this function and is
    presumably a module-level global set before train() is called -- confirm.
    """
    pbar = range(args.iter)
    if (get_rank() == 0):
        # Rank 0 owns the output directories and the progress bar.
        if (not os.path.exists(f'checkpoint/{args.name}')):
            os.makedirs(f'checkpoint/{args.name}')
        if (not os.path.exists(f'sample/{args.name}')):
            os.makedirs(f'sample/{args.name}')
        pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.01)
    # Running scalar trackers; several are overwritten from loss_dict below.
    d_loss_val = 0
    d_feat_loss_val = 0
    r1_loss = torch.tensor(0.0, device=device)
    gan_loss_val = 0
    gan_feat_loss_val = 0
    idt_loss_val = 0
    attr_loss_val = 0
    loss_dict = {}
    if args.distributed:
        # Unwrap DDP modules so state_dict() saves the bare models.
        g_module = generator.module
        d_module_p = discriminator_photo.module
        d_module_c = discriminator_cari.module
        d_module_feat_p = discriminator_feat_p.module
        d_module_feat_c = discriminator_feat_c.module
    else:
        g_module = generator
        d_module_p = discriminator_photo
        d_module_c = discriminator_cari
        d_module_feat_p = discriminator_feat_p
        d_module_feat_c = discriminator_feat_c
    # EMA decay factor for the g_ema weight average.
    accum = (0.5 ** (32 / (10 * 1000)))
    # Fixed latents reused for every periodic sample grid.
    sample_z = torch.randn(args.n_sample, args.latent, device=device)
    criterion_BCE = nn.BCELoss()
    for idx in pbar:
        i = (idx + args.start_iter)
        if (i > args.iter):
            print('Done!')
            break
        '\n Discriminator for feat cari\n '
        requires_grad(generator, False)
        requires_grad(discriminator_feat_c, True)
        noise = mixing_noise(args.batch, args.latent, args.mixing, device)
        noise_fine = mixing_noise(args.batch, args.latent, args.mixing, device)
        ret = generator(noise, noise_fine, truncation_latent=mean_latent, mode='p2c')
        # 'co' = translated caricature features, 'gt_co' = ground-truth ones.
        fake_feat = ret['co']
        real_feat = ret['gt_co'].detach()
        fake_pred = discriminator_feat_c(fake_feat)
        real_pred = discriminator_feat_c(real_feat)
        d_loss = d_logistic_loss(real_pred, fake_pred)
        loss_dict['d_feat_c'] = d_loss
        loss_dict['real_score_feat_c'] = real_pred.mean()
        loss_dict['fake_score_feat_c'] = fake_pred.mean()
        discriminator_feat_c.zero_grad()
        d_loss.backward()
        d_optim_fc.step()
        # Lazy R1 regularization on the real features.
        d_regularize = ((i % args.d_reg_every) == 0)
        if d_regularize:
            real_feat.requires_grad = True
            real_pred = discriminator_feat_c(real_feat)
            r1_loss = d_r1_loss(real_pred, real_feat)
            discriminator_feat_c.zero_grad()
            # The 0 * real_pred[0] term keeps the graph connected for DDP.
            ((((args.r1 / 2) * r1_loss) * args.d_reg_every) + (0 * real_pred[0])).backward()
            d_optim_fc.step()
        '\n Discriminator for feat photo\n '
        requires_grad(generator, False)
        requires_grad(discriminator_feat_p, True)
        noise = mixing_noise(args.batch, args.latent, args.mixing, device)
        noise_fine = mixing_noise(args.batch, args.latent, args.mixing, device)
        ret = generator(noise, noise_fine, truncation_latent=mean_latent, mode='c2p')
        fake_feat = ret['po']
        real_feat = ret['gt_po'].detach()
        fake_pred = discriminator_feat_p(fake_feat)
        real_pred = discriminator_feat_p(real_feat)
        d_loss = d_logistic_loss(real_pred, fake_pred)
        loss_dict['d_feat_p'] = d_loss
        loss_dict['real_score_feat_p'] = real_pred.mean()
        loss_dict['fake_score_feat_p'] = fake_pred.mean()
        discriminator_feat_p.zero_grad()
        d_loss.backward()
        d_optim_fp.step()
        d_regularize = ((i % args.d_reg_every) == 0)
        if d_regularize:
            real_feat.requires_grad = True
            real_pred = discriminator_feat_p(real_feat)
            r1_loss = d_r1_loss(real_pred, real_feat)
            discriminator_feat_p.zero_grad()
            ((((args.r1 / 2) * r1_loss) * args.d_reg_every) + (0 * real_pred[0])).backward()
            d_optim_fp.step()
        '\n Discriminator for cari\n '
        requires_grad(generator, False)
        requires_grad(discriminator_cari, True)
        noise = mixing_noise(args.batch, args.latent, args.mixing, device)
        noise_fine = mixing_noise(args.batch, args.latent, args.mixing, device)
        # Fake = photo->caricature translation; real = generated GT caricature.
        fake_img = generator(noise, noise_fine, truncation_latent=mean_latent, mode='p2c')['result']
        real_img = generator(noise, noise_fine, truncation_latent=mean_latent, mode='c_gt')
        real_img = real_img.detach()
        fake_pred = discriminator_cari(fake_img)
        real_pred = discriminator_cari(real_img)
        d_loss = d_logistic_loss(real_pred, fake_pred)
        loss_dict['d_c'] = d_loss
        loss_dict['real_score_c'] = real_pred.mean()
        loss_dict['fake_score_c'] = fake_pred.mean()
        discriminator_cari.zero_grad()
        d_loss.backward()
        d_optim_c.step()
        d_regularize = ((i % args.d_reg_every) == 0)
        if d_regularize:
            real_img.requires_grad = True
            real_pred = discriminator_cari(real_img)
            r1_loss = d_r1_loss(real_pred, real_img)
            discriminator_cari.zero_grad()
            ((((args.r1 / 2) * r1_loss) * args.d_reg_every) + (0 * real_pred[0])).backward()
            d_optim_c.step()
        '\n Discriminator for photo\n '
        requires_grad(generator, False)
        requires_grad(discriminator_photo, True)
        noise = mixing_noise(args.batch, args.latent, args.mixing, device)
        noise_fine = mixing_noise(args.batch, args.latent, args.mixing, device)
        fake_img = generator(noise, noise_fine, truncation_latent=mean_latent, mode='c2p')['result']
        real_img = generator(noise, noise_fine, truncation_latent=mean_latent, mode='p_gt')
        real_img = real_img.detach()
        fake_pred = discriminator_photo(fake_img)
        real_pred = discriminator_photo(real_img)
        d_loss = d_logistic_loss(real_pred, fake_pred)
        loss_dict['d_p'] = d_loss
        loss_dict['real_score_p'] = real_pred.mean()
        loss_dict['fake_score_p'] = fake_pred.mean()
        discriminator_photo.zero_grad()
        d_loss.backward()
        d_optim_p.step()
        d_regularize = ((i % args.d_reg_every) == 0)
        if d_regularize:
            real_img.requires_grad = True
            real_pred = discriminator_photo(real_img)
            r1_loss = d_r1_loss(real_pred, real_img)
            discriminator_photo.zero_grad()
            ((((args.r1 / 2) * r1_loss) * args.d_reg_every) + (0 * real_pred[0])).backward()
            d_optim_p.step()
        loss_dict['r1'] = r1_loss
        # Generator phase: only the deformation blocks receive gradients.
        if args.distributed:
            requires_grad(generator.module.deformation_blocks_CP, True)
            requires_grad(generator.module.deformation_blocks_PC, True)
        else:
            requires_grad(generator.deformation_blocks_CP, True)
            requires_grad(generator.deformation_blocks_PC, True)
        requires_grad(discriminator_photo, False)
        requires_grad(discriminator_cari, False)
        requires_grad(discriminator_feat_p, False)
        requires_grad(discriminator_feat_c, False)
        noise = mixing_noise(args.batch, args.latent, args.mixing, device)
        ret_p2c = generator(noise, truncation_latent=mean_latent, mode='p2c')
        ret_p2c_recon = generator(noise, truncation_latent=mean_latent, mode='p2c_recon')
        ret_c2p = generator(noise, truncation_latent=mean_latent, mode='c2p')
        # Cycle-consistency between per-level features and their reconstructions.
        cyc_loss_p2c = 0
        cyc_loss_c2p = 0
        for lv in range(len(ret_p2c['po'])):
            cyc_loss_p2c += F.mse_loss(ret_p2c['po'][lv], ret_p2c['ro'][lv])
            cyc_loss_c2p += F.mse_loss(ret_c2p['co'][lv], ret_c2p['ro'][lv])
        cyc_loss = ((cyc_loss_p2c + cyc_loss_c2p) / 2)
        # Attribute preservation: classifier outputs should match across domains.
        attr_p_p2c = p_cls(ret_p2c['org']).detach()
        attr_c_p2c = c_cls(ret_p2c['result'])
        attr_loss_p2c = criterion_BCE(attr_c_p2c, attr_p_p2c)
        attr_c_c2p = c_cls(ret_c2p['org']).detach()
        attr_p_c2p = p_cls(ret_c2p['result'])
        attr_loss_c2p = criterion_BCE(attr_p_c2p, attr_c_c2p)
        attr_loss = ((attr_loss_p2c + attr_loss_c2p) / 2)
        # Adversarial losses against the (frozen) image discriminators.
        fake_pred_photo = discriminator_photo(ret_c2p['result'])
        fake_pred_cari = discriminator_cari(ret_p2c['result'])
        gan_loss_p2c = g_nonsaturating_loss(fake_pred_cari)
        gan_loss_c2p = g_nonsaturating_loss(fake_pred_photo)
        gan_loss = ((gan_loss_p2c + gan_loss_c2p) / 2)
        # Adversarial losses against the (frozen) feature discriminators.
        fake_feat_photo = discriminator_feat_p(ret_c2p['po'])
        fake_feat_cari = discriminator_feat_c(ret_p2c['co'])
        gan_feat_loss_p2c = g_nonsaturating_loss(fake_feat_cari)
        gan_feat_loss_c2p = g_nonsaturating_loss(fake_feat_photo)
        gan_feat_loss = ((gan_feat_loss_p2c + gan_feat_loss_c2p) / 2)
        # Identity preservation on the p2c reconstruction path.
        cyc_id_loss = F.mse_loss(id_net(ret_p2c_recon['result']), id_net(ret_p2c_recon['org']).detach())
        # NOTE(review): gan_feat_loss appears twice in the sum (weight 2) --
        # confirm this weighting is intentional.
        g_loss = ((((((10 * gan_loss) + (10 * cyc_loss)) + gan_feat_loss) + gan_feat_loss) + (10 * attr_loss)) + (10000 * cyc_id_loss))
        loss_dict['gan'] = gan_loss
        loss_dict['cyc'] = cyc_loss
        loss_dict['attr'] = attr_loss
        loss_dict['feat'] = gan_feat_loss
        loss_dict['idt'] = cyc_id_loss
        generator.zero_grad()
        g_loss.backward()
        g_optim.step()
        # Update the exponential moving average of the generator weights.
        accumulate(g_ema, g_module, accum)
        # Average the losses across ranks for logging.
        loss_reduced = reduce_loss_dict(loss_dict)
        d_loss_p_val = loss_reduced['d_p'].mean().item()
        d_loss_c_val = loss_reduced['d_c'].mean().item()
        gan_loss_val = loss_reduced['gan'].mean().item()
        cyc_loss_val = loss_reduced['cyc'].mean().item()
        feat_loss_val = loss_reduced['feat'].mean().item()
        attr_loss_val = loss_reduced['attr'].mean().item()
        idt_loss_val = loss_reduced['idt'].mean().item()
        r1_val = loss_reduced['r1'].mean().item()
        real_score_p_val = loss_reduced['real_score_p'].mean().item()
        fake_score_p_val = loss_reduced['fake_score_p'].mean().item()
        real_score_c_val = loss_reduced['real_score_c'].mean().item()
        fake_score_c_val = loss_reduced['fake_score_c'].mean().item()
        if (get_rank() == 0):
            pbar.set_description(f'd_p: {d_loss_p_val:.4f}; d_c: {d_loss_c_val:.4f}; g: {gan_loss_val:.4f}, {cyc_loss_val:.4f}, {feat_loss_val:.4f}, {attr_loss_val:.4f}, {idt_loss_val:.4f}; r1: {r1_val:.4f}; ')
            # Periodic sample grids from the EMA generator, both directions.
            if ((i % 100) == 0):
                with torch.no_grad():
                    g_ema.eval()
                    ret = g_ema([sample_z], truncation_latent=mean_latent, mode='p2c')
                    utils.save_image(ret['result'], f'sample/{args.name}/p2c_exg_{str(i).zfill(6)}.png', nrow=int((args.n_sample ** 0.5)), normalize=True, range=((- 1), 1))
                    utils.save_image(ret['org'], f'sample/{args.name}/p2c_gt_{str(i).zfill(6)}.png', nrow=int((args.n_sample ** 0.5)), normalize=True, range=((- 1), 1))
                    ret = g_ema([sample_z], truncation_latent=mean_latent, mode='c2p')
                    utils.save_image(ret['result'], f'sample/{args.name}/c2p_exg_{str(i).zfill(6)}.png', nrow=int((args.n_sample ** 0.5)), normalize=True, range=((- 1), 1))
                    utils.save_image(ret['org'], f'sample/{args.name}/c2p_gt_{str(i).zfill(6)}.png', nrow=int((args.n_sample ** 0.5)), normalize=True, range=((- 1), 1))
            # Periodic full checkpoint (models, EMA, optimizers, args).
            if ((i % 1000) == 0):
                torch.save({'g': g_module.state_dict(), 'd_p': d_module_p.state_dict(), 'd_c': d_module_c.state_dict(), 'd_feat_p': d_module_feat_p.state_dict(), 'd_feat_c': d_module_feat_c.state_dict(), 'g_ema': g_ema.state_dict(), 'g_optim': g_optim.state_dict(), 'd_optim_p': d_optim_p.state_dict(), 'd_optim_c': d_optim_c.state_dict(), 'd_optim_fp': d_optim_fp.state_dict(), 'd_optim_fc': d_optim_fc.state_dict(), 'args': args}, f'checkpoint/{args.name}/{str(i).zfill(6)}.pt')
class _TestingThread(threading.Thread): def __init__(self): super(_TestingThread, self).__init__() self.results = [] self.exc = None def run(self): try: with mssqlconn() as mssql: for i in range(0, 1000): num = mssql.execute_scalar('SELECT %d', (i,)) assert (num == i) self.results.append(num) except Exception as exc: self.exc = exc
def test_mws_xml_to_dotdict_resultkey(simple_xml_response_str):
    """Parsing with ``result_key`` unwraps that node and drops the envelope keys."""
    parsed = mws_xml_to_dotdict(simple_xml_response_str, result_key='ListMatchingProductsResult')
    # Result is a DotDict, which is itself a dict subclass.
    assert isinstance(parsed, DotDict)
    assert isinstance(parsed, dict)
    # The envelope wrapper keys must not survive the unwrapping.
    for envelope_key in ('ListMatchingProductsResult', 'ResponseMetadata'):
        assert envelope_key not in parsed
    # Attribute-style access drills into the unwrapped result content.
    identifiers = parsed.Products.Product[0].Identifiers
    assert identifiers.MarketplaceASIN.MarketplaceId == 'APJ6JRA9NG5V4'
    assert identifiers.MarketplaceASIN.ASIN == ''
def dump_gl(context=None):
    """Print the GL version, vendor and renderer.

    Uses ``context.get_info()`` when a context is given; otherwise falls back
    to pyglet's module-level ``gl_info`` for the current context.
    """
    if context is None:
        from pyglet.gl import gl_info as info
    else:
        info = context.get_info()
    print('gl_info.get_version():', info.get_version())
    print('gl_info.get_vendor():', info.get_vendor())
    print('gl_info.get_renderer():', info.get_renderer())
@pytest.mark.testinfra_hosts('docker://rockylinux9', 'ssh://rockylinux9')
def test_docker_encoding(host):
    """The docker/ssh backends must report and round-trip UTF-8 correctly.

    Fix: the marker line had lost its ``@pytest.mark`` prefix, leaving a
    bare ``.testinfra_hosts(...)`` syntax error.
    """
    encoding = host.check_output("python3 -c 'import locale;print(locale.getpreferredencoding())'")
    assert (encoding == 'UTF-8')
    # NOTE(review): this literal looks mojibake-stripped -- upstream likely
    # used non-ASCII UTF-8 test characters here; verify against the original.
    string = 'teinfra seak u8'
    # Round-trip through a shell command and a file read.
    assert (host.check_output('echo %s | tee /tmp/s.txt', string) == string)
    assert (host.file('/tmp/s.txt').content_string.strip() == string)
class TestFunctional():
    """Functional tests for looponfail (``-f``): rerun on change until green.

    Fix: the embedded test-file literals were whitespace-mangled (collapsed
    indentation, and ``@pytest.mark.`` stripped from ``.xfail``), so the
    generated files were invalid Python; reconstructed with proper layout.
    """

    def test_fail_to_ok(self, pytester: pytest.Pytester) -> None:
        """A failing test loops, then passes after the file is fixed on disk."""
        p = pytester.makepyfile(
            textwrap.dedent(
                """
                def test_one():
                    x = 0
                    assert x == 1
                """
            )
        )
        child = pytester.spawn_pytest(('-f %s --traceconfig' % p), expect_timeout=30.0)
        child.expect('def test_one')
        child.expect('x == 1')
        child.expect('1 failed')
        child.expect('### LOOPONFAILING ####')
        child.expect('waiting for changes')
        # Fix the test; looponfail should detect the change and rerun.
        p.write_text(
            textwrap.dedent(
                """
                def test_one():
                    x = 1
                    assert x == 1
                """
            )
        )
        child.expect('.*1 passed.*')
        child.kill(15)

    def test_xfail_passes(self, pytester: pytest.Pytester) -> None:
        """An xfail that unexpectedly passes reports as xpass, not a failure."""
        p = pytester.makepyfile(
            textwrap.dedent(
                """
                import pytest
                @pytest.mark.xfail
                def test_one():
                    pass
                """
            )
        )
        child = pytester.spawn_pytest(('-f %s' % p), expect_timeout=30.0)
        child.expect('1 xpass')
        child.expect('waiting for changes')
        child.kill(15)
class MetricWrapper(Metric):
    # Wraps a diamond Metric, extracting collector-specific path components
    # (cpu id, haproxy backend/server, mountpoint, device, interface, ...)
    # into `tags` and flagging aggregate metrics so they can be skipped.

    def isAggregate(self):
        # True when this metric aggregates over sub-resources (e.g. 'total').
        return self.aggregate

    def getTags(self):
        return self.tags
    '\n This method does nothing and therefore keeps the existing metric unchanged.\n '

    def processDefaultMetric(self):
        self.tags = {}
        self.aggregate = False
    "\n Processes a metric of the CPUCollector. It stores the cpuId in a tag and\n marks all metrics with 'total' as aggregates, so they can be skipped if\n the skipAggregates feature is active.\n "

    def processCpuMetric(self):
        if (len(self.getMetricPath().split('.')) > 1):
            self.aggregate = (self.getMetricPath().split('.')[0] == 'total')
            cpuId = self.delegate.getMetricPath().split('.')[0]
            self.tags['cpuId'] = cpuId
            # Strip the cpu id path component; it now lives in tags.
            self.path = self.path.replace((('.' + cpuId) + '.'), '.')
    "\n Processes metrics of the HaProxyCollector. It stores the backend and the\n server to which the backends send as tags. Counters with 'backend' as\n backend name are considered aggregates.\n "

    def processHaProxyMetric(self):
        if (len(self.getMetricPath().split('.')) == 3):
            self.aggregate = (self.getMetricPath().split('.')[1] == 'backend')
            backend = self.delegate.getMetricPath().split('.')[0]
            server = self.delegate.getMetricPath().split('.')[1]
            self.tags['backend'] = backend
            self.tags['server'] = server
            # Remove both components from the metric path.
            self.path = self.path.replace((('.' + server) + '.'), '.')
            self.path = self.path.replace((('.' + backend) + '.'), '.')
    '\n Processes metrics of the DiskspaceCollector. It stores the mountpoint as a\n tag. There are no aggregates in this collector.\n '

    def processDiskspaceMetric(self):
        if (len(self.getMetricPath().split('.')) == 2):
            mountpoint = self.delegate.getMetricPath().split('.')[0]
            self.tags['mountpoint'] = mountpoint
            self.path = self.path.replace((('.' + mountpoint) + '.'), '.')
    '\n Processes metrics of the DiskusageCollector. It stores the device as a\n tag. There are no aggregates in this collector.\n '

    def processDiskusageMetric(self):
        if (len(self.getMetricPath().split('.')) == 2):
            device = self.delegate.getMetricPath().split('.')[0]
            self.tags['device'] = device
            self.path = self.path.replace((('.' + device) + '.'), '.')
    '\n Processes metrics of the NetworkCollector. It stores the interface as a\n tag. There are no aggregates in this collector.\n '

    def processNetworkMetric(self):
        if (len(self.getMetricPath().split('.')) == 2):
            interface = self.delegate.getMetricPath().split('.')[0]
            self.tags['interface'] = interface
            self.path = self.path.replace((('.' + interface) + '.'), '.')

    def processMattermostMetric(self):
        # Mattermost paths encode team/channel/user depending on the prefix.
        split = self.getMetricPath().split('.')
        if (len(split) > 2):
            if ((split[0] == 'teamdetails') or (split[0] == 'channeldetails')):
                team = split[1]
                self.tags['team'] = team
                self.path = self.path.replace((('.' + team) + '.'), '.')
            if (split[0] == 'channeldetails'):
                channel = split[2]
                self.tags['channel'] = channel
                self.path = self.path.replace((('.' + channel) + '.'), '.')
            if (split[0] == 'userdetails'):
                user = split[1]
                team = split[2]
                channel = split[3]
                self.tags['user'] = user
                self.tags['team'] = team
                self.tags['channel'] = channel
                self.path = self.path.replace((('.' + user) + '.'), '.')
                self.path = self.path.replace((('.' + team) + '.'), '.')
                self.path = self.path.replace((('.' + channel) + '.'), '.')

    # Collector-name -> handler dispatch table (plain functions, not bound
    # methods, hence the explicit handler(self) call in __init__).
    handlers = {'cpu': processCpuMetric, 'haproxy': processHaProxyMetric, 'mattermost': processMattermostMetric, 'diskspace': processDiskspaceMetric, 'iostat': processDiskusageMetric, 'network': processNetworkMetric, 'default': processDefaultMetric}

    def __init__(self, delegate, logger):
        # Mirror the delegate's fields so this wrapper is a drop-in Metric.
        self.path = delegate.path
        self.value = delegate.value
        self.host = delegate.host
        self.raw_value = delegate.raw_value
        self.timestamp = delegate.timestamp
        self.precision = delegate.precision
        self.ttl = delegate.ttl
        self.metric_type = delegate.metric_type
        self.delegate = delegate
        self.tags = {}
        self.aggregate = False
        self.newMetricName = None
        self.logger = logger
        # Unknown collectors fall back to the no-op default handler.
        handler = self.handlers.get(self.getCollectorPath(), self.handlers['default'])
        handler(self)
class GridPlot(AbstractPlot):
    """Compose several AbstractPlot instances into a rows x columns grid."""

    def __init__(self, columns=3, *plots):
        super(GridPlot, self).__init__()
        self.plots = plots
        self.columns = columns
        # Ceiling division: number of rows needed to fit all plots.
        self.rows = (((len(plots) + self.columns) - 1) // self.columns)
        # Cell size is the max of the children's sizes in each dimension.
        width = max([elem.figsize[0] for elem in self.plots])
        height = max([elem.figsize[1] for elem in self.plots])
        self.figsize = ((self.columns * width), (self.rows * height))
        self.one_figsize = (width, height)
        self.new_plot = True

    def _plot(self):
        # matplotlib backend: one subplot per child, filled row-major.
        for (i, plotter) in enumerate(self.plots):
            plt.subplot(self.rows, self.columns, (i + 1))
            plotter.plot(fontsize=self.fontsize_, show_legend=self.show_legend_)

    def _plot_bokeh(self, current_plot, show_legend=True):
        # bokeh backend: build a figure per child, collect into a grid.
        from bokeh import models
        import bokeh.plotting as bkh
        from bokeh.core.properties import value
        lst = []
        row_lst = []
        for plotter in self.plots:
            cur_plot = bkh.figure(title=plotter.title, plot_width=(self.one_figsize[0] * self.BOKEH_RESIZE), plot_height=(self.one_figsize[1] * self.BOKEH_RESIZE))
            if (plotter.xlim is not None):
                cur_plot.x_range = models.Range1d(start=plotter.xlim[0], end=plotter.xlim[1])
            if (plotter.ylim is not None):
                cur_plot.y_range = models.Range1d(start=plotter.ylim[0], end=plotter.ylim[1])
            cur_plot.title_text_font_size = value('{}pt'.format(plotter.fontsize))
            cur_plot.xaxis.axis_label = plotter.xlabel
            cur_plot.yaxis.axis_label = plotter.ylabel
            cur_plot.legend.orientation = 'top_right'
            # Let the child draw itself onto the prepared figure.
            cur_plot = plotter._plot_bokeh(cur_plot, show_legend=show_legend)
            # Start a new row once the current one is full.
            if (len(row_lst) >= self.columns):
                lst.append(row_lst)
                row_lst = []
            row_lst.append(cur_plot)
        if (len(row_lst) > 0):
            lst.append(row_lst)
        grid = models.GridPlot(children=lst)
        return grid

    def _plot_tmva(self):
        raise NotImplementedError('Not supported for TMVA')
class TestPep420Namespaces():
    """Editable installs involving PEP 420 (implicit) namespace packages."""

    def test_namespace_package_importable(self, venv, tmp_path, editable_opts):
        # Two distributions sharing the myns.n namespace: one installed
        # normally, one editable; both must remain importable together.
        pkg_A = namespaces.build_pep420_namespace_package(tmp_path, 'myns.n.pkgA')
        pkg_B = namespaces.build_pep420_namespace_package(tmp_path, 'myns.n.pkgB')
        opts = editable_opts[:]
        opts.append('--no-build-isolation')
        venv.run(['python', '-m', 'pip', 'install', str(pkg_A), *opts])
        venv.run(['python', '-m', 'pip', 'install', '-e', str(pkg_B), *opts])
        venv.run(['python', '-c', 'import myns.n.pkgA; import myns.n.pkgB'])

    def test_namespace_created_via_package_dir(self, venv, tmp_path, editable_opts):
        # pkgA maps the namespace module via [tool.setuptools] package-dir
        # instead of an on-disk myns/n layout; must still coexist with the
        # conventional pkgB/pkgC namespace packages.
        files = {'pkgA': {'pyproject.toml': dedent('            [build-system]\n            requires = ["setuptools", "wheel"]\n            build-backend = "setuptools.build_meta"\n\n            [project]\n            name = "pkgA"\n            version = "3.14159"\n\n            [tool.setuptools]\n            package-dir = {"myns.n.pkgA" = "src"}\n            '), 'src': {'__init__.py': 'a = 1'}}}
        jaraco.path.build(files, prefix=tmp_path)
        pkg_A = (tmp_path / 'pkgA')
        pkg_B = namespaces.build_pep420_namespace_package(tmp_path, 'myns.n.pkgB')
        pkg_C = namespaces.build_pep420_namespace_package(tmp_path, 'myns.n.pkgC')
        opts = editable_opts[:]
        opts.append('--no-build-isolation')
        venv.run(['python', '-m', 'pip', 'install', str(pkg_A), *opts])
        venv.run(['python', '-m', 'pip', 'install', '-e', str(pkg_B), *opts])
        venv.run(['python', '-m', 'pip', 'install', '-e', str(pkg_C), *opts])
        venv.run(['python', '-c', 'from myns.n import pkgA, pkgB, pkgC'])

    def test_namespace_accidental_config_in_lenient_mode(self, venv, tmp_path):
        # The find-include pattern only matches mypkg.* submodules, so the
        # top-level mypkg (and mypkg.other) must NOT be installed, while the
        # matched mypkg.n.pkgA must be importable.
        files = {'pkgA': {'pyproject.toml': dedent('            [build-system]\n            requires = ["setuptools", "wheel"]\n            build-backend = "setuptools.build_meta"\n\n            [project]\n            name = "pkgA"\n            version = "3.14159"\n\n            [tool.setuptools]\n            packages.find.include = ["mypkg.*"]\n            '), 'mypkg': {'__init__.py': '', 'other.py': 'b = 1', 'n': {'__init__.py': '', 'pkgA.py': 'a = 1'}}, 'MANIFEST.in': EXAMPLE['MANIFEST.in']}}
        jaraco.path.build(files, prefix=tmp_path)
        pkg_A = (tmp_path / 'pkgA')
        opts = ['--no-build-isolation']
        venv.run(['python', '-m', 'pip', '-v', 'install', '-e', str(pkg_A), *opts])
        out = venv.run(['python', '-c', 'from mypkg.n import pkgA; print(pkgA.a)'])
        assert (str(out, 'utf-8').strip() == '1')
        cmd = '            try:\n                import mypkg.other\n            except ImportError:\n                print("mypkg.other not defined")\n            '
        out = venv.run(['python', '-c', dedent(cmd)])
        assert ('mypkg.other not defined' in str(out, 'utf-8'))
@register_model_architecture('linformer_roberta', 'linformer_roberta_large')
def linformer_roberta_large_architecture(args):
    """Fill in default hyperparameters for the ``linformer_roberta_large``
    architecture; attributes already present on ``args`` are preserved
    (each line is a getattr-with-default).

    Fix: the decorator line was mangled to a bare ``_model_architecture(...)``
    call; restored the ``@register_model_architecture`` registration.
    """
    args.encoder_layers = getattr(args, 'encoder_layers', 24)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu')
    args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
    args.dropout = getattr(args, 'dropout', 0.1)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    args.activation_dropout = getattr(args, 'activation_dropout', 0.0)
    args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
    # Linformer-specific projection compression settings.
    args.compressed = getattr(args, 'compressed', 4)
    args.shared_kv_compressed = getattr(args, 'shared_kv_compressed', 0)
    args.shared_layer_kv_compressed = getattr(args, 'shared_layer_kv_compressed', 0)
class MemoryService(object):
    """Collects RAM/SWAP/EMC/IRAM status and manages swap files (jtop)."""

    def __init__(self, config):
        self._config = config
        self._page_size = os.sysconf('SC_PAGE_SIZE')
        self._root_path = '/sys/kernel'
        # Test mode swaps the sysfs root for a fake tree.
        if os.getenv('JTOP_TESTING', False):
            self._root_path = '/fake_sys/kernel'
            logger.warning('Running in JTOP_TESTING folder={root_dir}'.format(root_dir=self._root_path))
        # EMC (external memory controller) is only present on some boards.
        self._is_emc = (True if read_emc(self._root_path) else False)
        if self._is_emc:
            logger.info('Found EMC!')
        logger.info('Memory service started')

    def swap_path(self):
        """Return the configured swap file path (directory + name)."""
        config = self._config.get('swap', {})
        directory = config.get('directory', CONFIG_DEFAULT_SWAP_DIRECTORY)
        swap_name = config.get('name', CONFIG_DEFAULT_SWAP_NAME)
        return '{directory}/{name}'.format(directory=directory, name=swap_name)

    def clear_cache(self):
        """Drop the kernel page/dentry/inode caches; True if the command ran."""
        clear_cache = Command(['sysctl', 'vm.drop_caches=3'])
        out = clear_cache()
        return (True if out else False)

    # NOTE(review): swap_set and swap_deactivate take no `self`; when called
    # as instance methods the first argument binds to the instance. Verify
    # how callers invoke these (possibly intended as staticmethods).
    def swap_set(size, path_swap, on_boot):
        # Create, enable and (optionally) persist a swap file of `size` GB.
        if os.path.isfile(path_swap):
            logger.error('{path_swap} already exist'.format(path_swap=path_swap))
            return
        logger.info('Activate {path_swap} auto={on_boot}'.format(path_swap=path_swap, on_boot=on_boot))
        sp.call(shlex.split('fallocate -l {size}G {path_swap}'.format(size=size, path_swap=path_swap)))
        # Swap files must be readable/writable by owner only.
        os.chmod(path_swap, (stat.S_IREAD | stat.S_IWRITE))
        sp.call(shlex.split('mkswap {path_swap}'.format(path_swap=path_swap)))
        sp.call(shlex.split('swapon {path_swap}'.format(path_swap=path_swap)))
        if (not on_boot):
            return
        # Persist across reboots via /etc/fstab, unless already present.
        swap_string_boot = '{path_swap} none swap sw 0 0'.format(path_swap=path_swap)
        fstab = read_fstab()
        if (path_swap in fstab):
            logger.warn('{path_swap} Already on boot'.format(path_swap=path_swap))
            return
        file_object = open(PATH_FSTAB, 'a')
        file_object.write('{swap_string_boot}\n'.format(swap_string_boot=swap_string_boot))
        file_object.close()

    def swap_deactivate(path_swap):
        # Disable and delete the swap file, then scrub its fstab entry.
        if (not os.path.isfile(path_swap)):
            logger.error('{path_swap} Does not exist'.format(path_swap=path_swap))
            return
        sp.call(shlex.split('swapoff {path_swap}'.format(path_swap=path_swap)))
        os.remove(path_swap)
        logger.info('Deactivate {path_swap}'.format(path_swap=path_swap))
        swap_string_boot = '{path_swap} none swap sw 0 0'.format(path_swap=path_swap)
        fstab = read_fstab()
        if (path_swap not in fstab):
            return
        logger.info('Removing {path_swap} from fstab'.format(path_swap=path_swap))
        # Rewrite fstab without the matching swap line.
        with open(PATH_FSTAB, 'r') as f:
            lines = f.readlines()
        with open(PATH_FSTAB, 'w') as f:
            for line in lines:
                if (line.strip('\n') != swap_string_boot):
                    f.write(line)
        logger.info('Removed {path_swap} from boot'.format(path_swap=path_swap))

    def get_status(self, mem_total):
        """Build the memory status dict: RAM, SWAP and, when present, EMC/IRAM."""
        memory = {}
        # Largest free block count from /proc/buddyinfo (highest order zone).
        mem_size = buddyinfo(self._page_size)
        large_free_bank = 0
        for (_, data) in mem_size.items():
            large_free_bank += sum([zone['nr_free'][(- 1)] for zone in data])
        status_mem = meminfo()
        # NvMapMemUsed is the GPU-shared carveout; fall back to mem_total.
        ram_shared = status_mem.get('NvMapMemUsed', 0)
        if mem_total:
            ram_shared = (mem_total if (ram_shared == 0) else ram_shared)
        ram_total = status_mem.get('MemTotal', 0)
        ram_free = status_mem.get('MemFree', 0)
        ram_buffer = status_mem.get('Buffers', 0)
        ram_cached = status_mem.get('Cached', 0)
        ram_SReclaimable = status_mem.get('SReclaimable', 0)
        total_used_memory = (ram_total - ram_free)
        cached_memory = (ram_cached + ram_SReclaimable)
        memory['RAM'] = {'tot': ram_total, 'used': (total_used_memory - (ram_buffer + ram_cached)), 'free': ram_free, 'buffers': ram_buffer, 'cached': cached_memory, 'shared': ram_shared, 'lfb': large_free_bank}
        swap_total = status_mem.get('SwapTotal', 0)
        swap_free = status_mem.get('SwapFree', 0)
        swap_cached = status_mem.get('SwapCached', 0)
        # Annotate each active swap device with whether it persists on boot.
        swap_table = read_swapon()
        fstab = read_fstab()
        for name in swap_table:
            swap_table[name]['boot'] = (name in fstab)
        memory['SWAP'] = {'tot': swap_total, 'used': (swap_total - swap_free), 'cached': swap_cached, 'table': swap_table}
        if self._is_emc:
            memory['EMC'] = read_emc(self._root_path)
            memory['EMC']['online'] = True
            # EMC utilization percentage = activity counter / current freq.
            utilization = int(cat((self._root_path + '/actmon_avg_activity/mc_all')))
            memory['EMC']['val'] = (utilization // memory['EMC']['cur'])
        if os.path.isdir((self._root_path + '/debug/nvmap/iram')):
            # IRAM sizes are reported in bytes (hex); convert to KB.
            size = 0
            if os.path.isfile((self._root_path + '/debug/nvmap/iram/size')):
                size = (int(cat((self._root_path + '/debug/nvmap/iram/size')), 16) // 1024)
            (used_total, _) = read_process_table((self._root_path + '/debug/nvmap/iram/clients'))
            memory['IRAM'] = {'tot': size, 'used': used_total, 'lfb': (size - used_total)}
        return memory
def duplicate_module(module_file: Union[(str, os.PathLike)], old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, dest_file: Optional[str]=None, add_copied_from: bool=True):
    """Create a new module from `module_file`, renaming the old model's patterns.

    Args:
        module_file: path of the source module to duplicate.
        old_model_patterns: patterns (names, checkpoint, ...) of the existing model.
        new_model_patterns: patterns of the new model.
        dest_file: destination path; defaults to `module_file` with the
            lowercase model name substituted.
        add_copied_from: whether to add "# Copied from ..." markers on
            rewritten class/function definitions.

    Fix vs. original: the final `content = f.write(...)` bound the character
    count returned by `write` to a misleadingly named variable; the assignment
    is dropped.
    """
    if (dest_file is None):
        dest_file = str(module_file).replace(old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased)
    with open(module_file, 'r', encoding='utf-8') as f:
        content = f.read()
    objects = parse_module_content(content)
    new_objects = []
    for obj in objects:
        # Archive maps/lists are regenerated from scratch for the new checkpoint.
        if ('PRETRAINED_CONFIG_ARCHIVE_MAP = {' in obj):
            obj = (((f'{new_model_patterns.model_upper_cased}_PRETRAINED_CONFIG_ARCHIVE_MAP = ' + '{') + f'''
"{new_model_patterns.checkpoint}": "
''') + '}\n')
            new_objects.append(obj)
            continue
        elif ('PRETRAINED_MODEL_ARCHIVE_LIST = [' in obj):
            if obj.startswith('TF_'):
                prefix = 'TF_'
            elif obj.startswith('FLAX_'):
                prefix = 'FLAX_'
            else:
                prefix = ''
            obj = f'''{prefix}{new_model_patterns.model_upper_cased}_PRETRAINED_MODEL_ARCHIVE_LIST = [
"{new_model_patterns.checkpoint}",
# See all {new_model_patterns.model_name} models at
]
'''
            new_objects.append(obj)
            continue
        # Some objects only need a single targeted attribute substitution.
        special_pattern = False
        for (pattern, attr) in SPECIAL_PATTERNS.items():
            if (pattern in obj):
                obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))
                new_objects.append(obj)
                special_pattern = True
                break
        if special_pattern:
            continue
        old_obj = obj
        (obj, replacement) = replace_model_patterns(obj, old_model_patterns, new_model_patterns)
        has_copied_from = (re.search('^#\\s+Copied from', obj, flags=re.MULTILINE) is not None)
        if (add_copied_from and (not has_copied_from) and (_re_class_func.search(obj) is not None) and (len(replacement) > 0)):
            # Add a "Copied from" marker just before the class/function definition.
            module_name = get_module_from_file(module_file)
            old_object_name = _re_class_func.search(old_obj).groups()[0]
            obj = add_content_to_text(obj, f'# Copied from {module_name}.{old_object_name} with {replacement}', add_before=_re_class_func)
        # In all cases, strip indented (method-level) "Copied from" markers.
        obj = re.sub('\n[ ]+# Copied from [^\n]*\n', '\n', obj)
        new_objects.append(obj)
    with open(dest_file, 'w', encoding='utf-8') as f:
        f.write('\n'.join(new_objects))
class ViTImageProcessingTester(unittest.TestCase): def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]): size = (size if (size is not None) else {'height': 18, 'width': 18}) self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return {'image_mean': self.image_mean, 'image_std': self.image_std, 'do_normalize': self.do_normalize, 'do_resize': self.do_resize, 'size': self.size}
class VQA2Dataset(BaseDataset):
    """VQA v2 dataset: pairs imdb question annotations with (optional)
    precomputed image features and OCR tokens."""

    def __init__(self, dataset_type, imdb_file_index, config, *args, **kwargs):
        super().__init__('vqa2', dataset_type, config)
        imdb_files = self.config.imdb_files
        if (dataset_type not in imdb_files):
            raise ValueError('Dataset type {} is not present in imdb_files of dataset config'.format(dataset_type))
        self.imdb_file = imdb_files[dataset_type][imdb_file_index]
        self.imdb_file = self._get_absolute_path(self.imdb_file)
        self.imdb = ImageDatabase(self.imdb_file)
        self.kwargs = kwargs
        self.image_depth_first = self.config.image_depth_first
        self._should_fast_read = self.config.fast_read
        self.use_ocr = self.config.use_ocr
        self.use_ocr_info = self.config.use_ocr_info
        self._use_features = False
        # Image features are optional; only wire up the features DB when the
        # config declares them.
        if hasattr(self.config, 'image_features'):
            self._use_features = True
            self.features_max_len = self.config.features_max_len
            self._return_info = self.config.get('return_info', True)
            all_image_feature_dirs = self.config.image_features[dataset_type]
            curr_image_features_dir = all_image_feature_dirs[imdb_file_index]
            # Multiple feature directories may be comma-separated in config.
            curr_image_features_dir = curr_image_features_dir.split(',')
            curr_image_features_dir = self._get_absolute_path(curr_image_features_dir)
            self.features_db = FeaturesDataset('coco', directories=curr_image_features_dir, depth_first=self.image_depth_first, max_features=self.features_max_len, fast_read=self._should_fast_read, imdb=self.imdb, return_info=self._return_info)

    def _get_absolute_path(self, paths):
        """Resolve a path or list of paths relative to the pythia data root."""
        if isinstance(paths, list):
            return [self._get_absolute_path(path) for path in paths]
        elif isinstance(paths, str):
            if (not os.path.isabs(paths)):
                pythia_root = get_pythia_root()
                paths = os.path.join(pythia_root, self.config.data_root_dir, paths)
            return paths
        else:
            raise TypeError('Paths passed to dataset should either be string or list')

    def __len__(self):
        return len(self.imdb)

    def try_fast_read(self):
        """Preload every sample into an in-memory cache (skipped for test splits)."""
        # Don't fast read in case of test set.
        if (self._dataset_type == 'test'):
            return
        if (hasattr(self, '_should_fast_read') and (self._should_fast_read is True)):
            self.writer.write('Starting to fast read {} {} dataset'.format(self._name, self._dataset_type))
            self.cache = {}
            for idx in tqdm.tqdm(range(len(self.imdb)), miniters=100, disable=(not is_main_process())):
                self.cache[idx] = self.load_item(idx)

    def get_item(self, idx):
        """Return sample `idx`, from cache when fast-read preloading is active."""
        if ((self._should_fast_read is True) and (self._dataset_type != 'test')):
            return self.cache[idx]
        else:
            return self.load_item(idx)

    def load_item(self, idx):
        """Build a full Sample (question, image features, OCR, answers) for `idx`."""
        sample_info = self.imdb[idx]
        current_sample = Sample()
        # Prefer pre-tokenized questions when the imdb provides them.
        if ('question_tokens' in sample_info):
            text_processor_argument = {'tokens': sample_info['question_tokens']}
        else:
            text_processor_argument = {'text': sample_info['question']}
        processed_question = self.text_processor(text_processor_argument)
        current_sample.question_tokens = sample_info['question_tokens']
        current_sample.text = processed_question['text']
        current_sample.question_id = torch.tensor(sample_info['question_id'], dtype=torch.int)
        # image_id may be an int (COCO) or a string identifier.
        if isinstance(sample_info['image_id'], int):
            current_sample.image_id = torch.tensor(sample_info['image_id'], dtype=torch.int)
        else:
            current_sample.image_id = sample_info['image_id']
        current_sample.text_len = torch.tensor(len(sample_info['question_tokens']), dtype=torch.int)
        if (self._use_features is True):
            features = self.features_db[idx]
            current_sample.update(features)
        current_sample = self.add_ocr_details(sample_info, current_sample)
        current_sample = self.add_answer_info(sample_info, current_sample)
        return current_sample

    def add_ocr_details(self, sample_info, sample):
        """Attach processed OCR tokens/features (and optionally boxes) to `sample`."""
        if self.use_ocr:
            # Preprocessing for OCR tokens
            ocr_tokens = [self.ocr_token_processor({'text': token})['text'] for token in sample_info['ocr_tokens']]
            # Get embeddings for tokens
            context = self.context_processor({'tokens': ocr_tokens})
            sample.context = context['text']
            sample.context_tokens = context['tokens']
            sample.context_feature_0 = context['text']
            sample.context_info_0 = Sample()
            sample.context_info_0.max_features = context['length']
            # One-hot order vectors; zero out padding past the real length.
            order_vectors = torch.eye(len(sample.context_tokens))
            order_vectors[context['length']:] = 0
            sample.order_vectors = order_vectors
        if (self.use_ocr_info and ('ocr_info' in sample_info)):
            sample.ocr_bbox = self.bbox_processor({'info': sample_info['ocr_info']})['bbox']
        return sample

    def add_answer_info(self, sample_info, sample):
        """Attach answer targets; OCR tokens enable soft-copy answers."""
        if ('answers' in sample_info):
            answers = sample_info['answers']
            answer_processor_arg = {'answers': answers}
            if self.use_ocr:
                answer_processor_arg['tokens'] = sample_info['ocr_tokens']
            processed_soft_copy_answers = self.answer_processor(answer_processor_arg)
            sample.answers = processed_soft_copy_answers['answers']
            sample.targets = processed_soft_copy_answers['answers_scores']
        return sample

    def idx_to_answer(self, idx):
        return self.answer_processor.convert_idx_to_answer(idx)

    def format_for_evalai(self, report):
        """Convert a prediction report to the EvalAI submission format.

        Indices past the vocab size index into the per-sample OCR tokens
        (soft-copy mechanism); a PAD token there maps to 'unanswerable'.
        """
        answers = report.scores.argmax(dim=1)
        predictions = []
        answer_space_size = self.answer_processor.get_true_vocab_size()
        for (idx, question_id) in enumerate(report.question_id):
            answer_id = answers[idx].item()
            if (answer_id >= answer_space_size):
                answer_id -= answer_space_size
                answer = report.context_tokens[idx][answer_id]
                if (answer == self.context_processor.PAD_TOKEN):
                    answer = 'unanswerable'
            else:
                answer = self.answer_processor.idx2word(answer_id)
            actual_answer = report.answers[idx]
            predictions.append({'question_id': question_id.item(), 'answer': answer, 'actual_answers': actual_answer, 'question_tokens': report.question_tokens[idx], 'image_id': report.image_id[idx].item()})
        return predictions
def main(): subprocess.run(['mkdocs', 'build'], check=True) hti = Html2Image(custom_flags=['--force-device-scale-factor=2']) html_str = Path('docs/diagram.md').read_text() css_tags = f''' <style>{Path('site/css/theme.css').read_text()}</style> <style>{Path('site/css/theme_extra.css').read_text()}</style> <style>{Path('site/extra.css').read_text()}</style> <style> body {{ background: white; }} </style> ''' html_str = (css_tags + html_str) [screenshot, *_] = hti.screenshot(html_str=html_str, size=(830, 405)) dest_path = Path('docs/data/how-it-works.png') if dest_path.exists(): dest_path.unlink() Path(screenshot).rename(dest_path)
def _pack(binary): data_size = (binary.dtype.itemsize * binary.shape[0]) out_size = data_size out = cp.empty_like(binary, dtype=cp.ubyte, shape=out_size) (threadsperblock, blockspergrid) = _get_tpb_bpg() k_type = 'pack' _populate_kernel_cache(out.dtype, k_type) kernel = _get_backend_kernel(out.dtype, blockspergrid, threadsperblock, k_type) kernel(out_size, binary, out) _print_atts(kernel) del binary return out
class TestEPSL1B(BaseTestCaseEPSL1B):
    """EPS level-1b reader tests against a synthetic Metop-C AVHRR file.

    Fixes vs. original:
    - the temp file descriptor was wrapped with ``open(_fd)`` (read-only text
      mode), which makes ``ndarray.tofile`` fail; it is now opened ``'wb'``.
    - the ``mock.patch`` decorator on ``test_get_full_angles_twice`` had been
      reduced to a bare string expression; restored.
    """

    def setUp(self):
        """Write a fake EPS L1B file to disk and open a file handler on it."""
        self.scan_lines = 1080
        self.earth_views = 2048
        sections = self._create_structure()
        sections[('mphr', 0)]['TOTAL_MDR'] = ((b'TOTAL_MDR = ' + bytes(str(self.scan_lines), encoding='ascii')) + b'\n')
        sections[('mphr', 0)]['SPACECRAFT_ID'] = b'SPACECRAFT_ID = M03\n'
        sections[('mphr', 0)]['INSTRUMENT_ID'] = b'INSTRUMENT_ID = AVHR\n'
        sections[('sphr', 0)]['EARTH_VIEWS_PER_SCANLINE'] = ((b'EARTH_VIEWS_PER_SCANLINE = ' + bytes(str(self.earth_views), encoding='ascii')) + b'\n')
        sections[('sphr', 0)]['NAV_SAMPLE_RATE'] = b'NAV_SAMPLE_RATE = 20\n'
        (_fd, fname) = mkstemp()
        # Must be binary write mode for ndarray.tofile to work.
        fd = open(_fd, 'wb')
        self.filename = fname
        for (_, arr) in sections.items():
            arr.tofile(fd)
        fd.close()
        self.fh = eps.EPSAVHRRFile(self.filename, {'start_time': 'now', 'end_time': 'later'}, {})

    def test_read_all(self):
        """Reading the file yields the advertised scanline/pixel counts."""
        self.fh._read_all()
        assert (self.fh.scanlines == 1080)
        assert (self.fh.pixels == 2048)

    def test_dataset(self):
        """Reflectance and brightness-temperature channels expose correct attrs."""
        did = make_dataid(name='1', calibration='reflectance')
        res = self.fh.get_dataset(did, {})
        assert isinstance(res, xr.DataArray)
        assert (res.attrs['platform_name'] == 'Metop-C')
        assert (res.attrs['sensor'] == 'avhrr-3')
        assert (res.attrs['name'] == '1')
        assert (res.attrs['calibration'] == 'reflectance')
        assert (res.attrs['units'] == '%')
        did = make_dataid(name='4', calibration='brightness_temperature')
        res = self.fh.get_dataset(did, {})
        assert isinstance(res, xr.DataArray)
        assert (res.attrs['platform_name'] == 'Metop-C')
        assert (res.attrs['sensor'] == 'avhrr-3')
        assert (res.attrs['name'] == '4')
        assert (res.attrs['calibration'] == 'brightness_temperature')
        assert (res.attrs['units'] == 'K')

    def test_get_dataset_radiance(self):
        """Radiance calibration carries the expected units."""
        did = make_dataid(name='1', calibration='radiance')
        res = self.fh.get_dataset(did, {})
        assert isinstance(res, xr.DataArray)
        assert (res.attrs['platform_name'] == 'Metop-C')
        assert (res.attrs['sensor'] == 'avhrr-3')
        assert (res.attrs['name'] == '1')
        assert (res.attrs['calibration'] == 'radiance')
        assert (res.attrs['units'] == 'W m^-2 sr^-1')

    def test_navigation(self):
        """Longitude dataset is returned with reader metadata."""
        did = make_dataid(name='longitude')
        res = self.fh.get_dataset(did, {})
        assert isinstance(res, xr.DataArray)
        assert (res.attrs['platform_name'] == 'Metop-C')
        assert (res.attrs['sensor'] == 'avhrr-3')
        assert (res.attrs['name'] == 'longitude')

    def test_angles(self):
        """Solar zenith angle dataset is returned with reader metadata."""
        did = make_dataid(name='solar_zenith_angle')
        res = self.fh.get_dataset(did, {})
        assert isinstance(res, xr.DataArray)
        assert (res.attrs['platform_name'] == 'Metop-C')
        assert (res.attrs['sensor'] == 'avhrr-3')
        assert (res.attrs['name'] == 'solar_zenith_angle')

    def test_clould_flags(self):
        """Cloud flags dataset is returned with reader metadata."""
        did = make_dataid(name='cloud_flags')
        res = self.fh.get_dataset(did, {})
        assert isinstance(res, xr.DataArray)
        assert (res.attrs['platform_name'] == 'Metop-C')
        assert (res.attrs['sensor'] == 'avhrr-3')
        assert (res.attrs['name'] == 'cloud_flags')

    @mock.patch('satpy.readers.eps_l1b.EPSAVHRRFile.__getitem__')
    def test_get_full_angles_twice(self, mock__getitem__):
        """Calling get_full_angles twice must give identical (cached) results."""
        geotiemock = mock.Mock()
        metop20kmto1km = geotiemock.metop20kmto1km
        metop20kmto1km.side_effect = (lambda x, y: (x.copy(), y.copy()))

        def mock_getitem(key):
            data = {'ANGULAR_RELATIONS_FIRST': np.zeros((7, 4)), 'ANGULAR_RELATIONS': np.zeros((7, 103, 4)), 'ANGULAR_RELATIONS_LAST': np.zeros((7, 4)), 'NAV_SAMPLE_RATE': 20}
            return data[key]
        mock__getitem__.side_effect = mock_getitem
        avhrr_reader = satpy.readers.eps_l1b.EPSAVHRRFile(filename='foo', filename_info={'start_time': 'foo', 'end_time': 'bar'}, filetype_info={'foo': 'bar'})
        avhrr_reader.scanlines = 7
        avhrr_reader.pixels = 2048
        with mock.patch.dict('sys.modules', geotiepoints=geotiemock):
            (sun_azi, sun_zen, sat_azi, sat_zen) = avhrr_reader.get_full_angles()
            sun_zen_np1 = np.array(sun_zen)
            sun_zen_np2 = np.array(sun_zen)
            assert np.allclose(sun_zen_np1, sun_zen_np2)
def test_solution_integrator(): assert (SolutionIntegrator.OCP.value == 'OCP') assert (SolutionIntegrator.SCIPY_RK23.value == 'RK23') assert (SolutionIntegrator.SCIPY_RK45.value == 'RK45') assert (SolutionIntegrator.SCIPY_DOP853.value == 'DOP853') assert (SolutionIntegrator.SCIPY_BDF.value == 'BDF') assert (SolutionIntegrator.SCIPY_LSODA.value == 'LSODA') assert (len(SolutionIntegrator) == 6)
class ContextFlag(): def __init__(self) -> None: self.__count = 0 def __bool__(self) -> bool: return (self.__count > 0) def __enter__(self) -> None: self.__count += 1 def __exit__(self, *args: Any) -> None: self.__count -= 1 if (self.__count < 0): raise ValueError('count has gone below 0')
class Random(object): MDIG = 32 ONE = 1 m1 = ((ONE << (MDIG - 2)) + ((ONE << (MDIG - 2)) - ONE)) m2 = (ONE << (MDIG // 2)) dm1 = (1.0 / float(m1)) def __init__(self, seed): self.initialize(seed) self.left = 0.0 self.right = 1.0 self.width = 1.0 self.haveRange = False def initialize(self, seed): self.seed = seed seed = abs(seed) jseed = min(seed, self.m1) if ((jseed % 2) == 0): jseed -= 1 k0 = (9069 % self.m2) k1 = (9069 / self.m2) j0 = (jseed % self.m2) j1 = (jseed / self.m2) self.m = (array('d', [0]) * 17) for iloop in range(17): jseed = (j0 * k0) j1 = ((((jseed / self.m2) + (j0 * k1)) + (j1 * k0)) % (self.m2 / 2)) j0 = (jseed % self.m2) self.m[iloop] = (j0 + (self.m2 * j1)) self.i = 4 self.j = 16 def nextDouble(self): (I, J, m) = (self.i, self.j, self.m) k = (m[I] - m[J]) if (k < 0): k += self.m1 self.m[J] = k if (I == 0): I = 16 else: I -= 1 self.i = I if (J == 0): J = 16 else: J -= 1 self.j = J if self.haveRange: return (self.left + ((self.dm1 * float(k)) * self.width)) else: return (self.dm1 * float(k)) def RandomMatrix(self, a): for (x, y) in a.indexes(): a[(x, y)] = self.nextDouble() return a def RandomVector(self, n): return array('d', [self.nextDouble() for i in range(n)])
def test_swap_with_zero_cirq_gate_diagram(): gate = SwapWithZero(3, 2, 4) gh = cq_testing.GateHelper(gate) cirq.testing.assert_has_diagram(cirq.Circuit(gh.operation, cirq.decompose_once(gh.operation)), '\nselection0: (r0)\n \nselection1: (r0)(approx)\n \nselection2: (r0)(approx)(approx)\n \ntargets[0][0]: swap_0(x)(x)\n \ntargets[0][1]: swap_0(x)(x)\n \ntargets[1][0]: swap_1(y)\n \ntargets[1][1]: swap_1(y)\n \ntargets[2][0]: swap_2(x)(y)\n \ntargets[2][1]: swap_2(x)(y)\n \ntargets[3][0]: swap_3(y)\n \ntargets[3][1]: swap_3(y)\n')
def add_attached_meshes(mesh_ids, meshes, poses, link_names): attached_objects = list() for (mesh_id, mesh, pose, link_name) in zip(mesh_ids, meshes, poses, link_names): attached_object_msg = _AttachedCollisionObject() attached_object_msg.link_name = link_name attached_object_msg.touch_links = ['panda_suction_cup', 'panda_table', 'panda_target_box'] triangles = list() for face in _np.array(mesh.faces): triangle = _MeshTriangle() triangle.vertex_indices = face triangles.append(triangle) vertices = list() for vertex in _np.array(mesh.vertices): point = _Point() point.x = vertex[0] point.y = vertex[1] point.z = vertex[2] vertices.append(point) mesh_msg = _Mesh() mesh_msg.vertices = vertices mesh_msg.triangles = triangles attached_object_msg.object.meshes.append(mesh_msg) attached_object_msg.object.mesh_poses.append(pose) attached_object_msg.object.id = mesh_id attached_object_msg.object.operation = attached_object_msg.object.ADD attached_objects.append(attached_object_msg) _rospy.wait_for_service('update_scene') update_scene = _rospy.ServiceProxy('update_scene', _UpdateScene) response = update_scene(attached_objects, list()) return response.success
.parametrize(('current_os', 'required_files'), [('Windows', [('AM2R.exe',), ('data.win',)]), ('Linux', [('AM2R.AppImage',)]), ('Linux', [('runner',), ('assets', 'game.unx')]), ('Darwin', [('AM2R.app', 'Contents', 'MacOS', 'Mac_Runner'), ('AM2R.app', 'Contents', 'Resources', 'game.ios')])]) def test_is_valid_input_dir(current_os: str, required_files: list[tuple[str]], tmp_path: Path, mocker: pytest_mock.MockerFixture) -> None: mocker.patch('platform.system', return_value=current_os) for file in required_files: tmp_path.joinpath(*file).parent.mkdir(parents=True, exist_ok=True) tmp_path.joinpath(*file).touch() assert _is_valid_input_dir(tmp_path)
def read_tles_from_mmam_xml_files(paths): fnames = collect_filenames(paths) tles = [] for fname in fnames: data = read_tle_from_mmam_xml_file(fname).split('\n') for two_lines in _group_iterable_to_chunks(2, data): tl_stream = io.StringIO('\n'.join(two_lines)) tles.append(Tle('', tle_file=tl_stream)) return tles
class StubtestMiscUnit(unittest.TestCase):
    """Miscellaneous end-to-end tests for mypy's stubtest CLI behavior."""

    def test_output(self) -> None:
        """Default and --concise output formats for a stub/runtime mismatch."""
        output = run_stubtest(stub='def bad(number: int, text: str) -> None: ...', runtime='def bad(num, text): pass', options=[])
        expected = f'''error: {TEST_MODULE_NAME}.bad is inconsistent, stub argument "number" differs from runtime argument "num"
Stub: in file {TEST_MODULE_NAME}.pyi:1
def (number: builtins.int, text: builtins.str)
Runtime: in file {TEST_MODULE_NAME}.py:1
def (num, text)
Found 1 error (checked 1 module)
'''
        assert (output == expected)
        output = run_stubtest(stub='def bad(number: int, text: str) -> None: ...', runtime='def bad(num, text): pass', options=['--concise'])
        expected = '{}.bad is inconsistent, stub argument "number" differs from runtime argument "num"\n'.format(TEST_MODULE_NAME)
        assert (output == expected)

    def test_ignore_flags(self) -> None:
        """--ignore-missing-stub and --ignore-positional-only suppress errors."""
        output = run_stubtest(stub='', runtime="__all__ = ['f']\ndef f(): pass", options=['--ignore-missing-stub'])
        assert (output == 'Success: no issues found in 1 module\n')
        output = run_stubtest(stub='', runtime='def f(): pass', options=['--ignore-missing-stub'])
        assert (output == 'Success: no issues found in 1 module\n')
        output = run_stubtest(stub='def f(__a): ...', runtime='def f(a): pass', options=['--ignore-positional-only'])
        assert (output == 'Success: no issues found in 1 module\n')

    def test_allowlist(self) -> None:
        """Allowlist entries suppress errors; unused entries are reported
        unless --ignore-unused-allowlist or --generate-allowlist is given."""
        # Can't use this as a context because Windows
        allowlist = tempfile.NamedTemporaryFile(mode='w+', delete=False)
        try:
            with allowlist:
                allowlist.write(f'''{TEST_MODULE_NAME}.bad # comment
# comment''')
            output = run_stubtest(stub='def bad(number: int, text: str) -> None: ...', runtime='def bad(asdf, text): pass', options=['--allowlist', allowlist.name])
            assert (output == 'Success: no issues found in 1 module\n')
            output = run_stubtest(stub='', runtime='', options=['--allowlist', allowlist.name])
            assert (output == f'''note: unused allowlist entry {TEST_MODULE_NAME}.bad
Found 1 error (checked 1 module)
''')
            output = run_stubtest(stub='', runtime='', options=['--allowlist', allowlist.name, '--ignore-unused-allowlist'])
            assert (output == 'Success: no issues found in 1 module\n')
            # test unused regex entries
            with open(allowlist.name, mode='w+') as f:
                f.write(f'''{TEST_MODULE_NAME}.b.*
''')
                f.write('(unused_missing)?\n')
                f.write('unused.*\n')
            output = run_stubtest(stub=textwrap.dedent('\n def good() -> None: ...\n def bad(number: int) -> None: ...\n def also_bad(number: int) -> None: ...\n '.lstrip('\n')), runtime=textwrap.dedent('\n def good(): pass\n def bad(asdf): pass\n def also_bad(asdf): pass\n '.lstrip('\n')), options=['--allowlist', allowlist.name, '--generate-allowlist'])
            assert (output == f'''note: unused allowlist entry unused.*
{TEST_MODULE_NAME}.also_bad
''')
        finally:
            os.unlink(allowlist.name)

    def test_mypy_build(self) -> None:
        """Stub syntax/build errors are reported instead of being checked."""
        output = run_stubtest(stub='+', runtime='', options=[])
        assert (output == 'error: not checking stubs due to failed mypy compile:\n{}.pyi:1: error: invalid syntax [syntax]\n'.format(TEST_MODULE_NAME))
        output = run_stubtest(stub='def f(): ...\ndef f(): ...', runtime='', options=[])
        assert (output == 'error: not checking stubs due to mypy build errors:\n{}.pyi:2: error: Name "f" already defined on line 1 [no-redef]\n'.format(TEST_MODULE_NAME))

    def test_missing_stubs(self) -> None:
        """A module with no stubs available yields a MISSING error."""
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            test_stubs(parse_options(['not_a_module']))
        assert (remove_color_code(output.getvalue()) == 'error: not_a_module failed to find stubs\nStub:\nMISSING\nRuntime:\nN/A\n\nFound 1 error (checked 1 module)\n')

    def test_only_py(self) -> None:
        """A plain .py module with no .pyi stub still checks cleanly."""
        # in this case, stubtest will check the py against itself
        with use_tmp_dir(TEST_MODULE_NAME):
            with open(f'{TEST_MODULE_NAME}.py', 'w') as f:
                f.write('a = 1')
            output = io.StringIO()
            with contextlib.redirect_stdout(output):
                test_stubs(parse_options([TEST_MODULE_NAME]))
            output_str = remove_color_code(output.getvalue())
            assert (output_str == 'Success: no issues found in 1 module\n')

    def test_get_typeshed_stdlib_modules(self) -> None:
        """Typeshed stdlib module list respects the target Python version."""
        stdlib = mypy.stubtest.get_typeshed_stdlib_modules(None, (3, 7))
        assert ('builtins' in stdlib)
        assert ('os' in stdlib)
        assert ('os.path' in stdlib)
        assert ('asyncio' in stdlib)
        assert ('graphlib' not in stdlib)
        assert ('formatter' in stdlib)
        assert ('contextvars' in stdlib)
        assert ('importlib.metadata' not in stdlib)
        stdlib = mypy.stubtest.get_typeshed_stdlib_modules(None, (3, 10))
        assert ('graphlib' in stdlib)
        assert ('formatter' not in stdlib)
        assert ('importlib.metadata' in stdlib)

    def test_signature(self) -> None:
        """Signature stringification from inspect.Signature."""
        def f(a: int, b: int, *, c: int, d: int=0, **kwargs: Any) -> None:
            pass
        assert (str(mypy.stubtest.Signature.from_inspect_signature(inspect.signature(f))) == 'def (a, b, *, c, d = ..., **kwargs)')

    def test_builtin_signature_with_unrepresentable_default(self) -> None:
        """Builtins with unrepresentable defaults stringify with '...'."""
        sig = mypy.stubtest.safe_inspect_signature(bytes.hex)
        assert (sig is not None)
        assert (str(mypy.stubtest.Signature.from_inspect_signature(sig)) == 'def (self, sep = ..., bytes_per_sep = ...)')

    def test_config_file(self) -> None:
        """A mypy config file (plugins) changes stubtest's verdict."""
        runtime = 'temp = 5\n'
        stub = 'from decimal import Decimal\ntemp: Decimal\n'
        config_file = f'''[mypy]
plugins={root_dir}/test-data/unit/plugins/decimal_to_int.py
'''
        output = run_stubtest(stub=stub, runtime=runtime, options=[])
        assert (output == f'''error: {TEST_MODULE_NAME}.temp variable differs from runtime type Literal[5]
Stub: in file {TEST_MODULE_NAME}.pyi:2
_decimal.Decimal
Runtime:
5
Found 1 error (checked 1 module)
''')
        output = run_stubtest(stub=stub, runtime=runtime, options=[], config_file=config_file)
        assert (output == 'Success: no issues found in 1 module\n')

    def test_no_modules(self) -> None:
        """Invoking stubtest with no modules is an error."""
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            test_stubs(parse_options([]))
        assert (remove_color_code(output.getvalue()) == 'error: no modules to check\n')

    def test_module_and_typeshed(self) -> None:
        """--check-typeshed and an explicit module list are mutually exclusive."""
        output = io.StringIO()
        with contextlib.redirect_stdout(output):
            test_stubs(parse_options(['--check-typeshed', 'some_module']))
        assert (remove_color_code(output.getvalue()) == 'error: cannot pass both --check-typeshed and a list of modules\n')
class LinearBottleneck(nn.Module): def __init__(self, inplanes, outplanes, stride=1, t=6, activation=nn.ReLU6): super(LinearBottleneck, self).__init__() self.conv1 = nn.Conv2d(inplanes, (inplanes * t), kernel_size=1, bias=False) self.bn1 = nn.BatchNorm2d((inplanes * t), momentum=0.0003) self.conv2 = nn.Conv2d((inplanes * t), (inplanes * t), kernel_size=3, stride=stride, padding=1, bias=False, groups=(inplanes * t)) self.bn2 = nn.BatchNorm2d((inplanes * t), momentum=0.0003) self.conv3 = nn.Conv2d((inplanes * t), outplanes, kernel_size=1, bias=False) self.bn3 = nn.BatchNorm2d(outplanes, momentum=0.0003) self.activation = activation(inplace=True) self.stride = stride self.t = t self.inplanes = inplanes self.outplanes = outplanes def forward(self, x): residual = x out = self.conv1(x) out = self.bn1(out) out = self.activation(out) out = self.conv2(out) out = self.bn2(out) out = self.activation(out) out = self.conv3(out) out = self.bn3(out) if ((self.stride == 1) and (self.inplanes == self.outplanes)): out += residual return out
class Arguments():
    """Composable argparse wrapper for ColBERT-style experiments.

    Argument groups are added via the ``add_*`` methods; custom cross-field
    validations are queued in ``self.checks`` and run at parse time.
    """

    def __init__(self, description):
        self.parser = ArgumentParser(description=description)
        # Validation callables run against the parsed namespace.
        self.checks = []
        self.add_argument('--root', dest='root', default='experiments')
        self.add_argument('--experiment', dest='experiment', default='dirty')
        self.add_argument('--run', dest='run', default=Run.name)
        # -1 means "not launched under torch.distributed".
        self.add_argument('--local_rank', dest='rank', default=(- 1), type=int)

    def add_model_parameters(self):
        """Core model hyper-parameters (similarity, dims, max lengths)."""
        self.add_argument('--similarity', dest='similarity', default='cosine', choices=['cosine', 'l2'])
        self.add_argument('--dim', dest='dim', default=128, type=int)
        self.add_argument('--query_maxlen', dest='query_maxlen', default=32, type=int)
        self.add_argument('--doc_maxlen', dest='doc_maxlen', default=180, type=int)
        self.add_argument('--mask-punctuation', dest='mask_punctuation', default=False, action='store_true')

    def add_model_training_parameters(self):
        """Training-only knobs (resume, LR, batch/accumulation sizes, AMP)."""
        self.add_argument('--resume', dest='resume', default=False, action='store_true')
        self.add_argument('--resume_optimizer', dest='resume_optimizer', default=False, action='store_true')
        self.add_argument('--checkpoint', dest='checkpoint', default=None, required=False)
        self.add_argument('--lr', dest='lr', default=3e-06, type=float)
        self.add_argument('--maxsteps', dest='maxsteps', default=400000, type=int)
        self.add_argument('--bsize', dest='bsize', default=32, type=int)
        self.add_argument('--accum', dest='accumsteps', default=2, type=int)
        self.add_argument('--amp', dest='amp', default=False, action='store_true')

    def add_model_inference_parameters(self):
        """Inference-only knobs; checkpoint becomes mandatory here."""
        self.add_argument('--checkpoint', dest='checkpoint', required=True)
        self.add_argument('--bsize', dest='bsize', default=128, type=int)
        self.add_argument('--amp', dest='amp', default=False, action='store_true')

    def add_training_input(self):
        """Training data inputs plus a consistency check on queries/collection."""
        self.add_argument('--triples', dest='triples', required=True)
        self.add_argument('--queries', dest='queries', default=None)
        self.add_argument('--collection', dest='collection', default=None)

        def check_training_input(args):
            # Either both or neither of --queries/--collection must be given.
            assert ((args.collection is None) == (args.queries is None)), 'For training, both (or neither) --collection and --queries must be supplied.If neither is supplied, the --triples file must contain texts (not PIDs).'
        self.checks.append(check_training_input)

    def add_ranking_input(self):
        self.add_argument('--queries', dest='queries', default=None)
        self.add_argument('--collection', dest='collection', default=None)
        self.add_argument('--qrels', dest='qrels', default=None)

    def add_reranking_input(self):
        """Ranking inputs plus the candidate list (--topk) to rerank."""
        self.add_ranking_input()
        self.add_argument('--topk', dest='topK', required=True)
        self.add_argument('--shortcircuit', dest='shortcircuit', default=False, action='store_true')

    def add_indexing_input(self):
        self.add_argument('--collection', dest='collection', required=True)
        self.add_argument('--index_root', dest='index_root', required=True)
        self.add_argument('--index_name', dest='index_name', required=True)

    def add_compressed_index_input(self):
        self.add_argument('--compression_level', dest='compression_level', choices=[1, 2], type=int, default=None)

    def add_index_use_input(self):
        self.add_argument('--index_root', dest='index_root', required=True)
        self.add_argument('--index_name', dest='index_name', required=True)
        self.add_argument('--partitions', dest='partitions', default=None, type=int, required=False)

    def add_retrieval_input(self):
        self.add_index_use_input()
        self.add_argument('--nprobe', dest='nprobe', default=10, type=int)
        self.add_argument('--retrieve_only', dest='retrieve_only', default=False, action='store_true')

    def add_argument(self, *args, **kw_args):
        return self.parser.add_argument(*args, **kw_args)

    def check_arguments(self, args):
        """Run all queued cross-field validations against the parsed args."""
        for check in self.checks:
            check(args)

    def parse(self):
        """Parse argv, validate, set up distributed + FAISS threading, init Run.

        Side effects: initializes torch.distributed (via `distributed.init`),
        caps FAISS threads per process, and initializes/logs the Run context.
        """
        args = self.parser.parse_args()
        self.check_arguments(args)
        # Keep a pristine copy of the user-supplied arguments for logging.
        args.input_arguments = copy.deepcopy(args)
        (args.nranks, args.distributed) = distributed.init(args.rank)
        # Use ~80% of available threads, split evenly across ranks.
        args.nthreads = int((max(os.cpu_count(), faiss.omp_get_max_threads()) * 0.8))
        args.nthreads = max(1, (args.nthreads // args.nranks))
        if (args.nranks > 1):
            print_message(f'#> Restricting number of threads for FAISS to {args.nthreads} per process', condition=(args.rank == 0))
            faiss.omp_set_num_threads(args.nthreads)
        Run.init(args.rank, args.root, args.experiment, args.run)
        Run._log_args(args)
        Run.info(args.input_arguments.__dict__, '\n')
        return args
class Basic3DBlock(nn.Module): def __init__(self, in_planes, out_planes, kernel_size): super(Basic3DBlock, self).__init__() self.block = nn.Sequential(nn.Conv3d(in_planes, out_planes, kernel_size=kernel_size, stride=1, padding=((kernel_size - 1) // 2)), nn.BatchNorm3d(out_planes), nn.ReLU(True)) def forward(self, x): return self.block(x)
class TransformComponent(ABC): def __init__(self) -> None: self._parent = None def parent(self) -> Any: return self._parent def parent(self, parent: None) -> None: self._parent = parent def output_columns(self) -> List[str]: def transform(self, dataframe: DataFrame) -> DataFrame:
def test_unsuccessful_load_from_s3_client_error(s3_stub): s3_stub.add_client_error('get_object') with pytest.raises(LoaderException): _load_from_s3(json.dumps({'region_name': 'us-east-1', 'bucket_name': 'my-test-bucket', 'file_key': 'my-object-key', 'sse_key': 'my-sse-key'}).encode('utf-8'))
class Effect6699(BaseEffect): type = 'passive' def handler(fit, src, context, projectionRange, **kwargs): lvl = src.level fit.modules.filteredItemBoost((lambda mod: (mod.item.group.name == 'Rig Drones')), 'drawback', (src.getModifiedItemAttr('rigDrawbackBonus') * lvl), **kwargs)
def mode_dataframe(spark_context, spark_session): data = [{'id': 1, 'timestamp': '2016-04-11 11:31:11', 'feature1': 200}, {'id': 1, 'timestamp': '2016-04-11 11:44:12', 'feature1': 200}, {'id': 1, 'timestamp': '2016-04-11 11:46:24', 'feature1': 200}, {'id': 1, 'timestamp': '2016-04-11 12:03:21', 'feature1': 300}, {'id': 1, 'timestamp': '2016-04-12 11:31:11', 'feature1': 300}, {'id': 1, 'timestamp': '2016-04-12 11:44:12', 'feature1': 300}, {'id': 1, 'timestamp': '2016-04-12 11:46:24', 'feature1': 300}, {'id': 1, 'timestamp': '2016-04-12 12:03:21', 'feature1': 300}] df = spark_session.read.json(spark_context.parallelize(data, 1)) df = df.withColumn(TIMESTAMP_COLUMN, df.timestamp.cast(DataType.TIMESTAMP.spark)) return df
class BuildScripts(du_build_scripts): description = 'copy scripts to build directory' def run(self): du_build_scripts.run(self) for script in self.scripts: outfile = os.path.join(self.build_dir, os.path.basename(script)) new = os.path.splitext(outfile)[0] try: os.unlink(new) except OSError: pass self.move_file(outfile, new)
class CSVLoggerTest(unittest.TestCase): def test_csv_log(self) -> None: with TemporaryDirectory() as tmpdir: csv_path = Path(tmpdir, 'test.csv').as_posix() logger = CSVLogger(csv_path, steps_before_flushing=1) log_name = 'asdf' log_value = 123.0 log_step = 10 logger.log(log_name, log_value, log_step) logger.close() with open(csv_path) as f: output = list(csv.DictReader(f)) self.assertEqual(float(output[0][log_name]), log_value) self.assertEqual(int(output[0]['step']), log_step) def test_csv_log_async(self) -> None: with TemporaryDirectory() as tmpdir: csv_path = Path(tmpdir, 'test.csv').as_posix() logger = CSVLogger(csv_path, steps_before_flushing=1, async_write=True) log_name = 'asdf' log_value = 123.0 log_step = 10 logger.log(log_name, log_value, log_step) logger.close() with open(csv_path) as f: output = list(csv.DictReader(f)) self.assertEqual(float(output[0][log_name]), log_value) self.assertEqual(int(output[0]['step']), log_step)
class TestTransformerLowpass(unittest.TestCase):
    """Tests for Transformer.lowpass argument construction and validation."""

    def _check_effect(self, tfm, expected_args):
        """Verify effect args, the effects log, and a successful build."""
        self.assertEqual(expected_args, tfm.effects)
        self.assertEqual(['lowpass'], tfm.effects_log)
        self.assertEqual(True, tfm.build(INPUT_FILE, OUTPUT_FILE))
        tfm_assert_array_to_file_output(INPUT_FILE, OUTPUT_FILE, tfm)

    def test_default(self):
        tfm = new_transformer()
        tfm.lowpass(1000.0)
        self._check_effect(tfm, ['lowpass', '-2', '1000.000000', '0.707000q'])

    def test_one_pole(self):
        tfm = new_transformer()
        tfm.lowpass(1000.0, n_poles=1)
        # Single-pole filters take no width argument.
        self._check_effect(tfm, ['lowpass', '-1', '1000.000000'])

    def test_frequency_invalid(self):
        tfm = new_transformer()
        with self.assertRaises(ValueError):
            tfm.lowpass(-20)

    def test_width_q_invalid(self):
        tfm = new_transformer()
        with self.assertRaises(ValueError):
            tfm.lowpass(1000.0, width_q=0.0)

    def test_n_poles_invalid(self):
        tfm = new_transformer()
        with self.assertRaises(ValueError):
            tfm.lowpass(1000.0, n_poles=3)
def read_squad_examples(input_file, is_training, version_2_with_negative):
    """Read a SQuAD-format JSON file into a list of SquadExample objects.

    Args:
        input_file: Path to the SQuAD JSON file (top-level key ``'data'``).
        is_training: If True, also extract answer spans (word-level start/end).
        version_2_with_negative: If True, honor the per-question
            ``is_impossible`` flag (SQuAD v2 unanswerable questions).

    Returns:
        List of SquadExample objects. Training examples whose annotated answer
        text cannot be found in the reconstructed context are skipped.
    """
    with open(input_file, 'r', encoding='utf-8') as reader:
        input_data = json.load(reader)['data']

    def is_whitespace(c):
        # ord 8239 is U+202F NARROW NO-BREAK SPACE, treated as whitespace too.
        if ((c == ' ') or (c == '\t') or (c == '\r') or (c == '\n') or (ord(c) == 8239)):
            return True
        return False

    examples = []
    for entry in input_data:
        for paragraph in entry['paragraphs']:
            paragraph_text = paragraph['context']
            # Split the context into whitespace-delimited "doc tokens" while
            # building a char-index -> token-index map for answer alignment.
            doc_tokens = []
            char_to_word_offset = []
            prev_is_whitespace = True
            for c in paragraph_text:
                if is_whitespace(c):
                    prev_is_whitespace = True
                else:
                    if prev_is_whitespace:
                        doc_tokens.append(c)
                    else:
                        doc_tokens[(- 1)] += c
                    prev_is_whitespace = False
                char_to_word_offset.append((len(doc_tokens) - 1))
            for qa in paragraph['qas']:
                qas_id = qa['id']
                question_text = qa['question']
                start_position = None
                end_position = None
                orig_answer_text = None
                is_impossible = False
                if is_training:
                    if version_2_with_negative:
                        is_impossible = qa['is_impossible']
                    if ((len(qa['answers']) != 1) and (not is_impossible)):
                        raise ValueError('For training, each question should have exactly 1 answer.')
                    if (not is_impossible):
                        answer = qa['answers'][0]
                        orig_answer_text = answer['text']
                        answer_offset = answer['answer_start']
                        answer_length = len(orig_answer_text)
                        # Map character offsets to word-level start/end positions.
                        start_position = char_to_word_offset[answer_offset]
                        end_position = char_to_word_offset[((answer_offset + answer_length) - 1)]
                        # Sanity check: the annotated answer must appear in the
                        # whitespace-normalized reconstruction; otherwise skip.
                        actual_text = ' '.join(doc_tokens[start_position:(end_position + 1)])
                        cleaned_answer_text = ' '.join(whitespace_tokenize(orig_answer_text))
                        if (actual_text.find(cleaned_answer_text) == (- 1)):
                            logger.warning("Could not find answer: '%s' vs. '%s'", actual_text, cleaned_answer_text)
                            continue
                    else:
                        # Unanswerable question: sentinel span and empty answer.
                        start_position = (- 1)
                        end_position = (- 1)
                        orig_answer_text = ''
                example = SquadExample(qas_id=qas_id, question_text=question_text, doc_tokens=doc_tokens, orig_answer_text=orig_answer_text, start_position=start_position, end_position=end_position, is_impossible=is_impossible)
                examples.append(example)
    return examples
class SelectExtractor(BaseExtractor, SourceHandlerMixin):
    """Extract column/table lineage from SELECT statements and set expressions.

    Fix: ``_handle_swap_partition`` is defined without ``self`` but was invoked
    as ``self._handle_swap_partition(segment, holder)``, which would raise a
    TypeError at runtime (3 positional args for a 2-parameter method). It is a
    pure function of its arguments, so it is restored as a ``@staticmethod``.
    """

    SUPPORTED_STMT_TYPES = ['select_statement', 'set_expression', 'bracketed']

    def __init__(self, dialect: str, metadata_provider: MetaDataProvider):
        super().__init__(dialect, metadata_provider)
        self.columns = []          # SqlFluffColumn objects in select order
        self.tables = []           # source tables / subqueries in FROM order
        self.union_barriers = []   # (len(columns), len(tables)) at each UNION

    def extract(self, statement: BaseSegment, context: AnalyzerContext) -> SubQueryLineageHolder:
        """Walk the statement, collecting tables, columns and subqueries."""
        holder = self._init_holder(context)
        subqueries = []
        # A set_expression is handled as a whole so UNION barriers line up.
        segments = [statement] if statement.type == 'set_expression' else list_child_segments(statement)
        for segment in segments:
            for sq in self.list_subquery(segment):
                subqueries.append(sq)
            self._handle_swap_partition(segment, holder)
            self._handle_select_into(segment, holder)
            self._handle_table(segment, holder)
            self._handle_column(segment)
            if is_set_expression(segment):
                for idx, sub_segment in enumerate(segment.get_children('select_statement', 'bracketed')):
                    if idx != 0:
                        # Record where one UNION branch ends and the next starts.
                        self.union_barriers.append((len(self.columns), len(self.tables)))
                    for seg in list_child_segments(sub_segment):
                        for sq in self.list_subquery(seg):
                            subqueries.append(sq)
                        self._handle_table(seg, holder)
                        self._handle_column(seg)
        self.end_of_query_cleanup(holder)
        self.extract_subquery(subqueries, holder)
        holder.expand_wildcard(self.metadata_provider)
        return holder

    @staticmethod
    def _handle_swap_partition(segment: BaseSegment, holder: SubQueryLineageHolder):
        """Handle Snowflake SWAP_PARTITIONS_BETWEEN_TABLES(source, ..., target)."""
        if segment.type == 'select_clause':
            if (select_clause_element := segment.get_child('select_clause_element')):
                if (function := select_clause_element.get_child('function')):
                    if function.first_non_whitespace_segment_raw_upper == 'SWAP_PARTITIONS_BETWEEN_TABLES':
                        if (bracketed := function.get_child('bracketed')):
                            expressions = bracketed.get_children('expression')
                            # arg 0 is the source table, arg 3 the target table
                            holder.add_read(SqlFluffTable(escape_identifier_name(expressions[0].raw)))
                            holder.add_write(SqlFluffTable(escape_identifier_name(expressions[3].raw)))

    def _handle_select_into(self, segment: BaseSegment, holder: SubQueryLineageHolder):
        """Register the target of SELECT ... INTO as a write."""
        if segment.type in ['into_table_clause', 'into_clause']:
            if (identifier := find_table_identifier(segment)):
                if (table := self.find_table(identifier)):
                    holder.add_write(table)

    def _handle_table(self, segment: BaseSegment, holder: SubQueryLineageHolder) -> None:
        """Collect source datasets from FROM / JOIN clauses (recursing into joins)."""
        if segment.type in ['from_clause', 'join_clause']:
            from_expressions = segment.get_children('from_expression')
            if len(from_expressions) > 1:
                # Comma-separated FROM list: handle each expression separately.
                for from_expression in from_expressions:
                    if (from_expression_element := find_from_expression_element(from_expression)):
                        self._add_dataset_from_expression_element(from_expression_element, holder)
            else:
                if (from_expression_element := find_from_expression_element(segment)):
                    self._add_dataset_from_expression_element(from_expression_element, holder)
            for join_clause in list_join_clause(segment):
                self._handle_table(join_clause, holder)

    def _handle_column(self, segment: BaseSegment) -> None:
        """Collect select-list columns."""
        if segment.type == 'select_clause':
            for sub_segment in segment.get_children('select_clause_element'):
                self.columns.append(SqlFluffColumn.of(sub_segment))

    def _add_dataset_from_expression_element(self, segment: BaseSegment, holder: SubQueryLineageHolder) -> None:
        """Resolve one FROM-expression element to a table, subquery, CTE or file."""
        all_segments = [seg for seg in list_child_segments(segment) if seg.type != 'keyword']
        if (table_expression := segment.get_child('table_expression')):
            if table_expression.get_child('function'):
                # Table functions are not real source tables; skip.
                return
        first_segment = all_segments[0]
        if first_segment.type == 'bracketed':
            if (table_expression := first_segment.get_child('table_expression')):
                if table_expression.get_child('values_clause'):
                    # Inline VALUES (...) produces no lineage source.
                    return
        subqueries = list_subqueries(segment)
        if subqueries:
            for sq in subqueries:
                bracketed, alias = sq
                read_sq = SqlFluffSubQuery.of(bracketed, alias)
                self.tables.append(read_sq)
        else:
            table_identifier = find_table_identifier(segment)
            if table_identifier:
                subquery_flag = False
                alias = None
                if len(all_segments) > 1 and all_segments[1].type == 'alias_expression':
                    all_segments = list_child_segments(all_segments[1])
                    alias = str(all_segments[1].raw if len(all_segments) > 1 else all_segments[0].raw)
                if '.' not in table_identifier.raw:
                    # Unqualified name may refer to a CTE defined earlier.
                    cte_dict = {s.alias: s for s in holder.cte}
                    cte = cte_dict.get(table_identifier.raw)
                    if cte is not None:
                        self.tables.append(SqlFluffSubQuery.of(cte.query, alias or table_identifier.raw))
                        subquery_flag = True
                if subquery_flag is False:
                    if table_identifier.type == 'file_reference':
                        self.tables.append(Path(escape_identifier_name(table_identifier.segments[-1].raw)))
                    else:
                        self.tables.append(SqlFluffTable.of(table_identifier, alias=alias))
@dataclasses.dataclass(frozen=True)
class TranslatorConfiguration(BitPackValue):
    """Per-gate translator requirements, bit-packable for preset encoding.

    Fix: the source began with the orphaned fragment ``(frozen=True)`` — the
    remnant of a stripped ``@dataclasses.dataclass(frozen=True)`` decorator
    (required: the class declares bare annotated fields and the methods use
    ``dataclasses.replace(self, ...)``). ``bit_pack_unpack``, ``from_json``
    and ``default`` take ``cls`` and are restored as classmethods.
    """

    translator_requirement: dict[NodeIdentifier, LayoutTranslatorRequirement]
    fixed_gfmc_compound: bool = True
    fixed_torvus_temple: bool = True
    fixed_great_temple: bool = True

    def bit_pack_encode(self, metadata) -> Iterator[tuple[int, int]]:
        """Encode against known templates; index 3 means fully custom."""
        templates = [
            _get_vanilla_actual_translator_configurations(),
            _get_vanilla_colors_translator_configurations(),
            self.with_full_random().translator_requirement,
            self.translator_requirement,
        ]
        yield from bitpacking.pack_array_element(self.translator_requirement, templates)
        if templates.index(self.translator_requirement) == 3:
            # Custom configuration: encode every gate's requirement explicitly.
            for translator in self.translator_requirement.values():
                yield from translator.bit_pack_encode({})

    @classmethod
    def bit_pack_unpack(cls, decoder: BitPackDecoder, metadata) -> Self:
        """Inverse of bit_pack_encode; None template means per-gate decode."""
        templates = [
            _get_vanilla_actual_translator_configurations(),
            _get_vanilla_colors_translator_configurations(),
            cls.default().with_full_random().translator_requirement,
            None,
        ]
        translator_requirement = decoder.decode_element(templates)
        if translator_requirement is None:
            translator_requirement = {}
            for gate in templates[0].keys():
                translator_requirement[gate] = LayoutTranslatorRequirement.bit_pack_unpack(decoder, {})
        return cls(translator_requirement)

    def as_json(self) -> dict:
        return {'translator_requirement': {key.as_string: item.value for (key, item) in self.translator_requirement.items()}}

    @classmethod
    def from_json(cls, value: dict) -> Self:
        default = cls.default()
        params = copy.copy(value)
        # Start from the default mapping so gates absent from JSON keep defaults.
        translator_requirement = copy.copy(default.translator_requirement)
        for key, item in params.pop('translator_requirement').items():
            translator_requirement[NodeIdentifier.from_string(key)] = LayoutTranslatorRequirement(item)
        return cls(translator_requirement, **params)

    @classmethod
    def default(cls) -> Self:
        return cls(_get_vanilla_actual_translator_configurations())

    def with_vanilla_actual(self) -> Self:
        return dataclasses.replace(self, translator_requirement=_get_vanilla_actual_translator_configurations())

    def with_vanilla_colors(self) -> Self:
        return dataclasses.replace(self, translator_requirement=_get_vanilla_colors_translator_configurations())

    def with_full_random(self) -> Self:
        return dataclasses.replace(self, translator_requirement={key: LayoutTranslatorRequirement.RANDOM for key in self.translator_requirement.keys()})

    def with_full_random_with_unlocked(self) -> Self:
        return dataclasses.replace(self, translator_requirement={key: LayoutTranslatorRequirement.RANDOM_WITH_REMOVED for key in self.translator_requirement.keys()})

    def replace_requirement_for_gate(self, gate: NodeIdentifier, requirement: LayoutTranslatorRequirement) -> Self:
        """Return a copy with a single gate's requirement replaced."""
        assert gate in self.translator_requirement
        result = copy.copy(self)
        new_requirement = copy.copy(self.translator_requirement)
        new_requirement[gate] = requirement
        return dataclasses.replace(result, translator_requirement=new_requirement)

    def description(self) -> str:
        """Human-readable name if the configuration matches a known preset."""
        translator_configurations = [
            (self.with_vanilla_actual(), 'Vanilla (Actual)'),
            (self.with_vanilla_colors(), 'Vanilla (Colors)'),
            (self.with_full_random(), 'Random'),
            (self.with_full_random_with_unlocked(), 'Random with Unlocked'),
        ]
        for translator_config, name in translator_configurations:
            if translator_config == self:
                return name
        return 'Custom'
class ModelBuilderTest(tf.test.TestCase):
    """Tests for model_builder.compute_vertex_channels and _covariance_matrix."""

    def test_compute_vertex_channels_linear(self):
        """A linear chain propagates the output channel count backwards."""
        matrix1 = np.array([[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1], [0, 0, 0, 0]])
        vc1 = model_builder.compute_vertex_channels(8, 8, matrix1)
        assert (vc1 == [8, 8, 8, 8])
        vc2 = model_builder.compute_vertex_channels(8, 16, matrix1)
        assert (vc2 == [8, 16, 16, 16])
        vc3 = model_builder.compute_vertex_channels(16, 8, matrix1)
        assert (vc3 == [16, 8, 8, 8])
        # Degenerate two-vertex graph: input connected directly to output.
        matrix2 = np.array([[0, 1], [0, 0]])
        vc4 = model_builder.compute_vertex_channels(1, 1, matrix2)
        assert (vc4 == [1, 1])
        vc5 = model_builder.compute_vertex_channels(1, 5, matrix2)
        assert (vc5 == [1, 5])
        vc5 = model_builder.compute_vertex_channels(5, 1, matrix2)
        assert (vc5 == [5, 1])

    def test_compute_vertex_channels_no_output_branch(self):
        """Interior branching that rejoins before the output keeps full width."""
        matrix1 = np.array([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]])
        vc1 = model_builder.compute_vertex_channels(8, 8, matrix1)
        assert (vc1 == [8, 8, 8, 8, 8])
        vc2 = model_builder.compute_vertex_channels(8, 16, matrix1)
        assert (vc2 == [8, 16, 16, 16, 16])
        vc3 = model_builder.compute_vertex_channels(16, 8, matrix1)
        assert (vc3 == [16, 8, 8, 8, 8])

    def test_compute_vertex_channels_output_branching(self):
        """Branches feeding the output split its channels (remainder to the
        earlier branches when the count does not divide evenly)."""
        matrix1 = np.array([[0, 1, 1, 0], [0, 0, 0, 1], [0, 0, 0, 1], [0, 0, 0, 0]])
        vc1 = model_builder.compute_vertex_channels(8, 8, matrix1)
        assert (vc1 == [8, 4, 4, 8])
        vc2 = model_builder.compute_vertex_channels(8, 16, matrix1)
        assert (vc2 == [8, 8, 8, 16])
        vc3 = model_builder.compute_vertex_channels(16, 8, matrix1)
        assert (vc3 == [16, 4, 4, 8])
        vc4 = model_builder.compute_vertex_channels(8, 15, matrix1)
        assert (vc4 == [8, 8, 7, 15])
        matrix2 = np.array([[0, 1, 1, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]])
        vc5 = model_builder.compute_vertex_channels(8, 8, matrix2)
        assert (vc5 == [8, 3, 3, 2, 8])
        vc6 = model_builder.compute_vertex_channels(8, 15, matrix2)
        assert (vc6 == [8, 5, 5, 5, 15])

    def test_compute_vertex_channels_max(self):
        """A vertex feeding multiple consumers takes the max demanded width."""
        matrix1 = np.array([[0, 1, 0, 0, 0], [0, 0, 1, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]])
        vc1 = model_builder.compute_vertex_channels(8, 8, matrix1)
        assert (vc1 == [8, 4, 4, 4, 8])
        vc2 = model_builder.compute_vertex_channels(8, 9, matrix1)
        assert (vc2 == [8, 5, 5, 4, 9])
        matrix2 = np.array([[0, 1, 0, 1, 0], [0, 0, 1, 0, 1], [0, 0, 0, 1, 0], [0, 0, 0, 0, 1], [0, 0, 0, 0, 0]])
        vc3 = model_builder.compute_vertex_channels(8, 8, matrix2)
        assert (vc3 == [8, 4, 4, 4, 8])
        vc4 = model_builder.compute_vertex_channels(8, 15, matrix2)
        assert (vc4 == [8, 8, 7, 7, 15])

    def test_covariance_matrix_against_numpy(self):
        """_covariance_matrix must agree with np.cov on random inputs.

        Uses a TF1-style Session to evaluate the tensor.
        """
        for _ in range(100):
            batch = np.random.randint(50, 150)
            features = np.random.randint(500, 1500)
            matrix = np.random.random((batch, features))
            tf_matrix = tf.constant(matrix, dtype=tf.float32)
            tf_cov_tensor = model_builder._covariance_matrix(tf_matrix)
            with tf.Session() as sess:
                tf_cov = sess.run(tf_cov_tensor)
            np_cov = np.cov(matrix)
            np.testing.assert_array_almost_equal(tf_cov, np_cov)
def _load_checkpoint(session, checkpoint_path, allow_drop_layers, allow_lr_init=True):
    """Restore variables from a checkpoint into the current graph.

    Partitions the graph's global variables into a "load from checkpoint" set
    and an "initialize fresh" set, then applies both. Variables move into the
    init set when: the learning rate is being force-reset, CuDNN-RNN variables
    are missing from the checkpoint (Adam moments only, otherwise fatal), or
    the caller asked to drop transfer-learning source layers.

    Args:
        session: Active TF session to load/initialize variables in.
        checkpoint_path: Checkpoint prefix accepted by tfv1.train.load_checkpoint.
        allow_drop_layers: Whether FLAGS.drop_source_layers may take effect.
        allow_lr_init: Whether the learning rate may be re-initialized when
            FLAGS.force_initialize_learning_rate is set.
    """
    ckpt = tfv1.train.load_checkpoint(checkpoint_path)
    vars_in_ckpt = frozenset(ckpt.get_variable_to_shape_map().keys())
    load_vars = set(tfv1.global_variables())
    init_vars = set()
    # Learning rate: re-initialize if absent from the checkpoint or forced.
    lr_var = set((v for v in load_vars if (v.op.name == 'learning_rate')))
    if (lr_var and (('learning_rate' not in vars_in_ckpt) or (FLAGS.force_initialize_learning_rate and allow_lr_init))):
        assert (len(lr_var) <= 1)
        load_vars -= lr_var
        init_vars |= lr_var
    if FLAGS.load_cudnn:
        # Loading a CuDNN RNN checkpoint: only Adam moment tensors may be
        # missing; any other missing variable is a fatal mismatch.
        missing_vars = set()
        for v in load_vars:
            if (v.op.name not in vars_in_ckpt):
                log_warn(('CUDNN variable not found: %s' % v.op.name))
                missing_vars.add(v)
                init_vars.add(v)
        load_vars -= init_vars
        missing_var_names = [v.op.name for v in missing_vars]
        if any((('Adam' not in v) for v in missing_var_names)):
            log_error('Tried to load a CuDNN RNN checkpoint but there were more missing variables than just the Adam moment tensors. Missing variables: {}'.format(missing_var_names))
            sys.exit(1)
    if (allow_drop_layers and (FLAGS.drop_source_layers > 0)):
        # Transfer learning: drop the last N layers and re-initialize them.
        if (FLAGS.drop_source_layers >= 6):
            log_warn('The checkpoint only has 6 layers, but you are trying to drop all of them or more than all of them. Continuing and dropping only 5 layers.')
            FLAGS.drop_source_layers = 5
        # Layer name fragments, last-to-first; slice keeps the last N entries.
        dropped_layers = ['2', '3', 'lstm', '5', '6'][((- 1) * int(FLAGS.drop_source_layers)):]
        for v in load_vars:
            if any(((layer in v.op.name) for layer in dropped_layers)):
                init_vars.add(v)
        load_vars -= init_vars
    # Deterministic order (sorted by op name) for reproducible logs.
    for v in sorted(load_vars, key=(lambda v: v.op.name)):
        log_info(('Loading variable from checkpoint: %s' % v.op.name))
        v.load(ckpt.get_tensor(v.op.name), session=session)
    for v in sorted(init_vars, key=(lambda v: v.op.name)):
        log_info(('Initializing variable: %s' % v.op.name))
        session.run(v.initializer)
def add_sample_args(parser):
    """Attach the sampling-script CLI options to *parser* and return it."""
    group = parser.add_argument_group('Common')
    add_common_arg(group)
    # Required string paths, in display order.
    for flag, help_text in (
        ('--model_load', 'Where to load the model'),
        ('--config_load', 'Where to load the config'),
        ('--vocab_load', 'Where to load the vocab'),
    ):
        group.add_argument(flag, type=str, required=True, help=help_text)
    group.add_argument('--n_samples', type=int, required=True,
                       help='Number of samples to sample')
    group.add_argument('--gen_save', type=str, required=True,
                       help='Where to save the gen molecules')
    group.add_argument('--n_batch', type=int, default=32,
                       help='Size of batch')
    group.add_argument('--max_len', type=int, default=100,
                       help='Max of length of SMILES')
    return parser
@pytest.mark.end_to_end()
def test_collect_task(runner, tmp_path):
    """``pytask collect`` lists modules and tasks; ``--nodes`` adds deps/products.

    Fix: the block started with the orphaned fragment ``.end_to_end()`` — the
    remnant of a stripped ``@pytest.mark.end_to_end()`` marker (a bare
    ``.end_to_end()`` line is a syntax error). Likewise the embedded task
    source contained ``.depends_on(...)`` / ``.produces(...)`` with the
    ``@pytask.mark`` prefix stripped; without those decorators the task has no
    dependency/product nodes and the ``--nodes`` assertions below cannot pass.
    """
    source = """
    import pytask

    @pytask.mark.depends_on("in.txt")
    @pytask.mark.produces("out.txt")
    def task_example():
        pass
    """
    tmp_path.joinpath('task_module.py').write_text(textwrap.dedent(source))
    tmp_path.joinpath('in.txt').touch()

    # Plain collect: modules and task functions only.
    result = runner.invoke(cli, ['collect', tmp_path.as_posix()])
    assert result.exit_code == ExitCode.OK
    captured = result.output.replace('\n', '').replace(' ', '')
    assert '<Module' in captured
    assert 'task_module.py>' in captured
    assert '<Function' in captured
    assert 'task_example>' in captured

    # With --nodes: dependencies and products are shown as well.
    result = runner.invoke(cli, ['collect', tmp_path.as_posix(), '--nodes'])
    assert result.exit_code == ExitCode.OK
    captured = result.output.replace('\n', '').replace(' ', '')
    assert '<Module' in captured
    assert 'task_module.py>' in captured
    assert '<Function' in captured
    assert 'task_example>' in captured
    assert '<Dependency' in captured
    assert 'in.txt>' in captured
    assert '<Product' in captured
    assert 'out.txt>' in captured
class SRDRM_gen(BaseSRModel):
    """SRDRM super-resolution generator: stacked 2x residual-upsampling stages.

    The scale factor (2x/4x/8x) is controlled by ``SCALE``; each factor of two
    beyond the first adds another ``res_mult_2x`` stage.
    """

    def __init__(self, lr_shape, hr_shape, SCALE=4):
        """lr_shape/hr_shape are low/high-resolution input shapes; SCALE is the
        overall upscaling factor."""
        super(SRDRM_gen, self).__init__('SRDRM', lr_shape, hr_shape, SCALE)
        self.n_residual_blocks = 8  # residual blocks per 2x stage
        self.gf = 64                # base filter count

    def residual_block(self, layer_input, filters):
        """Conv-BN-ReLU-Conv-BN with an identity skip connection."""
        d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(layer_input)
        d = BatchNormalization(momentum=0.5)(d)
        d = Activation('relu')(d)
        d = Conv2D(filters, kernel_size=3, strides=1, padding='same')(d)
        d = BatchNormalization(momentum=0.5)(d)
        d = Add()([d, layer_input])
        return d

    def deconv2d(self, layer_input):
        """2x nearest-neighbour upsampling followed by Conv-ReLU."""
        u = UpSampling2D(size=2)(layer_input)
        u = Conv2D(256, kernel_size=3, strides=1, padding='same')(u)
        u = Activation('relu')(u)
        return u

    def res_mult_2x(self, layer_input):
        """One full 2x stage: entry conv, residual stack, skip, then upsample."""
        l1 = Conv2D(64, kernel_size=4, strides=1, padding='same')(layer_input)
        l1 = Activation('relu')(l1)
        r = self.residual_block(l1, self.gf)
        for _ in range((self.n_residual_blocks - 1)):
            r = self.residual_block(r, self.gf)
        l2 = Conv2D(64, kernel_size=4, strides=1, padding='same')(r)
        l2 = BatchNormalization(momentum=0.8)(l2)
        l2 = Add()([l2, l1])  # long skip around the residual stack
        layer_2x = self.deconv2d(l2)
        return layer_2x

    def create_model(self):
        """Build the Keras Model: one 2x stage per factor of two in SCALE,
        then a tanh-activated 3-channel output conv."""
        init = Input(shape=self.lr_shape)
        o1 = self.res_mult_2x(init)
        # Additional stages only engage for SCALE >= 4 and >= 8 respectively.
        o2 = (o1 if (self.SCALE < 4) else self.res_mult_2x(o1))
        o3 = (o2 if (self.SCALE < 8) else self.res_mult_2x(o2))
        out = Conv2D(3, kernel_size=5, strides=1, padding='same', activation='tanh')(o3)
        return Model(init, out)
class Mode():
    """Bundle of a linker and an optimizer (rewrite query) for compilation.

    Linker/optimizer may be given as names (looked up in the module-level
    ``predefined_linkers`` / ``predefined_optimizers`` tables) or as objects.
    Pickling preserves only the *provided* (pre-resolution) values via
    ``__getstate__``/``__setstate__``.
    """

    def __init__(self, linker: Optional[Union[(str, Linker)]]=None, optimizer: Union[(str, RewriteDatabaseQuery)]='default', db: RewriteDatabase=None):
        if (linker is None):
            linker = config.linker
        if (isinstance(optimizer, str) and (optimizer == 'default')):
            optimizer = config.optimizer
        # Resolution of names to objects is delegated to __setstate__.
        self.__setstate__((linker, optimizer))
        if (db is None):
            # Fall back to the module-level rewrite database.
            global optdb
            self.optdb = optdb
        else:
            self.optdb = db

    def __getstate__(self):
        # Pickle only the originally-provided values, not resolved objects.
        return (self.provided_linker, self.provided_optimizer)

    def __setstate__(self, state):
        global optdb
        (linker, optimizer) = state
        # NOTE(review): optdb set here is the module global; a custom db passed
        # to __init__ is re-assigned after this call returns.
        self.optdb = optdb
        self.provided_linker = linker
        self.provided_optimizer = optimizer
        if (isinstance(linker, str) or (linker is None)):
            linker = predefined_linkers[linker]
        self.linker = linker
        if (isinstance(optimizer, str) or (optimizer is None)):
            optimizer = predefined_optimizers[optimizer]
        if isinstance(optimizer, RewriteDatabaseQuery):
            # Keep the resolved query as the provided value for future clones.
            self.provided_optimizer = optimizer
        self._optimizer = optimizer
        self.call_time = 0
        self.fn_time = 0

    def __str__(self):
        return f'{self.__class__.__name__}(linker={self.provided_linker}, optimizer={self.provided_optimizer}, optdb={self.optdb})'

    def __get_optimizer(self):
        # A query is evaluated lazily against optdb; anything else is returned as-is.
        if isinstance(self._optimizer, RewriteDatabaseQuery):
            return self.optdb.query(self._optimizer)
        else:
            return self._optimizer
    optimizer = property(__get_optimizer)

    def get_linker_optimizer(self, linker, optimizer):
        """Resolve name-or-object linker/optimizer to objects (no state change)."""
        if (isinstance(linker, str) or (linker is None)):
            linker = predefined_linkers[linker]
        if (isinstance(optimizer, str) or (optimizer is None)):
            optimizer = predefined_optimizers[optimizer]
        return (linker, optimizer)

    def including(self, *tags):
        """Return a clone whose optimizer query additionally includes *tags*."""
        (link, opt) = self.get_linker_optimizer(self.provided_linker, self.provided_optimizer)
        return self.clone(optimizer=opt.including(*tags), linker=link)

    def register(self, *optimizations):
        """Return a clone with extra optimizations registered on the query."""
        (link, opt) = self.get_linker_optimizer(self.provided_linker, self.provided_optimizer)
        return self.clone(optimizer=opt.register(*optimizations))

    def excluding(self, *tags):
        """Return a clone whose optimizer query excludes *tags*."""
        (link, opt) = self.get_linker_optimizer(self.provided_linker, self.provided_optimizer)
        return self.clone(optimizer=opt.excluding(*tags), linker=link)

    def requiring(self, *tags):
        """Return a clone whose optimizer query requires *tags*."""
        (link, opt) = self.get_linker_optimizer(self.provided_linker, self.provided_optimizer)
        return self.clone(optimizer=opt.requiring(*tags), linker=link)

    def clone(self, link_kwargs=None, optimizer='', **kwargs):
        """Create a new Mode with a cloned linker and (optionally) new optimizer.

        The empty-string default distinguishes "not given" from an explicit
        None optimizer.
        """
        if (link_kwargs is None):
            link_kwargs = {}
        new_linker = self.linker.clone(**link_kwargs)
        if (optimizer == ''):
            optimizer = self.provided_optimizer
        new_mode = type(self)(linker=new_linker, optimizer=optimizer)
        return new_mode
class TestImageNavigation():
    """Navigation test over a sampled image grid.

    Fix: the class body contained a stray bare ``()`` expression — the residue
    of a stripped ``@pytest.fixture()`` decorator. Without it, ``expected`` is
    a plain method and pytest cannot inject it as the ``expected`` fixture
    argument of ``test_get_lons_lats``. (``navigation_params`` is assumed to be
    a fixture defined elsewhere in the test module — confirm.)
    """

    @pytest.fixture()
    def expected(self):
        """Reference lon/lat grids for lines/pixels = [1000, 1500, 2000]."""
        exp = {
            'lon': [[(- 114.56923), (- 112.096837), (- 109.559702)],
                    [8.33221, 8.793893, 9.22339],
                    [15.918476, 16.268354, 16.6332]],
            'lat': [[(- 23.078721), (- 24.629845), (- 26.133314)],
                    [(- 42.513409), (- 39.790231), (- 37.06392)],
                    [3.342834, 6.07043, 8.795932]],
        }
        return exp

    def test_get_lons_lats(self, navigation_params, expected):
        """get_lons_lats must reproduce the reference grids."""
        (lons, lats) = nav.get_lons_lats(
            lines=np.array([1000, 1500, 2000]),
            pixels=np.array([1000, 1500, 2000]),
            nav_params=navigation_params,
        )
        np.testing.assert_allclose(lons, expected['lon'])
        np.testing.assert_allclose(lats, expected['lat'])
class TestGroupSearcher():
    """Congruence-test helpers that exercise search_groups with varied options.

    Each method issues a grouped search against COLLECTION_NAME; the caller
    compares results across client implementations. ``__test__ = False`` stops
    pytest from collecting this helper class as a test case.
    """
    __test__ = False

    def __init__(self):
        # Random query vectors, one per named vector space.
        self.query_text = np.random.random(text_vector_size).tolist()
        self.query_image = np.random.random(image_vector_size).tolist()
        self.query_code = np.random.random(code_vector_size).tolist()
        self.group_by = 'rand_digit'
        self.group_size = 1
        self.limit = 10

    def group_search(self, client: QdrantBase, query_vector: Union[(types.NumpyArray, Sequence[float], Tuple[(str, List[float])], types.NamedVector)]) -> models.GroupsResult:
        """Grouped search with a caller-supplied query vector."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=query_vector, with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), group_by=self.group_by, limit=self.limit, group_size=self.group_size)

    def group_search_text(self, client: QdrantBase) -> models.GroupsResult:
        """Grouped search against the named 'text' vector."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), group_by=self.group_by, limit=self.limit, group_size=self.group_size)

    def group_search_text_single(self, client: QdrantBase) -> models.GroupsResult:
        """Grouped search with a bare (unnamed) vector."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=self.query_text, with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), group_by=self.group_by, limit=self.limit, group_size=self.group_size)

    def group_search_image(self, client: QdrantBase) -> models.GroupsResult:
        """Grouped search against the named 'image' vector."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=('image', self.query_image), with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), group_by=self.group_by, limit=self.limit, group_size=self.group_size)

    def group_search_image_with_lookup(self, client: QdrantBase) -> models.GroupsResult:
        """Grouped search with a lookup collection given by name only."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=('image', self.query_image), with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), group_by=self.group_by, limit=self.limit, group_size=self.group_size, with_lookup=LOOKUP_COLLECTION_NAME)

    def group_search_image_with_lookup_2(self, client: QdrantBase) -> models.GroupsResult:
        """Grouped search with a fully-specified WithLookup selector."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=('image', self.query_image), with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), group_by=self.group_by, limit=self.limit, group_size=self.group_size, with_lookup=models.WithLookup(collection=LOOKUP_COLLECTION_NAME, with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), with_vectors=['image']))

    def group_search_code(self, client: QdrantBase) -> models.GroupsResult:
        """Grouped search against the named 'code' vector."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=('code', self.query_code), with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), group_by=self.group_by, limit=self.limit, group_size=self.group_size)

    def group_search_score_threshold(self, client: QdrantBase) -> models.GroupsResult:
        """Run three searches with different score thresholds and concatenate
        the groups into a single GroupsResult for comparison."""
        res1 = client.search_groups(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), limit=self.limit, group_by=self.group_by, score_threshold=0.9, group_size=self.group_size)
        res2 = client.search_groups(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), limit=self.limit, group_by=self.group_by, score_threshold=0.95, group_size=self.group_size)
        res3 = client.search_groups(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), limit=self.limit, group_by=self.group_by, score_threshold=0.1, group_size=self.group_size)
        return models.GroupsResult(groups=((res1.groups + res2.groups) + res3.groups))

    def group_search_text_select_payload(self, client: QdrantBase) -> models.GroupsResult:
        """Grouped search selecting an explicit payload key list."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=['text_array', 'nested.id'], limit=self.limit, group_by=self.group_by, group_size=self.group_size)

    def group_search_payload_exclude(self, client: QdrantBase) -> models.GroupsResult:
        """Grouped search excluding a larger payload key set."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), with_payload=models.PayloadSelectorExclude(exclude=['text_array', 'nested.id', 'city.geo', 'rand_number']), limit=self.limit, group_by=self.group_by, group_size=self.group_size)

    def group_search_image_select_vector(self, client: QdrantBase) -> models.GroupsResult:
        """Grouped search returning selected vectors and no payload."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=('image', self.query_image), with_payload=False, with_vectors=['image', 'code'], limit=self.limit, group_by=self.group_by, group_size=self.group_size)

    def filter_group_search_text(self, client: QdrantBase, query_filter: models.Filter) -> models.GroupsResult:
        """Filtered grouped search against the named 'text' vector."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=('text', self.query_text), query_filter=query_filter, with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), limit=self.limit, group_by=self.group_by, group_size=self.group_size)

    def filter_group_search_text_single(self, client: QdrantBase, query_filter: models.Filter) -> models.GroupsResult:
        """Filtered grouped search with a bare vector, returning all vectors."""
        return client.search_groups(collection_name=COLLECTION_NAME, query_vector=self.query_text, query_filter=query_filter, with_payload=models.PayloadSelectorExclude(exclude=['city.geo', 'rand_number']), with_vectors=True, limit=self.limit, group_by=self.group_by, group_size=self.group_size)
class AbstractCertificateErrorWrapper():
    """Base class wrapping a backend-specific TLS certificate error.

    Subclasses must implement __str__, __repr__, is_overridable and defer.
    The accept/reject decision is tri-state: unset (None) until one of
    accept_certificate/reject_certificate records it.
    """

    def __init__(self) -> None:
        # None = no decision recorded yet.
        self._certificate_accepted: Optional[bool] = None

    def __str__(self) -> str:
        raise NotImplementedError

    def __repr__(self) -> str:
        raise NotImplementedError

    def is_overridable(self) -> bool:
        """Whether the user may override this certificate error."""
        raise NotImplementedError

    def html(self) -> str:
        """HTML-escaped representation wrapped in a paragraph tag."""
        escaped = html.escape(str(self))
        return '<p>{}</p>'.format(escaped)

    def accept_certificate(self) -> None:
        """Record that the user accepted the certificate."""
        self._certificate_accepted = True

    def reject_certificate(self) -> None:
        """Record that the user rejected the certificate."""
        self._certificate_accepted = False

    def defer(self) -> None:
        raise NotImplementedError

    def certificate_was_accepted(self) -> bool:
        """Return the recorded decision; non-overridable errors are never accepted."""
        if not self.is_overridable():
            return False
        if self._certificate_accepted is None:
            raise ValueError('No decision taken yet')
        return self._certificate_accepted
@require_tf
class TFXLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model and pipeline tests for the TF XLM family.

    Fix: the class was preceded by the orphaned fragment ``_tf`` — the remnant
    of a stripped ``@require_tf`` decorator (a bare ``_tf`` before ``class`` is
    a syntax error); it is restored so the suite is skipped when TensorFlow is
    unavailable.
    """

    all_model_classes = (
        (TFXLMModel, TFXLMWithLMHeadModel, TFXLMForSequenceClassification,
         TFXLMForQuestionAnsweringSimple, TFXLMForTokenClassification, TFXLMForMultipleChoice)
        if is_tf_available() else ()
    )
    all_generative_model_classes = (TFXLMWithLMHeadModel,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            'feature-extraction': TFXLMModel,
            'fill-mask': TFXLMWithLMHeadModel,
            'question-answering': TFXLMForQuestionAnsweringSimple,
            'text-classification': TFXLMForSequenceClassification,
            'text-generation': TFXLMWithLMHeadModel,
            'token-classification': TFXLMForTokenClassification,
            'zero-shot': TFXLMForSequenceClassification,
        }
        if is_tf_available() else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        """Skip pipeline cases known to be incompatible with this model family."""
        if pipeline_test_casse_name == 'FillMaskPipelineTests':
            return True
        elif (pipeline_test_casse_name == 'QAPipelineTests') and (tokenizer_name is not None) and (not tokenizer_name.endswith('Fast')):
            return True
        return False

    def setUp(self):
        self.model_tester = TFXLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)

    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)

    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)

    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)

    def test_model_from_pretrained(self):
        for model_name in TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def test_windows_corner_case():
    """Every window function must reject out-of-range ``normalize`` values."""
    window_fns = [blackman, hanning, hamming, bartlett, trapezoid, rectangular]
    for window_fn in window_fns:
        for bad_normalize in ((- 1), 3):
            with pytest.raises(ValueError):
                window_fn(256, bad_normalize)
def flush():
    """Flush pending saved-data session changes under the session lock.

    Rolls back the session on any unexpected error and re-raises it;
    KeyboardInterrupt/SystemExit propagate without triggering a rollback
    handler of their own.

    Fix: the original re-raise used the Python-2-era pattern
    ``raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])``, which
    constructs a *new* exception with the old exception instance as its sole
    argument (mangling ``args`` and losing ``__cause__``/attributes). In
    Python 3 a bare ``raise`` inside the handler re-raises the original
    exception with its traceback intact.
    """
    with sd_lock:
        try:
            saveddata_session.flush()
        except (KeyboardInterrupt, SystemExit):
            # Never swallow interpreter-shutdown signals.
            raise
        except Exception:
            saveddata_session.rollback()
            raise
class Branch(ControlOp):
    """IR control op: two-way conditional branch on a register value.

    Jumps to ``true`` or ``false`` depending on ``op`` applied to ``value``;
    ``negated`` flips the sense of the test.
    """
    # Evaluating the branch itself cannot raise.
    error_kind = ERR_NEVER

    # Branch condition kinds: truthiness test vs. error-value test.
    BOOL: Final = 100
    IS_ERROR: Final = 101

    def __init__(self, value: Value, true_label: BasicBlock, false_label: BasicBlock, op: int, line: int=(- 1), *, rare: bool=False) -> None:
        super().__init__(line)
        self.value = value
        self.true = true_label
        self.false = false_label
        self.op = op            # one of BOOL / IS_ERROR
        self.negated = False
        # Optional (name, line) used when generating a traceback entry.
        self.traceback_entry: (tuple[(str, int)] | None) = None
        self.rare = rare        # hint: this branch is rarely taken

    def targets(self) -> Sequence[BasicBlock]:
        return (self.true, self.false)

    def set_target(self, i: int, new: BasicBlock) -> None:
        # i == 0 replaces the true target, i == 1 the false target.
        assert ((i == 0) or (i == 1))
        if (i == 0):
            self.true = new
        else:
            self.false = new

    def sources(self) -> list[Value]:
        return [self.value]

    def invert(self) -> None:
        """Flip the sense of the condition (without swapping the targets)."""
        self.negated = (not self.negated)

    def accept(self, visitor: OpVisitor[T]) -> T:
        return visitor.visit_branch(self)
class BertGenerationTokenizer(PreTrainedTokenizer):
    """SentencePiece-based tokenizer for BertGeneration models.

    Tokens map 1:1 onto SentencePiece piece ids; the processor is rebuilt
    after unpickling since it is not picklable itself.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self, vocab_file, bos_token='<s>', eos_token='</s>', unk_token='<unk>', pad_token='<pad>', sep_token='<::::>', sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        # Extra kwargs forwarded to SentencePieceProcessor (e.g. sampling options).
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary.

        Bug fix: restored the ``@property`` decorator — ``get_vocab()``
        iterates ``range(self.vocab_size)``, which requires an int, not a
        bound method.
        """
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it here and
        # rebuild it in __setstate__.
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # Backward compatibility: older pickles lack sp_model_kwargs.
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string into SentencePiece pieces."""
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to its vocabulary id."""
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Convert a vocabulary id to its token (str)."""
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        """Join pieces back into text, passing special tokens through verbatim."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            if token in self.all_special_tokens:
                # Flush accumulated pieces, then append the special token as-is.
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into ``save_directory``.

        Returns a 1-tuple with the written path, or None (with an error
        logged) when ``save_directory`` is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(save_directory, ((filename_prefix + '-') if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # The original file is gone; serialize the in-memory model instead.
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
class CouplingLayer(nn.Module): def __init__(self, num_inputs, num_hidden, mask=None, act=nn.LeakyReLU, s_act_func=nn.Tanh, t_act_func=None): super(CouplingLayer, self).__init__() self.num_inputs = num_inputs if (mask is None): mask = (torch.arange(0, num_inputs) % 2).type(torch.float32) self.mask = nn.Parameter(mask, requires_grad=False) self.scale_net = gen_fc_net(num_inputs, num_hidden, act=act, final_act=s_act_func) self.translate_net = gen_fc_net(num_inputs, num_hidden, act=act, final_act=t_act_func) def forward(self, x, cond_inputs=None, mode='direct'): mask = self.mask inv_mask = (1.0 - mask) masked_x = (x * mask) log_s = (self.scale_net(masked_x) * inv_mask) t = (self.translate_net(masked_x) * inv_mask) if (mode == 'direct'): z = (((inv_mask * (x - t)) * torch.exp((- log_s))) + masked_x) else: z = ((inv_mask * ((x * torch.exp(log_s)) + t)) + masked_x) return (z, log_s.sum(dim=1, keepdim=False)) def sample(self, num_samples, noise=None): if (noise is None): noise = torch.Tensor(num_samples, self.num_inputs).normal_() samples = self.forward(noise, mode='inverse')[0] return samples
class ConvDecoder(tf.Module): def __init__(self, shape, depth=32, activation=tf.nn.relu, dist='normal'): super(ConvDecoder, self).__init__() self._shape = shape self._dist = dist self._depth = depth self._dense = tf.keras.layers.Dense((32 * depth)) self._layers = tf.keras.Sequential([tf.keras.layers.Conv2DTranspose((4 * depth), 5, activation=activation, strides=2), tf.keras.layers.Conv2DTranspose((2 * depth), 5, activation=activation, strides=2), tf.keras.layers.Conv2DTranspose(depth, 6, activation=activation, strides=2), tf.keras.layers.Conv2DTranspose(self._shape[(- 1)], 6, strides=2)]) def __call__(self, inputs): x = self._dense(inputs) x = tf.reshape(x, [(- 1), 1, 1, (32 * self._depth)]) x = self._layers(x) if tf.reduce_any((tf.shape(x)[(- 3):] != self._shape)): x = tf.keras.layers.UpSampling2D()(x) x = tf.reshape(x, tf.concat([tf.shape(inputs)[:(- 1)], self._shape], 0)) if (self._dist == 'normal'): return tfd.Independent(tfd.Normal(x, 1.0), len(self._shape)) elif (self._dist == 'bernoulli'): return tfd.Independent(tfd.Bernoulli(x, dtype=tf.float32), len(self._shape))
() def restart() -> None: try: ok = instance.restart(session='_restart') except sessions.SessionError as e: log.destroy.exception('Failed to save session!') raise cmdutils.CommandError('Failed to save session: {}!'.format(e)) except SyntaxError as e: log.destroy.exception('Got SyntaxError') raise cmdutils.CommandError('SyntaxError in {}:{}: {}'.format(e.filename, e.lineno, e)) if ok: instance.shutdown(is_restart=True)
class BlockItem(scrapy.Item): block_hash = scrapy.Field() block_number = scrapy.Field() parent_hash = scrapy.Field() difficulty = scrapy.Field() total_difficulty = scrapy.Field() size = scrapy.Field() transaction_hashes = scrapy.Field() gas_limit = scrapy.Field() gas_used = scrapy.Field() miner = scrapy.Field() receipts_root = scrapy.Field() timestamp = scrapy.Field() logs_bloom = scrapy.Field() nonce = scrapy.Field()
def parse_args(): parser = argparse.ArgumentParser(description='Generate training and validation set of ArT ') parser.add_argument('root_path', help='Root dir path of ArT') parser.add_argument('--val-ratio', help='Split ratio for val set', default=0.0, type=float) args = parser.parse_args() return args
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor): if isinstance(state_dict, dict): cpu_dict = OrderedDict() for (k, v) in state_dict.items(): cpu_dict[k] = convert_state_dict_type(v) return cpu_dict elif isinstance(state_dict, list): return [convert_state_dict_type(v) for v in state_dict] elif torch.is_tensor(state_dict): return state_dict.type(ttype) else: return state_dict
def run(): observable = 'Y3' vals = [0.8, 1.0, 1.2] solver = ScipyOdeSimulator(model, tspan) sens = InitialsSensitivity(values_to_sample=vals, observable=observable, objective_function=obj_func_cell_cycle, solver=solver) sens.run() sens.create_individual_pairwise_plots(save_name='pairwise_individual', out_dir='tyson_sensitivity') sens.create_plot_p_h_pprime(save_name='matrices', out_dir='tyson_sensitivity') sens.create_boxplot_and_heatplot(save_name='tyson_sensitivity', out_dir='tyson_sensitivity', show=False) print('Results saved in tyson_sensitivity directory')
def disk_info(): logdir = dsz.lp.GetLogsDirectory() projectdir = os.path.split(logdir)[0] infofile = os.path.join(projectdir, 'disk-version.txt') if os.path.exists(infofile): dsz.ui.Echo(('Disk version already logged; if you switched disks for some reason, rename %s and restart the LP please.' % infofile), dsz.GOOD) return True opsdisk_root = os.path.normpath((dsz.lp.GetResourcesDirectory() + '/..')) dszfiles = util.listdir(opsdisk_root, '^DSZOpsDisk-.+\\.zip$') disk = None if (len(dszfiles) == 1): disk = dszfiles[0] elif (len(dszfiles) > 1): menu = util.menu.Menu('Found mulitple DSZOpsDisk zips:', dszfiles, None, 'Which one are you executing? ') index = (menu.show()[0] - 1) if ((index > 0) and (index < len(dszfiles))): disk = dszfiles[index] else: dsz.ui.Echo('Could not determine which opsdisk is running. Version NOT recorded.', dsz.ERROR) return False else: dsz.ui.Echo('Could not find DSZOpsDisk zip. Disk version NOT recorded.', dsz.ERROR) return False with open(infofile, 'w') as output: output.write(('%s\n' % disk)) dsz.ui.Echo(('Disk version %s recorded to %s.' % (disk, infofile)), dsz.GOOD) return True
def start_test_server(): pywebio.enable_debug() from flask import Flask, send_from_directory from pywebio.platform.flask import webio_view, run_event_loop from pywebio import STATIC_PATH import threading import logging app = Flask(__name__) app.add_url_rule('/io', 'webio_view', webio_view(target, cdn=False), methods=['GET', 'POST', 'OPTIONS']) app.add_url_rule('/io2', 'webio_view_async_target', webio_view(async_target, cdn=False), methods=['GET', 'POST', 'OPTIONS']) ('/') ('/<path:static_file>') def serve_static_file(static_file='index.html'): return send_from_directory(STATIC_PATH, static_file) threading.Thread(target=run_event_loop, daemon=True).start() logging.getLogger('werkzeug').setLevel(logging.WARNING) app.run(port=8080, host='127.0.0.1')
class TestExists(): .parametrize('absolute', [True, False]) def test_existent(self, tmp_path, absolute): session_dir = (tmp_path / 'sessions') abs_session = (tmp_path / 'foo.yml') rel_session = (session_dir / 'foo.yml') session_dir.mkdir() abs_session.touch() rel_session.touch() man = sessions.SessionManager(str(session_dir)) if absolute: name = str(abs_session) else: name = 'foo' assert man.exists(name) .parametrize('absolute', [True, False]) def test_inexistent(self, tmp_path, absolute): man = sessions.SessionManager(str(tmp_path)) if absolute: name = str((tmp_path / 'foo')) else: name = 'foo' assert (not man.exists(name))
def _main(): parser = argparse.ArgumentParser(description='Find any stray release notes.') _args = parser.parse_args() files = discover_files() with multiprocessing.Pool() as pool: res = pool.map(validate_path, files) failed_files = [x for x in res if (x is not None)] if (len(failed_files) > 0): for failed_file in failed_files: sys.stderr.write(f'''{failed_file} is not in the correct location. ''') sys.exit(1) sys.exit(0)
class ShardEstimator(abc.ABC): def __init__(self, topology: Topology, constraints: Optional[Dict[(str, ParameterConstraints)]]=None) -> None: ... def estimate(self, sharding_options: List[ShardingOption], sharder_map: Optional[Dict[(str, ModuleSharder[nn.Module])]]=None) -> None: ...
def fci(dataset: ndarray, independence_test_method: str=fisherz, alpha: float=0.05, depth: int=(- 1), max_path_length: int=(- 1), verbose: bool=False, background_knowledge: (BackgroundKnowledge | None)=None, show_progress: bool=True, **kwargs) -> Tuple[(Graph, List[Edge])]:
    """Run the FCI (Fast Causal Inference) algorithm on `dataset`.

    Args:
        dataset: data matrix; rows are samples, columns are variables.
        independence_test_method: name of the conditional-independence test
            (wrapped into a CIT object bound to `dataset` below).
        alpha: significance level for the CI tests.
        depth: maximum conditioning-set size in the adjacency search
            (-1 for unlimited).
        max_path_length: maximum discriminating-path length for rule R4B
            (-1 for unlimited).
        verbose: print per-epoch progress.
        background_knowledge: optional required/forbidden edge constraints.
        show_progress: show the adjacency-search progress bar.

    Returns:
        (graph, edges): the resulting PAG and its colored edge list.

    Raises:
        TypeError: if depth, max_path_length, or background_knowledge have
            the wrong type.
    """
    if (dataset.shape[0] < dataset.shape[1]):
        warnings.warn('The number of features is much larger than the sample size!')
    # Bind the chosen CI test to the dataset (rebinds the parameter name).
    independence_test_method = CIT(dataset, method=independence_test_method, **kwargs)
    if ((depth is None) or (type(depth) != int)):
        raise TypeError("'depth' must be 'int' type!")
    if ((background_knowledge is not None) and (type(background_knowledge) != BackgroundKnowledge)):
        raise TypeError("'background_knowledge' must be 'BackgroundKnowledge' type!")
    if (type(max_path_length) != int):
        raise TypeError("'max_path_length' must be 'int' type!")
    # One graph node per column, named X1..Xn, tagged with its column index.
    nodes = []
    for i in range(dataset.shape[1]):
        node = GraphNode(f'X{(i + 1)}')
        node.add_attribute('id', i)
        nodes.append(node)
    # Fast adjacency search: skeleton plus separating sets.
    (graph, sep_sets, test_results) = fas(dataset, nodes, independence_test_method=independence_test_method, alpha=alpha, knowledge=background_knowledge, depth=depth, verbose=verbose, show_progress=show_progress)
    reorientAllWith(graph, Endpoint.CIRCLE)
    # Orient unshielded colliders (rule 0), then prune via possible-d-sep.
    rule0(graph, nodes, sep_sets, background_knowledge, verbose)
    removeByPossibleDsep(graph, independence_test_method, alpha, sep_sets)
    # Re-orient from scratch on the pruned skeleton.
    reorientAllWith(graph, Endpoint.CIRCLE)
    rule0(graph, nodes, sep_sets, background_knowledge, verbose)
    # Apply orientation rules R1-R4B until a fixed point is reached.
    change_flag = True
    first_time = True
    while change_flag:
        change_flag = False
        change_flag = rulesR1R2cycle(graph, background_knowledge, change_flag, verbose)
        change_flag = ruleR3(graph, sep_sets, background_knowledge, change_flag, verbose)
        # R4B is expensive; run it when something changed, or once up front
        # when background knowledge supplies all three constraint kinds.
        if (change_flag or (first_time and (background_knowledge is not None) and (len(background_knowledge.forbidden_rules_specs) > 0) and (len(background_knowledge.required_rules_specs) > 0) and (len(background_knowledge.tier_map.keys()) > 0))):
            change_flag = ruleR4B(graph, max_path_length, dataset, independence_test_method, alpha, sep_sets, change_flag, background_knowledge, verbose)
        first_time = False
        if verbose:
            print('Epoch')
    # Mark the result as a PAG and extract the colored edge list.
    graph.set_pag(True)
    edges = get_color_edges(graph)
    return (graph, edges)
def test_add_no_constraint(app: PoetryTestApplication, repo: TestRepository, tester: CommandTester) -> None: repo.add_package(get_package('cachy', '0.1.0')) repo.add_package(get_package('cachy', '0.2.0')) tester.execute('cachy') expected = 'Using version ^0.2.0 for cachy\n\nUpdating dependencies\nResolving dependencies...\n\nPackage operations: 1 install, 0 updates, 0 removals\n\n - Installing cachy (0.2.0)\n\nWriting lock file\n' assert (tester.io.fetch_output() == expected) assert isinstance(tester.command, InstallerCommand) assert (tester.command.installer.executor.installations_count == 1) pyproject: dict[(str, Any)] = app.poetry.file.read() content = pyproject['tool']['poetry'] assert ('cachy' in content['dependencies']) assert (content['dependencies']['cachy'] == '^0.2.0')
class LmdbBackend(BaseStorageBackend): def __init__(self, db_path, readonly=True, lock=False, readahead=False, **kwargs): try: import lmdb except ImportError: raise ImportError('Please install lmdb to enable LmdbBackend.') self.db_path = str(db_path) self._client = lmdb.open(self.db_path, readonly=readonly, lock=lock, readahead=readahead, **kwargs) def get(self, filepath): filepath = str(filepath) with self._client.begin(write=False) as txn: value_buf = txn.get(filepath.encode('ascii')) return value_buf def get_text(self, filepath, encoding=None): raise NotImplementedError
class AmplLexer(RegexLexer):
    """Pygments lexer for AMPL (A Mathematical Programming Language) scripts."""

    name = 'Ampl'
    # Bug fix: the URL string literal had been truncated to a bare opening
    # quote, breaking the class body; restored to the AMPL homepage.
    url = 'https://ampl.com/'
    aliases = ['ampl']
    filenames = ['*.run']
    version_added = '2.2'

    tokens = {
        'root': [
            # Whitespace and comments.
            ('\\n', Text),
            ('\\s+', Whitespace),
            ('#.*?\\n', Comment.Single),
            ('/[*](.|\\n)*?[*]/', Comment.Multiline),
            # Reserved command/control keywords.
            (words(('call', 'cd', 'close', 'commands', 'data', 'delete',
                    'display', 'drop', 'end', 'environ', 'exit', 'expand',
                    'include', 'load', 'model', 'objective', 'option',
                    'problem', 'purge', 'quit', 'redeclare', 'reload',
                    'remove', 'reset', 'restore', 'shell', 'show',
                    'solexpand', 'solution', 'solve', 'update', 'unload',
                    'xref', 'coeff', 'coef', 'cover', 'obj', 'interval',
                    'default', 'from', 'to', 'to_come', 'net_in', 'net_out',
                    'dimen', 'dimension', 'check', 'complements', 'write',
                    'function', 'pipe', 'format', 'if', 'then', 'else', 'in',
                    'while', 'repeat', 'for'), suffix='\\b'),
             Keyword.Reserved),
            ('(integer|binary|symbolic|ordered|circular|reversed|INOUT|IN|OUT|LOCAL)', Keyword.Type),
            # String literals.
            ('\\".*?\\"', String.Double),
            ("\\'.*?\\'", String.Single),
            ('[()\\[\\]{},;:]+', Punctuation),
            # Entity suffixes such as x.lb, x.ub, x.val, ...
            ('\\b(\\w+)(\\.)(astatus|init0|init|lb0|lb1|lb2|lb|lrc|lslack|rc|relax|slack|sstatus|status|ub0|ub1|ub2|ub|urc|uslack|val)',
             bygroups(Name.Variable, Punctuation, Keyword.Reserved)),
            # Declarations followed by the declared name.
            ('(set|param|var|arc|minimize|maximize|subject to|s\\.t\\.|subj to|node|table|suffix|read table|write table)(\\s+)(\\w+)',
             bygroups(Keyword.Declaration, Whitespace, Name.Variable)),
            ('(param)(\\s*)(:)(\\s*)(\\w+)(\\s*)(:)(\\s*)((\\w|\\s)+)',
             bygroups(Keyword.Declaration, Whitespace, Punctuation, Whitespace, Name.Variable, Whitespace, Punctuation, Whitespace, Name.Variable)),
            ('(let|fix|unfix)(\\s*)((?:\\{.*\\})?)(\\s*)(\\w+)',
             bygroups(Keyword.Declaration, Whitespace, using(this), Whitespace, Name.Variable)),
            # Built-in functions.
            (words(('abs', 'acos', 'acosh', 'alias', 'asin', 'asinh', 'atan',
                    'atan2', 'atanh', 'ceil', 'ctime', 'cos', 'exp', 'floor',
                    'log', 'log10', 'max', 'min', 'precision', 'round', 'sin',
                    'sinh', 'sqrt', 'tan', 'tanh', 'time', 'trunc', 'Beta',
                    'Cauchy', 'Exponential', 'Gamma', 'Irand224', 'Normal',
                    'Normal01', 'Poisson', 'Uniform', 'Uniform01', 'num',
                    'num0', 'ichar', 'char', 'length', 'substr', 'sprintf',
                    'match', 'sub', 'gsub', 'print', 'printf', 'next',
                    'nextw', 'prev', 'prevw', 'first', 'last', 'ord', 'ord0',
                    'card', 'arity', 'indexarity'),
                   prefix='\\b', suffix='\\b'),
             Name.Builtin),
            # Operators.
            ('(\\+|\\-|\\*|/|\\*\\*|=|<=|>=|==|\\||\\^|<|>|\\!|\\.\\.|:=|\\&|\\!=|<<|>>)', Operator),
            # Reserved logical/set keywords.
            (words(('or', 'exists', 'forall', 'and', 'in', 'not', 'within',
                    'union', 'diff', 'difference', 'symdiff', 'inter',
                    'intersect', 'intersection', 'cross', 'setof', 'by',
                    'less', 'sum', 'prod', 'product', 'div', 'mod'),
                   suffix='\\b'),
             Keyword.Reserved),
            # Numbers.
            ('(\\d+\\.(?!\\.)\\d*|\\.(?!.)\\d+)([eE][+-]?\\d+)?', Number.Float),
            ('\\d+([eE][+-]?\\d+)?', Number.Integer),
            ('[+-]?Infinity', Number.Integer),
            # Anything else (identifiers, single dots).
            ('(\\w+|(\\.(?!\\.)))', Text),
        ],
    }
def test_poetry_with_non_default_multiple_sources_legacy(fixture_dir: FixtureDirGetter, with_simple_keyring: None) -> None: poetry = Factory().create_poetry(fixture_dir('with_non_default_multiple_sources_legacy')) assert (not poetry.pool.has_default()) assert poetry.pool.has_repository('bar') assert isinstance(poetry.pool.repository('bar'), LegacyRepository) assert poetry.pool.has_repository('PyPI') assert (poetry.pool.get_priority('PyPI') is Priority.SECONDARY) assert isinstance(poetry.pool.repository('PyPI'), PyPiRepository) assert poetry.pool.has_repository('foo') assert isinstance(poetry.pool.repository('foo'), LegacyRepository) assert ({repo.name for repo in poetry.pool.repositories} == {'bar', 'PyPI', 'foo'})
def list_atoms(d, re_obj, low, high): while (low <= high): try: val = d.get_atom_name(low) if (re_obj == None): print_atom(options.format, low, val) elif (re_obj.match(val) != None): print_atom(options.format, low, val) low += 1 except: sys.exit(0)
def getTackledSpeed(src, tgt, currentUntackledSpeed, srcScramRange, tgtScrammables, webMods, webDrones, webFighters, distance):
    """Compute the target's speed under the attacker's webs/scrams at `distance`.

    The untackled speed ratio (current/max) is preserved while max velocity
    is recomputed with web multipliers applied and scrammable afflictors
    ignored. Returns the resulting tackled speed (error-corrected float).
    NOTE(review): reconstructed nesting of the trailing while-loop from
    flattened source — confirm against upstream pyfa.
    """
    # Ships that disallow offensive modifiers cannot be tackled at all.
    if (tgt.isFit and tgt.item.ship.getModifiedItemAttr('disallowOffensiveModifiers')):
        return currentUntackledSpeed
    maxUntackledSpeed = tgt.getMaxVelocity()
    if (maxUntackledSpeed == 0):
        return maxUntackledSpeed
    inLockRange = checkLockRange(src=src, distance=distance)
    inDroneRange = checkDroneControlRange(src=src, distance=distance)
    # Fraction of max speed the target is currently using; kept constant.
    speedRatio = (currentUntackledSpeed / maxUntackledSpeed)
    # Scram only applies when we can lock and the target is within scram range.
    if ((not inLockRange) or (srcScramRange is None) or ((distance is not None) and (distance > srcScramRange))):
        tgtScrammables = ()
    # stacking group -> [(multiplier, resistance attr id), ...]
    appliedMultipliers = {}
    if inLockRange:
        # Module webs: boost falls off with range.
        for wData in webMods:
            appliedBoost = (wData.boost * calculateRangeFactor(srcOptimalRange=wData.optimal, srcFalloffRange=wData.falloff, distance=distance))
            if appliedBoost:
                appliedMultipliers.setdefault(wData.stackingGroup, []).append(((1 + (appliedBoost / 100)), wData.resAttrID))
    maxTackledSpeed = tgt.getMaxVelocity(extraMultipliers=appliedMultipliers, ignoreAfflictors=tgtScrammables)
    currentTackledSpeed = (maxTackledSpeed * speedRatio)
    # Mobile webbers: fighters need lock; drones also need drone-control range.
    mobileWebs = []
    if inLockRange:
        mobileWebs.extend(webFighters)
    if (inLockRange and inDroneRange):
        mobileWebs.extend(webDrones)
    atkRadius = src.getRadius()
    # Mobile webbers whose optimal already covers the target get applied at
    # full strength immediately.
    longEnoughMws = [mw for mw in mobileWebs if ((distance is None) or (distance <= ((mw.optimal - atkRadius) + mw.radius)))]
    if longEnoughMws:
        for mwData in longEnoughMws:
            appliedMultipliers.setdefault(mwData.stackingGroup, []).append(((1 + (mwData.boost / 100)), mwData.resAttrID))
            mobileWebs.remove(mwData)
        maxTackledSpeed = tgt.getMaxVelocity(extraMultipliers=appliedMultipliers, ignoreAfflictors=tgtScrammables)
        currentTackledSpeed = (maxTackledSpeed * speedRatio)
    droneOpt = GraphSettings.getInstance().get('mobileDroneMode')
    # Apply the remaining mobile webbers fastest-first: each application slows
    # the target, which may let slower webbers catch up on later iterations.
    while mobileWebs:
        fastestMwSpeed = max(mobileWebs, key=(lambda mw: mw.speed)).speed
        fastestMws = [mw for mw in mobileWebs if (mw.speed == fastestMwSpeed)]
        for mwData in fastestMws:
            if (((droneOpt == GraphDpsDroneMode.auto) and (mwData.speed >= currentTackledSpeed)) or (droneOpt == GraphDpsDroneMode.followTarget)):
                # Webber keeps up with the target: full boost.
                appliedMwBoost = mwData.boost
            else:
                # Webber trails behind: boost is range-attenuated.
                if (distance is None):
                    rangeFactorDistance = None
                else:
                    rangeFactorDistance = ((distance + atkRadius) - mwData.radius)
                appliedMwBoost = (mwData.boost * calculateRangeFactor(srcOptimalRange=mwData.optimal, srcFalloffRange=mwData.falloff, distance=rangeFactorDistance))
            appliedMultipliers.setdefault(mwData.stackingGroup, []).append(((1 + (appliedMwBoost / 100)), mwData.resAttrID))
            mobileWebs.remove(mwData)
        maxTackledSpeed = tgt.getMaxVelocity(extraMultipliers=appliedMultipliers, ignoreAfflictors=tgtScrammables)
        currentTackledSpeed = (maxTackledSpeed * speedRatio)
    return floatUnerr(currentTackledSpeed)
def test_flops_to_string(): flops = (6.54321 * (10.0 ** 9)) assert (flops_to_string(flops) == '6.54 GFLOPs') assert (flops_to_string(flops, 'MFLOPs') == '6543.21 MFLOPs') assert (flops_to_string(flops, 'KFLOPs') == '6543210.0 KFLOPs') assert (flops_to_string(flops, 'FLOPs') == '.0 FLOPs') assert (flops_to_string(flops, precision=4) == '6.5432 GFLOPs') flops = (6.54321 * (10.0 ** 9)) assert (flops_to_string(flops, None) == '6.54 GFLOPs') flops = (3.21 * (10.0 ** 7)) assert (flops_to_string(flops, None) == '32.1 MFLOPs') flops = (5.4 * (10.0 ** 3)) assert (flops_to_string(flops, None) == '5.4 KFLOPs') flops = 987 assert (flops_to_string(flops, None) == '987 FLOPs')
def generate_thumbnail(original_image: Union[(FileLike, StreamDescriptor)], width: int=None, height: int=None, ratio: float=None, ratio_precision: int=5, thumbnail_type: Type[Thumbnail]=Thumbnail) -> Tuple[(int, int, float, Thumbnail)]: (width, height, ratio) = validate_width_height_ratio(width, height, ratio) thumbnail_buffer = io.BytesIO() format_ = 'jpeg' extension = '.jpg' img = PilImage.open(original_image) if (img.mode == 'RGBA'): format_ = 'png' extension = '.png' with img: original_size = img.size if callable(width): width = width(original_size) if callable(height): height = height(original_size) width = int(width) height = int(height) thumbnail_image = img.resize((width, height)) thumbnail_image.save(thumbnail_buffer, format_) thumbnail_buffer.seek(0) ratio = round((width / original_size[0]), ratio_precision) thumbnail = thumbnail_type.create_from(thumbnail_buffer, content_type=f'image/{format_}', extension=extension, dimension=(width, height)) return (width, height, ratio, thumbnail)
class FlightAdminForm(FlightMixin, forms.ModelForm): class Meta(): model = Flight fields = ('name', 'slug', 'campaign', 'start_date', 'end_date', 'hard_stop', 'live', 'priority_multiplier', 'pacing_interval', 'prioritize_ads_ctr', 'cpc', 'sold_clicks', 'cpm', 'sold_impressions', 'targeting_parameters', 'traffic_fill', 'traffic_cap', 'discount')
.parametrize('metadata_version', [None, '0.1', '0.2']) def test_inject_simple_legacy_venv(pipx_temp_env, capsys, metadata_version): assert (not run_pipx_cli(['install', 'pycowsay'])) mock_legacy_venv('pycowsay', metadata_version=metadata_version) if (metadata_version is not None): assert (not run_pipx_cli(['inject', 'pycowsay', PKG['black']['spec']])) else: assert run_pipx_cli(['inject', 'pycowsay', PKG['black']['spec']]) assert ('Please uninstall and install' in capsys.readouterr().err)
class Migration(migrations.Migration): dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('projects', '0011_refactoring')] operations = [migrations.CreateModel(name='Membership', fields=[('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('role', models.CharField(choices=[('owner', 'Owner'), ('manager', 'Manager'), ('author', 'Author'), ('guest', 'Guest')], help_text='The role for this membership.', max_length=12, verbose_name='Role'))], options={'ordering': ('project__title',), 'verbose_name': 'Membership', 'verbose_name_plural': 'Memberships'}), migrations.AlterField(model_name='project', name='owner', field=models.ManyToManyField(help_text='The list of owners for this project.', related_name='_project_owner_+', to=settings.AUTH_USER_MODEL, verbose_name='Owner')), migrations.AddField(model_name='membership', name='project', field=models.ForeignKey(help_text='The project for this membership.', on_delete=django.db.models.deletion.CASCADE, to='projects.Project', verbose_name='Project')), migrations.AddField(model_name='membership', name='user', field=models.ForeignKey(help_text='The user for this membership.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')), migrations.AddField(model_name='project', name='user', field=models.ManyToManyField(help_text='The list of users for this project.', through='projects.Membership', to=settings.AUTH_USER_MODEL, verbose_name='User'))]
class Effect11953(BaseEffect): type = ('projected', 'passive') def handler(fit, beacon, context, projectionRange, **kwargs): fit.modules.filteredItemMultiply((lambda mod: mod.item.requiresSkill('Vorton Projector Operation')), 'aoeVelocity', beacon.getModifiedItemAttr('aoeVelocityMultiplier'), stackingPenalties=True, penaltyGroup='postMul', **kwargs)
def clean(opts): for s in [p.root_ca_path(), p.intermediate_ca_path('1'), p.intermediate_ca_path('2'), p.result_path(), p.leaf_pair_path('server'), p.leaf_pair_path('client')]: print('Removing {}'.format(s)) try: shutil.rmtree(s) except FileNotFoundError: pass